From cc0c71619c3565ada7f5e7d5ef4093cc97f1da46 Mon Sep 17 00:00:00 2001 From: Roo Code Date: Sat, 7 Feb 2026 15:06:08 +0000 Subject: [PATCH] feat(vscode-lm): add Copilot modelOptions for agent request classification When the VS Code LM model vendor is "copilot", attach modelOptions with copilot-integration-id to sendRequest calls. This allows the Copilot backend to distinguish agent-initiated requests from direct user chat, which may help with proper billing classification of premium vs non-premium requests. Requires VS Code >= 1.96 where modelOptions became stable; older versions silently ignore the extra property. Refs: #11289 --- src/api/providers/__tests__/vscode-lm.spec.ts | 105 ++++++++++++++++++ src/api/providers/vscode-lm.ts | 49 +++++++- 2 files changed, 151 insertions(+), 3 deletions(-) diff --git a/src/api/providers/__tests__/vscode-lm.spec.ts b/src/api/providers/__tests__/vscode-lm.spec.ts index 305305d2289..640309154fb 100644 --- a/src/api/providers/__tests__/vscode-lm.spec.ts +++ b/src/api/providers/__tests__/vscode-lm.spec.ts @@ -391,6 +391,63 @@ describe("VsCodeLmHandler", () => { await expect(handler.createMessage(systemPrompt, messages).next()).rejects.toThrow("API Error") }) + + it("should include modelOptions when model vendor is copilot", async () => { + // Create a Copilot-vendor mock model + const copilotModel = { + ...mockLanguageModelChat, + vendor: "copilot", + name: "GPT-4o", + } + handler["client"] = copilotModel as any + + const systemPrompt = "You are a helpful assistant" + const messages: Anthropic.Messages.MessageParam[] = [{ role: "user" as const, content: "Hello" }] + + copilotModel.sendRequest.mockResolvedValueOnce({ + stream: (async function* () { + yield new vscode.LanguageModelTextPart("Hi there") + })(), + }) + copilotModel.countTokens.mockResolvedValue(5) + + const stream = handler.createMessage(systemPrompt, messages) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + 
expect(copilotModel.sendRequest).toHaveBeenCalledWith( + expect.any(Array), + expect.objectContaining({ + modelOptions: { "copilot-integration-id": "roo-code" }, + }), + expect.anything(), + ) + }) + + it("should NOT include modelOptions when model vendor is not copilot", async () => { + const systemPrompt = "You are a helpful assistant" + const messages: Anthropic.Messages.MessageParam[] = [{ role: "user" as const, content: "Hello" }] + + mockLanguageModelChat.sendRequest.mockResolvedValueOnce({ + stream: (async function* () { + yield new vscode.LanguageModelTextPart("Hi there") + })(), + }) + mockLanguageModelChat.countTokens.mockResolvedValue(5) + + const stream = handler.createMessage(systemPrompt, messages) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + // sendRequest should have been called without modelOptions + const callArgs = mockLanguageModelChat.sendRequest.mock.calls[0] + const requestOptions = callArgs[1] + expect(requestOptions.modelOptions).toBeUndefined() + }) }) describe("getModel", () => { @@ -535,5 +592,53 @@ describe("VsCodeLmHandler", () => { const promise = handler.completePrompt("Test prompt") await expect(promise).rejects.toThrow("VSCode LM completion error: Completion failed") }) + + it("should include modelOptions in completePrompt when model vendor is copilot", async () => { + const copilotModel = { + ...mockLanguageModelChat, + vendor: "copilot", + name: "GPT-4o", + sendRequest: vi.fn(), + countTokens: vi.fn(), + } + handler["client"] = copilotModel as any + + const responseText = "Completed text" + copilotModel.sendRequest.mockResolvedValueOnce({ + stream: (async function* () { + yield new vscode.LanguageModelTextPart(responseText) + return + })(), + }) + + const result = await handler.completePrompt("Test prompt") + expect(result).toBe(responseText) + expect(copilotModel.sendRequest).toHaveBeenCalledWith( + expect.any(Array), + expect.objectContaining({ + modelOptions: { 
"copilot-integration-id": "roo-code" }, + }), + expect.anything(), + ) + }) + + it("should NOT include modelOptions in completePrompt when model vendor is not copilot", async () => { + handler["client"] = mockLanguageModelChat + + const responseText = "Completed text" + mockLanguageModelChat.sendRequest.mockResolvedValueOnce({ + stream: (async function* () { + yield new vscode.LanguageModelTextPart(responseText) + return + })(), + }) + + const result = await handler.completePrompt("Test prompt") + expect(result).toBe(responseText) + + const callArgs = mockLanguageModelChat.sendRequest.mock.calls[0] + const requestOptions = callArgs[1] + expect(requestOptions.modelOptions).toBeUndefined() + }) }) }) diff --git a/src/api/providers/vscode-lm.ts b/src/api/providers/vscode-lm.ts index 8fb564a9d59..670ad6ac717 100644 --- a/src/api/providers/vscode-lm.ts +++ b/src/api/providers/vscode-lm.ts @@ -14,6 +14,40 @@ import { convertToVsCodeLmMessages, extractTextCountFromMessage } from "../trans import { BaseProvider } from "./base-provider" import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index" +/** + * The vendor string used by GitHub Copilot models in the VS Code LM API. + */ +const COPILOT_VENDOR = "copilot" + +/** + * Checks whether the given model client is backed by GitHub Copilot. + * + * @param client - A VS Code LanguageModelChat instance (may be null during init). + * @returns `true` when the model's vendor matches the Copilot vendor string. + */ +function isCopilotModel(client: vscode.LanguageModelChat | null): boolean { + return client?.vendor?.toLowerCase() === COPILOT_VENDOR +} + +/** + * Builds model-specific options for GitHub Copilot requests. + * + * These options are forwarded by the VS Code Copilot Chat extension to the + * Copilot backend and help it properly categorise requests that originate + * from agent/extension activity (as opposed to direct user chat). 
+ * + * Requires VS Code >= 1.96 where `modelOptions` on + * `LanguageModelChatRequestOptions` became stable. On older versions the + * extra property is silently ignored. + * + * @see https://github.com/RooCodeInc/Roo-Code/issues/11289 + */ +function buildCopilotModelOptions(): Record<string, string> { + return { + "copilot-integration-id": "roo-code", + } +} + /** * Converts OpenAI-format tools to VSCode Language Model tools. * Normalizes the JSON Schema to draft 2020-12 compliant format required by @@ -393,11 +427,17 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan let accumulatedText: string = "" try { - // Create the response stream with required options + // Create the response stream with required options. + // When the model is backed by GitHub Copilot we attach + // modelOptions so that the Copilot backend can properly + // categorise agent-initiated requests for billing purposes. + // `modelOptions` requires VS Code >= 1.96; older versions + // silently ignore the extra property. const requestOptions: vscode.LanguageModelChatRequestOptions = { justification: `Roo Code would like to use '${client.name}' from '${client.vendor}', Click 'Allow' to proceed.`, tools: convertToVsCodeLmTools(metadata?.tools ?? []), - } + ...(isCopilotModel(client) ? { modelOptions: buildCopilotModelOptions() } : {}), + } as vscode.LanguageModelChatRequestOptions const response: vscode.LanguageModelChatResponse = await client.sendRequest( vsCodeLmMessages, @@ -565,9 +605,12 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan async completePrompt(prompt: string): Promise<string> { try { const client = await this.getClient() + const requestOptions = { + ...(isCopilotModel(client) ? 
{ modelOptions: buildCopilotModelOptions() } : {}), + } as vscode.LanguageModelChatRequestOptions const response = await client.sendRequest( [vscode.LanguageModelChatMessage.User(prompt)], - {}, + requestOptions, new vscode.CancellationTokenSource().token, ) let result = ""