Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
105 changes: 105 additions & 0 deletions src/api/providers/__tests__/vscode-lm.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -391,6 +391,63 @@ describe("VsCodeLmHandler", () => {

await expect(handler.createMessage(systemPrompt, messages).next()).rejects.toThrow("API Error")
})

it("should include modelOptions when model vendor is copilot", async () => {
// Create a Copilot-vendor mock model
const copilotModel = {
...mockLanguageModelChat,
vendor: "copilot",
name: "GPT-4o",
}
handler["client"] = copilotModel as any

const systemPrompt = "You are a helpful assistant"
const messages: Anthropic.Messages.MessageParam[] = [{ role: "user" as const, content: "Hello" }]

copilotModel.sendRequest.mockResolvedValueOnce({
stream: (async function* () {
yield new vscode.LanguageModelTextPart("Hi there")
})(),
})
copilotModel.countTokens.mockResolvedValue(5)

const stream = handler.createMessage(systemPrompt, messages)
const chunks = []
for await (const chunk of stream) {
chunks.push(chunk)
}

expect(copilotModel.sendRequest).toHaveBeenCalledWith(
expect.any(Array),
expect.objectContaining({
modelOptions: { "copilot-integration-id": "roo-code" },
}),
expect.anything(),
)
})

it("should NOT include modelOptions when model vendor is not copilot", async () => {
	const systemPrompt = "You are a helpful assistant"
	const messages: Anthropic.Messages.MessageParam[] = [{ role: "user" as const, content: "Hello" }]

	mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
		stream: (async function* () {
			yield new vscode.LanguageModelTextPart("Hi there")
		})(),
	})
	mockLanguageModelChat.countTokens.mockResolvedValue(5)

	// Drain the stream so the request is actually issued.
	const collected = []
	for await (const part of handler.createMessage(systemPrompt, messages)) {
		collected.push(part)
	}

	// Inspect the options argument (second positional arg) of the first
	// sendRequest call: a non-Copilot vendor must not get modelOptions.
	const [, optionsArg] = mockLanguageModelChat.sendRequest.mock.calls[0]
	expect(optionsArg.modelOptions).toBeUndefined()
})
})

describe("getModel", () => {
Expand Down Expand Up @@ -535,5 +592,53 @@ describe("VsCodeLmHandler", () => {
const promise = handler.completePrompt("Test prompt")
await expect(promise).rejects.toThrow("VSCode LM completion error: Completion failed")
})

it("should include modelOptions in completePrompt when model vendor is copilot", async () => {
	// Copilot-vendor model with its own mock functions so the recorded
	// calls stay isolated to this test.
	const copilotModel = {
		...mockLanguageModelChat,
		vendor: "copilot",
		name: "GPT-4o",
		sendRequest: vi.fn(),
		countTokens: vi.fn(),
	}
	handler["client"] = copilotModel as any

	const responseText = "Completed text"
	copilotModel.sendRequest.mockResolvedValueOnce({
		stream: (async function* () {
			yield new vscode.LanguageModelTextPart(responseText)
			return
		})(),
	})

	const completion = await handler.completePrompt("Test prompt")
	expect(completion).toBe(responseText)

	// The options argument must carry the Copilot integration id.
	expect(copilotModel.sendRequest).toHaveBeenCalledWith(
		expect.any(Array),
		expect.objectContaining({
			modelOptions: { "copilot-integration-id": "roo-code" },
		}),
		expect.anything(),
	)
})

it("should NOT include modelOptions in completePrompt when model vendor is not copilot", async () => {
	handler["client"] = mockLanguageModelChat

	const responseText = "Completed text"
	mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
		stream: (async function* () {
			yield new vscode.LanguageModelTextPart(responseText)
			return
		})(),
	})

	const completion = await handler.completePrompt("Test prompt")
	expect(completion).toBe(responseText)

	// The options argument (second positional arg) of the recorded call
	// must not contain modelOptions for a non-Copilot vendor.
	const [, optionsArg] = mockLanguageModelChat.sendRequest.mock.calls[0]
	expect(optionsArg.modelOptions).toBeUndefined()
})
})
})
49 changes: 46 additions & 3 deletions src/api/providers/vscode-lm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,40 @@ import { convertToVsCodeLmMessages, extractTextCountFromMessage } from "../trans
import { BaseProvider } from "./base-provider"
import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"

/**
 * Vendor identifier reported by GitHub Copilot models through the
 * VS Code Language Model API.
 */
const COPILOT_VENDOR = "copilot"

/**
 * Determines whether a language-model client is provided by GitHub Copilot.
 *
 * @param client - The active LanguageModelChat, or null before one is selected.
 * @returns `true` when the client's vendor matches Copilot (case-insensitive).
 */
function isCopilotModel(client: vscode.LanguageModelChat | null): boolean {
	if (!client) {
		return false
	}
	return client.vendor?.toLowerCase() === COPILOT_VENDOR
}

/**
 * Produces the model-specific request options attached to Copilot-backed
 * requests.
 *
 * The VS Code Copilot Chat extension forwards these options to the Copilot
 * backend, which uses them to distinguish agent/extension traffic from
 * direct user chat.
 *
 * `modelOptions` on `LanguageModelChatRequestOptions` is stable from
 * VS Code 1.96 onward; earlier versions silently drop the extra property.
 *
 * @see https://github.com/RooCodeInc/Roo-Code/issues/11289
 */
function buildCopilotModelOptions(): Record<string, unknown> {
	const integrationId = "roo-code"
	return { "copilot-integration-id": integrationId }
}

/**
* Converts OpenAI-format tools to VSCode Language Model tools.
* Normalizes the JSON Schema to draft 2020-12 compliant format required by
Expand Down Expand Up @@ -393,11 +427,17 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
let accumulatedText: string = ""

try {
// Create the response stream with required options
// Create the response stream with required options.
// When the model is backed by GitHub Copilot we attach
// modelOptions so that the Copilot backend can properly
// categorise agent-initiated requests for billing purposes.
// `modelOptions` requires VS Code >= 1.96; older versions
// silently ignore the extra property.
const requestOptions: vscode.LanguageModelChatRequestOptions = {
justification: `Roo Code would like to use '${client.name}' from '${client.vendor}', Click 'Allow' to proceed.`,
tools: convertToVsCodeLmTools(metadata?.tools ?? []),
}
...(isCopilotModel(client) ? { modelOptions: buildCopilotModelOptions() } : {}),
} as vscode.LanguageModelChatRequestOptions
Comment on lines 436 to +440
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[nit] The as vscode.LanguageModelChatRequestOptions assertion is unnecessary here (and at line 610 in completePrompt). The resolved @types/vscode@1.100.0 already includes modelOptions in LanguageModelChatRequestOptions, so the object literal is assignable without a cast. In createMessage the variable already carries the same type annotation on the left-hand side, making the assertion doubly redundant. Dropping the as casts lets TypeScript catch property-name typos in the object literal.

Suggested change
const requestOptions: vscode.LanguageModelChatRequestOptions = {
justification: `Roo Code would like to use '${client.name}' from '${client.vendor}', Click 'Allow' to proceed.`,
tools: convertToVsCodeLmTools(metadata?.tools ?? []),
}
...(isCopilotModel(client) ? { modelOptions: buildCopilotModelOptions() } : {}),
} as vscode.LanguageModelChatRequestOptions
const requestOptions: vscode.LanguageModelChatRequestOptions = {
justification: `Roo Code would like to use '${client.name}' from '${client.vendor}', Click 'Allow' to proceed.`,
tools: convertToVsCodeLmTools(metadata?.tools ?? []),
...(isCopilotModel(client) ? { modelOptions: buildCopilotModelOptions() } : {}),
}

Fix it with Roo Code or mention @roomote and request a fix.


const response: vscode.LanguageModelChatResponse = await client.sendRequest(
vsCodeLmMessages,
Expand Down Expand Up @@ -565,9 +605,12 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
async completePrompt(prompt: string): Promise<string> {
try {
const client = await this.getClient()
const requestOptions = {
...(isCopilotModel(client) ? { modelOptions: buildCopilotModelOptions() } : {}),
} as vscode.LanguageModelChatRequestOptions
const response = await client.sendRequest(
[vscode.LanguageModelChatMessage.User(prompt)],
{},
requestOptions,
new vscode.CancellationTokenSource().token,
)
let result = ""
Expand Down
Loading