80 changes: 80 additions & 0 deletions src/api/providers/__tests__/gemini.spec.ts
@@ -1,5 +1,7 @@
// npx vitest run src/api/providers/__tests__/gemini.spec.ts

import { NoOutputGeneratedError } from "ai"

const mockCaptureException = vitest.fn()

vitest.mock("@roo-code/telemetry", () => ({
@@ -149,6 +151,84 @@ describe("GeminiHandler", () => {
)
})

it("should yield informative message when stream produces no text content", async () => {
// A stream with only reasoning (no text-delta) simulates a thinking-only response
const mockFullStream = (async function* () {
yield { type: "reasoning-delta", id: "1", text: "thinking..." }
})()

mockStreamText.mockReturnValue({
fullStream: mockFullStream,
usage: Promise.resolve({ inputTokens: 10, outputTokens: 0 }),
providerMetadata: Promise.resolve({}),
})

const stream = handler.createMessage(systemPrompt, mockMessages)
const chunks = []

for await (const chunk of stream) {
chunks.push(chunk)
}

// Should have: reasoning chunk, empty-stream informative message, usage
const textChunks = chunks.filter((c) => c.type === "text")
expect(textChunks).toHaveLength(1)
expect(textChunks[0]).toEqual({
type: "text",
text: "Model returned an empty response. This may be caused by an unsupported thinking configuration or content filtering.",
})
})

it("should suppress NoOutputGeneratedError when no text content was yielded", async () => {
// Empty stream - nothing yielded at all
const mockFullStream = (async function* () {
// empty stream
})()

mockStreamText.mockReturnValue({
fullStream: mockFullStream,
usage: Promise.reject(new NoOutputGeneratedError({ message: "No output generated." })),
providerMetadata: Promise.resolve({}),
})

const stream = handler.createMessage(systemPrompt, mockMessages)
const chunks = []

// Should NOT throw - the error is suppressed
for await (const chunk of stream) {
chunks.push(chunk)
}

// Should have the informative empty-stream message only (no usage since it errored)
const textChunks = chunks.filter((c) => c.type === "text")
expect(textChunks).toHaveLength(1)
expect(textChunks[0]).toMatchObject({
type: "text",
text: expect.stringContaining("empty response"),
})
})

it("should re-throw NoOutputGeneratedError when text content was yielded", async () => {
// Stream yields text content but usage still throws NoOutputGeneratedError (unexpected)
const mockFullStream = (async function* () {
yield { type: "text-delta", text: "Hello" }
})()

mockStreamText.mockReturnValue({
fullStream: mockFullStream,
usage: Promise.reject(new NoOutputGeneratedError({ message: "No output generated." })),
providerMetadata: Promise.resolve({}),
})

const stream = handler.createMessage(systemPrompt, mockMessages)

await expect(async () => {
for await (const _chunk of stream) {
// consume stream
}
}).rejects.toThrow()
})

it("should handle API errors", async () => {
const mockError = new Error("Gemini API error")
// eslint-disable-next-line require-yield
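These mocks mirror the AI SDK stream-part shapes the handler consumes (text-delta, reasoning-delta). As a sketch of that contract, a hypothetical helper for building such mocks could look like the following; mockStream and the StreamPart union are illustrative and not part of the spec file.

// Hypothetical test helper (not in the PR): builds a fullStream mock from parts.
type StreamPart =
	| { type: "text-delta"; id?: string; text: string }
	| { type: "reasoning-delta"; id: string; text: string }

function mockStream(...parts: StreamPart[]): AsyncGenerator<StreamPart> {
	return (async function* () {
		for (const part of parts) {
			yield part
		}
	})()
}

// Usage: mockStreamText.mockReturnValue({
//   fullStream: mockStream({ type: "reasoning-delta", id: "1", text: "thinking..." }),
//   usage: Promise.resolve({ inputTokens: 10, outputTokens: 0 }),
//   providerMetadata: Promise.resolve({}),
// })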
37 changes: 33 additions & 4 deletions src/api/providers/gemini.ts
@@ -1,6 +1,6 @@
import type { Anthropic } from "@anthropic-ai/sdk"
import { createGoogleGenerativeAI, type GoogleGenerativeAIProvider } from "@ai-sdk/google"
-import { streamText, generateText, ToolSet } from "ai"
import { streamText, generateText, NoOutputGeneratedError, ToolSet } from "ai"

import {
type ModelInfo,
@@ -131,6 +131,9 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
// Use streamText for streaming responses
const result = streamText(requestOptions)

// Track whether any text content was yielded (not just reasoning/thinking)
let hasContent = false

// Process the full stream to get all events including reasoning
for await (const part of result.fullStream) {
// Capture thoughtSignature from tool-call events (Gemini 3 thought signatures)
@@ -143,10 +146,22 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
}

for (const chunk of processAiSdkStreamPart(part)) {
if (chunk.type === "text" || chunk.type === "tool_call_start") {
hasContent = true
}
yield chunk
}
}

// If the stream completed without yielding any text content, inform the user
// TODO: Move to i18n key common:errors.gemini.empty_response once translation pipeline is updated
if (!hasContent) {
yield {
type: "text" as const,
text: "Model returned an empty response. This may be caused by an unsupported thinking configuration or content filtering.",
}
}

// Extract grounding sources from providerMetadata if available
const providerMetadata = await result.providerMetadata
const groundingMetadata = providerMetadata?.google as
@@ -167,9 +182,23 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
}

// Yield usage metrics at the end
-const usage = await result.usage
-if (usage) {
-yield this.processUsageMetrics(usage, info, providerMetadata)
// Wrap in try-catch to handle NoOutputGeneratedError thrown by the AI SDK
// when the stream produces no output (e.g., thinking-only, safety block)
try {
const usage = await result.usage
if (usage) {
yield this.processUsageMetrics(usage, info, providerMetadata)
}
} catch (usageError) {
if (usageError instanceof NoOutputGeneratedError) {
// If real text content was yielded, this error is unexpected; re-throw it
if (hasContent) {
throw usageError
}
// Otherwise the informative message was already yielded above — no-op
} else {
throw usageError
}
}
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error)
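Taken together, the change to gemini.ts reduces to the following control flow: track whether any real text was yielded, emit a fallback message when none was, and treat the SDK's NoOutputGeneratedError from result.usage as expected only in that case. This is a minimal sketch with simplified chunk handling; streamWithEmptyFallback and the usage-chunk shape are illustrative, and the real handler also routes every part through processAiSdkStreamPart and tracks tool calls and reasoning.

import { streamText, NoOutputGeneratedError } from "ai"

// Simplified sketch: only text-delta parts are handled here.
async function* streamWithEmptyFallback(options: Parameters<typeof streamText>[0]) {
	const result = streamText(options)
	let hasContent = false

	for await (const part of result.fullStream) {
		if (part.type === "text-delta") {
			hasContent = true
			yield { type: "text" as const, text: part.text }
		}
	}

	// Thinking-only or safety-blocked responses produce no text parts at all.
	if (!hasContent) {
		yield {
			type: "text" as const,
			text: "Model returned an empty response. This may be caused by an unsupported thinking configuration or content filtering.",
		}
	}

	try {
		const usage = await result.usage
		yield { type: "usage" as const, inputTokens: usage.inputTokens, outputTokens: usage.outputTokens }
	} catch (error) {
		// result.usage rejects with NoOutputGeneratedError when the model produced
		// no output; suppress it only if the fallback message above covered the case.
		if (!(error instanceof NoOutputGeneratedError) || hasContent) {
			throw error
		}
	}
}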
123 changes: 123 additions & 0 deletions src/api/transform/__tests__/reasoning.spec.ts
@@ -765,6 +765,7 @@ describe("reasoning.ts", () => {
}

const result = getGeminiReasoning(options)
// "none" is not a valid GeminiThinkingLevel, so no fallback — returns undefined
expect(result).toBeUndefined()
})

@@ -838,6 +839,128 @@
const result = getGeminiReasoning(options) as GeminiReasoningParams | undefined
expect(result).toEqual({ thinkingLevel: "medium", includeThoughts: true })
})

it("should fall back to model default when settings effort is not in supportsReasoningEffort array", () => {
// Simulates gemini-3-pro-preview which only supports ["low", "high"]
// but user has reasoningEffort: "medium" from a different model
const geminiModel: ModelInfo = {
...baseModel,
supportsReasoningEffort: ["low", "high"] as ModelInfo["supportsReasoningEffort"],
reasoningEffort: "low",
}

const settings: ProviderSettings = {
apiProvider: "gemini",
reasoningEffort: "medium",
}

const options: GetModelReasoningOptions = {
model: geminiModel,
reasoningBudget: undefined,
reasoningEffort: "medium",
settings,
}

const result = getGeminiReasoning(options) as GeminiReasoningParams | undefined
// "medium" is not in ["low", "high"], so falls back to model.reasoningEffort "low"
expect(result).toEqual({ thinkingLevel: "low", includeThoughts: true })
})

it("should return undefined when unsupported effort and model default is also invalid", () => {
const geminiModel: ModelInfo = {
...baseModel,
supportsReasoningEffort: ["low", "high"] as ModelInfo["supportsReasoningEffort"],
// No reasoningEffort default set
}

const settings: ProviderSettings = {
apiProvider: "gemini",
reasoningEffort: "medium",
}

const options: GetModelReasoningOptions = {
model: geminiModel,
reasoningBudget: undefined,
reasoningEffort: "medium",
settings,
}

const result = getGeminiReasoning(options)
// "medium" is not in ["low", "high"], fallback is undefined → returns undefined
expect(result).toBeUndefined()
})

it("should pass through effort that IS in the supportsReasoningEffort array", () => {
const geminiModel: ModelInfo = {
...baseModel,
supportsReasoningEffort: ["low", "high"] as ModelInfo["supportsReasoningEffort"],
reasoningEffort: "low",
}

const settings: ProviderSettings = {
apiProvider: "gemini",
reasoningEffort: "high",
}

const options: GetModelReasoningOptions = {
model: geminiModel,
reasoningBudget: undefined,
reasoningEffort: "high",
settings,
}

const result = getGeminiReasoning(options) as GeminiReasoningParams | undefined
// "high" IS in ["low", "high"], so it should be used directly
expect(result).toEqual({ thinkingLevel: "high", includeThoughts: true })
})

it("should skip validation when supportsReasoningEffort is boolean (not array)", () => {
const geminiModel: ModelInfo = {
...baseModel,
supportsReasoningEffort: true,
reasoningEffort: "low",
}

const settings: ProviderSettings = {
apiProvider: "gemini",
reasoningEffort: "medium",
}

const options: GetModelReasoningOptions = {
model: geminiModel,
reasoningBudget: undefined,
reasoningEffort: "medium",
settings,
}

const result = getGeminiReasoning(options) as GeminiReasoningParams | undefined
// boolean supportsReasoningEffort should not trigger array validation
expect(result).toEqual({ thinkingLevel: "medium", includeThoughts: true })
})

it("should fall back to model default when settings has 'minimal' but model only supports ['low', 'high']", () => {
const geminiModel: ModelInfo = {
...baseModel,
supportsReasoningEffort: ["low", "high"] as ModelInfo["supportsReasoningEffort"],
reasoningEffort: "low",
}

const settings: ProviderSettings = {
apiProvider: "gemini",
reasoningEffort: "minimal",
}

const options: GetModelReasoningOptions = {
model: geminiModel,
reasoningBudget: undefined,
reasoningEffort: "minimal",
settings,
}

const result = getGeminiReasoning(options) as GeminiReasoningParams | undefined
// "minimal" is not in ["low", "high"], falls back to "low"
expect(result).toEqual({ thinkingLevel: "low", includeThoughts: true })
})
})

describe("Integration scenarios", () => {
14 changes: 12 additions & 2 deletions src/api/transform/reasoning.ts
@@ -150,10 +150,20 @@ export const getGeminiReasoning = ({
return undefined
}

// Validate that the selected effort is supported by this specific model.
// e.g. gemini-3-pro-preview only supports ["low", "high"] — sending
// "medium" (carried over from a different model's settings) causes errors.
const effortToUse =
Array.isArray(model.supportsReasoningEffort) &&
isGeminiThinkingLevel(selectedEffort) &&
!model.supportsReasoningEffort.includes(selectedEffort)
? model.reasoningEffort
: selectedEffort

// Effort-based models on Google GenAI support minimal/low/medium/high levels.
-if (!isGeminiThinkingLevel(selectedEffort)) {
if (!effortToUse || !isGeminiThinkingLevel(effortToUse)) {
return undefined
}

-return { thinkingLevel: selectedEffort, includeThoughts: true }
return { thinkingLevel: effortToUse, includeThoughts: true }
}
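
The validation added above can be read in isolation as a small resolution function. A minimal sketch, assuming a simplified isGeminiThinkingLevel guard; resolveThinkingLevel and its parameter list are illustrative rather than the module's actual export.

type GeminiThinkingLevel = "minimal" | "low" | "medium" | "high"

const isGeminiThinkingLevel = (value: string | undefined): value is GeminiThinkingLevel =>
	value === "minimal" || value === "low" || value === "medium" || value === "high"

function resolveThinkingLevel(
	selectedEffort: string | undefined,
	supportsReasoningEffort: boolean | GeminiThinkingLevel[] | undefined,
	modelDefault: string | undefined,
): GeminiThinkingLevel | undefined {
	// An explicit allow-list overrides a carried-over selection and falls back to
	// the model's own default; boolean support skips the allow-list check entirely.
	const effortToUse =
		Array.isArray(supportsReasoningEffort) &&
		isGeminiThinkingLevel(selectedEffort) &&
		!supportsReasoningEffort.includes(selectedEffort)
			? modelDefault
			: selectedEffort

	return isGeminiThinkingLevel(effortToUse) ? effortToUse : undefined
}

// Mirrors the spec cases above:
// resolveThinkingLevel("medium",  ["low", "high"], "low")      -> "low"
// resolveThinkingLevel("medium",  ["low", "high"], undefined)  -> undefined
// resolveThinkingLevel("high",    ["low", "high"], "low")      -> "high"
// resolveThinkingLevel("medium",  true, "low")                 -> "medium"
// resolveThinkingLevel("minimal", ["low", "high"], "low")      -> "low"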