From e8158b4286118e52dd745a069892279c01e14f59 Mon Sep 17 00:00:00 2001
From: Hannes Rudolph
Date: Sat, 7 Feb 2026 16:28:18 -0700
Subject: [PATCH 1/9] refactor: remove 9 low-usage providers (Phase 0)

Remove Cerebras, Chutes, DeepInfra, Doubao, Featherless, Groq, Hugging Face,
IO Intelligence, and Unbound providers from the codebase. Each provider
removal includes: handler, tests, model definitions, type schemas, UI
settings components, fetchers, i18n references, and all wiring in shared
registration/config files.

- Delete 42 provider-specific files (handlers, tests, fetchers, UI components)
- Remove @ai-sdk/cerebras and @ai-sdk/groq npm dependencies
- Clean provider references from 68 shared files across src/, packages/types/, webview-ui/, and apps/cli/
- Remove ~490 dead i18n translation keys across 36 locale files
- Add docs/ai-sdk-migration-guide.md with updated migration status
- All TypeScript checks pass, 6505 tests pass with 0 failures
---
 apps/cli/src/lib/utils/context-window.ts | 10 +-
 apps/web-evals/package.json | 2 +-
 packages/types/src/global-settings.ts | 9 -
 packages/types/src/provider-settings.ts | 117 +---
 packages/types/src/providers/cerebras.ts | 58 --
 packages/types/src/providers/chutes.ts | 421 -------
 packages/types/src/providers/deepinfra.ts | 14 -
 packages/types/src/providers/doubao.ts | 44 --
 packages/types/src/providers/featherless.ts | 58 --
 packages/types/src/providers/groq.ts | 84 ---
 packages/types/src/providers/huggingface.ts | 17 -
 packages/types/src/providers/index.ts | 35 --
 .../types/src/providers/io-intelligence.ts | 44 --
 packages/types/src/providers/unbound.ts | 14 -
 packages/types/src/vscode-extension-host.ts | 19 -
 pnpm-lock.yaml | 41 +-
 src/api/index.ts | 30 -
 src/api/providers/__tests__/cerebras.spec.ts | 455 --------
 src/api/providers/__tests__/chutes.spec.ts | 490 ---------
 src/api/providers/__tests__/deepinfra.spec.ts | 386 ------------
 .../providers/__tests__/featherless.spec.ts | 356 -----------
 src/api/providers/__tests__/groq.spec.ts | 578 ------------------
 .../providers/__tests__/huggingface.spec.ts | 553 -----------------
 .../__tests__/io-intelligence.spec.ts | 197 ------
 src/api/providers/__tests__/unbound.spec.ts | 549 -----------------
 src/api/providers/cerebras.ts | 169 -----
 src/api/providers/chutes.ts | 242 --------
 src/api/providers/deepinfra.ts | 164 -----
 src/api/providers/doubao.ts | 87 ---
 src/api/providers/featherless.ts | 140 -----
 .../fetchers/__tests__/chutes.spec.ts | 342 -----------
 .../fetchers/__tests__/modelCache.spec.ts | 42 --
 src/api/providers/fetchers/chutes.ts | 89 ---
 src/api/providers/fetchers/deepinfra.ts | 71 ---
 src/api/providers/fetchers/huggingface.ts | 252 --------
 src/api/providers/fetchers/io-intelligence.ts | 158 -----
 src/api/providers/fetchers/modelCache.ts | 22 -
 src/api/providers/fetchers/unbound.ts | 52 --
 src/api/providers/groq.ts | 181 ------
 src/api/providers/huggingface.ts | 215 -------
 src/api/providers/index.ts | 9 -
 src/api/providers/io-intelligence.ts | 62 --
 src/api/providers/unbound.ts | 208 -------
 src/api/transform/__tests__/ai-sdk.spec.ts | 6 +-
 .../__tests__/context-error-handling.test.ts | 38 --
 .../context-error-handling.ts | 21 +-
 .../webview/__tests__/ClineProvider.spec.ts | 46 --
 ...webviewMessageHandler.routerModels.spec.ts | 6 -
 .../__tests__/webviewMessageHandler.spec.ts | 63 --
 src/core/webview/webviewMessageHandler.ts | 41 --
 src/i18n/locales/ca/common.json | 14 +-
 src/i18n/locales/de/common.json | 14 +-
 src/i18n/locales/en/common.json | 9 -
 src/i18n/locales/es/common.json | 14 +-
 src/i18n/locales/fr/common.json | 14 +-
 src/i18n/locales/hi/common.json | 14 +-
 src/i18n/locales/id/common.json | 14 +-
 src/i18n/locales/it/common.json | 14 +-
 src/i18n/locales/ja/common.json | 14 +-
 src/i18n/locales/ko/common.json | 14 +-
 src/i18n/locales/nl/common.json | 14 +-
 src/i18n/locales/pl/common.json | 14 +-
 src/i18n/locales/pt-BR/common.json | 14 +-
 src/i18n/locales/ru/common.json | 14 +-
 src/i18n/locales/tr/common.json | 14 +-
 src/i18n/locales/vi/common.json | 14 +-
 src/i18n/locales/zh-CN/common.json | 14 +-
 src/i18n/locales/zh-TW/common.json | 14 +-
 src/package.json | 2 -
 src/shared/ProfileValidator.ts | 9 -
 src/shared/__tests__/ProfileValidator.spec.ts | 34 --
 .../__tests__/checkExistApiConfig.spec.ts | 1 -
 src/shared/api.ts | 5 -
 .../src/components/settings/ApiOptions.tsx | 103 +---
 .../src/components/settings/ModelPicker.tsx | 3 -
 .../ApiOptions.provider-filtering.spec.tsx | 2 -
 .../src/components/settings/constants.ts | 17 -
 .../settings/providers/Cerebras.tsx | 50 --
 .../components/settings/providers/Chutes.tsx | 76 ---
 .../settings/providers/DeepInfra.tsx | 100 ---
 .../components/settings/providers/Doubao.tsx | 53 --
 .../settings/providers/Featherless.tsx | 50 --
 .../components/settings/providers/Groq.tsx | 50 --
 .../settings/providers/HuggingFace.tsx | 277 ---------
 .../settings/providers/IOIntelligence.tsx | 80 ---
 .../components/settings/providers/Unbound.tsx | 197 ------
 .../providers/__tests__/HuggingFace.spec.tsx | 300 ---------
 .../components/settings/providers/index.ts | 9 -
 .../settings/utils/providerModelConfig.ts | 17 -
 .../hooks/__tests__/useSelectedModel.spec.ts | 22 +-
 .../components/ui/hooks/useSelectedModel.ts | 60 --
 webview-ui/src/i18n/locales/ca/settings.json | 30 -
 webview-ui/src/i18n/locales/de/settings.json | 30 -
 webview-ui/src/i18n/locales/en/settings.json | 30 -
 webview-ui/src/i18n/locales/es/settings.json | 30 -
 webview-ui/src/i18n/locales/fr/settings.json | 30 -
 webview-ui/src/i18n/locales/hi/settings.json | 30 -
 webview-ui/src/i18n/locales/id/settings.json | 30 -
 webview-ui/src/i18n/locales/it/settings.json | 30 -
 webview-ui/src/i18n/locales/ja/settings.json | 30 -
 webview-ui/src/i18n/locales/ko/settings.json | 30 -
 webview-ui/src/i18n/locales/nl/settings.json | 30 -
 webview-ui/src/i18n/locales/pl/settings.json | 30 -
 .../src/i18n/locales/pt-BR/settings.json | 30 -
 webview-ui/src/i18n/locales/ru/settings.json | 30 -
 webview-ui/src/i18n/locales/tr/settings.json | 30 -
 webview-ui/src/i18n/locales/vi/settings.json | 30 -
 .../src/i18n/locales/zh-CN/settings.json | 30 -
 .../src/i18n/locales/zh-TW/settings.json | 30 -
 .../src/utils/__tests__/validate.spec.ts | 5 -
 webview-ui/src/utils/validate.ts | 33 -
 111 files changed, 32 insertions(+), 9617 deletions(-)
 delete mode 100644 packages/types/src/providers/cerebras.ts
 delete mode 100644 packages/types/src/providers/chutes.ts
 delete mode 100644 packages/types/src/providers/deepinfra.ts
 delete mode 100644 packages/types/src/providers/doubao.ts
 delete mode 100644 packages/types/src/providers/featherless.ts
 delete mode 100644 packages/types/src/providers/groq.ts
 delete mode 100644 packages/types/src/providers/huggingface.ts
 delete mode 100644 packages/types/src/providers/io-intelligence.ts
 delete mode 100644 packages/types/src/providers/unbound.ts
 delete mode 100644 src/api/providers/__tests__/cerebras.spec.ts
 delete mode 100644 src/api/providers/__tests__/chutes.spec.ts
 delete mode 100644 src/api/providers/__tests__/deepinfra.spec.ts
 delete mode 100644
src/api/providers/__tests__/featherless.spec.ts delete mode 100644 src/api/providers/__tests__/groq.spec.ts delete mode 100644 src/api/providers/__tests__/huggingface.spec.ts delete mode 100644 src/api/providers/__tests__/io-intelligence.spec.ts delete mode 100644 src/api/providers/__tests__/unbound.spec.ts delete mode 100644 src/api/providers/cerebras.ts delete mode 100644 src/api/providers/chutes.ts delete mode 100644 src/api/providers/deepinfra.ts delete mode 100644 src/api/providers/doubao.ts delete mode 100644 src/api/providers/featherless.ts delete mode 100644 src/api/providers/fetchers/__tests__/chutes.spec.ts delete mode 100644 src/api/providers/fetchers/chutes.ts delete mode 100644 src/api/providers/fetchers/deepinfra.ts delete mode 100644 src/api/providers/fetchers/huggingface.ts delete mode 100644 src/api/providers/fetchers/io-intelligence.ts delete mode 100644 src/api/providers/fetchers/unbound.ts delete mode 100644 src/api/providers/groq.ts delete mode 100644 src/api/providers/huggingface.ts delete mode 100644 src/api/providers/io-intelligence.ts delete mode 100644 src/api/providers/unbound.ts delete mode 100644 webview-ui/src/components/settings/providers/Cerebras.tsx delete mode 100644 webview-ui/src/components/settings/providers/Chutes.tsx delete mode 100644 webview-ui/src/components/settings/providers/DeepInfra.tsx delete mode 100644 webview-ui/src/components/settings/providers/Doubao.tsx delete mode 100644 webview-ui/src/components/settings/providers/Featherless.tsx delete mode 100644 webview-ui/src/components/settings/providers/Groq.tsx delete mode 100644 webview-ui/src/components/settings/providers/HuggingFace.tsx delete mode 100644 webview-ui/src/components/settings/providers/IOIntelligence.tsx delete mode 100644 webview-ui/src/components/settings/providers/Unbound.tsx delete mode 100644 webview-ui/src/components/settings/providers/__tests__/HuggingFace.spec.tsx diff --git a/apps/cli/src/lib/utils/context-window.ts b/apps/cli/src/lib/utils/context-window.ts index c1224c8b1ec..df878e16b02 100644 --- a/apps/cli/src/lib/utils/context-window.ts +++ b/apps/cli/src/lib/utils/context-window.ts @@ -48,18 +48,10 @@ function getModelIdForProvider(config: ProviderSettings): string | undefined { return config.requestyModelId case "litellm": return config.litellmModelId - case "deepinfra": - return config.deepInfraModelId - case "huggingface": - return config.huggingFaceModelId - case "unbound": - return config.unboundModelId case "vercel-ai-gateway": return config.vercelAiGatewayModelId - case "io-intelligence": - return config.ioIntelligenceModelId default: - // For anthropic, bedrock, vertex, gemini, xai, groq, etc. + // For anthropic, bedrock, vertex, gemini, xai, etc. 
return config.apiModelId } } diff --git a/apps/web-evals/package.json b/apps/web-evals/package.json index 0a721bf36cf..83d69edd592 100644 --- a/apps/web-evals/package.json +++ b/apps/web-evals/package.json @@ -27,7 +27,7 @@ "@radix-ui/react-tabs": "^1.1.3", "@radix-ui/react-tooltip": "^1.2.8", "@roo-code/evals": "workspace:^", - "@roo-code/types": "^1.108.0", + "@roo-code/types": "workspace:^", "@tanstack/react-query": "^5.69.0", "archiver": "^7.0.1", "class-variance-authority": "^0.7.1", diff --git a/packages/types/src/global-settings.ts b/packages/types/src/global-settings.ts index fce48cfb5d5..44cf95a63ed 100644 --- a/packages/types/src/global-settings.ts +++ b/packages/types/src/global-settings.ts @@ -267,19 +267,13 @@ export const SECRET_STATE_KEYS = [ "ollamaApiKey", "geminiApiKey", "openAiNativeApiKey", - "cerebrasApiKey", "deepSeekApiKey", - "doubaoApiKey", "moonshotApiKey", "mistralApiKey", "minimaxApiKey", - "unboundApiKey", "requestyApiKey", "xaiApiKey", - "groqApiKey", - "chutesApiKey", "litellmApiKey", - "deepInfraApiKey", "codeIndexOpenAiKey", "codeIndexQdrantApiKey", "codebaseIndexOpenAiCompatibleApiKey", @@ -287,12 +281,9 @@ export const SECRET_STATE_KEYS = [ "codebaseIndexMistralApiKey", "codebaseIndexVercelAiGatewayApiKey", "codebaseIndexOpenRouterApiKey", - "huggingFaceApiKey", "sambaNovaApiKey", "zaiApiKey", "fireworksApiKey", - "featherlessApiKey", - "ioIntelligenceApiKey", "vercelAiGatewayApiKey", "basetenApiKey", ] as const diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index 555513500b8..31064be09b5 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -6,14 +6,9 @@ import { anthropicModels, basetenModels, bedrockModels, - cerebrasModels, deepSeekModels, - doubaoModels, - featherlessModels, fireworksModels, geminiModels, - groqModels, - ioIntelligenceModels, mistralModels, moonshotModels, openAiCodexModels, @@ -39,18 +34,7 @@ export const DEFAULT_CONSECUTIVE_MISTAKE_LIMIT = 3 * Dynamic provider requires external API calls in order to get the model list. 
*/ -export const dynamicProviders = [ - "openrouter", - "vercel-ai-gateway", - "huggingface", - "litellm", - "deepinfra", - "io-intelligence", - "requesty", - "unbound", - "roo", - "chutes", -] as const +export const dynamicProviders = ["openrouter", "vercel-ai-gateway", "litellm", "requesty", "roo"] as const export type DynamicProvider = (typeof dynamicProviders)[number] @@ -121,14 +105,10 @@ export const providerNames = [ "anthropic", "bedrock", "baseten", - "cerebras", - "doubao", "deepseek", - "featherless", "fireworks", "gemini", "gemini-cli", - "groq", "mistral", "moonshot", "minimax", @@ -300,17 +280,6 @@ const deepSeekSchema = apiModelIdProviderModelSchema.extend({ deepSeekApiKey: z.string().optional(), }) -const deepInfraSchema = apiModelIdProviderModelSchema.extend({ - deepInfraBaseUrl: z.string().optional(), - deepInfraApiKey: z.string().optional(), - deepInfraModelId: z.string().optional(), -}) - -const doubaoSchema = apiModelIdProviderModelSchema.extend({ - doubaoBaseUrl: z.string().optional(), - doubaoApiKey: z.string().optional(), -}) - const moonshotSchema = apiModelIdProviderModelSchema.extend({ moonshotBaseUrl: z .union([z.literal("https://api.moonshot.ai/v1"), z.literal("https://api.moonshot.cn/v1")]) @@ -325,11 +294,6 @@ const minimaxSchema = apiModelIdProviderModelSchema.extend({ minimaxApiKey: z.string().optional(), }) -const unboundSchema = baseProviderSettingsSchema.extend({ - unboundApiKey: z.string().optional(), - unboundModelId: z.string().optional(), -}) - const requestySchema = baseProviderSettingsSchema.extend({ requestyBaseUrl: z.string().optional(), requestyApiKey: z.string().optional(), @@ -344,20 +308,6 @@ const xaiSchema = apiModelIdProviderModelSchema.extend({ xaiApiKey: z.string().optional(), }) -const groqSchema = apiModelIdProviderModelSchema.extend({ - groqApiKey: z.string().optional(), -}) - -const huggingFaceSchema = baseProviderSettingsSchema.extend({ - huggingFaceApiKey: z.string().optional(), - huggingFaceModelId: z.string().optional(), - huggingFaceInferenceProvider: z.string().optional(), -}) - -const chutesSchema = apiModelIdProviderModelSchema.extend({ - chutesApiKey: z.string().optional(), -}) - const litellmSchema = baseProviderSettingsSchema.extend({ litellmBaseUrl: z.string().optional(), litellmApiKey: z.string().optional(), @@ -365,10 +315,6 @@ const litellmSchema = baseProviderSettingsSchema.extend({ litellmUsePromptCache: z.boolean().optional(), }) -const cerebrasSchema = apiModelIdProviderModelSchema.extend({ - cerebrasApiKey: z.string().optional(), -}) - const sambaNovaSchema = apiModelIdProviderModelSchema.extend({ sambaNovaApiKey: z.string().optional(), }) @@ -386,15 +332,6 @@ const fireworksSchema = apiModelIdProviderModelSchema.extend({ fireworksApiKey: z.string().optional(), }) -const featherlessSchema = apiModelIdProviderModelSchema.extend({ - featherlessApiKey: z.string().optional(), -}) - -const ioIntelligenceSchema = apiModelIdProviderModelSchema.extend({ - ioIntelligenceModelId: z.string().optional(), - ioIntelligenceApiKey: z.string().optional(), -}) - const qwenCodeSchema = apiModelIdProviderModelSchema.extend({ qwenCodeOauthPath: z.string().optional(), }) @@ -432,25 +369,16 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv openAiNativeSchema.merge(z.object({ apiProvider: z.literal("openai-native") })), mistralSchema.merge(z.object({ apiProvider: z.literal("mistral") })), deepSeekSchema.merge(z.object({ apiProvider: z.literal("deepseek") })), - deepInfraSchema.merge(z.object({ 
apiProvider: z.literal("deepinfra") })), - doubaoSchema.merge(z.object({ apiProvider: z.literal("doubao") })), moonshotSchema.merge(z.object({ apiProvider: z.literal("moonshot") })), minimaxSchema.merge(z.object({ apiProvider: z.literal("minimax") })), - unboundSchema.merge(z.object({ apiProvider: z.literal("unbound") })), requestySchema.merge(z.object({ apiProvider: z.literal("requesty") })), fakeAiSchema.merge(z.object({ apiProvider: z.literal("fake-ai") })), xaiSchema.merge(z.object({ apiProvider: z.literal("xai") })), - groqSchema.merge(z.object({ apiProvider: z.literal("groq") })), basetenSchema.merge(z.object({ apiProvider: z.literal("baseten") })), - huggingFaceSchema.merge(z.object({ apiProvider: z.literal("huggingface") })), - chutesSchema.merge(z.object({ apiProvider: z.literal("chutes") })), litellmSchema.merge(z.object({ apiProvider: z.literal("litellm") })), - cerebrasSchema.merge(z.object({ apiProvider: z.literal("cerebras") })), sambaNovaSchema.merge(z.object({ apiProvider: z.literal("sambanova") })), zaiSchema.merge(z.object({ apiProvider: z.literal("zai") })), fireworksSchema.merge(z.object({ apiProvider: z.literal("fireworks") })), - featherlessSchema.merge(z.object({ apiProvider: z.literal("featherless") })), - ioIntelligenceSchema.merge(z.object({ apiProvider: z.literal("io-intelligence") })), qwenCodeSchema.merge(z.object({ apiProvider: z.literal("qwen-code") })), rooSchema.merge(z.object({ apiProvider: z.literal("roo") })), vercelAiGatewaySchema.merge(z.object({ apiProvider: z.literal("vercel-ai-gateway") })), @@ -473,25 +401,16 @@ export const providerSettingsSchema = z.object({ ...openAiNativeSchema.shape, ...mistralSchema.shape, ...deepSeekSchema.shape, - ...deepInfraSchema.shape, - ...doubaoSchema.shape, ...moonshotSchema.shape, ...minimaxSchema.shape, - ...unboundSchema.shape, ...requestySchema.shape, ...fakeAiSchema.shape, ...xaiSchema.shape, - ...groqSchema.shape, ...basetenSchema.shape, - ...huggingFaceSchema.shape, - ...chutesSchema.shape, ...litellmSchema.shape, - ...cerebrasSchema.shape, ...sambaNovaSchema.shape, ...zaiSchema.shape, ...fireworksSchema.shape, - ...featherlessSchema.shape, - ...ioIntelligenceSchema.shape, ...qwenCodeSchema.shape, ...rooSchema.shape, ...vercelAiGatewaySchema.shape, @@ -521,13 +440,9 @@ export const modelIdKeys = [ "ollamaModelId", "lmStudioModelId", "lmStudioDraftModelId", - "unboundModelId", "requestyModelId", "litellmModelId", - "huggingFaceModelId", - "ioIntelligenceModelId", "vercelAiGatewayModelId", - "deepInfraModelId", ] as const satisfies readonly (keyof ProviderSettings)[] export type ModelIdKey = (typeof modelIdKeys)[number] @@ -561,23 +476,14 @@ export const modelIdKeysByProvider: Record = { moonshot: "apiModelId", minimax: "apiModelId", deepseek: "apiModelId", - deepinfra: "deepInfraModelId", - doubao: "apiModelId", "qwen-code": "apiModelId", - unbound: "unboundModelId", requesty: "requestyModelId", xai: "apiModelId", - groq: "apiModelId", baseten: "apiModelId", - chutes: "apiModelId", litellm: "litellmModelId", - huggingface: "huggingFaceModelId", - cerebras: "apiModelId", sambanova: "apiModelId", zai: "apiModelId", fireworks: "apiModelId", - featherless: "apiModelId", - "io-intelligence": "ioIntelligenceModelId", roo: "apiModelId", "vercel-ai-gateway": "vercelAiGatewayModelId", } @@ -629,22 +535,11 @@ export const MODELS_BY_PROVIDER: Record< label: "Amazon Bedrock", models: Object.keys(bedrockModels), }, - cerebras: { - id: "cerebras", - label: "Cerebras", - models: Object.keys(cerebrasModels), - }, deepseek: { 
id: "deepseek", label: "DeepSeek", models: Object.keys(deepSeekModels), }, - doubao: { id: "doubao", label: "Doubao", models: Object.keys(doubaoModels) }, - featherless: { - id: "featherless", - label: "Featherless", - models: Object.keys(featherlessModels), - }, fireworks: { id: "fireworks", label: "Fireworks", @@ -655,12 +550,6 @@ export const MODELS_BY_PROVIDER: Record< label: "Google Gemini", models: Object.keys(geminiModels), }, - groq: { id: "groq", label: "Groq", models: Object.keys(groqModels) }, - "io-intelligence": { - id: "io-intelligence", - label: "IO Intelligence", - models: Object.keys(ioIntelligenceModels), - }, mistral: { id: "mistral", label: "Mistral", @@ -708,14 +597,10 @@ export const MODELS_BY_PROVIDER: Record< baseten: { id: "baseten", label: "Baseten", models: Object.keys(basetenModels) }, // Dynamic providers; models pulled from remote APIs. - huggingface: { id: "huggingface", label: "Hugging Face", models: [] }, litellm: { id: "litellm", label: "LiteLLM", models: [] }, openrouter: { id: "openrouter", label: "OpenRouter", models: [] }, requesty: { id: "requesty", label: "Requesty", models: [] }, - unbound: { id: "unbound", label: "Unbound", models: [] }, - deepinfra: { id: "deepinfra", label: "DeepInfra", models: [] }, "vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] }, - chutes: { id: "chutes", label: "Chutes AI", models: [] }, // Local providers; models discovered from localhost endpoints. lmstudio: { id: "lmstudio", label: "LM Studio", models: [] }, diff --git a/packages/types/src/providers/cerebras.ts b/packages/types/src/providers/cerebras.ts deleted file mode 100644 index 2e9fccaa9df..00000000000 --- a/packages/types/src/providers/cerebras.ts +++ /dev/null @@ -1,58 +0,0 @@ -import type { ModelInfo } from "../model.js" - -// https://inference-docs.cerebras.ai/api-reference/chat-completions -export type CerebrasModelId = keyof typeof cerebrasModels - -export const cerebrasDefaultModelId: CerebrasModelId = "gpt-oss-120b" - -export const cerebrasModels = { - "zai-glm-4.7": { - maxTokens: 16384, // Conservative default to avoid premature rate limiting (Cerebras reserves quota upfront) - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: true, - supportsTemperature: true, - defaultTemperature: 1.0, - inputPrice: 0, - outputPrice: 0, - description: - "Highly capable general-purpose model on Cerebras (up to 1,000 tokens/s), competitive with leading proprietary models on coding tasks.", - }, - "qwen-3-235b-a22b-instruct-2507": { - maxTokens: 16384, // Conservative default to avoid premature rate limiting - contextWindow: 64000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Intelligent model with ~1400 tokens/s", - }, - "llama-3.3-70b": { - maxTokens: 16384, // Conservative default to avoid premature rate limiting - contextWindow: 64000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Powerful model with ~2600 tokens/s", - }, - "qwen-3-32b": { - maxTokens: 16384, // Conservative default to avoid premature rate limiting - contextWindow: 64000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "SOTA coding performance with ~2500 tokens/s", - }, - "gpt-oss-120b": { - maxTokens: 16384, // Conservative default to avoid premature rate limiting - contextWindow: 64000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - 
outputPrice: 0, - description: - "OpenAI GPT OSS model with ~2800 tokens/s\n\n• 64K context window\n• Excels at efficient reasoning across science, math, and coding", - }, -} as const satisfies Record diff --git a/packages/types/src/providers/chutes.ts b/packages/types/src/providers/chutes.ts deleted file mode 100644 index 69e6b2e68b7..00000000000 --- a/packages/types/src/providers/chutes.ts +++ /dev/null @@ -1,421 +0,0 @@ -import type { ModelInfo } from "../model.js" - -// https://llm.chutes.ai/v1 (OpenAI compatible) -export type ChutesModelId = - | "deepseek-ai/DeepSeek-R1-0528" - | "deepseek-ai/DeepSeek-R1" - | "deepseek-ai/DeepSeek-V3" - | "deepseek-ai/DeepSeek-V3.1" - | "deepseek-ai/DeepSeek-V3.1-Terminus" - | "deepseek-ai/DeepSeek-V3.1-turbo" - | "deepseek-ai/DeepSeek-V3.2-Exp" - | "unsloth/Llama-3.3-70B-Instruct" - | "chutesai/Llama-4-Scout-17B-16E-Instruct" - | "unsloth/Mistral-Nemo-Instruct-2407" - | "unsloth/gemma-3-12b-it" - | "NousResearch/DeepHermes-3-Llama-3-8B-Preview" - | "unsloth/gemma-3-4b-it" - | "nvidia/Llama-3_3-Nemotron-Super-49B-v1" - | "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1" - | "chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8" - | "deepseek-ai/DeepSeek-V3-Base" - | "deepseek-ai/DeepSeek-R1-Zero" - | "deepseek-ai/DeepSeek-V3-0324" - | "Qwen/Qwen3-235B-A22B" - | "Qwen/Qwen3-235B-A22B-Instruct-2507" - | "Qwen/Qwen3-32B" - | "Qwen/Qwen3-30B-A3B" - | "Qwen/Qwen3-14B" - | "Qwen/Qwen3-8B" - | "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8" - | "microsoft/MAI-DS-R1-FP8" - | "tngtech/DeepSeek-R1T-Chimera" - | "zai-org/GLM-4.5-Air" - | "zai-org/GLM-4.5-FP8" - | "zai-org/GLM-4.5-turbo" - | "zai-org/GLM-4.6-FP8" - | "zai-org/GLM-4.6-turbo" - | "meituan-longcat/LongCat-Flash-Thinking-FP8" - | "moonshotai/Kimi-K2-Instruct-75k" - | "moonshotai/Kimi-K2-Instruct-0905" - | "Qwen/Qwen3-235B-A22B-Thinking-2507" - | "Qwen/Qwen3-Next-80B-A3B-Instruct" - | "Qwen/Qwen3-Next-80B-A3B-Thinking" - | "Qwen/Qwen3-VL-235B-A22B-Thinking" - -export const chutesDefaultModelId: ChutesModelId = "deepseek-ai/DeepSeek-R1-0528" - -export const chutesModels = { - "deepseek-ai/DeepSeek-R1-0528": { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "DeepSeek R1 0528 model.", - }, - "deepseek-ai/DeepSeek-R1": { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "DeepSeek R1 model.", - }, - "deepseek-ai/DeepSeek-V3": { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "DeepSeek V3 model.", - }, - "deepseek-ai/DeepSeek-V3.1": { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "DeepSeek V3.1 model.", - }, - "deepseek-ai/DeepSeek-V3.1-Terminus": { - maxTokens: 163840, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.23, - outputPrice: 0.9, - description: - "DeepSeek‑V3.1‑Terminus is an update to V3.1 that improves language consistency by reducing CN/EN mix‑ups and eliminating random characters, while strengthening agent capabilities with notably better Code Agent and Search Agent performance.", - }, - "deepseek-ai/DeepSeek-V3.1-turbo": { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 1.0, - outputPrice: 3.0, - 
description: - "DeepSeek-V3.1-turbo is an FP8, speculative-decoding turbo variant optimized for ultra-fast single-shot queries (~200 TPS), with outputs close to the originals and solid function calling/reasoning/structured output, priced at $1/M input and $3/M output tokens, using 2× quota per request and not intended for bulk workloads.", - }, - "deepseek-ai/DeepSeek-V3.2-Exp": { - maxTokens: 163840, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.25, - outputPrice: 0.35, - description: - "DeepSeek-V3.2-Exp is an experimental LLM that introduces DeepSeek Sparse Attention to improve long‑context training and inference efficiency while maintaining performance comparable to V3.1‑Terminus.", - }, - "unsloth/Llama-3.3-70B-Instruct": { - maxTokens: 32768, // From Groq - contextWindow: 131072, // From Groq - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Unsloth Llama 3.3 70B Instruct model.", - }, - "chutesai/Llama-4-Scout-17B-16E-Instruct": { - maxTokens: 32768, - contextWindow: 512000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "ChutesAI Llama 4 Scout 17B Instruct model, 512K context.", - }, - "unsloth/Mistral-Nemo-Instruct-2407": { - maxTokens: 32768, - contextWindow: 128000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Unsloth Mistral Nemo Instruct model.", - }, - "unsloth/gemma-3-12b-it": { - maxTokens: 32768, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Unsloth Gemma 3 12B IT model.", - }, - "NousResearch/DeepHermes-3-Llama-3-8B-Preview": { - maxTokens: 32768, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Nous DeepHermes 3 Llama 3 8B Preview model.", - }, - "unsloth/gemma-3-4b-it": { - maxTokens: 32768, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Unsloth Gemma 3 4B IT model.", - }, - "nvidia/Llama-3_3-Nemotron-Super-49B-v1": { - maxTokens: 32768, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Nvidia Llama 3.3 Nemotron Super 49B model.", - }, - "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1": { - maxTokens: 32768, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Nvidia Llama 3.1 Nemotron Ultra 253B model.", - }, - "chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8": { - maxTokens: 32768, - contextWindow: 256000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "ChutesAI Llama 4 Maverick 17B Instruct FP8 model.", - }, - "deepseek-ai/DeepSeek-V3-Base": { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "DeepSeek V3 Base model.", - }, - "deepseek-ai/DeepSeek-R1-Zero": { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "DeepSeek R1 Zero model.", - }, - "deepseek-ai/DeepSeek-V3-0324": { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - 
outputPrice: 0, - description: "DeepSeek V3 (0324) model.", - }, - "Qwen/Qwen3-235B-A22B-Instruct-2507": { - maxTokens: 32768, - contextWindow: 262144, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Qwen3 235B A22B Instruct 2507 model with 262K context window.", - }, - "Qwen/Qwen3-235B-A22B": { - maxTokens: 32768, - contextWindow: 40960, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Qwen3 235B A22B model.", - }, - "Qwen/Qwen3-32B": { - maxTokens: 32768, - contextWindow: 40960, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Qwen3 32B model.", - }, - "Qwen/Qwen3-30B-A3B": { - maxTokens: 32768, - contextWindow: 40960, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Qwen3 30B A3B model.", - }, - "Qwen/Qwen3-14B": { - maxTokens: 32768, - contextWindow: 40960, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Qwen3 14B model.", - }, - "Qwen/Qwen3-8B": { - maxTokens: 32768, - contextWindow: 40960, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Qwen3 8B model.", - }, - "microsoft/MAI-DS-R1-FP8": { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Microsoft MAI-DS-R1 FP8 model.", - }, - "tngtech/DeepSeek-R1T-Chimera": { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "TNGTech DeepSeek R1T Chimera model.", - }, - "zai-org/GLM-4.5-Air": { - maxTokens: 32768, - contextWindow: 151329, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: - "GLM-4.5-Air model with 151,329 token context window and 106B total parameters with 12B activated.", - }, - "zai-org/GLM-4.5-FP8": { - maxTokens: 32768, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: - "GLM-4.5-FP8 model with 128k token context window, optimized for agent-based applications with MoE architecture.", - }, - "zai-org/GLM-4.5-turbo": { - maxTokens: 32768, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 1, - outputPrice: 3, - description: "GLM-4.5-turbo model with 128K token context window, optimized for fast inference.", - }, - "zai-org/GLM-4.6-FP8": { - maxTokens: 32768, - contextWindow: 202752, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: - "GLM-4.6 introduces major upgrades over GLM-4.5, including a longer 200K-token context window for complex tasks, stronger coding performance in benchmarks and real-world tools (such as Claude Code, Cline, Roo Code, and Kilo Code), improved reasoning with tool use during inference, more capable and efficient agent integration, and refined writing that better matches human style, readability, and natural role-play scenarios.", - }, - "zai-org/GLM-4.6-turbo": { - maxTokens: 202752, // From Chutes /v1/models: max_output_length - contextWindow: 202752, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 1.15, - outputPrice: 3.25, - description: "GLM-4.6-turbo model with 200K-token context window, optimized for fast 
inference.", - }, - "meituan-longcat/LongCat-Flash-Thinking-FP8": { - maxTokens: 32768, - contextWindow: 128000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: - "LongCat Flash Thinking FP8 model with 128K context window, optimized for complex reasoning and coding tasks.", - }, - "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8": { - maxTokens: 32768, - contextWindow: 262144, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Qwen3 Coder 480B A35B Instruct FP8 model, optimized for coding tasks.", - }, - "moonshotai/Kimi-K2-Instruct-75k": { - maxTokens: 32768, - contextWindow: 75000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.1481, - outputPrice: 0.5926, - description: "Moonshot AI Kimi K2 Instruct model with 75k context window.", - }, - "moonshotai/Kimi-K2-Instruct-0905": { - maxTokens: 32768, - contextWindow: 262144, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.1999, - outputPrice: 0.8001, - description: "Moonshot AI Kimi K2 Instruct 0905 model with 256k context window.", - }, - "Qwen/Qwen3-235B-A22B-Thinking-2507": { - maxTokens: 32768, - contextWindow: 262144, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.077968332, - outputPrice: 0.31202496, - description: "Qwen3 235B A22B Thinking 2507 model with 262K context window.", - }, - "Qwen/Qwen3-Next-80B-A3B-Instruct": { - maxTokens: 32768, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: - "Fast, stable instruction-tuned model optimized for complex tasks, RAG, and tool use without thinking traces.", - }, - "Qwen/Qwen3-Next-80B-A3B-Thinking": { - maxTokens: 32768, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: - "Reasoning-first model with structured thinking traces for multi-step problems, math proofs, and code synthesis.", - }, - "Qwen/Qwen3-VL-235B-A22B-Thinking": { - maxTokens: 262144, - contextWindow: 262144, - supportsImages: true, - supportsPromptCache: false, - inputPrice: 0.16, - outputPrice: 0.65, - description: - "Qwen3‑VL‑235B‑A22B‑Thinking is an open‑weight MoE vision‑language model (235B total, ~22B activated) optimized for deliberate multi‑step reasoning with strong text‑image‑video understanding and long‑context capabilities.", - }, -} as const satisfies Record - -export const chutesDefaultModelInfo: ModelInfo = chutesModels[chutesDefaultModelId] diff --git a/packages/types/src/providers/deepinfra.ts b/packages/types/src/providers/deepinfra.ts deleted file mode 100644 index 9a430b3789f..00000000000 --- a/packages/types/src/providers/deepinfra.ts +++ /dev/null @@ -1,14 +0,0 @@ -import type { ModelInfo } from "../model.js" - -// Default fallback values for DeepInfra when model metadata is not yet loaded. 
-export const deepInfraDefaultModelId = "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo" - -export const deepInfraDefaultModelInfo: ModelInfo = { - maxTokens: 16384, - contextWindow: 262144, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.3, - outputPrice: 1.2, - description: "Qwen 3 Coder 480B A35B Instruct Turbo model, 256K context.", -} diff --git a/packages/types/src/providers/doubao.ts b/packages/types/src/providers/doubao.ts deleted file mode 100644 index f948450bc42..00000000000 --- a/packages/types/src/providers/doubao.ts +++ /dev/null @@ -1,44 +0,0 @@ -import type { ModelInfo } from "../model.js" - -export const doubaoDefaultModelId = "doubao-seed-1-6-250615" - -export const doubaoModels = { - "doubao-seed-1-6-250615": { - maxTokens: 32_768, - contextWindow: 128_000, - supportsImages: true, - supportsPromptCache: true, - inputPrice: 0.0001, // $0.0001 per million tokens (cache miss) - outputPrice: 0.0004, // $0.0004 per million tokens - cacheWritesPrice: 0.0001, // $0.0001 per million tokens (cache miss) - cacheReadsPrice: 0.00002, // $0.00002 per million tokens (cache hit) - description: `Doubao Seed 1.6 is a powerful model designed for high-performance tasks with extensive context handling.`, - }, - "doubao-seed-1-6-thinking-250715": { - maxTokens: 32_768, - contextWindow: 128_000, - supportsImages: true, - supportsPromptCache: true, - inputPrice: 0.0002, // $0.0002 per million tokens - outputPrice: 0.0008, // $0.0008 per million tokens - cacheWritesPrice: 0.0002, // $0.0002 per million - cacheReadsPrice: 0.00004, // $0.00004 per million tokens (cache hit) - description: `Doubao Seed 1.6 Thinking is optimized for reasoning tasks, providing enhanced performance in complex problem-solving scenarios.`, - }, - "doubao-seed-1-6-flash-250715": { - maxTokens: 32_768, - contextWindow: 128_000, - supportsImages: true, - supportsPromptCache: true, - inputPrice: 0.00015, // $0.00015 per million tokens - outputPrice: 0.0006, // $0.0006 per million tokens - cacheWritesPrice: 0.00015, // $0.00015 per million - cacheReadsPrice: 0.00003, // $0.00003 per million tokens (cache hit) - description: `Doubao Seed 1.6 Flash is tailored for speed and efficiency, making it ideal for applications requiring rapid responses.`, - }, -} as const satisfies Record - -export const doubaoDefaultModelInfo: ModelInfo = doubaoModels[doubaoDefaultModelId] - -export const DOUBAO_API_BASE_URL = "https://ark.cn-beijing.volces.com/api/v3" -export const DOUBAO_API_CHAT_PATH = "/chat/completions" diff --git a/packages/types/src/providers/featherless.ts b/packages/types/src/providers/featherless.ts deleted file mode 100644 index 20cfe966546..00000000000 --- a/packages/types/src/providers/featherless.ts +++ /dev/null @@ -1,58 +0,0 @@ -import type { ModelInfo } from "../model.js" - -export type FeatherlessModelId = - | "deepseek-ai/DeepSeek-V3-0324" - | "deepseek-ai/DeepSeek-R1-0528" - | "moonshotai/Kimi-K2-Instruct" - | "openai/gpt-oss-120b" - | "Qwen/Qwen3-Coder-480B-A35B-Instruct" - -export const featherlessModels = { - "deepseek-ai/DeepSeek-V3-0324": { - maxTokens: 4096, - contextWindow: 32678, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "DeepSeek V3 0324 model.", - }, - "deepseek-ai/DeepSeek-R1-0528": { - maxTokens: 4096, - contextWindow: 32678, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "DeepSeek R1 0528 model.", - }, - "moonshotai/Kimi-K2-Instruct": { - maxTokens: 4096, - 
contextWindow: 32678, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Kimi K2 Instruct model.", - }, - "openai/gpt-oss-120b": { - maxTokens: 4096, - contextWindow: 32678, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "GPT-OSS 120B model.", - }, - "Qwen/Qwen3-Coder-480B-A35B-Instruct": { - maxTokens: 4096, - contextWindow: 32678, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Qwen3 Coder 480B A35B Instruct model.", - }, -} as const satisfies Record - -export const featherlessDefaultModelId: FeatherlessModelId = "moonshotai/Kimi-K2-Instruct" diff --git a/packages/types/src/providers/groq.ts b/packages/types/src/providers/groq.ts deleted file mode 100644 index 30e7c42ca1a..00000000000 --- a/packages/types/src/providers/groq.ts +++ /dev/null @@ -1,84 +0,0 @@ -import type { ModelInfo } from "../model.js" - -// https://console.groq.com/docs/models -export type GroqModelId = - | "llama-3.1-8b-instant" - | "llama-3.3-70b-versatile" - | "meta-llama/llama-4-scout-17b-16e-instruct" - | "qwen/qwen3-32b" - | "moonshotai/kimi-k2-instruct-0905" - | "openai/gpt-oss-120b" - | "openai/gpt-oss-20b" - -export const groqDefaultModelId: GroqModelId = "moonshotai/kimi-k2-instruct-0905" - -export const groqModels = { - // Models based on API response: https://api.groq.com/openai/v1/models - "llama-3.1-8b-instant": { - maxTokens: 8192, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.05, - outputPrice: 0.08, - description: "Meta Llama 3.1 8B Instant model, 128K context.", - }, - "llama-3.3-70b-versatile": { - maxTokens: 8192, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.59, - outputPrice: 0.79, - description: "Meta Llama 3.3 70B Versatile model, 128K context.", - }, - "meta-llama/llama-4-scout-17b-16e-instruct": { - maxTokens: 8192, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.11, - outputPrice: 0.34, - description: "Meta Llama 4 Scout 17B Instruct model, 128K context.", - }, - "qwen/qwen3-32b": { - maxTokens: 8192, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.29, - outputPrice: 0.59, - description: "Alibaba Qwen 3 32B model, 128K context.", - }, - "moonshotai/kimi-k2-instruct-0905": { - maxTokens: 16384, - contextWindow: 262144, - supportsImages: false, - supportsPromptCache: true, - inputPrice: 0.6, - outputPrice: 2.5, - cacheReadsPrice: 0.15, - description: - "Kimi K2 model gets a new version update: Agentic coding: more accurate, better generalization across scaffolds. Frontend coding: improved aesthetics and functionalities on web, 3d, and other tasks. 
Context length: extended from 128k to 256k, providing better long-horizon support.", - }, - "openai/gpt-oss-120b": { - maxTokens: 32766, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.15, - outputPrice: 0.75, - description: - "GPT-OSS 120B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 128 experts.", - }, - "openai/gpt-oss-20b": { - maxTokens: 32768, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.1, - outputPrice: 0.5, - description: - "GPT-OSS 20B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 32 experts.", - }, -} as const satisfies Record diff --git a/packages/types/src/providers/huggingface.ts b/packages/types/src/providers/huggingface.ts deleted file mode 100644 index d2571a073e7..00000000000 --- a/packages/types/src/providers/huggingface.ts +++ /dev/null @@ -1,17 +0,0 @@ -/** - * HuggingFace provider constants - */ - -// Default values for HuggingFace models -export const HUGGINGFACE_DEFAULT_MAX_TOKENS = 2048 -export const HUGGINGFACE_MAX_TOKENS_FALLBACK = 8192 -export const HUGGINGFACE_DEFAULT_CONTEXT_WINDOW = 128_000 - -// UI constants -export const HUGGINGFACE_SLIDER_STEP = 256 -export const HUGGINGFACE_SLIDER_MIN = 1 -export const HUGGINGFACE_TEMPERATURE_MAX_VALUE = 2 - -// API constants -export const HUGGINGFACE_API_URL = "https://router.huggingface.co/v1/models?collection=roocode" -export const HUGGINGFACE_CACHE_DURATION = 1000 * 60 * 60 // 1 hour diff --git a/packages/types/src/providers/index.ts b/packages/types/src/providers/index.ts index 2018954bbdd..a9c1e8804c4 100644 --- a/packages/types/src/providers/index.ts +++ b/packages/types/src/providers/index.ts @@ -1,16 +1,9 @@ export * from "./anthropic.js" export * from "./baseten.js" export * from "./bedrock.js" -export * from "./cerebras.js" -export * from "./chutes.js" export * from "./deepseek.js" -export * from "./doubao.js" -export * from "./featherless.js" export * from "./fireworks.js" export * from "./gemini.js" -export * from "./groq.js" -export * from "./huggingface.js" -export * from "./io-intelligence.js" export * from "./lite-llm.js" export * from "./lm-studio.js" export * from "./mistral.js" @@ -24,27 +17,19 @@ export * from "./qwen-code.js" export * from "./requesty.js" export * from "./roo.js" export * from "./sambanova.js" -export * from "./unbound.js" export * from "./vertex.js" export * from "./vscode-llm.js" export * from "./xai.js" export * from "./vercel-ai-gateway.js" export * from "./zai.js" -export * from "./deepinfra.js" export * from "./minimax.js" import { anthropicDefaultModelId } from "./anthropic.js" import { basetenDefaultModelId } from "./baseten.js" import { bedrockDefaultModelId } from "./bedrock.js" -import { cerebrasDefaultModelId } from "./cerebras.js" -import { chutesDefaultModelId } from "./chutes.js" import { deepSeekDefaultModelId } from "./deepseek.js" -import { doubaoDefaultModelId } from "./doubao.js" -import { featherlessDefaultModelId } from "./featherless.js" import { fireworksDefaultModelId } from "./fireworks.js" import { geminiDefaultModelId } from "./gemini.js" -import { groqDefaultModelId } from "./groq.js" -import { ioIntelligenceDefaultModelId } from "./io-intelligence.js" import { litellmDefaultModelId } from "./lite-llm.js" import { mistralDefaultModelId } from "./mistral.js" import { moonshotDefaultModelId } from "./moonshot.js" 
@@ -54,13 +39,11 @@ import { qwenCodeDefaultModelId } from "./qwen-code.js" import { requestyDefaultModelId } from "./requesty.js" import { rooDefaultModelId } from "./roo.js" import { sambaNovaDefaultModelId } from "./sambanova.js" -import { unboundDefaultModelId } from "./unbound.js" import { vertexDefaultModelId } from "./vertex.js" import { vscodeLlmDefaultModelId } from "./vscode-llm.js" import { xaiDefaultModelId } from "./xai.js" import { vercelAiGatewayDefaultModelId } from "./vercel-ai-gateway.js" import { internationalZAiDefaultModelId, mainlandZAiDefaultModelId } from "./zai.js" -import { deepInfraDefaultModelId } from "./deepinfra.js" import { minimaxDefaultModelId } from "./minimax.js" // Import the ProviderName type from provider-settings to avoid duplication @@ -80,18 +63,10 @@ export function getProviderDefaultModelId( return openRouterDefaultModelId case "requesty": return requestyDefaultModelId - case "unbound": - return unboundDefaultModelId case "litellm": return litellmDefaultModelId case "xai": return xaiDefaultModelId - case "groq": - return groqDefaultModelId - case "huggingface": - return "meta-llama/Llama-3.3-70B-Instruct" - case "chutes": - return chutesDefaultModelId case "baseten": return basetenDefaultModelId case "bedrock": @@ -102,8 +77,6 @@ export function getProviderDefaultModelId( return geminiDefaultModelId case "deepseek": return deepSeekDefaultModelId - case "doubao": - return doubaoDefaultModelId case "moonshot": return moonshotDefaultModelId case "minimax": @@ -122,20 +95,12 @@ export function getProviderDefaultModelId( return "" // Ollama uses dynamic model selection case "lmstudio": return "" // LMStudio uses dynamic model selection - case "deepinfra": - return deepInfraDefaultModelId case "vscode-lm": return vscodeLlmDefaultModelId - case "cerebras": - return cerebrasDefaultModelId case "sambanova": return sambaNovaDefaultModelId case "fireworks": return fireworksDefaultModelId - case "featherless": - return featherlessDefaultModelId - case "io-intelligence": - return ioIntelligenceDefaultModelId case "roo": return rooDefaultModelId case "qwen-code": diff --git a/packages/types/src/providers/io-intelligence.ts b/packages/types/src/providers/io-intelligence.ts deleted file mode 100644 index a9b845393f5..00000000000 --- a/packages/types/src/providers/io-intelligence.ts +++ /dev/null @@ -1,44 +0,0 @@ -import type { ModelInfo } from "../model.js" - -export type IOIntelligenceModelId = - | "deepseek-ai/DeepSeek-R1-0528" - | "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8" - | "Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar" - | "openai/gpt-oss-120b" - -export const ioIntelligenceDefaultModelId: IOIntelligenceModelId = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8" - -export const ioIntelligenceDefaultBaseUrl = "https://api.intelligence.io.solutions/api/v1" - -export const IO_INTELLIGENCE_CACHE_DURATION = 1000 * 60 * 60 // 1 hour - -export const ioIntelligenceModels = { - "deepseek-ai/DeepSeek-R1-0528": { - maxTokens: 8192, - contextWindow: 128000, - supportsImages: false, - supportsPromptCache: false, - description: "DeepSeek R1 reasoning model", - }, - "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": { - maxTokens: 8192, - contextWindow: 430000, - supportsImages: true, - supportsPromptCache: false, - description: "Llama 4 Maverick 17B model", - }, - "Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar": { - maxTokens: 8192, - contextWindow: 106000, - supportsImages: false, - supportsPromptCache: false, - description: "Qwen3 Coder 480B 
specialized for coding", - }, - "openai/gpt-oss-120b": { - maxTokens: 8192, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - description: "OpenAI GPT-OSS 120B model", - }, -} as const satisfies Record diff --git a/packages/types/src/providers/unbound.ts b/packages/types/src/providers/unbound.ts deleted file mode 100644 index 9715b835c9b..00000000000 --- a/packages/types/src/providers/unbound.ts +++ /dev/null @@ -1,14 +0,0 @@ -import type { ModelInfo } from "../model.js" - -export const unboundDefaultModelId = "anthropic/claude-sonnet-4-5" - -export const unboundDefaultModelInfo: ModelInfo = { - maxTokens: 8192, - contextWindow: 200_000, - supportsImages: true, - supportsPromptCache: true, - inputPrice: 3.0, - outputPrice: 15.0, - cacheWritesPrice: 3.75, - cacheReadsPrice: 0.3, -} diff --git a/packages/types/src/vscode-extension-host.ts b/packages/types/src/vscode-extension-host.ts index 51c7fa49d5e..eb2abd490d2 100644 --- a/packages/types/src/vscode-extension-host.ts +++ b/packages/types/src/vscode-extension-host.ts @@ -47,7 +47,6 @@ export interface ExtensionMessage { | "ollamaModels" | "lmStudioModels" | "vsCodeLmModels" - | "huggingFaceModels" | "vsCodeLmApiAvailable" | "updatePrompt" | "systemPrompt" @@ -144,23 +143,6 @@ export interface ExtensionMessage { ollamaModels?: ModelRecord lmStudioModels?: ModelRecord vsCodeLmModels?: { vendor?: string; family?: string; version?: string; id?: string }[] - huggingFaceModels?: Array<{ - id: string - object: string - created: number - owned_by: string - providers: Array<{ - provider: string - status: "live" | "staging" | "error" - supports_tools?: boolean - supports_structured_output?: boolean - context_length?: number - pricing?: { - input: number - output: number - } - }> - }> mcpServers?: McpServer[] commits?: GitCommit[] listApiConfig?: ProviderSettingsEntry[] @@ -467,7 +449,6 @@ export interface WebviewMessage { | "requestRooModels" | "requestRooCreditBalance" | "requestVsCodeLmModels" - | "requestHuggingFaceModels" | "openImage" | "saveImage" | "openFile" diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index ff809db9add..f92481c97d9 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -244,8 +244,8 @@ importers: specifier: workspace:^ version: link:../../packages/evals '@roo-code/types': - specifier: ^1.108.0 - version: 1.108.0 + specifier: workspace:^ + version: link:../../packages/types '@tanstack/react-query': specifier: ^5.69.0 version: 5.76.1(react@18.3.1) @@ -752,9 +752,6 @@ importers: '@ai-sdk/baseten': specifier: ^1.0.31 version: 1.0.31(zod@3.25.76) - '@ai-sdk/cerebras': - specifier: ^2.0.31 - version: 2.0.31(zod@3.25.76) '@ai-sdk/deepseek': specifier: ^2.0.18 version: 2.0.18(zod@3.25.76) @@ -767,9 +764,6 @@ importers: '@ai-sdk/google-vertex': specifier: ^4.0.45 version: 4.0.45(zod@3.25.76) - '@ai-sdk/groq': - specifier: ^3.0.22 - version: 3.0.22(zod@3.25.76) '@ai-sdk/mistral': specifier: ^3.0.19 version: 3.0.19(zod@3.25.76) @@ -1441,12 +1435,6 @@ packages: peerDependencies: zod: 3.25.76 - '@ai-sdk/cerebras@2.0.31': - resolution: {integrity: sha512-s7o4BRsbG2RFina4VwHs46RWlQPGCL1CrfOoMomYneJeA0CgpxPigPqwlrupaWWB42KIDDHN5gNOIsLst0oOPg==} - engines: {node: '>=18'} - peerDependencies: - zod: 3.25.76 - '@ai-sdk/deepseek@2.0.18': resolution: {integrity: sha512-AwtmFm7acnCsz3z82Yu5QKklSZz+cBwtxrc2hbw47tPF/38xr1zX3Vf/pP627EHwWkLV18UWivIxg0SHPP2w3A==} engines: {node: '>=18'} @@ -1477,12 +1465,6 @@ packages: peerDependencies: zod: 3.25.76 - '@ai-sdk/groq@3.0.22': - resolution: {integrity: 
sha512-QBkqBmlts2qz2vX54gXeP9IdztMFxZw7xPNwjOjHYhEL7RynzB2aFafPIbAYTVNosrU0YEETxhw9LISjS2TtXw==} - engines: {node: '>=18'} - peerDependencies: - zod: 3.25.76 - '@ai-sdk/mistral@3.0.19': resolution: {integrity: sha512-yd0OJ3fm2YKdwxh1pd9m720sENVVcylAD+Bki8C80QqVpUxGNL1/C4N4JJGb56eCCWr6VU/3gHFe9PKui9n/Hg==} engines: {node: '>=18'} @@ -11067,13 +11049,6 @@ snapshots: '@basetenlabs/performance-client': 0.0.10 zod: 3.25.76 - '@ai-sdk/cerebras@2.0.31(zod@3.25.76)': - dependencies: - '@ai-sdk/openai-compatible': 2.0.28(zod@3.25.76) - '@ai-sdk/provider': 3.0.8 - '@ai-sdk/provider-utils': 4.0.14(zod@3.25.76) - zod: 3.25.76 - '@ai-sdk/deepseek@2.0.18(zod@3.25.76)': dependencies: '@ai-sdk/provider': 3.0.8 @@ -11111,12 +11086,6 @@ snapshots: '@ai-sdk/provider-utils': 4.0.14(zod@3.25.76) zod: 3.25.76 - '@ai-sdk/groq@3.0.22(zod@3.25.76)': - dependencies: - '@ai-sdk/provider': 3.0.8 - '@ai-sdk/provider-utils': 4.0.14(zod@3.25.76) - zod: 3.25.76 - '@ai-sdk/mistral@3.0.19(zod@3.25.76)': dependencies: '@ai-sdk/provider': 3.0.8 @@ -14958,7 +14927,7 @@ snapshots: sirv: 3.0.1 tinyglobby: 0.2.14 tinyrainbow: 2.0.0 - vitest: 3.2.4(@types/debug@4.1.12)(@types/node@20.17.50)(@vitest/ui@3.2.4)(jiti@2.4.2)(jsdom@26.1.0)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) + vitest: 3.2.4(@types/debug@4.1.12)(@types/node@24.2.1)(@vitest/ui@3.2.4)(jiti@2.4.2)(jsdom@26.1.0)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) '@vitest/utils@3.2.4': dependencies: @@ -22251,8 +22220,8 @@ snapshots: zhipu-ai-provider@0.2.2(zod@3.25.76): dependencies: - '@ai-sdk/provider': 2.0.1 - '@ai-sdk/provider-utils': 3.0.20(zod@3.25.76) + '@ai-sdk/provider': 2.0.0 + '@ai-sdk/provider-utils': 3.0.5(zod@3.25.76) transitivePeerDependencies: - zod diff --git a/src/api/index.ts b/src/api/index.ts index 0e25a739a64..78609d51736 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -8,7 +8,6 @@ import { ApiStream } from "./transform/stream" import { AnthropicHandler, AwsBedrockHandler, - CerebrasHandler, OpenRouterHandler, VertexHandler, AnthropicVertexHandler, @@ -21,24 +20,16 @@ import { MoonshotHandler, MistralHandler, VsCodeLmHandler, - UnboundHandler, RequestyHandler, FakeAIHandler, XAIHandler, - GroqHandler, - HuggingFaceHandler, - ChutesHandler, LiteLLMHandler, QwenCodeHandler, SambaNovaHandler, - IOIntelligenceHandler, - DoubaoHandler, ZAiHandler, FireworksHandler, RooHandler, - FeatherlessHandler, VercelAiGatewayHandler, - DeepInfraHandler, MiniMaxHandler, BasetenHandler, } from "./providers" @@ -51,16 +42,13 @@ export interface SingleCompletionHandler { export interface ApiHandlerCreateMessageMetadata { /** * Task ID used for tracking and provider-specific features: - * - DeepInfra: Used as prompt_cache_key for caching * - Roo: Sent as X-Roo-Task-ID header * - Requesty: Sent as trace_id - * - Unbound: Sent in unbound_metadata */ taskId: string /** * Current mode slug for provider-specific tracking: * - Requesty: Sent in extra metadata - * - Unbound: Sent in unbound_metadata */ mode?: string suppressPreviousResponseId?: boolean @@ -156,8 +144,6 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler { return new OpenAiNativeHandler(options) case "deepseek": return new DeepSeekHandler(options) - case "doubao": - return new DoubaoHandler(options) case "qwen-code": return new QwenCodeHandler(options) case "moonshot": @@ -166,40 +152,24 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler { return new VsCodeLmHandler(options) case "mistral": return new MistralHandler(options) - case "unbound": - 
return new UnboundHandler(options) case "requesty": return new RequestyHandler(options) case "fake-ai": return new FakeAIHandler(options) case "xai": return new XAIHandler(options) - case "groq": - return new GroqHandler(options) - case "deepinfra": - return new DeepInfraHandler(options) - case "huggingface": - return new HuggingFaceHandler(options) - case "chutes": - return new ChutesHandler(options) case "litellm": return new LiteLLMHandler(options) - case "cerebras": - return new CerebrasHandler(options) case "sambanova": return new SambaNovaHandler(options) case "zai": return new ZAiHandler(options) case "fireworks": return new FireworksHandler(options) - case "io-intelligence": - return new IOIntelligenceHandler(options) case "roo": // Never throw exceptions from provider constructors // The provider-proxy server will handle authentication and return appropriate error codes return new RooHandler(options) - case "featherless": - return new FeatherlessHandler(options) case "vercel-ai-gateway": return new VercelAiGatewayHandler(options) case "minimax": diff --git a/src/api/providers/__tests__/cerebras.spec.ts b/src/api/providers/__tests__/cerebras.spec.ts deleted file mode 100644 index caf8861b46d..00000000000 --- a/src/api/providers/__tests__/cerebras.spec.ts +++ /dev/null @@ -1,455 +0,0 @@ -// Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls -const { mockStreamText, mockGenerateText } = vi.hoisted(() => ({ - mockStreamText: vi.fn(), - mockGenerateText: vi.fn(), -})) - -vi.mock("ai", async (importOriginal) => { - const actual = await importOriginal() - return { - ...actual, - streamText: mockStreamText, - generateText: mockGenerateText, - } -}) - -vi.mock("@ai-sdk/cerebras", () => ({ - createCerebras: vi.fn(() => { - // Return a function that returns a mock language model - return vi.fn(() => ({ - modelId: "llama-3.3-70b", - provider: "cerebras", - })) - }), -})) - -import type { Anthropic } from "@anthropic-ai/sdk" - -import { cerebrasDefaultModelId, cerebrasModels, type CerebrasModelId } from "@roo-code/types" - -import type { ApiHandlerOptions } from "../../../shared/api" - -import { CerebrasHandler } from "../cerebras" - -describe("CerebrasHandler", () => { - let handler: CerebrasHandler - let mockOptions: ApiHandlerOptions - - beforeEach(() => { - mockOptions = { - cerebrasApiKey: "test-api-key", - apiModelId: "llama-3.3-70b" as CerebrasModelId, - } - handler = new CerebrasHandler(mockOptions) - vi.clearAllMocks() - }) - - describe("constructor", () => { - it("should initialize with provided options", () => { - expect(handler).toBeInstanceOf(CerebrasHandler) - expect(handler.getModel().id).toBe(mockOptions.apiModelId) - }) - - it("should use default model ID if not provided", () => { - const handlerWithoutModel = new CerebrasHandler({ - ...mockOptions, - apiModelId: undefined, - }) - expect(handlerWithoutModel.getModel().id).toBe(cerebrasDefaultModelId) - }) - }) - - describe("getModel", () => { - it("should return model info for valid model ID", () => { - const model = handler.getModel() - expect(model.id).toBe(mockOptions.apiModelId) - expect(model.info).toBeDefined() - expect(model.info.maxTokens).toBe(16384) - expect(model.info.contextWindow).toBe(64000) - expect(model.info.supportsImages).toBe(false) - expect(model.info.supportsPromptCache).toBe(false) - }) - - it("should return provided model ID with default model info if model does not exist", () => { - const handlerWithInvalidModel = new CerebrasHandler({ - ...mockOptions, - 
apiModelId: "invalid-model", - }) - const model = handlerWithInvalidModel.getModel() - expect(model.id).toBe("invalid-model") // Returns provided ID - expect(model.info).toBeDefined() - // Should have the same base properties as default model - expect(model.info.contextWindow).toBe(cerebrasModels[cerebrasDefaultModelId].contextWindow) - }) - - it("should return default model if no model ID is provided", () => { - const handlerWithoutModel = new CerebrasHandler({ - ...mockOptions, - apiModelId: undefined, - }) - const model = handlerWithoutModel.getModel() - expect(model.id).toBe(cerebrasDefaultModelId) - expect(model.info).toBeDefined() - }) - - it("should include model parameters from getModelParams", () => { - const model = handler.getModel() - expect(model).toHaveProperty("temperature") - expect(model).toHaveProperty("maxTokens") - }) - }) - - describe("createMessage", () => { - const systemPrompt = "You are a helpful assistant." - const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [ - { - type: "text" as const, - text: "Hello!", - }, - ], - }, - ] - - it("should handle streaming responses", async () => { - // Mock the fullStream async generator - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response" } - } - - // Mock usage promise - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect(chunks.length).toBeGreaterThan(0) - const textChunks = chunks.filter((chunk) => chunk.type === "text") - expect(textChunks).toHaveLength(1) - expect(textChunks[0].text).toBe("Test response") - }) - - it("should include usage information", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const usageChunks = chunks.filter((chunk) => chunk.type === "usage") - expect(usageChunks.length).toBeGreaterThan(0) - expect(usageChunks[0].inputTokens).toBe(10) - expect(usageChunks[0].outputTokens).toBe(5) - }) - - it("should handle reasoning content in streaming responses", async () => { - // Mock the fullStream async generator with reasoning content - async function* mockFullStream() { - yield { type: "reasoning", text: "Let me think about this..." } - yield { type: "reasoning", text: " I'll analyze step by step." 
} - yield { type: "text-delta", text: "Test response" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - details: { - reasoningTokens: 15, - }, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - // Should have reasoning chunks - const reasoningChunks = chunks.filter((chunk) => chunk.type === "reasoning") - expect(reasoningChunks.length).toBe(2) - expect(reasoningChunks[0].text).toBe("Let me think about this...") - expect(reasoningChunks[1].text).toBe(" I'll analyze step by step.") - - // Should also have text chunks - const textChunks = chunks.filter((chunk) => chunk.type === "text") - expect(textChunks.length).toBe(1) - expect(textChunks[0].text).toBe("Test response") - }) - }) - - describe("completePrompt", () => { - it("should complete a prompt using generateText", async () => { - mockGenerateText.mockResolvedValue({ - text: "Test completion", - }) - - const result = await handler.completePrompt("Test prompt") - - expect(result).toBe("Test completion") - expect(mockGenerateText).toHaveBeenCalledWith( - expect.objectContaining({ - prompt: "Test prompt", - }), - ) - }) - }) - - describe("processUsageMetrics", () => { - it("should correctly process usage metrics", () => { - // We need to access the protected method, so we'll create a test subclass - class TestCerebrasHandler extends CerebrasHandler { - public testProcessUsageMetrics(usage: any) { - return this.processUsageMetrics(usage) - } - } - - const testHandler = new TestCerebrasHandler(mockOptions) - - const usage = { - inputTokens: 100, - outputTokens: 50, - details: { - cachedInputTokens: 20, - reasoningTokens: 30, - }, - } - - const result = testHandler.testProcessUsageMetrics(usage) - - expect(result.type).toBe("usage") - expect(result.inputTokens).toBe(100) - expect(result.outputTokens).toBe(50) - expect(result.cacheReadTokens).toBe(20) - expect(result.reasoningTokens).toBe(30) - }) - - it("should handle missing cache metrics gracefully", () => { - class TestCerebrasHandler extends CerebrasHandler { - public testProcessUsageMetrics(usage: any) { - return this.processUsageMetrics(usage) - } - } - - const testHandler = new TestCerebrasHandler(mockOptions) - - const usage = { - inputTokens: 100, - outputTokens: 50, - } - - const result = testHandler.testProcessUsageMetrics(usage) - - expect(result.type).toBe("usage") - expect(result.inputTokens).toBe(100) - expect(result.outputTokens).toBe(50) - expect(result.cacheReadTokens).toBeUndefined() - expect(result.reasoningTokens).toBeUndefined() - }) - }) - - describe("getMaxOutputTokens", () => { - it("should return maxTokens from model info", () => { - class TestCerebrasHandler extends CerebrasHandler { - public testGetMaxOutputTokens() { - return this.getMaxOutputTokens() - } - } - - const testHandler = new TestCerebrasHandler(mockOptions) - const result = testHandler.testGetMaxOutputTokens() - - // llama-3.3-70b maxTokens is 16384 - expect(result).toBe(16384) - }) - - it("should use modelMaxTokens when provided", () => { - class TestCerebrasHandler extends CerebrasHandler { - public testGetMaxOutputTokens() { - return this.getMaxOutputTokens() - } - } - - const customMaxTokens = 5000 - const testHandler = new TestCerebrasHandler({ - ...mockOptions, - modelMaxTokens: customMaxTokens, - }) - - const result = testHandler.testGetMaxOutputTokens() - 
expect(result).toBe(customMaxTokens) - }) - - it("should fall back to modelInfo.maxTokens when modelMaxTokens is not provided", () => { - class TestCerebrasHandler extends CerebrasHandler { - public testGetMaxOutputTokens() { - return this.getMaxOutputTokens() - } - } - - const testHandler = new TestCerebrasHandler(mockOptions) - const result = testHandler.testGetMaxOutputTokens() - - // llama-3.3-70b has maxTokens of 16384 - expect(result).toBe(16384) - }) - }) - - describe("tool handling", () => { - const systemPrompt = "You are a helpful assistant." - const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [{ type: "text" as const, text: "Hello!" }], - }, - ] - - it("should handle tool calls in streaming", async () => { - async function* mockFullStream() { - yield { - type: "tool-input-start", - id: "tool-call-1", - toolName: "read_file", - } - yield { - type: "tool-input-delta", - id: "tool-call-1", - delta: '{"path":"test.ts"}', - } - yield { - type: "tool-input-end", - id: "tool-call-1", - } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - }) - - const stream = handler.createMessage(systemPrompt, messages, { - taskId: "test-task", - tools: [ - { - type: "function", - function: { - name: "read_file", - description: "Read a file", - parameters: { - type: "object", - properties: { path: { type: "string" } }, - required: ["path"], - }, - }, - }, - ], - }) - - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const toolCallStartChunks = chunks.filter((c) => c.type === "tool_call_start") - const toolCallDeltaChunks = chunks.filter((c) => c.type === "tool_call_delta") - const toolCallEndChunks = chunks.filter((c) => c.type === "tool_call_end") - - expect(toolCallStartChunks.length).toBe(1) - expect(toolCallStartChunks[0].id).toBe("tool-call-1") - expect(toolCallStartChunks[0].name).toBe("read_file") - - expect(toolCallDeltaChunks.length).toBe(1) - expect(toolCallDeltaChunks[0].delta).toBe('{"path":"test.ts"}') - - expect(toolCallEndChunks.length).toBe(1) - expect(toolCallEndChunks[0].id).toBe("tool-call-1") - }) - - it("should ignore tool-call events to prevent duplicate tools in UI", async () => { - // tool-call events are intentionally ignored because tool-input-start/delta/end - // already provide complete tool call information. Emitting tool-call would cause - // duplicate tools in the UI for AI SDK providers (e.g., DeepSeek, Moonshot, Cerebras). 
- async function* mockFullStream() { - yield { - type: "tool-call", - toolCallId: "tool-call-1", - toolName: "read_file", - input: { path: "test.ts" }, - } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - }) - - const stream = handler.createMessage(systemPrompt, messages, { - taskId: "test-task", - tools: [ - { - type: "function", - function: { - name: "read_file", - description: "Read a file", - parameters: { - type: "object", - properties: { path: { type: "string" } }, - required: ["path"], - }, - }, - }, - ], - }) - - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - // tool-call events are ignored, so no tool_call chunks should be emitted - const toolCallChunks = chunks.filter((c) => c.type === "tool_call") - expect(toolCallChunks.length).toBe(0) - }) - }) -}) diff --git a/src/api/providers/__tests__/chutes.spec.ts b/src/api/providers/__tests__/chutes.spec.ts deleted file mode 100644 index 22da3500034..00000000000 --- a/src/api/providers/__tests__/chutes.spec.ts +++ /dev/null @@ -1,490 +0,0 @@ -// npx vitest run api/providers/__tests__/chutes.spec.ts - -const { mockStreamText, mockGenerateText, mockGetModels, mockGetModelsFromCache } = vi.hoisted(() => ({ - mockStreamText: vi.fn(), - mockGenerateText: vi.fn(), - mockGetModels: vi.fn(), - mockGetModelsFromCache: vi.fn(), -})) - -vi.mock("ai", async (importOriginal) => { - const actual = await importOriginal() - return { - ...actual, - streamText: mockStreamText, - generateText: mockGenerateText, - } -}) - -vi.mock("@ai-sdk/openai-compatible", () => ({ - createOpenAICompatible: vi.fn(() => { - return vi.fn((modelId: string) => ({ - modelId, - provider: "chutes", - })) - }), -})) - -vi.mock("../fetchers/modelCache", () => ({ - getModels: mockGetModels, - getModelsFromCache: mockGetModelsFromCache, -})) - -import type { Anthropic } from "@anthropic-ai/sdk" - -import { chutesDefaultModelId, chutesDefaultModelInfo, DEEP_SEEK_DEFAULT_TEMPERATURE } from "@roo-code/types" - -import { ChutesHandler } from "../chutes" - -describe("ChutesHandler", () => { - let handler: ChutesHandler - - beforeEach(() => { - vi.clearAllMocks() - mockGetModels.mockResolvedValue({ - [chutesDefaultModelId]: chutesDefaultModelInfo, - }) - mockGetModelsFromCache.mockReturnValue(undefined) - handler = new ChutesHandler({ chutesApiKey: "test-key" }) - }) - - afterEach(() => { - vi.restoreAllMocks() - }) - - describe("constructor", () => { - it("should initialize with provided options", () => { - expect(handler).toBeInstanceOf(ChutesHandler) - }) - - it("should use default model when no model ID is provided", () => { - const model = handler.getModel() - expect(model.id).toBe(chutesDefaultModelId) - }) - }) - - describe("getModel", () => { - it("should return default model when no model is specified and no cache", () => { - const model = handler.getModel() - expect(model.id).toBe(chutesDefaultModelId) - expect(model.info).toEqual( - expect.objectContaining({ - ...chutesDefaultModelInfo, - }), - ) - }) - - it("should return model info from fetched models", async () => { - const testModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsImages: false, - supportsPromptCache: false, - } - mockGetModels.mockResolvedValue({ - "some-model": testModelInfo, - }) - - const handlerWithModel = new ChutesHandler({ - apiModelId: "some-model", - chutesApiKey: "test-key", - }) - const model = await 
handlerWithModel.fetchModel() - expect(model.id).toBe("some-model") - expect(model.info).toEqual(expect.objectContaining(testModelInfo)) - }) - - it("should fall back to global cache when instance models are empty", () => { - const cachedInfo = { - maxTokens: 2048, - contextWindow: 64000, - supportsImages: false, - supportsPromptCache: false, - } - mockGetModelsFromCache.mockReturnValue({ - "cached-model": cachedInfo, - }) - - const handlerWithModel = new ChutesHandler({ - apiModelId: "cached-model", - chutesApiKey: "test-key", - }) - const model = handlerWithModel.getModel() - expect(model.id).toBe("cached-model") - expect(model.info).toEqual(expect.objectContaining(cachedInfo)) - }) - - it("should apply DeepSeek default temperature for R1 models", () => { - const r1Info = { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - } - mockGetModelsFromCache.mockReturnValue({ - "deepseek-ai/DeepSeek-R1-0528": r1Info, - }) - - const handlerWithModel = new ChutesHandler({ - apiModelId: "deepseek-ai/DeepSeek-R1-0528", - chutesApiKey: "test-key", - }) - const model = handlerWithModel.getModel() - expect(model.info.defaultTemperature).toBe(DEEP_SEEK_DEFAULT_TEMPERATURE) - expect(model.temperature).toBe(DEEP_SEEK_DEFAULT_TEMPERATURE) - }) - - it("should use default temperature for non-DeepSeek models", () => { - const modelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsImages: false, - supportsPromptCache: false, - } - mockGetModelsFromCache.mockReturnValue({ - "unsloth/Llama-3.3-70B-Instruct": modelInfo, - }) - - const handlerWithModel = new ChutesHandler({ - apiModelId: "unsloth/Llama-3.3-70B-Instruct", - chutesApiKey: "test-key", - }) - const model = handlerWithModel.getModel() - expect(model.info.defaultTemperature).toBe(0.5) - expect(model.temperature).toBe(0.5) - }) - }) - - describe("fetchModel", () => { - it("should fetch models and return the resolved model", async () => { - const model = await handler.fetchModel() - expect(mockGetModels).toHaveBeenCalledWith( - expect.objectContaining({ - provider: "chutes", - }), - ) - expect(model.id).toBe(chutesDefaultModelId) - }) - }) - - describe("createMessage", () => { - const systemPrompt = "You are a helpful assistant." 
- const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hi" }] - - it("should handle non-DeepSeek models with standard streaming", async () => { - mockGetModels.mockResolvedValue({ - "some-other-model": { maxTokens: 1024, contextWindow: 8192, supportsPromptCache: false }, - }) - - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - }) - - const handlerWithModel = new ChutesHandler({ - apiModelId: "some-other-model", - chutesApiKey: "test-key", - }) - - const stream = handlerWithModel.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect(chunks).toEqual([ - { type: "text", text: "Test response" }, - { - type: "usage", - inputTokens: 10, - outputTokens: 5, - cacheReadTokens: undefined, - reasoningTokens: undefined, - }, - ]) - }) - - it("should handle DeepSeek R1 reasoning format with TagMatcher", async () => { - mockGetModels.mockResolvedValue({ - "deepseek-ai/DeepSeek-R1-0528": { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - }, - }) - - async function* mockFullStream() { - yield { type: "text-delta", text: "Thinking..." } - yield { type: "text-delta", text: "Hello" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - }) - - const handlerWithModel = new ChutesHandler({ - apiModelId: "deepseek-ai/DeepSeek-R1-0528", - chutesApiKey: "test-key", - }) - - const stream = handlerWithModel.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect(chunks).toEqual([ - { type: "reasoning", text: "Thinking..." 
}, - { type: "text", text: "Hello" }, - { - type: "usage", - inputTokens: 10, - outputTokens: 5, - cacheReadTokens: undefined, - reasoningTokens: undefined, - }, - ]) - }) - - it("should handle tool calls in R1 path", async () => { - mockGetModels.mockResolvedValue({ - "deepseek-ai/DeepSeek-R1-0528": { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - }, - }) - - async function* mockFullStream() { - yield { type: "text-delta", text: "Let me help" } - yield { - type: "tool-input-start", - id: "call_123", - toolName: "test_tool", - } - yield { - type: "tool-input-delta", - id: "call_123", - delta: '{"arg":"value"}', - } - yield { - type: "tool-input-end", - id: "call_123", - } - } - - const mockUsage = Promise.resolve({ - inputTokens: 15, - outputTokens: 10, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - }) - - const handlerWithModel = new ChutesHandler({ - apiModelId: "deepseek-ai/DeepSeek-R1-0528", - chutesApiKey: "test-key", - }) - - const stream = handlerWithModel.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect(chunks).toContainEqual({ type: "text", text: "Let me help" }) - expect(chunks).toContainEqual({ - type: "tool_call_start", - id: "call_123", - name: "test_tool", - }) - expect(chunks).toContainEqual({ - type: "tool_call_delta", - id: "call_123", - delta: '{"arg":"value"}', - }) - expect(chunks).toContainEqual({ - type: "tool_call_end", - id: "call_123", - }) - }) - - it("should merge system prompt into first user message for R1 path", async () => { - mockGetModels.mockResolvedValue({ - "deepseek-ai/DeepSeek-R1-0528": { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - }, - }) - - async function* mockFullStream() { - yield { type: "text-delta", text: "Response" } - } - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: Promise.resolve({ inputTokens: 5, outputTokens: 3 }), - }) - - const handlerWithModel = new ChutesHandler({ - apiModelId: "deepseek-ai/DeepSeek-R1-0528", - chutesApiKey: "test-key", - }) - - const stream = handlerWithModel.createMessage(systemPrompt, messages) - for await (const _ of stream) { - // consume - } - - expect(mockStreamText).toHaveBeenCalledWith( - expect.objectContaining({ - messages: expect.any(Array), - }), - ) - - const callArgs = mockStreamText.mock.calls[0][0] - expect(callArgs.system).toBeUndefined() - }) - - it("should pass system prompt separately for non-R1 path", async () => { - mockGetModels.mockResolvedValue({ - "some-model": { maxTokens: 1024, contextWindow: 8192, supportsPromptCache: false }, - }) - - async function* mockFullStream() { - yield { type: "text-delta", text: "Response" } - } - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: Promise.resolve({ inputTokens: 5, outputTokens: 3 }), - }) - - const handlerWithModel = new ChutesHandler({ - apiModelId: "some-model", - chutesApiKey: "test-key", - }) - - const stream = handlerWithModel.createMessage(systemPrompt, messages) - for await (const _ of stream) { - // consume - } - - expect(mockStreamText).toHaveBeenCalledWith( - expect.objectContaining({ - system: systemPrompt, - }), - ) - }) - - it("should include usage information from stream", async () => { - mockGetModels.mockResolvedValue({ - "some-model": { maxTokens: 1024, contextWindow: 8192, supportsPromptCache: false }, - }) - - async 
function* mockFullStream() { - yield { type: "text-delta", text: "Hello" } - } - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: Promise.resolve({ - inputTokens: 20, - outputTokens: 10, - }), - }) - - const handlerWithModel = new ChutesHandler({ - apiModelId: "some-model", - chutesApiKey: "test-key", - }) - - const stream = handlerWithModel.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const usageChunks = chunks.filter((c) => c.type === "usage") - expect(usageChunks).toHaveLength(1) - expect(usageChunks[0].inputTokens).toBe(20) - expect(usageChunks[0].outputTokens).toBe(10) - }) - }) - - describe("completePrompt", () => { - it("should return text from generateText", async () => { - const expectedResponse = "This is a test response from Chutes" - mockGenerateText.mockResolvedValue({ text: expectedResponse }) - - const result = await handler.completePrompt("test prompt") - expect(result).toBe(expectedResponse) - expect(mockGenerateText).toHaveBeenCalledWith( - expect.objectContaining({ - prompt: "test prompt", - }), - ) - }) - - it("should handle errors in completePrompt", async () => { - const errorMessage = "Chutes API error" - mockGenerateText.mockRejectedValue(new Error(errorMessage)) - await expect(handler.completePrompt("test prompt")).rejects.toThrow( - `Chutes completion error: ${errorMessage}`, - ) - }) - - it("should pass temperature for R1 models in completePrompt", async () => { - mockGetModels.mockResolvedValue({ - "deepseek-ai/DeepSeek-R1-0528": { - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - }, - }) - - mockGenerateText.mockResolvedValue({ text: "response" }) - - const handlerWithModel = new ChutesHandler({ - apiModelId: "deepseek-ai/DeepSeek-R1-0528", - chutesApiKey: "test-key", - }) - - await handlerWithModel.completePrompt("test prompt") - - expect(mockGenerateText).toHaveBeenCalledWith( - expect.objectContaining({ - temperature: DEEP_SEEK_DEFAULT_TEMPERATURE, - }), - ) - }) - }) - - describe("isAiSdkProvider", () => { - it("should return true", () => { - expect(handler.isAiSdkProvider()).toBe(true) - }) - }) -}) diff --git a/src/api/providers/__tests__/deepinfra.spec.ts b/src/api/providers/__tests__/deepinfra.spec.ts deleted file mode 100644 index c4a9275762a..00000000000 --- a/src/api/providers/__tests__/deepinfra.spec.ts +++ /dev/null @@ -1,386 +0,0 @@ -// npx vitest api/providers/__tests__/deepinfra.spec.ts - -import { deepInfraDefaultModelId, deepInfraDefaultModelInfo } from "@roo-code/types" - -const mockCreate = vitest.fn() -const mockWithResponse = vitest.fn() - -vitest.mock("openai", () => { - const mockConstructor = vitest.fn() - - return { - __esModule: true, - default: mockConstructor.mockImplementation(() => ({ - chat: { - completions: { - create: mockCreate.mockImplementation(() => ({ - withResponse: mockWithResponse, - })), - }, - }, - })), - } -}) - -vitest.mock("../fetchers/modelCache", () => ({ - getModels: vitest.fn().mockResolvedValue({ - [deepInfraDefaultModelId]: deepInfraDefaultModelInfo, - }), - getModelsFromCache: vitest.fn().mockReturnValue(undefined), -})) - -import OpenAI from "openai" -import { DeepInfraHandler } from "../deepinfra" - -describe("DeepInfraHandler", () => { - let handler: DeepInfraHandler - - beforeEach(() => { - vi.clearAllMocks() - mockCreate.mockClear() - mockWithResponse.mockClear() - - handler = new DeepInfraHandler({}) - }) - - it("should use the correct 
DeepInfra base URL", () => { - expect(OpenAI).toHaveBeenCalledWith( - expect.objectContaining({ - baseURL: "https://api.deepinfra.com/v1/openai", - }), - ) - }) - - it("should use the provided API key", () => { - vi.clearAllMocks() - - const deepInfraApiKey = "test-api-key" - new DeepInfraHandler({ deepInfraApiKey }) - - expect(OpenAI).toHaveBeenCalledWith( - expect.objectContaining({ - apiKey: deepInfraApiKey, - }), - ) - }) - - it("should return default model when no model is specified", () => { - const model = handler.getModel() - expect(model.id).toBe(deepInfraDefaultModelId) - expect(model.info).toEqual(deepInfraDefaultModelInfo) - }) - - it("createMessage should yield text content from stream", async () => { - const testContent = "This is test content" - - mockWithResponse.mockResolvedValueOnce({ - data: { - [Symbol.asyncIterator]: () => ({ - next: vi - .fn() - .mockResolvedValueOnce({ - done: false, - value: { - choices: [{ delta: { content: testContent } }], - }, - }) - .mockResolvedValueOnce({ done: true }), - }), - }, - }) - - const stream = handler.createMessage("system prompt", []) - const firstChunk = await stream.next() - - expect(firstChunk.done).toBe(false) - expect(firstChunk.value).toEqual({ - type: "text", - text: testContent, - }) - }) - - it("createMessage should yield reasoning content from stream", async () => { - const testReasoning = "Test reasoning content" - - mockWithResponse.mockResolvedValueOnce({ - data: { - [Symbol.asyncIterator]: () => ({ - next: vi - .fn() - .mockResolvedValueOnce({ - done: false, - value: { - choices: [{ delta: { reasoning_content: testReasoning } }], - }, - }) - .mockResolvedValueOnce({ done: true }), - }), - }, - }) - - const stream = handler.createMessage("system prompt", []) - const firstChunk = await stream.next() - - expect(firstChunk.done).toBe(false) - expect(firstChunk.value).toEqual({ - type: "reasoning", - text: testReasoning, - }) - }) - - it("createMessage should yield usage data from stream", async () => { - mockWithResponse.mockResolvedValueOnce({ - data: { - [Symbol.asyncIterator]: () => ({ - next: vi - .fn() - .mockResolvedValueOnce({ - done: false, - value: { - choices: [{ delta: {} }], - usage: { - prompt_tokens: 10, - completion_tokens: 20, - prompt_tokens_details: { - cache_write_tokens: 15, - cached_tokens: 5, - }, - }, - }, - }) - .mockResolvedValueOnce({ done: true }), - }), - }, - }) - - const stream = handler.createMessage("system prompt", []) - const firstChunk = await stream.next() - - expect(firstChunk.done).toBe(false) - expect(firstChunk.value).toEqual({ - type: "usage", - inputTokens: 10, - outputTokens: 20, - cacheWriteTokens: 15, - cacheReadTokens: 5, - totalCost: expect.any(Number), - }) - }) - - describe("Native Tool Calling", () => { - const testTools = [ - { - type: "function" as const, - function: { - name: "test_tool", - description: "A test tool", - parameters: { - type: "object", - properties: { - arg1: { type: "string", description: "First argument" }, - }, - required: ["arg1"], - }, - }, - }, - ] - - it("should include tools in request when model supports native tools and tools are provided", async () => { - mockWithResponse.mockResolvedValueOnce({ - data: { - [Symbol.asyncIterator]: () => ({ - async next() { - return { done: true } - }, - }), - }, - }) - - const messageGenerator = handler.createMessage("test prompt", [], { - taskId: "test-task-id", - tools: testTools, - }) - await messageGenerator.next() - - expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - tools: 
expect.arrayContaining([ - expect.objectContaining({ - type: "function", - function: expect.objectContaining({ - name: "test_tool", - }), - }), - ]), - }), - ) - // parallel_tool_calls should be true by default when not explicitly set - const callArgs = mockCreate.mock.calls[0][0] - expect(callArgs).toHaveProperty("parallel_tool_calls", true) - }) - - it("should include tool_choice when provided", async () => { - mockWithResponse.mockResolvedValueOnce({ - data: { - [Symbol.asyncIterator]: () => ({ - async next() { - return { done: true } - }, - }), - }, - }) - - const messageGenerator = handler.createMessage("test prompt", [], { - taskId: "test-task-id", - tools: testTools, - tool_choice: "auto", - }) - await messageGenerator.next() - - expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - tool_choice: "auto", - }), - ) - }) - - it("should always include tools and tool_choice in request (tools are always present after PR #10841)", async () => { - mockWithResponse.mockResolvedValueOnce({ - data: { - [Symbol.asyncIterator]: () => ({ - async next() { - return { done: true } - }, - }), - }, - }) - - const messageGenerator = handler.createMessage("test prompt", [], { - taskId: "test-task-id", - }) - await messageGenerator.next() - - const callArgs = mockCreate.mock.calls[mockCreate.mock.calls.length - 1][0] - // Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS) - expect(callArgs).toHaveProperty("tools") - expect(callArgs).toHaveProperty("tool_choice") - // parallel_tool_calls should be true by default when not explicitly set - expect(callArgs).toHaveProperty("parallel_tool_calls", true) - }) - - it("should yield tool_call_partial chunks during streaming", async () => { - mockWithResponse.mockResolvedValueOnce({ - data: { - [Symbol.asyncIterator]: () => ({ - next: vi - .fn() - .mockResolvedValueOnce({ - done: false, - value: { - choices: [ - { - delta: { - tool_calls: [ - { - index: 0, - id: "call_123", - function: { - name: "test_tool", - arguments: '{"arg1":', - }, - }, - ], - }, - }, - ], - }, - }) - .mockResolvedValueOnce({ - done: false, - value: { - choices: [ - { - delta: { - tool_calls: [ - { - index: 0, - function: { - arguments: '"value"}', - }, - }, - ], - }, - }, - ], - }, - }) - .mockResolvedValueOnce({ done: true }), - }), - }, - }) - - const stream = handler.createMessage("test prompt", [], { - taskId: "test-task-id", - tools: testTools, - }) - - const chunks = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect(chunks).toContainEqual({ - type: "tool_call_partial", - index: 0, - id: "call_123", - name: "test_tool", - arguments: '{"arg1":', - }) - - expect(chunks).toContainEqual({ - type: "tool_call_partial", - index: 0, - id: undefined, - name: undefined, - arguments: '"value"}', - }) - }) - - it("should set parallel_tool_calls based on metadata", async () => { - mockWithResponse.mockResolvedValueOnce({ - data: { - [Symbol.asyncIterator]: () => ({ - async next() { - return { done: true } - }, - }), - }, - }) - - const messageGenerator = handler.createMessage("test prompt", [], { - taskId: "test-task-id", - tools: testTools, - parallelToolCalls: true, - }) - await messageGenerator.next() - - expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - parallel_tool_calls: true, - }), - ) - }) - }) - - describe("completePrompt", () => { - it("should return text from API", async () => { - const expectedResponse = "This is a test response" - mockCreate.mockResolvedValueOnce({ - choices: [{ message: { content: 
expectedResponse } }], - }) - - const result = await handler.completePrompt("test prompt") - expect(result).toBe(expectedResponse) - }) - }) -}) diff --git a/src/api/providers/__tests__/featherless.spec.ts b/src/api/providers/__tests__/featherless.spec.ts deleted file mode 100644 index 0223da9a63c..00000000000 --- a/src/api/providers/__tests__/featherless.spec.ts +++ /dev/null @@ -1,356 +0,0 @@ -// npx vitest run api/providers/__tests__/featherless.spec.ts - -const { mockStreamText, mockGenerateText } = vi.hoisted(() => ({ - mockStreamText: vi.fn(), - mockGenerateText: vi.fn(), -})) - -vi.mock("ai", async (importOriginal) => { - const actual = await importOriginal() - return { - ...actual, - streamText: mockStreamText, - generateText: mockGenerateText, - } -}) - -vi.mock("@ai-sdk/openai-compatible", () => ({ - createOpenAICompatible: vi.fn(() => { - return vi.fn(() => ({ - modelId: "featherless-model", - provider: "Featherless", - })) - }), -})) - -import type { Anthropic } from "@anthropic-ai/sdk" - -import { type FeatherlessModelId, featherlessDefaultModelId, featherlessModels } from "@roo-code/types" - -import type { ApiHandlerOptions } from "../../../shared/api" - -import { FeatherlessHandler } from "../featherless" - -describe("FeatherlessHandler", () => { - let handler: FeatherlessHandler - let mockOptions: ApiHandlerOptions - - beforeEach(() => { - mockOptions = { - featherlessApiKey: "test-api-key", - } - handler = new FeatherlessHandler(mockOptions) - vi.clearAllMocks() - }) - - describe("constructor", () => { - it("should initialize with provided options", () => { - expect(handler).toBeInstanceOf(FeatherlessHandler) - expect(handler.getModel().id).toBe(featherlessDefaultModelId) - }) - - it("should use specified model ID when provided", () => { - const testModelId: FeatherlessModelId = "moonshotai/Kimi-K2-Instruct" - const handlerWithModel = new FeatherlessHandler({ - apiModelId: testModelId, - featherlessApiKey: "test-api-key", - }) - expect(handlerWithModel.getModel().id).toBe(testModelId) - }) - }) - - describe("getModel", () => { - it("should return default model when no model is specified", () => { - const model = handler.getModel() - expect(model.id).toBe(featherlessDefaultModelId) - expect(model.info).toEqual(expect.objectContaining(featherlessModels[featherlessDefaultModelId])) - }) - - it("should return specified model when valid model is provided", () => { - const testModelId: FeatherlessModelId = "moonshotai/Kimi-K2-Instruct" - const handlerWithModel = new FeatherlessHandler({ - apiModelId: testModelId, - featherlessApiKey: "test-api-key", - }) - const model = handlerWithModel.getModel() - expect(model.id).toBe(testModelId) - expect(model.info).toEqual(expect.objectContaining(featherlessModels[testModelId])) - }) - - it("should use default temperature for non-DeepSeek models", () => { - const testModelId: FeatherlessModelId = "moonshotai/Kimi-K2-Instruct" - const handlerWithModel = new FeatherlessHandler({ - apiModelId: testModelId, - featherlessApiKey: "test-api-key", - }) - const model = handlerWithModel.getModel() - expect(model.temperature).toBe(0.5) - }) - - it("should include model parameters from getModelParams", () => { - const model = handler.getModel() - expect(model).toHaveProperty("temperature") - expect(model).toHaveProperty("maxTokens") - }) - }) - - describe("createMessage", () => { - const systemPrompt = "You are a helpful assistant." 
- const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [ - { - type: "text" as const, - text: "Hello!", - }, - ], - }, - ] - - it("should handle streaming responses for non-R1 models", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect(chunks.length).toBeGreaterThan(0) - const textChunks = chunks.filter((chunk) => chunk.type === "text") - expect(textChunks).toHaveLength(1) - expect(textChunks[0].text).toBe("Test response") - }) - - it("should include usage information", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const usageChunks = chunks.filter((chunk) => chunk.type === "usage") - expect(usageChunks.length).toBeGreaterThan(0) - expect(usageChunks[0].inputTokens).toBe(10) - expect(usageChunks[0].outputTokens).toBe(5) - }) - - it("should handle reasoning format from DeepSeek-R1 models using TagMatcher", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Thinking..." } - yield { type: "text-delta", text: "Hello" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - }) - - vi.spyOn(handler, "getModel").mockReturnValue({ - id: "some-DeepSeek-R1-model", - info: { maxTokens: 1024, temperature: 0.6 }, - maxTokens: 1024, - temperature: 0.6, - } as any) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect(chunks[0]).toEqual({ type: "reasoning", text: "Thinking..." 
}) - expect(chunks[1]).toEqual({ type: "text", text: "Hello" }) - expect(chunks[2]).toMatchObject({ type: "usage", inputTokens: 10, outputTokens: 5 }) - }) - - it("should delegate to super.createMessage for non-DeepSeek-R1 models", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Standard response" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 15, - outputTokens: 8, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - }) - - vi.spyOn(handler, "getModel").mockReturnValue({ - id: "some-other-model", - info: { maxTokens: 1024, temperature: 0.5 }, - maxTokens: 1024, - temperature: 0.5, - } as any) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect(chunks[0]).toEqual({ type: "text", text: "Standard response" }) - expect(chunks[1]).toMatchObject({ type: "usage", inputTokens: 15, outputTokens: 8 }) - }) - - it("should pass correct model to streamText for R1 path", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "response" } - } - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: Promise.resolve({ inputTokens: 0, outputTokens: 0 }), - }) - - vi.spyOn(handler, "getModel").mockReturnValue({ - id: "some-DeepSeek-R1-model", - info: { maxTokens: 2048, temperature: 0.6 }, - maxTokens: 2048, - temperature: 0.6, - } as any) - - const stream = handler.createMessage(systemPrompt, messages) - // Consume stream - for await (const _ of stream) { - // drain - } - - expect(mockStreamText).toHaveBeenCalledWith( - expect.objectContaining({ - temperature: 0.6, - }), - ) - }) - - it("should not pass system prompt to streamText for R1 path", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "response" } - } - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: Promise.resolve({ inputTokens: 0, outputTokens: 0 }), - }) - - vi.spyOn(handler, "getModel").mockReturnValue({ - id: "some-DeepSeek-R1-model", - info: { maxTokens: 2048, temperature: 0.6 }, - maxTokens: 2048, - temperature: 0.6, - } as any) - - const stream = handler.createMessage(systemPrompt, messages) - for await (const _ of stream) { - // drain - } - - const callArgs = mockStreamText.mock.calls[0][0] - expect(callArgs.system).toBeUndefined() - expect(callArgs.messages).toBeDefined() - }) - - it("should merge consecutive user messages in R1 path to avoid DeepSeek rejection", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "response" } - } - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: Promise.resolve({ inputTokens: 0, outputTokens: 0 }), - }) - - vi.spyOn(handler, "getModel").mockReturnValue({ - id: "some-DeepSeek-R1-model", - info: { maxTokens: 2048, temperature: 0.6 }, - maxTokens: 2048, - temperature: 0.6, - } as any) - - // messages starts with a user message, so after prepending the system - // prompt as a user message we'd have two consecutive user messages. - const userFirstMessages: Anthropic.Messages.MessageParam[] = [ - { role: "user", content: "Hello!" 
}, - { role: "assistant", content: "Hi there" }, - { role: "user", content: "Follow-up" }, - ] - - const stream = handler.createMessage(systemPrompt, userFirstMessages) - for await (const _ of stream) { - // drain - } - - const callArgs = mockStreamText.mock.calls[0][0] - const passedMessages = callArgs.messages - - // Verify no two consecutive messages share the same role - for (let i = 1; i < passedMessages.length; i++) { - expect(passedMessages[i].role).not.toBe(passedMessages[i - 1].role) - } - - // The system prompt and first user message should be merged into a single user message - expect(passedMessages[0].role).toBe("user") - expect(passedMessages[1].role).toBe("assistant") - expect(passedMessages[2].role).toBe("user") - expect(passedMessages).toHaveLength(3) - }) - }) - - describe("completePrompt", () => { - it("should complete a prompt using generateText", async () => { - mockGenerateText.mockResolvedValue({ - text: "Test completion from Featherless", - }) - - const result = await handler.completePrompt("Test prompt") - - expect(result).toBe("Test completion from Featherless") - expect(mockGenerateText).toHaveBeenCalledWith( - expect.objectContaining({ - prompt: "Test prompt", - }), - ) - }) - }) - - describe("isAiSdkProvider", () => { - it("should return true", () => { - expect(handler.isAiSdkProvider()).toBe(true) - }) - }) -}) diff --git a/src/api/providers/__tests__/groq.spec.ts b/src/api/providers/__tests__/groq.spec.ts deleted file mode 100644 index efb5712cb99..00000000000 --- a/src/api/providers/__tests__/groq.spec.ts +++ /dev/null @@ -1,578 +0,0 @@ -// npx vitest run src/api/providers/__tests__/groq.spec.ts - -// Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls -const { mockStreamText, mockGenerateText } = vi.hoisted(() => ({ - mockStreamText: vi.fn(), - mockGenerateText: vi.fn(), -})) - -vi.mock("ai", async (importOriginal) => { - const actual = await importOriginal() - return { - ...actual, - streamText: mockStreamText, - generateText: mockGenerateText, - } -}) - -vi.mock("@ai-sdk/groq", () => ({ - createGroq: vi.fn(() => { - // Return a function that returns a mock language model - return vi.fn(() => ({ - modelId: "moonshotai/kimi-k2-instruct-0905", - provider: "groq", - })) - }), -})) - -import type { Anthropic } from "@anthropic-ai/sdk" - -import { groqDefaultModelId, groqModels, type GroqModelId } from "@roo-code/types" - -import type { ApiHandlerOptions } from "../../../shared/api" - -import { GroqHandler } from "../groq" - -describe("GroqHandler", () => { - let handler: GroqHandler - let mockOptions: ApiHandlerOptions - - beforeEach(() => { - mockOptions = { - groqApiKey: "test-groq-api-key", - apiModelId: "moonshotai/kimi-k2-instruct-0905", - } - handler = new GroqHandler(mockOptions) - vi.clearAllMocks() - }) - - describe("constructor", () => { - it("should initialize with provided options", () => { - expect(handler).toBeInstanceOf(GroqHandler) - expect(handler.getModel().id).toBe(mockOptions.apiModelId) - }) - - it("should use default model ID if not provided", () => { - const handlerWithoutModel = new GroqHandler({ - ...mockOptions, - apiModelId: undefined, - }) - expect(handlerWithoutModel.getModel().id).toBe(groqDefaultModelId) - }) - }) - - describe("getModel", () => { - it("should return default model when no model is specified", () => { - const handlerWithoutModel = new GroqHandler({ - groqApiKey: "test-groq-api-key", - }) - const model = handlerWithoutModel.getModel() - expect(model.id).toBe(groqDefaultModelId) - 
expect(model.info).toEqual(groqModels[groqDefaultModelId]) - }) - - it("should return specified model when valid model is provided", () => { - const testModelId: GroqModelId = "llama-3.3-70b-versatile" - const handlerWithModel = new GroqHandler({ - apiModelId: testModelId, - groqApiKey: "test-groq-api-key", - }) - const model = handlerWithModel.getModel() - expect(model.id).toBe(testModelId) - expect(model.info).toEqual(groqModels[testModelId]) - }) - - it("should return model info for llama-3.1-8b-instant", () => { - const handlerWithLlama = new GroqHandler({ - ...mockOptions, - apiModelId: "llama-3.1-8b-instant", - }) - const model = handlerWithLlama.getModel() - expect(model.id).toBe("llama-3.1-8b-instant") - expect(model.info).toBeDefined() - expect(model.info.maxTokens).toBe(8192) - expect(model.info.contextWindow).toBe(131072) - expect(model.info.supportsImages).toBe(false) - expect(model.info.supportsPromptCache).toBe(false) - }) - - it("should return model info for kimi-k2 which supports prompt cache", () => { - const handlerWithKimi = new GroqHandler({ - ...mockOptions, - apiModelId: "moonshotai/kimi-k2-instruct-0905", - }) - const model = handlerWithKimi.getModel() - expect(model.id).toBe("moonshotai/kimi-k2-instruct-0905") - expect(model.info).toBeDefined() - expect(model.info.maxTokens).toBe(16384) - expect(model.info.contextWindow).toBe(262144) - expect(model.info.supportsPromptCache).toBe(true) - }) - - it("should return provided model ID with default model info if model does not exist", () => { - const handlerWithInvalidModel = new GroqHandler({ - ...mockOptions, - apiModelId: "invalid-model", - }) - const model = handlerWithInvalidModel.getModel() - expect(model.id).toBe("invalid-model") - expect(model.info).toBeDefined() - // Should use default model info - expect(model.info).toBe(groqModels[groqDefaultModelId]) - }) - - it("should include model parameters from getModelParams", () => { - const model = handler.getModel() - expect(model).toHaveProperty("temperature") - expect(model).toHaveProperty("maxTokens") - }) - }) - - describe("createMessage", () => { - const systemPrompt = "You are a helpful assistant." 
- const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [ - { - type: "text" as const, - text: "Hello!", - }, - ], - }, - ] - - it("should handle streaming responses", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response from Groq" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - }) - - const mockProviderMetadata = Promise.resolve({}) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - providerMetadata: mockProviderMetadata, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect(chunks.length).toBeGreaterThan(0) - const textChunks = chunks.filter((chunk) => chunk.type === "text") - expect(textChunks).toHaveLength(1) - expect(textChunks[0].text).toBe("Test response from Groq") - }) - - it("should include usage information", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 20, - }) - - const mockProviderMetadata = Promise.resolve({}) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - providerMetadata: mockProviderMetadata, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const usageChunks = chunks.filter((chunk) => chunk.type === "usage") - expect(usageChunks.length).toBeGreaterThan(0) - expect(usageChunks[0].inputTokens).toBe(10) - expect(usageChunks[0].outputTokens).toBe(20) - }) - - it("should handle cached tokens in usage data from providerMetadata", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 100, - outputTokens: 50, - }) - - // Groq provides cache metrics via providerMetadata for supported models - const mockProviderMetadata = Promise.resolve({ - groq: { - promptCacheHitTokens: 30, - promptCacheMissTokens: 70, - }, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - providerMetadata: mockProviderMetadata, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const usageChunks = chunks.filter((chunk) => chunk.type === "usage") - expect(usageChunks.length).toBeGreaterThan(0) - expect(usageChunks[0].inputTokens).toBe(100) - expect(usageChunks[0].outputTokens).toBe(50) - expect(usageChunks[0].cacheReadTokens).toBe(30) - expect(usageChunks[0].cacheWriteTokens).toBe(70) - }) - - it("should handle usage with details.cachedInputTokens when providerMetadata is not available", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 100, - outputTokens: 50, - details: { - cachedInputTokens: 25, - }, - }) - - const mockProviderMetadata = Promise.resolve({}) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - providerMetadata: mockProviderMetadata, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const usageChunks = 
chunks.filter((chunk) => chunk.type === "usage") - expect(usageChunks.length).toBeGreaterThan(0) - expect(usageChunks[0].cacheReadTokens).toBe(25) - expect(usageChunks[0].cacheWriteTokens).toBeUndefined() - }) - - it("should pass correct temperature (0.5 default) to streamText", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test" } - } - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: Promise.resolve({ inputTokens: 0, outputTokens: 0 }), - providerMetadata: Promise.resolve({}), - }) - - const handlerWithDefaultTemp = new GroqHandler({ - groqApiKey: "test-key", - apiModelId: "llama-3.1-8b-instant", - }) - - const stream = handlerWithDefaultTemp.createMessage(systemPrompt, messages) - for await (const _ of stream) { - // consume stream - } - - expect(mockStreamText).toHaveBeenCalledWith( - expect.objectContaining({ - temperature: 0.5, - }), - ) - }) - }) - - describe("completePrompt", () => { - it("should complete a prompt using generateText", async () => { - mockGenerateText.mockResolvedValue({ - text: "Test completion from Groq", - }) - - const result = await handler.completePrompt("Test prompt") - - expect(result).toBe("Test completion from Groq") - expect(mockGenerateText).toHaveBeenCalledWith( - expect.objectContaining({ - prompt: "Test prompt", - }), - ) - }) - - it("should use default temperature in completePrompt", async () => { - mockGenerateText.mockResolvedValue({ - text: "Test completion", - }) - - await handler.completePrompt("Test prompt") - - expect(mockGenerateText).toHaveBeenCalledWith( - expect.objectContaining({ - temperature: 0.5, - }), - ) - }) - }) - - describe("processUsageMetrics", () => { - it("should correctly process usage metrics including cache information from providerMetadata", () => { - class TestGroqHandler extends GroqHandler { - public testProcessUsageMetrics(usage: any, providerMetadata?: any) { - return this.processUsageMetrics(usage, providerMetadata) - } - } - - const testHandler = new TestGroqHandler(mockOptions) - - const usage = { - inputTokens: 100, - outputTokens: 50, - } - - const providerMetadata = { - groq: { - promptCacheHitTokens: 20, - promptCacheMissTokens: 80, - }, - } - - const result = testHandler.testProcessUsageMetrics(usage, providerMetadata) - - expect(result.type).toBe("usage") - expect(result.inputTokens).toBe(100) - expect(result.outputTokens).toBe(50) - expect(result.cacheWriteTokens).toBe(80) - expect(result.cacheReadTokens).toBe(20) - }) - - it("should handle missing cache metrics gracefully", () => { - class TestGroqHandler extends GroqHandler { - public testProcessUsageMetrics(usage: any, providerMetadata?: any) { - return this.processUsageMetrics(usage, providerMetadata) - } - } - - const testHandler = new TestGroqHandler(mockOptions) - - const usage = { - inputTokens: 100, - outputTokens: 50, - } - - const result = testHandler.testProcessUsageMetrics(usage) - - expect(result.type).toBe("usage") - expect(result.inputTokens).toBe(100) - expect(result.outputTokens).toBe(50) - expect(result.cacheWriteTokens).toBeUndefined() - expect(result.cacheReadTokens).toBeUndefined() - }) - - it("should include reasoning tokens when provided", () => { - class TestGroqHandler extends GroqHandler { - public testProcessUsageMetrics(usage: any, providerMetadata?: any) { - return this.processUsageMetrics(usage, providerMetadata) - } - } - - const testHandler = new TestGroqHandler(mockOptions) - - const usage = { - inputTokens: 100, - outputTokens: 50, - details: { - 
reasoningTokens: 30, - }, - } - - const result = testHandler.testProcessUsageMetrics(usage) - - expect(result.reasoningTokens).toBe(30) - }) - }) - - describe("tool handling", () => { - const systemPrompt = "You are a helpful assistant." - const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [{ type: "text" as const, text: "Hello!" }], - }, - ] - - it("should handle tool calls in streaming", async () => { - async function* mockFullStream() { - yield { - type: "tool-input-start", - id: "tool-call-1", - toolName: "read_file", - } - yield { - type: "tool-input-delta", - id: "tool-call-1", - delta: '{"path":"test.ts"}', - } - yield { - type: "tool-input-end", - id: "tool-call-1", - } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - }) - - const mockProviderMetadata = Promise.resolve({}) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - providerMetadata: mockProviderMetadata, - }) - - const stream = handler.createMessage(systemPrompt, messages, { - taskId: "test-task", - tools: [ - { - type: "function", - function: { - name: "read_file", - description: "Read a file", - parameters: { - type: "object", - properties: { path: { type: "string" } }, - required: ["path"], - }, - }, - }, - ], - }) - - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const toolCallStartChunks = chunks.filter((c) => c.type === "tool_call_start") - const toolCallDeltaChunks = chunks.filter((c) => c.type === "tool_call_delta") - const toolCallEndChunks = chunks.filter((c) => c.type === "tool_call_end") - - expect(toolCallStartChunks.length).toBe(1) - expect(toolCallStartChunks[0].id).toBe("tool-call-1") - expect(toolCallStartChunks[0].name).toBe("read_file") - - expect(toolCallDeltaChunks.length).toBe(1) - expect(toolCallDeltaChunks[0].delta).toBe('{"path":"test.ts"}') - - expect(toolCallEndChunks.length).toBe(1) - expect(toolCallEndChunks[0].id).toBe("tool-call-1") - }) - - it("should ignore tool-call events to prevent duplicate tools in UI", async () => { - async function* mockFullStream() { - yield { - type: "tool-call", - toolCallId: "tool-call-1", - toolName: "read_file", - input: { path: "test.ts" }, - } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - }) - - const mockProviderMetadata = Promise.resolve({}) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - providerMetadata: mockProviderMetadata, - }) - - const stream = handler.createMessage(systemPrompt, messages, { - taskId: "test-task", - tools: [ - { - type: "function", - function: { - name: "read_file", - description: "Read a file", - parameters: { - type: "object", - properties: { path: { type: "string" } }, - required: ["path"], - }, - }, - }, - ], - }) - - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - // tool-call events are ignored, so no tool_call chunks should be emitted - const toolCallChunks = chunks.filter((c) => c.type === "tool_call") - expect(toolCallChunks.length).toBe(0) - }) - }) - - describe("getMaxOutputTokens", () => { - it("should return maxTokens from model info", () => { - class TestGroqHandler extends GroqHandler { - public testGetMaxOutputTokens() { - return this.getMaxOutputTokens() - } - } - - const testHandler = new TestGroqHandler({ - ...mockOptions, - apiModelId: "llama-3.1-8b-instant", - }) - const result = testHandler.testGetMaxOutputTokens() - - // 
llama-3.1-8b-instant has maxTokens of 8192 - expect(result).toBe(8192) - }) - - it("should use modelMaxTokens when provided", () => { - class TestGroqHandler extends GroqHandler { - public testGetMaxOutputTokens() { - return this.getMaxOutputTokens() - } - } - - const customMaxTokens = 5000 - const testHandler = new TestGroqHandler({ - ...mockOptions, - modelMaxTokens: customMaxTokens, - }) - - const result = testHandler.testGetMaxOutputTokens() - expect(result).toBe(customMaxTokens) - }) - }) -}) diff --git a/src/api/providers/__tests__/huggingface.spec.ts b/src/api/providers/__tests__/huggingface.spec.ts deleted file mode 100644 index e7682474c1a..00000000000 --- a/src/api/providers/__tests__/huggingface.spec.ts +++ /dev/null @@ -1,553 +0,0 @@ -// npx vitest run src/api/providers/__tests__/huggingface.spec.ts - -// Use vi.hoisted to define mock functions that can be referenced in hoisted vi.mock() calls -const { mockStreamText, mockGenerateText } = vi.hoisted(() => ({ - mockStreamText: vi.fn(), - mockGenerateText: vi.fn(), -})) - -vi.mock("ai", async (importOriginal) => { - const actual = await importOriginal() - return { - ...actual, - streamText: mockStreamText, - generateText: mockGenerateText, - } -}) - -vi.mock("@ai-sdk/openai-compatible", () => ({ - createOpenAICompatible: vi.fn(() => { - // Return a function that returns a mock language model - return vi.fn(() => ({ - modelId: "meta-llama/Llama-3.3-70B-Instruct", - provider: "huggingface", - })) - }), -})) - -// Mock the fetchers -vi.mock("../fetchers/huggingface", () => ({ - getHuggingFaceModels: vi.fn(() => Promise.resolve({})), - getCachedHuggingFaceModels: vi.fn(() => ({})), -})) - -import type { Anthropic } from "@anthropic-ai/sdk" - -import type { ApiHandlerOptions } from "../../../shared/api" - -import { HuggingFaceHandler } from "../huggingface" - -describe("HuggingFaceHandler", () => { - let handler: HuggingFaceHandler - let mockOptions: ApiHandlerOptions - - beforeEach(() => { - mockOptions = { - huggingFaceApiKey: "test-huggingface-api-key", - huggingFaceModelId: "meta-llama/Llama-3.3-70B-Instruct", - } - handler = new HuggingFaceHandler(mockOptions) - vi.clearAllMocks() - }) - - describe("constructor", () => { - it("should initialize with provided options", () => { - expect(handler).toBeInstanceOf(HuggingFaceHandler) - expect(handler.getModel().id).toBe(mockOptions.huggingFaceModelId) - }) - - it("should use default model ID if not provided", () => { - const handlerWithoutModel = new HuggingFaceHandler({ - ...mockOptions, - huggingFaceModelId: undefined, - }) - expect(handlerWithoutModel.getModel().id).toBe("meta-llama/Llama-3.3-70B-Instruct") - }) - - it("should throw error if API key is not provided", () => { - expect(() => { - new HuggingFaceHandler({ - ...mockOptions, - huggingFaceApiKey: undefined, - }) - }).toThrow("Hugging Face API key is required") - }) - }) - - describe("getModel", () => { - it("should return default model when no model is specified", () => { - const handlerWithoutModel = new HuggingFaceHandler({ - huggingFaceApiKey: "test-huggingface-api-key", - }) - const model = handlerWithoutModel.getModel() - expect(model.id).toBe("meta-llama/Llama-3.3-70B-Instruct") - expect(model.info).toBeDefined() - }) - - it("should return specified model when valid model is provided", () => { - const testModelId = "mistralai/Mistral-7B-Instruct-v0.3" - const handlerWithModel = new HuggingFaceHandler({ - huggingFaceModelId: testModelId, - huggingFaceApiKey: "test-huggingface-api-key", - }) - const model = 
handlerWithModel.getModel() - expect(model.id).toBe(testModelId) - }) - - it("should include model parameters from getModelParams", () => { - const model = handler.getModel() - expect(model).toHaveProperty("temperature") - expect(model).toHaveProperty("maxTokens") - }) - - it("should return fallback info when model not in cache", () => { - const model = handler.getModel() - expect(model.info).toEqual( - expect.objectContaining({ - maxTokens: 8192, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - }), - ) - }) - }) - - describe("createMessage", () => { - const systemPrompt = "You are a helpful assistant." - const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [ - { - type: "text" as const, - text: "Hello!", - }, - ], - }, - ] - - it("should handle streaming responses", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response from HuggingFace" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - }) - - const mockProviderMetadata = Promise.resolve({}) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - providerMetadata: mockProviderMetadata, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect(chunks.length).toBeGreaterThan(0) - const textChunks = chunks.filter((chunk) => chunk.type === "text") - expect(textChunks).toHaveLength(1) - expect(textChunks[0].text).toBe("Test response from HuggingFace") - }) - - it("should include usage information", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 20, - }) - - const mockProviderMetadata = Promise.resolve({}) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - providerMetadata: mockProviderMetadata, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const usageChunks = chunks.filter((chunk) => chunk.type === "usage") - expect(usageChunks.length).toBeGreaterThan(0) - expect(usageChunks[0].inputTokens).toBe(10) - expect(usageChunks[0].outputTokens).toBe(20) - }) - - it("should handle cached tokens in usage data from providerMetadata", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 100, - outputTokens: 50, - }) - - // HuggingFace provides cache metrics via providerMetadata for supported models - const mockProviderMetadata = Promise.resolve({ - huggingface: { - promptCacheHitTokens: 30, - promptCacheMissTokens: 70, - }, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - providerMetadata: mockProviderMetadata, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const usageChunks = chunks.filter((chunk) => chunk.type === "usage") - expect(usageChunks.length).toBeGreaterThan(0) - expect(usageChunks[0].inputTokens).toBe(100) - expect(usageChunks[0].outputTokens).toBe(50) - expect(usageChunks[0].cacheReadTokens).toBe(30) - expect(usageChunks[0].cacheWriteTokens).toBe(70) - }) - - it("should handle usage with 
details.cachedInputTokens when providerMetadata is not available", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 100, - outputTokens: 50, - details: { - cachedInputTokens: 25, - }, - }) - - const mockProviderMetadata = Promise.resolve({}) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - providerMetadata: mockProviderMetadata, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const usageChunks = chunks.filter((chunk) => chunk.type === "usage") - expect(usageChunks.length).toBeGreaterThan(0) - expect(usageChunks[0].cacheReadTokens).toBe(25) - expect(usageChunks[0].cacheWriteTokens).toBeUndefined() - }) - - it("should pass correct temperature (0.7 default) to streamText", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test" } - } - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: Promise.resolve({ inputTokens: 0, outputTokens: 0 }), - providerMetadata: Promise.resolve({}), - }) - - const handlerWithDefaultTemp = new HuggingFaceHandler({ - huggingFaceApiKey: "test-key", - huggingFaceModelId: "meta-llama/Llama-3.3-70B-Instruct", - }) - - const stream = handlerWithDefaultTemp.createMessage(systemPrompt, messages) - for await (const _ of stream) { - // consume stream - } - - expect(mockStreamText).toHaveBeenCalledWith( - expect.objectContaining({ - temperature: 0.7, - }), - ) - }) - - it("should use user-specified temperature over provider defaults", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test" } - } - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: Promise.resolve({ inputTokens: 0, outputTokens: 0 }), - providerMetadata: Promise.resolve({}), - }) - - const handlerWithCustomTemp = new HuggingFaceHandler({ - huggingFaceApiKey: "test-key", - huggingFaceModelId: "meta-llama/Llama-3.3-70B-Instruct", - modelTemperature: 0.7, - }) - - const stream = handlerWithCustomTemp.createMessage(systemPrompt, messages) - for await (const _ of stream) { - // consume stream - } - - // User-specified temperature should take precedence over everything - expect(mockStreamText).toHaveBeenCalledWith( - expect.objectContaining({ - temperature: 0.7, - }), - ) - }) - - it("should handle stream with multiple chunks", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Hello" } - yield { type: "text-delta", text: " world" } - } - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: Promise.resolve({ inputTokens: 5, outputTokens: 10 }), - providerMetadata: Promise.resolve({}), - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const textChunks = chunks.filter((c) => c.type === "text") - expect(textChunks[0]).toEqual({ type: "text", text: "Hello" }) - expect(textChunks[1]).toEqual({ type: "text", text: " world" }) - - const usageChunks = chunks.filter((c) => c.type === "usage") - expect(usageChunks[0]).toMatchObject({ type: "usage", inputTokens: 5, outputTokens: 10 }) - }) - - it("should handle errors with handleAiSdkError", async () => { - async function* mockFullStream(): AsyncGenerator { - yield { type: "text-delta", text: "" } // Yield 
something before error to satisfy lint - throw new Error("API Error") - } - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: Promise.resolve({ inputTokens: 0, outputTokens: 0 }), - providerMetadata: Promise.resolve({}), - }) - - const stream = handler.createMessage(systemPrompt, messages) - - await expect(async () => { - for await (const _ of stream) { - // consume stream - } - }).rejects.toThrow("HuggingFace: API Error") - }) - }) - - describe("completePrompt", () => { - it("should complete a prompt using generateText", async () => { - mockGenerateText.mockResolvedValue({ - text: "Test completion from HuggingFace", - }) - - const result = await handler.completePrompt("Test prompt") - - expect(result).toBe("Test completion from HuggingFace") - expect(mockGenerateText).toHaveBeenCalledWith( - expect.objectContaining({ - prompt: "Test prompt", - }), - ) - }) - - it("should use default temperature in completePrompt", async () => { - mockGenerateText.mockResolvedValue({ - text: "Test completion", - }) - - await handler.completePrompt("Test prompt") - - expect(mockGenerateText).toHaveBeenCalledWith( - expect.objectContaining({ - temperature: 0.7, - }), - ) - }) - }) - - describe("processUsageMetrics", () => { - it("should correctly process usage metrics including cache information from providerMetadata", () => { - class TestHuggingFaceHandler extends HuggingFaceHandler { - public testProcessUsageMetrics(usage: any, providerMetadata?: any) { - return this.processUsageMetrics(usage, providerMetadata) - } - } - - const testHandler = new TestHuggingFaceHandler(mockOptions) - - const usage = { - inputTokens: 100, - outputTokens: 50, - } - - const providerMetadata = { - huggingface: { - promptCacheHitTokens: 20, - promptCacheMissTokens: 80, - }, - } - - const result = testHandler.testProcessUsageMetrics(usage, providerMetadata) - - expect(result.type).toBe("usage") - expect(result.inputTokens).toBe(100) - expect(result.outputTokens).toBe(50) - expect(result.cacheWriteTokens).toBe(80) - expect(result.cacheReadTokens).toBe(20) - }) - - it("should handle missing cache metrics gracefully", () => { - class TestHuggingFaceHandler extends HuggingFaceHandler { - public testProcessUsageMetrics(usage: any, providerMetadata?: any) { - return this.processUsageMetrics(usage, providerMetadata) - } - } - - const testHandler = new TestHuggingFaceHandler(mockOptions) - - const usage = { - inputTokens: 100, - outputTokens: 50, - } - - const result = testHandler.testProcessUsageMetrics(usage) - - expect(result.type).toBe("usage") - expect(result.inputTokens).toBe(100) - expect(result.outputTokens).toBe(50) - expect(result.cacheWriteTokens).toBeUndefined() - expect(result.cacheReadTokens).toBeUndefined() - }) - - it("should include reasoning tokens when provided", () => { - class TestHuggingFaceHandler extends HuggingFaceHandler { - public testProcessUsageMetrics(usage: any, providerMetadata?: any) { - return this.processUsageMetrics(usage, providerMetadata) - } - } - - const testHandler = new TestHuggingFaceHandler(mockOptions) - - const usage = { - inputTokens: 100, - outputTokens: 50, - details: { - reasoningTokens: 30, - }, - } - - const result = testHandler.testProcessUsageMetrics(usage) - - expect(result.reasoningTokens).toBe(30) - }) - }) - - describe("tool handling", () => { - const systemPrompt = "You are a helpful assistant." - const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [{ type: "text" as const, text: "Hello!" 
}], - }, - ] - - it("should handle tool calls in streaming", async () => { - async function* mockFullStream() { - yield { - type: "tool-input-start", - id: "tool-call-1", - toolName: "read_file", - } - yield { - type: "tool-input-delta", - id: "tool-call-1", - delta: '{"path":"test.ts"}', - } - yield { - type: "tool-input-end", - id: "tool-call-1", - } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - }) - - const mockProviderMetadata = Promise.resolve({}) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - providerMetadata: mockProviderMetadata, - }) - - const stream = handler.createMessage(systemPrompt, messages, { - taskId: "test-task", - tools: [ - { - type: "function", - function: { - name: "read_file", - description: "Read a file", - parameters: { - type: "object", - properties: { path: { type: "string" } }, - required: ["path"], - }, - }, - }, - ], - }) - - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const toolCallStartChunks = chunks.filter((c) => c.type === "tool_call_start") - const toolCallDeltaChunks = chunks.filter((c) => c.type === "tool_call_delta") - const toolCallEndChunks = chunks.filter((c) => c.type === "tool_call_end") - - expect(toolCallStartChunks.length).toBe(1) - expect(toolCallStartChunks[0].id).toBe("tool-call-1") - expect(toolCallStartChunks[0].name).toBe("read_file") - - expect(toolCallDeltaChunks.length).toBe(1) - expect(toolCallDeltaChunks[0].delta).toBe('{"path":"test.ts"}') - - expect(toolCallEndChunks.length).toBe(1) - expect(toolCallEndChunks[0].id).toBe("tool-call-1") - }) - }) -}) diff --git a/src/api/providers/__tests__/io-intelligence.spec.ts b/src/api/providers/__tests__/io-intelligence.spec.ts deleted file mode 100644 index 2978ef856cc..00000000000 --- a/src/api/providers/__tests__/io-intelligence.spec.ts +++ /dev/null @@ -1,197 +0,0 @@ -const { mockStreamText, mockGenerateText } = vi.hoisted(() => ({ - mockStreamText: vi.fn(), - mockGenerateText: vi.fn(), -})) - -vi.mock("ai", async (importOriginal) => { - const actual = await importOriginal() - return { - ...actual, - streamText: mockStreamText, - generateText: mockGenerateText, - } -}) - -vi.mock("@ai-sdk/openai-compatible", () => ({ - createOpenAICompatible: vi.fn(() => { - return vi.fn(() => ({ - modelId: "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8", - provider: "IO Intelligence", - })) - }), -})) - -import type { Anthropic } from "@anthropic-ai/sdk" - -import { ioIntelligenceDefaultModelId } from "@roo-code/types" - -import type { ApiHandlerOptions } from "../../../shared/api" - -import { IOIntelligenceHandler } from "../io-intelligence" - -describe("IOIntelligenceHandler", () => { - let handler: IOIntelligenceHandler - let mockOptions: ApiHandlerOptions - - beforeEach(() => { - mockOptions = { - ioIntelligenceApiKey: "test-api-key", - ioIntelligenceModelId: "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8", - modelTemperature: 0.7, - modelMaxTokens: undefined, - } as ApiHandlerOptions - handler = new IOIntelligenceHandler(mockOptions) - vi.clearAllMocks() - }) - - describe("constructor", () => { - it("should initialize with provided options", () => { - expect(handler).toBeInstanceOf(IOIntelligenceHandler) - expect(handler.getModel().id).toBe("meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8") - }) - - it("should use default model ID if not provided", () => { - const handlerWithoutModel = new IOIntelligenceHandler({ - ...mockOptions, - ioIntelligenceModelId: 
undefined, - } as ApiHandlerOptions) - expect(handlerWithoutModel.getModel().id).toBe(ioIntelligenceDefaultModelId) - }) - - it("should throw error when API key is missing", () => { - const optionsWithoutKey = { ...mockOptions } - delete optionsWithoutKey.ioIntelligenceApiKey - - expect(() => new IOIntelligenceHandler(optionsWithoutKey)).toThrow("IO Intelligence API key is required") - }) - }) - - describe("getModel", () => { - it("should return model info for valid model ID", () => { - const model = handler.getModel() - expect(model.id).toBe("meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8") - expect(model.info).toBeDefined() - expect(model.info.maxTokens).toBe(8192) - expect(model.info.contextWindow).toBe(430000) - expect(model.info.supportsImages).toBe(true) - expect(model.info.supportsPromptCache).toBe(false) - }) - - it("should return default model info for unknown model ID", () => { - const handlerWithUnknown = new IOIntelligenceHandler({ - ...mockOptions, - ioIntelligenceModelId: "unknown-model", - } as ApiHandlerOptions) - const model = handlerWithUnknown.getModel() - expect(model.id).toBe("unknown-model") - expect(model.info).toBeDefined() - expect(model.info.contextWindow).toBe(handler.getModel().info.contextWindow) - }) - - it("should return default model if no model ID is provided", () => { - const handlerWithoutModel = new IOIntelligenceHandler({ - ...mockOptions, - ioIntelligenceModelId: undefined, - } as ApiHandlerOptions) - const model = handlerWithoutModel.getModel() - expect(model.id).toBe(ioIntelligenceDefaultModelId) - expect(model.info).toBeDefined() - }) - - it("should include model parameters from getModelParams", () => { - const model = handler.getModel() - expect(model).toHaveProperty("temperature") - expect(model).toHaveProperty("maxTokens") - }) - }) - - describe("createMessage", () => { - const systemPrompt = "You are a helpful assistant." 
- const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [ - { - type: "text" as const, - text: "Hello!", - }, - ], - }, - ] - - it("should handle streaming responses", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - details: {}, - raw: {}, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect(chunks.length).toBeGreaterThan(0) - const textChunks = chunks.filter((chunk) => chunk.type === "text") - expect(textChunks).toHaveLength(1) - expect(textChunks[0].text).toBe("Test response") - }) - - it("should include usage information", async () => { - async function* mockFullStream() { - yield { type: "text-delta", text: "Test response" } - } - - const mockUsage = Promise.resolve({ - inputTokens: 10, - outputTokens: 5, - details: {}, - raw: {}, - }) - - mockStreamText.mockReturnValue({ - fullStream: mockFullStream(), - usage: mockUsage, - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const usageChunks = chunks.filter((chunk) => chunk.type === "usage") - expect(usageChunks.length).toBeGreaterThan(0) - expect(usageChunks[0].inputTokens).toBe(10) - expect(usageChunks[0].outputTokens).toBe(5) - }) - }) - - describe("completePrompt", () => { - it("should complete a prompt using generateText", async () => { - mockGenerateText.mockResolvedValue({ - text: "Test completion", - }) - - const result = await handler.completePrompt("Test prompt") - - expect(result).toBe("Test completion") - expect(mockGenerateText).toHaveBeenCalledWith( - expect.objectContaining({ - prompt: "Test prompt", - }), - ) - }) - }) -}) diff --git a/src/api/providers/__tests__/unbound.spec.ts b/src/api/providers/__tests__/unbound.spec.ts deleted file mode 100644 index e95586dc6b6..00000000000 --- a/src/api/providers/__tests__/unbound.spec.ts +++ /dev/null @@ -1,549 +0,0 @@ -// npx vitest run src/api/providers/__tests__/unbound.spec.ts - -import { Anthropic } from "@anthropic-ai/sdk" - -import { ApiHandlerOptions } from "../../../shared/api" - -import { UnboundHandler } from "../unbound" - -// Mock dependencies -vitest.mock("../fetchers/modelCache", () => ({ - getModels: vitest.fn().mockImplementation(() => { - return Promise.resolve({ - "anthropic/claude-3-5-sonnet-20241022": { - maxTokens: 8192, - contextWindow: 200000, - supportsImages: true, - supportsPromptCache: true, - inputPrice: 3, - outputPrice: 15, - cacheWritesPrice: 3.75, - cacheReadsPrice: 0.3, - description: "Claude 3.5 Sonnet", - thinking: false, - }, - "anthropic/claude-sonnet-4-5": { - maxTokens: 8192, - contextWindow: 200000, - supportsImages: true, - supportsPromptCache: true, - inputPrice: 3, - outputPrice: 15, - cacheWritesPrice: 3.75, - cacheReadsPrice: 0.3, - description: "Claude 4.5 Sonnet", - thinking: false, - }, - "anthropic/claude-3-7-sonnet-20250219": { - maxTokens: 8192, - contextWindow: 200000, - supportsImages: true, - supportsPromptCache: true, - inputPrice: 3, - outputPrice: 15, - cacheWritesPrice: 3.75, - cacheReadsPrice: 0.3, - description: "Claude 3.7 Sonnet", - thinking: false, - }, - "openai/gpt-4o": { - maxTokens: 4096, - contextWindow: 128000, - 
supportsImages: true, - supportsPromptCache: false, - inputPrice: 5, - outputPrice: 15, - description: "GPT-4o", - }, - "openai/o3-mini": { - maxTokens: 4096, - contextWindow: 128000, - supportsImages: true, - supportsPromptCache: false, - inputPrice: 1, - outputPrice: 3, - description: "O3 Mini", - }, - }) - }), - getModelsFromCache: vitest.fn().mockReturnValue(undefined), -})) - -// Mock OpenAI client -const mockCreate = vitest.fn() -const mockWithResponse = vitest.fn() - -vitest.mock("openai", () => { - return { - __esModule: true, - default: vitest.fn().mockImplementation(() => ({ - chat: { - completions: { - create: (...args: any[]) => { - const stream = { - [Symbol.asyncIterator]: async function* () { - // First chunk with content - yield { - choices: [{ delta: { content: "Test response" }, index: 0 }], - } - // Second chunk with usage data - yield { - choices: [{ delta: {}, index: 0 }], - usage: { - prompt_tokens: 10, - completion_tokens: 5, - total_tokens: 15, - }, - } - // Third chunk with cache usage data - yield { - choices: [{ delta: {}, index: 0 }], - usage: { - prompt_tokens: 8, - completion_tokens: 4, - total_tokens: 12, - cache_creation_input_tokens: 3, - cache_read_input_tokens: 2, - }, - } - }, - } - - const result = mockCreate(...args) - - if (args[0].stream) { - mockWithResponse.mockReturnValue( - Promise.resolve({ data: stream, response: { headers: new Map() } }), - ) - result.withResponse = mockWithResponse - } - - return result - }, - }, - }, - })), - } -}) - -describe("UnboundHandler", () => { - let handler: UnboundHandler - let mockOptions: ApiHandlerOptions - - beforeEach(() => { - mockOptions = { - unboundApiKey: "test-api-key", - unboundModelId: "anthropic/claude-3-5-sonnet-20241022", - } - - handler = new UnboundHandler(mockOptions) - mockCreate.mockClear() - mockWithResponse.mockClear() - - // Default mock implementation for non-streaming responses - mockCreate.mockResolvedValue({ - id: "test-completion", - choices: [ - { - message: { role: "assistant", content: "Test response" }, - finish_reason: "stop", - index: 0, - }, - ], - }) - }) - - describe("constructor", () => { - it("should initialize with provided options", async () => { - expect(handler).toBeInstanceOf(UnboundHandler) - expect((await handler.fetchModel()).id).toBe(mockOptions.unboundModelId) - }) - }) - - describe("createMessage", () => { - const systemPrompt = "You are a helpful assistant." 
- const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: "Hello!", - }, - ] - - it("should handle streaming responses with text and usage data", async () => { - const stream = handler.createMessage(systemPrompt, messages) - const chunks: Array<{ type: string } & Record> = [] - - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect(chunks.length).toBe(3) - - // Verify text chunk - expect(chunks[0]).toEqual({ type: "text", text: "Test response" }) - - // Verify regular usage data - expect(chunks[1]).toEqual({ type: "usage", inputTokens: 10, outputTokens: 5 }) - - // Verify usage data with cache information - expect(chunks[2]).toEqual({ - type: "usage", - inputTokens: 8, - outputTokens: 4, - cacheWriteTokens: 3, - cacheReadTokens: 2, - }) - - expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - model: "claude-3-5-sonnet-20241022", - messages: expect.any(Array), - stream: true, - }), - - expect.objectContaining({ - headers: { - "X-Unbound-Metadata": expect.stringContaining("roo-code"), - }, - }), - ) - }) - - it("should handle API errors", async () => { - mockCreate.mockImplementationOnce(() => { - throw new Error("API Error") - }) - - const stream = handler.createMessage(systemPrompt, messages) - const chunks = [] - - try { - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect.fail("Expected error to be thrown") - } catch (error) { - expect(error).toBeInstanceOf(Error) - expect(error.message).toBe("API Error") - } - }) - }) - - describe("completePrompt", () => { - it("should complete prompt successfully", async () => { - const result = await handler.completePrompt("Test prompt") - expect(result).toBe("Test response") - - expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - model: "claude-3-5-sonnet-20241022", - messages: [{ role: "user", content: "Test prompt" }], - temperature: 0, - max_tokens: 8192, - }), - expect.objectContaining({ - headers: expect.objectContaining({ - "X-Unbound-Metadata": expect.stringContaining("roo-code"), - }), - }), - ) - }) - - it("should handle API errors", async () => { - mockCreate.mockRejectedValueOnce(new Error("API Error")) - await expect(handler.completePrompt("Test prompt")).rejects.toThrow("Unbound completion error: API Error") - }) - - it("should handle empty response", async () => { - mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: "" } }] }) - const result = await handler.completePrompt("Test prompt") - expect(result).toBe("") - }) - - it("should not set max_tokens for non-Anthropic models", async () => { - mockCreate.mockClear() - - const nonAnthropicHandler = new UnboundHandler({ - apiModelId: "openai/gpt-4o", - unboundApiKey: "test-key", - unboundModelId: "openai/gpt-4o", - }) - - await nonAnthropicHandler.completePrompt("Test prompt") - - expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - model: "gpt-4o", - messages: [{ role: "user", content: "Test prompt" }], - temperature: 0, - }), - expect.objectContaining({ - headers: expect.objectContaining({ - "X-Unbound-Metadata": expect.stringContaining("roo-code"), - }), - }), - ) - - expect(mockCreate.mock.calls[0][0]).not.toHaveProperty("max_tokens") - }) - - it("should not set temperature for openai/o3-mini", async () => { - mockCreate.mockClear() - - const openaiHandler = new UnboundHandler({ - apiModelId: "openai/o3-mini", - unboundApiKey: "test-key", - unboundModelId: "openai/o3-mini", - }) - - await openaiHandler.completePrompt("Test prompt") - - 
expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - model: "o3-mini", - messages: [{ role: "user", content: "Test prompt" }], - }), - expect.objectContaining({ - headers: expect.objectContaining({ - "X-Unbound-Metadata": expect.stringContaining("roo-code"), - }), - }), - ) - - expect(mockCreate.mock.calls[0][0]).not.toHaveProperty("temperature") - }) - }) - - describe("fetchModel", () => { - it("should return model info", async () => { - const modelInfo = await handler.fetchModel() - expect(modelInfo.id).toBe(mockOptions.unboundModelId) - expect(modelInfo.info).toBeDefined() - }) - - it("should return default model when invalid model provided", async () => { - const handlerWithInvalidModel = new UnboundHandler({ ...mockOptions, unboundModelId: "invalid/model" }) - const modelInfo = await handlerWithInvalidModel.fetchModel() - expect(modelInfo.id).toBe("anthropic/claude-sonnet-4-5") - expect(modelInfo.info).toBeDefined() - }) - }) - - describe("Native Tool Calling", () => { - const testTools = [ - { - type: "function" as const, - function: { - name: "test_tool", - description: "A test tool", - parameters: { - type: "object", - properties: { - arg1: { type: "string", description: "First argument" }, - }, - required: ["arg1"], - }, - }, - }, - ] - - it("should include tools in request when tools are provided", async () => { - mockWithResponse.mockResolvedValueOnce({ - data: { - [Symbol.asyncIterator]: () => ({ - async next() { - return { done: true } - }, - }), - }, - }) - - const messageGenerator = handler.createMessage("test prompt", [], { - taskId: "test-task-id", - tools: testTools, - }) - await messageGenerator.next() - - expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - tools: expect.arrayContaining([ - expect.objectContaining({ - type: "function", - function: expect.objectContaining({ - name: "test_tool", - }), - }), - ]), - parallel_tool_calls: true, - }), - expect.objectContaining({ - headers: { - "X-Unbound-Metadata": expect.stringContaining("roo-code"), - }, - }), - ) - }) - - it("should include tool_choice when provided", async () => { - mockWithResponse.mockResolvedValueOnce({ - data: { - [Symbol.asyncIterator]: () => ({ - async next() { - return { done: true } - }, - }), - }, - }) - - const messageGenerator = handler.createMessage("test prompt", [], { - taskId: "test-task-id", - tools: testTools, - tool_choice: "auto", - }) - await messageGenerator.next() - - expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - tool_choice: "auto", - }), - expect.objectContaining({ - headers: { - "X-Unbound-Metadata": expect.stringContaining("roo-code"), - }, - }), - ) - }) - - it("should always include tools and tool_choice (tools are guaranteed to be present after ALWAYS_AVAILABLE_TOOLS)", async () => { - mockWithResponse.mockResolvedValueOnce({ - data: { - [Symbol.asyncIterator]: () => ({ - async next() { - return { done: true } - }, - }), - }, - }) - - const messageGenerator = handler.createMessage("test prompt", [], { - taskId: "test-task-id", - }) - await messageGenerator.next() - - // Tools are now always present (minimum 6 from ALWAYS_AVAILABLE_TOOLS) - const callArgs = mockCreate.mock.calls[mockCreate.mock.calls.length - 1][0] - expect(callArgs).toHaveProperty("tools") - expect(callArgs).toHaveProperty("tool_choice") - expect(callArgs).toHaveProperty("parallel_tool_calls", true) - }) - - it("should yield tool_call_partial chunks during streaming", async () => { - mockWithResponse.mockResolvedValueOnce({ - data: { - 
[Symbol.asyncIterator]: () => ({ - next: vi - .fn() - .mockResolvedValueOnce({ - done: false, - value: { - choices: [ - { - delta: { - tool_calls: [ - { - index: 0, - id: "call_123", - function: { - name: "test_tool", - arguments: '{"arg1":', - }, - }, - ], - }, - }, - ], - }, - }) - .mockResolvedValueOnce({ - done: false, - value: { - choices: [ - { - delta: { - tool_calls: [ - { - index: 0, - function: { - arguments: '"value"}', - }, - }, - ], - }, - }, - ], - }, - }) - .mockResolvedValueOnce({ done: true }), - }), - }, - }) - - const stream = handler.createMessage("test prompt", [], { - taskId: "test-task-id", - tools: testTools, - }) - - const chunks = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect(chunks).toContainEqual({ - type: "tool_call_partial", - index: 0, - id: "call_123", - name: "test_tool", - arguments: '{"arg1":', - }) - - expect(chunks).toContainEqual({ - type: "tool_call_partial", - index: 0, - id: undefined, - name: undefined, - arguments: '"value"}', - }) - }) - - it("should set parallel_tool_calls based on metadata", async () => { - mockWithResponse.mockResolvedValueOnce({ - data: { - [Symbol.asyncIterator]: () => ({ - async next() { - return { done: true } - }, - }), - }, - }) - - const messageGenerator = handler.createMessage("test prompt", [], { - taskId: "test-task-id", - tools: testTools, - parallelToolCalls: true, - }) - await messageGenerator.next() - - expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - parallel_tool_calls: true, - }), - expect.objectContaining({ - headers: { - "X-Unbound-Metadata": expect.stringContaining("roo-code"), - }, - }), - ) - }) - }) -}) diff --git a/src/api/providers/cerebras.ts b/src/api/providers/cerebras.ts deleted file mode 100644 index f6c516b7a2c..00000000000 --- a/src/api/providers/cerebras.ts +++ /dev/null @@ -1,169 +0,0 @@ -import { Anthropic } from "@anthropic-ai/sdk" -import { createCerebras } from "@ai-sdk/cerebras" -import { streamText, generateText, ToolSet } from "ai" - -import { cerebrasModels, cerebrasDefaultModelId, type CerebrasModelId, type ModelInfo } from "@roo-code/types" - -import type { ApiHandlerOptions } from "../../shared/api" - -import { - convertToAiSdkMessages, - convertToolsForAiSdk, - processAiSdkStreamPart, - mapToolChoice, - handleAiSdkError, -} from "../transform/ai-sdk" -import { ApiStream, ApiStreamUsageChunk } from "../transform/stream" -import { getModelParams } from "../transform/model-params" - -import { DEFAULT_HEADERS } from "./constants" -import { BaseProvider } from "./base-provider" -import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index" - -const CEREBRAS_INTEGRATION_HEADER = "X-Cerebras-3rd-Party-Integration" -const CEREBRAS_INTEGRATION_NAME = "roocode" -const CEREBRAS_DEFAULT_TEMPERATURE = 0 - -/** - * Cerebras provider using the dedicated @ai-sdk/cerebras package. - * Provides high-speed inference powered by Wafer-Scale Engines. - */ -export class CerebrasHandler extends BaseProvider implements SingleCompletionHandler { - protected options: ApiHandlerOptions - protected provider: ReturnType<typeof createCerebras> - - constructor(options: ApiHandlerOptions) { - super() - this.options = options - - // Create the Cerebras provider using AI SDK - this.provider = createCerebras({ - apiKey: options.cerebrasApiKey ?? 
"not-provided", - headers: { - ...DEFAULT_HEADERS, - [CEREBRAS_INTEGRATION_HEADER]: CEREBRAS_INTEGRATION_NAME, - }, - }) - } - - override getModel(): { id: string; info: ModelInfo; maxTokens?: number; temperature?: number } { - const id = (this.options.apiModelId ?? cerebrasDefaultModelId) as CerebrasModelId - const info = cerebrasModels[id as keyof typeof cerebrasModels] || cerebrasModels[cerebrasDefaultModelId] - const params = getModelParams({ - format: "openai", - modelId: id, - model: info, - settings: this.options, - defaultTemperature: CEREBRAS_DEFAULT_TEMPERATURE, - }) - return { id, info, ...params } - } - - /** - * Get the language model for the configured model ID. - */ - protected getLanguageModel() { - const { id } = this.getModel() - return this.provider(id) - } - - /** - * Process usage metrics from the AI SDK response. - */ - protected processUsageMetrics(usage: { - inputTokens?: number - outputTokens?: number - details?: { - cachedInputTokens?: number - reasoningTokens?: number - } - }): ApiStreamUsageChunk { - return { - type: "usage", - inputTokens: usage.inputTokens || 0, - outputTokens: usage.outputTokens || 0, - cacheReadTokens: usage.details?.cachedInputTokens, - reasoningTokens: usage.details?.reasoningTokens, - } - } - - /** - * Get the max tokens parameter to include in the request. - */ - protected getMaxOutputTokens(): number | undefined { - const { info } = this.getModel() - return this.options.modelMaxTokens || info.maxTokens || undefined - } - - /** - * Create a message stream using the AI SDK. - */ - override async *createMessage( - systemPrompt: string, - messages: Anthropic.Messages.MessageParam[], - metadata?: ApiHandlerCreateMessageMetadata, - ): ApiStream { - const { temperature } = this.getModel() - const languageModel = this.getLanguageModel() - - // Convert messages to AI SDK format - const aiSdkMessages = convertToAiSdkMessages(messages) - - // Convert tools to OpenAI format first, then to AI SDK format - const openAiTools = this.convertToolsForOpenAI(metadata?.tools) - const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined - - // Build the request options - const requestOptions: Parameters[0] = { - model: languageModel, - system: systemPrompt, - messages: aiSdkMessages, - temperature: this.options.modelTemperature ?? temperature ?? CEREBRAS_DEFAULT_TEMPERATURE, - maxOutputTokens: this.getMaxOutputTokens(), - tools: aiSdkTools, - toolChoice: mapToolChoice(metadata?.tool_choice), - } - - // Use streamText for streaming responses - const result = streamText(requestOptions) - - try { - // Process the full stream to get all events including reasoning - for await (const part of result.fullStream) { - for (const chunk of processAiSdkStreamPart(part)) { - yield chunk - } - } - - // Yield usage metrics at the end - const usage = await result.usage - if (usage) { - yield this.processUsageMetrics(usage) - } - } catch (error) { - // Handle AI SDK errors (AI_RetryError, AI_APICallError, etc.) - throw handleAiSdkError(error, "Cerebras") - } - } - - /** - * Complete a prompt using the AI SDK generateText. - */ - async completePrompt(prompt: string): Promise { - const { temperature } = this.getModel() - const languageModel = this.getLanguageModel() - - const { text } = await generateText({ - model: languageModel, - prompt, - maxOutputTokens: this.getMaxOutputTokens(), - temperature: this.options.modelTemperature ?? temperature ?? 
CEREBRAS_DEFAULT_TEMPERATURE, - }) - - return text - } - - override isAiSdkProvider(): boolean { - return true - } -} diff --git a/src/api/providers/chutes.ts b/src/api/providers/chutes.ts deleted file mode 100644 index 66e1d6c9879..00000000000 --- a/src/api/providers/chutes.ts +++ /dev/null @@ -1,242 +0,0 @@ -import { Anthropic } from "@anthropic-ai/sdk" -import { streamText, generateText, LanguageModel, ToolSet } from "ai" - -import { - DEEP_SEEK_DEFAULT_TEMPERATURE, - chutesDefaultModelId, - chutesDefaultModelInfo, - type ModelInfo, - type ModelRecord, -} from "@roo-code/types" - -import type { ApiHandlerOptions } from "../../shared/api" -import { getModelMaxOutputTokens } from "../../shared/api" -import { TagMatcher } from "../../utils/tag-matcher" -import { - convertToAiSdkMessages, - convertToolsForAiSdk, - processAiSdkStreamPart, - mapToolChoice, - handleAiSdkError, -} from "../transform/ai-sdk" -import { ApiStream } from "../transform/stream" -import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index" - -import { OpenAICompatibleHandler, OpenAICompatibleConfig } from "./openai-compatible" -import { getModels, getModelsFromCache } from "./fetchers/modelCache" - -export class ChutesHandler extends OpenAICompatibleHandler implements SingleCompletionHandler { - private models: ModelRecord = {} - - constructor(options: ApiHandlerOptions) { - const modelId = options.apiModelId ?? chutesDefaultModelId - - const config: OpenAICompatibleConfig = { - providerName: "chutes", - baseURL: "https://llm.chutes.ai/v1", - apiKey: options.chutesApiKey ?? "not-provided", - modelId, - modelInfo: chutesDefaultModelInfo, - } - - super(options, config) - } - - async fetchModel() { - this.models = await getModels({ provider: "chutes", apiKey: this.config.apiKey, baseUrl: this.config.baseURL }) - return this.getModel() - } - - override getModel(): { id: string; info: ModelInfo; temperature?: number } { - const id = this.options.apiModelId ?? chutesDefaultModelId - - let info: ModelInfo | undefined = this.models[id] - - if (!info) { - const cachedModels = getModelsFromCache("chutes") - if (cachedModels?.[id]) { - this.models = cachedModels - info = cachedModels[id] - } - } - - if (!info) { - const isDeepSeekR1 = chutesDefaultModelId.includes("DeepSeek-R1") - const defaultTemp = isDeepSeekR1 ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0.5 - return { - id: chutesDefaultModelId, - info: { - ...chutesDefaultModelInfo, - defaultTemperature: defaultTemp, - }, - temperature: this.options.modelTemperature ?? defaultTemp, - } - } - - const isDeepSeekR1 = id.includes("DeepSeek-R1") - const defaultTemp = isDeepSeekR1 ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0.5 - - return { - id, - info: { - ...info, - defaultTemperature: defaultTemp, - }, - temperature: this.supportsTemperature(id) ? (this.options.modelTemperature ?? defaultTemp) : undefined, - } - } - - protected override getLanguageModel(): LanguageModel { - const { id } = this.getModel() - return this.provider(id) - } - - protected override getMaxOutputTokens(): number | undefined { - const { id, info } = this.getModel() - return ( - getModelMaxOutputTokens({ - modelId: id, - model: info, - settings: this.options, - format: "openai", - }) ?? 
undefined - ) - } - - private supportsTemperature(modelId: string): boolean { - return !modelId.startsWith("openai/o3-mini") - } - - override async *createMessage( - systemPrompt: string, - messages: Anthropic.Messages.MessageParam[], - metadata?: ApiHandlerCreateMessageMetadata, - ): ApiStream { - const model = await this.fetchModel() - - if (model.id.includes("DeepSeek-R1")) { - yield* this.createR1Message(systemPrompt, messages, model, metadata) - } else { - yield* super.createMessage(systemPrompt, messages, metadata) - } - } - - private async *createR1Message( - systemPrompt: string, - messages: Anthropic.Messages.MessageParam[], - model: { id: string; info: ModelInfo }, - metadata?: ApiHandlerCreateMessageMetadata, - ): ApiStream { - const languageModel = this.getLanguageModel() - - const modifiedMessages = [...messages] as Anthropic.Messages.MessageParam[] - - if (modifiedMessages.length > 0 && modifiedMessages[0].role === "user") { - const first = modifiedMessages[0] - if (typeof first.content === "string") { - modifiedMessages[0] = { role: "user", content: `${systemPrompt}\n\n${first.content}` } - } else { - modifiedMessages[0] = { - role: "user", - content: [{ type: "text", text: systemPrompt }, ...first.content], - } - } - } else { - modifiedMessages.unshift({ role: "user", content: systemPrompt }) - } - - const aiSdkMessages = convertToAiSdkMessages(modifiedMessages) - - const openAiTools = this.convertToolsForOpenAI(metadata?.tools) - const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined - - const maxOutputTokens = - getModelMaxOutputTokens({ - modelId: model.id, - model: model.info, - settings: this.options, - format: "openai", - }) ?? undefined - - const temperature = this.supportsTemperature(model.id) - ? (this.options.modelTemperature ?? model.info.defaultTemperature) - : undefined - - const result = streamText({ - model: languageModel, - messages: aiSdkMessages, - temperature, - maxOutputTokens, - tools: aiSdkTools, - toolChoice: mapToolChoice(metadata?.tool_choice), - }) - - const matcher = new TagMatcher( - "think", - (chunk) => - ({ - type: chunk.matched ? "reasoning" : "text", - text: chunk.data, - }) as const, - ) - - try { - for await (const part of result.fullStream) { - if (part.type === "text-delta") { - for (const processedChunk of matcher.update(part.text)) { - yield processedChunk - } - } else { - for (const chunk of processAiSdkStreamPart(part)) { - yield chunk - } - } - } - - for (const processedChunk of matcher.final()) { - yield processedChunk - } - - const usage = await result.usage - if (usage) { - yield this.processUsageMetrics(usage) - } - } catch (error) { - throw handleAiSdkError(error, "chutes") - } - } - - override async completePrompt(prompt: string): Promise<string> { - const model = await this.fetchModel() - const languageModel = this.getLanguageModel() - - const maxOutputTokens = - getModelMaxOutputTokens({ - modelId: model.id, - model: model.info, - settings: this.options, - format: "openai", - }) ?? undefined - - const isDeepSeekR1 = model.id.includes("DeepSeek-R1") - const defaultTemperature = isDeepSeekR1 ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0.5 - const temperature = this.supportsTemperature(model.id) - ? (this.options.modelTemperature ?? 
defaultTemperature) - : undefined - - try { - const { text } = await generateText({ - model: languageModel, - prompt, - maxOutputTokens, - temperature, - }) - return text - } catch (error) { - if (error instanceof Error) { - throw new Error(`Chutes completion error: ${error.message}`) - } - throw error - } - } -} diff --git a/src/api/providers/deepinfra.ts b/src/api/providers/deepinfra.ts deleted file mode 100644 index 3dc20683721..00000000000 --- a/src/api/providers/deepinfra.ts +++ /dev/null @@ -1,164 +0,0 @@ -import { Anthropic } from "@anthropic-ai/sdk" -import OpenAI from "openai" - -import { deepInfraDefaultModelId, deepInfraDefaultModelInfo } from "@roo-code/types" - -import type { ApiHandlerOptions } from "../../shared/api" -import { calculateApiCostOpenAI } from "../../shared/cost" - -import { ApiStream, ApiStreamUsageChunk } from "../transform/stream" -import { convertToOpenAiMessages } from "../transform/openai-format" - -import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index" -import { RouterProvider } from "./router-provider" -import { getModelParams } from "../transform/model-params" -import { getModels } from "./fetchers/modelCache" - -export class DeepInfraHandler extends RouterProvider implements SingleCompletionHandler { - constructor(options: ApiHandlerOptions) { - super({ - options: { - ...options, - openAiHeaders: { - "X-Deepinfra-Source": "roo-code", - "X-Deepinfra-Version": `2025-08-25`, - }, - }, - name: "deepinfra", - baseURL: `${options.deepInfraBaseUrl || "https://api.deepinfra.com/v1/openai"}`, - apiKey: options.deepInfraApiKey || "not-provided", - modelId: options.deepInfraModelId, - defaultModelId: deepInfraDefaultModelId, - defaultModelInfo: deepInfraDefaultModelInfo, - }) - } - - public override async fetchModel() { - this.models = await getModels({ provider: this.name, apiKey: this.client.apiKey, baseUrl: this.client.baseURL }) - return this.getModel() - } - - override getModel() { - const id = this.options.deepInfraModelId ?? deepInfraDefaultModelId - const info = this.models[id] ?? deepInfraDefaultModelInfo - - const params = getModelParams({ - format: "openai", - modelId: id, - model: info, - settings: this.options, - defaultTemperature: 0, - }) - - return { id, info, ...params } - } - - override async *createMessage( - systemPrompt: string, - messages: Anthropic.Messages.MessageParam[], - _metadata?: ApiHandlerCreateMessageMetadata, - ): ApiStream { - // Ensure we have up-to-date model metadata - await this.fetchModel() - const { id: modelId, info, reasoningEffort: reasoning_effort } = await this.fetchModel() - let prompt_cache_key = undefined - if (info.supportsPromptCache && _metadata?.taskId) { - prompt_cache_key = _metadata.taskId - } - - const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = { - model: modelId, - messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)], - stream: true, - stream_options: { include_usage: true }, - reasoning_effort, - prompt_cache_key, - tools: this.convertToolsForOpenAI(_metadata?.tools), - tool_choice: _metadata?.tool_choice, - parallel_tool_calls: _metadata?.parallelToolCalls ?? true, - } as OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming - - if (this.supportsTemperature(modelId)) { - requestOptions.temperature = this.options.modelTemperature ?? 
0 - } - - if (this.options.includeMaxTokens === true && info.maxTokens) { - ;(requestOptions as any).max_completion_tokens = this.options.modelMaxTokens || info.maxTokens - } - - const { data: stream } = await this.client.chat.completions.create(requestOptions).withResponse() - - let lastUsage: OpenAI.CompletionUsage | undefined - for await (const chunk of stream) { - const delta = chunk.choices[0]?.delta - - if (delta?.content) { - yield { type: "text", text: delta.content } - } - - if (delta && "reasoning_content" in delta && delta.reasoning_content) { - yield { type: "reasoning", text: (delta.reasoning_content as string | undefined) || "" } - } - - // Handle tool calls in stream - emit partial chunks for NativeToolCallParser - if (delta?.tool_calls) { - for (const toolCall of delta.tool_calls) { - yield { - type: "tool_call_partial", - index: toolCall.index, - id: toolCall.id, - name: toolCall.function?.name, - arguments: toolCall.function?.arguments, - } - } - } - - if (chunk.usage) { - lastUsage = chunk.usage - } - } - - if (lastUsage) { - yield this.processUsageMetrics(lastUsage, info) - } - } - - async completePrompt(prompt: string): Promise<string> { - await this.fetchModel() - const { id: modelId, info } = this.getModel() - - const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = { - model: modelId, - messages: [{ role: "user", content: prompt }], - } - if (this.supportsTemperature(modelId)) { - requestOptions.temperature = this.options.modelTemperature ?? 0 - } - if (this.options.includeMaxTokens === true && info.maxTokens) { - ;(requestOptions as any).max_completion_tokens = this.options.modelMaxTokens || info.maxTokens - } - - const resp = await this.client.chat.completions.create(requestOptions) - return resp.choices[0]?.message?.content || "" - } - - protected processUsageMetrics(usage: any, modelInfo?: any): ApiStreamUsageChunk { - const inputTokens = usage?.prompt_tokens || 0 - const outputTokens = usage?.completion_tokens || 0 - const cacheWriteTokens = usage?.prompt_tokens_details?.cache_write_tokens || 0 - const cacheReadTokens = usage?.prompt_tokens_details?.cached_tokens || 0 - - const { totalCost } = modelInfo - ? 
calculateApiCostOpenAI(modelInfo, inputTokens, outputTokens, cacheWriteTokens, cacheReadTokens) - : { totalCost: 0 } - - return { - type: "usage", - inputTokens, - outputTokens, - cacheWriteTokens: cacheWriteTokens || undefined, - cacheReadTokens: cacheReadTokens || undefined, - totalCost, - } - } -} diff --git a/src/api/providers/doubao.ts b/src/api/providers/doubao.ts deleted file mode 100644 index 6490e422085..00000000000 --- a/src/api/providers/doubao.ts +++ /dev/null @@ -1,87 +0,0 @@ -import { OpenAiHandler } from "./openai" -import type { ApiHandlerOptions } from "../../shared/api" -import { DOUBAO_API_BASE_URL, doubaoDefaultModelId, doubaoModels } from "@roo-code/types" -import { getModelParams } from "../transform/model-params" -import { ApiStreamUsageChunk } from "../transform/stream" - -// Core types for Doubao API -interface ChatCompletionMessageParam { - role: "system" | "user" | "assistant" | "developer" - content: - | string - | Array<{ - type: "text" | "image_url" - text?: string - image_url?: { url: string } - }> -} - -interface ChatCompletionParams { - model: string - messages: ChatCompletionMessageParam[] - temperature?: number - stream?: boolean - stream_options?: { include_usage: boolean } - max_completion_tokens?: number -} - -interface ChatCompletion { - choices: Array<{ - message: { - content: string - } - }> - usage?: { - prompt_tokens: number - completion_tokens: number - } -} - -interface ChatCompletionChunk { - choices: Array<{ - delta: { - content?: string - } - }> - usage?: { - prompt_tokens: number - completion_tokens: number - } -} - -export class DoubaoHandler extends OpenAiHandler { - constructor(options: ApiHandlerOptions) { - super({ - ...options, - openAiApiKey: options.doubaoApiKey ?? "not-provided", - openAiModelId: options.apiModelId ?? doubaoDefaultModelId, - openAiBaseUrl: options.doubaoBaseUrl ?? DOUBAO_API_BASE_URL, - openAiStreamingEnabled: true, - includeMaxTokens: true, - }) - } - - override getModel() { - const id = this.options.apiModelId ?? doubaoDefaultModelId - const info = doubaoModels[id as keyof typeof doubaoModels] || doubaoModels[doubaoDefaultModelId] - const params = getModelParams({ - format: "openai", - modelId: id, - model: info, - settings: this.options, - defaultTemperature: 0, - }) - return { id, info, ...params } - } - - // Override to handle Doubao's usage metrics, including caching. 
- protected override processUsageMetrics(usage: any): ApiStreamUsageChunk { - return { - type: "usage", - inputTokens: usage?.prompt_tokens || 0, - outputTokens: usage?.completion_tokens || 0, - cacheWriteTokens: usage?.prompt_tokens_details?.cache_miss_tokens, - cacheReadTokens: usage?.prompt_tokens_details?.cached_tokens, - } - } -} diff --git a/src/api/providers/featherless.ts b/src/api/providers/featherless.ts deleted file mode 100644 index a3aca6538f7..00000000000 --- a/src/api/providers/featherless.ts +++ /dev/null @@ -1,140 +0,0 @@ -import { Anthropic } from "@anthropic-ai/sdk" -import { streamText } from "ai" - -import { DEEP_SEEK_DEFAULT_TEMPERATURE, featherlessDefaultModelId, featherlessModels } from "@roo-code/types" - -import type { ApiHandlerOptions } from "../../shared/api" -import { TagMatcher } from "../../utils/tag-matcher" -import { convertToAiSdkMessages, handleAiSdkError } from "../transform/ai-sdk" -import { ApiStream } from "../transform/stream" -import { getModelParams } from "../transform/model-params" - -import type { ApiHandlerCreateMessageMetadata } from "../index" -import { OpenAICompatibleHandler, OpenAICompatibleConfig } from "./openai-compatible" - -/** - * Merge consecutive Anthropic messages that share the same role. - * DeepSeek R1 does not support successive messages with the same role, - * so this is needed when the system prompt is injected as a user message - * before the existing conversation (which may also start with a user message). - */ -function mergeConsecutiveSameRoleMessages( - messages: Anthropic.Messages.MessageParam[], -): Anthropic.Messages.MessageParam[] { - if (messages.length <= 1) { - return messages - } - - const merged: Anthropic.Messages.MessageParam[] = [] - - for (const msg of messages) { - const prev = merged[merged.length - 1] - - if (prev && prev.role === msg.role) { - const prevBlocks: Anthropic.Messages.ContentBlockParam[] = - typeof prev.content === "string" ? [{ type: "text", text: prev.content }] : prev.content - const currBlocks: Anthropic.Messages.ContentBlockParam[] = - typeof msg.content === "string" ? [{ type: "text", text: msg.content }] : msg.content - - merged[merged.length - 1] = { - role: prev.role, - content: [...prevBlocks, ...currBlocks], - } - } else { - merged.push(msg) - } - } - - return merged -} - -export class FeatherlessHandler extends OpenAICompatibleHandler { - constructor(options: ApiHandlerOptions) { - const modelId = options.apiModelId ?? featherlessDefaultModelId - const modelInfo = - featherlessModels[modelId as keyof typeof featherlessModels] || featherlessModels[featherlessDefaultModelId] - - const config: OpenAICompatibleConfig = { - providerName: "Featherless", - baseURL: "https://api.featherless.ai/v1", - apiKey: options.featherlessApiKey ?? "not-provided", - modelId, - modelInfo, - modelMaxTokens: options.modelMaxTokens ?? undefined, - temperature: options.modelTemperature ?? undefined, - } - - super(options, config) - } - - override getModel() { - const id = this.options.apiModelId ?? featherlessDefaultModelId - const info = - featherlessModels[id as keyof typeof featherlessModels] || featherlessModels[featherlessDefaultModelId] - const isDeepSeekR1 = id.includes("DeepSeek-R1") - const defaultTemp = isDeepSeekR1 ? 
DEEP_SEEK_DEFAULT_TEMPERATURE : 0.5 - const params = getModelParams({ - format: "openai", - modelId: id, - model: info, - settings: this.options, - defaultTemperature: defaultTemp, - }) - return { id, info, ...params } - } - - override async *createMessage( - systemPrompt: string, - messages: Anthropic.Messages.MessageParam[], - metadata?: ApiHandlerCreateMessageMetadata, - ): ApiStream { - const model = this.getModel() - - if (model.id.includes("DeepSeek-R1")) { - // R1 path: merge system prompt into user messages, use TagMatcher for tags. - // mergeConsecutiveSameRoleMessages ensures no two successive messages share the - // same role (e.g. the injected system-as-user + original first user message). - const r1Messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: systemPrompt }, ...messages] - const aiSdkMessages = convertToAiSdkMessages(mergeConsecutiveSameRoleMessages(r1Messages)) - - const result = streamText({ - model: this.getLanguageModel(), - messages: aiSdkMessages, - temperature: model.temperature ?? 0, - maxOutputTokens: this.getMaxOutputTokens(), - }) - - const matcher = new TagMatcher( - "think", - (chunk) => - ({ - type: chunk.matched ? "reasoning" : "text", - text: chunk.data, - }) as const, - ) - - try { - for await (const part of result.fullStream) { - if (part.type === "text-delta") { - for (const processedChunk of matcher.update(part.text)) { - yield processedChunk - } - } - } - - for (const processedChunk of matcher.final()) { - yield processedChunk - } - - const usage = await result.usage - if (usage) { - yield this.processUsageMetrics(usage) - } - } catch (error) { - throw handleAiSdkError(error, "Featherless") - } - } else { - yield* super.createMessage(systemPrompt, messages, metadata) - } - } -} diff --git a/src/api/providers/fetchers/__tests__/chutes.spec.ts b/src/api/providers/fetchers/__tests__/chutes.spec.ts deleted file mode 100644 index 009cf0493f2..00000000000 --- a/src/api/providers/fetchers/__tests__/chutes.spec.ts +++ /dev/null @@ -1,342 +0,0 @@ -// Mocks must come first, before imports -vi.mock("axios") - -import type { Mock } from "vitest" -import type { ModelInfo } from "@roo-code/types" -import axios from "axios" -import { getChutesModels } from "../chutes" -import { chutesModels } from "@roo-code/types" - -const mockedAxios = axios as typeof axios & { - get: Mock -} - -describe("getChutesModels", () => { - beforeEach(() => { - vi.clearAllMocks() - }) - - it("should fetch and parse models successfully", async () => { - const mockResponse = { - data: { - data: [ - { - id: "test/new-model", - object: "model", - owned_by: "test", - created: 1234567890, - context_length: 128000, - max_model_len: 8192, - input_modalities: ["text"], - }, - ], - }, - } - - mockedAxios.get.mockResolvedValue(mockResponse) - - const models = await getChutesModels("test-api-key") - - expect(mockedAxios.get).toHaveBeenCalledWith( - "https://llm.chutes.ai/v1/models", - expect.objectContaining({ - headers: expect.objectContaining({ - Authorization: "Bearer test-api-key", - }), - }), - ) - - expect(models["test/new-model"]).toEqual({ - maxTokens: 8192, - contextWindow: 128000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Chutes AI model: test/new-model", - }) - }) - - it("should override hardcoded models with dynamic API data", async () => { - // Find any hardcoded model - const [modelId] = Object.entries(chutesModels)[0] - - const mockResponse = { - data: { - data: [ - { - id: modelId, - object: "model", 
- owned_by: "test", - created: 1234567890, - context_length: 200000, // Different from hardcoded - max_model_len: 10000, // Different from hardcoded - input_modalities: ["text", "image"], - }, - ], - }, - } - - mockedAxios.get.mockResolvedValue(mockResponse) - - const models = await getChutesModels("test-api-key") - - // Dynamic values should override hardcoded - expect(models[modelId]).toBeDefined() - expect(models[modelId].contextWindow).toBe(200000) - expect(models[modelId].maxTokens).toBe(10000) - expect(models[modelId].supportsImages).toBe(true) - }) - - it("should return hardcoded models when API returns empty", async () => { - const mockResponse = { - data: { - data: [], - }, - } - - mockedAxios.get.mockResolvedValue(mockResponse) - - const models = await getChutesModels("test-api-key") - - // Should still have hardcoded models - expect(Object.keys(models).length).toBeGreaterThan(0) - expect(models).toEqual(expect.objectContaining(chutesModels)) - }) - - it("should return hardcoded models on API error", async () => { - mockedAxios.get.mockRejectedValue(new Error("Network error")) - - const models = await getChutesModels("test-api-key") - - // Should still have hardcoded models - expect(Object.keys(models).length).toBeGreaterThan(0) - expect(models).toEqual(chutesModels) - }) - - it("should work without API key", async () => { - const mockResponse = { - data: { - data: [], - }, - } - - mockedAxios.get.mockResolvedValue(mockResponse) - - const models = await getChutesModels() - - expect(mockedAxios.get).toHaveBeenCalledWith( - "https://llm.chutes.ai/v1/models", - expect.objectContaining({ - headers: expect.not.objectContaining({ - Authorization: expect.anything(), - }), - }), - ) - - expect(Object.keys(models).length).toBeGreaterThan(0) - }) - - it("should detect image support from input_modalities", async () => { - const mockResponse = { - data: { - data: [ - { - id: "test/image-model", - object: "model", - owned_by: "test", - created: 1234567890, - context_length: 128000, - max_model_len: 8192, - input_modalities: ["text", "image"], - }, - ], - }, - } - - mockedAxios.get.mockResolvedValue(mockResponse) - - const models = await getChutesModels("test-api-key") - - expect(models["test/image-model"].supportsImages).toBe(true) - }) - - it("should accept supported_features containing tools", async () => { - const mockResponse = { - data: { - data: [ - { - id: "test/tools-model", - object: "model", - owned_by: "test", - created: 1234567890, - context_length: 128000, - max_model_len: 8192, - input_modalities: ["text"], - supported_features: ["json_mode", "tools", "reasoning"], - }, - ], - }, - } - - mockedAxios.get.mockResolvedValue(mockResponse) - - const models = await getChutesModels("test-api-key") - - expect(models["test/tools-model"]).toBeDefined() - expect(models["test/tools-model"].contextWindow).toBe(128000) - }) - - it("should accept supported_features without tools", async () => { - const mockResponse = { - data: { - data: [ - { - id: "test/no-tools-model", - object: "model", - owned_by: "test", - created: 1234567890, - context_length: 128000, - max_model_len: 8192, - input_modalities: ["text"], - supported_features: ["json_mode", "reasoning"], - }, - ], - }, - } - - mockedAxios.get.mockResolvedValue(mockResponse) - - const models = await getChutesModels("test-api-key") - - expect(models["test/no-tools-model"]).toBeDefined() - expect(models["test/no-tools-model"].contextWindow).toBe(128000) - }) - - it("should skip empty objects in API response and still process valid models", 
async () => { - const mockResponse = { - data: { - data: [ - { - id: "test/valid-model", - object: "model", - owned_by: "test", - created: 1234567890, - context_length: 128000, - max_model_len: 8192, - input_modalities: ["text"], - }, - {}, // Empty object - should be skipped - { - id: "test/another-valid-model", - object: "model", - context_length: 64000, - max_model_len: 4096, - }, - ], - }, - } - - mockedAxios.get.mockResolvedValue(mockResponse) - - const models = await getChutesModels("test-api-key") - - // Valid models should be processed - expect(models["test/valid-model"]).toBeDefined() - expect(models["test/valid-model"].contextWindow).toBe(128000) - expect(models["test/another-valid-model"]).toBeDefined() - expect(models["test/another-valid-model"].contextWindow).toBe(64000) - }) - - it("should skip models without id field", async () => { - const mockResponse = { - data: { - data: [ - { - // Missing id field - object: "model", - context_length: 128000, - max_model_len: 8192, - }, - { - id: "test/valid-model", - context_length: 64000, - max_model_len: 4096, - }, - ], - }, - } - - mockedAxios.get.mockResolvedValue(mockResponse) - - const models = await getChutesModels("test-api-key") - - // Only the valid model should be added - expect(models["test/valid-model"]).toBeDefined() - // Hardcoded models should still exist - expect(Object.keys(models).length).toBeGreaterThan(1) - }) - - it("should calculate maxTokens fallback when max_model_len is missing", async () => { - const mockResponse = { - data: { - data: [ - { - id: "test/no-max-len-model", - object: "model", - context_length: 100000, - // max_model_len is missing - input_modalities: ["text"], - }, - ], - }, - } - - mockedAxios.get.mockResolvedValue(mockResponse) - - const models = await getChutesModels("test-api-key") - - // Should calculate maxTokens as 20% of contextWindow - expect(models["test/no-max-len-model"]).toBeDefined() - expect(models["test/no-max-len-model"].maxTokens).toBe(20000) // 100000 * 0.2 - expect(models["test/no-max-len-model"].contextWindow).toBe(100000) - }) - - it("should gracefully handle response with mixed valid and invalid items", async () => { - const consoleErrorSpy = vi.spyOn(console, "error").mockImplementation(() => {}) - - const mockResponse = { - data: { - data: [ - { - id: "test/valid-1", - context_length: 128000, - max_model_len: 8192, - }, - {}, // Empty - will be skipped - null, // Null - will be skipped - { - id: "", // Empty string id - will be skipped - context_length: 64000, - }, - { - id: "test/valid-2", - context_length: 256000, - max_model_len: 16384, - supported_features: ["tools"], - }, - ], - }, - } - - mockedAxios.get.mockResolvedValue(mockResponse) - - const models = await getChutesModels("test-api-key") - - // Both valid models should be processed - expect(models["test/valid-1"]).toBeDefined() - expect(models["test/valid-2"]).toBeDefined() - - consoleErrorSpy.mockRestore() - }) -}) diff --git a/src/api/providers/fetchers/__tests__/modelCache.spec.ts b/src/api/providers/fetchers/__tests__/modelCache.spec.ts index 3c73b2a2725..60a39fa15f7 100644 --- a/src/api/providers/fetchers/__tests__/modelCache.spec.ts +++ b/src/api/providers/fetchers/__tests__/modelCache.spec.ts @@ -41,8 +41,6 @@ vi.mock("fs", () => ({ vi.mock("../litellm") vi.mock("../openrouter") vi.mock("../requesty") -vi.mock("../unbound") -vi.mock("../io-intelligence") // Mock ContextProxy with a simple static instance vi.mock("../../../core/config/ContextProxy", () => ({ @@ -63,18 +61,12 @@ import { getModels, 
getModelsFromCache } from "../modelCache" import { getLiteLLMModels } from "../litellm" import { getOpenRouterModels } from "../openrouter" import { getRequestyModels } from "../requesty" -import { getUnboundModels } from "../unbound" -import { getIOIntelligenceModels } from "../io-intelligence" const mockGetLiteLLMModels = getLiteLLMModels as Mock const mockGetOpenRouterModels = getOpenRouterModels as Mock const mockGetRequestyModels = getRequestyModels as Mock -const mockGetUnboundModels = getUnboundModels as Mock -const mockGetIOIntelligenceModels = getIOIntelligenceModels as Mock const DUMMY_REQUESTY_KEY = "requesty-key-for-testing" -const DUMMY_UNBOUND_KEY = "unbound-key-for-testing" -const DUMMY_IOINTELLIGENCE_KEY = "io-intelligence-key-for-testing" describe("getModels with new GetModelsOptions", () => { beforeEach(() => { @@ -136,40 +128,6 @@ describe("getModels with new GetModelsOptions", () => { expect(result).toEqual(mockModels) }) - it("calls getUnboundModels with optional API key", async () => { - const mockModels = { - "unbound/model": { - maxTokens: 4096, - contextWindow: 8192, - supportsPromptCache: false, - description: "Unbound model", - }, - } - mockGetUnboundModels.mockResolvedValue(mockModels) - - const result = await getModels({ provider: "unbound", apiKey: DUMMY_UNBOUND_KEY }) - - expect(mockGetUnboundModels).toHaveBeenCalledWith(DUMMY_UNBOUND_KEY) - expect(result).toEqual(mockModels) - }) - - it("calls IOIntelligenceModels for IO-Intelligence provider", async () => { - const mockModels = { - "io-intelligence/model": { - maxTokens: 4096, - contextWindow: 8192, - supportsPromptCache: false, - description: "IO Intelligence Model", - }, - } - mockGetIOIntelligenceModels.mockResolvedValue(mockModels) - - const result = await getModels({ provider: "io-intelligence", apiKey: DUMMY_IOINTELLIGENCE_KEY }) - - expect(mockGetIOIntelligenceModels).toHaveBeenCalled() - expect(result).toEqual(mockModels) - }) - it("handles errors and re-throws them", async () => { const expectedError = new Error("LiteLLM connection failed") mockGetLiteLLMModels.mockRejectedValue(expectedError) diff --git a/src/api/providers/fetchers/chutes.ts b/src/api/providers/fetchers/chutes.ts deleted file mode 100644 index d79a2c80b08..00000000000 --- a/src/api/providers/fetchers/chutes.ts +++ /dev/null @@ -1,89 +0,0 @@ -import axios from "axios" -import { z } from "zod" - -import { type ModelInfo, chutesModels } from "@roo-code/types" - -import { DEFAULT_HEADERS } from "../constants" - -// Chutes models endpoint follows OpenAI /models shape with additional fields. -// All fields are optional to allow graceful handling of incomplete API responses. -const ChutesModelSchema = z.object({ - id: z.string().optional(), - object: z.literal("model").optional(), - owned_by: z.string().optional(), - created: z.number().optional(), - context_length: z.number().optional(), - max_model_len: z.number().optional(), - input_modalities: z.array(z.string()).optional(), - supported_features: z.array(z.string()).optional(), -}) - -const ChutesModelsResponseSchema = z.object({ data: z.array(ChutesModelSchema) }) - -type ChutesModelsResponse = z.infer - -export async function getChutesModels(apiKey?: string): Promise> { - const headers: Record = { ...DEFAULT_HEADERS } - - if (apiKey) { - headers["Authorization"] = `Bearer ${apiKey}` - } - - const url = "https://llm.chutes.ai/v1/models" - - // Start with hardcoded models as the base. 
- const models: Record = { ...chutesModels } - - try { - const response = await axios.get(url, { headers }) - const result = ChutesModelsResponseSchema.safeParse(response.data) - - // Graceful fallback: use parsed data if valid, otherwise fall back to raw response data. - // This mirrors the OpenRouter pattern for handling API responses with some invalid items. - const data = result.success ? result.data.data : response.data?.data - - if (!result.success) { - console.error(`Error parsing Chutes models response: ${JSON.stringify(result.error.format(), null, 2)}`) - } - - if (!data || !Array.isArray(data)) { - console.error("Chutes models response missing data array") - return models - } - - for (const m of data) { - // Skip items missing required fields (e.g., empty objects from API) - if (!m || typeof m.id !== "string" || !m.id) { - continue - } - - const contextWindow = - typeof m.context_length === "number" && Number.isFinite(m.context_length) ? m.context_length : undefined - const maxModelLen = - typeof m.max_model_len === "number" && Number.isFinite(m.max_model_len) ? m.max_model_len : undefined - - // Skip models without valid context window information - if (!contextWindow) { - continue - } - - const info: ModelInfo = { - maxTokens: maxModelLen ?? Math.ceil(contextWindow * 0.2), - contextWindow, - supportsImages: (m.input_modalities || []).includes("image"), - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: `Chutes AI model: ${m.id}`, - } - - // Union: dynamic models override hardcoded ones if they have the same ID. - models[m.id] = info - } - } catch (error) { - console.error(`Error fetching Chutes models: ${error instanceof Error ? error.message : String(error)}`) - // On error, still return hardcoded models. - } - - return models -} diff --git a/src/api/providers/fetchers/deepinfra.ts b/src/api/providers/fetchers/deepinfra.ts deleted file mode 100644 index f38daff8224..00000000000 --- a/src/api/providers/fetchers/deepinfra.ts +++ /dev/null @@ -1,71 +0,0 @@ -import axios from "axios" -import { z } from "zod" - -import { type ModelInfo } from "@roo-code/types" - -import { DEFAULT_HEADERS } from "../constants" - -// DeepInfra models endpoint follows OpenAI /models shape with an added metadata object. - -const DeepInfraModelSchema = z.object({ - id: z.string(), - object: z.literal("model").optional(), - owned_by: z.string().optional(), - created: z.number().optional(), - root: z.string().optional(), - metadata: z - .object({ - description: z.string().optional(), - context_length: z.number().optional(), - max_tokens: z.number().optional(), - tags: z.array(z.string()).optional(), // e.g., ["vision", "prompt_cache"] - pricing: z - .object({ - input_tokens: z.number().optional(), - output_tokens: z.number().optional(), - cache_read_tokens: z.number().optional(), - }) - .optional(), - }) - .optional(), -}) - -const DeepInfraModelsResponseSchema = z.object({ data: z.array(DeepInfraModelSchema) }) - -export async function getDeepInfraModels( - apiKey?: string, - baseUrl: string = "https://api.deepinfra.com/v1/openai", -): Promise> { - const headers: Record = { ...DEFAULT_HEADERS } - if (apiKey) headers["Authorization"] = `Bearer ${apiKey}` - - const url = `${baseUrl.replace(/\/$/, "")}/models` - const models: Record = {} - - const response = await axios.get(url, { headers }) - const parsed = DeepInfraModelsResponseSchema.safeParse(response.data) - const data = parsed.success ? 
parsed.data.data : response.data?.data || [] - - for (const m of data as Array>) { - const meta = m.metadata || {} - const tags = meta.tags || [] - - const contextWindow = typeof meta.context_length === "number" ? meta.context_length : 8192 - const maxTokens = typeof meta.max_tokens === "number" ? meta.max_tokens : Math.ceil(contextWindow * 0.2) - - const info: ModelInfo = { - maxTokens, - contextWindow, - supportsImages: tags.includes("vision"), - supportsPromptCache: tags.includes("prompt_cache"), - inputPrice: meta.pricing?.input_tokens, - outputPrice: meta.pricing?.output_tokens, - cacheReadsPrice: meta.pricing?.cache_read_tokens, - description: meta.description, - } - - models[m.id] = info - } - - return models -} diff --git a/src/api/providers/fetchers/huggingface.ts b/src/api/providers/fetchers/huggingface.ts deleted file mode 100644 index 16963edc756..00000000000 --- a/src/api/providers/fetchers/huggingface.ts +++ /dev/null @@ -1,252 +0,0 @@ -import axios from "axios" -import { z } from "zod" - -import { - type ModelInfo, - type ModelRecord, - HUGGINGFACE_API_URL, - HUGGINGFACE_CACHE_DURATION, - HUGGINGFACE_DEFAULT_MAX_TOKENS, - HUGGINGFACE_DEFAULT_CONTEXT_WINDOW, -} from "@roo-code/types" - -const huggingFaceProviderSchema = z.object({ - provider: z.string(), - status: z.enum(["live", "staging", "error"]), - supports_tools: z.boolean().optional(), - supports_structured_output: z.boolean().optional(), - context_length: z.number().optional(), - pricing: z - .object({ - input: z.number(), - output: z.number(), - }) - .optional(), -}) - -/** - * Represents a provider that can serve a HuggingFace model. - * - * @property provider - The provider identifier (e.g., "sambanova", "together") - * @property status - The current status of the provider - * @property supports_tools - Whether the provider supports tool/function calling - * @property supports_structured_output - Whether the provider supports structured output - * @property context_length - The maximum context length supported by this provider - * @property pricing - The pricing information for input/output tokens - */ -export type HuggingFaceProvider = z.infer - -const huggingFaceModelSchema = z.object({ - id: z.string(), - object: z.literal("model"), - created: z.number(), - owned_by: z.string(), - providers: z.array(huggingFaceProviderSchema), -}) - -/** - * Represents a HuggingFace model available through the router API - * - * @property id - The unique identifier of the model - * @property object - The object type (always "model") - * @property created - Unix timestamp of when the model was created - * @property owned_by - The organization that owns the model - * @property providers - List of providers that can serve this model - */ -export type HuggingFaceModel = z.infer - -const huggingFaceApiResponseSchema = z.object({ - object: z.string(), - data: z.array(huggingFaceModelSchema), -}) - -type HuggingFaceApiResponse = z.infer - -interface CacheEntry { - data: ModelRecord - rawModels?: HuggingFaceModel[] - timestamp: number -} - -let cache: CacheEntry | null = null - -/** - * Parse a HuggingFace model into ModelInfo format. - * - * @param model - The HuggingFace model to parse - * @param provider - Optional specific provider to use for capabilities - * @returns ModelInfo object compatible with the application's model system - */ -function parseHuggingFaceModel(model: HuggingFaceModel, provider?: HuggingFaceProvider): ModelInfo { - // Use provider-specific values if available, otherwise find first provider with values. 
- const contextLength = - provider?.context_length || - model.providers.find((p) => p.context_length)?.context_length || - HUGGINGFACE_DEFAULT_CONTEXT_WINDOW - - const pricing = provider?.pricing || model.providers.find((p) => p.pricing)?.pricing - - // Include provider name in description if specific provider is given. - const description = provider ? `${model.id} via ${provider.provider}` : `${model.id} via HuggingFace` - - return { - maxTokens: Math.min(contextLength, HUGGINGFACE_DEFAULT_MAX_TOKENS), - contextWindow: contextLength, - supportsImages: false, // HuggingFace API doesn't provide this info yet. - supportsPromptCache: false, - inputPrice: pricing?.input, - outputPrice: pricing?.output, - description, - } -} - -/** - * Fetches available models from HuggingFace - * - * @returns A promise that resolves to a record of model IDs to model info - * @throws Will throw an error if the request fails - */ -export async function getHuggingFaceModels(): Promise { - const now = Date.now() - - if (cache && now - cache.timestamp < HUGGINGFACE_CACHE_DURATION) { - return cache.data - } - - const models: ModelRecord = {} - - try { - const response = await axios.get(HUGGINGFACE_API_URL, { - headers: { - "Upgrade-Insecure-Requests": "1", - "Sec-Fetch-Dest": "document", - "Sec-Fetch-Mode": "navigate", - "Sec-Fetch-Site": "none", - "Sec-Fetch-User": "?1", - Priority: "u=0, i", - Pragma: "no-cache", - "Cache-Control": "no-cache", - }, - timeout: 10000, - }) - - const result = huggingFaceApiResponseSchema.safeParse(response.data) - - if (!result.success) { - console.error("HuggingFace models response validation failed:", result.error.format()) - throw new Error("Invalid response format from HuggingFace API") - } - - const validModels = result.data.data.filter((model) => model.providers.length > 0) - - for (const model of validModels) { - // Add the base model. - models[model.id] = parseHuggingFaceModel(model) - - // Add provider-specific variants for all live providers. - for (const provider of model.providers) { - if (provider.status === "live") { - const providerKey = `${model.id}:${provider.provider}` - const providerModel = parseHuggingFaceModel(model, provider) - - // Always add provider variants to show all available providers. - models[providerKey] = providerModel - } - } - } - - cache = { data: models, rawModels: validModels, timestamp: now } - - return models - } catch (error) { - console.error("Error fetching HuggingFace models:", error) - - if (cache) { - return cache.data - } - - if (axios.isAxiosError(error)) { - if (error.response) { - throw new Error( - `Failed to fetch HuggingFace models: ${error.response.status} ${error.response.statusText}`, - ) - } else if (error.request) { - throw new Error( - "Failed to fetch HuggingFace models: No response from server. Check your internet connection.", - ) - } - } - - throw new Error( - `Failed to fetch HuggingFace models: ${error instanceof Error ? error.message : "Unknown error"}`, - ) - } -} - -/** - * Get cached models without making an API request. - */ -export function getCachedHuggingFaceModels(): ModelRecord | null { - return cache?.data || null -} - -/** - * Get cached raw models for UI display. 
- */ -export function getCachedRawHuggingFaceModels(): HuggingFaceModel[] | null { - return cache?.rawModels || null -} - -export function clearHuggingFaceCache(): void { - cache = null -} - -export interface HuggingFaceModelsResponse { - models: HuggingFaceModel[] - cached: boolean - timestamp: number -} - -export async function getHuggingFaceModelsWithMetadata(): Promise { - try { - // First, trigger the fetch to populate cache. - await getHuggingFaceModels() - - // Get the raw models from cache. - const cachedRawModels = getCachedRawHuggingFaceModels() - - if (cachedRawModels) { - return { - models: cachedRawModels, - cached: true, - timestamp: Date.now(), - } - } - - // If no cached raw models, fetch directly from API. - const response = await axios.get(HUGGINGFACE_API_URL, { - headers: { - "Upgrade-Insecure-Requests": "1", - "Sec-Fetch-Dest": "document", - "Sec-Fetch-Mode": "navigate", - "Sec-Fetch-Site": "none", - "Sec-Fetch-User": "?1", - Priority: "u=0, i", - Pragma: "no-cache", - "Cache-Control": "no-cache", - }, - timeout: 10000, - }) - - const models = response.data?.data || [] - - return { - models, - cached: false, - timestamp: Date.now(), - } - } catch (error) { - console.error("Failed to get HuggingFace models:", error) - return { models: [], cached: false, timestamp: Date.now() } - } -} diff --git a/src/api/providers/fetchers/io-intelligence.ts b/src/api/providers/fetchers/io-intelligence.ts deleted file mode 100644 index a0ea5dedae7..00000000000 --- a/src/api/providers/fetchers/io-intelligence.ts +++ /dev/null @@ -1,158 +0,0 @@ -import axios from "axios" -import { z } from "zod" - -import { type ModelInfo, type ModelRecord, IO_INTELLIGENCE_CACHE_DURATION } from "@roo-code/types" - -const ioIntelligenceModelSchema = z.object({ - id: z.string(), - object: z.literal("model"), - created: z.number(), - owned_by: z.string(), - root: z.string().nullable().optional(), - parent: z.string().nullable().optional(), - max_model_len: z.number().nullable().optional(), - permission: z.array( - z.object({ - id: z.string(), - object: z.literal("model_permission"), - created: z.number(), - allow_create_engine: z.boolean(), - allow_sampling: z.boolean(), - allow_logprobs: z.boolean(), - allow_search_indices: z.boolean(), - allow_view: z.boolean(), - allow_fine_tuning: z.boolean(), - organization: z.string(), - group: z.string().nullable(), - is_blocking: z.boolean(), - }), - ), -}) - -export type IOIntelligenceModel = z.infer - -const ioIntelligenceApiResponseSchema = z.object({ - object: z.literal("list"), - data: z.array(ioIntelligenceModelSchema), -}) - -type IOIntelligenceApiResponse = z.infer - -interface CacheEntry { - data: ModelRecord - timestamp: number -} - -let cache: CacheEntry | null = null - -/** - * Model context length mapping based on the documentation - * 1 - */ -const MODEL_CONTEXT_LENGTHS: Record = { - "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": 430000, - "deepseek-ai/DeepSeek-R1-0528": 128000, - "Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar": 106000, - "openai/gpt-oss-120b": 131072, -} - -const VISION_MODELS = new Set([ - "Qwen/Qwen2.5-VL-32B-Instruct", - "meta-llama/Llama-3.2-90B-Vision-Instruct", - "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8", -]) - -function parseIOIntelligenceModel(model: IOIntelligenceModel): ModelInfo { - const contextLength = MODEL_CONTEXT_LENGTHS[model.id] || 8192 - // Cap maxTokens at 32k for very large context windows, or 20% of context length, whichever is smaller. 
- const maxTokens = Math.min(contextLength, Math.ceil(contextLength * 0.2), 32768) - const supportsImages = VISION_MODELS.has(model.id) - - return { - maxTokens, - contextWindow: contextLength, - supportsImages, - supportsPromptCache: false, - description: `${model.id} via IO Intelligence`, - } -} - -/** - * Fetches available models from IO Intelligence - * 1 - */ -export async function getIOIntelligenceModels(apiKey?: string): Promise { - const now = Date.now() - - if (cache && now - cache.timestamp < IO_INTELLIGENCE_CACHE_DURATION) { - return cache.data - } - - const models: ModelRecord = {} - - try { - const headers: Record = { - "Content-Type": "application/json", - } - - if (apiKey) { - headers.Authorization = `Bearer ${apiKey}` - } else { - console.error("IO Intelligence API key is required") - throw new Error("IO Intelligence API key is required") - } - - const response = await axios.get( - "https://api.intelligence.io.solutions/api/v1/models", - { - headers, - timeout: 10_000, - }, - ) - - const result = ioIntelligenceApiResponseSchema.safeParse(response.data) - - if (!result.success) { - console.error("IO Intelligence models response validation failed:", result.error.format()) - throw new Error("Invalid response format from IO Intelligence API") - } - - for (const model of result.data.data) { - models[model.id] = parseIOIntelligenceModel(model) - } - - cache = { data: models, timestamp: now } - - return models - } catch (error) { - console.error("Error fetching IO Intelligence models:", error) - - if (cache) { - return cache.data - } - - if (axios.isAxiosError(error)) { - if (error.response) { - throw new Error( - `Failed to fetch IO Intelligence models: ${error.response.status} ${error.response.statusText}`, - ) - } else if (error.request) { - throw new Error( - "Failed to fetch IO Intelligence models: No response from server. Check your internet connection.", - ) - } - } - - throw new Error( - `Failed to fetch IO Intelligence models: ${error instanceof Error ? 
error.message : "Unknown error"}`, - ) - } -} - -export function getCachedIOIntelligenceModels(): ModelRecord | null { - return cache?.data || null -} - -export function clearIOIntelligenceCache(): void { - cache = null -} diff --git a/src/api/providers/fetchers/modelCache.ts b/src/api/providers/fetchers/modelCache.ts index cb5cb094148..fd213dc93af 100644 --- a/src/api/providers/fetchers/modelCache.ts +++ b/src/api/providers/fetchers/modelCache.ts @@ -19,16 +19,11 @@ import { fileExistsAtPath } from "../../../utils/fs" import { getOpenRouterModels } from "./openrouter" import { getVercelAiGatewayModels } from "./vercel-ai-gateway" import { getRequestyModels } from "./requesty" -import { getUnboundModels } from "./unbound" import { getLiteLLMModels } from "./litellm" import { GetModelsOptions } from "../../../shared/api" import { getOllamaModels } from "./ollama" import { getLMStudioModels } from "./lmstudio" -import { getIOIntelligenceModels } from "./io-intelligence" -import { getDeepInfraModels } from "./deepinfra" -import { getHuggingFaceModels } from "./huggingface" import { getRooModels } from "./roo" -import { getChutesModels } from "./chutes" const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 }) @@ -73,10 +68,6 @@ async function fetchModelsFromProvider(options: GetModelsOptions): Promise { const publicProviders: Array<{ provider: RouterName; options: GetModelsOptions }> = [ { provider: "openrouter", options: { provider: "openrouter" } }, { provider: "vercel-ai-gateway", options: { provider: "vercel-ai-gateway" } }, - { provider: "chutes", options: { provider: "chutes" } }, ] // Refresh each provider in background (fire and forget) diff --git a/src/api/providers/fetchers/unbound.ts b/src/api/providers/fetchers/unbound.ts deleted file mode 100644 index 354c0fde58a..00000000000 --- a/src/api/providers/fetchers/unbound.ts +++ /dev/null @@ -1,52 +0,0 @@ -import axios from "axios" - -import type { ModelInfo } from "@roo-code/types" - -export async function getUnboundModels(apiKey?: string | null): Promise> { - const models: Record = {} - - try { - const headers: Record = {} - - if (apiKey) { - headers["Authorization"] = `Bearer ${apiKey}` - } - - const response = await axios.get("https://api.getunbound.ai/models", { headers }) - - if (response.data) { - const rawModels: Record = response.data - - for (const [modelId, model] of Object.entries(rawModels)) { - const modelInfo: ModelInfo = { - maxTokens: model?.maxTokens ? parseInt(model.maxTokens) : undefined, - contextWindow: model?.contextWindow ? parseInt(model.contextWindow) : 0, - supportsImages: model?.supportsImages ?? false, - supportsPromptCache: model?.supportsPromptCaching ?? false, - inputPrice: model?.inputTokenPrice ? parseFloat(model.inputTokenPrice) : undefined, - outputPrice: model?.outputTokenPrice ? parseFloat(model.outputTokenPrice) : undefined, - cacheWritesPrice: model?.cacheWritePrice ? parseFloat(model.cacheWritePrice) : undefined, - cacheReadsPrice: model?.cacheReadPrice ? parseFloat(model.cacheReadPrice) : undefined, - } - - switch (true) { - case modelId.startsWith("anthropic/"): - // Set max tokens to 8192 for supported Anthropic models - if (modelInfo.maxTokens !== 4096) { - modelInfo.maxTokens = 8192 - } - break - default: - break - } - - models[modelId] = modelInfo - } - } - } catch (error) { - console.error(`Error fetching Unbound models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`) - throw new Error(`Failed to fetch Unbound models: ${error instanceof Error ? 
error.message : "Unknown error"}`) - } - - return models -} diff --git a/src/api/providers/groq.ts b/src/api/providers/groq.ts deleted file mode 100644 index 12bd6b4023f..00000000000 --- a/src/api/providers/groq.ts +++ /dev/null @@ -1,181 +0,0 @@ -import { Anthropic } from "@anthropic-ai/sdk" -import { createGroq } from "@ai-sdk/groq" -import { streamText, generateText, ToolSet } from "ai" - -import { groqModels, groqDefaultModelId, type ModelInfo } from "@roo-code/types" - -import type { ApiHandlerOptions } from "../../shared/api" - -import { - convertToAiSdkMessages, - convertToolsForAiSdk, - processAiSdkStreamPart, - mapToolChoice, - handleAiSdkError, -} from "../transform/ai-sdk" -import { ApiStream, ApiStreamUsageChunk } from "../transform/stream" -import { getModelParams } from "../transform/model-params" - -import { DEFAULT_HEADERS } from "./constants" -import { BaseProvider } from "./base-provider" -import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index" - -const GROQ_DEFAULT_TEMPERATURE = 0.5 - -/** - * Groq provider using the dedicated @ai-sdk/groq package. - * Provides native support for reasoning models and prompt caching. - */ -export class GroqHandler extends BaseProvider implements SingleCompletionHandler { - protected options: ApiHandlerOptions - protected provider: ReturnType - - constructor(options: ApiHandlerOptions) { - super() - this.options = options - - // Create the Groq provider using AI SDK - this.provider = createGroq({ - baseURL: "https://api.groq.com/openai/v1", - apiKey: options.groqApiKey ?? "not-provided", - headers: DEFAULT_HEADERS, - }) - } - - override getModel(): { id: string; info: ModelInfo; maxTokens?: number; temperature?: number } { - const id = this.options.apiModelId ?? groqDefaultModelId - const info = groqModels[id as keyof typeof groqModels] || groqModels[groqDefaultModelId] - const params = getModelParams({ - format: "openai", - modelId: id, - model: info, - settings: this.options, - defaultTemperature: GROQ_DEFAULT_TEMPERATURE, - }) - return { id, info, ...params } - } - - /** - * Get the language model for the configured model ID. - */ - protected getLanguageModel() { - const { id } = this.getModel() - return this.provider(id) - } - - /** - * Process usage metrics from the AI SDK response, including Groq's cache metrics. - * Groq provides cache hit/miss info via providerMetadata for supported models. - */ - protected processUsageMetrics( - usage: { - inputTokens?: number - outputTokens?: number - details?: { - cachedInputTokens?: number - reasoningTokens?: number - } - }, - providerMetadata?: { - groq?: { - promptCacheHitTokens?: number - promptCacheMissTokens?: number - } - }, - ): ApiStreamUsageChunk { - // Extract cache metrics from Groq's providerMetadata - const cacheReadTokens = providerMetadata?.groq?.promptCacheHitTokens ?? usage.details?.cachedInputTokens - const cacheWriteTokens = providerMetadata?.groq?.promptCacheMissTokens - - return { - type: "usage", - inputTokens: usage.inputTokens || 0, - outputTokens: usage.outputTokens || 0, - cacheReadTokens, - cacheWriteTokens, - reasoningTokens: usage.details?.reasoningTokens, - } - } - - /** - * Get the max tokens parameter to include in the request. - */ - protected getMaxOutputTokens(): number | undefined { - const { info } = this.getModel() - return this.options.modelMaxTokens || info.maxTokens || undefined - } - - /** - * Create a message stream using the AI SDK. 
- * Groq supports reasoning for models like qwen/qwen3-32b via reasoningFormat: 'parsed'. - */ - override async *createMessage( - systemPrompt: string, - messages: Anthropic.Messages.MessageParam[], - metadata?: ApiHandlerCreateMessageMetadata, - ): ApiStream { - const { temperature } = this.getModel() - const languageModel = this.getLanguageModel() - - // Convert messages to AI SDK format - const aiSdkMessages = convertToAiSdkMessages(messages) - - // Convert tools to OpenAI format first, then to AI SDK format - const openAiTools = this.convertToolsForOpenAI(metadata?.tools) - const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined - - // Build the request options - const requestOptions: Parameters[0] = { - model: languageModel, - system: systemPrompt, - messages: aiSdkMessages, - temperature: this.options.modelTemperature ?? temperature ?? GROQ_DEFAULT_TEMPERATURE, - maxOutputTokens: this.getMaxOutputTokens(), - tools: aiSdkTools, - toolChoice: mapToolChoice(metadata?.tool_choice), - } - - // Use streamText for streaming responses - const result = streamText(requestOptions) - - try { - // Process the full stream to get all events including reasoning - for await (const part of result.fullStream) { - for (const chunk of processAiSdkStreamPart(part)) { - yield chunk - } - } - - // Yield usage metrics at the end, including cache metrics from providerMetadata - const usage = await result.usage - const providerMetadata = await result.providerMetadata - if (usage) { - yield this.processUsageMetrics(usage, providerMetadata as any) - } - } catch (error) { - // Handle AI SDK errors (AI_RetryError, AI_APICallError, etc.) - throw handleAiSdkError(error, "Groq") - } - } - - /** - * Complete a prompt using the AI SDK generateText. - */ - async completePrompt(prompt: string): Promise { - const { temperature } = this.getModel() - const languageModel = this.getLanguageModel() - - const { text } = await generateText({ - model: languageModel, - prompt, - maxOutputTokens: this.getMaxOutputTokens(), - temperature: this.options.modelTemperature ?? temperature ?? GROQ_DEFAULT_TEMPERATURE, - }) - - return text - } - - override isAiSdkProvider(): boolean { - return true - } -} diff --git a/src/api/providers/huggingface.ts b/src/api/providers/huggingface.ts deleted file mode 100644 index 79daa95c77c..00000000000 --- a/src/api/providers/huggingface.ts +++ /dev/null @@ -1,215 +0,0 @@ -import { Anthropic } from "@anthropic-ai/sdk" -import { createOpenAICompatible } from "@ai-sdk/openai-compatible" -import { streamText, generateText, ToolSet } from "ai" - -import type { ModelRecord, ModelInfo } from "@roo-code/types" - -import type { ApiHandlerOptions } from "../../shared/api" - -import { - convertToAiSdkMessages, - convertToolsForAiSdk, - processAiSdkStreamPart, - mapToolChoice, - handleAiSdkError, -} from "../transform/ai-sdk" -import { ApiStream, ApiStreamUsageChunk } from "../transform/stream" -import { getModelParams } from "../transform/model-params" - -import { DEFAULT_HEADERS } from "./constants" -import { BaseProvider } from "./base-provider" -import { getHuggingFaceModels, getCachedHuggingFaceModels } from "./fetchers/huggingface" -import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index" - -const HUGGINGFACE_DEFAULT_TEMPERATURE = 0.7 - -/** - * HuggingFace provider using @ai-sdk/openai-compatible for OpenAI-compatible API. - * Uses HuggingFace's OpenAI-compatible endpoint to enable tool message support. 
- * @see https://github.com/vercel/ai/issues/10766 - Workaround for tool messages not supported in @ai-sdk/huggingface - */ -export class HuggingFaceHandler extends BaseProvider implements SingleCompletionHandler { - protected options: ApiHandlerOptions - protected provider: ReturnType - private modelCache: ModelRecord | null = null - - constructor(options: ApiHandlerOptions) { - super() - this.options = options - - if (!this.options.huggingFaceApiKey) { - throw new Error("Hugging Face API key is required") - } - - // Create an OpenAI-compatible provider pointing to HuggingFace's /v1 endpoint - // This fixes "tool messages not supported" error - the HuggingFace SDK doesn't - // properly handle function_call_output format, but OpenAI SDK does - this.provider = createOpenAICompatible({ - name: "huggingface", - baseURL: "https://router.huggingface.co/v1", - apiKey: this.options.huggingFaceApiKey, - headers: DEFAULT_HEADERS, - }) - - // Try to get cached models first - this.modelCache = getCachedHuggingFaceModels() - - // Fetch models asynchronously - this.fetchModels() - } - - private async fetchModels() { - try { - this.modelCache = await getHuggingFaceModels() - } catch (error) { - console.error("Failed to fetch HuggingFace models:", error) - } - } - - override getModel(): { id: string; info: ModelInfo; maxTokens?: number; temperature?: number } { - const id = this.options.huggingFaceModelId || "meta-llama/Llama-3.3-70B-Instruct" - - // Try to get model info from cache - const cachedInfo = this.modelCache?.[id] - - const info: ModelInfo = cachedInfo || { - maxTokens: 8192, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - } - - const params = getModelParams({ - format: "openai", - modelId: id, - model: info, - settings: this.options, - defaultTemperature: HUGGINGFACE_DEFAULT_TEMPERATURE, - }) - - return { id, info, ...params } - } - - /** - * Get the language model for the configured model ID. - */ - protected getLanguageModel() { - const { id } = this.getModel() - return this.provider(id) - } - - /** - * Process usage metrics from the AI SDK response. - */ - protected processUsageMetrics( - usage: { - inputTokens?: number - outputTokens?: number - details?: { - cachedInputTokens?: number - reasoningTokens?: number - } - }, - providerMetadata?: { - huggingface?: { - promptCacheHitTokens?: number - promptCacheMissTokens?: number - } - }, - ): ApiStreamUsageChunk { - // Extract cache metrics from HuggingFace's providerMetadata if available - const cacheReadTokens = providerMetadata?.huggingface?.promptCacheHitTokens ?? usage.details?.cachedInputTokens - const cacheWriteTokens = providerMetadata?.huggingface?.promptCacheMissTokens - - return { - type: "usage", - inputTokens: usage.inputTokens || 0, - outputTokens: usage.outputTokens || 0, - cacheReadTokens, - cacheWriteTokens, - reasoningTokens: usage.details?.reasoningTokens, - } - } - - /** - * Get the max tokens parameter to include in the request. - */ - protected getMaxOutputTokens(): number | undefined { - const { info } = this.getModel() - return this.options.modelMaxTokens || info.maxTokens || undefined - } - - /** - * Create a message stream using the AI SDK. 
- */ - override async *createMessage( - systemPrompt: string, - messages: Anthropic.Messages.MessageParam[], - metadata?: ApiHandlerCreateMessageMetadata, - ): ApiStream { - const { temperature } = this.getModel() - const languageModel = this.getLanguageModel() - - // Convert messages to AI SDK format - const aiSdkMessages = convertToAiSdkMessages(messages) - - // Convert tools to OpenAI format first, then to AI SDK format - const openAiTools = this.convertToolsForOpenAI(metadata?.tools) - const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined - - // Build the request options - const requestOptions: Parameters[0] = { - model: languageModel, - system: systemPrompt, - messages: aiSdkMessages, - temperature: this.options.modelTemperature ?? temperature ?? HUGGINGFACE_DEFAULT_TEMPERATURE, - maxOutputTokens: this.getMaxOutputTokens(), - tools: aiSdkTools, - toolChoice: mapToolChoice(metadata?.tool_choice), - } - - // Use streamText for streaming responses - const result = streamText(requestOptions) - - try { - // Process the full stream to get all events - for await (const part of result.fullStream) { - // Use the processAiSdkStreamPart utility to convert stream parts - for (const chunk of processAiSdkStreamPart(part)) { - yield chunk - } - } - - // Yield usage metrics at the end, including cache metrics from providerMetadata - const usage = await result.usage - const providerMetadata = await result.providerMetadata - if (usage) { - yield this.processUsageMetrics(usage, providerMetadata as any) - } - } catch (error) { - // Handle AI SDK errors (AI_RetryError, AI_APICallError, etc.) - throw handleAiSdkError(error, "HuggingFace") - } - } - - /** - * Complete a prompt using the AI SDK generateText. - */ - async completePrompt(prompt: string): Promise { - const { temperature } = this.getModel() - const languageModel = this.getLanguageModel() - - const { text } = await generateText({ - model: languageModel, - prompt, - maxOutputTokens: this.getMaxOutputTokens(), - temperature: this.options.modelTemperature ?? temperature ?? 
HUGGINGFACE_DEFAULT_TEMPERATURE, - }) - - return text - } - - override isAiSdkProvider(): boolean { - return true - } -} diff --git a/src/api/providers/index.ts b/src/api/providers/index.ts index cf49f75f189..51eafc200d9 100644 --- a/src/api/providers/index.ts +++ b/src/api/providers/index.ts @@ -1,16 +1,10 @@ export { AnthropicVertexHandler } from "./anthropic-vertex" export { AnthropicHandler } from "./anthropic" export { AwsBedrockHandler } from "./bedrock" -export { CerebrasHandler } from "./cerebras" -export { ChutesHandler } from "./chutes" export { DeepSeekHandler } from "./deepseek" -export { DoubaoHandler } from "./doubao" export { MoonshotHandler } from "./moonshot" export { FakeAIHandler } from "./fake-ai" export { GeminiHandler } from "./gemini" -export { GroqHandler } from "./groq" -export { HuggingFaceHandler } from "./huggingface" -export { IOIntelligenceHandler } from "./io-intelligence" export { LiteLLMHandler } from "./lite-llm" export { LmStudioHandler } from "./lm-studio" export { MistralHandler } from "./mistral" @@ -23,15 +17,12 @@ export { OpenRouterHandler } from "./openrouter" export { QwenCodeHandler } from "./qwen-code" export { RequestyHandler } from "./requesty" export { SambaNovaHandler } from "./sambanova" -export { UnboundHandler } from "./unbound" export { VertexHandler } from "./vertex" export { VsCodeLmHandler } from "./vscode-lm" export { XAIHandler } from "./xai" export { ZAiHandler } from "./zai" export { FireworksHandler } from "./fireworks" export { RooHandler } from "./roo" -export { FeatherlessHandler } from "./featherless" export { VercelAiGatewayHandler } from "./vercel-ai-gateway" -export { DeepInfraHandler } from "./deepinfra" export { MiniMaxHandler } from "./minimax" export { BasetenHandler } from "./baseten" diff --git a/src/api/providers/io-intelligence.ts b/src/api/providers/io-intelligence.ts deleted file mode 100644 index 11b8afe5c4e..00000000000 --- a/src/api/providers/io-intelligence.ts +++ /dev/null @@ -1,62 +0,0 @@ -import { - ioIntelligenceDefaultModelId, - ioIntelligenceModels, - type IOIntelligenceModelId, - type ModelInfo, -} from "@roo-code/types" - -import type { ApiHandlerOptions } from "../../shared/api" - -import { getModelParams } from "../transform/model-params" - -import { OpenAICompatibleHandler, type OpenAICompatibleConfig } from "./openai-compatible" - -export class IOIntelligenceHandler extends OpenAICompatibleHandler { - constructor(options: ApiHandlerOptions) { - if (!options.ioIntelligenceApiKey) { - throw new Error("IO Intelligence API key is required") - } - - const modelId = options.ioIntelligenceModelId ?? ioIntelligenceDefaultModelId - const modelInfo: ModelInfo = ioIntelligenceModels[modelId as IOIntelligenceModelId] ?? - ioIntelligenceModels[ioIntelligenceDefaultModelId] ?? { - maxTokens: 8192, - contextWindow: 128000, - supportsImages: false, - supportsPromptCache: false, - } - - const config: OpenAICompatibleConfig = { - providerName: "IO Intelligence", - baseURL: "https://api.intelligence.io.solutions/api/v1", - apiKey: options.ioIntelligenceApiKey, - modelId, - modelInfo, - modelMaxTokens: options.modelMaxTokens ?? undefined, - temperature: options.modelTemperature ?? 0.7, - } - - super(options, config) - } - - override getModel() { - const modelId = this.options.ioIntelligenceModelId ?? ioIntelligenceDefaultModelId - const modelInfo: ModelInfo = ioIntelligenceModels[modelId as IOIntelligenceModelId] ?? - ioIntelligenceModels[ioIntelligenceDefaultModelId] ?? 
{ - maxTokens: 8192, - contextWindow: 128000, - supportsImages: false, - supportsPromptCache: false, - } - - const params = getModelParams({ - format: "openai", - modelId, - model: modelInfo, - settings: this.options, - defaultTemperature: 0.7, - }) - - return { id: modelId, info: modelInfo, ...params } - } -} diff --git a/src/api/providers/unbound.ts b/src/api/providers/unbound.ts deleted file mode 100644 index ba144f6e1b7..00000000000 --- a/src/api/providers/unbound.ts +++ /dev/null @@ -1,208 +0,0 @@ -import { Anthropic } from "@anthropic-ai/sdk" -import OpenAI from "openai" - -import { unboundDefaultModelId, unboundDefaultModelInfo } from "@roo-code/types" - -import type { ApiHandlerOptions } from "../../shared/api" - -import { ApiStream, ApiStreamUsageChunk } from "../transform/stream" -import { convertToOpenAiMessages } from "../transform/openai-format" -import { addCacheBreakpoints as addAnthropicCacheBreakpoints } from "../transform/caching/anthropic" -import { addCacheBreakpoints as addGeminiCacheBreakpoints } from "../transform/caching/gemini" -import { addCacheBreakpoints as addVertexCacheBreakpoints } from "../transform/caching/vertex" - -import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index" -import { RouterProvider } from "./router-provider" -import { getModelParams } from "../transform/model-params" -import { getModels } from "./fetchers/modelCache" - -const ORIGIN_APP = "roo-code" - -const DEFAULT_HEADERS = { - "X-Unbound-Metadata": JSON.stringify({ labels: [{ key: "app", value: "roo-code" }] }), -} - -interface UnboundUsage extends OpenAI.CompletionUsage { - cache_creation_input_tokens?: number - cache_read_input_tokens?: number -} - -type UnboundChatCompletionCreateParamsStreaming = OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming & { - unbound_metadata: { - originApp: string - taskId?: string - mode?: string - } -} - -type UnboundChatCompletionCreateParamsNonStreaming = OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming & { - unbound_metadata: { - originApp: string - } -} - -export class UnboundHandler extends RouterProvider implements SingleCompletionHandler { - constructor(options: ApiHandlerOptions) { - super({ - options, - name: "unbound", - baseURL: "https://api.getunbound.ai/v1", - apiKey: options.unboundApiKey, - modelId: options.unboundModelId, - defaultModelId: unboundDefaultModelId, - defaultModelInfo: unboundDefaultModelInfo, - }) - } - - public override async fetchModel() { - this.models = await getModels({ provider: this.name, apiKey: this.client.apiKey, baseUrl: this.client.baseURL }) - return this.getModel() - } - - override getModel() { - const requestedId = this.options.unboundModelId ?? unboundDefaultModelId - const modelExists = this.models[requestedId] - const id = modelExists ? requestedId : unboundDefaultModelId - const info = modelExists ? 
this.models[requestedId] : unboundDefaultModelInfo - - const params = getModelParams({ - format: "openai", - modelId: id, - model: info, - settings: this.options, - defaultTemperature: 0, - }) - - return { id, info, ...params } - } - - override async *createMessage( - systemPrompt: string, - messages: Anthropic.Messages.MessageParam[], - metadata?: ApiHandlerCreateMessageMetadata, - ): ApiStream { - // Ensure we have up-to-date model metadata - await this.fetchModel() - const { id: modelId, info } = this.getModel() - - const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [ - { role: "system", content: systemPrompt }, - ...convertToOpenAiMessages(messages), - ] - - if (info.supportsPromptCache) { - if (modelId.startsWith("google/")) { - addGeminiCacheBreakpoints(systemPrompt, openAiMessages) - } else if (modelId.startsWith("anthropic/")) { - addAnthropicCacheBreakpoints(systemPrompt, openAiMessages) - } - } - // Custom models from Vertex AI (no configuration) need to be handled differently. - if (modelId.startsWith("vertex-ai/google.") || modelId.startsWith("vertex-ai/anthropic.")) { - addVertexCacheBreakpoints(messages) - } - - // Required by Anthropic; other providers default to max tokens allowed. - let maxTokens: number | undefined - - if (modelId.startsWith("anthropic/")) { - maxTokens = info.maxTokens ?? undefined - } - - const requestOptions: UnboundChatCompletionCreateParamsStreaming = { - model: modelId.split("/")[1], - max_tokens: maxTokens, - messages: openAiMessages, - stream: true, - stream_options: { include_usage: true }, - unbound_metadata: { - originApp: ORIGIN_APP, - taskId: metadata?.taskId, - mode: metadata?.mode, - }, - tools: this.convertToolsForOpenAI(metadata?.tools), - tool_choice: metadata?.tool_choice, - parallel_tool_calls: metadata?.parallelToolCalls ?? true, - } - - if (this.supportsTemperature(modelId)) { - requestOptions.temperature = this.options.modelTemperature ?? 0 - } - - const { data: completion } = await this.client.chat.completions - .create(requestOptions, { headers: DEFAULT_HEADERS }) - .withResponse() - - for await (const chunk of completion) { - const delta = chunk.choices[0]?.delta - const usage = chunk.usage as UnboundUsage - - if (delta?.content) { - yield { type: "text", text: delta.content } - } - - // Handle tool calls in stream - emit partial chunks for NativeToolCallParser - if (delta?.tool_calls) { - for (const toolCall of delta.tool_calls) { - yield { - type: "tool_call_partial", - index: toolCall.index, - id: toolCall.id, - name: toolCall.function?.name, - arguments: toolCall.function?.arguments, - } - } - } - - if (usage) { - const usageData: ApiStreamUsageChunk = { - type: "usage", - inputTokens: usage.prompt_tokens || 0, - outputTokens: usage.completion_tokens || 0, - } - - // Only add cache tokens if they exist. - if (usage.cache_creation_input_tokens) { - usageData.cacheWriteTokens = usage.cache_creation_input_tokens - } - - if (usage.cache_read_input_tokens) { - usageData.cacheReadTokens = usage.cache_read_input_tokens - } - - yield usageData - } - } - } - - async completePrompt(prompt: string): Promise { - const { id: modelId, info } = await this.fetchModel() - - try { - const requestOptions: UnboundChatCompletionCreateParamsNonStreaming = { - model: modelId.split("/")[1], - messages: [{ role: "user", content: prompt }], - unbound_metadata: { - originApp: ORIGIN_APP, - }, - } - - if (this.supportsTemperature(modelId)) { - requestOptions.temperature = this.options.modelTemperature ?? 
0 - } - - if (modelId.startsWith("anthropic/")) { - requestOptions.max_tokens = info.maxTokens - } - - const response = await this.client.chat.completions.create(requestOptions, { headers: DEFAULT_HEADERS }) - return response.choices[0]?.message.content || "" - } catch (error) { - if (error instanceof Error) { - throw new Error(`Unbound completion error: ${error.message}`) - } - - throw error - } - } -} diff --git a/src/api/transform/__tests__/ai-sdk.spec.ts b/src/api/transform/__tests__/ai-sdk.spec.ts index ea4b9a4235e..f973fc85a6d 100644 --- a/src/api/transform/__tests__/ai-sdk.spec.ts +++ b/src/api/transform/__tests__/ai-sdk.spec.ts @@ -810,9 +810,9 @@ describe("AI SDK conversion utilities", () => { lastError: { message: "Too Many Requests", status: 429 }, } - const result = handleAiSdkError(retryError, "Groq") + const result = handleAiSdkError(retryError, "SambaNova") - expect(result.message).toContain("Groq:") + expect(result.message).toContain("SambaNova:") expect(result.message).toContain("429") expect((result as any).status).toBe(429) }) @@ -833,7 +833,7 @@ describe("AI SDK conversion utilities", () => { it("should preserve original error as cause", () => { const originalError = new Error("Original error") - const result = handleAiSdkError(originalError, "Cerebras") + const result = handleAiSdkError(originalError, "Mistral") expect((result as any).cause).toBe(originalError) }) diff --git a/src/core/context/context-management/__tests__/context-error-handling.test.ts b/src/core/context/context-management/__tests__/context-error-handling.test.ts index d26ac837f08..8ba431b05c5 100644 --- a/src/core/context/context-management/__tests__/context-error-handling.test.ts +++ b/src/core/context/context-management/__tests__/context-error-handling.test.ts @@ -193,37 +193,6 @@ describe("checkContextWindowExceededError", () => { }) }) - describe("Cerebras errors", () => { - it("should detect Cerebras context window error", () => { - const error = { - status: 400, - message: "Please reduce the length of the messages or completion", - } - - expect(checkContextWindowExceededError(error)).toBe(true) - }) - - it("should detect Cerebras error with nested structure", () => { - const error = { - error: { - status: 400, - message: "Please reduce the length of the messages or completion", - }, - } - - expect(checkContextWindowExceededError(error)).toBe(true) - }) - - it("should not detect non-context Cerebras errors", () => { - const error = { - status: 400, - message: "Invalid request parameters", - } - - expect(checkContextWindowExceededError(error)).toBe(false) - }) - }) - describe("Edge cases", () => { it("should handle null input", () => { expect(checkContextWindowExceededError(null)).toBe(false) @@ -317,13 +286,6 @@ describe("checkContextWindowExceededError", () => { }, } expect(checkContextWindowExceededError(error2)).toBe(true) - - // This error should be detected by Cerebras check - const error3 = { - status: 400, - message: "Please reduce the length of the messages or completion", - } - expect(checkContextWindowExceededError(error3)).toBe(true) }) }) }) diff --git a/src/core/context/context-management/context-error-handling.ts b/src/core/context/context-management/context-error-handling.ts index 006d7b16072..6cfe993f955 100644 --- a/src/core/context/context-management/context-error-handling.ts +++ b/src/core/context/context-management/context-error-handling.ts @@ -4,8 +4,7 @@ export function checkContextWindowExceededError(error: unknown): boolean { return ( checkIsOpenAIContextWindowError(error) 
|| checkIsOpenRouterContextWindowError(error) || - checkIsAnthropicContextWindowError(error) || - checkIsCerebrasContextWindowError(error) + checkIsAnthropicContextWindowError(error) ) } @@ -94,21 +93,3 @@ function checkIsAnthropicContextWindowError(response: unknown): boolean { return false } } - -function checkIsCerebrasContextWindowError(response: unknown): boolean { - try { - // Type guard to safely access properties - if (!response || typeof response !== "object") { - return false - } - - // Use type assertions with proper checks - const res = response as Record - const status = res.status ?? res.code ?? res.error?.status ?? res.response?.status - const message: string = String(res.message || res.error?.message || "") - - return String(status) === "400" && message.includes("Please reduce the length of the messages or completion") - } catch { - return false - } -} diff --git a/src/core/webview/__tests__/ClineProvider.spec.ts b/src/core/webview/__tests__/ClineProvider.spec.ts index b65b137597c..fff16153328 100644 --- a/src/core/webview/__tests__/ClineProvider.spec.ts +++ b/src/core/webview/__tests__/ClineProvider.spec.ts @@ -2552,7 +2552,6 @@ describe("ClineProvider - Router Models", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", - unboundApiKey: "unbound-key", litellmApiKey: "litellm-key", litellmBaseUrl: "http://localhost:4000", }, @@ -2581,9 +2580,7 @@ describe("ClineProvider - Router Models", () => { // Verify getModels was called for each provider with correct options expect(getModels).toHaveBeenCalledWith({ provider: "openrouter" }) expect(getModels).toHaveBeenCalledWith({ provider: "requesty", apiKey: "requesty-key" }) - expect(getModels).toHaveBeenCalledWith({ provider: "unbound", apiKey: "unbound-key" }) expect(getModels).toHaveBeenCalledWith({ provider: "vercel-ai-gateway" }) - expect(getModels).toHaveBeenCalledWith({ provider: "deepinfra" }) expect(getModels).toHaveBeenCalledWith( expect.objectContaining({ provider: "roo", @@ -2595,24 +2592,18 @@ describe("ClineProvider - Router Models", () => { apiKey: "litellm-key", baseUrl: "http://localhost:4000", }) - expect(getModels).toHaveBeenCalledWith({ provider: "chutes" }) // Verify response was sent expect(mockPostMessage).toHaveBeenCalledWith({ type: "routerModels", routerModels: { - deepinfra: mockModels, openrouter: mockModels, requesty: mockModels, - unbound: mockModels, roo: mockModels, - chutes: mockModels, litellm: mockModels, ollama: {}, lmstudio: {}, "vercel-ai-gateway": mockModels, - huggingface: {}, - "io-intelligence": {}, }, values: undefined, }) @@ -2626,7 +2617,6 @@ describe("ClineProvider - Router Models", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", - unboundApiKey: "unbound-key", litellmApiKey: "litellm-key", litellmBaseUrl: "http://localhost:4000", }, @@ -2641,11 +2631,8 @@ describe("ClineProvider - Router Models", () => { vi.mocked(getModels) .mockResolvedValueOnce(mockModels) // openrouter success .mockRejectedValueOnce(new Error("Requesty API error")) // requesty fail - .mockRejectedValueOnce(new Error("Unbound API error")) // unbound fail .mockResolvedValueOnce(mockModels) // vercel-ai-gateway success - .mockResolvedValueOnce(mockModels) // deepinfra success .mockResolvedValueOnce(mockModels) // roo success - .mockRejectedValueOnce(new Error("Chutes API error")) // chutes fail .mockRejectedValueOnce(new Error("LiteLLM connection failed")) // litellm fail await messageHandler({ type: "requestRouterModels" }) 
@@ -2654,18 +2641,13 @@ describe("ClineProvider - Router Models", () => { expect(mockPostMessage).toHaveBeenCalledWith({ type: "routerModels", routerModels: { - deepinfra: mockModels, openrouter: mockModels, requesty: {}, - unbound: {}, roo: mockModels, - chutes: {}, ollama: {}, lmstudio: {}, litellm: {}, "vercel-ai-gateway": mockModels, - huggingface: {}, - "io-intelligence": {}, }, values: undefined, }) @@ -2678,27 +2660,6 @@ describe("ClineProvider - Router Models", () => { values: { provider: "requesty" }, }) - expect(mockPostMessage).toHaveBeenCalledWith({ - type: "singleRouterModelFetchResponse", - success: false, - error: "Unbound API error", - values: { provider: "unbound" }, - }) - - expect(mockPostMessage).toHaveBeenCalledWith({ - type: "singleRouterModelFetchResponse", - success: false, - error: "Unbound API error", - values: { provider: "unbound" }, - }) - - expect(mockPostMessage).toHaveBeenCalledWith({ - type: "singleRouterModelFetchResponse", - success: false, - error: "Chutes API error", - values: { provider: "chutes" }, - }) - expect(mockPostMessage).toHaveBeenCalledWith({ type: "singleRouterModelFetchResponse", success: false, @@ -2716,7 +2677,6 @@ describe("ClineProvider - Router Models", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", - unboundApiKey: "unbound-key", // No litellm config }, } as any) @@ -2751,7 +2711,6 @@ describe("ClineProvider - Router Models", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", - unboundApiKey: "unbound-key", // No litellm config }, } as any) @@ -2775,18 +2734,13 @@ describe("ClineProvider - Router Models", () => { expect(mockPostMessage).toHaveBeenCalledWith({ type: "routerModels", routerModels: { - deepinfra: mockModels, openrouter: mockModels, requesty: mockModels, - unbound: mockModels, roo: mockModels, - chutes: mockModels, litellm: {}, ollama: {}, lmstudio: {}, "vercel-ai-gateway": mockModels, - huggingface: {}, - "io-intelligence": {}, }, values: undefined, }) diff --git a/src/core/webview/__tests__/webviewMessageHandler.routerModels.spec.ts b/src/core/webview/__tests__/webviewMessageHandler.routerModels.spec.ts index df2616a8425..111b6c745d1 100644 --- a/src/core/webview/__tests__/webviewMessageHandler.routerModels.spec.ts +++ b/src/core/webview/__tests__/webviewMessageHandler.routerModels.spec.ts @@ -74,14 +74,8 @@ describe("webviewMessageHandler - requestRouterModels provider filter", () => { return { "openrouter/qwen2.5": { contextWindow: 32768, supportsPromptCache: false } } case "requesty": return { "requesty/model": { contextWindow: 8192, supportsPromptCache: false } } - case "deepinfra": - return { "deepinfra/model": { contextWindow: 8192, supportsPromptCache: false } } - case "unbound": - return { "unbound/model": { contextWindow: 8192, supportsPromptCache: false } } case "vercel-ai-gateway": return { "vercel/model": { contextWindow: 8192, supportsPromptCache: false } } - case "io-intelligence": - return { "io/model": { contextWindow: 8192, supportsPromptCache: false } } case "litellm": return { "litellm/model": { contextWindow: 8192, supportsPromptCache: false } } default: diff --git a/src/core/webview/__tests__/webviewMessageHandler.spec.ts b/src/core/webview/__tests__/webviewMessageHandler.spec.ts index faa8e926825..420d309fb76 100644 --- a/src/core/webview/__tests__/webviewMessageHandler.spec.ts +++ b/src/core/webview/__tests__/webviewMessageHandler.spec.ts @@ -265,7 +265,6 @@ describe("webviewMessageHandler - 
requestRouterModels", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", - unboundApiKey: "unbound-key", litellmApiKey: "litellm-key", litellmBaseUrl: "http://localhost:4000", }, @@ -297,9 +296,7 @@ describe("webviewMessageHandler - requestRouterModels", () => { // Verify getModels was called for each provider expect(mockGetModels).toHaveBeenCalledWith({ provider: "openrouter" }) expect(mockGetModels).toHaveBeenCalledWith({ provider: "requesty", apiKey: "requesty-key" }) - expect(mockGetModels).toHaveBeenCalledWith({ provider: "unbound", apiKey: "unbound-key" }) expect(mockGetModels).toHaveBeenCalledWith({ provider: "vercel-ai-gateway" }) - expect(mockGetModels).toHaveBeenCalledWith({ provider: "deepinfra" }) expect(mockGetModels).toHaveBeenCalledWith( expect.objectContaining({ provider: "roo", @@ -311,25 +308,18 @@ describe("webviewMessageHandler - requestRouterModels", () => { apiKey: "litellm-key", baseUrl: "http://localhost:4000", }) - // Note: huggingface is not fetched in requestRouterModels - it has its own handler - // Note: io-intelligence is not fetched because no API key is provided in the mock state // Verify response was sent expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: "routerModels", routerModels: { - deepinfra: mockModels, openrouter: mockModels, requesty: mockModels, - unbound: mockModels, litellm: mockModels, roo: mockModels, - chutes: mockModels, ollama: {}, lmstudio: {}, "vercel-ai-gateway": mockModels, - huggingface: {}, - "io-intelligence": {}, }, values: undefined, }) @@ -340,7 +330,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", - unboundApiKey: "unbound-key", // Missing litellm config }, }) @@ -377,7 +366,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", - unboundApiKey: "unbound-key", // Missing litellm config }, }) @@ -409,18 +397,13 @@ describe("webviewMessageHandler - requestRouterModels", () => { expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: "routerModels", routerModels: { - deepinfra: mockModels, openrouter: mockModels, requesty: mockModels, - unbound: mockModels, roo: mockModels, - chutes: mockModels, litellm: {}, ollama: {}, lmstudio: {}, "vercel-ai-gateway": mockModels, - huggingface: {}, - "io-intelligence": {}, }, values: undefined, }) @@ -440,11 +423,8 @@ describe("webviewMessageHandler - requestRouterModels", () => { mockGetModels .mockResolvedValueOnce(mockModels) // openrouter .mockRejectedValueOnce(new Error("Requesty API error")) // requesty - .mockRejectedValueOnce(new Error("Unbound API error")) // unbound .mockResolvedValueOnce(mockModels) // vercel-ai-gateway - .mockResolvedValueOnce(mockModels) // deepinfra .mockResolvedValueOnce(mockModels) // roo - .mockRejectedValueOnce(new Error("Chutes API error")) // chutes .mockRejectedValueOnce(new Error("LiteLLM connection failed")) // litellm await webviewMessageHandler(mockClineProvider, { @@ -459,20 +439,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { values: { provider: "requesty" }, }) - expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ - type: "singleRouterModelFetchResponse", - success: false, - error: "Unbound API error", - values: { provider: "unbound" }, - }) - - expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ - 
type: "singleRouterModelFetchResponse", - success: false, - error: "Chutes API error", - values: { provider: "chutes" }, - }) - expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: "singleRouterModelFetchResponse", success: false, @@ -484,18 +450,13 @@ describe("webviewMessageHandler - requestRouterModels", () => { expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: "routerModels", routerModels: { - deepinfra: mockModels, openrouter: mockModels, requesty: {}, - unbound: {}, roo: mockModels, - chutes: {}, litellm: {}, ollama: {}, lmstudio: {}, "vercel-ai-gateway": mockModels, - huggingface: {}, - "io-intelligence": {}, }, values: undefined, }) @@ -506,11 +467,8 @@ describe("webviewMessageHandler - requestRouterModels", () => { mockGetModels .mockRejectedValueOnce(new Error("Structured error message")) // openrouter .mockRejectedValueOnce(new Error("Requesty API error")) // requesty - .mockRejectedValueOnce(new Error("Unbound API error")) // unbound .mockRejectedValueOnce(new Error("Vercel AI Gateway error")) // vercel-ai-gateway - .mockRejectedValueOnce(new Error("DeepInfra API error")) // deepinfra .mockRejectedValueOnce(new Error("Roo API error")) // roo - .mockRejectedValueOnce(new Error("Chutes API error")) // chutes .mockRejectedValueOnce(new Error("LiteLLM connection failed")) // litellm await webviewMessageHandler(mockClineProvider, { @@ -532,20 +490,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { values: { provider: "requesty" }, }) - expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ - type: "singleRouterModelFetchResponse", - success: false, - error: "Unbound API error", - values: { provider: "unbound" }, - }) - - expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ - type: "singleRouterModelFetchResponse", - success: false, - error: "DeepInfra API error", - values: { provider: "deepinfra" }, - }) - expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: "singleRouterModelFetchResponse", success: false, @@ -560,13 +504,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { values: { provider: "roo" }, }) - expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ - type: "singleRouterModelFetchResponse", - success: false, - error: "Chutes API error", - values: { provider: "chutes" }, - }) - expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: "singleRouterModelFetchResponse", success: false, diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts index 75f1ce0ff4a..a69b7f51f09 100644 --- a/src/core/webview/webviewMessageHandler.ts +++ b/src/core/webview/webviewMessageHandler.ts @@ -874,16 +874,11 @@ export const webviewMessageHandler = async ( : { openrouter: {}, "vercel-ai-gateway": {}, - huggingface: {}, litellm: {}, - deepinfra: {}, - "io-intelligence": {}, requesty: {}, - unbound: {}, ollama: {}, lmstudio: {}, roo: {}, - chutes: {}, } const safeGetModels = async (options: GetModelsOptions): Promise => { @@ -910,16 +905,7 @@ export const webviewMessageHandler = async ( baseUrl: apiConfiguration.requestyBaseUrl, }, }, - { key: "unbound", options: { provider: "unbound", apiKey: apiConfiguration.unboundApiKey } }, { key: "vercel-ai-gateway", options: { provider: "vercel-ai-gateway" } }, - { - key: "deepinfra", - options: { - provider: "deepinfra", - apiKey: apiConfiguration.deepInfraApiKey, - baseUrl: apiConfiguration.deepInfraBaseUrl, - }, - }, { key: "roo", options: 
{ @@ -930,20 +916,8 @@ export const webviewMessageHandler = async ( : undefined, }, }, - { - key: "chutes", - options: { provider: "chutes", apiKey: apiConfiguration.chutesApiKey }, - }, ] - // IO Intelligence is conditional on api key - if (apiConfiguration.ioIntelligenceApiKey) { - candidates.push({ - key: "io-intelligence", - options: { provider: "io-intelligence", apiKey: apiConfiguration.ioIntelligenceApiKey }, - }) - } - // LiteLLM is conditional on baseUrl+apiKey const litellmApiKey = apiConfiguration.litellmApiKey || message?.values?.litellmApiKey const litellmBaseUrl = apiConfiguration.litellmBaseUrl || message?.values?.litellmBaseUrl @@ -1131,21 +1105,6 @@ export const webviewMessageHandler = async ( // TODO: Cache like we do for OpenRouter, etc? provider.postMessageToWebview({ type: "vsCodeLmModels", vsCodeLmModels }) break - case "requestHuggingFaceModels": - // TODO: Why isn't this handled by `requestRouterModels` above? - try { - const { getHuggingFaceModelsWithMetadata } = await import("../../api/providers/fetchers/huggingface") - const huggingFaceModelsResponse = await getHuggingFaceModelsWithMetadata() - - provider.postMessageToWebview({ - type: "huggingFaceModels", - huggingFaceModels: huggingFaceModelsResponse.models, - }) - } catch (error) { - console.error("Failed to fetch Hugging Face models:", error) - provider.postMessageToWebview({ type: "huggingFaceModels", huggingFaceModels: [] }) - } - break case "openImage": openImage(message.text!, { values: message.values }) break diff --git a/src/i18n/locales/ca/common.json b/src/i18n/locales/ca/common.json index 9f8f961e73e..33188fce193 100644 --- a/src/i18n/locales/ca/common.json +++ b/src/i18n/locales/ca/common.json @@ -114,15 +114,6 @@ "thinking_complete_safety": "(Pensament completat, però la sortida s'ha bloquejat a causa de la configuració de seguretat.)", "thinking_complete_recitation": "(Pensament completat, però la sortida s'ha bloquejat a causa de la comprovació de recitació.)" }, - "cerebras": { - "authenticationFailed": "Ha fallat l'autenticació de l'API de Cerebras. Comproveu que la vostra clau d'API sigui vàlida i no hagi caducat.", - "accessForbidden": "Accés denegat a l'API de Cerebras. La vostra clau d'API pot no tenir accés al model o funcionalitat sol·licitats.", - "rateLimitExceeded": "S'ha superat el límit de velocitat de l'API de Cerebras. Espereu abans de fer una altra sol·licitud.", - "serverError": "Error del servidor de l'API de Cerebras ({{status}}). Torneu-ho a provar més tard.", - "genericError": "Error de l'API de Cerebras ({{status}}): {{message}}", - "noResponseBody": "Error de l'API de Cerebras: No hi ha cos de resposta", - "completionError": "Error de finalització de Cerebras: {{error}}" - }, "roo": { "authenticationRequired": "El proveïdor Roo requereix autenticació al núvol. Si us plau, inicieu sessió a Roo Code Cloud." 
}, @@ -205,10 +196,7 @@ "enter_valid_path": "Introdueix una ruta vàlida" }, "settings": { - "providers": { - "groqApiKey": "Clau API de Groq", - "getGroqApiKey": "Obté la clau API de Groq" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/de/common.json b/src/i18n/locales/de/common.json index 086372dda85..861d9da5768 100644 --- a/src/i18n/locales/de/common.json +++ b/src/i18n/locales/de/common.json @@ -111,15 +111,6 @@ "thinking_complete_safety": "(Denken abgeschlossen, aber die Ausgabe wurde aufgrund von Sicherheitseinstellungen blockiert.)", "thinking_complete_recitation": "(Denken abgeschlossen, aber die Ausgabe wurde aufgrund der Rezitationsprüfung blockiert.)" }, - "cerebras": { - "authenticationFailed": "Cerebras API-Authentifizierung fehlgeschlagen. Bitte überprüfe, ob dein API-Schlüssel gültig und nicht abgelaufen ist.", - "accessForbidden": "Cerebras API-Zugriff verweigert. Dein API-Schlüssel hat möglicherweise keinen Zugriff auf das angeforderte Modell oder die Funktion.", - "rateLimitExceeded": "Cerebras API-Ratenlimit überschritten. Bitte warte, bevor du eine weitere Anfrage stellst.", - "serverError": "Cerebras API-Serverfehler ({{status}}). Bitte versuche es später erneut.", - "genericError": "Cerebras API-Fehler ({{status}}): {{message}}", - "noResponseBody": "Cerebras API-Fehler: Kein Antworttext vorhanden", - "completionError": "Cerebras-Vervollständigungsfehler: {{error}}" - }, "roo": { "authenticationRequired": "Roo-Anbieter erfordert Cloud-Authentifizierung. Bitte melde dich bei Roo Code Cloud an." }, @@ -205,10 +196,7 @@ "task_placeholder": "Gib deine Aufgabe hier ein" }, "settings": { - "providers": { - "groqApiKey": "Groq API-Schlüssel", - "getGroqApiKey": "Groq API-Schlüssel erhalten" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/en/common.json b/src/i18n/locales/en/common.json index 636d26f76cb..d65fe183679 100644 --- a/src/i18n/locales/en/common.json +++ b/src/i18n/locales/en/common.json @@ -111,15 +111,6 @@ "thinking_complete_safety": "(Thinking complete, but output was blocked due to safety settings.)", "thinking_complete_recitation": "(Thinking complete, but output was blocked due to recitation check.)" }, - "cerebras": { - "authenticationFailed": "Cerebras API authentication failed. Please check your API key is valid and not expired.", - "accessForbidden": "Cerebras API access forbidden. Your API key may not have access to the requested model or feature.", - "rateLimitExceeded": "Cerebras API rate limit exceeded. Please wait before making another request.", - "serverError": "Cerebras API server error ({{status}}). Please try again later.", - "genericError": "Cerebras API Error ({{status}}): {{message}}", - "noResponseBody": "Cerebras API Error: No response body", - "completionError": "Cerebras completion error: {{error}}" - }, "roo": { "authenticationRequired": "Roo provider requires cloud authentication. Please sign in to Roo Code Cloud." 
}, diff --git a/src/i18n/locales/es/common.json b/src/i18n/locales/es/common.json index bc22040c6a8..82be83956b0 100644 --- a/src/i18n/locales/es/common.json +++ b/src/i18n/locales/es/common.json @@ -111,15 +111,6 @@ "thinking_complete_safety": "(Pensamiento completado, pero la salida fue bloqueada debido a la configuración de seguridad.)", "thinking_complete_recitation": "(Pensamiento completado, pero la salida fue bloqueada debido a la comprobación de recitación.)" }, - "cerebras": { - "authenticationFailed": "Falló la autenticación de la API de Cerebras. Verifica que tu clave de API sea válida y no haya expirado.", - "accessForbidden": "Acceso prohibido a la API de Cerebras. Tu clave de API puede no tener acceso al modelo o función solicitada.", - "rateLimitExceeded": "Se excedió el límite de velocidad de la API de Cerebras. Espera antes de hacer otra solicitud.", - "serverError": "Error del servidor de la API de Cerebras ({{status}}). Inténtalo de nuevo más tarde.", - "genericError": "Error de la API de Cerebras ({{status}}): {{message}}", - "noResponseBody": "Error de la API de Cerebras: Sin cuerpo de respuesta", - "completionError": "Error de finalización de Cerebras: {{error}}" - }, "roo": { "authenticationRequired": "El proveedor Roo requiere autenticación en la nube. Por favor, inicia sesión en Roo Code Cloud." }, @@ -205,10 +196,7 @@ "task_placeholder": "Escribe tu tarea aquí" }, "settings": { - "providers": { - "groqApiKey": "Clave API de Groq", - "getGroqApiKey": "Obtener clave API de Groq" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/fr/common.json b/src/i18n/locales/fr/common.json index f7a76a53c12..6fc05ff94a3 100644 --- a/src/i18n/locales/fr/common.json +++ b/src/i18n/locales/fr/common.json @@ -111,15 +111,6 @@ "thinking_complete_safety": "(Réflexion terminée, mais la sortie a été bloquée en raison des paramètres de sécurité.)", "thinking_complete_recitation": "(Réflexion terminée, mais la sortie a été bloquée en raison de la vérification de récitation.)" }, - "cerebras": { - "authenticationFailed": "Échec de l'authentification de l'API Cerebras. Vérifiez que votre clé API est valide et n'a pas expiré.", - "accessForbidden": "Accès interdit à l'API Cerebras. Votre clé API peut ne pas avoir accès au modèle ou à la fonction demandée.", - "rateLimitExceeded": "Limite de débit de l'API Cerebras dépassée. Veuillez attendre avant de faire une autre demande.", - "serverError": "Erreur du serveur de l'API Cerebras ({{status}}). Veuillez réessayer plus tard.", - "genericError": "Erreur de l'API Cerebras ({{status}}) : {{message}}", - "noResponseBody": "Erreur de l'API Cerebras : Aucun corps de réponse", - "completionError": "Erreur d'achèvement de Cerebras : {{error}}" - }, "roo": { "authenticationRequired": "Le fournisseur Roo nécessite une authentification cloud. Veuillez vous connecter à Roo Code Cloud." 
}, @@ -205,10 +196,7 @@ "task_placeholder": "Écris ta tâche ici" }, "settings": { - "providers": { - "groqApiKey": "Clé API Groq", - "getGroqApiKey": "Obtenir la clé API Groq" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/hi/common.json b/src/i18n/locales/hi/common.json index e51d177d946..528ed6d45f5 100644 --- a/src/i18n/locales/hi/common.json +++ b/src/i18n/locales/hi/common.json @@ -111,15 +111,6 @@ "thinking_complete_safety": "(सोचना पूरा हुआ, लेकिन सुरक्षा सेटिंग्स के कारण आउटपुट अवरुद्ध कर दिया गया।)", "thinking_complete_recitation": "(सोचना पूरा हुआ, लेकिन पाठ जाँच के कारण आउटपुट अवरुद्ध कर दिया गया।)" }, - "cerebras": { - "authenticationFailed": "Cerebras API प्रमाणीकरण विफल हुआ। कृपया जांचें कि आपकी API कुंजी वैध है और समाप्त नहीं हुई है।", - "accessForbidden": "Cerebras API पहुंच निषेध। आपकी API कुंजी का अनुरोधित मॉडल या सुविधा तक पहुंच नहीं हो सकती है।", - "rateLimitExceeded": "Cerebras API दर सीमा पार हो गई। कृपया दूसरा अनुरोध करने से पहले प्रतीक्षा करें।", - "serverError": "Cerebras API सर्वर त्रुटि ({{status}})। कृपया बाद में पुनः प्रयास करें।", - "genericError": "Cerebras API त्रुटि ({{status}}): {{message}}", - "noResponseBody": "Cerebras API त्रुटि: कोई प्रतिक्रिया मुख्य भाग नहीं", - "completionError": "Cerebras पूर्णता त्रुटि: {{error}}" - }, "roo": { "authenticationRequired": "Roo प्रदाता को क्लाउड प्रमाणीकरण की आवश्यकता है। कृपया Roo Code Cloud में साइन इन करें।" }, @@ -205,10 +196,7 @@ "task_placeholder": "अपना कार्य यहाँ लिखें" }, "settings": { - "providers": { - "groqApiKey": "ग्रोक एपीआई कुंजी", - "getGroqApiKey": "ग्रोक एपीआई कुंजी प्राप्त करें" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/id/common.json b/src/i18n/locales/id/common.json index cfb165979d3..cb1c3231fb8 100644 --- a/src/i18n/locales/id/common.json +++ b/src/i18n/locales/id/common.json @@ -111,15 +111,6 @@ "thinking_complete_safety": "(Berpikir selesai, tetapi output diblokir karena pengaturan keamanan.)", "thinking_complete_recitation": "(Berpikir selesai, tetapi output diblokir karena pemeriksaan resitasi.)" }, - "cerebras": { - "authenticationFailed": "Autentikasi API Cerebras gagal. Silakan periksa apakah kunci API Anda valid dan belum kedaluwarsa.", - "accessForbidden": "Akses API Cerebras ditolak. Kunci API Anda mungkin tidak memiliki akses ke model atau fitur yang diminta.", - "rateLimitExceeded": "Batas kecepatan API Cerebras terlampaui. Silakan tunggu sebelum membuat permintaan lain.", - "serverError": "Kesalahan server API Cerebras ({{status}}). Silakan coba lagi nanti.", - "genericError": "Kesalahan API Cerebras ({{status}}): {{message}}", - "noResponseBody": "Kesalahan API Cerebras: Tidak ada isi respons", - "completionError": "Kesalahan penyelesaian Cerebras: {{error}}" - }, "roo": { "authenticationRequired": "Penyedia Roo memerlukan autentikasi cloud. Silakan masuk ke Roo Code Cloud." 
}, @@ -205,10 +196,7 @@ "task_placeholder": "Ketik tugas kamu di sini" }, "settings": { - "providers": { - "groqApiKey": "Kunci API Groq", - "getGroqApiKey": "Dapatkan Kunci API Groq" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/it/common.json b/src/i18n/locales/it/common.json index e5fa6d68db3..b4e522cb732 100644 --- a/src/i18n/locales/it/common.json +++ b/src/i18n/locales/it/common.json @@ -111,15 +111,6 @@ "thinking_complete_safety": "(Pensiero completato, ma l'output è stato bloccato a causa delle impostazioni di sicurezza.)", "thinking_complete_recitation": "(Pensiero completato, ma l'output è stato bloccato a causa del controllo di recitazione.)" }, - "cerebras": { - "authenticationFailed": "Autenticazione API Cerebras fallita. Verifica che la tua chiave API sia valida e non scaduta.", - "accessForbidden": "Accesso API Cerebras negato. La tua chiave API potrebbe non avere accesso al modello o alla funzione richiesta.", - "rateLimitExceeded": "Limite di velocità API Cerebras superato. Attendi prima di fare un'altra richiesta.", - "serverError": "Errore del server API Cerebras ({{status}}). Riprova più tardi.", - "genericError": "Errore API Cerebras ({{status}}): {{message}}", - "noResponseBody": "Errore API Cerebras: Nessun corpo di risposta", - "completionError": "Errore di completamento Cerebras: {{error}}" - }, "roo": { "authenticationRequired": "Il provider Roo richiede l'autenticazione cloud. Accedi a Roo Code Cloud." }, @@ -205,10 +196,7 @@ "task_placeholder": "Scrivi il tuo compito qui" }, "settings": { - "providers": { - "groqApiKey": "Chiave API Groq", - "getGroqApiKey": "Ottieni chiave API Groq" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/ja/common.json b/src/i18n/locales/ja/common.json index 7ebe0de597d..7b63b6f7298 100644 --- a/src/i18n/locales/ja/common.json +++ b/src/i18n/locales/ja/common.json @@ -111,15 +111,6 @@ "thinking_complete_safety": "(思考完了、安全設定により出力ブロック)", "thinking_complete_recitation": "(思考完了、引用チェックにより出力ブロック)" }, - "cerebras": { - "authenticationFailed": "Cerebras API認証が失敗しました。APIキーが有効で期限切れではないことを確認してください。", - "accessForbidden": "Cerebras APIアクセスが禁止されています。あなたのAPIキーは要求されたモデルや機能にアクセスできない可能性があります。", - "rateLimitExceeded": "Cerebras APIレート制限を超過しました。別のリクエストを行う前にお待ちください。", - "serverError": "Cerebras APIサーバーエラー ({{status}})。しばらくしてからもう一度お試しください。", - "genericError": "Cerebras APIエラー ({{status}}): {{message}}", - "noResponseBody": "Cerebras APIエラー: レスポンスボディなし", - "completionError": "Cerebras完了エラー: {{error}}" - }, "roo": { "authenticationRequired": "Rooプロバイダーはクラウド認証が必要です。Roo Code Cloudにサインインしてください。" }, @@ -205,10 +196,7 @@ "task_placeholder": "タスクをここに入力してください" }, "settings": { - "providers": { - "groqApiKey": "Groq APIキー", - "getGroqApiKey": "Groq APIキーを取得" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/ko/common.json b/src/i18n/locales/ko/common.json index 0c1ed5ba518..fbde3225bb1 100644 --- a/src/i18n/locales/ko/common.json +++ b/src/i18n/locales/ko/common.json @@ -111,15 +111,6 @@ "thinking_complete_safety": "(생각 완료, 안전 설정으로 출력 차단됨)", "thinking_complete_recitation": "(생각 완료, 암송 확인으로 출력 차단됨)" }, - "cerebras": { - "authenticationFailed": "Cerebras API 인증에 실패했습니다. API 키가 유효하고 만료되지 않았는지 확인하세요.", - "accessForbidden": "Cerebras API 액세스가 금지되었습니다. API 키가 요청된 모델이나 기능에 액세스할 수 없을 수 있습니다.", - "rateLimitExceeded": "Cerebras API 속도 제한을 초과했습니다. 다른 요청을 하기 전에 기다리세요.", - "serverError": "Cerebras API 서버 오류 ({{status}}). 
나중에 다시 시도하세요.", - "genericError": "Cerebras API 오류 ({{status}}): {{message}}", - "noResponseBody": "Cerebras API 오류: 응답 본문 없음", - "completionError": "Cerebras 완료 오류: {{error}}" - }, "roo": { "authenticationRequired": "Roo 제공업체는 클라우드 인증이 필요합니다. Roo Code Cloud에 로그인하세요." }, @@ -205,10 +196,7 @@ "task_placeholder": "여기에 작업을 입력하세요" }, "settings": { - "providers": { - "groqApiKey": "Groq API 키", - "getGroqApiKey": "Groq API 키 받기" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/nl/common.json b/src/i18n/locales/nl/common.json index 0bbf5695364..eba274c96ec 100644 --- a/src/i18n/locales/nl/common.json +++ b/src/i18n/locales/nl/common.json @@ -111,15 +111,6 @@ "thinking_complete_safety": "(Nadenken voltooid, maar uitvoer is geblokkeerd vanwege veiligheidsinstellingen.)", "thinking_complete_recitation": "(Nadenken voltooid, maar uitvoer is geblokkeerd vanwege recitatiecontrole.)" }, - "cerebras": { - "authenticationFailed": "Cerebras API-authenticatie mislukt. Controleer of je API-sleutel geldig is en niet verlopen.", - "accessForbidden": "Cerebras API-toegang geweigerd. Je API-sleutel heeft mogelijk geen toegang tot het gevraagde model of de functie.", - "rateLimitExceeded": "Cerebras API-snelheidslimiet overschreden. Wacht voordat je een ander verzoek doet.", - "serverError": "Cerebras API-serverfout ({{status}}). Probeer het later opnieuw.", - "genericError": "Cerebras API-fout ({{status}}): {{message}}", - "noResponseBody": "Cerebras API-fout: Geen responslichaam", - "completionError": "Cerebras-voltooiingsfout: {{error}}" - }, "roo": { "authenticationRequired": "Roo provider vereist cloud authenticatie. Log in bij Roo Code Cloud." }, @@ -205,10 +196,7 @@ "task_placeholder": "Typ hier je taak" }, "settings": { - "providers": { - "groqApiKey": "Groq API-sleutel", - "getGroqApiKey": "Groq API-sleutel ophalen" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/pl/common.json b/src/i18n/locales/pl/common.json index 23bc09e4d78..20b568281bb 100644 --- a/src/i18n/locales/pl/common.json +++ b/src/i18n/locales/pl/common.json @@ -111,15 +111,6 @@ "thinking_complete_safety": "(Myślenie zakończone, ale dane wyjściowe zostały zablokowane przez ustawienia bezpieczeństwa.)", "thinking_complete_recitation": "(Myślenie zakończone, ale dane wyjściowe zostały zablokowane przez kontrolę recytacji.)" }, - "cerebras": { - "authenticationFailed": "Uwierzytelnianie API Cerebras nie powiodło się. Sprawdź, czy twój klucz API jest ważny i nie wygasł.", - "accessForbidden": "Dostęp do API Cerebras zabroniony. Twój klucz API może nie mieć dostępu do żądanego modelu lub funkcji.", - "rateLimitExceeded": "Przekroczono limit szybkości API Cerebras. Poczekaj przed wykonaniem kolejnego żądania.", - "serverError": "Błąd serwera API Cerebras ({{status}}). Spróbuj ponownie później.", - "genericError": "Błąd API Cerebras ({{status}}): {{message}}", - "noResponseBody": "Błąd API Cerebras: Brak treści odpowiedzi", - "completionError": "Błąd uzupełniania Cerebras: {{error}}" - }, "roo": { "authenticationRequired": "Dostawca Roo wymaga uwierzytelnienia w chmurze. Zaloguj się do Roo Code Cloud." 
}, @@ -205,10 +196,7 @@ "task_placeholder": "Wpisz swoje zadanie tutaj" }, "settings": { - "providers": { - "groqApiKey": "Klucz API Groq", - "getGroqApiKey": "Uzyskaj klucz API Groq" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/pt-BR/common.json b/src/i18n/locales/pt-BR/common.json index 737b322f78a..38abc8c8047 100644 --- a/src/i18n/locales/pt-BR/common.json +++ b/src/i18n/locales/pt-BR/common.json @@ -115,15 +115,6 @@ "thinking_complete_safety": "(Pensamento concluído, mas a saída foi bloqueada devido às configurações de segurança.)", "thinking_complete_recitation": "(Pensamento concluído, mas a saída foi bloqueada devido à verificação de recitação.)" }, - "cerebras": { - "authenticationFailed": "Falha na autenticação da API Cerebras. Verifique se sua chave de API é válida e não expirou.", - "accessForbidden": "Acesso à API Cerebras negado. Sua chave de API pode não ter acesso ao modelo ou recurso solicitado.", - "rateLimitExceeded": "Limite de taxa da API Cerebras excedido. Aguarde antes de fazer outra solicitação.", - "serverError": "Erro do servidor da API Cerebras ({{status}}). Tente novamente mais tarde.", - "genericError": "Erro da API Cerebras ({{status}}): {{message}}", - "noResponseBody": "Erro da API Cerebras: Sem corpo de resposta", - "completionError": "Erro de conclusão do Cerebras: {{error}}" - }, "roo": { "authenticationRequired": "O provedor Roo requer autenticação na nuvem. Faça login no Roo Code Cloud." }, @@ -205,10 +196,7 @@ "enter_valid_path": "Por favor, digite um caminho válido" }, "settings": { - "providers": { - "groqApiKey": "Chave de API Groq", - "getGroqApiKey": "Obter chave de API Groq" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/ru/common.json b/src/i18n/locales/ru/common.json index 7ac53199ba8..d124f597318 100644 --- a/src/i18n/locales/ru/common.json +++ b/src/i18n/locales/ru/common.json @@ -111,15 +111,6 @@ "thinking_complete_safety": "(Размышление завершено, но вывод заблокирован настройками безопасности.)", "thinking_complete_recitation": "(Размышление завершено, но вывод заблокирован проверкой цитирования.)" }, - "cerebras": { - "authenticationFailed": "Ошибка аутентификации Cerebras API. Убедитесь, что ваш API-ключ действителен и не истек.", - "accessForbidden": "Доступ к Cerebras API запрещен. Ваш API-ключ может не иметь доступа к запрашиваемой модели или функции.", - "rateLimitExceeded": "Превышен лимит скорости Cerebras API. Подождите перед отправкой следующего запроса.", - "serverError": "Ошибка сервера Cerebras API ({{status}}). Попробуйте позже.", - "genericError": "Ошибка Cerebras API ({{status}}): {{message}}", - "noResponseBody": "Ошибка Cerebras API: Нет тела ответа", - "completionError": "Ошибка завершения Cerebras: {{error}}" - }, "roo": { "authenticationRequired": "Провайдер Roo требует облачной аутентификации. Войдите в Roo Code Cloud." 
}, @@ -205,10 +196,7 @@ "task_placeholder": "Введите вашу задачу здесь" }, "settings": { - "providers": { - "groqApiKey": "Ключ API Groq", - "getGroqApiKey": "Получить ключ API Groq" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/tr/common.json b/src/i18n/locales/tr/common.json index fca268c0ff6..00dcf6fc33d 100644 --- a/src/i18n/locales/tr/common.json +++ b/src/i18n/locales/tr/common.json @@ -111,15 +111,6 @@ "thinking_complete_safety": "(Düşünme tamamlandı, ancak çıktı güvenlik ayarları nedeniyle engellendi.)", "thinking_complete_recitation": "(Düşünme tamamlandı, ancak çıktı okuma kontrolü nedeniyle engellendi.)" }, - "cerebras": { - "authenticationFailed": "Cerebras API kimlik doğrulama başarısız oldu. API anahtarınızın geçerli olduğunu ve süresi dolmadığını kontrol edin.", - "accessForbidden": "Cerebras API erişimi yasak. API anahtarınız istenen modele veya özelliğe erişimi olmayabilir.", - "rateLimitExceeded": "Cerebras API hız sınırı aşıldı. Başka bir istek yapmadan önce bekleyin.", - "serverError": "Cerebras API sunucu hatası ({{status}}). Lütfen daha sonra tekrar deneyin.", - "genericError": "Cerebras API Hatası ({{status}}): {{message}}", - "noResponseBody": "Cerebras API Hatası: Yanıt gövdesi yok", - "completionError": "Cerebras tamamlama hatası: {{error}}" - }, "roo": { "authenticationRequired": "Roo sağlayıcısı bulut kimlik doğrulaması gerektirir. Lütfen Roo Code Cloud'a giriş yapın." }, @@ -205,10 +196,7 @@ "task_placeholder": "Görevini buraya yaz" }, "settings": { - "providers": { - "groqApiKey": "Groq API Anahtarı", - "getGroqApiKey": "Groq API Anahtarı Al" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/vi/common.json b/src/i18n/locales/vi/common.json index bd9bb72b474..decd4ff53ef 100644 --- a/src/i18n/locales/vi/common.json +++ b/src/i18n/locales/vi/common.json @@ -111,15 +111,6 @@ "thinking_complete_safety": "(Đã suy nghĩ xong nhưng kết quả bị chặn do cài đặt an toàn.)", "thinking_complete_recitation": "(Đã suy nghĩ xong nhưng kết quả bị chặn do kiểm tra trích dẫn.)" }, - "cerebras": { - "authenticationFailed": "Xác thực API Cerebras thất bại. Vui lòng kiểm tra khóa API của bạn có hợp lệ và chưa hết hạn.", - "accessForbidden": "Truy cập API Cerebras bị từ chối. Khóa API của bạn có thể không có quyền truy cập vào mô hình hoặc tính năng được yêu cầu.", - "rateLimitExceeded": "Vượt quá giới hạn tốc độ API Cerebras. Vui lòng chờ trước khi thực hiện yêu cầu khác.", - "serverError": "Lỗi máy chủ API Cerebras ({{status}}). Vui lòng thử lại sau.", - "genericError": "Lỗi API Cerebras ({{status}}): {{message}}", - "noResponseBody": "Lỗi API Cerebras: Không có nội dung phản hồi", - "completionError": "Lỗi hoàn thành Cerebras: {{error}}" - }, "roo": { "authenticationRequired": "Nhà cung cấp Roo yêu cầu xác thực đám mây. Vui lòng đăng nhập vào Roo Code Cloud." 
}, @@ -205,10 +196,7 @@ "task_placeholder": "Nhập nhiệm vụ của bạn ở đây" }, "settings": { - "providers": { - "groqApiKey": "Khóa API Groq", - "getGroqApiKey": "Lấy khóa API Groq" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/zh-CN/common.json b/src/i18n/locales/zh-CN/common.json index 494c246d658..6df1f78b167 100644 --- a/src/i18n/locales/zh-CN/common.json +++ b/src/i18n/locales/zh-CN/common.json @@ -116,15 +116,6 @@ "thinking_complete_safety": "(思考完成,但由于安全设置输出被阻止。)", "thinking_complete_recitation": "(思考完成,但由于引用检查输出被阻止。)" }, - "cerebras": { - "authenticationFailed": "Cerebras API 身份验证失败。请检查你的 API 密钥是否有效且未过期。", - "accessForbidden": "Cerebras API 访问被禁止。你的 API 密钥可能无法访问请求的模型或功能。", - "rateLimitExceeded": "Cerebras API 速率限制已超出。请稍等后再发起另一个请求。", - "serverError": "Cerebras API 服务器错误 ({{status}})。请稍后重试。", - "genericError": "Cerebras API 错误 ({{status}}):{{message}}", - "noResponseBody": "Cerebras API 错误:无响应主体", - "completionError": "Cerebras 完成错误:{{error}}" - }, "roo": { "authenticationRequired": "Roo 提供商需要云认证。请登录 Roo Code Cloud。" }, @@ -210,10 +201,7 @@ "task_placeholder": "在这里输入任务" }, "settings": { - "providers": { - "groqApiKey": "Groq API 密钥", - "getGroqApiKey": "获取 Groq API 密钥" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/i18n/locales/zh-TW/common.json b/src/i18n/locales/zh-TW/common.json index 572cdb46519..be4a76fc5b9 100644 --- a/src/i18n/locales/zh-TW/common.json +++ b/src/i18n/locales/zh-TW/common.json @@ -110,15 +110,6 @@ "thinking_complete_safety": "(思考完成,但由於安全設定輸出被阻止。)", "thinking_complete_recitation": "(思考完成,但由於引用檢查輸出被阻止。)" }, - "cerebras": { - "authenticationFailed": "Cerebras API 驗證失敗。請檢查您的 API 金鑰是否有效且未過期。", - "accessForbidden": "Cerebras API 存取被拒絕。您的 API 金鑰可能無法存取所請求的模型或功能。", - "rateLimitExceeded": "Cerebras API 速率限制已超出。請稍候再發出另一個請求。", - "serverError": "Cerebras API 伺服器錯誤 ({{status}})。請稍後重試。", - "genericError": "Cerebras API 錯誤 ({{status}}):{{message}}", - "noResponseBody": "Cerebras API 錯誤:無回應主體", - "completionError": "Cerebras 完成錯誤:{{error}}" - }, "roo": { "authenticationRequired": "Roo 提供者需要雲端認證。請登入 Roo Code Cloud。" }, @@ -205,10 +196,7 @@ "task_placeholder": "在這裡輸入工作" }, "settings": { - "providers": { - "groqApiKey": "Groq API 金鑰", - "getGroqApiKey": "取得 Groq API 金鑰" - } + "providers": {} }, "customModes": { "errors": { diff --git a/src/package.json b/src/package.json index 70cc99ba731..6b6ac2c2d50 100644 --- a/src/package.json +++ b/src/package.json @@ -452,12 +452,10 @@ "dependencies": { "@ai-sdk/amazon-bedrock": "^4.0.51", "@ai-sdk/baseten": "^1.0.31", - "@ai-sdk/cerebras": "^2.0.31", "@ai-sdk/deepseek": "^2.0.18", "@ai-sdk/fireworks": "^2.0.32", "@ai-sdk/google": "^3.0.22", "@ai-sdk/google-vertex": "^4.0.45", - "@ai-sdk/groq": "^3.0.22", "@ai-sdk/mistral": "^3.0.19", "@ai-sdk/xai": "^3.0.48", "@anthropic-ai/sdk": "^0.37.0", diff --git a/src/shared/ProfileValidator.ts b/src/shared/ProfileValidator.ts index 3ca5b5616d0..ae58763d6ac 100644 --- a/src/shared/ProfileValidator.ts +++ b/src/shared/ProfileValidator.ts @@ -61,16 +61,11 @@ export class ProfileValidator { case "mistral": case "deepseek": case "xai": - case "groq": case "sambanova": - case "chutes": case "fireworks": - case "featherless": return profile.apiModelId case "litellm": return profile.litellmModelId - case "unbound": - return profile.unboundModelId case "lmstudio": return profile.lmStudioModelId case "vscode-lm": @@ -82,10 +77,6 @@ export class ProfileValidator { return profile.ollamaModelId case "requesty": return profile.requestyModelId - case 
"io-intelligence": - return profile.ioIntelligenceModelId - case "deepinfra": - return profile.deepInfraModelId case "fake-ai": default: return undefined diff --git a/src/shared/__tests__/ProfileValidator.spec.ts b/src/shared/__tests__/ProfileValidator.spec.ts index 04bd171696e..9bf913cdc27 100644 --- a/src/shared/__tests__/ProfileValidator.spec.ts +++ b/src/shared/__tests__/ProfileValidator.spec.ts @@ -176,11 +176,8 @@ describe("ProfileValidator", () => { "mistral", "deepseek", "xai", - "groq", - "chutes", "sambanova", "fireworks", - "featherless", ] apiModelProviders.forEach((provider) => { @@ -216,22 +213,6 @@ describe("ProfileValidator", () => { expect(ProfileValidator.isProfileAllowed(profile, allowList)).toBe(true) }) - // Test for io-intelligence provider which uses ioIntelligenceModelId - it(`should extract ioIntelligenceModelId for io-intelligence provider`, () => { - const allowList: OrganizationAllowList = { - allowAll: false, - providers: { - "io-intelligence": { allowAll: false, models: ["test-model"] }, - }, - } - const profile: ProviderSettings = { - apiProvider: "io-intelligence" as any, - ioIntelligenceModelId: "test-model", - } - - expect(ProfileValidator.isProfileAllowed(profile, allowList)).toBe(true) - }) - it("should extract vsCodeLmModelSelector.id for vscode-lm provider", () => { const allowList: OrganizationAllowList = { allowAll: false, @@ -247,21 +228,6 @@ describe("ProfileValidator", () => { expect(ProfileValidator.isProfileAllowed(profile, allowList)).toBe(true) }) - it("should extract unboundModelId for unbound provider", () => { - const allowList: OrganizationAllowList = { - allowAll: false, - providers: { - unbound: { allowAll: false, models: ["unbound-model"] }, - }, - } - const profile: ProviderSettings = { - apiProvider: "unbound", - unboundModelId: "unbound-model", - } - - expect(ProfileValidator.isProfileAllowed(profile, allowList)).toBe(true) - }) - it("should extract lmStudioModelId for lmstudio provider", () => { const allowList: OrganizationAllowList = { allowAll: false, diff --git a/src/shared/__tests__/checkExistApiConfig.spec.ts b/src/shared/__tests__/checkExistApiConfig.spec.ts index 55dae005f25..d6dd1db24f3 100644 --- a/src/shared/__tests__/checkExistApiConfig.spec.ts +++ b/src/shared/__tests__/checkExistApiConfig.spec.ts @@ -55,7 +55,6 @@ describe("checkExistKey", () => { mistralApiKey: undefined, vsCodeLmModelSelector: undefined, requestyApiKey: undefined, - unboundApiKey: undefined, } expect(checkExistKey(config)).toBe(false) }) diff --git a/src/shared/api.ts b/src/shared/api.ts index b2ba1e35420..7e999e12890 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -171,16 +171,11 @@ type CommonFetchParams = { const dynamicProviderExtras = { openrouter: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type "vercel-ai-gateway": {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type - huggingface: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type litellm: {} as { apiKey: string; baseUrl: string }, - deepinfra: {} as { apiKey?: string; baseUrl?: string }, - "io-intelligence": {} as { apiKey: string }, requesty: {} as { apiKey?: string; baseUrl?: string }, - unbound: {} as { apiKey?: string }, ollama: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type lmstudio: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type roo: {} as { apiKey?: string; baseUrl?: string }, - chutes: {} as { apiKey?: string }, } as const satisfies Record // Build the 
dynamic options union from the map, intersected with CommonFetchParams diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 5f8e0998350..87544529a0e 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -10,21 +10,16 @@ import { DEFAULT_CONSECUTIVE_MISTAKE_LIMIT, openRouterDefaultModelId, requestyDefaultModelId, - unboundDefaultModelId, litellmDefaultModelId, openAiNativeDefaultModelId, openAiCodexDefaultModelId, anthropicDefaultModelId, - doubaoDefaultModelId, qwenCodeDefaultModelId, geminiDefaultModelId, deepSeekDefaultModelId, moonshotDefaultModelId, mistralDefaultModelId, xaiDefaultModelId, - groqDefaultModelId, - cerebrasDefaultModelId, - chutesDefaultModelId, basetenDefaultModelId, bedrockDefaultModelId, vertexDefaultModelId, @@ -32,11 +27,8 @@ import { internationalZAiDefaultModelId, mainlandZAiDefaultModelId, fireworksDefaultModelId, - featherlessDefaultModelId, - ioIntelligenceDefaultModelId, rooDefaultModelId, vercelAiGatewayDefaultModelId, - deepInfraDefaultModelId, minimaxDefaultModelId, } from "@roo-code/types" @@ -75,14 +67,8 @@ import { Anthropic, Baseten, Bedrock, - Cerebras, - Chutes, DeepSeek, - Doubao, Gemini, - Groq, - HuggingFace, - IOIntelligence, LMStudio, LiteLLM, Mistral, @@ -96,15 +82,12 @@ import { Requesty, Roo, SambaNova, - Unbound, Vertex, VSCodeLM, XAI, ZAi, Fireworks, - Featherless, VercelAiGateway, - DeepInfra, MiniMax, } from "./providers" @@ -243,11 +226,7 @@ const ApiOptions = ({ vscode.postMessage({ type: "requestLmStudioModels" }) } else if (selectedProvider === "vscode-lm") { vscode.postMessage({ type: "requestVsCodeLmModels" }) - } else if ( - selectedProvider === "litellm" || - selectedProvider === "deepinfra" || - selectedProvider === "roo" - ) { + } else if (selectedProvider === "litellm" || selectedProvider === "roo") { vscode.postMessage({ type: "requestRouterModels" }) } }, @@ -261,8 +240,6 @@ const ApiOptions = ({ apiConfiguration?.lmStudioBaseUrl, apiConfiguration?.litellmBaseUrl, apiConfiguration?.litellmApiKey, - apiConfiguration?.deepInfraApiKey, - apiConfiguration?.deepInfraBaseUrl, customHeaders, ], ) @@ -282,7 +259,7 @@ const ApiOptions = ({ // It would be much easier to have a single attribute that stores // the modelId, but we have a separate attribute for each of - // OpenRouter, Unbound, and Requesty. + // OpenRouter and Requesty. // If you switch to one of these providers and the corresponding // modelId is not set then you immediately end up in an error state. 
// To address that we set the modelId to the default value for th @@ -336,25 +313,19 @@ const ApiOptions = ({ } > > = { - deepinfra: { field: "deepInfraModelId", default: deepInfraDefaultModelId }, openrouter: { field: "openRouterModelId", default: openRouterDefaultModelId }, - unbound: { field: "unboundModelId", default: unboundDefaultModelId }, requesty: { field: "requestyModelId", default: requestyDefaultModelId }, litellm: { field: "litellmModelId", default: litellmDefaultModelId }, anthropic: { field: "apiModelId", default: anthropicDefaultModelId }, - cerebras: { field: "apiModelId", default: cerebrasDefaultModelId }, "openai-codex": { field: "apiModelId", default: openAiCodexDefaultModelId }, "qwen-code": { field: "apiModelId", default: qwenCodeDefaultModelId }, "openai-native": { field: "apiModelId", default: openAiNativeDefaultModelId }, gemini: { field: "apiModelId", default: geminiDefaultModelId }, deepseek: { field: "apiModelId", default: deepSeekDefaultModelId }, - doubao: { field: "apiModelId", default: doubaoDefaultModelId }, moonshot: { field: "apiModelId", default: moonshotDefaultModelId }, minimax: { field: "apiModelId", default: minimaxDefaultModelId }, mistral: { field: "apiModelId", default: mistralDefaultModelId }, xai: { field: "apiModelId", default: xaiDefaultModelId }, - groq: { field: "apiModelId", default: groqDefaultModelId }, - chutes: { field: "apiModelId", default: chutesDefaultModelId }, baseten: { field: "apiModelId", default: basetenDefaultModelId }, bedrock: { field: "apiModelId", default: bedrockDefaultModelId }, vertex: { field: "apiModelId", default: vertexDefaultModelId }, @@ -367,8 +338,6 @@ const ApiOptions = ({ : internationalZAiDefaultModelId, }, fireworks: { field: "apiModelId", default: fireworksDefaultModelId }, - featherless: { field: "apiModelId", default: featherlessDefaultModelId }, - "io-intelligence": { field: "ioIntelligenceModelId", default: ioIntelligenceDefaultModelId }, roo: { field: "apiModelId", default: rooDefaultModelId }, "vercel-ai-gateway": { field: "vercelAiGatewayModelId", default: vercelAiGatewayDefaultModelId }, openai: { field: "openAiModelId" }, @@ -526,29 +495,6 @@ const ApiOptions = ({ /> )} - {selectedProvider === "unbound" && ( - - )} - - {selectedProvider === "deepinfra" && ( - - )} - {selectedProvider === "anthropic" && ( )} - {selectedProvider === "doubao" && ( - - )} - {selectedProvider === "qwen-code" && ( )} - {selectedProvider === "groq" && ( - - )} - - {selectedProvider === "huggingface" && ( - - )} - - {selectedProvider === "cerebras" && ( - - )} - - {selectedProvider === "chutes" && ( - - )} - {selectedProvider === "litellm" && ( )} - {selectedProvider === "io-intelligence" && ( - - )} - {selectedProvider === "vercel-ai-gateway" && ( )} - {selectedProvider === "featherless" && ( - - )} - {/* Generic model picker for providers with static models */} {shouldUseGenericModelPicker(selectedProvider) && ( <> diff --git a/webview-ui/src/components/settings/ModelPicker.tsx b/webview-ui/src/components/settings/ModelPicker.tsx index c1bfa34beb8..82b3c41f671 100644 --- a/webview-ui/src/components/settings/ModelPicker.tsx +++ b/webview-ui/src/components/settings/ModelPicker.tsx @@ -29,12 +29,9 @@ import { ApiErrorMessage } from "./ApiErrorMessage" type ModelIdKey = keyof Pick< ProviderSettings, | "openRouterModelId" - | "unboundModelId" | "requestyModelId" | "openAiModelId" | "litellmModelId" - | "deepInfraModelId" - | "ioIntelligenceModelId" | "vercelAiGatewayModelId" | "apiModelId" | "ollamaModelId" diff --git 
a/webview-ui/src/components/settings/__tests__/ApiOptions.provider-filtering.spec.tsx b/webview-ui/src/components/settings/__tests__/ApiOptions.provider-filtering.spec.tsx index 544bd84a2a5..f650424fd0f 100644 --- a/webview-ui/src/components/settings/__tests__/ApiOptions.provider-filtering.spec.tsx +++ b/webview-ui/src/components/settings/__tests__/ApiOptions.provider-filtering.spec.tsx @@ -167,9 +167,7 @@ describe("ApiOptions Provider Filtering", () => { expect(providerValues).toContain("ollama") expect(providerValues).toContain("lmstudio") expect(providerValues).toContain("litellm") - expect(providerValues).toContain("unbound") expect(providerValues).toContain("requesty") - expect(providerValues).toContain("io-intelligence") }) it("should filter static providers based on organization allow list", () => { diff --git a/webview-ui/src/components/settings/constants.ts b/webview-ui/src/components/settings/constants.ts index aceceb9dd9d..640f5c6be2c 100644 --- a/webview-ui/src/components/settings/constants.ts +++ b/webview-ui/src/components/settings/constants.ts @@ -3,7 +3,6 @@ import { type ModelInfo, anthropicModels, bedrockModels, - cerebrasModels, deepSeekModels, moonshotModels, geminiModels, @@ -13,12 +12,9 @@ import { qwenCodeModels, vertexModels, xaiModels, - groqModels, sambaNovaModels, - doubaoModels, internationalZAiModels, fireworksModels, - featherlessModels, minimaxModels, basetenModels, } from "@roo-code/types" @@ -26,9 +22,7 @@ import { export const MODELS_BY_PROVIDER: Partial>> = { anthropic: anthropicModels, bedrock: bedrockModels, - cerebras: cerebrasModels, deepseek: deepSeekModels, - doubao: doubaoModels, moonshot: moonshotModels, gemini: geminiModels, mistral: mistralModels, @@ -37,22 +31,17 @@ export const MODELS_BY_PROVIDER: Partial void -} - -export const Cerebras = ({ apiConfiguration, setApiConfigurationField }: CerebrasProps) => { - const { t } = useAppTranslation() - - const handleInputChange = useCallback( - ( - field: K, - transform: (event: E) => ProviderSettings[K] = inputEventTransform, - ) => - (event: E | Event) => { - setApiConfigurationField(field, transform(event as E)) - }, - [setApiConfigurationField], - ) - - return ( - <> - - - -
- {t("settings:providers.apiKeyStorageNotice")} -
- {!apiConfiguration?.cerebrasApiKey && ( - - {t("settings:providers.getCerebrasApiKey")} - - )} - - ) -} diff --git a/webview-ui/src/components/settings/providers/Chutes.tsx b/webview-ui/src/components/settings/providers/Chutes.tsx deleted file mode 100644 index 90962e5ccb2..00000000000 --- a/webview-ui/src/components/settings/providers/Chutes.tsx +++ /dev/null @@ -1,76 +0,0 @@ -import { useCallback } from "react" -import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react" - -import type { ProviderSettings, OrganizationAllowList, RouterModels } from "@roo-code/types" -import { chutesDefaultModelId } from "@roo-code/types" - -import { useAppTranslation } from "@src/i18n/TranslationContext" -import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink" - -import { ModelPicker } from "../ModelPicker" -import { inputEventTransform } from "../transforms" - -type ChutesProps = { - apiConfiguration: ProviderSettings - setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void - routerModels?: RouterModels - organizationAllowList: OrganizationAllowList - modelValidationError?: string - simplifySettings?: boolean -} - -export const Chutes = ({ - apiConfiguration, - setApiConfigurationField, - routerModels, - organizationAllowList, - modelValidationError, - simplifySettings, -}: ChutesProps) => { - const { t } = useAppTranslation() - - const handleInputChange = useCallback( - ( - field: K, - transform: (event: E) => ProviderSettings[K] = inputEventTransform, - ) => - (event: E | Event) => { - setApiConfigurationField(field, transform(event as E)) - }, - [setApiConfigurationField], - ) - - return ( - <> - - - -
- {t("settings:providers.apiKeyStorageNotice")} -
- {!apiConfiguration?.chutesApiKey && ( - - {t("settings:providers.getChutesApiKey")} - - )} - - - - ) -} diff --git a/webview-ui/src/components/settings/providers/DeepInfra.tsx b/webview-ui/src/components/settings/providers/DeepInfra.tsx deleted file mode 100644 index fbff11a1d38..00000000000 --- a/webview-ui/src/components/settings/providers/DeepInfra.tsx +++ /dev/null @@ -1,100 +0,0 @@ -import { useCallback, useEffect, useState } from "react" -import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react" - -import { - type OrganizationAllowList, - type ProviderSettings, - type RouterModels, - deepInfraDefaultModelId, -} from "@roo-code/types" - -import { vscode } from "@src/utils/vscode" -import { useAppTranslation } from "@src/i18n/TranslationContext" -import { Button } from "@src/components/ui" - -import { inputEventTransform } from "../transforms" -import { ModelPicker } from "../ModelPicker" - -type DeepInfraProps = { - apiConfiguration: ProviderSettings - setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void - routerModels?: RouterModels - refetchRouterModels: () => void - organizationAllowList: OrganizationAllowList - modelValidationError?: string - simplifySettings?: boolean -} - -export const DeepInfra = ({ - apiConfiguration, - setApiConfigurationField, - routerModels, - refetchRouterModels, - organizationAllowList, - modelValidationError, - simplifySettings, -}: DeepInfraProps) => { - const { t } = useAppTranslation() - - const [didRefetch, setDidRefetch] = useState() - - const handleInputChange = useCallback( - ( - field: K, - transform: (event: E) => ProviderSettings[K] = inputEventTransform, - ) => - (event: E | Event) => { - setApiConfigurationField(field, transform(event as E)) - }, - [setApiConfigurationField], - ) - - useEffect(() => { - // When base URL or API key changes, trigger a silent refresh of models - // The outer ApiOptions debounces and sends requestRouterModels; this keeps UI responsive - }, [apiConfiguration.deepInfraBaseUrl, apiConfiguration.deepInfraApiKey]) - - return ( - <> - - - - - - {didRefetch && ( -
- {t("settings:providers.refreshModels.hint")} -
- )} - - - - ) -} diff --git a/webview-ui/src/components/settings/providers/Doubao.tsx b/webview-ui/src/components/settings/providers/Doubao.tsx deleted file mode 100644 index ed56529e385..00000000000 --- a/webview-ui/src/components/settings/providers/Doubao.tsx +++ /dev/null @@ -1,53 +0,0 @@ -import { useCallback } from "react" -import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react" - -import type { ProviderSettings } from "@roo-code/types" - -import { useAppTranslation } from "@src/i18n/TranslationContext" -import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink" - -import { inputEventTransform } from "../transforms" - -type DoubaoProps = { - apiConfiguration: ProviderSettings - setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void - simplifySettings?: boolean -} - -export const Doubao = ({ apiConfiguration, setApiConfigurationField }: DoubaoProps) => { - const { t } = useAppTranslation() - - const handleInputChange = useCallback( - ( - field: K, - transform: (event: E) => ProviderSettings[K] = inputEventTransform, - ) => - (event: E | Event) => { - setApiConfigurationField(field, transform(event as E)) - }, - [setApiConfigurationField], - ) - - return ( - <> - - - -
- {t("settings:providers.apiKeyStorageNotice")} -
- {!apiConfiguration?.doubaoApiKey && ( - - {t("settings:providers.getDoubaoApiKey")} - - )} - - ) -} diff --git a/webview-ui/src/components/settings/providers/Featherless.tsx b/webview-ui/src/components/settings/providers/Featherless.tsx deleted file mode 100644 index 264e295dcca..00000000000 --- a/webview-ui/src/components/settings/providers/Featherless.tsx +++ /dev/null @@ -1,50 +0,0 @@ -import { useCallback } from "react" -import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react" - -import type { ProviderSettings } from "@roo-code/types" - -import { useAppTranslation } from "@src/i18n/TranslationContext" -import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink" - -import { inputEventTransform } from "../transforms" - -type FeatherlessProps = { - apiConfiguration: ProviderSettings - setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void -} - -export const Featherless = ({ apiConfiguration, setApiConfigurationField }: FeatherlessProps) => { - const { t } = useAppTranslation() - - const handleInputChange = useCallback( - ( - field: K, - transform: (event: E) => ProviderSettings[K] = inputEventTransform, - ) => - (event: E | Event) => { - setApiConfigurationField(field, transform(event as E)) - }, - [setApiConfigurationField], - ) - - return ( - <> - - - -
- {t("settings:providers.apiKeyStorageNotice")} -
- {!apiConfiguration?.featherlessApiKey && ( - - {t("settings:providers.getFeatherlessApiKey")} - - )} - - ) -} diff --git a/webview-ui/src/components/settings/providers/Groq.tsx b/webview-ui/src/components/settings/providers/Groq.tsx deleted file mode 100644 index a8a910d1ace..00000000000 --- a/webview-ui/src/components/settings/providers/Groq.tsx +++ /dev/null @@ -1,50 +0,0 @@ -import { useCallback } from "react" -import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react" - -import type { ProviderSettings } from "@roo-code/types" - -import { useAppTranslation } from "@src/i18n/TranslationContext" -import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink" - -import { inputEventTransform } from "../transforms" - -type GroqProps = { - apiConfiguration: ProviderSettings - setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void -} - -export const Groq = ({ apiConfiguration, setApiConfigurationField }: GroqProps) => { - const { t } = useAppTranslation() - - const handleInputChange = useCallback( - ( - field: K, - transform: (event: E) => ProviderSettings[K] = inputEventTransform, - ) => - (event: E | Event) => { - setApiConfigurationField(field, transform(event as E)) - }, - [setApiConfigurationField], - ) - - return ( - <> - - - -
- {t("settings:providers.apiKeyStorageNotice")} -
- {!apiConfiguration?.groqApiKey && ( - - {t("settings:providers.getGroqApiKey")} - - )} - - ) -} diff --git a/webview-ui/src/components/settings/providers/HuggingFace.tsx b/webview-ui/src/components/settings/providers/HuggingFace.tsx deleted file mode 100644 index 2a587df3bd2..00000000000 --- a/webview-ui/src/components/settings/providers/HuggingFace.tsx +++ /dev/null @@ -1,277 +0,0 @@ -import { useCallback, useState, useEffect, useMemo } from "react" -import { useEvent } from "react-use" -import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react" - -import type { ProviderSettings, ExtensionMessage } from "@roo-code/types" - -import { vscode } from "@src/utils/vscode" -import { useAppTranslation } from "@src/i18n/TranslationContext" -import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink" -import { SearchableSelect, type SearchableSelectOption } from "@src/components/ui" -import { cn } from "@src/lib/utils" -import { formatPrice } from "@/utils/formatPrice" - -import { inputEventTransform } from "../transforms" - -type HuggingFaceModel = { - id: string - object: string - created: number - owned_by: string - providers: Array<{ - provider: string - status: "live" | "staging" | "error" - supports_tools?: boolean - supports_structured_output?: boolean - context_length?: number - pricing?: { - input: number - output: number - } - }> -} - -type HuggingFaceProps = { - apiConfiguration: ProviderSettings - setApiConfigurationField: ( - field: keyof ProviderSettings, - value: ProviderSettings[keyof ProviderSettings], - isUserAction?: boolean, - ) => void -} - -export const HuggingFace = ({ apiConfiguration, setApiConfigurationField }: HuggingFaceProps) => { - const { t } = useAppTranslation() - const [models, setModels] = useState([]) - const [loading, setLoading] = useState(false) - const [selectedProvider, setSelectedProvider] = useState( - apiConfiguration?.huggingFaceInferenceProvider || "auto", - ) - - const handleInputChange = useCallback( - ( - field: K, - transform: (event: E) => ProviderSettings[K] = inputEventTransform, - ) => - (event: E | Event) => { - setApiConfigurationField(field, transform(event as E)) - }, - [setApiConfigurationField], - ) - - // Fetch models when component mounts. - useEffect(() => { - setLoading(true) - vscode.postMessage({ type: "requestHuggingFaceModels" }) - }, []) - - // Handle messages from extension. 
- const onMessage = useCallback((event: MessageEvent) => { - const message: ExtensionMessage = event.data - - switch (message.type) { - case "huggingFaceModels": - setModels(message.huggingFaceModels?.sort((a, b) => a.id.localeCompare(b.id)) || []) - setLoading(false) - break - } - }, []) - - useEvent("message", onMessage) - - // Get current model and its providers - const currentModel = models.find((m) => m.id === apiConfiguration?.huggingFaceModelId) - const availableProviders = useMemo(() => currentModel?.providers || [], [currentModel?.providers]) - - // Set default provider when model changes - useEffect(() => { - if (currentModel && availableProviders.length > 0) { - const savedProvider = apiConfiguration?.huggingFaceInferenceProvider - if (savedProvider) { - // Use saved provider if it exists - setSelectedProvider(savedProvider) - } else { - const currentProvider = availableProviders.find((p) => p.provider === selectedProvider) - if (!currentProvider) { - // Set to "auto" as default - const defaultProvider = "auto" - setSelectedProvider(defaultProvider) - setApiConfigurationField("huggingFaceInferenceProvider", defaultProvider, false) // false = automatic default - } - } - } - }, [ - currentModel, - availableProviders, - selectedProvider, - apiConfiguration?.huggingFaceInferenceProvider, - setApiConfigurationField, - ]) - - const handleModelSelect = (modelId: string) => { - setApiConfigurationField("huggingFaceModelId", modelId) - // Reset provider selection when model changes - const defaultProvider = "auto" - setSelectedProvider(defaultProvider) - setApiConfigurationField("huggingFaceInferenceProvider", defaultProvider) - } - - const handleProviderSelect = (provider: string) => { - setSelectedProvider(provider) - setApiConfigurationField("huggingFaceInferenceProvider", provider) - } - - // Format provider name for display - const formatProviderName = (provider: string) => { - const nameMap: Record = { - sambanova: "SambaNova", - "fireworks-ai": "Fireworks", - together: "Together AI", - nebius: "Nebius AI Studio", - hyperbolic: "Hyperbolic", - novita: "Novita", - cohere: "Cohere", - "hf-inference": "HF Inference API", - replicate: "Replicate", - } - return nameMap[provider] || provider.charAt(0).toUpperCase() + provider.slice(1) - } - - // Get current provider - const currentProvider = useMemo(() => { - if (!currentModel || !selectedProvider || selectedProvider === "auto") return null - return currentModel.providers.find((p) => p.provider === selectedProvider) - }, [currentModel, selectedProvider]) - - // Get model capabilities based on current provider - const modelCapabilities = useMemo(() => { - if (!currentModel) return null - - // For now, assume text-only models since we don't have pipeline_tag in new API - // This could be enhanced by checking model name patterns or adding vision support detection - const supportsImages = false - - // Use provider-specific capabilities if a specific provider is selected - const maxTokens = - currentProvider?.context_length || currentModel.providers.find((p) => p.context_length)?.context_length - const supportsTools = currentProvider?.supports_tools || currentModel.providers.some((p) => p.supports_tools) - - return { - supportsImages, - maxTokens, - supportsTools, - } - }, [currentModel, currentProvider]) - - return ( - <> - - - - -
- {t("settings:providers.apiKeyStorageNotice")} -
- - {!apiConfiguration?.huggingFaceApiKey && ( - - {t("settings:providers.getHuggingFaceApiKey")} - - )} - -
- - - ({ - value: model.id, - label: model.id, - }), - )} - placeholder={t("settings:providers.huggingFaceSelectModel")} - searchPlaceholder={t("settings:providers.huggingFaceSearchModels")} - emptyMessage={t("settings:providers.huggingFaceNoModelsFound")} - disabled={loading} - /> -
- - {currentModel && availableProviders.length > 0 && ( -
- - ({ - value: mapping.provider, - label: `${formatProviderName(mapping.provider)} (${mapping.status})`, - }), - ), - ]} - placeholder={t("settings:providers.huggingFaceSelectProvider")} - searchPlaceholder={t("settings:providers.huggingFaceSearchProviders")} - emptyMessage={t("settings:providers.huggingFaceNoProvidersFound")} - /> -
- )} - - {/* Model capabilities */} - {currentModel && modelCapabilities && ( -
-
- - {modelCapabilities.supportsImages - ? t("settings:modelInfo.supportsImages") - : t("settings:modelInfo.noImages")} -
- {modelCapabilities.maxTokens && ( -
- {t("settings:modelInfo.maxOutput")}:{" "} - {modelCapabilities.maxTokens.toLocaleString()} tokens -
- )} - {currentProvider?.pricing && ( - <> -
- {t("settings:modelInfo.inputPrice")}:{" "} - {formatPrice(currentProvider.pricing.input)} / 1M tokens -
-
- {t("settings:modelInfo.outputPrice")}:{" "} - {formatPrice(currentProvider.pricing.output)} / 1M tokens -
- - )} -
- )} - - ) -} diff --git a/webview-ui/src/components/settings/providers/IOIntelligence.tsx b/webview-ui/src/components/settings/providers/IOIntelligence.tsx deleted file mode 100644 index f54d77b14ae..00000000000 --- a/webview-ui/src/components/settings/providers/IOIntelligence.tsx +++ /dev/null @@ -1,80 +0,0 @@ -import { useCallback } from "react" -import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react" - -import { - type ProviderSettings, - type OrganizationAllowList, - ioIntelligenceDefaultModelId, - ioIntelligenceModels, -} from "@roo-code/types" - -import { useAppTranslation } from "@src/i18n/TranslationContext" -import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink" -import { useExtensionState } from "@src/context/ExtensionStateContext" - -import { ModelPicker } from "../ModelPicker" - -import { inputEventTransform } from "../transforms" - -type IOIntelligenceProps = { - apiConfiguration: ProviderSettings - setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void - organizationAllowList: OrganizationAllowList - modelValidationError?: string - simplifySettings?: boolean -} - -export const IOIntelligence = ({ - apiConfiguration, - setApiConfigurationField, - organizationAllowList, - modelValidationError, - simplifySettings, -}: IOIntelligenceProps) => { - const { t } = useAppTranslation() - const { routerModels } = useExtensionState() - - const handleInputChange = useCallback( - ( - field: K, - transform: (event: E) => ProviderSettings[K] = inputEventTransform, - ) => - (event: E | Event) => { - setApiConfigurationField(field, transform(event as E)) - }, - [setApiConfigurationField], - ) - - return ( - <> - - - -
- {t("settings:providers.apiKeyStorageNotice")} -
- {!apiConfiguration?.ioIntelligenceApiKey && ( - - {t("settings:providers.getIoIntelligenceApiKey")} - - )} - - - ) -} diff --git a/webview-ui/src/components/settings/providers/Unbound.tsx b/webview-ui/src/components/settings/providers/Unbound.tsx deleted file mode 100644 index 639f1cefab8..00000000000 --- a/webview-ui/src/components/settings/providers/Unbound.tsx +++ /dev/null @@ -1,197 +0,0 @@ -import { useCallback, useState, useRef } from "react" -import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react" -import { useQueryClient } from "@tanstack/react-query" - -import { - type ProviderSettings, - type OrganizationAllowList, - type RouterModels, - unboundDefaultModelId, -} from "@roo-code/types" - -import { useAppTranslation } from "@src/i18n/TranslationContext" -import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink" -import { vscode } from "@src/utils/vscode" -import { Button } from "@src/components/ui" - -import { inputEventTransform } from "../transforms" -import { ModelPicker } from "../ModelPicker" - -type UnboundProps = { - apiConfiguration: ProviderSettings - setApiConfigurationField: ( - field: keyof ProviderSettings, - value: ProviderSettings[keyof ProviderSettings], - isUserAction?: boolean, - ) => void - routerModels?: RouterModels - organizationAllowList: OrganizationAllowList - modelValidationError?: string - simplifySettings?: boolean -} - -export const Unbound = ({ - apiConfiguration, - setApiConfigurationField, - routerModels, - organizationAllowList, - modelValidationError, - simplifySettings, -}: UnboundProps) => { - const { t } = useAppTranslation() - const [didRefetch, setDidRefetch] = useState() - const [isInvalidKey, setIsInvalidKey] = useState(false) - const queryClient = useQueryClient() - - // Add refs to store timer IDs - const didRefetchTimerRef = useRef() - const invalidKeyTimerRef = useRef() - - const handleInputChange = useCallback( - ( - field: K, - transform: (event: E) => ProviderSettings[K] = inputEventTransform, - ) => - (event: E | Event) => { - setApiConfigurationField(field, transform(event as E)) - }, - [setApiConfigurationField], - ) - - const saveConfiguration = useCallback(async () => { - vscode.postMessage({ - type: "upsertApiConfiguration", - text: "default", - apiConfiguration: apiConfiguration, - }) - - const waitForStateUpdate = new Promise((resolve, reject) => { - const timeoutId = setTimeout(() => { - window.removeEventListener("message", messageHandler) - reject(new Error("Timeout waiting for state update")) - }, 10000) // 10 second timeout - - const messageHandler = (event: MessageEvent) => { - const message = event.data - if (message.type === "state") { - clearTimeout(timeoutId) - window.removeEventListener("message", messageHandler) - resolve() - } - } - window.addEventListener("message", messageHandler) - }) - - try { - await waitForStateUpdate - } catch (error) { - console.error("Failed to save configuration:", error) - } - }, [apiConfiguration]) - - const requestModels = useCallback(async () => { - vscode.postMessage({ type: "flushRouterModels", text: "unbound" }) - - const modelsPromise = new Promise((resolve) => { - const messageHandler = (event: MessageEvent) => { - const message = event.data - - if (message.type === "routerModels") { - window.removeEventListener("message", messageHandler) - resolve() - } - } - - window.addEventListener("message", messageHandler) - }) - - vscode.postMessage({ type: "requestRouterModels" }) - - await modelsPromise - - await queryClient.invalidateQueries({ queryKey: 
["routerModels"] }) - - // After refreshing models, check if current model is in the updated list - // If not, select the first available model - const updatedModels = queryClient.getQueryData<{ unbound: RouterModels }>(["routerModels"])?.unbound - if (updatedModels && Object.keys(updatedModels).length > 0) { - const currentModelId = apiConfiguration?.unboundModelId - const modelExists = currentModelId && Object.prototype.hasOwnProperty.call(updatedModels, currentModelId) - - if (!currentModelId || !modelExists) { - const firstAvailableModelId = Object.keys(updatedModels)[0] - setApiConfigurationField("unboundModelId", firstAvailableModelId, false) // false = automatic model selection - } - } - - if (!updatedModels || Object.keys(updatedModels).includes("error")) { - return false - } else { - return true - } - }, [queryClient, apiConfiguration, setApiConfigurationField]) - - const handleRefresh = useCallback(async () => { - await saveConfiguration() - const requestModelsResult = await requestModels() - - if (requestModelsResult) { - setDidRefetch(true) - didRefetchTimerRef.current = setTimeout(() => setDidRefetch(false), 3000) - } else { - setIsInvalidKey(true) - invalidKeyTimerRef.current = setTimeout(() => setIsInvalidKey(false), 3000) - } - }, [saveConfiguration, requestModels]) - - return ( - <> - - - -
- {t("settings:providers.apiKeyStorageNotice")} -
- {!apiConfiguration?.unboundApiKey && ( - - {t("settings:providers.getUnboundApiKey")} - - )} -
- -
- {didRefetch && ( -
- {t("settings:providers.unboundRefreshModelsSuccess")} -
- )} - {isInvalidKey && ( -
- {t("settings:providers.unboundInvalidApiKey")} -
- )} - - - ) -} diff --git a/webview-ui/src/components/settings/providers/__tests__/HuggingFace.spec.tsx b/webview-ui/src/components/settings/providers/__tests__/HuggingFace.spec.tsx deleted file mode 100644 index 5a652b1f148..00000000000 --- a/webview-ui/src/components/settings/providers/__tests__/HuggingFace.spec.tsx +++ /dev/null @@ -1,300 +0,0 @@ -import { render, screen } from "@/utils/test-utils" -import { HuggingFace } from "../HuggingFace" -import { ProviderSettings } from "@roo-code/types" - -// Mock the VSCode components -vi.mock("@vscode/webview-ui-toolkit/react", () => ({ - VSCodeTextField: ({ - children, - value, - onInput, - placeholder, - className, - style, - "data-testid": dataTestId, - ...rest - }: any) => { - return ( -
- {children} - onInput && onInput(e)} - placeholder={placeholder} - data-testid={dataTestId} - {...rest} - /> -
- ) - }, - VSCodeCheckbox: ({ children, checked, onChange, ...rest }: any) => ( -
- - {children} -
- ), - VSCodeLink: ({ children, href, onClick }: any) => ( - - {children} - - ), - VSCodeButton: ({ children, onClick, ...rest }: any) => ( - - ), -})) - -// Mock the translation hook -vi.mock("@src/i18n/TranslationContext", () => ({ - useAppTranslation: () => ({ - t: (key: string) => { - // Return the key for testing, but simulate some actual translations - const translations: Record = { - "settings:providers.getHuggingFaceApiKey": "Get Hugging Face API Key", - "settings:providers.huggingFaceApiKey": "Hugging Face API Key", - "settings:providers.huggingFaceModelId": "Model ID", - "settings:modelInfo.fetchingModels": "Fetching models...", - "settings:modelInfo.errorFetchingModels": "Error fetching models", - "settings:modelInfo.noModelsFound": "No models found", - "settings:modelInfo.noImages": "Does not support images", - } - return translations[key] || key - }, - }), -})) - -// Mock the UI components -vi.mock("@src/components/ui", () => ({ - Button: ({ children, onClick, variant, ...rest }: any) => ( - - ), - Select: ({ children }: any) =>
{children}
, - SelectContent: ({ children }: any) =>
{children}
, - SelectItem: ({ children }: any) =>
{children}
, - SelectTrigger: ({ children }: any) =>
{children}
, - SelectValue: ({ placeholder }: any) =>
{placeholder}
, - SearchableSelect: ({ value, onValueChange, placeholder, children }: any) => ( -
- onValueChange && onValueChange(e.target.value)} - placeholder={placeholder} - /> - {children} -
- ), -})) - -// Mock the formatPrice utility -vi.mock("@/utils/formatPrice", () => ({ - formatPrice: (price: number) => `$${price.toFixed(2)}`, -})) - -// Create a mock postMessage function -const mockPostMessage = vi.fn() - -// Mock the vscode module -vi.mock("@src/utils/vscode", () => ({ - vscode: { - postMessage: vi.fn(), - }, -})) - -// Import the mocked module to set up the spy -import { vscode } from "@src/utils/vscode" - -describe("HuggingFace Component", () => { - const mockSetApiConfigurationField = vi.fn() - - beforeEach(() => { - vi.clearAllMocks() - // Set up the mock implementation - vi.mocked(vscode.postMessage).mockImplementation(mockPostMessage) - }) - - it("should render with internationalized labels", () => { - const apiConfiguration: Partial = { - huggingFaceApiKey: "", - huggingFaceModelId: "", - } - - render( - , - ) - - // Check that the translated labels are rendered - expect(screen.getByText("Get Hugging Face API Key")).toBeInTheDocument() - expect(screen.getByText("Hugging Face API Key")).toBeInTheDocument() - expect(screen.getByText("Model ID")).toBeInTheDocument() - }) - - it("should render API key input field", () => { - const apiConfiguration: Partial = { - huggingFaceApiKey: "test-api-key", - huggingFaceModelId: "", - } - - render( - , - ) - - // Check that the API key input is rendered with the correct value - const apiKeyInput = screen.getByDisplayValue("test-api-key") - expect(apiKeyInput).toBeInTheDocument() - }) - - it("should render model selection components", () => { - const apiConfiguration: Partial = { - huggingFaceApiKey: "test-api-key", - huggingFaceModelId: "test-model", - } - - render( - , - ) - - // Check that the searchable select component is rendered - expect(screen.getByTestId("searchable-select")).toBeInTheDocument() - expect(screen.getByTestId("searchable-select-input")).toBeInTheDocument() - }) - - it("should display the get API key link", () => { - const apiConfiguration: Partial = { - huggingFaceApiKey: "", - huggingFaceModelId: "", - } - - render( - , - ) - - // Check that the API key button is rendered - const apiKeyButton = screen.getByTestId("button") - expect(apiKeyButton).toBeInTheDocument() - expect(apiKeyButton).toHaveTextContent("Get Hugging Face API Key") - }) - - it("should fetch models when component mounts", () => { - const apiConfiguration: Partial = { - huggingFaceApiKey: "test-api-key", - huggingFaceModelId: "", - } - - render( - , - ) - - // Check that the fetch models message was sent - expect(mockPostMessage).toHaveBeenCalledWith({ - type: "requestHuggingFaceModels", - }) - }) - - it("should display loading state while fetching models", () => { - const apiConfiguration: Partial = { - huggingFaceApiKey: "test-api-key", - huggingFaceModelId: "", - } - - render( - , - ) - - // Check for loading text in the label - expect(screen.getByText("settings:providers.huggingFaceLoading")).toBeInTheDocument() - }) - - it("should display model capabilities when a model is selected", async () => { - const apiConfiguration: Partial = { - huggingFaceApiKey: "test-api-key", - huggingFaceModelId: "test-model", - huggingFaceInferenceProvider: "test-provider", // Select a specific provider to show pricing - } - - const { rerender } = render( - , - ) - - // Simulate receiving models from the backend - const mockModels = [ - { - id: "test-model", - object: "model", - created: Date.now(), - owned_by: "test", - providers: [ - { - provider: "test-provider", - status: "live" as const, - supports_tools: false, - supports_structured_output: false, 
- context_length: 8192, - pricing: { - input: 0.001, - output: 0.002, - }, - }, - ], - }, - ] - - // Simulate message event - const messageEvent = new MessageEvent("message", { - data: { - type: "huggingFaceModels", - huggingFaceModels: mockModels, - }, - }) - window.dispatchEvent(messageEvent) - - // Re-render to trigger effect - rerender( - , - ) - - // Check that model capabilities are displayed - expect(screen.getByText("Does not support images")).toBeInTheDocument() - expect(screen.getByText("8,192 tokens")).toBeInTheDocument() - // Check that both input and output prices are displayed - const priceElements = screen.getAllByText("$0.00 / 1M tokens") - expect(priceElements).toHaveLength(2) // One for input, one for output - }) -}) diff --git a/webview-ui/src/components/settings/providers/index.ts b/webview-ui/src/components/settings/providers/index.ts index bca620d052d..d7684fb945e 100644 --- a/webview-ui/src/components/settings/providers/index.ts +++ b/webview-ui/src/components/settings/providers/index.ts @@ -1,13 +1,7 @@ export { Anthropic } from "./Anthropic" export { Bedrock } from "./Bedrock" -export { Cerebras } from "./Cerebras" -export { Chutes } from "./Chutes" export { DeepSeek } from "./DeepSeek" -export { Doubao } from "./Doubao" export { Gemini } from "./Gemini" -export { Groq } from "./Groq" -export { HuggingFace } from "./HuggingFace" -export { IOIntelligence } from "./IOIntelligence" export { LMStudio } from "./LMStudio" export { Mistral } from "./Mistral" export { Moonshot } from "./Moonshot" @@ -20,15 +14,12 @@ export { QwenCode } from "./QwenCode" export { Roo } from "./Roo" export { Requesty } from "./Requesty" export { SambaNova } from "./SambaNova" -export { Unbound } from "./Unbound" export { Vertex } from "./Vertex" export { VSCodeLM } from "./VSCodeLM" export { XAI } from "./XAI" export { ZAi } from "./ZAi" export { LiteLLM } from "./LiteLLM" export { Fireworks } from "./Fireworks" -export { Featherless } from "./Featherless" export { VercelAiGateway } from "./VercelAiGateway" -export { DeepInfra } from "./DeepInfra" export { MiniMax } from "./MiniMax" export { Baseten } from "./Baseten" diff --git a/webview-ui/src/components/settings/utils/providerModelConfig.ts b/webview-ui/src/components/settings/utils/providerModelConfig.ts index fdeb2647e5a..85fb54d6e92 100644 --- a/webview-ui/src/components/settings/utils/providerModelConfig.ts +++ b/webview-ui/src/components/settings/utils/providerModelConfig.ts @@ -2,9 +2,7 @@ import type { ProviderName, ModelInfo, ProviderSettings } from "@roo-code/types" import { anthropicDefaultModelId, bedrockDefaultModelId, - cerebrasDefaultModelId, deepSeekDefaultModelId, - doubaoDefaultModelId, moonshotDefaultModelId, geminiDefaultModelId, mistralDefaultModelId, @@ -12,12 +10,10 @@ import { qwenCodeDefaultModelId, vertexDefaultModelId, xaiDefaultModelId, - groqDefaultModelId, sambaNovaDefaultModelId, internationalZAiDefaultModelId, mainlandZAiDefaultModelId, fireworksDefaultModelId, - featherlessDefaultModelId, minimaxDefaultModelId, basetenDefaultModelId, } from "@roo-code/types" @@ -32,9 +28,7 @@ export interface ProviderServiceConfig { export const PROVIDER_SERVICE_CONFIG: Partial> = { anthropic: { serviceName: "Anthropic", serviceUrl: "https://console.anthropic.com" }, bedrock: { serviceName: "Amazon Bedrock", serviceUrl: "https://aws.amazon.com/bedrock" }, - cerebras: { serviceName: "Cerebras", serviceUrl: "https://cerebras.ai" }, deepseek: { serviceName: "DeepSeek", serviceUrl: "https://platform.deepseek.com" }, - doubao: { 
serviceName: "Doubao", serviceUrl: "https://www.volcengine.com/product/doubao" }, moonshot: { serviceName: "Moonshot", serviceUrl: "https://platform.moonshot.cn" }, gemini: { serviceName: "Google Gemini", serviceUrl: "https://ai.google.dev" }, mistral: { serviceName: "Mistral", serviceUrl: "https://console.mistral.ai" }, @@ -42,11 +36,9 @@ export const PROVIDER_SERVICE_CONFIG: Partial> = { anthropic: anthropicDefaultModelId, bedrock: bedrockDefaultModelId, - cerebras: cerebrasDefaultModelId, deepseek: deepSeekDefaultModelId, - doubao: doubaoDefaultModelId, moonshot: moonshotDefaultModelId, gemini: geminiDefaultModelId, mistral: mistralDefaultModelId, @@ -70,11 +60,9 @@ export const PROVIDER_DEFAULT_MODEL_IDS: Partial> = "qwen-code": qwenCodeDefaultModelId, vertex: vertexDefaultModelId, xai: xaiDefaultModelId, - groq: groqDefaultModelId, sambanova: sambaNovaDefaultModelId, zai: internationalZAiDefaultModelId, fireworks: fireworksDefaultModelId, - featherless: featherlessDefaultModelId, minimax: minimaxDefaultModelId, baseten: basetenDefaultModelId, } @@ -130,19 +118,14 @@ export const isStaticModelProvider = (provider: ProviderName): boolean => { export const PROVIDERS_WITH_CUSTOM_MODEL_UI: ProviderName[] = [ "openrouter", "requesty", - "unbound", - "deepinfra", "openai", // OpenAI Compatible "openai-codex", // OpenAI Codex has custom UI with auth and rate limits "litellm", - "io-intelligence", "vercel-ai-gateway", "roo", - "chutes", "ollama", "lmstudio", "vscode-lm", - "huggingface", ] /** diff --git a/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts b/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts index e42ba33fd0e..8925adf5fda 100644 --- a/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts +++ b/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts @@ -61,9 +61,7 @@ describe("useSelectedModel", () => { "test-model": baseModelInfo, }, requesty: {}, - unbound: {}, litellm: {}, - "io-intelligence": {}, }, isLoading: false, isError: false, @@ -124,9 +122,7 @@ describe("useSelectedModel", () => { }, }, requesty: {}, - unbound: {}, litellm: {}, - "io-intelligence": {}, }, isLoading: false, isError: false, @@ -191,9 +187,7 @@ describe("useSelectedModel", () => { "test-model": baseModelInfo, }, requesty: {}, - unbound: {}, litellm: {}, - "io-intelligence": {}, }, isLoading: false, isError: false, @@ -245,9 +239,7 @@ describe("useSelectedModel", () => { data: { openrouter: { "test-model": baseModelInfo }, requesty: {}, - unbound: {}, litellm: {}, - "io-intelligence": {}, }, isLoading: false, isError: false, @@ -288,9 +280,7 @@ describe("useSelectedModel", () => { }, }, requesty: {}, - unbound: {}, litellm: {}, - "io-intelligence": {}, }, isLoading: false, isError: false, @@ -350,7 +340,7 @@ describe("useSelectedModel", () => { it("should NOT set loading when openrouter provider metadata is loading but provider is static (anthropic)", () => { mockUseRouterModels.mockReturnValue({ - data: { openrouter: {}, requesty: {}, unbound: {}, litellm: {}, "io-intelligence": {} }, + data: { openrouter: {}, requesty: {}, litellm: {} }, isLoading: false, isError: false, } as any) @@ -418,9 +408,7 @@ describe("useSelectedModel", () => { data: { openrouter: {}, requesty: {}, - unbound: {}, litellm: {}, - "io-intelligence": {}, }, isLoading: false, isError: false, @@ -490,9 +478,7 @@ describe("useSelectedModel", () => { data: { openrouter: {}, requesty: {}, - unbound: {}, litellm: {}, - "io-intelligence": {}, }, isLoading: false, 
isError: false, @@ -518,7 +504,6 @@ describe("useSelectedModel", () => { data: { openrouter: {}, requesty: {}, - unbound: {}, litellm: { "existing-model": { maxTokens: 4096, @@ -527,7 +512,6 @@ describe("useSelectedModel", () => { supportsPromptCache: false, }, }, - "io-intelligence": {}, }, isLoading: false, isError: false, @@ -561,11 +545,9 @@ describe("useSelectedModel", () => { data: { openrouter: {}, requesty: {}, - unbound: {}, litellm: { "custom-model": customModelInfo, }, - "io-intelligence": {}, }, isLoading: false, isError: false, @@ -591,9 +573,7 @@ describe("useSelectedModel", () => { data: { openrouter: {}, requesty: {}, - unbound: {}, litellm: {}, - "io-intelligence": {}, }, isLoading: false, isError: false, diff --git a/webview-ui/src/components/ui/hooks/useSelectedModel.ts b/webview-ui/src/components/ui/hooks/useSelectedModel.ts index 5336b635836..7be7c353ba1 100644 --- a/webview-ui/src/components/ui/hooks/useSelectedModel.ts +++ b/webview-ui/src/components/ui/hooks/useSelectedModel.ts @@ -6,7 +6,6 @@ import { type RouterModels, anthropicModels, bedrockModels, - cerebrasModels, deepSeekModels, moonshotModels, minimaxModels, @@ -16,17 +15,13 @@ import { openAiNativeModels, vertexModels, xaiModels, - groqModels, vscodeLlmModels, vscodeLlmDefaultModelId, openAiCodexModels, sambaNovaModels, - doubaoModels, internationalZAiModels, mainlandZAiModels, fireworksModels, - featherlessModels, - ioIntelligenceModels, basetenModels, qwenCodeModels, litellmDefaultModelInfo, @@ -160,11 +155,6 @@ function getSelectedModel({ const routerInfo = routerModels.requesty?.[id] return { id, info: routerInfo } } - case "unbound": { - const id = getValidatedModelId(apiConfiguration.unboundModelId, routerModels.unbound, defaultModelId) - const routerInfo = routerModels.unbound?.[id] - return { id, info: routerInfo } - } case "litellm": { const id = getValidatedModelId(apiConfiguration.litellmModelId, routerModels.litellm, defaultModelId) const routerInfo = routerModels.litellm?.[id] @@ -175,26 +165,6 @@ function getSelectedModel({ const info = xaiModels[id as keyof typeof xaiModels] return info ? { id, info } : { id, info: undefined } } - case "groq": { - const id = apiConfiguration.apiModelId ?? defaultModelId - const info = groqModels[id as keyof typeof groqModels] - return { id, info } - } - case "huggingface": { - const id = apiConfiguration.huggingFaceModelId ?? "meta-llama/Llama-3.3-70B-Instruct" - const info = { - maxTokens: 8192, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - } - return { id, info } - } - case "chutes": { - const id = getValidatedModelId(apiConfiguration.apiModelId, routerModels.chutes, defaultModelId) - const info = routerModels.chutes?.[id] - return { id, info } - } case "baseten": { const id = apiConfiguration.apiModelId ?? defaultModelId const info = basetenModels[id as keyof typeof basetenModels] @@ -257,11 +227,6 @@ function getSelectedModel({ const info = deepSeekModels[id as keyof typeof deepSeekModels] return { id, info } } - case "doubao": { - const id = apiConfiguration.apiModelId ?? defaultModelId - const info = doubaoModels[id as keyof typeof doubaoModels] - return { id, info } - } case "moonshot": { const id = apiConfiguration.apiModelId ?? defaultModelId const info = moonshotModels[id as keyof typeof moonshotModels] @@ -320,11 +285,6 @@ function getSelectedModel({ info: modelInfo ? 
{ ...lMStudioDefaultModelInfo, ...modelInfo } : undefined, } } - case "deepinfra": { - const id = getValidatedModelId(apiConfiguration.deepInfraModelId, routerModels.deepinfra, defaultModelId) - const info = routerModels.deepinfra?.[id] - return { id, info } - } case "vscode-lm": { const id = apiConfiguration?.vsCodeLmModelSelector ? `${apiConfiguration.vsCodeLmModelSelector.vendor}/${apiConfiguration.vsCodeLmModelSelector.family}` @@ -333,11 +293,6 @@ function getSelectedModel({ const info = vscodeLlmModels[modelFamily as keyof typeof vscodeLlmModels] return { id, info: { ...openAiModelInfoSaneDefaults, ...info, supportsImages: false } } // VSCode LM API currently doesn't support images. } - case "cerebras": { - const id = apiConfiguration.apiModelId ?? defaultModelId - const info = cerebrasModels[id as keyof typeof cerebrasModels] - return { id, info } - } case "sambanova": { const id = apiConfiguration.apiModelId ?? defaultModelId const info = sambaNovaModels[id as keyof typeof sambaNovaModels] @@ -348,21 +303,6 @@ function getSelectedModel({ const info = fireworksModels[id as keyof typeof fireworksModels] return { id, info } } - case "featherless": { - const id = apiConfiguration.apiModelId ?? defaultModelId - const info = featherlessModels[id as keyof typeof featherlessModels] - return { id, info } - } - case "io-intelligence": { - const id = getValidatedModelId( - apiConfiguration.ioIntelligenceModelId, - routerModels["io-intelligence"], - defaultModelId, - ) - const info = - routerModels["io-intelligence"]?.[id] ?? ioIntelligenceModels[id as keyof typeof ioIntelligenceModels] - return { id, info } - } case "roo": { const id = getValidatedModelId(apiConfiguration.apiModelId, routerModels.roo, defaultModelId) const info = routerModels.roo?.[id] diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index cfcc0727c57..e4502bd53ab 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "Amplia la finestra de context a 1 milió de tokens per a Claude Sonnet 4 / 4.5 / Claude Opus 4.6", "basetenApiKey": "Clau API de Baseten", "getBasetenApiKey": "Obtenir clau API de Baseten", - "cerebrasApiKey": "Clau API de Cerebras", - "getCerebrasApiKey": "Obtenir clau API de Cerebras", - "chutesApiKey": "Clau API de Chutes", - "getChutesApiKey": "Obtenir clau API de Chutes", "fireworksApiKey": "Clau API de Fireworks", "getFireworksApiKey": "Obtenir clau API de Fireworks", - "featherlessApiKey": "Clau API de Featherless", - "getFeatherlessApiKey": "Obtenir clau API de Featherless", - "ioIntelligenceApiKey": "Clau API d'IO Intelligence", - "ioIntelligenceApiKeyPlaceholder": "Introdueix la teva clau d'API de IO Intelligence", - "getIoIntelligenceApiKey": "Obtenir clau API d'IO Intelligence", "deepSeekApiKey": "Clau API de DeepSeek", "getDeepSeekApiKey": "Obtenir clau API de DeepSeek", - "doubaoApiKey": "Clau API de Doubao", - "getDoubaoApiKey": "Obtenir clau API de Doubao", "moonshotApiKey": "Clau API de Moonshot", "getMoonshotApiKey": "Obtenir clau API de Moonshot", "moonshotBaseUrl": "Punt d'entrada de Moonshot", @@ -403,23 +392,8 @@ "getMiniMaxApiKey": "Obtenir clau API de MiniMax", "minimaxBaseUrl": "Punt d'entrada de MiniMax", "geminiApiKey": "Clau API de Gemini", - "getGroqApiKey": "Obtenir clau API de Groq", - "groqApiKey": "Clau API de Groq", "getSambaNovaApiKey": "Obtenir clau API de SambaNova", "sambaNovaApiKey": "Clau API de SambaNova", - 
"getHuggingFaceApiKey": "Obtenir clau API de Hugging Face", - "huggingFaceApiKey": "Clau API de Hugging Face", - "huggingFaceModelId": "ID del model", - "huggingFaceLoading": "Carregant...", - "huggingFaceModelsCount": "({{count}} models)", - "huggingFaceSelectModel": "Selecciona un model...", - "huggingFaceSearchModels": "Cerca models...", - "huggingFaceNoModelsFound": "No s'han trobat models", - "huggingFaceProvider": "Proveïdor", - "huggingFaceProviderAuto": "Automàtic", - "huggingFaceSelectProvider": "Selecciona un proveïdor...", - "huggingFaceSearchProviders": "Cerca proveïdors...", - "huggingFaceNoProvidersFound": "No s'han trobat proveïdors", "getGeminiApiKey": "Obtenir clau API de Gemini", "openAiApiKey": "Clau API d'OpenAI", "apiKey": "Clau API", @@ -491,10 +465,6 @@ "description": "Ollama permet executar models localment al vostre ordinador. Per a instruccions sobre com començar, consulteu la Guia d'inici ràpid.", "warning": "Nota: Roo Code utilitza prompts complexos i funciona millor amb models Claude. Els models menys capaços poden no funcionar com s'espera." }, - "unboundApiKey": "Clau API d'Unbound", - "getUnboundApiKey": "Obtenir clau API d'Unbound", - "unboundRefreshModelsSuccess": "Llista de models actualitzada! Ara podeu seleccionar entre els últims models.", - "unboundInvalidApiKey": "Clau API no vàlida. Si us plau, comproveu la vostra clau API i torneu-ho a provar.", "roo": { "authenticatedMessage": "Autenticat de forma segura a través del teu compte de Roo Code Cloud.", "connectButton": "Connecta amb Roo Code Cloud" diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index f3e753e011d..13030ca3847 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -316,8 +316,6 @@ "getOpenRouterApiKey": "OpenRouter API-Schlüssel erhalten", "vercelAiGatewayApiKey": "Vercel AI Gateway API-Schlüssel", "getVercelAiGatewayApiKey": "Vercel AI Gateway API-Schlüssel erhalten", - "doubaoApiKey": "Doubao API-Schlüssel", - "getDoubaoApiKey": "Doubao API-Schlüssel erhalten", "apiKeyStorageNotice": "API-Schlüssel werden sicher im VSCode Secret Storage gespeichert", "openAiCodexRateLimits": { "title": "Usage Limits for Codex{{planLabel}}", @@ -379,17 +377,8 @@ "vertex1MContextBetaDescription": "Erweitert das Kontextfenster für Claude Sonnet 4 / 4.5 / Claude Opus 4.6 auf 1 Million Token", "basetenApiKey": "Baseten API-Schlüssel", "getBasetenApiKey": "Baseten API-Schlüssel erhalten", - "cerebrasApiKey": "Cerebras API-Schlüssel", - "getCerebrasApiKey": "Cerebras API-Schlüssel erhalten", - "chutesApiKey": "Chutes API-Schlüssel", - "getChutesApiKey": "Chutes API-Schlüssel erhalten", "fireworksApiKey": "Fireworks API-Schlüssel", "getFireworksApiKey": "Fireworks API-Schlüssel erhalten", - "featherlessApiKey": "Featherless API-Schlüssel", - "getFeatherlessApiKey": "Featherless API-Schlüssel erhalten", - "ioIntelligenceApiKey": "IO Intelligence API-Schlüssel", - "ioIntelligenceApiKeyPlaceholder": "Gib deinen IO Intelligence API-Schlüssel ein", - "getIoIntelligenceApiKey": "IO Intelligence API-Schlüssel erhalten", "deepSeekApiKey": "DeepSeek API-Schlüssel", "getDeepSeekApiKey": "DeepSeek API-Schlüssel erhalten", "moonshotApiKey": "Moonshot API-Schlüssel", @@ -403,23 +392,8 @@ "getMiniMaxApiKey": "MiniMax API-Schlüssel erhalten", "minimaxBaseUrl": "MiniMax-Einstiegspunkt", "geminiApiKey": "Gemini API-Schlüssel", - "getGroqApiKey": "Groq API-Schlüssel erhalten", - "groqApiKey": "Groq API-Schlüssel", 
"getSambaNovaApiKey": "SambaNova API-Schlüssel erhalten", "sambaNovaApiKey": "SambaNova API-Schlüssel", - "getHuggingFaceApiKey": "Hugging Face API-Schlüssel erhalten", - "huggingFaceApiKey": "Hugging Face API-Schlüssel", - "huggingFaceModelId": "Modell-ID", - "huggingFaceLoading": "Lädt...", - "huggingFaceModelsCount": "({{count}} Modelle)", - "huggingFaceSelectModel": "Modell auswählen...", - "huggingFaceSearchModels": "Modelle durchsuchen...", - "huggingFaceNoModelsFound": "Keine Modelle gefunden", - "huggingFaceProvider": "Anbieter", - "huggingFaceProviderAuto": "Automatisch", - "huggingFaceSelectProvider": "Anbieter auswählen...", - "huggingFaceSearchProviders": "Anbieter durchsuchen...", - "huggingFaceNoProvidersFound": "Keine Anbieter gefunden", "getGeminiApiKey": "Gemini API-Schlüssel erhalten", "openAiApiKey": "OpenAI API-Schlüssel", "apiKey": "API-Schlüssel", @@ -491,10 +465,6 @@ "description": "Ollama ermöglicht es dir, Modelle lokal auf deinem Computer auszuführen. Eine Anleitung zum Einstieg findest du im Schnellstart-Guide.", "warning": "Hinweis: Roo Code verwendet komplexe Prompts und funktioniert am besten mit Claude-Modellen. Weniger leistungsfähige Modelle funktionieren möglicherweise nicht wie erwartet." }, - "unboundApiKey": "Unbound API-Schlüssel", - "getUnboundApiKey": "Unbound API-Schlüssel erhalten", - "unboundRefreshModelsSuccess": "Modellliste aktualisiert! Sie können jetzt aus den neuesten Modellen auswählen.", - "unboundInvalidApiKey": "Ungültiger API-Schlüssel. Bitte überprüfen Sie Ihren API-Schlüssel und versuchen Sie es erneut.", "roo": { "authenticatedMessage": "Sicher authentifiziert über dein Roo Code Cloud-Konto.", "connectButton": "Mit Roo Code Cloud verbinden" diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 61dfaf42af5..42bdfe24308 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -440,21 +440,10 @@ "vertex1MContextBetaDescription": "Extends context window to 1 million tokens for Claude Sonnet 4 / 4.5 / Claude Opus 4.6", "basetenApiKey": "Baseten API Key", "getBasetenApiKey": "Get Baseten API Key", - "cerebrasApiKey": "Cerebras API Key", - "getCerebrasApiKey": "Get Cerebras API Key", - "chutesApiKey": "Chutes API Key", - "getChutesApiKey": "Get Chutes API Key", "fireworksApiKey": "Fireworks API Key", "getFireworksApiKey": "Get Fireworks API Key", - "featherlessApiKey": "Featherless API Key", - "getFeatherlessApiKey": "Get Featherless API Key", - "ioIntelligenceApiKey": "IO Intelligence API Key", - "ioIntelligenceApiKeyPlaceholder": "Enter your IO Intelligence API key", - "getIoIntelligenceApiKey": "Get IO Intelligence API Key", "deepSeekApiKey": "DeepSeek API Key", "getDeepSeekApiKey": "Get DeepSeek API Key", - "doubaoApiKey": "Doubao API Key", - "getDoubaoApiKey": "Get Doubao API Key", "moonshotApiKey": "Moonshot API Key", "getMoonshotApiKey": "Get Moonshot API Key", "moonshotBaseUrl": "Moonshot Entrypoint", @@ -466,23 +455,8 @@ "zaiEntrypoint": "Z AI Entrypoint", "zaiEntrypointDescription": "Please select the appropriate API entrypoint based on your location. If you are in China, choose open.bigmodel.cn. 
Otherwise, choose api.z.ai.", "geminiApiKey": "Gemini API Key", - "getGroqApiKey": "Get Groq API Key", - "groqApiKey": "Groq API Key", "getSambaNovaApiKey": "Get SambaNova API Key", "sambaNovaApiKey": "SambaNova API Key", - "getHuggingFaceApiKey": "Get Hugging Face API Key", - "huggingFaceApiKey": "Hugging Face API Key", - "huggingFaceModelId": "Model ID", - "huggingFaceLoading": "Loading...", - "huggingFaceModelsCount": "({{count}} models)", - "huggingFaceSelectModel": "Select a model...", - "huggingFaceSearchModels": "Search models...", - "huggingFaceNoModelsFound": "No models found", - "huggingFaceProvider": "Provider", - "huggingFaceProviderAuto": "Auto", - "huggingFaceSelectProvider": "Select a provider...", - "huggingFaceSearchProviders": "Search providers...", - "huggingFaceNoProvidersFound": "No providers found", "getGeminiApiKey": "Get Gemini API Key", "openAiApiKey": "OpenAI API Key", "apiKey": "API Key", @@ -554,10 +528,6 @@ "description": "Ollama allows you to run models locally on your computer. For instructions on how to get started, see their quickstart guide.", "warning": "Note: Roo Code uses complex prompts and works best with Claude models. Less capable models may not work as expected." }, - "unboundApiKey": "Unbound API Key", - "getUnboundApiKey": "Get Unbound API Key", - "unboundRefreshModelsSuccess": "Models list updated! You can now select from the latest models.", - "unboundInvalidApiKey": "Invalid API key. Please check your API key and try again.", "roo": { "authenticatedMessage": "Securely authenticated through your Roo Code Cloud account.", "connectButton": "Connect to Roo Code Cloud" diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index aafa9568e05..419146cbc81 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "Amplía la ventana de contexto a 1 millón de tokens para Claude Sonnet 4 / 4.5 / Claude Opus 4.6", "basetenApiKey": "Clave API de Baseten", "getBasetenApiKey": "Obtener clave API de Baseten", - "cerebrasApiKey": "Clave API de Cerebras", - "getCerebrasApiKey": "Obtener clave API de Cerebras", - "chutesApiKey": "Clave API de Chutes", - "getChutesApiKey": "Obtener clave API de Chutes", "fireworksApiKey": "Clave API de Fireworks", "getFireworksApiKey": "Obtener clave API de Fireworks", - "featherlessApiKey": "Clave API de Featherless", - "getFeatherlessApiKey": "Obtener clave API de Featherless", - "ioIntelligenceApiKey": "Clave API de IO Intelligence", - "ioIntelligenceApiKeyPlaceholder": "Introduce tu clave de API de IO Intelligence", - "getIoIntelligenceApiKey": "Obtener clave API de IO Intelligence", "deepSeekApiKey": "Clave API de DeepSeek", "getDeepSeekApiKey": "Obtener clave API de DeepSeek", - "doubaoApiKey": "Clave API de Doubao", - "getDoubaoApiKey": "Obtener clave API de Doubao", "moonshotApiKey": "Clave API de Moonshot", "getMoonshotApiKey": "Obtener clave API de Moonshot", "moonshotBaseUrl": "Punto de entrada de Moonshot", @@ -403,23 +392,8 @@ "getMiniMaxApiKey": "Obtener clave API de MiniMax", "minimaxBaseUrl": "Punto de entrada de MiniMax", "geminiApiKey": "Clave API de Gemini", - "getGroqApiKey": "Obtener clave API de Groq", - "groqApiKey": "Clave API de Groq", "getSambaNovaApiKey": "Obtener clave API de SambaNova", "sambaNovaApiKey": "Clave API de SambaNova", - "getHuggingFaceApiKey": "Obtener clave API de Hugging Face", - "huggingFaceApiKey": "Clave API de Hugging Face", - 
"huggingFaceModelId": "ID del modelo", - "huggingFaceLoading": "Cargando...", - "huggingFaceModelsCount": "({{count}} modelos)", - "huggingFaceSelectModel": "Seleccionar un modelo...", - "huggingFaceSearchModels": "Buscar modelos...", - "huggingFaceNoModelsFound": "No se encontraron modelos", - "huggingFaceProvider": "Proveedor", - "huggingFaceProviderAuto": "Automático", - "huggingFaceSelectProvider": "Seleccionar un proveedor...", - "huggingFaceSearchProviders": "Buscar proveedores...", - "huggingFaceNoProvidersFound": "No se encontraron proveedores", "getGeminiApiKey": "Obtener clave API de Gemini", "openAiApiKey": "Clave API de OpenAI", "apiKey": "Clave API", @@ -491,10 +465,6 @@ "description": "Ollama le permite ejecutar modelos localmente en su computadora. Para obtener instrucciones sobre cómo comenzar, consulte la guía de inicio rápido.", "warning": "Nota: Roo Code utiliza prompts complejos y funciona mejor con modelos Claude. Los modelos menos capaces pueden no funcionar como se espera." }, - "unboundApiKey": "Clave API de Unbound", - "getUnboundApiKey": "Obtener clave API de Unbound", - "unboundRefreshModelsSuccess": "¡Lista de modelos actualizada! Ahora puede seleccionar entre los últimos modelos.", - "unboundInvalidApiKey": "Clave API inválida. Por favor, verifique su clave API e inténtelo de nuevo.", "roo": { "authenticatedMessage": "Autenticado de forma segura a través de tu cuenta de Roo Code Cloud.", "connectButton": "Conectar a Roo Code Cloud" diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index ba51bf7ca83..cecca34916d 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "Étend la fenêtre de contexte à 1 million de tokens pour Claude Sonnet 4 / 4.5 / Claude Opus 4.6", "basetenApiKey": "Clé API Baseten", "getBasetenApiKey": "Obtenir la clé API Baseten", - "cerebrasApiKey": "Clé API Cerebras", - "getCerebrasApiKey": "Obtenir la clé API Cerebras", - "chutesApiKey": "Clé API Chutes", - "getChutesApiKey": "Obtenir la clé API Chutes", "fireworksApiKey": "Clé API Fireworks", "getFireworksApiKey": "Obtenir la clé API Fireworks", - "featherlessApiKey": "Clé API Featherless", - "getFeatherlessApiKey": "Obtenir la clé API Featherless", - "ioIntelligenceApiKey": "Clé API IO Intelligence", - "ioIntelligenceApiKeyPlaceholder": "Saisissez votre clé d'API IO Intelligence", - "getIoIntelligenceApiKey": "Obtenir la clé API IO Intelligence", "deepSeekApiKey": "Clé API DeepSeek", "getDeepSeekApiKey": "Obtenir la clé API DeepSeek", - "doubaoApiKey": "Clé API Doubao", - "getDoubaoApiKey": "Obtenir la clé API Doubao", "moonshotApiKey": "Clé API Moonshot", "getMoonshotApiKey": "Obtenir la clé API Moonshot", "moonshotBaseUrl": "Point d'entrée Moonshot", @@ -403,23 +392,8 @@ "getMiniMaxApiKey": "Obtenir la clé API MiniMax", "minimaxBaseUrl": "Point d'entrée MiniMax", "geminiApiKey": "Clé API Gemini", - "getGroqApiKey": "Obtenir la clé API Groq", - "groqApiKey": "Clé API Groq", "getSambaNovaApiKey": "Obtenir la clé API SambaNova", "sambaNovaApiKey": "Clé API SambaNova", - "getHuggingFaceApiKey": "Obtenir la clé API Hugging Face", - "huggingFaceApiKey": "Clé API Hugging Face", - "huggingFaceModelId": "ID du modèle", - "huggingFaceLoading": "Chargement...", - "huggingFaceModelsCount": "({{count}} modèles)", - "huggingFaceSelectModel": "Sélectionner un modèle...", - "huggingFaceSearchModels": "Rechercher des modèles...", - 
"huggingFaceNoModelsFound": "Aucun modèle trouvé", - "huggingFaceProvider": "Fournisseur", - "huggingFaceProviderAuto": "Automatique", - "huggingFaceSelectProvider": "Sélectionner un fournisseur...", - "huggingFaceSearchProviders": "Rechercher des fournisseurs...", - "huggingFaceNoProvidersFound": "Aucun fournisseur trouvé", "getGeminiApiKey": "Obtenir la clé API Gemini", "openAiApiKey": "Clé API OpenAI", "apiKey": "Clé API", @@ -491,10 +465,6 @@ "description": "Ollama vous permet d'exécuter des modèles localement sur votre ordinateur. Pour obtenir des instructions sur la mise en route, consultez le guide de démarrage rapide.", "warning": "Remarque : Roo Code utilise des prompts complexes et fonctionne mieux avec les modèles Claude. Les modèles moins performants peuvent ne pas fonctionner comme prévu." }, - "unboundApiKey": "Clé API Unbound", - "getUnboundApiKey": "Obtenir la clé API Unbound", - "unboundRefreshModelsSuccess": "Liste des modèles mise à jour ! Vous pouvez maintenant sélectionner parmi les derniers modèles.", - "unboundInvalidApiKey": "Clé API invalide. Veuillez vérifier votre clé API et réessayer.", "roo": { "authenticatedMessage": "Authentifié de manière sécurisée via ton compte Roo Code Cloud.", "connectButton": "Se connecter à Roo Code Cloud" diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index e9a3c3cbd26..de01293bd87 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "Claude Sonnet 4 / 4.5 / Claude Opus 4.6 के लिए संदर्भ विंडो को 1 मिलियन टोकन तक बढ़ाता है", "basetenApiKey": "Baseten API कुंजी", "getBasetenApiKey": "Baseten API कुंजी प्राप्त करें", - "cerebrasApiKey": "Cerebras API कुंजी", - "getCerebrasApiKey": "Cerebras API कुंजी प्राप्त करें", - "chutesApiKey": "Chutes API कुंजी", - "getChutesApiKey": "Chutes API कुंजी प्राप्त करें", "fireworksApiKey": "Fireworks API कुंजी", "getFireworksApiKey": "Fireworks API कुंजी प्राप्त करें", - "featherlessApiKey": "Featherless API कुंजी", - "getFeatherlessApiKey": "Featherless API कुंजी प्राप्त करें", - "ioIntelligenceApiKey": "IO Intelligence API कुंजी", - "ioIntelligenceApiKeyPlaceholder": "अपना आईओ इंटेलिजेंस एपीआई कुंजी दर्ज करें", - "getIoIntelligenceApiKey": "IO Intelligence API कुंजी प्राप्त करें", "deepSeekApiKey": "DeepSeek API कुंजी", "getDeepSeekApiKey": "DeepSeek API कुंजी प्राप्त करें", - "doubaoApiKey": "डौबाओ API कुंजी", - "getDoubaoApiKey": "डौबाओ API कुंजी प्राप्त करें", "moonshotApiKey": "Moonshot API कुंजी", "getMoonshotApiKey": "Moonshot API कुंजी प्राप्त करें", "moonshotBaseUrl": "Moonshot प्रवेश बिंदु", @@ -403,23 +392,8 @@ "getMiniMaxApiKey": "MiniMax API कुंजी प्राप्त करें", "minimaxBaseUrl": "MiniMax प्रवेश बिंदु", "geminiApiKey": "Gemini API कुंजी", - "getGroqApiKey": "Groq API कुंजी प्राप्त करें", - "groqApiKey": "Groq API कुंजी", "getSambaNovaApiKey": "SambaNova API कुंजी प्राप्त करें", "sambaNovaApiKey": "SambaNova API कुंजी", - "getHuggingFaceApiKey": "Hugging Face API कुंजी प्राप्त करें", - "huggingFaceApiKey": "Hugging Face API कुंजी", - "huggingFaceModelId": "मॉडल ID", - "huggingFaceLoading": "लोड हो रहा है...", - "huggingFaceModelsCount": "({{count}} मॉडल)", - "huggingFaceSelectModel": "एक मॉडल चुनें...", - "huggingFaceSearchModels": "मॉडल खोजें...", - "huggingFaceNoModelsFound": "कोई मॉडल नहीं मिला", - "huggingFaceProvider": "प्रदाता", - "huggingFaceProviderAuto": "स्वचालित", - "huggingFaceSelectProvider": "एक प्रदाता 
चुनें...", - "huggingFaceSearchProviders": "प्रदाता खोजें...", - "huggingFaceNoProvidersFound": "कोई प्रदाता नहीं मिला", "getGeminiApiKey": "Gemini API कुंजी प्राप्त करें", "openAiApiKey": "OpenAI API कुंजी", "apiKey": "API कुंजी", @@ -491,10 +465,6 @@ "description": "Ollama आपको अपने कंप्यूटर पर स्थानीय रूप से मॉडल चलाने की अनुमति देता है। आरंभ करने के निर्देशों के लिए, उनकी क्विकस्टार्ट गाइड देखें।", "warning": "नोट: Roo Code जटिल प्रॉम्प्ट्स का उपयोग करता है और Claude मॉडल के साथ सबसे अच्छा काम करता है। कम क्षमता वाले मॉडल अपेक्षित रूप से काम नहीं कर सकते हैं।" }, - "unboundApiKey": "Unbound API कुंजी", - "getUnboundApiKey": "Unbound API कुंजी प्राप्त करें", - "unboundRefreshModelsSuccess": "मॉडल सूची अपडेट हो गई है! अब आप नवीनतम मॉडलों में से चुन सकते हैं।", - "unboundInvalidApiKey": "अमान्य API कुंजी। कृपया अपनी API कुंजी की जांच करें और पुनः प्रयास करें।", "roo": { "authenticatedMessage": "आपके Roo Code Cloud खाते के माध्यम से सुरक्षित रूप से प्रमाणित।", "connectButton": "Roo Code Cloud से कनेक्ट करें" diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index cca52db56c3..7c0a7ed8688 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "Memperluas jendela konteks menjadi 1 juta token untuk Claude Sonnet 4 / 4.5 / Claude Opus 4.6", "basetenApiKey": "Baseten API Key", "getBasetenApiKey": "Dapatkan Baseten API Key", - "cerebrasApiKey": "Cerebras API Key", - "getCerebrasApiKey": "Dapatkan Cerebras API Key", - "chutesApiKey": "Chutes API Key", - "getChutesApiKey": "Dapatkan Chutes API Key", "fireworksApiKey": "Fireworks API Key", "getFireworksApiKey": "Dapatkan Fireworks API Key", - "featherlessApiKey": "Featherless API Key", - "getFeatherlessApiKey": "Dapatkan Featherless API Key", - "ioIntelligenceApiKey": "IO Intelligence API Key", - "ioIntelligenceApiKeyPlaceholder": "Masukkan kunci API IO Intelligence Anda", - "getIoIntelligenceApiKey": "Dapatkan IO Intelligence API Key", "deepSeekApiKey": "DeepSeek API Key", "getDeepSeekApiKey": "Dapatkan DeepSeek API Key", - "doubaoApiKey": "Kunci API Doubao", - "getDoubaoApiKey": "Dapatkan Kunci API Doubao", "moonshotApiKey": "Kunci API Moonshot", "getMoonshotApiKey": "Dapatkan Kunci API Moonshot", "moonshotBaseUrl": "Titik Masuk Moonshot", @@ -403,24 +392,9 @@ "getMiniMaxApiKey": "Dapatkan Kunci API MiniMax", "minimaxBaseUrl": "Titik Masuk MiniMax", "geminiApiKey": "Gemini API Key", - "getGroqApiKey": "Dapatkan Groq API Key", - "groqApiKey": "Groq API Key", "getSambaNovaApiKey": "Dapatkan SambaNova API Key", "sambaNovaApiKey": "SambaNova API Key", - "getHuggingFaceApiKey": "Dapatkan Kunci API Hugging Face", - "huggingFaceApiKey": "Kunci API Hugging Face", - "huggingFaceModelId": "ID Model", "getGeminiApiKey": "Dapatkan Gemini API Key", - "huggingFaceLoading": "Memuat...", - "huggingFaceModelsCount": "({{count}} model)", - "huggingFaceSelectModel": "Pilih model...", - "huggingFaceSearchModels": "Cari model...", - "huggingFaceNoModelsFound": "Tidak ada model ditemukan", - "huggingFaceProvider": "Penyedia", - "huggingFaceProviderAuto": "Otomatis", - "huggingFaceSelectProvider": "Pilih penyedia...", - "huggingFaceSearchProviders": "Cari penyedia...", - "huggingFaceNoProvidersFound": "Tidak ada penyedia ditemukan", "openAiApiKey": "OpenAI API Key", "apiKey": "API Key", "openAiBaseUrl": "Base URL", @@ -491,10 +465,6 @@ "description": "Ollama memungkinkan kamu menjalankan model secara lokal 
di komputer. Untuk instruksi cara memulai, lihat panduan quickstart mereka.", "warning": "Catatan: Roo Code menggunakan prompt kompleks dan bekerja terbaik dengan model Claude. Model yang kurang mampu mungkin tidak bekerja seperti yang diharapkan." }, - "unboundApiKey": "Unbound API Key", - "getUnboundApiKey": "Dapatkan Unbound API Key", - "unboundRefreshModelsSuccess": "Daftar model diperbarui! Kamu sekarang dapat memilih dari model terbaru.", - "unboundInvalidApiKey": "API key tidak valid. Silakan periksa API key kamu dan coba lagi.", "roo": { "authenticatedMessage": "Terautentikasi dengan aman melalui akun Roo Code Cloud Anda.", "connectButton": "Hubungkan ke Roo Code Cloud" diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index 5478568ebe9..ab0d2d14220 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "Estende la finestra di contesto a 1 milione di token per Claude Sonnet 4 / 4.5 / Claude Opus 4.6", "basetenApiKey": "Chiave API Baseten", "getBasetenApiKey": "Ottieni chiave API Baseten", - "cerebrasApiKey": "Chiave API Cerebras", - "getCerebrasApiKey": "Ottieni chiave API Cerebras", - "chutesApiKey": "Chiave API Chutes", - "getChutesApiKey": "Ottieni chiave API Chutes", "fireworksApiKey": "Chiave API Fireworks", "getFireworksApiKey": "Ottieni chiave API Fireworks", - "featherlessApiKey": "Chiave API Featherless", - "getFeatherlessApiKey": "Ottieni chiave API Featherless", - "ioIntelligenceApiKey": "Chiave API IO Intelligence", - "ioIntelligenceApiKeyPlaceholder": "Inserisci la tua chiave API IO Intelligence", - "getIoIntelligenceApiKey": "Ottieni chiave API IO Intelligence", "deepSeekApiKey": "Chiave API DeepSeek", "getDeepSeekApiKey": "Ottieni chiave API DeepSeek", - "doubaoApiKey": "Chiave API Doubao", - "getDoubaoApiKey": "Ottieni chiave API Doubao", "moonshotApiKey": "Chiave API Moonshot", "getMoonshotApiKey": "Ottieni chiave API Moonshot", "moonshotBaseUrl": "Punto di ingresso Moonshot", @@ -403,23 +392,8 @@ "getMiniMaxApiKey": "Ottieni chiave API MiniMax", "minimaxBaseUrl": "Punto di ingresso MiniMax", "geminiApiKey": "Chiave API Gemini", - "getGroqApiKey": "Ottieni chiave API Groq", - "groqApiKey": "Chiave API Groq", "getSambaNovaApiKey": "Ottieni chiave API SambaNova", "sambaNovaApiKey": "Chiave API SambaNova", - "getHuggingFaceApiKey": "Ottieni chiave API Hugging Face", - "huggingFaceApiKey": "Chiave API Hugging Face", - "huggingFaceModelId": "ID modello", - "huggingFaceLoading": "Caricamento...", - "huggingFaceModelsCount": "({{count}} modelli)", - "huggingFaceSelectModel": "Seleziona un modello...", - "huggingFaceSearchModels": "Cerca modelli...", - "huggingFaceNoModelsFound": "Nessun modello trovato", - "huggingFaceProvider": "Provider", - "huggingFaceProviderAuto": "Automatico", - "huggingFaceSelectProvider": "Seleziona un provider...", - "huggingFaceSearchProviders": "Cerca provider...", - "huggingFaceNoProvidersFound": "Nessun provider trovato", "getGeminiApiKey": "Ottieni chiave API Gemini", "openAiApiKey": "Chiave API OpenAI", "apiKey": "Chiave API", @@ -491,10 +465,6 @@ "description": "Ollama ti permette di eseguire modelli localmente sul tuo computer. Per iniziare, consulta la guida rapida.", "warning": "Nota: Roo Code utiliza prompt complessi e funziona meglio con i modelli Claude. I modelli con capacità inferiori potrebbero non funzionare come previsto." 
}, - "unboundApiKey": "Chiave API Unbound", - "getUnboundApiKey": "Ottieni chiave API Unbound", - "unboundRefreshModelsSuccess": "Lista dei modelli aggiornata! Ora puoi selezionare tra gli ultimi modelli.", - "unboundInvalidApiKey": "Chiave API non valida. Controlla la tua chiave API e riprova.", "roo": { "authenticatedMessage": "Autenticato in modo sicuro tramite il tuo account Roo Code Cloud.", "connectButton": "Connetti a Roo Code Cloud" diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index c33fdd85e24..109f53afe64 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "Claude Sonnet 4 / 4.5 / Claude Opus 4.6のコンテキストウィンドウを100万トークンに拡張します", "basetenApiKey": "Baseten APIキー", "getBasetenApiKey": "Baseten APIキーを取得", - "cerebrasApiKey": "Cerebras APIキー", - "getCerebrasApiKey": "Cerebras APIキーを取得", - "chutesApiKey": "Chutes APIキー", - "getChutesApiKey": "Chutes APIキーを取得", "fireworksApiKey": "Fireworks APIキー", "getFireworksApiKey": "Fireworks APIキーを取得", - "featherlessApiKey": "Featherless APIキー", - "getFeatherlessApiKey": "Featherless APIキーを取得", - "ioIntelligenceApiKey": "IO Intelligence APIキー", - "ioIntelligenceApiKeyPlaceholder": "IO Intelligence APIキーを入力してください", - "getIoIntelligenceApiKey": "IO Intelligence APIキーを取得", "deepSeekApiKey": "DeepSeek APIキー", "getDeepSeekApiKey": "DeepSeek APIキーを取得", - "doubaoApiKey": "Doubao APIキー", - "getDoubaoApiKey": "Doubao APIキーを取得", "moonshotApiKey": "Moonshot APIキー", "getMoonshotApiKey": "Moonshot APIキーを取得", "moonshotBaseUrl": "Moonshot エントリーポイント", @@ -403,23 +392,8 @@ "getMiniMaxApiKey": "MiniMax APIキーを取得", "minimaxBaseUrl": "MiniMax エントリーポイント", "geminiApiKey": "Gemini APIキー", - "getGroqApiKey": "Groq APIキーを取得", - "groqApiKey": "Groq APIキー", "getSambaNovaApiKey": "SambaNova APIキーを取得", "sambaNovaApiKey": "SambaNova APIキー", - "getHuggingFaceApiKey": "Hugging Face APIキーを取得", - "huggingFaceApiKey": "Hugging Face APIキー", - "huggingFaceModelId": "モデルID", - "huggingFaceLoading": "読み込み中...", - "huggingFaceModelsCount": "({{count}}個のモデル)", - "huggingFaceSelectModel": "モデルを選択...", - "huggingFaceSearchModels": "モデルを検索...", - "huggingFaceNoModelsFound": "モデルが見つかりません", - "huggingFaceProvider": "プロバイダー", - "huggingFaceProviderAuto": "自動", - "huggingFaceSelectProvider": "プロバイダーを選択...", - "huggingFaceSearchProviders": "プロバイダーを検索...", - "huggingFaceNoProvidersFound": "プロバイダーが見つかりません", "getGeminiApiKey": "Gemini APIキーを取得", "openAiApiKey": "OpenAI APIキー", "apiKey": "APIキー", @@ -491,10 +465,6 @@ "description": "Ollamaを使用すると、ローカルコンピューターでモデルを実行できます。始め方については、クイックスタートガイドをご覧ください。", "warning": "注意:Roo Codeは複雑なプロンプトを使用し、Claudeモデルで最適に動作します。能力の低いモデルは期待通りに動作しない場合があります。" }, - "unboundApiKey": "Unbound APIキー", - "getUnboundApiKey": "Unbound APIキーを取得", - "unboundRefreshModelsSuccess": "モデルリストが更新されました!最新のモデルから選択できます。", - "unboundInvalidApiKey": "無効なAPIキーです。APIキーを確認して、もう一度お試しください。", "roo": { "authenticatedMessage": "Roo Code Cloudアカウントを通じて安全に認証されています。", "connectButton": "Roo Code Cloudに接続" diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 146c6044ba9..2595528887a 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "Claude Sonnet 4 / 4.5 / Claude Opus 4.6의 컨텍스트 창을 100만 토큰으로 확장", "basetenApiKey": "Baseten API 키", "getBasetenApiKey": "Baseten API 키 가져오기", - 
"cerebrasApiKey": "Cerebras API 키", - "getCerebrasApiKey": "Cerebras API 키 가져오기", - "chutesApiKey": "Chutes API 키", - "getChutesApiKey": "Chutes API 키 받기", "fireworksApiKey": "Fireworks API 키", "getFireworksApiKey": "Fireworks API 키 받기", - "featherlessApiKey": "Featherless API 키", - "getFeatherlessApiKey": "Featherless API 키 받기", - "ioIntelligenceApiKey": "IO Intelligence API 키", - "ioIntelligenceApiKeyPlaceholder": "IO Intelligence API 키를 입력하세요", - "getIoIntelligenceApiKey": "IO Intelligence API 키 받기", "deepSeekApiKey": "DeepSeek API 키", "getDeepSeekApiKey": "DeepSeek API 키 받기", - "doubaoApiKey": "Doubao API 키", - "getDoubaoApiKey": "Doubao API 키 받기", "moonshotApiKey": "Moonshot API 키", "getMoonshotApiKey": "Moonshot API 키 받기", "moonshotBaseUrl": "Moonshot 엔트리포인트", @@ -403,24 +392,9 @@ "getMiniMaxApiKey": "MiniMax API 키 받기", "minimaxBaseUrl": "MiniMax 엔트리포인트", "geminiApiKey": "Gemini API 키", - "getGroqApiKey": "Groq API 키 받기", - "groqApiKey": "Groq API 키", "getSambaNovaApiKey": "SambaNova API 키 받기", "sambaNovaApiKey": "SambaNova API 키", "getGeminiApiKey": "Gemini API 키 받기", - "getHuggingFaceApiKey": "Hugging Face API 키 받기", - "huggingFaceApiKey": "Hugging Face API 키", - "huggingFaceModelId": "모델 ID", - "huggingFaceLoading": "로딩 중...", - "huggingFaceModelsCount": "({{count}}개 모델)", - "huggingFaceSelectModel": "모델 선택...", - "huggingFaceSearchModels": "모델 검색...", - "huggingFaceNoModelsFound": "모델을 찾을 수 없음", - "huggingFaceProvider": "제공자", - "huggingFaceProviderAuto": "자동", - "huggingFaceSelectProvider": "제공자 선택...", - "huggingFaceSearchProviders": "제공자 검색...", - "huggingFaceNoProvidersFound": "제공자를 찾을 수 없음", "apiKey": "API 키", "openAiApiKey": "OpenAI API 키", "openAiBaseUrl": "기본 URL", @@ -491,10 +465,6 @@ "description": "Ollama를 사용하면 컴퓨터에서 로컬로 모델을 실행할 수 있습니다. 시작하는 방법은 빠른 시작 가이드를 참조하세요.", "warning": "참고: Roo Code는 복잡한 프롬프트를 사용하며 Claude 모델에서 가장 잘 작동합니다. 덜 강력한 모델은 예상대로 작동하지 않을 수 있습니다." }, - "unboundApiKey": "Unbound API 키", - "getUnboundApiKey": "Unbound API 키 받기", - "unboundRefreshModelsSuccess": "모델 목록이 업데이트되었습니다! 이제 최신 모델에서 선택할 수 있습니다.", - "unboundInvalidApiKey": "잘못된 API 키입니다. 
API 키를 확인하고 다시 시도해 주세요.", "roo": { "authenticatedMessage": "Roo Code Cloud 계정을 통해 안전하게 인증되었습니다.", "connectButton": "Roo Code Cloud에 연결" diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index 942b2d6bd8d..da1c5861bc1 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "Breidt het contextvenster uit tot 1 miljoen tokens voor Claude Sonnet 4 / 4.5 / Claude Opus 4.6", "basetenApiKey": "Baseten API-sleutel", "getBasetenApiKey": "Baseten API-sleutel verkrijgen", - "cerebrasApiKey": "Cerebras API-sleutel", - "getCerebrasApiKey": "Cerebras API-sleutel verkrijgen", - "chutesApiKey": "Chutes API-sleutel", - "getChutesApiKey": "Chutes API-sleutel ophalen", "fireworksApiKey": "Fireworks API-sleutel", "getFireworksApiKey": "Fireworks API-sleutel ophalen", - "featherlessApiKey": "Featherless API-sleutel", - "getFeatherlessApiKey": "Featherless API-sleutel ophalen", - "ioIntelligenceApiKey": "IO Intelligence API-sleutel", - "ioIntelligenceApiKeyPlaceholder": "Voer je IO Intelligence API-sleutel in", - "getIoIntelligenceApiKey": "IO Intelligence API-sleutel ophalen", "deepSeekApiKey": "DeepSeek API-sleutel", "getDeepSeekApiKey": "DeepSeek API-sleutel ophalen", - "doubaoApiKey": "Doubao API-sleutel", - "getDoubaoApiKey": "Doubao API-sleutel ophalen", "moonshotApiKey": "Moonshot API-sleutel", "getMoonshotApiKey": "Moonshot API-sleutel ophalen", "moonshotBaseUrl": "Moonshot-ingangspunt", @@ -403,24 +392,9 @@ "getMiniMaxApiKey": "MiniMax API-sleutel ophalen", "minimaxBaseUrl": "MiniMax-ingangspunt", "geminiApiKey": "Gemini API-sleutel", - "getGroqApiKey": "Groq API-sleutel ophalen", - "groqApiKey": "Groq API-sleutel", "getSambaNovaApiKey": "SambaNova API-sleutel ophalen", "sambaNovaApiKey": "SambaNova API-sleutel", "getGeminiApiKey": "Gemini API-sleutel ophalen", - "getHuggingFaceApiKey": "Hugging Face API-sleutel ophalen", - "huggingFaceApiKey": "Hugging Face API-sleutel", - "huggingFaceModelId": "Model ID", - "huggingFaceLoading": "Laden...", - "huggingFaceModelsCount": "({{count}} modellen)", - "huggingFaceSelectModel": "Selecteer een model...", - "huggingFaceSearchModels": "Zoek modellen...", - "huggingFaceNoModelsFound": "Geen modellen gevonden", - "huggingFaceProvider": "Provider", - "huggingFaceProviderAuto": "Automatisch", - "huggingFaceSelectProvider": "Selecteer een provider...", - "huggingFaceSearchProviders": "Zoek providers...", - "huggingFaceNoProvidersFound": "Geen providers gevonden", "apiKey": "API-sleutel", "openAiApiKey": "OpenAI API-sleutel", "openAiBaseUrl": "Basis-URL", @@ -491,10 +465,6 @@ "description": "Ollama laat je modellen lokaal op je computer draaien. Zie hun quickstart-gids voor instructies.", "warning": "Let op: Roo Code gebruikt complexe prompts en werkt het beste met Claude-modellen. Minder krachtige modellen werken mogelijk niet zoals verwacht." }, - "unboundApiKey": "Unbound API-sleutel", - "getUnboundApiKey": "Unbound API-sleutel ophalen", - "unboundRefreshModelsSuccess": "Modellenlijst bijgewerkt! U kunt nu kiezen uit de nieuwste modellen.", - "unboundInvalidApiKey": "Ongeldige API-sleutel. 
Controleer uw API-sleutel en probeer het opnieuw.", "roo": { "authenticatedMessage": "Veilig geauthenticeerd via je Roo Code Cloud-account.", "connectButton": "Verbinden met Roo Code Cloud" diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index 7a3632ab489..c587d582848 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "Rozszerza okno kontekstowe do 1 miliona tokenów dla Claude Sonnet 4 / 4.5 / Claude Opus 4.6", "basetenApiKey": "Klucz API Baseten", "getBasetenApiKey": "Uzyskaj klucz API Baseten", - "cerebrasApiKey": "Klucz API Cerebras", - "getCerebrasApiKey": "Pobierz klucz API Cerebras", - "chutesApiKey": "Klucz API Chutes", - "getChutesApiKey": "Uzyskaj klucz API Chutes", "fireworksApiKey": "Klucz API Fireworks", "getFireworksApiKey": "Uzyskaj klucz API Fireworks", - "featherlessApiKey": "Klucz API Featherless", - "getFeatherlessApiKey": "Uzyskaj klucz API Featherless", - "ioIntelligenceApiKey": "Klucz API IO Intelligence", - "ioIntelligenceApiKeyPlaceholder": "Wprowadź swój klucz API IO Intelligence", - "getIoIntelligenceApiKey": "Uzyskaj klucz API IO Intelligence", "deepSeekApiKey": "Klucz API DeepSeek", "getDeepSeekApiKey": "Uzyskaj klucz API DeepSeek", - "doubaoApiKey": "Klucz API Doubao", - "getDoubaoApiKey": "Uzyskaj klucz API Doubao", "moonshotApiKey": "Klucz API Moonshot", "getMoonshotApiKey": "Uzyskaj klucz API Moonshot", "moonshotBaseUrl": "Punkt wejścia Moonshot", @@ -403,24 +392,9 @@ "getMiniMaxApiKey": "Uzyskaj klucz API MiniMax", "minimaxBaseUrl": "Punkt wejścia MiniMax", "geminiApiKey": "Klucz API Gemini", - "getGroqApiKey": "Uzyskaj klucz API Groq", - "groqApiKey": "Klucz API Groq", "getSambaNovaApiKey": "Uzyskaj klucz API SambaNova", "sambaNovaApiKey": "Klucz API SambaNova", "getGeminiApiKey": "Uzyskaj klucz API Gemini", - "getHuggingFaceApiKey": "Uzyskaj klucz API Hugging Face", - "huggingFaceApiKey": "Klucz API Hugging Face", - "huggingFaceModelId": "ID modelu", - "huggingFaceLoading": "Ładowanie...", - "huggingFaceModelsCount": "({{count}} modeli)", - "huggingFaceSelectModel": "Wybierz model...", - "huggingFaceSearchModels": "Szukaj modeli...", - "huggingFaceNoModelsFound": "Nie znaleziono modeli", - "huggingFaceProvider": "Dostawca", - "huggingFaceProviderAuto": "Automatyczny", - "huggingFaceSelectProvider": "Wybierz dostawcę...", - "huggingFaceSearchProviders": "Szukaj dostawców...", - "huggingFaceNoProvidersFound": "Nie znaleziono dostawców", "apiKey": "Klucz API", "openAiApiKey": "Klucz API OpenAI", "openAiBaseUrl": "URL bazowy", @@ -491,10 +465,6 @@ "description": "Ollama pozwala na lokalne uruchamianie modeli na twoim komputerze. Aby rozpocząć, zapoznaj się z przewodnikiem szybkiego startu.", "warning": "Uwaga: Roo Code używa złożonych podpowiedzi i działa najlepiej z modelami Claude. Modele o niższych możliwościach mogą nie działać zgodnie z oczekiwaniami." }, - "unboundApiKey": "Klucz API Unbound", - "getUnboundApiKey": "Uzyskaj klucz API Unbound", - "unboundRefreshModelsSuccess": "Lista modeli zaktualizowana! Możesz teraz wybierać spośród najnowszych modeli.", - "unboundInvalidApiKey": "Nieprawidłowy klucz API. 
Sprawdź swój klucz API i spróbuj ponownie.", "roo": { "authenticatedMessage": "Bezpiecznie uwierzytelniony przez twoje konto Roo Code Cloud.", "connectButton": "Połącz z Roo Code Cloud" diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index b82cfa8ee8f..ee848b55c70 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "Estende a janela de contexto para 1 milhão de tokens para o Claude Sonnet 4 / 4.5 / Claude Opus 4.6", "basetenApiKey": "Chave de API Baseten", "getBasetenApiKey": "Obter chave de API Baseten", - "cerebrasApiKey": "Chave de API Cerebras", - "getCerebrasApiKey": "Obter chave de API Cerebras", - "chutesApiKey": "Chave de API Chutes", - "getChutesApiKey": "Obter chave de API Chutes", "fireworksApiKey": "Chave de API Fireworks", "getFireworksApiKey": "Obter chave de API Fireworks", - "featherlessApiKey": "Chave de API Featherless", - "getFeatherlessApiKey": "Obter chave de API Featherless", - "ioIntelligenceApiKey": "Chave de API IO Intelligence", - "ioIntelligenceApiKeyPlaceholder": "Insira sua chave de API da IO Intelligence", - "getIoIntelligenceApiKey": "Obter chave de API IO Intelligence", "deepSeekApiKey": "Chave de API DeepSeek", "getDeepSeekApiKey": "Obter chave de API DeepSeek", - "doubaoApiKey": "Chave de API Doubao", - "getDoubaoApiKey": "Obter chave de API Doubao", "moonshotApiKey": "Chave de API Moonshot", "getMoonshotApiKey": "Obter chave de API Moonshot", "moonshotBaseUrl": "Ponto de entrada Moonshot", @@ -403,24 +392,9 @@ "getMiniMaxApiKey": "Obter chave de API MiniMax", "minimaxBaseUrl": "Ponto de entrada MiniMax", "geminiApiKey": "Chave de API Gemini", - "getGroqApiKey": "Obter chave de API Groq", - "groqApiKey": "Chave de API Groq", "getSambaNovaApiKey": "Obter chave de API SambaNova", "sambaNovaApiKey": "Chave de API SambaNova", "getGeminiApiKey": "Obter chave de API Gemini", - "getHuggingFaceApiKey": "Obter chave de API Hugging Face", - "huggingFaceApiKey": "Chave de API Hugging Face", - "huggingFaceModelId": "ID do modelo", - "huggingFaceLoading": "Carregando...", - "huggingFaceModelsCount": "({{count}} modelos)", - "huggingFaceSelectModel": "Selecionar um modelo...", - "huggingFaceSearchModels": "Buscar modelos...", - "huggingFaceNoModelsFound": "Nenhum modelo encontrado", - "huggingFaceProvider": "Provedor", - "huggingFaceProviderAuto": "Automático", - "huggingFaceSelectProvider": "Selecionar um provedor...", - "huggingFaceSearchProviders": "Buscar provedores...", - "huggingFaceNoProvidersFound": "Nenhum provedor encontrado", "apiKey": "Chave de API", "openAiApiKey": "Chave de API OpenAI", "openAiBaseUrl": "URL Base", @@ -491,10 +465,6 @@ "description": "O Ollama permite que você execute modelos localmente em seu computador. Para instruções sobre como começar, veja o guia de início rápido deles.", "warning": "Nota: O Roo Code usa prompts complexos e funciona melhor com modelos Claude. Modelos menos capazes podem não funcionar como esperado." }, - "unboundApiKey": "Chave de API Unbound", - "getUnboundApiKey": "Obter chave de API Unbound", - "unboundRefreshModelsSuccess": "Lista de modelos atualizada! Agora você pode selecionar entre os modelos mais recentes.", - "unboundInvalidApiKey": "Chave API inválida. 
Por favor, verifique sua chave API e tente novamente.", "roo": { "authenticatedMessage": "Autenticado com segurança através da sua conta Roo Code Cloud.", "connectButton": "Conectar ao Roo Code Cloud" diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index 93cbaa83705..ee0638d00d3 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "Расширяет контекстное окно до 1 миллиона токенов для Claude Sonnet 4 / 4.5 / Claude Opus 4.6", "basetenApiKey": "Baseten API-ключ", "getBasetenApiKey": "Получить Baseten API-ключ", - "cerebrasApiKey": "Cerebras API-ключ", - "getCerebrasApiKey": "Получить Cerebras API-ключ", - "chutesApiKey": "Chutes API-ключ", - "getChutesApiKey": "Получить Chutes API-ключ", "fireworksApiKey": "Fireworks API-ключ", "getFireworksApiKey": "Получить Fireworks API-ключ", - "featherlessApiKey": "Featherless API-ключ", - "getFeatherlessApiKey": "Получить Featherless API-ключ", - "ioIntelligenceApiKey": "IO Intelligence API-ключ", - "ioIntelligenceApiKeyPlaceholder": "Введите свой ключ API IO Intelligence", - "getIoIntelligenceApiKey": "Получить IO Intelligence API-ключ", "deepSeekApiKey": "DeepSeek API-ключ", "getDeepSeekApiKey": "Получить DeepSeek API-ключ", - "doubaoApiKey": "Doubao API-ключ", - "getDoubaoApiKey": "Получить Doubao API-ключ", "moonshotApiKey": "Moonshot API-ключ", "getMoonshotApiKey": "Получить Moonshot API-ключ", "moonshotBaseUrl": "Точка входа Moonshot", @@ -403,24 +392,9 @@ "getMiniMaxApiKey": "Получить MiniMax API-ключ", "minimaxBaseUrl": "Точка входа MiniMax", "geminiApiKey": "Gemini API-ключ", - "getGroqApiKey": "Получить Groq API-ключ", - "groqApiKey": "Groq API-ключ", "getSambaNovaApiKey": "Получить SambaNova API-ключ", "sambaNovaApiKey": "SambaNova API-ключ", "getGeminiApiKey": "Получить Gemini API-ключ", - "getHuggingFaceApiKey": "Получить Hugging Face API-ключ", - "huggingFaceApiKey": "Hugging Face API-ключ", - "huggingFaceModelId": "ID модели", - "huggingFaceLoading": "Загрузка...", - "huggingFaceModelsCount": "({{count}} моделей)", - "huggingFaceSelectModel": "Выберите модель...", - "huggingFaceSearchModels": "Поиск моделей...", - "huggingFaceNoModelsFound": "Модели не найдены", - "huggingFaceProvider": "Провайдер", - "huggingFaceProviderAuto": "Автоматически", - "huggingFaceSelectProvider": "Выберите провайдера...", - "huggingFaceSearchProviders": "Поиск провайдеров...", - "huggingFaceNoProvidersFound": "Провайдеры не найдены", "apiKey": "API-ключ", "openAiApiKey": "OpenAI API-ключ", "openAiBaseUrl": "Базовый URL", @@ -491,10 +465,6 @@ "description": "Ollama позволяет запускать модели локально на вашем компьютере. Для начала ознакомьтесь с кратким руководством.", "warning": "Примечание: Roo Code использует сложные подсказки и лучше всего работает с моделями Claude. Менее мощные модели могут работать некорректно." }, - "unboundApiKey": "Unbound API-ключ", - "getUnboundApiKey": "Получить Unbound API-ключ", - "unboundRefreshModelsSuccess": "Список моделей обновлен! Теперь вы можете выбрать из последних моделей.", - "unboundInvalidApiKey": "Недействительный API-ключ. 
Пожалуйста, проверьте ваш API-ключ и попробуйте снова.", "roo": { "authenticatedMessage": "Безопасно аутентифицирован через твой аккаунт Roo Code Cloud.", "connectButton": "Подключиться к Roo Code Cloud" diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 714d3f98b8e..7016ad5f702 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "Claude Sonnet 4 / 4.5 / Claude Opus 4.6 için bağlam penceresini 1 milyon token'a genişletir", "basetenApiKey": "Baseten API Anahtarı", "getBasetenApiKey": "Baseten API Anahtarı Al", - "cerebrasApiKey": "Cerebras API Anahtarı", - "getCerebrasApiKey": "Cerebras API Anahtarını Al", - "chutesApiKey": "Chutes API Anahtarı", - "getChutesApiKey": "Chutes API Anahtarı Al", "fireworksApiKey": "Fireworks API Anahtarı", "getFireworksApiKey": "Fireworks API Anahtarı Al", - "featherlessApiKey": "Featherless API Anahtarı", - "getFeatherlessApiKey": "Featherless API Anahtarı Al", - "ioIntelligenceApiKey": "IO Intelligence API Anahtarı", - "ioIntelligenceApiKeyPlaceholder": "IO Intelligence API anahtarınızı girin", - "getIoIntelligenceApiKey": "IO Intelligence API Anahtarı Al", "deepSeekApiKey": "DeepSeek API Anahtarı", "getDeepSeekApiKey": "DeepSeek API Anahtarı Al", - "doubaoApiKey": "Doubao API Anahtarı", - "getDoubaoApiKey": "Doubao API Anahtarı Al", "moonshotApiKey": "Moonshot API Anahtarı", "getMoonshotApiKey": "Moonshot API Anahtarı Al", "moonshotBaseUrl": "Moonshot Giriş Noktası", @@ -403,23 +392,8 @@ "getMiniMaxApiKey": "MiniMax API Anahtarı Al", "minimaxBaseUrl": "MiniMax Giriş Noktası", "geminiApiKey": "Gemini API Anahtarı", - "getGroqApiKey": "Groq API Anahtarı Al", - "groqApiKey": "Groq API Anahtarı", "getSambaNovaApiKey": "SambaNova API Anahtarı Al", "sambaNovaApiKey": "SambaNova API Anahtarı", - "getHuggingFaceApiKey": "Hugging Face API Anahtarı Al", - "huggingFaceApiKey": "Hugging Face API Anahtarı", - "huggingFaceModelId": "Model ID", - "huggingFaceLoading": "Yükleniyor...", - "huggingFaceModelsCount": "({{count}} model)", - "huggingFaceSelectModel": "Bir model seç...", - "huggingFaceSearchModels": "Modelleri ara...", - "huggingFaceNoModelsFound": "Model bulunamadı", - "huggingFaceProvider": "Sağlayıcı", - "huggingFaceProviderAuto": "Otomatik", - "huggingFaceSelectProvider": "Bir sağlayıcı seç...", - "huggingFaceSearchProviders": "Sağlayıcıları ara...", - "huggingFaceNoProvidersFound": "Sağlayıcı bulunamadı", "getGeminiApiKey": "Gemini API Anahtarı Al", "openAiApiKey": "OpenAI API Anahtarı", "apiKey": "API Anahtarı", @@ -491,10 +465,6 @@ "description": "Ollama, modelleri bilgisayarınızda yerel olarak çalıştırmanıza olanak tanır. Başlamak için hızlı başlangıç kılavuzlarına bakın.", "warning": "Not: Roo Code karmaşık istemler kullanır ve Claude modelleriyle en iyi şekilde çalışır. Daha az yetenekli modeller beklendiği gibi çalışmayabilir." }, - "unboundApiKey": "Unbound API Anahtarı", - "getUnboundApiKey": "Unbound API Anahtarı Al", - "unboundRefreshModelsSuccess": "Model listesi güncellendi! Artık en son modeller arasından seçim yapabilirsiniz.", - "unboundInvalidApiKey": "Geçersiz API anahtarı. 
Lütfen API anahtarınızı kontrol edin ve tekrar deneyin.", "roo": { "authenticatedMessage": "Roo Code Cloud hesabın üzerinden güvenli bir şekilde kimlik doğrulandı.", "connectButton": "Roo Code Cloud'a Bağlan" diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index 15465c60f33..f0d986c86c9 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "Mở rộng cửa sổ ngữ cảnh lên 1 triệu token cho Claude Sonnet 4 / 4.5 / Claude Opus 4.6", "basetenApiKey": "Khóa API Baseten", "getBasetenApiKey": "Lấy khóa API Baseten", - "cerebrasApiKey": "Khóa API Cerebras", - "getCerebrasApiKey": "Lấy khóa API Cerebras", - "chutesApiKey": "Khóa API Chutes", - "getChutesApiKey": "Lấy khóa API Chutes", "fireworksApiKey": "Khóa API Fireworks", "getFireworksApiKey": "Lấy khóa API Fireworks", - "featherlessApiKey": "Khóa API Featherless", - "getFeatherlessApiKey": "Lấy khóa API Featherless", - "ioIntelligenceApiKey": "Khóa API IO Intelligence", - "ioIntelligenceApiKeyPlaceholder": "Nhập khóa API IO Intelligence của bạn", - "getIoIntelligenceApiKey": "Lấy khóa API IO Intelligence", "deepSeekApiKey": "Khóa API DeepSeek", "getDeepSeekApiKey": "Lấy khóa API DeepSeek", - "doubaoApiKey": "Khóa API Doubao", - "getDoubaoApiKey": "Lấy khóa API Doubao", "moonshotApiKey": "Khóa API Moonshot", "getMoonshotApiKey": "Lấy khóa API Moonshot", "moonshotBaseUrl": "Điểm vào Moonshot", @@ -403,23 +392,8 @@ "getMiniMaxApiKey": "Lấy khóa API MiniMax", "minimaxBaseUrl": "Điểm vào MiniMax", "geminiApiKey": "Khóa API Gemini", - "getGroqApiKey": "Lấy khóa API Groq", - "groqApiKey": "Khóa API Groq", "getSambaNovaApiKey": "Lấy khóa API SambaNova", "sambaNovaApiKey": "Khóa API SambaNova", - "getHuggingFaceApiKey": "Lấy Khóa API Hugging Face", - "huggingFaceApiKey": "Khóa API Hugging Face", - "huggingFaceModelId": "ID Mô hình", - "huggingFaceLoading": "Đang tải...", - "huggingFaceModelsCount": "({{count}} mô hình)", - "huggingFaceSelectModel": "Chọn một mô hình...", - "huggingFaceSearchModels": "Tìm kiếm mô hình...", - "huggingFaceNoModelsFound": "Không tìm thấy mô hình", - "huggingFaceProvider": "Nhà cung cấp", - "huggingFaceProviderAuto": "Tự động", - "huggingFaceSelectProvider": "Chọn một nhà cung cấp...", - "huggingFaceSearchProviders": "Tìm kiếm nhà cung cấp...", - "huggingFaceNoProvidersFound": "Không tìm thấy nhà cung cấp", "getGeminiApiKey": "Lấy khóa API Gemini", "openAiApiKey": "Khóa API OpenAI", "apiKey": "Khóa API", @@ -491,10 +465,6 @@ "description": "Ollama cho phép bạn chạy các mô hình cục bộ trên máy tính của bạn. Để biết hướng dẫn về cách bắt đầu, xem hướng dẫn nhanh của họ.", "warning": "Lưu ý: Roo Code sử dụng các lời nhắc phức tạp và hoạt động tốt nhất với các mô hình Claude. Các mô hình kém mạnh hơn có thể không hoạt động như mong đợi." }, - "unboundApiKey": "Khóa API Unbound", - "getUnboundApiKey": "Lấy khóa API Unbound", - "unboundRefreshModelsSuccess": "Đã cập nhật danh sách mô hình! Bây giờ bạn có thể chọn từ các mô hình mới nhất.", - "unboundInvalidApiKey": "Khóa API không hợp lệ. 
Vui lòng kiểm tra khóa API của bạn và thử lại.", "roo": { "authenticatedMessage": "Đã xác thực an toàn thông qua tài khoản Roo Code Cloud của bạn.", "connectButton": "Kết nối với Roo Code Cloud" diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 771bf7ca7e3..b35b4314d77 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -377,21 +377,10 @@ "vertex1MContextBetaDescription": "为 Claude Sonnet 4 / 4.5 / Claude Opus 4.6 将上下文窗口扩展至 100 万个 token", "basetenApiKey": "Baseten API 密钥", "getBasetenApiKey": "获取 Baseten API 密钥", - "cerebrasApiKey": "Cerebras API 密钥", - "getCerebrasApiKey": "获取 Cerebras API 密钥", - "chutesApiKey": "Chutes API 密钥", - "getChutesApiKey": "获取 Chutes API 密钥", "fireworksApiKey": "Fireworks API 密钥", "getFireworksApiKey": "获取 Fireworks API 密钥", - "featherlessApiKey": "Featherless API 密钥", - "getFeatherlessApiKey": "获取 Featherless API 密钥", - "ioIntelligenceApiKey": "IO Intelligence API 密钥", - "ioIntelligenceApiKeyPlaceholder": "输入您的 IO Intelligence API 密钥", - "getIoIntelligenceApiKey": "获取 IO Intelligence API 密钥", "deepSeekApiKey": "DeepSeek API 密钥", "getDeepSeekApiKey": "获取 DeepSeek API 密钥", - "doubaoApiKey": "豆包 API 密钥", - "getDoubaoApiKey": "获取豆包 API 密钥", "moonshotApiKey": "Moonshot API 密钥", "getMoonshotApiKey": "获取 Moonshot API 密钥", "moonshotBaseUrl": "Moonshot 服务站点", @@ -403,23 +392,8 @@ "zaiEntrypoint": "Z AI 服务站点", "zaiEntrypointDescription": "请根据您的位置选择适当的 API 服务站点。如果您在中国,请选择 open.bigmodel.cn。否则,请选择 api.z.ai。", "geminiApiKey": "Gemini API 密钥", - "getGroqApiKey": "获取 Groq API 密钥", - "groqApiKey": "Groq API 密钥", "getSambaNovaApiKey": "获取 SambaNova API 密钥", "sambaNovaApiKey": "SambaNova API 密钥", - "getHuggingFaceApiKey": "获取 Hugging Face API 密钥", - "huggingFaceApiKey": "Hugging Face API 密钥", - "huggingFaceModelId": "模型 ID", - "huggingFaceLoading": "加载中...", - "huggingFaceModelsCount": "({{count}} 个模型)", - "huggingFaceSelectModel": "选择模型...", - "huggingFaceSearchModels": "搜索模型...", - "huggingFaceNoModelsFound": "未找到模型", - "huggingFaceProvider": "提供商", - "huggingFaceProviderAuto": "自动", - "huggingFaceSelectProvider": "选择提供商...", - "huggingFaceSearchProviders": "搜索提供商...", - "huggingFaceNoProvidersFound": "未找到提供商", "getGeminiApiKey": "获取 Gemini API 密钥", "openAiApiKey": "OpenAI API 密钥", "apiKey": "API 密钥", @@ -491,10 +465,6 @@ "description": "Ollama 允许您在本地计算机上运行模型。有关如何开始使用的说明,请参阅其快速入门指南。", "warning": "注意:Roo Code 使用复杂的提示,与 Claude 模型配合最佳。功能较弱的模型可能无法按预期工作。" }, - "unboundApiKey": "Unbound API 密钥", - "getUnboundApiKey": "获取 Unbound API 密钥", - "unboundRefreshModelsSuccess": "模型列表已更新!您现在可以从最新模型中选择。", - "unboundInvalidApiKey": "无效的API密钥。请检查您的API密钥并重试。", "roo": { "authenticatedMessage": "已通过 Roo Code Cloud 账户安全认证。", "connectButton": "连接到 Roo Code Cloud" diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 8c19327e1fc..1ba02bbb425 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -387,21 +387,10 @@ "vertex1MContextBetaDescription": "為 Claude Sonnet 4 / 4.5 / Claude Opus 4.6 將上下文視窗擴展至 100 萬個 token", "basetenApiKey": "Baseten API 金鑰", "getBasetenApiKey": "取得 Baseten API 金鑰", - "cerebrasApiKey": "Cerebras API 金鑰", - "getCerebrasApiKey": "取得 Cerebras API 金鑰", - "chutesApiKey": "Chutes API 金鑰", - "getChutesApiKey": "取得 Chutes API 金鑰", "fireworksApiKey": "Fireworks API 金鑰", "getFireworksApiKey": "取得 Fireworks API 金鑰", - 
"featherlessApiKey": "Featherless API 金鑰", - "getFeatherlessApiKey": "取得 Featherless API 金鑰", - "ioIntelligenceApiKey": "IO Intelligence API 金鑰", - "ioIntelligenceApiKeyPlaceholder": "輸入您的 IO Intelligence API 金鑰", - "getIoIntelligenceApiKey": "取得 IO Intelligence API 金鑰", "deepSeekApiKey": "DeepSeek API 金鑰", "getDeepSeekApiKey": "取得 DeepSeek API 金鑰", - "doubaoApiKey": "豆包 API 金鑰", - "getDoubaoApiKey": "取得豆包 API 金鑰", "moonshotApiKey": "Moonshot API 金鑰", "getMoonshotApiKey": "取得 Moonshot API 金鑰", "moonshotBaseUrl": "Moonshot 服務端點", @@ -413,23 +402,8 @@ "zaiEntrypoint": "Z AI 服務端點", "zaiEntrypointDescription": "請根據您的位置選擇適當的 API 服務端點。如果您在中國,請選擇 open.bigmodel.cn。否則,請選擇 api.z.ai。", "geminiApiKey": "Gemini API 金鑰", - "getGroqApiKey": "取得 Groq API 金鑰", - "groqApiKey": "Groq API 金鑰", "getSambaNovaApiKey": "取得 SambaNova API 金鑰", "sambaNovaApiKey": "SambaNova API 金鑰", - "getHuggingFaceApiKey": "取得 Hugging Face API 金鑰", - "huggingFaceApiKey": "Hugging Face API 金鑰", - "huggingFaceModelId": "模型 ID", - "huggingFaceLoading": "載入中...", - "huggingFaceModelsCount": "({{count}} 個模型)", - "huggingFaceSelectModel": "選擇模型...", - "huggingFaceSearchModels": "搜尋模型...", - "huggingFaceNoModelsFound": "找不到模型", - "huggingFaceProvider": "供應商", - "huggingFaceProviderAuto": "自動", - "huggingFaceSelectProvider": "選擇供應商...", - "huggingFaceSearchProviders": "搜尋供應商...", - "huggingFaceNoProvidersFound": "找不到供應商", "getGeminiApiKey": "取得 Gemini API 金鑰", "openAiApiKey": "OpenAI API 金鑰", "apiKey": "API 金鑰", @@ -501,10 +475,6 @@ "description": "Ollama 允許您在本機電腦執行模型。請參閱快速入門指南。", "warning": "注意:Roo Code 使用複雜提示詞,與 Claude 模型搭配最佳。功能較弱的模型可能無法正常運作。" }, - "unboundApiKey": "Unbound API 金鑰", - "getUnboundApiKey": "取得 Unbound API 金鑰", - "unboundRefreshModelsSuccess": "模型列表已更新!您現在可以從最新模型中選擇。", - "unboundInvalidApiKey": "無效的 API 金鑰。請檢查您的 API 金鑰並重試。", "roo": { "authenticatedMessage": "已透過 Roo Code Cloud 帳戶安全認證。", "connectButton": "連線到 Roo Code Cloud" diff --git a/webview-ui/src/utils/__tests__/validate.spec.ts b/webview-ui/src/utils/__tests__/validate.spec.ts index 09239b649ca..0a046adc54d 100644 --- a/webview-ui/src/utils/__tests__/validate.spec.ts +++ b/webview-ui/src/utils/__tests__/validate.spec.ts @@ -39,16 +39,11 @@ describe("Model Validation Functions", () => { }, }, requesty: {}, - unbound: {}, litellm: {}, ollama: {}, lmstudio: {}, - deepinfra: {}, - "io-intelligence": {}, "vercel-ai-gateway": {}, - huggingface: {}, roo: {}, - chutes: {}, } const allowAllOrganization: OrganizationAllowList = { diff --git a/webview-ui/src/utils/validate.ts b/webview-ui/src/utils/validate.ts index df50ca88432..bb57da32663 100644 --- a/webview-ui/src/utils/validate.ts +++ b/webview-ui/src/utils/validate.ts @@ -42,21 +42,11 @@ function validateModelsAndKeysProvided(apiConfiguration: ProviderSettings): stri return i18next.t("settings:validation.apiKey") } break - case "unbound": - if (!apiConfiguration.unboundApiKey) { - return i18next.t("settings:validation.apiKey") - } - break case "requesty": if (!apiConfiguration.requestyApiKey) { return i18next.t("settings:validation.apiKey") } break - case "deepinfra": - if (!apiConfiguration.deepInfraApiKey) { - return i18next.t("settings:validation.apiKey") - } - break case "litellm": if (!apiConfiguration.litellmApiKey) { return i18next.t("settings:validation.apiKey") @@ -112,34 +102,11 @@ function validateModelsAndKeysProvided(apiConfiguration: ProviderSettings): stri return i18next.t("settings:validation.modelSelector") } break - case "huggingface": - if (!apiConfiguration.huggingFaceApiKey) { - return 
i18next.t("settings:validation.apiKey") - } - if (!apiConfiguration.huggingFaceModelId) { - return i18next.t("settings:validation.modelId") - } - break - case "cerebras": - if (!apiConfiguration.cerebrasApiKey) { - return i18next.t("settings:validation.apiKey") - } - break case "fireworks": if (!apiConfiguration.fireworksApiKey) { return i18next.t("settings:validation.apiKey") } break - case "io-intelligence": - if (!apiConfiguration.ioIntelligenceApiKey) { - return i18next.t("settings:validation.apiKey") - } - break - case "featherless": - if (!apiConfiguration.featherlessApiKey) { - return i18next.t("settings:validation.apiKey") - } - break case "qwen-code": if (!apiConfiguration.qwenCodeOauthPath) { return i18next.t("settings:validation.qwenCodeOauthPath") From aebcaa263e5b7a9e4c8cde9261a0f932bd2b7407 Mon Sep 17 00:00:00 2001 From: Hannes Rudolph Date: Sat, 7 Feb 2026 19:43:44 -0700 Subject: [PATCH 2/9] feat: show retired-provider message for removed provider profiles Preserve API profiles that reference removed providers instead of silently stripping their apiProvider. When a user selects a profile configured for a retired provider, the settings UI now shows an empathetic message explaining the removal instead of the provider configuration form. - Add retiredProviderNames array and isRetiredProvider() helper to packages/types/src/provider-settings.ts - Update ProviderSettingsManager sanitization to preserve retired providers (only strip truly unknown values) - Update ContextProxy sanitization to preserve retired providers - Render retired-provider message in ApiOptions.tsx when selected provider is in the retired list - Add tests for sanitization, ContextProxy, and UI behavior --- packages/types/src/provider-settings.ts | 31 +- src/api/index.ts | 8 +- src/core/config/ContextProxy.ts | 21 +- src/core/config/ProviderSettingsManager.ts | 32 +- .../config/__tests__/ContextProxy.spec.ts | 58 +- .../__tests__/ProviderSettingsManager.spec.ts | 124 +++- src/core/task/Task.ts | 25 +- src/core/webview/ClineProvider.ts | 12 +- webview-ui/src/components/chat/ChatView.tsx | 5 + .../chat/RetiredProviderWarning.tsx | 34 + .../src/components/settings/ApiOptions.tsx | 610 ++++++++++-------- .../src/components/settings/ModelPicker.tsx | 11 +- .../settings/__tests__/ApiOptions.spec.tsx | 27 + .../components/ui/hooks/useSelectedModel.ts | 34 +- webview-ui/src/index.css | 4 + webview-ui/src/utils/validate.ts | 4 +- 16 files changed, 708 insertions(+), 332 deletions(-) create mode 100644 webview-ui/src/components/chat/RetiredProviderWarning.tsx diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index 31064be09b5..fef422666d2 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -129,6 +129,33 @@ export type ProviderName = z.infer<typeof providerNamesSchema> export const isProviderName = (key: unknown): key is ProviderName => typeof key === "string" && providerNames.includes(key as ProviderName) +/** + * RetiredProviderName + */ + +export const retiredProviderNames = [ + "cerebras", + "chutes", + "deepinfra", + "doubao", + "featherless", + "groq", + "huggingface", + "io-intelligence", + "unbound", +] as const + +export const retiredProviderNamesSchema = z.enum(retiredProviderNames) + +export type RetiredProviderName = z.infer<typeof retiredProviderNamesSchema> + +export const isRetiredProvider = (value: string): value is RetiredProviderName => + retiredProviderNames.includes(value as RetiredProviderName) + +export const providerNamesWithRetiredSchema =
z.union([providerNamesSchema, retiredProviderNamesSchema]) + +export type ProviderNameWithRetired = z.infer<typeof providerNamesWithRetiredSchema> + /** * ProviderSettingsEntry */ @@ -136,7 +163,7 @@ export const isProviderName = (key: unknown): key is ProviderName => export const providerSettingsEntrySchema = z.object({ id: z.string(), name: z.string(), - apiProvider: providerNamesSchema.optional(), + apiProvider: providerNamesWithRetiredSchema.optional(), modelId: z.string().optional(), }) @@ -386,7 +413,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv ]) export const providerSettingsSchema = z.object({ - apiProvider: providerNamesSchema.optional(), + apiProvider: providerNamesWithRetiredSchema.optional(), ...anthropicSchema.shape, ...openRouterSchema.shape, ...bedrockSchema.shape, diff --git a/src/api/index.ts b/src/api/index.ts index 78609d51736..0af7ab3a64c 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -1,7 +1,7 @@ import { Anthropic } from "@anthropic-ai/sdk" import OpenAI from "openai" -import type { ProviderSettings, ModelInfo } from "@roo-code/types" +import { isRetiredProvider, type ProviderSettings, type ModelInfo } from "@roo-code/types" import { ApiStream } from "./transform/stream" @@ -119,6 +119,12 @@ export interface ApiHandler { export function buildApiHandler(configuration: ProviderSettings): ApiHandler { const { apiProvider, ...options } = configuration + if (apiProvider && isRetiredProvider(apiProvider)) { + throw new Error( + `Sorry, this provider is no longer supported. We saw very few Roo users actually using it and we need to reduce the surface area of our codebase so we can keep shipping fast and serving our community well in this space. It was a really hard decision but it lets us focus on what matters most to you. It sucks, we know.\n\nPlease select a different provider in your API profile settings.`, + ) + } + switch (apiProvider) { case "anthropic": return new AnthropicHandler(options) diff --git a/src/core/config/ContextProxy.ts b/src/core/config/ContextProxy.ts index 87ce79a3251..2825d1c9452 100644 --- a/src/core/config/ContextProxy.ts +++ b/src/core/config/ContextProxy.ts @@ -16,6 +16,7 @@ import { globalSettingsSchema, isSecretStateKey, isProviderName, + isRetiredProvider, } from "@roo-code/types" import { TelemetryService } from "@roo-code/telemetry" @@ -223,14 +224,16 @@ export class ContextProxy { } /** - * Migrates invalid/removed apiProvider values by clearing them from storage. - * This handles cases where a user had a provider selected that was later removed - * from the extension (e.g., "glama"). + * Migrates unknown apiProvider values by clearing them from storage. + * Retired providers are preserved so users can keep historical configuration. */ private async migrateInvalidApiProvider() { try { const apiProvider = this.stateCache.apiProvider - if (apiProvider !== undefined && !isProviderName(apiProvider)) { + const isKnownProvider = + typeof apiProvider === "string" && (isProviderName(apiProvider) || isRetiredProvider(apiProvider)) + + if (apiProvider !== undefined && !isKnownProvider) { logger.info(`[ContextProxy] Found invalid provider "${apiProvider}" in storage - clearing it`) // Clear the invalid provider from both cache and storage this.stateCache.apiProvider = undefined @@ -439,8 +442,8 @@ export class ContextProxy { } /** - * Sanitizes provider values by resetting invalid/removed apiProvider values. - * This prevents schema validation errors for removed providers.
+ * Sanitizes provider values by resetting unknown apiProvider values. + * Active and retired providers are preserved. */ private sanitizeProviderValues(values: RooCodeSettings): RooCodeSettings { // Remove legacy Claude Code CLI wrapper keys that may still exist in global state. @@ -456,7 +459,11 @@ export class ContextProxy { } } - if (values.apiProvider !== undefined && !isProviderName(values.apiProvider)) { + const isKnownProvider = + typeof values.apiProvider === "string" && + (isProviderName(values.apiProvider) || isRetiredProvider(values.apiProvider)) + + if (values.apiProvider !== undefined && !isKnownProvider) { logger.info(`[ContextProxy] Sanitizing invalid provider "${values.apiProvider}" - resetting to undefined`) // Return a new values object without the invalid apiProvider const { apiProvider, ...restValues } = sanitizedValues diff --git a/src/core/config/ProviderSettingsManager.ts b/src/core/config/ProviderSettingsManager.ts index 3024540b676..4e7e1d1e206 100644 --- a/src/core/config/ProviderSettingsManager.ts +++ b/src/core/config/ProviderSettingsManager.ts @@ -12,6 +12,7 @@ import { getModelId, type ProviderName, isProviderName, + isRetiredProvider, } from "@roo-code/types" import { TelemetryService } from "@roo-code/telemetry" @@ -359,8 +360,12 @@ export class ProviderSettingsManager { const existingId = providerProfiles.apiConfigs[name]?.id const id = config.id || existingId || this.generateId() - // Filter out settings from other providers. - const filteredConfig = discriminatedProviderSettingsWithIdSchema.parse(config) + // For active providers, filter out settings from other providers. + // For retired providers, preserve full profile fields to avoid data loss. + const filteredConfig = + typeof config.apiProvider === "string" && isRetiredProvider(config.apiProvider) + ? providerSettingsWithIdSchema.parse(config) + : discriminatedProviderSettingsWithIdSchema.parse(config) providerProfiles.apiConfigs[name] = { ...filteredConfig, id } await this.store(providerProfiles) return id @@ -507,7 +512,14 @@ export class ProviderSettingsManager { const profiles = providerProfilesSchema.parse(await this.load()) const configs = profiles.apiConfigs for (const name in configs) { - // Avoid leaking properties from other providers. + const apiProvider = configs[name].apiProvider + + if (typeof apiProvider === "string" && isRetiredProvider(apiProvider)) { + // Preserve retired-provider profiles as-is to prevent dropping legacy fields. + continue + } + + // Avoid leaking properties from other active providers. configs[name] = discriminatedProviderSettingsWithIdSchema.parse(configs[name]) // If it has no apiProvider, skip filtering @@ -607,7 +619,8 @@ export class ProviderSettingsManager { } /** - * Sanitizes a provider config by resetting invalid/removed apiProvider values. + * Sanitizes a provider config by resetting unknown apiProvider values. + * Retired providers are preserved. * This handles cases where a user had a provider selected that was later removed * from the extension (e.g., "glama"). 
*/ @@ -618,10 +631,15 @@ export class ProviderSettingsManager { const config = apiConfig as Record - // Check if apiProvider is set and if it's still valid - if (config.apiProvider !== undefined && !isProviderName(config.apiProvider)) { + const apiProvider = config.apiProvider + + // Check if apiProvider is set and if it's still recognized (active or retired) + if ( + apiProvider !== undefined && + (typeof apiProvider !== "string" || (!isProviderName(apiProvider) && !isRetiredProvider(apiProvider))) + ) { console.log( - `[ProviderSettingsManager] Sanitizing invalid provider "${config.apiProvider}" - resetting to undefined`, + `[ProviderSettingsManager] Sanitizing unknown provider "${config.apiProvider}" - resetting to undefined`, ) // Return a new config object without the invalid apiProvider // This effectively resets the profile so the user can select a valid provider diff --git a/src/core/config/__tests__/ContextProxy.spec.ts b/src/core/config/__tests__/ContextProxy.spec.ts index 2060260c6ca..7c1d2a6e3ca 100644 --- a/src/core/config/__tests__/ContextProxy.spec.ts +++ b/src/core/config/__tests__/ContextProxy.spec.ts @@ -424,7 +424,7 @@ describe("ContextProxy", () => { it("should reinitialize caches after reset", async () => { // Spy on initialization methods - const initializeSpy = vi.spyOn(proxy as any, "initialize") + const initializeSpy = vi.spyOn(proxy, "initialize") // Reset all state await proxy.resetAllState() @@ -452,6 +452,25 @@ describe("ContextProxy", () => { expect(mockGlobalState.update).toHaveBeenCalledWith("apiProvider", undefined) }) + it("should not clear retired apiProvider from storage during initialization", async () => { + // Reset and create a new proxy with retired provider in state + vi.clearAllMocks() + mockGlobalState.get.mockImplementation((key: string) => { + if (key === "apiProvider") { + return "groq" // Retired provider + } + return undefined + }) + + const proxyWithRetiredProvider = new ContextProxy(mockContext) + await proxyWithRetiredProvider.initialize() + + // Should NOT have called update for apiProvider (retired should be preserved) + const updateCalls = mockGlobalState.update.mock.calls + const apiProviderUpdateCalls = updateCalls.filter((call: unknown[]) => call[0] === "apiProvider") + expect(apiProviderUpdateCalls).toHaveLength(0) + }) + it("should not modify valid apiProvider during initialization", async () => { // Reset and create a new proxy with valid provider in state vi.clearAllMocks() @@ -467,18 +486,29 @@ describe("ContextProxy", () => { // Should NOT have called update for apiProvider (it's valid) const updateCalls = mockGlobalState.update.mock.calls - const apiProviderUpdateCalls = updateCalls.filter((call: any[]) => call[0] === "apiProvider") + const apiProviderUpdateCalls = updateCalls.filter((call: unknown[]) => call[0] === "apiProvider") expect(apiProviderUpdateCalls.length).toBe(0) }) }) describe("getProviderSettings", () => { it("should sanitize invalid apiProvider before parsing", async () => { - // Set an invalid provider in state - await proxy.updateGlobalState("apiProvider", "invalid-removed-provider" as any) - await proxy.updateGlobalState("apiModelId", "some-model") + // Reset and create a new proxy with an unknown provider in state + vi.clearAllMocks() + mockGlobalState.get.mockImplementation((key: string) => { + if (key === "apiProvider") { + return "invalid-removed-provider" + } + if (key === "apiModelId") { + return "some-model" + } + return undefined + }) - const settings = proxy.getProviderSettings() + const 
proxyWithInvalidProvider = new ContextProxy(mockContext) + await proxyWithInvalidProvider.initialize() + + const settings = proxyWithInvalidProvider.getProviderSettings() // The invalid apiProvider should be sanitized (removed) expect(settings.apiProvider).toBeUndefined() @@ -486,6 +516,22 @@ describe("ContextProxy", () => { expect(settings.apiModelId).toBe("some-model") }) + it("should preserve retired apiProvider and provider fields", async () => { + await proxy.setValues({ + apiProvider: "groq", + apiModelId: "llama3-70b", + openAiBaseUrl: "https://api.retired-provider.example/v1", + apiKey: "retired-provider-key", + }) + + const settings = proxy.getProviderSettings() + + expect(settings.apiProvider).toBe("groq") + expect(settings.apiModelId).toBe("llama3-70b") + expect(settings.openAiBaseUrl).toBe("https://api.retired-provider.example/v1") + expect(settings.apiKey).toBe("retired-provider-key") + }) + it("should pass through valid apiProvider", async () => { // Set a valid provider in state await proxy.updateGlobalState("apiProvider", "anthropic") diff --git a/src/core/config/__tests__/ProviderSettingsManager.spec.ts b/src/core/config/__tests__/ProviderSettingsManager.spec.ts index e233fc913c5..9a11ecb550f 100644 --- a/src/core/config/__tests__/ProviderSettingsManager.spec.ts +++ b/src/core/config/__tests__/ProviderSettingsManager.spec.ts @@ -566,6 +566,42 @@ describe("ProviderSettingsManager", () => { "Failed to save config: Error: Failed to write provider profiles to secrets: Error: Storage failed", ) }) + + it("should preserve full fields when saving retired provider profiles", async () => { + mockSecrets.get.mockResolvedValue( + JSON.stringify({ + currentApiConfigName: "default", + apiConfigs: { + default: {}, + }, + modeApiConfigs: { + code: "default", + architect: "default", + ask: "default", + }, + }), + ) + + const retiredConfig: ProviderSettings = { + apiProvider: "groq", + apiKey: "legacy-key", + apiModelId: "legacy-model", + openAiBaseUrl: "https://legacy.example/v1", + openAiApiKey: "legacy-openai-key", + modelMaxTokens: 4096, + } + + await providerSettingsManager.saveConfig("retired", retiredConfig) + + const storedConfig = JSON.parse(mockSecrets.store.mock.calls[mockSecrets.store.mock.calls.length - 1][1]) + expect(storedConfig.apiConfigs.retired.apiProvider).toBe("groq") + expect(storedConfig.apiConfigs.retired.apiKey).toBe("legacy-key") + expect(storedConfig.apiConfigs.retired.apiModelId).toBe("legacy-model") + expect(storedConfig.apiConfigs.retired.openAiBaseUrl).toBe("https://legacy.example/v1") + expect(storedConfig.apiConfigs.retired.openAiApiKey).toBe("legacy-openai-key") + expect(storedConfig.apiConfigs.retired.modelMaxTokens).toBe(4096) + expect(storedConfig.apiConfigs.retired.id).toBeTruthy() + }) }) describe("DeleteConfig", () => { @@ -695,9 +731,9 @@ describe("ProviderSettingsManager", () => { ) }) - it("should sanitize invalid/removed providers by resetting apiProvider to undefined", async () => { + it("should sanitize unknown providers by resetting apiProvider to undefined", async () => { // This tests the fix for the infinite loop issue when a provider is removed - const configWithRemovedProvider = { + const configWithUnknownProvider = { currentApiConfigName: "valid", apiConfigs: { valid: { @@ -706,8 +742,8 @@ describe("ProviderSettingsManager", () => { apiModelId: "claude-3-opus-20240229", id: "valid-id", }, - removedProvider: { - // Provider that was removed from the extension (e.g., "invalid-removed-provider") + unknownProvider: { + // Provider value that 
is neither active nor retired. id: "removed-id", apiProvider: "invalid-removed-provider", apiKey: "some-key", @@ -722,7 +758,7 @@ describe("ProviderSettingsManager", () => { }, } - mockSecrets.get.mockResolvedValue(JSON.stringify(configWithRemovedProvider)) + mockSecrets.get.mockResolvedValue(JSON.stringify(configWithUnknownProvider)) await providerSettingsManager.initialize() @@ -735,11 +771,51 @@ describe("ProviderSettingsManager", () => { expect(storedConfig.apiConfigs.valid).toBeDefined() expect(storedConfig.apiConfigs.valid.apiProvider).toBe("anthropic") - // The config with the removed provider should have its apiProvider reset to undefined + // The config with the unknown provider should have its apiProvider reset to undefined // but still be present (not filtered out entirely) - expect(storedConfig.apiConfigs.removedProvider).toBeDefined() - expect(storedConfig.apiConfigs.removedProvider.apiProvider).toBeUndefined() - expect(storedConfig.apiConfigs.removedProvider.id).toBe("removed-id") + expect(storedConfig.apiConfigs.unknownProvider).toBeDefined() + expect(storedConfig.apiConfigs.unknownProvider.apiProvider).toBeUndefined() + expect(storedConfig.apiConfigs.unknownProvider.id).toBe("removed-id") + }) + + it("should preserve retired providers and their fields during initialize", async () => { + const configWithRetiredProvider = { + currentApiConfigName: "retiredProvider", + apiConfigs: { + retiredProvider: { + id: "retired-id", + apiProvider: "groq", + apiKey: "legacy-key", + apiModelId: "legacy-model", + openAiBaseUrl: "https://legacy.example/v1", + modelMaxTokens: 1024, + }, + }, + migrations: { + rateLimitSecondsMigrated: false, + openAiHeadersMigrated: true, + consecutiveMistakeLimitMigrated: true, + todoListEnabledMigrated: true, + claudeCodeLegacySettingsMigrated: true, + }, + } + + mockGlobalState.get.mockResolvedValue(0) + mockSecrets.get.mockResolvedValue(JSON.stringify(configWithRetiredProvider)) + + await providerSettingsManager.initialize() + + const storeCalls = mockSecrets.store.mock.calls + expect(storeCalls.length).toBeGreaterThan(0) + const finalStoredConfigJson = storeCalls[storeCalls.length - 1][1] + const storedConfig = JSON.parse(finalStoredConfigJson) + + expect(storedConfig.apiConfigs.retiredProvider).toBeDefined() + expect(storedConfig.apiConfigs.retiredProvider.apiProvider).toBe("groq") + expect(storedConfig.apiConfigs.retiredProvider.apiKey).toBe("legacy-key") + expect(storedConfig.apiConfigs.retiredProvider.apiModelId).toBe("legacy-model") + expect(storedConfig.apiConfigs.retiredProvider.openAiBaseUrl).toBe("https://legacy.example/v1") + expect(storedConfig.apiConfigs.retiredProvider.modelMaxTokens).toBe(1024) }) it("should sanitize invalid providers and remove non-object profiles during load", async () => { @@ -791,6 +867,36 @@ describe("ProviderSettingsManager", () => { }) }) + describe("Export", () => { + it("should preserve retired provider profiles with full fields", async () => { + const existingConfig: ProviderProfiles = { + currentApiConfigName: "retired", + apiConfigs: { + retired: { + id: "retired-id", + apiProvider: "groq", + apiKey: "legacy-key", + apiModelId: "legacy-model", + openAiBaseUrl: "https://legacy.example/v1", + modelMaxTokens: 4096, + modelMaxThinkingTokens: 2048, + }, + }, + } + + mockSecrets.get.mockResolvedValue(JSON.stringify(existingConfig)) + + const exported = await providerSettingsManager.export() + + expect(exported.apiConfigs.retired.apiProvider).toBe("groq") + expect(exported.apiConfigs.retired.apiKey).toBe("legacy-key") 
+ expect(exported.apiConfigs.retired.apiModelId).toBe("legacy-model") + expect(exported.apiConfigs.retired.openAiBaseUrl).toBe("https://legacy.example/v1") + expect(exported.apiConfigs.retired.modelMaxTokens).toBe(4096) + expect(exported.apiConfigs.retired.modelMaxThinkingTokens).toBe(2048) + }) + }) + describe("ResetAllConfigs", () => { it("should delete all stored configs", async () => { // Setup initial config diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts index f4e41c1bfd7..9a111cdcb04 100644 --- a/src/core/task/Task.ts +++ b/src/core/task/Task.ts @@ -41,6 +41,7 @@ import { TodoItem, getApiProtocol, getModelId, + isRetiredProvider, isIdleAsk, isInteractiveAsk, isResumableAsk, @@ -1035,7 +1036,11 @@ export class Task extends EventEmitter implements TaskLike { // Other providers (notably Gemini 3) use different signature semantics (e.g. `thoughtSignature`) // and require round-tripping the signature in their own format. const modelId = getModelId(this.apiConfiguration) - const apiProtocol = getApiProtocol(this.apiConfiguration.apiProvider, modelId) + const apiProvider = this.apiConfiguration.apiProvider + const apiProtocol = getApiProtocol( + apiProvider && !isRetiredProvider(apiProvider) ? apiProvider : undefined, + modelId, + ) const isAnthropicProtocol = apiProtocol === "anthropic" // Start from the original assistant message @@ -2695,7 +2700,11 @@ export class Task extends EventEmitter implements TaskLike { // Determine API protocol based on provider and model const modelId = getModelId(this.apiConfiguration) - const apiProtocol = getApiProtocol(this.apiConfiguration.apiProvider, modelId) + const apiProvider = this.apiConfiguration.apiProvider + const apiProtocol = getApiProtocol( + apiProvider && !isRetiredProvider(apiProvider) ? apiProvider : undefined, + modelId, + ) // Respect user-configured provider rate limiting BEFORE we emit api_req_started. // This prevents the UI from showing an "API Request..." spinner while we are @@ -2816,7 +2825,11 @@ export class Task extends EventEmitter implements TaskLike { // Calculate total tokens and cost using provider-aware function const modelId = getModelId(this.apiConfiguration) - const apiProtocol = getApiProtocol(this.apiConfiguration.apiProvider, modelId) + const apiProvider = this.apiConfiguration.apiProvider + const apiProtocol = getApiProtocol( + apiProvider && !isRetiredProvider(apiProvider) ? apiProvider : undefined, + modelId, + ) const costResult = apiProtocol === "anthropic" @@ -3140,7 +3153,11 @@ export class Task extends EventEmitter implements TaskLike { // Capture telemetry with provider-aware cost calculation const modelId = getModelId(this.apiConfiguration) - const apiProtocol = getApiProtocol(this.apiConfiguration.apiProvider, modelId) + const apiProvider = this.apiConfiguration.apiProvider + const apiProtocol = getApiProtocol( + apiProvider && !isRetiredProvider(apiProvider) ? 
apiProvider : undefined, + modelId, + ) // Use the appropriate cost function based on the API protocol const costResult = diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index b598a27c272..fe11ae7dda5 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -45,6 +45,7 @@ import { DEFAULT_MODES, DEFAULT_CHECKPOINT_TIMEOUT_SECONDS, getModelId, + isRetiredProvider, } from "@roo-code/types" import { aggregateTaskCostsRecursive, type AggregatedCosts } from "./aggregateTaskCosts" import { TelemetryService } from "@roo-code/telemetry" @@ -2266,8 +2267,11 @@ export class ClineProvider const stateValues = this.contextProxy.getValues() const customModes = await this.customModesManager.getCustomModes() - // Determine apiProvider with the same logic as before. - const apiProvider: ProviderName = stateValues.apiProvider ? stateValues.apiProvider : "anthropic" + // Determine apiProvider with the same logic as before, while filtering retired providers. + const apiProvider: ProviderName = + stateValues.apiProvider && !isRetiredProvider(stateValues.apiProvider) + ? stateValues.apiProvider + : "anthropic" // Build the apiConfiguration object combining state values and secrets. const providerSettings = this.contextProxy.getProviderSettings() @@ -3105,12 +3109,14 @@ export class ClineProvider } } + const apiProvider = apiConfiguration?.apiProvider + return { language, mode, taskId: task?.taskId, parentTaskId: task?.parentTaskId, - apiProvider: apiConfiguration?.apiProvider, + apiProvider: apiProvider && !isRetiredProvider(apiProvider) ? apiProvider : undefined, modelId: task?.api?.getModel().id, diffStrategy: task?.diffStrategy?.getName(), isSubtask: task ? !!task.parentTaskId : undefined, diff --git a/webview-ui/src/components/chat/ChatView.tsx b/webview-ui/src/components/chat/ChatView.tsx index 5c377eb1f59..9a1d712be18 100644 --- a/webview-ui/src/components/chat/ChatView.tsx +++ b/webview-ui/src/components/chat/ChatView.tsx @@ -43,6 +43,7 @@ import { ChatTextArea } from "./ChatTextArea" import TaskHeader from "./TaskHeader" import SystemPromptWarning from "./SystemPromptWarning" import ProfileViolationWarning from "./ProfileViolationWarning" +import RetiredProviderWarning from "./RetiredProviderWarning" import { CheckpointWarning } from "./CheckpointWarning" import { QueuedMessages } from "./QueuedMessages" import { WorktreeSelector } from "./WorktreeSelector" @@ -1779,6 +1780,10 @@ const ChatViewComponent: React.ForwardRefRenderFunction +
+ +
+ {isProfileDisabled && (
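Note on a dependency of the hunks above and the component below: both lean on an isRetiredProvider helper exported from @roo-code/types. Its implementation is not shown in this series; as a rough, non-authoritative sketch, it presumably amounts to a set-membership check over the removed provider slugs (the list and shape here are assumptions, not the canonical definition):

	// Hypothetical sketch only: the real list and export live in packages/types.
	const RETIRED_PROVIDERS: ReadonlySet<string> = new Set(["groq", "deepinfra" /* ... remaining retired slugs */])

	export function isRetiredProvider(provider: string): boolean {
		return RETIRED_PROVIDERS.has(provider)
	}

Callers in this series (Task.ts, ClineProvider.ts, the webview hooks) use it purely as a guard, falling back to undefined or to the "anthropic" default whenever a stored profile still points at a retired provider.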
diff --git a/webview-ui/src/components/chat/RetiredProviderWarning.tsx b/webview-ui/src/components/chat/RetiredProviderWarning.tsx new file mode 100644 index 00000000000..614230c7790 --- /dev/null +++ b/webview-ui/src/components/chat/RetiredProviderWarning.tsx @@ -0,0 +1,34 @@ +import React from "react" + +import { isRetiredProvider } from "@roo-code/types" + +import { useExtensionState } from "@src/context/ExtensionStateContext" + +export const RetiredProviderWarning: React.FC = () => { + const { apiConfiguration } = useExtensionState() + + const provider = apiConfiguration?.apiProvider + if (!provider || !isRetiredProvider(provider)) { + return null + } + + return ( +
+
+ + Provider No Longer Supported +
+

+ Sorry, this provider is no longer supported. We saw very few Roo users actually using it and we need to + reduce the surface area of our codebase so we can keep shipping fast and serving our community well in + this space. It was a really hard decision but it lets us focus on what matters most to you. It sucks, we + know. +

+

+ Please select a different provider in your API profile settings. +

+
+ ) +} + +export default RetiredProviderWarning diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 87544529a0e..3e086636078 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -7,6 +7,7 @@ import { ExternalLinkIcon } from "@radix-ui/react-icons" import { type ProviderName, type ProviderSettings, + isRetiredProvider, DEFAULT_CONSECUTIVE_MISTAKE_LIMIT, openRouterDefaultModelId, requestyDefaultModelId, @@ -179,6 +180,11 @@ const ApiOptions = ({ id: selectedModelId, info: selectedModelInfo, } = useSelectedModel(apiConfiguration) + const activeSelectedProvider: ProviderName | undefined = isRetiredProvider(selectedProvider) + ? undefined + : selectedProvider + const isRetiredSelectedProvider = + typeof apiConfiguration.apiProvider === "string" && isRetiredProvider(apiConfiguration.apiProvider) const { data: routerModels, refetch: refetchRouterModels } = useRouterModels() @@ -196,12 +202,16 @@ const ApiOptions = ({ // Update `apiModelId` whenever `selectedModelId` changes. useEffect(() => { + if (isRetiredSelectedProvider) { + return + } + if (selectedModelId && apiConfiguration.apiModelId !== selectedModelId) { // Pass false as third parameter to indicate this is not a user action // This is an internal sync, not a user-initiated change setApiConfigurationField("apiModelId", selectedModelId, false) } - }, [selectedModelId, setApiConfigurationField, apiConfiguration.apiModelId]) + }, [selectedModelId, setApiConfigurationField, apiConfiguration.apiModelId, isRetiredSelectedProvider]) // Debounced refresh model updates, only executed 250ms after the user // stops typing. @@ -245,13 +255,18 @@ const ApiOptions = ({ ) useEffect(() => { + if (isRetiredSelectedProvider) { + setErrorMessage(undefined) + return + } + const apiValidationResult = validateApiConfigurationExcludingModelErrors( apiConfiguration, routerModels, organizationAllowList, ) setErrorMessage(apiValidationResult) - }, [apiConfiguration, routerModels, organizationAllowList, setErrorMessage]) + }, [apiConfiguration, routerModels, organizationAllowList, setErrorMessage, isRetiredSelectedProvider]) const onProviderChange = useCallback( (value: ProviderName) => { @@ -469,311 +484,358 @@ const ApiOptions = ({ {errorMessage && } - {selectedProvider === "openrouter" && ( - - )} - - {selectedProvider === "requesty" && ( - - )} + {isRetiredSelectedProvider ? ( +
+ Sorry, this provider is no longer supported. We saw very few Roo users actually using it and we need + to reduce the surface area of our codebase so we can keep shipping fast and serving our community + well in this space. It was a really hard decision but it lets us focus on what matters most to you. + It sucks, we know. +
+ ) : ( + <> + {selectedProvider === "openrouter" && ( + + )} - {selectedProvider === "anthropic" && ( - - )} + {selectedProvider === "requesty" && ( + + )} - {selectedProvider === "openai-codex" && ( - - )} + {selectedProvider === "anthropic" && ( + + )} - {selectedProvider === "openai-native" && ( - - )} + {selectedProvider === "openai-codex" && ( + + )} - {selectedProvider === "mistral" && ( - - )} + {selectedProvider === "openai-native" && ( + + )} - {selectedProvider === "baseten" && ( - - )} + {selectedProvider === "mistral" && ( + + )} - {selectedProvider === "bedrock" && ( - - )} + {selectedProvider === "baseten" && ( + + )} - {selectedProvider === "vertex" && ( - - )} + {selectedProvider === "bedrock" && ( + + )} - {selectedProvider === "gemini" && ( - - )} + {selectedProvider === "vertex" && ( + + )} - {selectedProvider === "openai" && ( - - )} + {selectedProvider === "gemini" && ( + + )} - {selectedProvider === "lmstudio" && ( - - )} + {selectedProvider === "openai" && ( + + )} - {selectedProvider === "deepseek" && ( - - )} + {selectedProvider === "lmstudio" && ( + + )} - {selectedProvider === "qwen-code" && ( - - )} + {selectedProvider === "deepseek" && ( + + )} - {selectedProvider === "moonshot" && ( - - )} + {selectedProvider === "qwen-code" && ( + + )} - {selectedProvider === "minimax" && ( - - )} + {selectedProvider === "moonshot" && ( + + )} - {selectedProvider === "vscode-lm" && ( - - )} + {selectedProvider === "minimax" && ( + + )} - {selectedProvider === "ollama" && ( - - )} + {selectedProvider === "vscode-lm" && ( + + )} - {selectedProvider === "xai" && ( - - )} + {selectedProvider === "ollama" && ( + + )} - {selectedProvider === "litellm" && ( - - )} + {selectedProvider === "xai" && ( + + )} - {selectedProvider === "sambanova" && ( - - )} + {selectedProvider === "litellm" && ( + + )} - {selectedProvider === "zai" && ( - - )} + {selectedProvider === "sambanova" && ( + + )} - {selectedProvider === "vercel-ai-gateway" && ( - - )} + {selectedProvider === "zai" && ( + + )} - {selectedProvider === "fireworks" && ( - - )} + {selectedProvider === "vercel-ai-gateway" && ( + + )} - {selectedProvider === "roo" && ( - - )} + {selectedProvider === "fireworks" && ( + + )} - {/* Generic model picker for providers with static models */} - {shouldUseGenericModelPicker(selectedProvider) && ( - <> - - handleModelChangeSideEffects(selectedProvider, modelId, setApiConfigurationField) - } - /> - - {selectedProvider === "bedrock" && selectedModelId === "custom-arn" && ( - )} - - )} - {!fromWelcomeView && ( - - )} + {/* Generic model picker for providers with static models */} + {activeSelectedProvider && shouldUseGenericModelPicker(activeSelectedProvider) && ( + <> + + handleModelChangeSideEffects( + activeSelectedProvider, + modelId, + setApiConfigurationField, + ) + } + /> - {/* Gate Verbosity UI by capability flag */} - {!fromWelcomeView && selectedModelInfo?.supportsVerbosity && ( - - )} + {selectedProvider === "bedrock" && selectedModelId === "custom-arn" && ( + + )} + + )} - {!fromWelcomeView && ( - - - - {t("settings:advancedSettings.title")} - - - setApiConfigurationField(field, value)} - /> - {selectedModelInfo?.supportsTemperature !== false && ( - - )} - setApiConfigurationField("rateLimitSeconds", value)} + {!fromWelcomeView && ( + - setApiConfigurationField("consecutiveMistakeLimit", value)} + )} + + {/* Gate Verbosity UI by capability flag */} + {!fromWelcomeView && selectedModelInfo?.supportsVerbosity && ( + - {selectedProvider === "openrouter" && - openRouterModelProviders && 
- Object.keys(openRouterModelProviders).length > 0 && ( -
-
- - - - -
- -
- {t("settings:providers.openRouter.providerRouting.description")}{" "} - - {t("settings:providers.openRouter.providerRouting.learnMore")}. - -
-
- )} -
-
+ )} + + {!fromWelcomeView && ( + + + + {t("settings:advancedSettings.title")} + + + setApiConfigurationField(field, value)} + /> + {selectedModelInfo?.supportsTemperature !== false && ( + + )} + setApiConfigurationField("rateLimitSeconds", value)} + /> + setApiConfigurationField("consecutiveMistakeLimit", value)} + /> + {selectedProvider === "openrouter" && + openRouterModelProviders && + Object.keys(openRouterModelProviders).length > 0 && ( +
+
+ + + + +
+ +
+ {t("settings:providers.openRouter.providerRouting.description")}{" "} + + {t("settings:providers.openRouter.providerRouting.learnMore")}. + +
+
+ )} +
+
+ )} + )}
) diff --git a/webview-ui/src/components/settings/ModelPicker.tsx b/webview-ui/src/components/settings/ModelPicker.tsx index 82b3c41f671..7db427e3eec 100644 --- a/webview-ui/src/components/settings/ModelPicker.tsx +++ b/webview-ui/src/components/settings/ModelPicker.tsx @@ -3,7 +3,7 @@ import { VSCodeLink } from "@vscode/webview-ui-toolkit/react" import { Trans } from "react-i18next" import { ChevronsUpDown, Check, X, Info } from "lucide-react" -import type { ProviderSettings, ModelInfo, OrganizationAllowList } from "@roo-code/types" +import { type ProviderSettings, type ModelInfo, type OrganizationAllowList, isRetiredProvider } from "@roo-code/types" import { useAppTranslation } from "@src/i18n/TranslationContext" import { useSelectedModel } from "@/components/ui/hooks/useSelectedModel" @@ -104,8 +104,13 @@ export const ModelPicker = ({ return selectedModelId }, [displayTransform, apiConfiguration, modelIdKey, selectedModelId]) + const activeProvider = + apiConfiguration.apiProvider && isRetiredProvider(apiConfiguration.apiProvider) + ? undefined + : apiConfiguration.apiProvider + const modelIds = useMemo(() => { - const filteredModels = filterModels(models, apiConfiguration.apiProvider, organizationAllowList) + const filteredModels = filterModels(models, activeProvider, organizationAllowList) // Include the currently selected model even if deprecated (so users can see what they have selected) // But filter out other deprecated models from being newly selectable @@ -125,7 +130,7 @@ export const ModelPicker = ({ ) return Object.keys(availableModels).sort((a, b) => a.localeCompare(b)) - }, [models, apiConfiguration.apiProvider, organizationAllowList, selectedModelId]) + }, [models, activeProvider, organizationAllowList, selectedModelId]) const [searchValue, setSearchValue] = useState("") diff --git a/webview-ui/src/components/settings/__tests__/ApiOptions.spec.tsx b/webview-ui/src/components/settings/__tests__/ApiOptions.spec.tsx index 95f9207c62c..f862087dd06 100644 --- a/webview-ui/src/components/settings/__tests__/ApiOptions.spec.tsx +++ b/webview-ui/src/components/settings/__tests__/ApiOptions.spec.tsx @@ -662,4 +662,31 @@ describe("ApiOptions", () => { useExtensionStateMock.mockRestore() }) }) + + it("renders retired provider message and hides provider-specific forms", () => { + renderApiOptions({ + apiConfiguration: { + apiProvider: "groq", + }, + }) + + expect(screen.getByTestId("retired-provider-message")).toHaveTextContent( + "Sorry, this provider is no longer supported. We saw very few Roo users actually using it and we need to reduce the surface area of our codebase so we can keep shipping fast and serving our community well in this space. It was a really hard decision but it lets us focus on what matters most to you. 
It sucks, we know.", + ) + expect(screen.queryByTestId("litellm-provider")).not.toBeInTheDocument() + }) + + it("does not reintroduce retired providers into active provider options", () => { + renderApiOptions({ + apiConfiguration: { + apiProvider: "groq", + }, + }) + + const providerSelectContainer = screen.getByTestId("provider-select") + const providerSelect = providerSelectContainer.querySelector("select") as HTMLSelectElement + const providerOptions = Array.from(providerSelect.querySelectorAll("option")).map((option) => option.value) + + expect(providerOptions).not.toContain("groq") + }) }) diff --git a/webview-ui/src/components/ui/hooks/useSelectedModel.ts b/webview-ui/src/components/ui/hooks/useSelectedModel.ts index 7be7c353ba1..0ac82b50627 100644 --- a/webview-ui/src/components/ui/hooks/useSelectedModel.ts +++ b/webview-ui/src/components/ui/hooks/useSelectedModel.ts @@ -29,6 +29,7 @@ import { BEDROCK_1M_CONTEXT_MODEL_IDS, VERTEX_1M_CONTEXT_MODEL_IDS, isDynamicProvider, + isRetiredProvider, getProviderDefaultModelId, } from "@roo-code/types" @@ -51,14 +52,16 @@ function getValidatedModelId( export const useSelectedModel = (apiConfiguration?: ProviderSettings) => { const provider = apiConfiguration?.apiProvider || "anthropic" - const openRouterModelId = provider === "openrouter" ? apiConfiguration?.openRouterModelId : undefined - const lmStudioModelId = provider === "lmstudio" ? apiConfiguration?.lmStudioModelId : undefined - const ollamaModelId = provider === "ollama" ? apiConfiguration?.ollamaModelId : undefined + const activeProvider: ProviderName | undefined = isRetiredProvider(provider) ? undefined : provider + const dynamicProvider = activeProvider && isDynamicProvider(activeProvider) ? activeProvider : undefined + const openRouterModelId = activeProvider === "openrouter" ? apiConfiguration?.openRouterModelId : undefined + const lmStudioModelId = activeProvider === "lmstudio" ? apiConfiguration?.lmStudioModelId : undefined + const ollamaModelId = activeProvider === "ollama" ? apiConfiguration?.ollamaModelId : undefined // Only fetch router models for dynamic providers - const shouldFetchRouterModels = isDynamicProvider(provider) + const shouldFetchRouterModels = !!dynamicProvider const routerModels = useRouterModels({ - provider: shouldFetchRouterModels ? provider : undefined, + provider: dynamicProvider, enabled: shouldFetchRouterModels, }) @@ -68,16 +71,17 @@ export const useSelectedModel = (apiConfiguration?: ProviderSettings) => { // Compute readiness only for the data actually needed for the selected provider const needRouterModels = shouldFetchRouterModels - const needOpenRouterProviders = provider === "openrouter" + const needOpenRouterProviders = activeProvider === "openrouter" const needLmStudio = typeof lmStudioModelId !== "undefined" const needOllama = typeof ollamaModelId !== "undefined" - const hasValidRouterData = needRouterModels - ? routerModels.data && - routerModels.data[provider] !== undefined && - typeof routerModels.data[provider] === "object" && - !routerModels.isLoading - : true + const hasValidRouterData = + needRouterModels && dynamicProvider + ? 
routerModels.data && + routerModels.data[dynamicProvider] !== undefined && + typeof routerModels.data[dynamicProvider] === "object" && + !routerModels.isLoading + : true const isReady = (!needLmStudio || typeof lmStudioModels.data !== "undefined") && @@ -86,16 +90,16 @@ export const useSelectedModel = (apiConfiguration?: ProviderSettings) => { (!needOpenRouterProviders || typeof openRouterModelProviders.data !== "undefined") const { id, info } = - apiConfiguration && isReady + apiConfiguration && isReady && activeProvider ? getSelectedModel({ - provider, + provider: activeProvider, apiConfiguration, routerModels: (routerModels.data || {}) as RouterModels, openRouterModelProviders: (openRouterModelProviders.data || {}) as Record, lmStudioModels: (lmStudioModels.data || undefined) as ModelRecord | undefined, ollamaModels: (ollamaModels.data || undefined) as ModelRecord | undefined, }) - : { id: getProviderDefaultModelId(provider), info: undefined } + : { id: getProviderDefaultModelId(activeProvider ?? "anthropic"), info: undefined } return { provider, diff --git a/webview-ui/src/index.css b/webview-ui/src/index.css index 2395b69f57e..ebb9a2fb901 100644 --- a/webview-ui/src/index.css +++ b/webview-ui/src/index.css @@ -131,6 +131,10 @@ --color-vscode-inputValidation-infoBackground: var(--vscode-inputValidation-infoBackground); --color-vscode-inputValidation-infoBorder: var(--vscode-inputValidation-infoBorder); + --color-vscode-inputValidation-warningForeground: var(--vscode-inputValidation-warningForeground); + --color-vscode-inputValidation-warningBackground: var(--vscode-inputValidation-warningBackground); + --color-vscode-inputValidation-warningBorder: var(--vscode-inputValidation-warningBorder); + --color-vscode-widget-border: var(--vscode-widget-border); --color-vscode-widget-shadow: var(--vscode-widget-shadow); diff --git a/webview-ui/src/utils/validate.ts b/webview-ui/src/utils/validate.ts index bb57da32663..116013d03fa 100644 --- a/webview-ui/src/utils/validate.ts +++ b/webview-ui/src/utils/validate.ts @@ -7,6 +7,7 @@ import { type RouterModels, modelIdKeysByProvider, isProviderName, + isRetiredProvider, isDynamicProvider, isFauxProvider, isCustomProvider, @@ -153,7 +154,8 @@ function validateProviderAgainstOrganizationSettings( } if (!providerConfig.allowAll) { - const modelId = getModelIdForProvider(apiConfiguration, provider) + const activeProvider = isRetiredProvider(provider) ? undefined : provider + const modelId = activeProvider ? 
getModelIdForProvider(apiConfiguration, activeProvider) : undefined const allowedModels = providerConfig.models || [] if (modelId && !allowedModels.includes(modelId)) { From dd593e10562b587f58acc80e0dec491044e551af Mon Sep 17 00:00:00 2001 From: Hannes Rudolph Date: Sat, 7 Feb 2026 21:42:22 -0700 Subject: [PATCH 3/9] feat: add retired-provider warning banner in chat view --- webview-ui/src/components/chat/ChatView.tsx | 24 ++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/webview-ui/src/components/chat/ChatView.tsx b/webview-ui/src/components/chat/ChatView.tsx index 9a1d712be18..0045ab38c76 100644 --- a/webview-ui/src/components/chat/ChatView.tsx +++ b/webview-ui/src/components/chat/ChatView.tsx @@ -13,6 +13,7 @@ import { appendImages } from "@src/utils/imageUtils" import { getCostBreakdownIfNeeded } from "@src/utils/costFormatting" import type { ClineAsk, ClineSayTool, ClineMessage, ExtensionMessage, AudioType } from "@roo-code/types" +import { isRetiredProvider } from "@roo-code/types" import { findLast } from "@roo/array" import { SuggestionItem } from "@roo-code/types" @@ -226,6 +227,11 @@ const ChatViewComponent: React.ForwardRefRenderFunction !!apiConfiguration?.apiProvider && isRetiredProvider(apiConfiguration.apiProvider), + [apiConfiguration?.apiProvider], + ) + // UI layout depends on the last 2 messages (since it relies on the content // of these messages, we are deep comparing) i.e. the button state after // hitting button sets enableButtons to false, and this effect otherwise @@ -1515,7 +1521,7 @@ const ChatViewComponent: React.ForwardRefRenderFunction )} + + {isRetiredProviderActive && ( +
+ +
+ )} ) : (
@@ -1757,7 +1769,7 @@ const ChatViewComponent: React.ForwardRefRenderFunction -
- -
+ {isRetiredProviderActive && !task && ( +
+ +
+ )} {isProfileDisabled && (
From 4882da720709c63524c171102a4676b74050dc50 Mon Sep 17 00:00:00 2001 From: Hannes Rudolph Date: Sat, 7 Feb 2026 21:57:44 -0700 Subject: [PATCH 4/9] Revert "feat: add retired-provider warning banner in chat view" This reverts commit dd593e10562b587f58acc80e0dec491044e551af. --- webview-ui/src/components/chat/ChatView.tsx | 24 +++++---------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/webview-ui/src/components/chat/ChatView.tsx b/webview-ui/src/components/chat/ChatView.tsx index 0045ab38c76..9a1d712be18 100644 --- a/webview-ui/src/components/chat/ChatView.tsx +++ b/webview-ui/src/components/chat/ChatView.tsx @@ -13,7 +13,6 @@ import { appendImages } from "@src/utils/imageUtils" import { getCostBreakdownIfNeeded } from "@src/utils/costFormatting" import type { ClineAsk, ClineSayTool, ClineMessage, ExtensionMessage, AudioType } from "@roo-code/types" -import { isRetiredProvider } from "@roo-code/types" import { findLast } from "@roo/array" import { SuggestionItem } from "@roo-code/types" @@ -227,11 +226,6 @@ const ChatViewComponent: React.ForwardRefRenderFunction !!apiConfiguration?.apiProvider && isRetiredProvider(apiConfiguration.apiProvider), - [apiConfiguration?.apiProvider], - ) - // UI layout depends on the last 2 messages (since it relies on the content // of these messages, we are deep comparing) i.e. the button state after // hitting button sets enableButtons to false, and this effect otherwise @@ -1521,7 +1515,7 @@ const ChatViewComponent: React.ForwardRefRenderFunction
)} - - {isRetiredProviderActive && ( -
- -
- )} ) : (
@@ -1769,7 +1757,7 @@ const ChatViewComponent: React.ForwardRefRenderFunction - {isRetiredProviderActive && !task && ( -
- -
- )} +
+ +
{isProfileDisabled && (
From b4db88c472024c3bfe309454270aa1441f43d0c3 Mon Sep 17 00:00:00 2001 From: Hannes Rudolph Date: Sat, 7 Feb 2026 22:11:18 -0700 Subject: [PATCH 5/9] feat: show retired-provider message as inline chat response --- webview-ui/src/components/chat/ChatView.tsx | 49 ++++++++++++++++++--- 1 file changed, 42 insertions(+), 7 deletions(-) diff --git a/webview-ui/src/components/chat/ChatView.tsx b/webview-ui/src/components/chat/ChatView.tsx index 9a1d712be18..8cef374a099 100644 --- a/webview-ui/src/components/chat/ChatView.tsx +++ b/webview-ui/src/components/chat/ChatView.tsx @@ -13,6 +13,7 @@ import { appendImages } from "@src/utils/imageUtils" import { getCostBreakdownIfNeeded } from "@src/utils/costFormatting" import type { ClineAsk, ClineSayTool, ClineMessage, ExtensionMessage, AudioType } from "@roo-code/types" +import { isRetiredProvider } from "@roo-code/types" import { findLast } from "@roo/array" import { SuggestionItem } from "@roo-code/types" @@ -39,11 +40,11 @@ import Announcement from "./Announcement" import BrowserActionRow from "./BrowserActionRow" import BrowserSessionStatusRow from "./BrowserSessionStatusRow" import ChatRow from "./ChatRow" +import WarningRow from "./WarningRow" import { ChatTextArea } from "./ChatTextArea" import TaskHeader from "./TaskHeader" import SystemPromptWarning from "./SystemPromptWarning" import ProfileViolationWarning from "./ProfileViolationWarning" -import RetiredProviderWarning from "./RetiredProviderWarning" import { CheckpointWarning } from "./CheckpointWarning" import { QueuedMessages } from "./QueuedMessages" import { WorktreeSelector } from "./WorktreeSelector" @@ -100,6 +101,15 @@ const ChatViewComponent: React.ForwardRefRenderFunction { + setShowRetiredProviderWarning(false) + }, [providerName]) + const messagesRef = useRef(messages) useEffect(() => { @@ -609,6 +619,15 @@ const ChatViewComponent: React.ForwardRefRenderFunction 0) { + // Intercept when the active provider is retired — show a + // WarningRow instead of sending anything to the backend. + if (apiConfiguration?.apiProvider && isRetiredProvider(apiConfiguration.apiProvider)) { + setShowRetiredProviderWarning(true) + setInputValue("") + setSelectedImages([]) + return + } + // Queue message if: // - Task is busy (sendingDisabled) // - API request in progress (isStreaming) @@ -674,7 +693,14 @@ const ChatViewComponent: React.ForwardRefRenderFunction vscode.postMessage({ type: "clearTask" }), []) + const startNewTask = useCallback(() => { + setShowRetiredProviderWarning(false) + vscode.postMessage({ type: "clearTask" }) + }, []) // Handle stop button click from textarea const handleStopTask = useCallback(() => { @@ -1653,6 +1682,16 @@ const ChatViewComponent: React.ForwardRefRenderFunction
+ {showRetiredProviderWarning && ( +
+ vscode.postMessage({ type: "switchTab", tab: "settings" })} + /> +
+ )} {areButtonsVisible && (
-
- -
- {isProfileDisabled && (
From a85b307a04555277fec337216ad9a0a2ca19935c Mon Sep 17 00:00:00 2001 From: Hannes Rudolph Date: Sat, 7 Feb 2026 22:40:51 -0700 Subject: [PATCH 6/9] fix: show retired provider warning on home screen Move WarningRow outside {task && ...} conditional so it renders regardless of task state. Preserve user input on retired provider intercept so text isn't lost when switching providers. - Move showRetiredProviderWarning WarningRow to unconditional render area near ProfileViolationWarning - Remove setInputValue/setSelectedImages clearing from retired provider early return in handleSendMessage - Delete unused RetiredProviderWarning.tsx (dead code) --- webview-ui/src/components/chat/ChatView.tsx | 22 ++++++------ .../chat/RetiredProviderWarning.tsx | 34 ------------------- 2 files changed, 10 insertions(+), 46 deletions(-) delete mode 100644 webview-ui/src/components/chat/RetiredProviderWarning.tsx diff --git a/webview-ui/src/components/chat/ChatView.tsx b/webview-ui/src/components/chat/ChatView.tsx index 8cef374a099..f7cb3ebe625 100644 --- a/webview-ui/src/components/chat/ChatView.tsx +++ b/webview-ui/src/components/chat/ChatView.tsx @@ -623,8 +623,6 @@ const ChatViewComponent: React.ForwardRefRenderFunction
- {showRetiredProviderWarning && ( -
- vscode.postMessage({ type: "switchTab", tab: "settings" })} - /> -
- )} {areButtonsVisible && (
+ {showRetiredProviderWarning && ( +
+ vscode.postMessage({ type: "switchTab", tab: "settings" })} + /> +
+ )} { - const { apiConfiguration } = useExtensionState() - - const provider = apiConfiguration?.apiProvider - if (!provider || !isRetiredProvider(provider)) { - return null - } - - return ( -
-
- - Provider No Longer Supported -
-

- Sorry, this provider is no longer supported. We saw very few Roo users actually using it and we need to - reduce the surface area of our codebase so we can keep shipping fast and serving our community well in - this space. It was a really hard decision but it lets us focus on what matters most to you. It sucks, we - know. -

-

- Please select a different provider in your API profile settings. -

-
- ) -} - -export default RetiredProviderWarning From 7e74f3c836ece610d6b864f0bb16ae46aafe8e09 Mon Sep 17 00:00:00 2001 From: Hannes Rudolph Date: Sat, 7 Feb 2026 23:49:21 -0700 Subject: [PATCH 7/9] =?UTF-8?q?fix:=20address=20PR=20review=20=E2=80=94=20?= =?UTF-8?q?passthrough=20retired-provider=20fields=20and=20i18n=20strings?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Use passthrough() in saveConfig() and load() so legacy provider-specific fields (e.g. groqApiKey, deepInfraModelId) are preserved instead of silently stripped by strict Zod parse() - Move hardcoded English strings in ApiOptions.tsx and ChatView.tsx to i18n translation keys (settings:providers.retiredProviderMessage, chat:retiredProvider.{title,message,openSettings}) - Update tests to assert legacy provider-specific fields survive save and load round-trips --- src/core/config/ProviderSettingsManager.ts | 22 ++++++++++++++++--- .../__tests__/ProviderSettingsManager.spec.ts | 17 ++++++++++---- webview-ui/src/components/chat/ChatView.tsx | 6 ++--- .../src/components/settings/ApiOptions.tsx | 5 +---- webview-ui/src/i18n/locales/en/chat.json | 5 +++++ webview-ui/src/i18n/locales/en/settings.json | 1 + 6 files changed, 42 insertions(+), 14 deletions(-) diff --git a/src/core/config/ProviderSettingsManager.ts b/src/core/config/ProviderSettingsManager.ts index 4e7e1d1e206..6088bd68fe2 100644 --- a/src/core/config/ProviderSettingsManager.ts +++ b/src/core/config/ProviderSettingsManager.ts @@ -361,10 +361,12 @@ export class ProviderSettingsManager { const id = config.id || existingId || this.generateId() // For active providers, filter out settings from other providers. - // For retired providers, preserve full profile fields to avoid data loss. + // For retired providers, preserve full profile fields (including legacy + // provider-specific keys) to avoid data loss — passthrough() keeps + // unknown keys that strict parse() would strip. const filteredConfig = typeof config.apiProvider === "string" && isRetiredProvider(config.apiProvider) - ? providerSettingsWithIdSchema.parse(config) + ? providerSettingsWithIdSchema.passthrough().parse(config) : discriminatedProviderSettingsWithIdSchema.parse(config) providerProfiles.apiConfigs[name] = { ...filteredConfig, id } await this.store(providerProfiles) @@ -594,7 +596,21 @@ export class ProviderSettingsManager { // First, sanitize invalid apiProvider values before parsing // This handles removed providers (like "glama") gracefully const sanitizedConfig = this.sanitizeProviderConfig(apiConfig) - const result = providerSettingsWithIdSchema.safeParse(sanitizedConfig) + + // For retired providers, use passthrough() to preserve legacy + // provider-specific fields (e.g. groqApiKey, deepInfraModelId) + // that strict parse() would strip. + const providerValue = + typeof sanitizedConfig === "object" && + sanitizedConfig !== null && + "apiProvider" in sanitizedConfig + ? (sanitizedConfig as Record).apiProvider + : undefined + const schema = + typeof providerValue === "string" && isRetiredProvider(providerValue) + ? providerSettingsWithIdSchema.passthrough() + : providerSettingsWithIdSchema + const result = schema.safeParse(sanitizedConfig) return result.success ? 
{ ...acc, [key]: result.data } : acc }, {} as Record, diff --git a/src/core/config/__tests__/ProviderSettingsManager.spec.ts b/src/core/config/__tests__/ProviderSettingsManager.spec.ts index 9a11ecb550f..3f6b4f78478 100644 --- a/src/core/config/__tests__/ProviderSettingsManager.spec.ts +++ b/src/core/config/__tests__/ProviderSettingsManager.spec.ts @@ -567,7 +567,7 @@ describe("ProviderSettingsManager", () => { ) }) - it("should preserve full fields when saving retired provider profiles", async () => { + it("should preserve full fields including legacy provider-specific keys when saving retired provider profiles", async () => { mockSecrets.get.mockResolvedValue( JSON.stringify({ currentApiConfigName: "default", @@ -582,14 +582,17 @@ describe("ProviderSettingsManager", () => { }), ) - const retiredConfig: ProviderSettings = { + // Include a legacy provider-specific field (groqApiKey) that is no + // longer in the schema — passthrough() must keep it. + const retiredConfig = { apiProvider: "groq", apiKey: "legacy-key", apiModelId: "legacy-model", openAiBaseUrl: "https://legacy.example/v1", openAiApiKey: "legacy-openai-key", modelMaxTokens: 4096, - } + groqApiKey: "legacy-groq-specific-key", + } as ProviderSettings await providerSettingsManager.saveConfig("retired", retiredConfig) @@ -600,6 +603,8 @@ describe("ProviderSettingsManager", () => { expect(storedConfig.apiConfigs.retired.openAiBaseUrl).toBe("https://legacy.example/v1") expect(storedConfig.apiConfigs.retired.openAiApiKey).toBe("legacy-openai-key") expect(storedConfig.apiConfigs.retired.modelMaxTokens).toBe(4096) + // Verify legacy provider-specific field is preserved via passthrough + expect(storedConfig.apiConfigs.retired.groqApiKey).toBe("legacy-groq-specific-key") expect(storedConfig.apiConfigs.retired.id).toBeTruthy() }) }) @@ -778,7 +783,7 @@ describe("ProviderSettingsManager", () => { expect(storedConfig.apiConfigs.unknownProvider.id).toBe("removed-id") }) - it("should preserve retired providers and their fields during initialize", async () => { + it("should preserve retired providers and their fields including legacy provider-specific keys during initialize", async () => { const configWithRetiredProvider = { currentApiConfigName: "retiredProvider", apiConfigs: { @@ -789,6 +794,8 @@ describe("ProviderSettingsManager", () => { apiModelId: "legacy-model", openAiBaseUrl: "https://legacy.example/v1", modelMaxTokens: 1024, + // Legacy provider-specific field no longer in schema + groqApiKey: "legacy-groq-key", }, }, migrations: { @@ -816,6 +823,8 @@ describe("ProviderSettingsManager", () => { expect(storedConfig.apiConfigs.retiredProvider.apiModelId).toBe("legacy-model") expect(storedConfig.apiConfigs.retiredProvider.openAiBaseUrl).toBe("https://legacy.example/v1") expect(storedConfig.apiConfigs.retiredProvider.modelMaxTokens).toBe(1024) + // Verify legacy provider-specific field is preserved via passthrough + expect(storedConfig.apiConfigs.retiredProvider.groqApiKey).toBe("legacy-groq-key") }) it("should sanitize invalid providers and remove non-object profiles during load", async () => { diff --git a/webview-ui/src/components/chat/ChatView.tsx b/webview-ui/src/components/chat/ChatView.tsx index f7cb3ebe625..90f939a56e1 100644 --- a/webview-ui/src/components/chat/ChatView.tsx +++ b/webview-ui/src/components/chat/ChatView.tsx @@ -1783,9 +1783,9 @@ const ChatViewComponent: React.ForwardRefRenderFunction vscode.postMessage({ type: "switchTab", tab: "settings" })} />
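The passthrough() behaviour this patch relies on is easy to miss: by default a zod object schema silently drops keys it does not declare, while .passthrough() preserves them. A minimal sketch, using a simplified stand-in schema rather than the real providerSettingsWithIdSchema:

	import { z } from "zod"

	// Simplified stand-in for providerSettingsWithIdSchema (an assumption, not the real shape).
	const profileSchema = z.object({
		apiProvider: z.string().optional(),
		apiKey: z.string().optional(),
	})

	const legacyProfile = { apiProvider: "groq", apiKey: "k", groqApiKey: "legacy-key" }

	// Plain parse() strips the undeclared key.
	profileSchema.parse(legacyProfile)
	// => { apiProvider: "groq", apiKey: "k" }

	// passthrough() keeps it, so legacy provider-specific fields survive a save/load round-trip.
	profileSchema.passthrough().parse(legacyProfile)
	// => { apiProvider: "groq", apiKey: "k", groqApiKey: "legacy-key" }

This is why the spec updates above assert that groqApiKey is still present after saveConfig() and initialize().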
diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 3e086636078..5087b0123ea 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -488,10 +488,7 @@ const ApiOptions = ({
- Sorry, this provider is no longer supported. We saw very few Roo users actually using it and we need - to reduce the surface area of our codebase so we can keep shipping fast and serving our community - well in this space. It was a really hard decision but it lets us focus on what matters most to you. - It sucks, we know. + {t("settings:providers.retiredProviderMessage")}
) : ( <> diff --git a/webview-ui/src/i18n/locales/en/chat.json b/webview-ui/src/i18n/locales/en/chat.json index b9652cfce5c..627793fb040 100644 --- a/webview-ui/src/i18n/locales/en/chat.json +++ b/webview-ui/src/i18n/locales/en/chat.json @@ -500,5 +500,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "Provider no longer supported", + "message": "Sorry, this provider is no longer supported. We saw very few Roo users actually using it and we need to reduce the surface area of our codebase so we can keep shipping fast and serving our community well in this space. It was a really hard decision but it lets us focus on what matters most to you. It sucks, we know.", + "openSettings": "Open Settings" } } diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 42bdfe24308..ee343c979d5 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -372,6 +372,7 @@ "searchProviderPlaceholder": "Search providers", "noProviderMatchFound": "No providers found", "noMatchFound": "No matching profiles found", + "retiredProviderMessage": "Sorry, this provider is no longer supported. We saw very few Roo users actually using it and we need to reduce the surface area of our codebase so we can keep shipping fast and serving our community well in this space. It was a really hard decision but it lets us focus on what matters most to you. It sucks, we know.", "vscodeLmDescription": " The VS Code Language Model API allows you to run models provided by other VS Code extensions (including but not limited to GitHub Copilot). The easiest way to get started is to install the Copilot and Copilot Chat extensions from the VS Code Marketplace.", "awsCustomArnUse": "Enter a valid Amazon Bedrock ARN for the model you want to use. Format examples:", "awsCustomArnDesc": "Make sure the region in the ARN matches your selected AWS Region above.", From b4733a4c2afc7fd0bfadb0eb0e4455e50e3af27d Mon Sep 17 00:00:00 2001 From: Hannes Rudolph Date: Sat, 7 Feb 2026 23:58:55 -0700 Subject: [PATCH 8/9] i18n: add retired-provider translations for all 17 locales Translate providers.retiredProviderMessage (settings) and retiredProvider.{title,message,openSettings} (chat) into ca, de, es, fr, hi, id, it, ja, ko, nl, pl, pt-BR, ru, tr, vi, zh-CN, zh-TW. 
--- webview-ui/src/i18n/locales/ca/chat.json | 5 +++++ webview-ui/src/i18n/locales/ca/settings.json | 1 + webview-ui/src/i18n/locales/de/chat.json | 5 +++++ webview-ui/src/i18n/locales/de/settings.json | 1 + webview-ui/src/i18n/locales/es/chat.json | 5 +++++ webview-ui/src/i18n/locales/es/settings.json | 1 + webview-ui/src/i18n/locales/fr/chat.json | 5 +++++ webview-ui/src/i18n/locales/fr/settings.json | 1 + webview-ui/src/i18n/locales/hi/chat.json | 5 +++++ webview-ui/src/i18n/locales/hi/settings.json | 1 + webview-ui/src/i18n/locales/id/chat.json | 5 +++++ webview-ui/src/i18n/locales/id/settings.json | 1 + webview-ui/src/i18n/locales/it/chat.json | 5 +++++ webview-ui/src/i18n/locales/it/settings.json | 1 + webview-ui/src/i18n/locales/ja/chat.json | 5 +++++ webview-ui/src/i18n/locales/ja/settings.json | 1 + webview-ui/src/i18n/locales/ko/chat.json | 5 +++++ webview-ui/src/i18n/locales/ko/settings.json | 1 + webview-ui/src/i18n/locales/nl/chat.json | 5 +++++ webview-ui/src/i18n/locales/nl/settings.json | 1 + webview-ui/src/i18n/locales/pl/chat.json | 5 +++++ webview-ui/src/i18n/locales/pl/settings.json | 1 + webview-ui/src/i18n/locales/pt-BR/chat.json | 5 +++++ webview-ui/src/i18n/locales/pt-BR/settings.json | 1 + webview-ui/src/i18n/locales/ru/chat.json | 5 +++++ webview-ui/src/i18n/locales/ru/settings.json | 1 + webview-ui/src/i18n/locales/tr/chat.json | 5 +++++ webview-ui/src/i18n/locales/tr/settings.json | 1 + webview-ui/src/i18n/locales/vi/chat.json | 5 +++++ webview-ui/src/i18n/locales/vi/settings.json | 1 + webview-ui/src/i18n/locales/zh-CN/chat.json | 5 +++++ webview-ui/src/i18n/locales/zh-CN/settings.json | 1 + webview-ui/src/i18n/locales/zh-TW/chat.json | 5 +++++ webview-ui/src/i18n/locales/zh-TW/settings.json | 1 + 34 files changed, 102 insertions(+) diff --git a/webview-ui/src/i18n/locales/ca/chat.json b/webview-ui/src/i18n/locales/ca/chat.json index 5bba7dc4459..6d293c0ee0e 100644 --- a/webview-ui/src/i18n/locales/ca/chat.json +++ b/webview-ui/src/i18n/locales/ca/chat.json @@ -512,5 +512,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "Proveïdor ja no compatible", + "message": "Ho sentim, aquest proveïdor ja no és compatible. Hem vist molt pocs usuaris de Roo que realment l'utilitzaven i necessitem reduir l'abast del nostre codi per poder seguir avançant ràpidament i servir bé la nostra comunitat en aquest espai. Va ser una decisió molt difícil, però ens permet centrar-nos en el que més t'importa. Ho sabem, és una llàstima.", + "openSettings": "Obrir configuració" } } diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index e4502bd53ab..65b02b1aa27 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "Cerca proveïdors", "noProviderMatchFound": "No s'han trobat proveïdors", "noMatchFound": "No s'han trobat perfils coincidents", + "retiredProviderMessage": "Ho sentim, aquest proveïdor ja no és compatible. Hem vist molt pocs usuaris de Roo que realment l'utilitzaven i necessitem reduir l'abast del nostre codi per poder seguir avançant ràpidament i servir bé la nostra comunitat en aquest espai. Va ser una decisió molt difícil, però ens permet centrar-nos en el que més t'importa. 
Ho sabem, és una llàstima.", "vscodeLmDescription": "L'API del model de llenguatge de VS Code us permet executar models proporcionats per altres extensions de VS Code (incloent-hi, però no limitat a, GitHub Copilot). La manera més senzilla de començar és instal·lar les extensions Copilot i Copilot Chat des del VS Code Marketplace.", "awsCustomArnUse": "Introduïu un ARN vàlid d'Amazon Bedrock per al model que voleu utilitzar. Exemples de format:", "awsCustomArnDesc": "Assegureu-vos que la regió a l'ARN coincideix amb la regió d'AWS seleccionada anteriorment.", diff --git a/webview-ui/src/i18n/locales/de/chat.json b/webview-ui/src/i18n/locales/de/chat.json index 58bc85b60cf..e05e6ddd371 100644 --- a/webview-ui/src/i18n/locales/de/chat.json +++ b/webview-ui/src/i18n/locales/de/chat.json @@ -512,5 +512,10 @@ }, "readCommandOutput": { "title": "Roo las Befehlsausgabe" + }, + "retiredProvider": { + "title": "Anbieter wird nicht mehr unterstützt", + "message": "Leider wird dieser Anbieter nicht mehr unterstützt. Wir haben festgestellt, dass nur sehr wenige Roo-Nutzer ihn tatsächlich verwendet haben, und wir müssen den Umfang unserer Codebasis reduzieren, damit wir weiterhin schnell liefern und unserer Community in diesem Bereich gut dienen können. Es war eine wirklich schwere Entscheidung, aber sie ermöglicht uns, uns auf das zu konzentrieren, was dir am wichtigsten ist. Es ist ärgerlich, das wissen wir.", + "openSettings": "Einstellungen öffnen" } } diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index 13030ca3847..fe114c9d877 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "Suchanbieter durchsuchen", "noProviderMatchFound": "Keine Anbieter gefunden", "noMatchFound": "Keine passenden Profile gefunden", + "retiredProviderMessage": "Leider wird dieser Anbieter nicht mehr unterstützt. Wir haben festgestellt, dass nur sehr wenige Roo-Nutzer ihn tatsächlich verwendet haben, und wir müssen den Umfang unserer Codebasis reduzieren, damit wir weiterhin schnell liefern und unserer Community in diesem Bereich gut dienen können. Es war eine wirklich schwere Entscheidung, aber sie ermöglicht uns, uns auf das zu konzentrieren, was dir am wichtigsten ist. Es ist ärgerlich, das wissen wir.", "vscodeLmDescription": "Die VS Code Language Model API ermöglicht das Ausführen von Modellen, die von anderen VS Code-Erweiterungen bereitgestellt werden (einschließlich, aber nicht beschränkt auf GitHub Copilot). Der einfachste Weg, um zu starten, besteht darin, die Erweiterungen Copilot und Copilot Chat aus dem VS Code Marketplace zu installieren.", "awsCustomArnUse": "Geben Sie eine gültige Amazon Bedrock ARN für das Modell ein, das Sie verwenden möchten. Formatbeispiele:", "awsCustomArnDesc": "Stellen Sie sicher, dass die Region in der ARN mit Ihrer oben ausgewählten AWS-Region übereinstimmt.", diff --git a/webview-ui/src/i18n/locales/es/chat.json b/webview-ui/src/i18n/locales/es/chat.json index 6c894642c88..0b947153d17 100644 --- a/webview-ui/src/i18n/locales/es/chat.json +++ b/webview-ui/src/i18n/locales/es/chat.json @@ -512,5 +512,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "Proveedor ya no compatible", + "message": "Lo sentimos, este proveedor ya no es compatible. 
Vimos que muy pocos usuarios de Roo lo usaban realmente y necesitamos reducir el alcance de nuestro código para poder seguir avanzando rápido y servir bien a nuestra comunidad en este espacio. Fue una decisión muy difícil, pero nos permite enfocarnos en lo que más te importa. Lo sabemos, es una lástima.", + "openSettings": "Abrir configuración" } } diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index 419146cbc81..676120c0556 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "Buscar proveedores", "noProviderMatchFound": "No se encontraron proveedores", "noMatchFound": "No se encontraron perfiles coincidentes", + "retiredProviderMessage": "Lo sentimos, este proveedor ya no es compatible. Vimos que muy pocos usuarios de Roo lo usaban realmente y necesitamos reducir el alcance de nuestro código para poder seguir avanzando rápido y servir bien a nuestra comunidad en este espacio. Fue una decisión muy difícil, pero nos permite enfocarnos en lo que más te importa. Lo sabemos, es una lástima.", "vscodeLmDescription": "La API del Modelo de Lenguaje de VS Code le permite ejecutar modelos proporcionados por otras extensiones de VS Code (incluido, entre otros, GitHub Copilot). La forma más sencilla de empezar es instalar las extensiones Copilot y Copilot Chat desde el VS Code Marketplace.", "awsCustomArnUse": "Ingrese un ARN de Amazon Bedrock válido para el modelo que desea utilizar. Ejemplos de formato:", "awsCustomArnDesc": "Asegúrese de que la región en el ARN coincida con la región de AWS seleccionada anteriormente.", diff --git a/webview-ui/src/i18n/locales/fr/chat.json b/webview-ui/src/i18n/locales/fr/chat.json index 598344de1d7..32c98e02ccc 100644 --- a/webview-ui/src/i18n/locales/fr/chat.json +++ b/webview-ui/src/i18n/locales/fr/chat.json @@ -512,5 +512,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "Fournisseur plus pris en charge", + "message": "Désolé, ce fournisseur n'est plus pris en charge. Nous avons constaté que très peu d'utilisateurs de Roo l'utilisaient réellement et nous devons réduire la portée de notre code pour continuer à avancer rapidement et bien servir notre communauté. C'était une décision vraiment difficile, mais elle nous permet de nous concentrer sur ce qui compte le plus pour toi. C'est rageant, on le sait.", + "openSettings": "Ouvrir les paramètres" } } diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index cecca34916d..e0bc9c7ea42 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "Rechercher des fournisseurs", "noProviderMatchFound": "Aucun fournisseur trouvé", "noMatchFound": "Aucun profil correspondant trouvé", + "retiredProviderMessage": "Désolé, ce fournisseur n'est plus pris en charge. Nous avons constaté que très peu d'utilisateurs de Roo l'utilisaient réellement et nous devons réduire la portée de notre code pour continuer à avancer rapidement et bien servir notre communauté. C'était une décision vraiment difficile, mais elle nous permet de nous concentrer sur ce qui compte le plus pour toi. 
C'est rageant, on le sait.", "vscodeLmDescription": "L'API du modèle de langage VS Code vous permet d'exécuter des modèles fournis par d'autres extensions VS Code (y compris, mais sans s'y limiter, GitHub Copilot). Le moyen le plus simple de commencer est d'installer les extensions Copilot et Copilot Chat depuis le VS Code Marketplace.", "awsCustomArnUse": "Entrez un ARN Amazon Bedrock valide pour le modèle que vous souhaitez utiliser. Exemples de format :", "awsCustomArnDesc": "Assurez-vous que la région dans l'ARN correspond à la région AWS sélectionnée ci-dessus.", diff --git a/webview-ui/src/i18n/locales/hi/chat.json b/webview-ui/src/i18n/locales/hi/chat.json index 74d5e4e3eef..9c5662b3876 100644 --- a/webview-ui/src/i18n/locales/hi/chat.json +++ b/webview-ui/src/i18n/locales/hi/chat.json @@ -512,5 +512,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "प्रदाता अब समर्थित नहीं है", + "message": "क्षमा करें, यह प्रदाता अब समर्थित नहीं है। हमने देखा कि बहुत कम Roo उपयोगकर्ता वास्तव में इसका उपयोग कर रहे थे और हमें अपने कोडबेस का दायरा कम करने की आवश्यकता है ताकि हम तेज़ी से काम करना और अपने समुदाय की अच्छी सेवा करना जारी रख सकें। यह वाकई एक कठिन निर्णय था, लेकिन इससे हम उस पर ध्यान केंद्रित कर सकते हैं जो तुम्हारे लिए सबसे ज़्यादा मायने रखता है। हम जानते हैं, यह बुरा लगता है।", + "openSettings": "सेटिंग्स खोलें" } } diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index de01293bd87..d9ff40f0a7b 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "प्रदाता खोजें", "noProviderMatchFound": "कोई प्रदाता नहीं मिला", "noMatchFound": "कोई मिलान प्रोफ़ाइल नहीं मिला", + "retiredProviderMessage": "क्षमा करें, यह प्रदाता अब समर्थित नहीं है। हमने देखा कि बहुत कम Roo उपयोगकर्ता वास्तव में इसका उपयोग कर रहे थे और हमें अपने कोडबेस का दायरा कम करने की आवश्यकता है ताकि हम तेज़ी से काम करना और अपने समुदाय की अच्छी सेवा करना जारी रख सकें। यह वाकई एक कठिन निर्णय था, लेकिन इससे हम उस पर ध्यान केंद्रित कर सकते हैं जो तुम्हारे लिए सबसे ज़्यादा मायने रखता है। हम जानते हैं, यह बुरा लगता है।", "vscodeLmDescription": "VS कोड भाषा मॉडल API आपको अन्य VS कोड एक्सटेंशन (जैसे GitHub Copilot) द्वारा प्रदान किए गए मॉडल चलाने की अनुमति देता है। शुरू करने का सबसे आसान तरीका VS कोड मार्केटप्लेस से Copilot और Copilot चैट एक्सटेंशन इंस्टॉल करना है।", "awsCustomArnUse": "आप जिस मॉडल का उपयोग करना चाहते हैं, उसके लिए एक वैध Amazon बेडरॉक ARN दर्ज करें। प्रारूप उदाहरण:", "awsCustomArnDesc": "सुनिश्चित करें कि ARN में क्षेत्र ऊपर चयनित AWS क्षेत्र से मेल खाता है।", diff --git a/webview-ui/src/i18n/locales/id/chat.json b/webview-ui/src/i18n/locales/id/chat.json index f814b9d4a9a..996fb07b7bf 100644 --- a/webview-ui/src/i18n/locales/id/chat.json +++ b/webview-ui/src/i18n/locales/id/chat.json @@ -518,5 +518,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "Penyedia tidak lagi didukung", + "message": "Maaf, penyedia ini tidak lagi didukung. Kami melihat sangat sedikit pengguna Roo yang benar-benar menggunakannya dan kami perlu mengurangi cakupan kode kami agar bisa terus bergerak cepat dan melayani komunitas kami dengan baik. Ini adalah keputusan yang sangat sulit, tapi ini memungkinkan kami fokus pada apa yang paling penting bagimu. 
Memang menyebalkan, kami tahu.", + "openSettings": "Buka Pengaturan" } } diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index 7c0a7ed8688..29e2ea6e9c0 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "Cari penyedia", "noProviderMatchFound": "Tidak ada penyedia ditemukan", "noMatchFound": "Tidak ada profil yang cocok ditemukan", + "retiredProviderMessage": "Maaf, penyedia ini tidak lagi didukung. Kami melihat sangat sedikit pengguna Roo yang benar-benar menggunakannya dan kami perlu mengurangi cakupan kode kami agar bisa terus bergerak cepat dan melayani komunitas kami dengan baik. Ini adalah keputusan yang sangat sulit, tapi ini memungkinkan kami fokus pada apa yang paling penting bagimu. Memang menyebalkan, kami tahu.", "vscodeLmDescription": " API Model Bahasa VS Code memungkinkan kamu menjalankan model yang disediakan oleh ekstensi VS Code lainnya (termasuk namun tidak terbatas pada GitHub Copilot). Cara termudah untuk memulai adalah menginstal ekstensi Copilot dan Copilot Chat dari VS Code Marketplace.", "awsCustomArnUse": "Masukkan ARN Amazon Bedrock yang valid untuk model yang ingin kamu gunakan. Contoh format:", "awsCustomArnDesc": "Pastikan region di ARN cocok dengan AWS Region yang kamu pilih di atas.", diff --git a/webview-ui/src/i18n/locales/it/chat.json b/webview-ui/src/i18n/locales/it/chat.json index eca4264df20..c660c6d285c 100644 --- a/webview-ui/src/i18n/locales/it/chat.json +++ b/webview-ui/src/i18n/locales/it/chat.json @@ -512,5 +512,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "Provider non più supportato", + "message": "Ci dispiace, questo provider non è più supportato. Abbiamo visto che pochissimi utenti di Roo lo utilizzavano effettivamente e dobbiamo ridurre la portata del nostro codice per continuare a procedere velocemente e servire bene la nostra community. È stata una decisione davvero difficile, ma ci permette di concentrarci su ciò che conta di più per te. Lo sappiamo, è una seccatura.", + "openSettings": "Apri impostazioni" } } diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index ab0d2d14220..9b62aaea733 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -309,6 +309,7 @@ "noMatchFound": "Nessun profilo corrispondente trovato", "searchProviderPlaceholder": "Cerca fornitori", "noProviderMatchFound": "Nessun fornitore trovato", + "retiredProviderMessage": "Ci dispiace, questo provider non è più supportato. Abbiamo visto che pochissimi utenti di Roo lo utilizzavano effettivamente e dobbiamo ridurre la portata del nostro codice per continuare a procedere velocemente e servire bene la nostra community. È stata una decisione davvero difficile, ma ci permette di concentrarci su ciò che conta di più per te. Lo sappiamo, è una seccatura.", "vscodeLmDescription": "L'API del Modello di Linguaggio di VS Code consente di eseguire modelli forniti da altre estensioni di VS Code (incluso, ma non limitato a, GitHub Copilot). Il modo più semplice per iniziare è installare le estensioni Copilot e Copilot Chat dal VS Code Marketplace.", "awsCustomArnUse": "Inserisci un ARN Amazon Bedrock valido per il modello che desideri utilizzare. 
Esempi di formato:", "awsCustomArnDesc": "Assicurati che la regione nell'ARN corrisponda alla regione AWS selezionata sopra.", diff --git a/webview-ui/src/i18n/locales/ja/chat.json b/webview-ui/src/i18n/locales/ja/chat.json index 9a0b1b2a35c..278b3dcbd67 100644 --- a/webview-ui/src/i18n/locales/ja/chat.json +++ b/webview-ui/src/i18n/locales/ja/chat.json @@ -512,5 +512,10 @@ }, "readCommandOutput": { "title": "Rooがコマンド出力を読み込みました" + }, + "retiredProvider": { + "title": "プロバイダーのサポート終了", + "message": "申し訳ございませんが、このプロバイダーはサポートを終了しました。実際に利用しているRooユーザーが非常に少なく、コードベースの範囲を縮小して、迅速な開発とコミュニティへの貢献を続ける必要がありました。本当に難しい決断でしたが、あなたにとって最も重要なことに集中するためです。残念ですが、ご理解ください。", + "openSettings": "設定を開く" } } diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 109f53afe64..ab53876d171 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "プロバイダーを検索", "noProviderMatchFound": "プロバイダーが見つかりません", "noMatchFound": "一致するプロファイルが見つかりません", + "retiredProviderMessage": "申し訳ございませんが、このプロバイダーはサポートを終了しました。実際に利用しているRooユーザーが非常に少なく、コードベースの範囲を縮小して、迅速な開発とコミュニティへの貢献を続ける必要がありました。本当に難しい決断でしたが、あなたにとって最も重要なことに集中するためです。残念ですが、ご理解ください。", "vscodeLmDescription": "VS Code言語モデルAPIを使用すると、他のVS Code拡張機能(GitHub Copilotなど)が提供するモデルを実行できます。最も簡単な方法は、VS Code MarketplaceからCopilotおよびCopilot Chat拡張機能をインストールすることです。", "awsCustomArnUse": "使用したいモデルの有効なAmazon Bedrock ARNを入力してください。形式の例:", "awsCustomArnDesc": "ARN内のリージョンが上で選択したAWSリージョンと一致していることを確認してください。", diff --git a/webview-ui/src/i18n/locales/ko/chat.json b/webview-ui/src/i18n/locales/ko/chat.json index 3b26c6e6e19..f0b5e910866 100644 --- a/webview-ui/src/i18n/locales/ko/chat.json +++ b/webview-ui/src/i18n/locales/ko/chat.json @@ -512,5 +512,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "공급자 지원 종료", + "message": "죄송합니다. 이 공급자는 더 이상 지원되지 않습니다. 실제로 사용하는 Roo 사용자가 매우 적었고, 빠르게 개발하고 커뮤니티에 잘 봉사하기 위해 코드베이스의 범위를 줄여야 했습니다. 정말 어려운 결정이었지만, 당신에게 가장 중요한 것에 집중할 수 있게 해줍니다. 불편을 드려 죄송합니다.", + "openSettings": "설정 열기" } } diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 2595528887a..8d5b33b76fb 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "공급자 검색", "noProviderMatchFound": "공급자를 찾을 수 없습니다", "noMatchFound": "일치하는 프로필이 없습니다", + "retiredProviderMessage": "죄송합니다. 이 공급자는 더 이상 지원되지 않습니다. 실제로 사용하는 Roo 사용자가 매우 적었고, 빠르게 개발하고 커뮤니티에 잘 봉사하기 위해 코드베이스의 범위를 줄여야 했습니다. 정말 어려운 결정이었지만, 당신에게 가장 중요한 것에 집중할 수 있게 해줍니다. 불편을 드려 죄송합니다.", "vscodeLmDescription": "VS Code 언어 모델 API를 사용하면 GitHub Copilot을 포함한 기타 VS Code 확장 프로그램이 제공하는 모델을 실행할 수 있습니다. 시작하려면 VS Code 마켓플레이스에서 Copilot 및 Copilot Chat 확장 프로그램을 설치하는 것이 가장 쉽습니다.", "awsCustomArnUse": "사용하려는 모델의 유효한 Amazon Bedrock ARN을 입력하세요. 형식 예시:", "awsCustomArnDesc": "ARN의 리전이 위에서 선택한 AWS 리전과 일치하는지 확인하세요.", diff --git a/webview-ui/src/i18n/locales/nl/chat.json b/webview-ui/src/i18n/locales/nl/chat.json index 241e9f22166..49d6f3db3da 100644 --- a/webview-ui/src/i18n/locales/nl/chat.json +++ b/webview-ui/src/i18n/locales/nl/chat.json @@ -512,5 +512,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "Provider wordt niet meer ondersteund", + "message": "Sorry, deze provider wordt niet meer ondersteund. 
We zagen dat heel weinig Roo-gebruikers het daadwerkelijk gebruikten en we moeten de omvang van onze codebase verkleinen zodat we snel kunnen blijven leveren en onze community goed kunnen blijven bedienen. Het was een heel moeilijke beslissing, maar het stelt ons in staat om ons te richten op wat het belangrijkst voor je is. Het is vervelend, dat weten we.", + "openSettings": "Instellingen openen" } } diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index da1c5861bc1..bebdb23f7b2 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "Zoek providers", "noProviderMatchFound": "Geen providers gevonden", "noMatchFound": "Geen overeenkomende profielen gevonden", + "retiredProviderMessage": "Sorry, deze provider wordt niet meer ondersteund. We zagen dat heel weinig Roo-gebruikers het daadwerkelijk gebruikten en we moeten de omvang van onze codebase verkleinen zodat we snel kunnen blijven leveren en onze community goed kunnen blijven bedienen. Het was een heel moeilijke beslissing, maar het stelt ons in staat om ons te richten op wat het belangrijkst voor je is. Het is vervelend, dat weten we.", "vscodeLmDescription": "De VS Code Language Model API stelt je in staat modellen te draaien die door andere VS Code-extensies worden geleverd (waaronder GitHub Copilot). De eenvoudigste manier om te beginnen is door de Copilot- en Copilot Chat-extensies te installeren vanuit de VS Code Marketplace.", "awsCustomArnUse": "Voer een geldige Amazon Bedrock ARN in voor het model dat je wilt gebruiken. Voorbeeldformaten:", "awsCustomArnDesc": "Zorg ervoor dat de regio in de ARN overeenkomt met je geselecteerde AWS-regio hierboven.", diff --git a/webview-ui/src/i18n/locales/pl/chat.json b/webview-ui/src/i18n/locales/pl/chat.json index ae6b5a96ac7..54f24c7550f 100644 --- a/webview-ui/src/i18n/locales/pl/chat.json +++ b/webview-ui/src/i18n/locales/pl/chat.json @@ -512,5 +512,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "Dostawca nie jest już obsługiwany", + "message": "Przepraszamy, ten dostawca nie jest już obsługiwany. Zauważyliśmy, że bardzo niewielu użytkowników Roo faktycznie z niego korzystało, a my musimy zmniejszyć zakres naszego kodu, aby móc dalej szybko dostarczać i dobrze służyć naszej społeczności. To była naprawdę trudna decyzja, ale pozwala nam skupić się na tym, co jest dla ciebie najważniejsze. Wiemy, że to frustrujące.", + "openSettings": "Otwórz ustawienia" } } diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index c587d582848..ac1c4c8bf54 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "Szukaj dostawców", "noProviderMatchFound": "Nie znaleziono dostawców", "noMatchFound": "Nie znaleziono pasujących profili", + "retiredProviderMessage": "Przepraszamy, ten dostawca nie jest już obsługiwany. Zauważyliśmy, że bardzo niewielu użytkowników Roo faktycznie z niego korzystało, a my musimy zmniejszyć zakres naszego kodu, aby móc dalej szybko dostarczać i dobrze służyć naszej społeczności. To była naprawdę trudna decyzja, ale pozwala nam skupić się na tym, co jest dla ciebie najważniejsze. 
Wiemy, że to frustrujące.", "vscodeLmDescription": "Interfejs API modelu językowego VS Code umożliwia uruchamianie modeli dostarczanych przez inne rozszerzenia VS Code (w tym, ale nie tylko, GitHub Copilot). Najłatwiejszym sposobem na rozpoczęcie jest zainstalowanie rozszerzeń Copilot i Copilot Chat z VS Code Marketplace.", "awsCustomArnUse": "Wprowadź prawidłowy Amazon Bedrock ARN dla modelu, którego chcesz użyć. Przykłady formatu:", "awsCustomArnDesc": "Upewnij się, że region w ARN odpowiada wybranemu powyżej regionowi AWS.", diff --git a/webview-ui/src/i18n/locales/pt-BR/chat.json b/webview-ui/src/i18n/locales/pt-BR/chat.json index 2ca72ebd970..cf06e0efc50 100644 --- a/webview-ui/src/i18n/locales/pt-BR/chat.json +++ b/webview-ui/src/i18n/locales/pt-BR/chat.json @@ -512,5 +512,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "Provedor não é mais suportado", + "message": "Desculpe, este provedor não é mais suportado. Percebemos que muito poucos usuários do Roo realmente o utilizavam e precisamos reduzir o escopo do nosso código para continuar entregando rápido e servindo bem nossa comunidade. Foi uma decisão muito difícil, mas nos permite focar no que mais importa para você. Sabemos que é chato.", + "openSettings": "Abrir configurações" } } diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index ee848b55c70..81829c15d0d 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "Pesquisar provedores", "noProviderMatchFound": "Nenhum provedor encontrado", "noMatchFound": "Nenhum perfil correspondente encontrado", + "retiredProviderMessage": "Desculpe, este provedor não é mais suportado. Percebemos que muito poucos usuários do Roo realmente o utilizavam e precisamos reduzir o escopo do nosso código para continuar entregando rápido e servindo bem nossa comunidade. Foi uma decisão muito difícil, mas nos permite focar no que mais importa para você. Sabemos que é chato.", "vscodeLmDescription": "A API do Modelo de Linguagem do VS Code permite executar modelos fornecidos por outras extensões do VS Code (incluindo, mas não se limitando, ao GitHub Copilot). A maneira mais fácil de começar é instalar as extensões Copilot e Copilot Chat no VS Code Marketplace.", "awsCustomArnUse": "Insira um ARN Amazon Bedrock válido para o modelo que deseja usar. Exemplos de formato:", "awsCustomArnDesc": "Certifique-se de que a região no ARN corresponde à região AWS selecionada acima.", diff --git a/webview-ui/src/i18n/locales/ru/chat.json b/webview-ui/src/i18n/locales/ru/chat.json index 347bf1be81e..e562f35c621 100644 --- a/webview-ui/src/i18n/locales/ru/chat.json +++ b/webview-ui/src/i18n/locales/ru/chat.json @@ -513,5 +513,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "Провайдер больше не поддерживается", + "message": "К сожалению, этот провайдер больше не поддерживается. Мы заметили, что очень немногие пользователи Roo действительно им пользовались, и нам нужно сократить объём кодовой базы, чтобы продолжать быстро развиваться и хорошо служить нашему сообществу. Это было очень непростое решение, но оно позволяет нам сосредоточиться на том, что для тебя важнее всего. 
Мы знаем, что это неприятно.", + "openSettings": "Открыть настройки" } } diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index ee0638d00d3..0ae16f3cd5f 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "Поиск провайдеров", "noProviderMatchFound": "Провайдеры не найдены", "noMatchFound": "Совпадений не найдено", + "retiredProviderMessage": "К сожалению, этот провайдер больше не поддерживается. Мы заметили, что очень немногие пользователи Roo действительно им пользовались, и нам нужно сократить объём кодовой базы, чтобы продолжать быстро развиваться и хорошо служить нашему сообществу. Это было очень непростое решение, но оно позволяет нам сосредоточиться на том, что для тебя важнее всего. Мы знаем, что это неприятно.", "vscodeLmDescription": "API языковой модели VS Code позволяет запускать модели, предоставляемые другими расширениями VS Code (включая, но не ограничиваясь GitHub Copilot). Для начала установите расширения Copilot и Copilot Chat из VS Code Marketplace.", "awsCustomArnUse": "Введите действительный Amazon Bedrock ARN для используемой модели. Примеры формата:", "awsCustomArnDesc": "Убедитесь, что регион в ARN совпадает с выбранным выше регионом AWS.", diff --git a/webview-ui/src/i18n/locales/tr/chat.json b/webview-ui/src/i18n/locales/tr/chat.json index 2301541cdff..e7f34dc87a4 100644 --- a/webview-ui/src/i18n/locales/tr/chat.json +++ b/webview-ui/src/i18n/locales/tr/chat.json @@ -513,5 +513,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "Sağlayıcı artık desteklenmiyor", + "message": "Üzgünüz, bu sağlayıcı artık desteklenmiyor. Çok az Roo kullanıcısının bunu gerçekten kullandığını gördük ve hızlı gelişmeye devam edip topluluğumuza iyi hizmet verebilmek için kod tabanımızın kapsamını daraltmamız gerekiyor. Gerçekten zor bir karardı ama senin için en önemli olana odaklanmamızı sağlıyor. Bunun can sıkıcı olduğunu biliyoruz.", + "openSettings": "Ayarları aç" } } diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 7016ad5f702..375a198a9c9 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "Sağlayıcıları ara", "noProviderMatchFound": "Eşleşen sağlayıcı bulunamadı", "noMatchFound": "Eşleşen profil bulunamadı", + "retiredProviderMessage": "Üzgünüz, bu sağlayıcı artık desteklenmiyor. Çok az Roo kullanıcısının bunu gerçekten kullandığını gördük ve hızlı gelişmeye devam edip topluluğumuza iyi hizmet verebilmek için kod tabanımızın kapsamını daraltmamız gerekiyor. Gerçekten zor bir karardı ama senin için en önemli olana odaklanmamızı sağlıyor. Bunun can sıkıcı olduğunu biliyoruz.", "vscodeLmDescription": "VS Code Dil Modeli API'si, diğer VS Code uzantıları tarafından sağlanan modelleri çalıştırmanıza olanak tanır (GitHub Copilot dahil ancak bunlarla sınırlı değildir). Başlamanın en kolay yolu, VS Code Marketplace'ten Copilot ve Copilot Chat uzantılarını yüklemektir.", "awsCustomArnUse": "Kullanmak istediğiniz model için geçerli bir Amazon Bedrock ARN'si girin. 
Format örnekleri:", "awsCustomArnDesc": "ARN içindeki bölgenin yukarıda seçilen AWS Bölgesiyle eşleştiğinden emin olun.", diff --git a/webview-ui/src/i18n/locales/vi/chat.json b/webview-ui/src/i18n/locales/vi/chat.json index 33b13d9b269..91bfa81bfcc 100644 --- a/webview-ui/src/i18n/locales/vi/chat.json +++ b/webview-ui/src/i18n/locales/vi/chat.json @@ -513,5 +513,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "Nhà cung cấp không còn được hỗ trợ", + "message": "Xin lỗi, nhà cung cấp này không còn được hỗ trợ. Chúng tôi nhận thấy rất ít người dùng Roo thực sự sử dụng nó và chúng tôi cần thu hẹp phạm vi mã nguồn để tiếp tục phát triển nhanh và phục vụ tốt cộng đồng. Đây là một quyết định thực sự khó khăn nhưng nó cho phép chúng tôi tập trung vào điều quan trọng nhất với bạn. Chúng tôi biết điều này thật phiền, xin thông cảm.", + "openSettings": "Mở cài đặt" } } diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index f0d986c86c9..93feec7a792 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "Tìm kiếm nhà cung cấp", "noProviderMatchFound": "Không tìm thấy nhà cung cấp", "noMatchFound": "Không tìm thấy hồ sơ phù hợp", + "retiredProviderMessage": "Xin lỗi, nhà cung cấp này không còn được hỗ trợ. Chúng tôi nhận thấy rất ít người dùng Roo thực sự sử dụng nó và chúng tôi cần thu hẹp phạm vi mã nguồn để tiếp tục phát triển nhanh và phục vụ tốt cộng đồng. Đây là một quyết định thực sự khó khăn nhưng nó cho phép chúng tôi tập trung vào điều quan trọng nhất với bạn. Chúng tôi biết điều này thật phiền, xin thông cảm.", "vscodeLmDescription": "API Mô hình Ngôn ngữ VS Code cho phép bạn chạy các mô hình được cung cấp bởi các tiện ích mở rộng khác của VS Code (bao gồm nhưng không giới hạn ở GitHub Copilot). Cách dễ nhất để bắt đầu là cài đặt các tiện ích mở rộng Copilot và Copilot Chat từ VS Code Marketplace.", "awsCustomArnUse": "Nhập một ARN Amazon Bedrock hợp lệ cho mô hình bạn muốn sử dụng. 
Ví dụ về định dạng:", "awsCustomArnDesc": "Đảm bảo rằng vùng trong ARN khớp với vùng AWS đã chọn ở trên.", diff --git a/webview-ui/src/i18n/locales/zh-CN/chat.json b/webview-ui/src/i18n/locales/zh-CN/chat.json index 7a94bfb48d7..60a5d7fdfef 100644 --- a/webview-ui/src/i18n/locales/zh-CN/chat.json +++ b/webview-ui/src/i18n/locales/zh-CN/chat.json @@ -513,5 +513,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "供应商不再受支持", + "message": "抱歉,此供应商已不再受支持。我们发现实际使用它的 Roo 用户非常少,我们需要缩减代码库的范围,以便继续快速交付并更好地服务社区。这是一个非常艰难的决定,但它能让我们专注于对你最重要的事情。我们知道这很遗憾。", + "openSettings": "打开设置" } } diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index b35b4314d77..b532619ac9b 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -309,6 +309,7 @@ "searchProviderPlaceholder": "搜索提供商", "noProviderMatchFound": "未找到提供商", "noMatchFound": "未找到匹配的配置文件", + "retiredProviderMessage": "抱歉,此供应商已不再受支持。我们发现实际使用它的 Roo 用户非常少,我们需要缩减代码库的范围,以便继续快速交付并更好地服务社区。这是一个非常艰难的决定,但它能让我们专注于对你最重要的事情。我们知道这很遗憾。", "vscodeLmDescription": "VS Code 语言模型 API 允许您运行由其他 VS Code 扩展(包括但不限于 GitHub Copilot)提供的模型。最简单的方法是从 VS Code 市场安装 Copilot 和 Copilot Chat 扩展。", "awsCustomArnUse": "请输入有效的 Amazon Bedrock ARN(Amazon资源名称),格式示例:", "awsCustomArnDesc": "请确保ARN中的区域与上方选择的AWS区域一致。", diff --git a/webview-ui/src/i18n/locales/zh-TW/chat.json b/webview-ui/src/i18n/locales/zh-TW/chat.json index 926cb105fb4..7084d09dabf 100644 --- a/webview-ui/src/i18n/locales/zh-TW/chat.json +++ b/webview-ui/src/i18n/locales/zh-TW/chat.json @@ -503,5 +503,10 @@ }, "readCommandOutput": { "title": "Roo read command output" + }, + "retiredProvider": { + "title": "供應商不再受支援", + "message": "抱歉,此供應商已不再受支援。我們發現實際使用它的 Roo 使用者非常少,我們需要縮減程式碼庫的範圍,以便繼續快速交付並更好地服務社群。這是一個非常艱難的決定,但它能讓我們專注於對你最重要的事情。我們知道這很遺憾。", + "openSettings": "開啟設定" } } diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 1ba02bbb425..28791880217 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -319,6 +319,7 @@ "searchProviderPlaceholder": "搜尋供應商", "noProviderMatchFound": "找不到供應商", "noMatchFound": "找不到符合的設定檔", + "retiredProviderMessage": "抱歉,此供應商已不再受支援。我們發現實際使用它的 Roo 使用者非常少,我們需要縮減程式碼庫的範圍,以便繼續快速交付並更好地服務社群。這是一個非常艱難的決定,但它能讓我們專注於對你最重要的事情。我們知道這很遺憾。", "vscodeLmDescription": "VS Code 語言模型 API 可以讓您使用其他擴充功能(如 GitHub Copilot)提供的模型。最簡單的方式是從 VS Code Marketplace 安裝 Copilot 和 Copilot Chat 擴充套件。", "awsCustomArnUse": "輸入您要使用的模型的有效 Amazon Bedrock ARN。格式範例:", "awsCustomArnDesc": "確保 ARN 中的區域與您上面選擇的 AWS 區域相符。", From 2a1be4fc602e5a72f3953bd8bd32d609ea7ee09c Mon Sep 17 00:00:00 2001 From: Hannes Rudolph Date: Sun, 8 Feb 2026 00:07:54 -0700 Subject: [PATCH 9/9] test: update ApiOptions retired-provider test to expect i18n key --- .../src/components/settings/__tests__/ApiOptions.spec.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webview-ui/src/components/settings/__tests__/ApiOptions.spec.tsx b/webview-ui/src/components/settings/__tests__/ApiOptions.spec.tsx index f862087dd06..469dcd914bf 100644 --- a/webview-ui/src/components/settings/__tests__/ApiOptions.spec.tsx +++ b/webview-ui/src/components/settings/__tests__/ApiOptions.spec.tsx @@ -671,7 +671,7 @@ describe("ApiOptions", () => { }) expect(screen.getByTestId("retired-provider-message")).toHaveTextContent( - "Sorry, this provider is no longer supported. 
We saw very few Roo users actually using it and we need to reduce the surface area of our codebase so we can keep shipping fast and serving our community well in this space. It was a really hard decision but it lets us focus on what matters most to you. It sucks, we know.", + "settings:providers.retiredProviderMessage", ) expect(screen.queryByTestId("litellm-provider")).not.toBeInTheDocument() })
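
Note on the expectation change above: asserting on "settings:providers.retiredProviderMessage" rather than the English copy only works if the webview test setup stubs the translation layer so t() echoes raw keys. A minimal sketch of that pattern follows — the react-i18next mock shape here is an assumption for illustration, not a quote of the repo's actual test bootstrap.

// Hypothetical vitest setup (assumed wiring, not the real src/setupTests):
// make t() return the key verbatim so component tests can match stable
// i18n keys like "settings:providers.retiredProviderMessage".
import { vi } from "vitest"

vi.mock("react-i18next", () => ({
	useTranslation: () => ({
		// t("settings:providers.retiredProviderMessage") -> the key itself
		t: (key: string) => key,
	}),
}))

With a mock like this, the retired-provider assertion stays stable even when the user-facing wording in the locale files changes.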