From bc153ea2ddac9a1457bd94e8797f0049e3c14e52 Mon Sep 17 00:00:00 2001 From: Towoadeyemi1 <38192891+Towoadeyemi1@users.noreply.github.com> Date: Thu, 20 Feb 2025 18:05:51 -0700 Subject: [PATCH 1/2] Add Ollama AI integration with local model support - Implemented Ollama proxy routes and client-side model detection - Added support for generating text using local Ollama models - Created new Ollama-specific routes and composable functions - Enhanced AI interaction handling to support Ollama provider - Added Ollama status detection and model availability checks --- config/app.js | 25 +++ config/handleAiInteractions.js | 290 ++++++++++++++++++++------------ public/App.js | 13 ++ public/components/AgentCard.js | 64 ++++++- public/composables/useModels.js | 117 +++++++++++-- server/routes/ollama.js | 60 +++++++ 6 files changed, 439 insertions(+), 130 deletions(-) create mode 100644 server/routes/ollama.js diff --git a/config/app.js b/config/app.js index 351891b..4c36029 100644 --- a/config/app.js +++ b/config/app.js @@ -41,8 +41,33 @@ app.use((req, res, next) => { next(); }); +// Add Ollama proxy route +app.post('/api/ollama/:model', async (req, res) => { + try { + const response = await fetch(`http://localhost:11434/api/generate`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + model: req.params.model, + prompt: req.body.prompt, + stream: false + }), + }); + + const data = await response.json(); + res.json({ text: data.response }); + } catch (error) { + console.error('Ollama API error:', error); + res.status(500).json({ error: 'Failed to communicate with Ollama' }); + } +}); +const ollamaRoutes = require('../server/routes/ollama'); +// Add Ollama routes +app.use('/api/ollama', ollamaRoutes); //Export the app for use on the index.js page module.exports = { app }; diff --git a/config/handleAiInteractions.js b/config/handleAiInteractions.js index e3ac024..f3876d4 100644 --- a/config/handleAiInteractions.js +++ b/config/handleAiInteractions.js @@ -8,36 +8,76 @@ const { Mistral } = require("@mistralai/mistralai"); const { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } = require("@google/generative-ai"); // Helper function to create provider-specific clients -const createClient = (provider, credentials) => { - const envKey = process.env[`${provider.toUpperCase()}_API_KEY`]; - const apiKey = credentials?.apiKey || envKey; - - if (!apiKey) { - throw new Error(`No API key available for ${provider}`); - } +const createClient = async (provider, modelConfig) => { + // Normalize provider to lowercase + const normalizedProvider = provider.toLowerCase(); + + // Define which providers need credentials + const requiresCredentials = { + 'openai': true, + 'anthropic': true, + 'azureai': true, + 'mistral': true, + 'groq': true, + 'gemini': true, + 'ollama': false + }; + + const envKey = process.env[`${provider.toUpperCase()}_API_KEY`]; + const credentials = modelConfig.apiKey || envKey; + + // Check credentials only for providers that require them + if (requiresCredentials[provider.toLowerCase()] && !credentials) { + throw new Error(`No API key available for ${provider}`); + } // console.log('LLM Request for ', provider) switch (provider.toLowerCase()) { case 'openai': - return new OpenAI({ apiKey }); + return new OpenAI({ apiKey: credentials }); case 'anthropic': - return new Anthropic({ apiKey }); + return new Anthropic({ apiKey: credentials }); case 'azureai': const endpoint = credentials?.apiEndpoint || process.env.AZUREAI_ENDPOINT; if (!endpoint) { 
throw new Error('AzureAI requires both an API key and endpoint. No endpoint was provided.'); } - if (!apiKey) { + if (!credentials) { throw new Error('AzureAI requires both an API key and endpoint. No API key was provided.'); } - return new OpenAIClient(endpoint, new AzureKeyCredential(apiKey)); + return new OpenAIClient(endpoint, new AzureKeyCredential(credentials)); case 'mistral': - return new Mistral({ apiKey }); + return new Mistral({ apiKey: credentials }); case 'groq': - return new Groq({ apiKey }); + return new Groq({ apiKey: credentials }); case 'gemini': - return new GoogleGenerativeAI(apiKey); + return new GoogleGenerativeAI(credentials); + case 'ollama': + console.log('Creating Ollama client with model:', modelConfig.model); + return { + provider: 'ollama', + model: modelConfig.model, + completions: { + create: async (config) => { + const response = await fetch('http://localhost:11434/api/generate', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + model: modelConfig.model, + prompt: config.messages.map(m => `${m.role}: ${m.content}`).join('\n'), + stream: true + }) + }); + + if (!response.ok) { + throw new Error('Ollama generation failed'); + } + + return response.body; + } + } + }; default: throw new Error(`Unsupported provider: ${provider}`); } @@ -56,7 +96,7 @@ const validateMessages = (messages) => { const handlePrompt = async (promptConfig, sendToClient) => { const { - model: modelConfig, // Now expects the full model object + model: modelConfig, uuid, session, messageHistory, @@ -66,42 +106,26 @@ const handlePrompt = async (promptConfig, sendToClient) => { } = promptConfig; try { - // Create messages array if not provided in history const messages = messageHistory.length ? messageHistory : [ { role: "system", content: systemPrompt }, { role: "user", content: userPrompt }, ]; - // Validate message format - if (!validateMessages(messages)) { - throw new Error('Invalid message format in conversation history'); - } - - // Create provider-specific client - const client = createClient(modelConfig.provider, { - apiKey: modelConfig.apiKey, - apiEndpoint: modelConfig.apiEndpoint, - }); + const client = await createClient(modelConfig.provider, modelConfig); - //Create the prompt object to pass forward to the function - let promptPayload = { + let promptPayload = { model: modelConfig.model, messages, temperature: Math.max(0, Math.min(1, parseFloat(temperature) || 0.5)), stream: true, - } - - //Handle model specific limitations - if(modelConfig.model == 'o3-mini-2025-01-31') delete promptPayload.temperature; + }; - // Handle provider-specific prompts const responseStream = await handleProviderPrompt( client, modelConfig.provider, promptPayload ); - // Process the response stream await handleProviderResponse( responseStream, modelConfig.provider, @@ -111,9 +135,9 @@ const handlePrompt = async (promptConfig, sendToClient) => { ); } catch (error) { - console.error("Prompt error:", error); sendToClient(uuid, session, "ERROR", JSON.stringify({ - message: error.message || "An error occurred while processing the prompt" + message: error.message || "An error occurred while processing the prompt", + details: error.stack })); } }; @@ -145,6 +169,13 @@ const handleProviderPrompt = async (client, provider, config) => { case 'gemini': return handleGeminiPrompt(client, config); + case 'ollama': + if (!client?.completions?.create) { + console.error('Invalid client:', client); + throw new Error('Invalid Ollama client configuration'); + } + return 
client.completions.create(config); + default: throw new Error(`Unsupported provider: ${provider}`); } @@ -225,95 +256,130 @@ const handleGeminiPrompt = async (client, config) => { // Handle provider responses const handleProviderResponse = async (responseStream, provider, uuid, session, sendToClient) => { - // Normalize provider name to lowercase - provider = provider.toLowerCase(); + provider = provider.toLowerCase(); + + if (provider === 'ollama') { + try { + const reader = responseStream.getReader(); + const decoder = new TextDecoder(); + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + const chunk = decoder.decode(value); + const lines = chunk.split('\n').filter(line => line.trim()); + + for (const line of lines) { + try { + const response = JSON.parse(line); + if (response.response) { + sendToClient(uuid, session, "message", response.response); + } + } catch (e) { + // Silent catch for parsing errors + } + } + } + + sendToClient(uuid, session, "EOM", null); + + } catch (error) { + sendToClient(uuid, session, "ERROR", JSON.stringify({ + message: "Error processing Ollama stream", + error: error.message + })); + } + return; + } - // Handle Gemini separately - if (provider === "gemini") { - for await (const chunk of responseStream.stream) { - sendToClient(uuid, session, "message", chunk.text()); + // Handle Gemini separately + if (provider === "gemini") { + for await (const chunk of responseStream.stream) { + sendToClient(uuid, session, "message", chunk.text()); + } + sendToClient(uuid, session, "EOM", null); + return; } - sendToClient(uuid, session, "EOM", null); - return; - } - // Handle Azure separately - if (provider === "azureai") { - const stream = Readable.from(responseStream); - handleAzureStream(stream, uuid, session, sendToClient); - return; - } + // Handle Azure separately + if (provider === "azureai") { + const stream = Readable.from(responseStream); + handleAzureStream(stream, uuid, session, sendToClient); + return; + } - // Handle other providers - let messageEnded = false; - for await (const part of responseStream) { - try { - let content = null; - - switch (provider) { - case "openai": - content = part?.choices?.[0]?.delta?.content; - messageEnded = part?.choices?.[0]?.finish_reason === "stop"; - break; - case "anthropic": - if (part.type === "message_stop") { - messageEnded = true; - } else { - content = part?.content_block?.text || part?.delta?.text || ""; - } - break; - case "mistral": - content = part?.data?.choices?.[0]?.delta?.content; - messageEnded = part?.data?.choices?.[0]?.finishReason === "stop"; - break; - case "groq": - content = part?.choices?.[0]?.delta?.content; - messageEnded = part?.choices?.[0]?.finish_reason === "stop"; - break; - } + // Handle other providers + let messageEnded = false; + for await (const part of responseStream) { + try { + let content = null; + + switch (provider) { + case "openai": + content = part?.choices?.[0]?.delta?.content; + messageEnded = part?.choices?.[0]?.finish_reason === "stop"; + break; + case "anthropic": + if (part.type === "message_stop") { + messageEnded = true; + } else { + content = part?.content_block?.text || part?.delta?.text || ""; + } + break; + case "mistral": + content = part?.data?.choices?.[0]?.delta?.content; + messageEnded = part?.data?.choices?.[0]?.finishReason === "stop"; + break; + case "groq": + content = part?.choices?.[0]?.delta?.content; + messageEnded = part?.choices?.[0]?.finish_reason === "stop"; + break; + } + + if (content) { + sendToClient(uuid, 
session, "message", content); + } + + // Send EOM if we've reached the end of the message + if (messageEnded) { + sendToClient(uuid, session, "EOM", null); + } + } catch (error) { + console.error(`Error processing ${provider} stream message:`, error); + sendToClient(uuid, session, "ERROR", JSON.stringify({ + message: "Error processing stream message", + error: error.message, + provider: provider + })); + } + } - if (content) { - sendToClient(uuid, session, "message", content); - } - - // Send EOM if we've reached the end of the message - if (messageEnded) { + // Send final EOM if not already sent + if (!messageEnded) { sendToClient(uuid, session, "EOM", null); - } - } catch (error) { - console.error(`Error processing ${provider} stream message:`, error); - sendToClient(uuid, session, "ERROR", JSON.stringify({ - message: "Error processing stream message", - error: error.message, - provider: provider - })); } - } - - // Send final EOM if not already sent - if (!messageEnded) { - sendToClient(uuid, session, "EOM", null); - } }; + // Handle AzureAI specific stream const handleAzureStream = (stream, uuid, session, sendToClient) => { - stream.on("data", (event) => { - event.choices.forEach((choice) => { - if (choice.delta?.content !== undefined) { - sendToClient(uuid, session, "message", choice.delta.content); - } + stream.on("data", (event) => { + event.choices.forEach((choice) => { + if (choice.delta?.content !== undefined) { + sendToClient(uuid, session, "message", choice.delta.content); + } + }); }); - }); - stream.on("end", () => sendToClient(uuid, session, "EOM", null)); - stream.on("error", (error) => { - sendToClient(uuid, session, "ERROR", JSON.stringify({ - message: "Stream error.", - error: error.message - })); - }); + stream.on("end", () => sendToClient(uuid, session, "EOM", null)); + stream.on("error", (error) => { + sendToClient(uuid, session, "ERROR", JSON.stringify({ + message: "Stream error.", + error: error.message + })); + }); }; module.exports = { - handlePrompt + handlePrompt }; \ No newline at end of file diff --git a/public/App.js b/public/App.js index 0900b7a..7f52989 100644 --- a/public/App.js +++ b/public/App.js @@ -111,9 +111,22 @@ export default { fileInput.value.click(); } + // Add Ollama model detection + const detectOllamaModels = async () => { + try { + const response = await fetch('http://localhost:11434/api/tags'); + const data = await response.json(); + return data.models || []; + } catch (error) { + console.warn('Ollama not detected locally:', error); + return []; + } + }; + Vue.onMounted(async ()=>{ await getConfigs(); await fetchServerModels(); + await detectOllamaModels(); // Add Ollama model detection await socketIoConnection(); }); diff --git a/public/components/AgentCard.js b/public/components/AgentCard.js index 25f07a0..d3913e0 100644 --- a/public/components/AgentCard.js +++ b/public/components/AgentCard.js @@ -189,6 +189,12 @@ export default { + + +
+
+            {{ ollamaStatusDisplay.text }}
+
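Only the text interpolation for the status badge survives in this hunk; the surrounding markup is not shown. A minimal sketch of how the ollamaStatusDisplay computed might be wired up in the component template, assuming PrimeIcons classes and a plain <span> wrapper (both hypothetical):

    <span class="ollama-status">
      <i :class="['pi', ollamaStatusDisplay.icon]"></i>
      {{ ollamaStatusDisplay.text }}
    </span>
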
@@ -210,7 +216,7 @@ export default { const triggerPending = Vue.ref(false); // Composables - const { allModels: availableModels } = useModels(); + const { allModels: availableModels, isOllamaAvailable, ollamaStatus } = useModels(); const { wsUuid, sessions, @@ -294,6 +300,19 @@ export default { })) ); + // Add visual indicator for Ollama status + const ollamaStatusDisplay = Vue.computed(() => { + switch (ollamaStatus.value) { + case 'checking': + return { icon: 'pi-sync pi-spin', text: 'Checking Ollama...' }; + case 'available': + return { icon: 'pi-check-circle', text: 'Ollama Available' }; + case 'unavailable': + return { icon: 'pi-times-circle', text: 'Ollama Unavailable' }; + default: + return { icon: 'pi-question-circle', text: 'Unknown Status' }; + } + }); // Card-specific functions const handleCardUpdate = (data) => { @@ -662,6 +681,45 @@ export default { unregisterSession(websocketId.value); }); + // Handle model selection including Ollama models + const handleModelSelect = (model) => { + if (model.provider === 'ollama' && !isOllamaAvailable.value) { + alert('Ollama is not available. Please ensure it is running locally.'); + return; + } + // ... existing model selection logic ... + }; + + const generateResponse = async (prompt, systemPrompt) => { + try { + if (localCardData.value.data.model?.provider === 'ollama') { + // Special handling for Ollama models + const response = await fetch('/api/ollama/generate', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + model: localCardData.value.data.model.model, + prompt: prompt, + systemPrompt: systemPrompt + }) + }); + + if (!response.ok) { + throw new Error('Ollama generation failed'); + } + + const data = await response.json(); + return data.text; + } + // ... existing code for other providers ... 
+ } catch (error) { + console.error('Generation error:', error); + return `Error: ${error.message}`; + } + }; + return { // Core setup localCardData, @@ -675,12 +733,16 @@ export default { availableModels, outputSocket, sessionStatus, + ollamaStatus, + ollamaStatusDisplay, // Card functions handlePromptChange, handleSocketUpdate, handleHtmlUpdate, triggerAgent, + handleModelSelect, + generateResponse }; }, }; \ No newline at end of file diff --git a/public/composables/useModels.js b/public/composables/useModels.js index 00b4e43..91a480f 100644 --- a/public/composables/useModels.js +++ b/public/composables/useModels.js @@ -4,7 +4,10 @@ const modelRegistry = Vue.ref(new Map()); // Canvas model cards registry const serverModels = Vue.ref([]); // Initialize as empty array const lastModelConfig = Vue.ref(null); -export const useModels = () => { +export function useModels() { + const availableModels = Vue.ref([]); + const isLoading = Vue.ref(false); + // Helper Functions const isValidField = (field) => field && typeof field === 'string' && field.trim().length > 0; @@ -31,17 +34,28 @@ export const useModels = () => { // Server Model Functions const fetchServerModels = async () => { try { - const response = await axios.get("/api/models"); - if (response.data?.payload && Array.isArray(response.data.payload)) { - serverModels.value = response.data.payload; - } else { - console.warn("Invalid server models response format", response.data); - serverModels.value = []; + isLoading.value = true; + // Fetch Ollama models + const ollamaResponse = await fetch('/api/ollama/models'); + if (ollamaResponse.ok) { + const ollamaData = await ollamaResponse.json(); + const ollamaModels = ollamaData.models?.map(model => ({ + displayName: `Ollama: ${model.name}`, // Add displayName for dropdown + name: { en: `Ollama: ${model.name}`, fr: `Ollama: ${model.name}` }, + model: model.name, + provider: 'ollama', + local: true, + apiKey: 'local' // Add required field for validation + })) || []; + + // Update both refs + serverModels.value = [...serverModels.value, ...ollamaModels]; + availableModels.value = [...availableModels.value, ...ollamaModels]; } - console.log("Loaded the following models", serverModels.value); } catch (error) { - console.error("Error fetching models:", error); - serverModels.value = []; // Ensure it's always an array + console.error('Error fetching models:', error); + } finally { + isLoading.value = false; } }; @@ -86,11 +100,8 @@ export const useModels = () => { // Create a Map to track unique models by model ID const uniqueModels = new Map(); - // Ensure serverModels.value is an array before attempting to iterate - const currentServerModels = Array.isArray(serverModels.value) ? 
serverModels.value : []; - // Add server models first (these will be overridden by canvas models if they exist) - currentServerModels.forEach(model => { + serverModels.value.forEach(model => { if (model && model.model) { // Add validation uniqueModels.set(model.model, model); } @@ -103,10 +114,75 @@ export const useModels = () => { } }); + // Add Ollama models + availableModels.value.forEach(model => { + if (model && model.model) { + uniqueModels.set(model.model, model); + } + }); + return Array.from(uniqueModels.values()); }); - + const localOllamaModels = Vue.ref([]); + const isOllamaAvailable = Vue.ref(false); + const ollamaStatus = Vue.ref('checking'); // 'checking', 'available', 'unavailable' + + const detectOllamaModels = async () => { + try { + ollamaStatus.value = 'checking'; + const response = await fetch('http://localhost:11434/api/tags'); + if (!response.ok) throw new Error('Ollama API returned an error'); + + const data = await response.json(); + localOllamaModels.value = data.models?.map(model => ({ + name: { en: `Ollama: ${model.name}`, fr: `Ollama: ${model.name}` }, + model: model.name, + provider: 'ollama', + local: true + })) || []; + + isOllamaAvailable.value = true; + ollamaStatus.value = 'available'; + + // Merge with other available models + availableModels.value = [...availableModels.value, ...localOllamaModels.value]; + + } catch (error) { + console.warn('Ollama detection failed:', error); + isOllamaAvailable.value = false; + ollamaStatus.value = 'unavailable'; + } + }; + + const generateWithOllama = async (model, prompt, systemPrompt = '') => { + try { + const fullPrompt = systemPrompt ? `${systemPrompt}\n\n${prompt}` : prompt; + + const response = await fetch('/api/ollama/generate', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + model, + prompt: fullPrompt, + stream: false + }) + }); + + if (!response.ok) { + throw new Error(`Ollama API error: ${response.statusText}`); + } + + const data = await response.json(); + return data.text; + + } catch (error) { + console.error('Ollama generation error:', error); + throw new Error(`Failed to generate with Ollama: ${error.message}`); + } + }; return { // Core functions @@ -119,6 +195,13 @@ export const useModels = () => { // Raw refs (in case needed) serverModels, - modelRegistry + modelRegistry, + availableModels, + localOllamaModels, + isOllamaAvailable, + ollamaStatus, + detectOllamaModels, + generateWithOllama, + isLoading }; -}; \ No newline at end of file +} \ No newline at end of file diff --git a/server/routes/ollama.js b/server/routes/ollama.js new file mode 100644 index 0000000..b9b1954 --- /dev/null +++ b/server/routes/ollama.js @@ -0,0 +1,60 @@ +const express = require('express'); +const fetch = require('node-fetch'); + +const router = express.Router(); +const OLLAMA_BASE_URL = 'http://localhost:11434'; + +// Middleware to check Ollama availability +const checkOllamaStatus = async (_, res, next) => { + try { + const response = await fetch(`${OLLAMA_BASE_URL}/api/tags`); + if (!response.ok) { + throw new Error('Ollama service unavailable'); + } + next(); + } catch (error) { + res.status(503).json({ + error: 'Ollama service unavailable', + details: 'Please ensure Ollama is running locally' + }); + } +}; + +// Get available models +router.get('/models', async (_, res) => { + try { + const response = await fetch(`${OLLAMA_BASE_URL}/api/tags`); + if (!response.ok) { + throw new Error('Failed to fetch from Ollama'); + } + const data = await response.json(); + 
res.json({ models: data.models }); // Ensure we're sending the models array + } catch (error) { + console.error('Ollama error:', error); + res.status(500).json({ error: error.message, models: [] }); + } +}); + +// Generate text +router.post('/generate', async (req, res) => { + try { + const { model, prompt } = req.body; + const response = await fetch(`${OLLAMA_BASE_URL}/api/generate`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ model, prompt }) + }); + + if (!response.ok) { + throw new Error('Ollama generation failed'); + } + + const data = await response.json(); + res.json({ text: data.response }); + } catch (error) { + console.error('Ollama generation error:', error); + res.status(500).json({ error: error.message }); + } +}); + +module.exports = router; \ No newline at end of file From 1da682aaf1d00b0b20d70e6a51c86171e0421cf8 Mon Sep 17 00:00:00 2001 From: Towoadeyemi1 <38192891+Towoadeyemi1@users.noreply.github.com> Date: Sun, 23 Feb 2025 18:04:20 -0700 Subject: [PATCH 2/2] Add Ollama AI integration with local model support Implemented Ollama proxy routes and client-side model detection Added support for generating text using local Ollama models Created new Ollama-specific routes and composable functions Enhanced AI interaction handling to support Ollama provider Added Ollama status detection and model availability checks --- controllers/_config/models.js | 5 +--- public/composables/useModels.js | 50 ++++++++++++++++++++------------- 2 files changed, 32 insertions(+), 23 deletions(-) diff --git a/controllers/_config/models.js b/controllers/_config/models.js index 76143b7..e6098aa 100644 --- a/controllers/_config/models.js +++ b/controllers/_config/models.js @@ -71,10 +71,7 @@ exports.getModels = async function (req, res, next) { ]; // Send the counts as JSON response - res.status(200).json({ - message: "Here are the server side model and providers", - payload: payload, - }); + return res.status(200).json(payload); } catch (error) { next(ApiError.internal("An error occurred while retrieving stats")); } diff --git a/public/composables/useModels.js b/public/composables/useModels.js index 91a480f..2921304 100644 --- a/public/composables/useModels.js +++ b/public/composables/useModels.js @@ -35,23 +35,37 @@ export function useModels() { const fetchServerModels = async () => { try { isLoading.value = true; - // Fetch Ollama models + + // Fetch all models first + const allModelsResponse = await fetch('/api/models'); + if (allModelsResponse.ok) { + const allModelsData = await allModelsResponse.json(); + serverModels.value = allModelsData.map(model => ({ + displayName: `${model.provider}: ${model.name.en}`, + name: { en: model.name.en, fr: model.name.fr }, + model: model.model, + provider: model.provider, + apiKey: model.apiKey, + apiEndpoint: model.apiEndpoint + })); + } + + // Then fetch Ollama specifically const ollamaResponse = await fetch('/api/ollama/models'); if (ollamaResponse.ok) { const ollamaData = await ollamaResponse.json(); const ollamaModels = ollamaData.models?.map(model => ({ - displayName: `Ollama: ${model.name}`, // Add displayName for dropdown + displayName: `Ollama: ${model.name}`, name: { en: `Ollama: ${model.name}`, fr: `Ollama: ${model.name}` }, model: model.name, provider: 'ollama', local: true, - apiKey: 'local' // Add required field for validation + apiKey: 'local' })) || []; - // Update both refs serverModels.value = [...serverModels.value, ...ollamaModels]; - availableModels.value = [...availableModels.value, 
...ollamaModels]; } + } catch (error) { console.error('Error fetching models:', error); } finally { @@ -94,33 +108,31 @@ export function useModels() { // Combined Models const allModels = Vue.computed(() => { - // Get unique models by combining server models and canvas models const canvasModels = Array.from(modelRegistry.value.values()).flat(); - // Create a Map to track unique models by model ID const uniqueModels = new Map(); - // Add server models first (these will be overridden by canvas models if they exist) - serverModels.value.forEach(model => { - if (model && model.model) { // Add validation + // 1. Add canvas models first (highest priority) + canvasModels.forEach(model => { + if (model?.model) { uniqueModels.set(model.model, model); } }); - - // Add canvas models (these will override server models with the same ID) - canvasModels.forEach(model => { - if (model && model.model) { // Add validation + + // 2. Add server models (non-Ollama) + serverModels.value.forEach(model => { + if (model?.model && model.provider !== 'ollama') { uniqueModels.set(model.model, model); } }); - - // Add Ollama models - availableModels.value.forEach(model => { - if (model && model.model) { + + // 3. Add Ollama models last + serverModels.value.forEach(model => { + if (model?.model && model.provider === 'ollama') { uniqueModels.set(model.model, model); } }); - + return Array.from(uniqueModels.values()); });
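
One detail worth making explicit in the reworked allModels computed: Map.prototype.set() overwrites existing keys, so with the insertion order above (canvas models, then non-Ollama server models, then Ollama models) the last entry added for a duplicate model ID is the one that survives. If canvas models are really meant to have the highest priority, keeping the first entry seen does that instead. A standalone sketch of the two behaviours, using hypothetical sample data that is not part of the patch:

    // "Last write wins" -- mirrors plain uniqueModels.set() calls in the order used above.
    const canvasModels = [{ model: 'llama3', provider: 'canvas', displayName: 'Canvas: llama3' }];
    const serverModels = [
      { model: 'gpt-4o', provider: 'openai' },
      { model: 'llama3', provider: 'ollama', local: true },
    ];

    const lastWins = new Map();
    [...canvasModels, ...serverModels].forEach(m => lastWins.set(m.model, m));
    console.log(lastWins.get('llama3').provider); // 'ollama'

    // "First write wins" -- gives canvas entries priority on duplicate model IDs.
    const firstWins = new Map();
    [...canvasModels, ...serverModels].forEach(m => {
      if (!firstWins.has(m.model)) firstWins.set(m.model, m);
    });
    console.log(firstWins.get('llama3').provider); // 'canvas'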
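
For reference, a minimal end-to-end exercise of the two router endpoints mounted in patch 1/2 (GET /api/ollama/models and POST /api/ollama/generate). The port and fallback model name are assumptions; adjust them to the local setup:

    // Assumes the Express app listens on http://localhost:3000 (hypothetical port)
    // and that at least one model has been pulled into the local Ollama install.
    const BASE = 'http://localhost:3000';

    const listOllamaModels = async () => {
      const res = await fetch(`${BASE}/api/ollama/models`);
      if (!res.ok) throw new Error(`Model listing failed: ${res.status}`);
      const { models } = await res.json();
      return models.map(m => m.name);
    };

    const generateOnce = async (model, prompt) => {
      const res = await fetch(`${BASE}/api/ollama/generate`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ model, prompt }),
      });
      if (!res.ok) throw new Error(`Generation failed: ${res.status}`);
      const { text } = await res.json();
      return text;
    };

    listOllamaModels()
      .then(names => generateOnce(names[0] ?? 'llama3', 'Reply with one short sentence.'))
      .then(console.log)
      .catch(console.error);

Two things to watch when exercising these routes: config/app.js also registers an inline app.post('/api/ollama/:model') before mounting the router at the same prefix, so a POST to /api/ollama/generate is matched by that inline proxy first, with 'generate' interpreted as the model name; and the router's /generate handler forwards to Ollama without stream: false, while Ollama's /api/generate streams by default, so the single response.json() call there only holds up if the upstream reply arrives as one JSON object.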