diff --git a/agent/pkg/llm/ollama.go b/agent/pkg/llm/ollama.go new file mode 100644 index 0000000..6bcf1fd --- /dev/null +++ b/agent/pkg/llm/ollama.go @@ -0,0 +1,473 @@ +package llm + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "regexp" + "strings" +) + +// OllamaProvider implements the Provider interface for Ollama +type OllamaProvider struct { + baseURL string + config Config + registeredTools map[string]bool + messages []OllamaMessage +} + +// OllamaMessage represents a message in Ollama format +type OllamaMessage struct { + Role string `json:"role"` + Content string `json:"content"` +} + +// OllamaRequest represents a request to Ollama API +type OllamaRequest struct { + Model string `json:"model"` + Messages []OllamaMessage `json:"messages"` + Stream bool `json:"stream"` +} + +// OllamaResponse represents a response from Ollama API +type OllamaResponse struct { + Message OllamaMessage `json:"message"` + Done bool `json:"done"` +} + +// NewOllamaProvider creates a new Ollama provider +func NewOllamaProvider(baseURL string, modelName string) (*OllamaProvider, error) { + config := Config{ + Provider: "ollama", + ModelName: modelName, + ResponseMIMEType: "application/json", + SystemPrompt: getOllamaSystemPrompt(), + MaxRetries: 3, + MaxJSONRetries: 2, + } + + if modelName == "" { + config.ModelName = "llama3.1:8b" + } + + return NewOllamaProviderWithConfig(baseURL, config) +} + +// getOllamaSystemPrompt returns an optimized system prompt for Ollama models +func getOllamaSystemPrompt() string { + return `You are a helpful AI assistant with access to various tools and functions. + +CRITICAL: You MUST respond with valid JSON only. No markdown, no explanations, no extra text. 
+ +When you need to use a tool, respond with: +{"toolName": "tool_name", "arguments": {...}, "explanation": "why"} + +For answers, respond with: +{"answer": "your response here", "explanation": "context"} + +For questions, respond with: +{"question": "what you need to know"} + +Keep responses simple and valid JSON.` +} + +// NewOllamaProviderWithConfig creates a new Ollama provider with custom config +func NewOllamaProviderWithConfig(baseURL string, llmConfig Config) (*OllamaProvider, error) { + if baseURL == "" { + baseURL = "http://localhost:11434" + } + + // Ensure baseURL doesn't have trailing slash + baseURL = strings.TrimSuffix(baseURL, "/") + + if llmConfig.ModelName == "" { + llmConfig.ModelName = "llama3.1:8b" + } + + provider := &OllamaProvider{ + baseURL: baseURL, + config: llmConfig, + registeredTools: make(map[string]bool), + messages: []OllamaMessage{}, + } + + // Populate registered tools map + for _, toolName := range llmConfig.RegisteredTools { + provider.registeredTools[toolName] = true + } + + // Initialize with system message + provider.messages = append(provider.messages, OllamaMessage{ + Role: "system", + Content: llmConfig.SystemPrompt, + }) + + return provider, nil +} + +// SendMessage sends a message to Ollama +func (p *OllamaProvider) SendMessage(ctx context.Context, message string) (*Response, error) { + // Add user message + p.messages = append(p.messages, OllamaMessage{ + Role: "user", + Content: message, + }) + + var lastErr error + var currentMessages []OllamaMessage + + // Retry loop for JSON parsing errors + for attempt := 0; attempt <= p.config.MaxJSONRetries; attempt++ { + currentMessages = make([]OllamaMessage, len(p.messages)) + copy(currentMessages, p.messages) + + // Create Ollama request + req := OllamaRequest{ + Model: p.config.ModelName, + Messages: currentMessages, + Stream: false, + } + + resp, err := p.makeRequest(ctx, req) + if err != nil { + return nil, p.friendlyError(err) + } + + content := resp.Message.Content + 
parsedResp, err := p.parseResponse(content) + if err == nil { + // Add assistant message to history + p.messages = append(p.messages, resp.Message) + return parsedResp, nil + } + + // Check if it's a JSON parse error + if jsonErr, ok := err.(*JSONParseError); ok { + lastErr = err + if attempt < p.config.MaxJSONRetries { + log.Printf("JSON parse error (attempt %d/%d): %v. Retrying with feedback...", attempt+1, p.config.MaxJSONRetries+1, err) + + // Construct feedback message for the LLM + feedbackMsg := fmt.Sprintf("I received an error parsing your last response as JSON. Error: %v\n\nYour previous response was:\n%s\n\nPlease correct the format and respond ONLY with valid JSON matching the schema.", jsonErr.Err, jsonErr.OriginalText) + currentMessages = append(currentMessages, OllamaMessage{ + Role: "user", + Content: feedbackMsg, + }) + continue + } + } else { + // legitimate other error + return nil, err + } + } + + // If we exhausted retries, fallback to returning the text from the last error if available + if jsonErr, ok := lastErr.(*JSONParseError); ok { + log.Printf("Exhausted JSON retries. 
Falling back to raw text.") + return &Response{Text: jsonErr.OriginalText}, nil + } + + return nil, lastErr +} + +// makeRequest sends a request to Ollama API +func (p *OllamaProvider) makeRequest(ctx context.Context, req OllamaRequest) (*OllamaResponse, error) { + jsonData, err := json.Marshal(req) + if err != nil { + return nil, fmt.Errorf("failed to marshal request: %w", err) + } + + url := fmt.Sprintf("%s/api/chat", p.baseURL) + httpReq, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonData)) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + httpReq.Header.Set("Content-Type", "application/json") + + client := &http.Client{} + resp, err := client.Do(httpReq) + if err != nil { + return nil, fmt.Errorf("failed to make request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("ollama API error (status %d): %s", resp.StatusCode, string(body)) + } + + var ollamaResp OllamaResponse + if err := json.NewDecoder(resp.Body).Decode(&ollamaResp); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + + return &ollamaResp, nil +} + +// GetHistory returns the conversation history +func (p *OllamaProvider) GetHistory() []Message { + var history []Message + for _, msg := range p.messages { + role := "user" + if msg.Role == "assistant" { + role = "assistant" + } else if msg.Role == "system" { + role = "system" + } + + history = append(history, Message{ + Role: role, + Content: msg.Content, + }) + } + return history +} + +// GetRawHistory returns the raw history for session carry-over +func (p *OllamaProvider) GetRawHistory() any { + return p.messages +} + +// AppendSystemNotice adds a system notice to the history +func (p *OllamaProvider) AppendSystemNotice(message string) error { + p.messages = append(p.messages, OllamaMessage{ + Role: "system", + Content: message, + }) + return nil +} + +// Close 
closes the provider and releases resources +func (p *OllamaProvider) Close() error { + // Ollama doesn't require explicit closing + return nil +} + +// parseResponse converts Ollama response to generic Response with enhanced flexibility +func (p *OllamaProvider) parseResponse(text string) (*Response, error) { + // Clean the text first + text = strings.TrimSpace(text) + + // Try multiple parsing strategies for better robustness + genericResp, err := p.tryParseStrategies(text) + if err != nil { + return nil, err + } + + return genericResp, nil +} + +// tryParseStrategies attempts multiple JSON parsing approaches +func (p *OllamaProvider) tryParseStrategies(text string) (*Response, error) { + // Strategy 1: Standard array/object parsing (original approach) + if resp, err := p.parseStandard(text); err == nil { + return resp, nil + } + + // Strategy 2: Extract JSON from markdown code blocks + if resp, err := p.parseFromMarkdown(text); err == nil { + return resp, nil + } + + // Strategy 3: Flexible key-based parsing (look for tool/action patterns) + if resp, err := p.parseFlexible(text); err == nil { + return resp, nil + } + + // Strategy 4: Try to extract any valid JSON object/array from the text + if resp, err := p.parseExtractedJSON(text); err == nil { + return resp, nil + } + + // All strategies failed + return nil, &JSONParseError{ + OriginalText: text, + Err: fmt.Errorf("all parsing strategies failed"), + } +} + +// parseStandard - original parsing approach +func (p *OllamaProvider) parseStandard(text string) (*Response, error) { + var outerResponses []LLMOuterResponse + if err := json.Unmarshal([]byte(text), &outerResponses); err != nil { + // Try single object + var single LLMOuterResponse + if err2 := json.Unmarshal([]byte(text), &single); err2 != nil { + return nil, err2 + } + outerResponses = []LLMOuterResponse{single} + } + + return p.processLLMResponses(outerResponses), nil +} + +// parseFromMarkdown - extract JSON from markdown code blocks +func (p 
*OllamaProvider) parseFromMarkdown(text string) (*Response, error) { + // Look for JSON in markdown code blocks + jsonRegex := regexp.MustCompile("```(?:json)?\\s*(\\{[\\s\\S]*?\\}|\\[[\\s\\S]*?\\])\\s*```") + matches := jsonRegex.FindStringSubmatch(text) + if len(matches) > 1 { + return p.parseStandard(matches[1]) + } + + // Also try without language specifier + jsonRegex2 := regexp.MustCompile("```\\s*(\\{[\\s\\S]*?\\}|\\[[\\s\\S]*?\\])\\s*```") + matches2 := jsonRegex2.FindStringSubmatch(text) + if len(matches2) > 1 { + return p.parseStandard(matches2[1]) + } + + return nil, fmt.Errorf("no JSON found in markdown") +} + +// parseFlexible - look for tool/action patterns in various formats +func (p *OllamaProvider) parseFlexible(text string) (*Response, error) { + resp := &Response{} + + // Look for tool call patterns + toolPatterns := []string{ + `"toolName"\s*:\s*"([^"]+)"`, + `"tool"\s*:\s*"([^"]+)"`, + `"action"\s*:\s*"([^"]+)"`, + `"function"\s*:\s*"([^"]+)"`, + } + + for _, pattern := range toolPatterns { + re := regexp.MustCompile(pattern) + matches := re.FindStringSubmatch(text) + if len(matches) > 1 && p.registeredTools[matches[1]] { + // Found a valid tool call + resp.ToolCalls = append(resp.ToolCalls, ToolCall{ + ToolName: matches[1], + Arguments: make(map[string]interface{}), + Explanation: "Tool call detected", + }) + break + } + } + + // Look for answer patterns + answerPatterns := []string{ + `"answer"\s*:\s*"([^"]*(?:\\.[^"]*)*)"`, + `"response"\s*:\s*"([^"]*(?:\\.[^"]*)*)"`, + `"result"\s*:\s*"([^"]*(?:\\.[^"]*)*)"`, + } + + for _, pattern := range answerPatterns { + re := regexp.MustCompile(pattern) + matches := re.FindStringSubmatch(text) + if len(matches) > 1 { + resp.Text = matches[1] + break + } + } + + // Look for question patterns + questionPatterns := []string{ + `"question"\s*:\s*"([^"]*(?:\\.[^"]*)*)"`, + } + + for _, pattern := range questionPatterns { + re := regexp.MustCompile(pattern) + matches := re.FindStringSubmatch(text) + if 
len(matches) > 1 { + resp.Question = matches[1] + break + } + } + + // If we found any structured content, return it + if len(resp.ToolCalls) > 0 || resp.Text != "" || resp.Question != "" { + return resp, nil + } + + return nil, fmt.Errorf("no structured content found") +} + +// parseExtractedJSON - try to find and parse any valid JSON in the text +func (p *OllamaProvider) parseExtractedJSON(text string) (*Response, error) { + // Try to find JSON objects or arrays in the text + jsonPatterns := []string{ + `\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}`, // Simple objects (may not handle nested) + `\[[\s\S]*?\]`, // Arrays + } + + for _, pattern := range jsonPatterns { + re := regexp.MustCompile(pattern) + matches := re.FindAllString(text, -1) + for _, match := range matches { + if resp, err := p.parseStandard(match); err == nil { + return resp, nil + } + } + } + + return nil, fmt.Errorf("no valid JSON found") +} + +// processLLMResponses - common processing logic for LLMOuterResponse arrays +func (p *OllamaProvider) processLLMResponses(outerResponses []LLMOuterResponse) *Response { + genericResp := &Response{} + var finalAnswer strings.Builder + + for _, r := range outerResponses { + if r.Answer != "" { + finalAnswer.WriteString(r.Answer + "\n") + } + if r.Question != "" { + genericResp.Question = r.Question + } + // Look for unified tool call format: toolName and arguments + if r.ToolName != "" { + // Check if this tool is registered + if !p.registeredTools[r.ToolName] { + continue // Tool not registered, skip + } + + // Get arguments (LLM should always provide them) + arguments := r.Arguments + + // Create tool call with its explanation + toolCall := ToolCall{ + ToolName: r.ToolName, + Arguments: arguments, + Explanation: r.Explanation, + } + + genericResp.ToolCalls = append(genericResp.ToolCalls, toolCall) + } + } + + genericResp.Text = strings.TrimSpace(finalAnswer.String()) + return genericResp +} + +// friendlyError converts Ollama API errors to user-friendly messages 
+func (p *OllamaProvider) friendlyError(err error) error { + if err == nil { + return nil + } + + errStr := err.Error() + + // Check for common error patterns + if strings.Contains(errStr, "connection refused") || strings.Contains(errStr, "dial tcp") { + return fmt.Errorf("🚫 Cannot connect to Ollama. Please ensure Ollama is running with 'ollama serve'") + } + + if strings.Contains(errStr, "model not found") || strings.Contains(errStr, "model not available") { + return fmt.Errorf("📦 Model '%s' not found. Please run 'ollama pull %s' to download it", p.config.ModelName, p.config.ModelName) + } + + if strings.Contains(errStr, "timeout") || strings.Contains(errStr, "deadline") || strings.Contains(errStr, "context canceled") { + return fmt.Errorf("⏱️ Request timed out. Ollama may be busy or the model may be too large for your system") + } + + // Return original error if no match + return err +} diff --git a/grid-agent-gui/app.go b/grid-agent-gui/app.go index cdb45db..119982f 100644 --- a/grid-agent-gui/app.go +++ b/grid-agent-gui/app.go @@ -42,7 +42,9 @@ type App struct { type Settings struct { Mnemonics string `json:"mnemonics"` Network string `json:"network"` // mainnet, testnet, devnet + Provider string `json:"provider"` // "gemini", "ollama" GeminiAPIKey string `json:"geminiApiKey"` + OllamaBaseURL string `json:"ollamaBaseURL"` // usually "http://localhost:11434" Model string `json:"model"` Theme string `json:"theme"` // light, dark IsConfigured bool `json:"isConfigured"` @@ -690,8 +692,12 @@ func (a *App) initializeAgent(opts AgentInitOptions) error { modelName = a.settings.Model } - // Create LLM provider with dynamic tool documentation + // Determine provider and create appropriate LLM provider + var provider llm.Provider + var err error + providerConfig := llm.Config{ + Provider: a.settings.Provider, ModelName: modelName, ResponseMIMEType: "application/json", SystemPrompt: strings.Replace(internalConfig.GetSystemPrompt(a.settings.Network, 
a.getActiveInstructions()), "{{TOOL_DESCRIPTIONS}}", toolDocs, 1), @@ -701,7 +707,15 @@ func (a *App) initializeAgent(opts AgentInitOptions) error { History: history, // Pass previous history } - provider, err := llm.NewGeminiProviderWithConfig(a.settings.GeminiAPIKey, providerConfig) + switch a.settings.Provider { + case "ollama": + provider, err = llm.NewOllamaProviderWithConfig(a.settings.OllamaBaseURL, providerConfig) + case "gemini": + fallthrough + default: + provider, err = llm.NewGeminiProviderWithConfig(a.settings.GeminiAPIKey, providerConfig) + } + if err != nil { return err } @@ -815,13 +829,24 @@ func (a *App) DeleteProfile(id string) (*Settings, error) { return a.settings, nil } -// UpdateAdvancedSettings updates the API key, model, and export options -func (a *App) UpdateAdvancedSettings(apiKey, model string, enableExportSummary bool) (*Settings, error) { - if apiKey == "" { - return nil, fmt.Errorf("API key cannot be empty") +// UpdateAdvancedSettings updates the provider, API key/URL, model, and export options +func (a *App) UpdateAdvancedSettings(provider, credential, model string, enableExportSummary bool) (*Settings, error) { + // Validate based on provider + if provider == "gemini" && credential == "" { + return nil, fmt.Errorf("Gemini API key cannot be empty") + } + if provider == "ollama" && credential == "" { + credential = "http://localhost:11434" // Default Ollama URL } - a.settings.GeminiAPIKey = apiKey + a.settings.Provider = provider + if provider == "gemini" { + a.settings.GeminiAPIKey = credential + // Set environment variable for Gemini + _ = os.Setenv("GEMINI_API_KEY", credential) + } else if provider == "ollama" { + a.settings.OllamaBaseURL = credential + } a.settings.Model = model a.settings.EnableExportSummary = enableExportSummary diff --git a/grid-agent-gui/frontend/src/components/Docs.svelte b/grid-agent-gui/frontend/src/components/Docs.svelte index 46ec2bd..68fe049 100644 --- 
a/grid-agent-gui/frontend/src/components/Docs.svelte +++ b/grid-agent-gui/frontend/src/components/Docs.svelte @@ -116,15 +116,22 @@

🤖 AI Configuration

Configure the AI model and behavior settings.

+
Provider Options
+ +
How to Configure AI Settings
  1. Click "AI Configuration" in the sidebar
  2. -
  3. Model Selection: Choose from available Gemini models (higher models = better responses but more tokens)
  4. -
  5. API Key: Click "Show" to reveal, "Change" to update
  6. +
  7. Provider Selection: Choose between Google Gemini or Ollama
  8. +
  9. Model Selection: Pick from available models for your chosen provider
  10. +
  11. API Key: Required for Gemini, not needed for Ollama
  12. Export Summary: Enable AI-generated summaries when exporting conversations
-
Available Models
+
Gemini Models
+ +
+

🏠 Ollama Local AI Setup

+

Run AI models locally on your machine for maximum privacy and zero API costs.

+ +
Why Use Ollama?
+ + +
Step 1: Install Ollama
+
+

For macOS:

+

brew install ollama

+
+

For Linux:

+

curl -fsSL https://ollama.ai/install.sh | sh

+
+

For Windows:

+

Download from ollama.ai/download

+
+ +
Step 2: Quick Setup (Choose One)
+ +
Native Installation (Recommended)
+
+

# 1. Start Ollama in background

+

ollama serve &

+
+

# 2. Verify it's running

+

curl http://localhost:11434/api/version

+
+

# 3. Pull a model

+

ollama pull llama3.1:8b

+
+

# 4. Check models

+

ollama list

+
+

# 5. Use Grid Agent!

+

# (configure to use Ollama)

+
+

# 6. Stop when done

+

pkill -f "ollama serve"

+
+ +
Docker Installation
+
+

# 1. Start Ollama container

+

docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama

+
+

# 2. Verify it's running

+

curl http://localhost:11434/api/version

+
+

# 3. Pull a model

+

docker exec -it ollama ollama pull llama3.1:8b

+
+

# 4. Check models

+

docker exec -it ollama ollama list

+
+

# 5. Use Grid Agent!

+

# (configure to use Ollama)

+
+

# 6. Stop when done

+

docker stop ollama

+
+ +
Step 3: Configure Grid Agent
+
    +
  1. Open Grid Agent and go to "AI Configuration"
  2. +
  3. Select "Ollama (Local)" as the provider
  4. +
  5. Choose your downloaded model from the dropdown
  6. +
  7. Click "Save Configuration"
  8. +
+ +
Troubleshooting
+ + +
Model Recommendations
+ + +
System Requirements
+ +
+

🌐 Grid Configuration

diff --git a/grid-agent-gui/frontend/src/components/Settings.svelte b/grid-agent-gui/frontend/src/components/Settings.svelte index 3f3168d..0404e1b 100644 --- a/grid-agent-gui/frontend/src/components/Settings.svelte +++ b/grid-agent-gui/frontend/src/components/Settings.svelte @@ -31,14 +31,17 @@ let formInstructions = ""; // Advanced Settings + let advancedProvider = "gemini"; let advancedModel = ""; let advancedApiKey = ""; let isEditingApiKey = false; let tempApiKey = ""; + let providerDropdownOpen = false; let modelDropdownOpen = false; let showApiKey = false; // Track original values for change detection + let originalProvider = "gemini"; let originalApiKey = ""; let originalModel = ""; @@ -112,10 +115,11 @@ let originalEnableExportSummary = false; $: configHasChanged = - (advancedApiKey !== originalApiKey || + (advancedProvider !== originalProvider || + advancedApiKey !== originalApiKey || advancedModel !== originalModel || enableExportSummary !== originalEnableExportSummary) && - advancedApiKey.trim() !== ""; + (advancedProvider === "ollama" || advancedApiKey.trim() !== ""); // Detect if grid config has changed $: gridConfigHasChanged = @@ -131,6 +135,7 @@ function resetState() { activeSection = "personas"; error = ""; + providerDropdownOpen = false; modelDropdownOpen = false; networkDropdownOpen = false; cancelApiKeyEdit(); @@ -146,6 +151,12 @@ // Close dropdown when clicking outside function handleDropdownClickOutside(event: MouseEvent) { + if (providerDropdownOpen) { + const target = event.target as HTMLElement; + if (!target.closest(".custom-select")) { + providerDropdownOpen = false; + } + } if (modelDropdownOpen) { const target = event.target as HTMLElement; if (!target.closest(".custom-select")) { @@ -253,18 +264,20 @@ } async function saveAdvanced() { - if (!advancedApiKey.trim()) { - error = "API Key is required"; + if (advancedProvider === "gemini" && !advancedApiKey.trim()) { + error = "Gemini API Key is required"; return; } try { const newSettings 
= await UpdateAdvancedSettings( - advancedApiKey, + advancedProvider, + advancedProvider === "gemini" ? advancedApiKey : "http://localhost:11434", advancedModel, enableExportSummary, ); settingsStore.set(newSettings); // Update original values after successful save + originalProvider = advancedProvider; originalApiKey = advancedApiKey; originalModel = advancedModel; originalEnableExportSummary = enableExportSummary; @@ -657,6 +670,64 @@
+
+ +
+ + {#if providerDropdownOpen} +
+ + +
+ {/if} +
+
+
@@ -684,7 +755,16 @@ class="select-dropdown" transition:slide={{ duration: 200 }} > - {#each availableModels as m} + {#each advancedProvider === "gemini" ? availableModels : [ + "llama3.1:8b", + "llama3.1:70b", + "llama3.1:405b", + "qwen2.5:7b", + "qwen2.5:14b", + "qwen2.5:32b", + "qwen2.5:72b", + "mistral:7b" + ] as m}
-
- - {#if isEditingApiKey} -
- -
+ {#if advancedProvider === "gemini"} +
+ + {#if isEditingApiKey} +
+ +
+ + +
+
+ {:else} +
+ 8 + ? advancedApiKey.slice(0, 4) + "****" + advancedApiKey.slice(-4) + : "****") + : "*".repeat(advancedApiKey.length)} + disabled + /> showApiKey = !showApiKey} > + {showApiKey ? "Hide" : "Show"} + Change
-
- {:else} -
- 8 - ? advancedApiKey.slice(0, 4) + "****" + advancedApiKey.slice(-4) - : "****") - : "*".repeat(advancedApiKey.length)} - disabled - /> - - -
- {/if} -

- Your API key is stored securely on your local device. -

-
+ {/if} +

+ Your API key is stored securely on your local device. +

+
+ {/if}
diff --git a/grid-agent-gui/frontend/wailsjs/go/main/App.d.ts b/grid-agent-gui/frontend/wailsjs/go/main/App.d.ts index 4e32735..84d4a85 100755 --- a/grid-agent-gui/frontend/wailsjs/go/main/App.d.ts +++ b/grid-agent-gui/frontend/wailsjs/go/main/App.d.ts @@ -32,7 +32,7 @@ export function SendMessage(arg1:string,arg2:string):Promise; export function SetTheme(arg1:string):Promise; -export function UpdateAdvancedSettings(arg1:string,arg2:string,arg3:boolean):Promise; +export function UpdateAdvancedSettings(arg1:string,arg2:string,arg3:string,arg4:boolean):Promise; export function UpdateGridSettings(arg1:string,arg2:string):Promise; diff --git a/grid-agent-gui/frontend/wailsjs/go/main/App.js b/grid-agent-gui/frontend/wailsjs/go/main/App.js index ea37b06..f34cf32 100755 --- a/grid-agent-gui/frontend/wailsjs/go/main/App.js +++ b/grid-agent-gui/frontend/wailsjs/go/main/App.js @@ -62,8 +62,8 @@ export function SetTheme(arg1) { return window['go']['main']['App']['SetTheme'](arg1); } -export function UpdateAdvancedSettings(arg1, arg2, arg3) { - return window['go']['main']['App']['UpdateAdvancedSettings'](arg1, arg2, arg3); +export function UpdateAdvancedSettings(arg1, arg2, arg3, arg4) { + return window['go']['main']['App']['UpdateAdvancedSettings'](arg1, arg2, arg3, arg4); } export function UpdateGridSettings(arg1, arg2) { diff --git a/grid-agent-gui/frontend/wailsjs/go/models.ts b/grid-agent-gui/frontend/wailsjs/go/models.ts index 041fa3b..1d2a83b 100755 --- a/grid-agent-gui/frontend/wailsjs/go/models.ts +++ b/grid-agent-gui/frontend/wailsjs/go/models.ts @@ -87,7 +87,9 @@ export namespace main { export class Settings { mnemonics: string; network: string; + provider: string; geminiApiKey: string; + ollamaBaseURL: string; model: string; theme: string; isConfigured: boolean; @@ -103,7 +105,9 @@ export namespace main { if ('string' === typeof source) source = JSON.parse(source); this.mnemonics = source["mnemonics"]; this.network = source["network"]; + this.provider = 
source["provider"]; this.geminiApiKey = source["geminiApiKey"]; + this.ollamaBaseURL = source["ollamaBaseURL"]; this.model = source["model"]; this.theme = source["theme"]; this.isConfigured = source["isConfigured"];