diff --git a/src/dashboard/events.ts b/src/dashboard/events.ts
index ee03fba..f64bf93 100644
--- a/src/dashboard/events.ts
+++ b/src/dashboard/events.ts
@@ -188,6 +188,17 @@ export class SSEManager {
   }
 }
 
+/**
+ * Broadcast a log message to all connected dashboard clients
+ */
+export function broadcastLog(level: 'info' | 'warn' | 'error', message: string): void {
+  sseManager.broadcast('server:log', {
+    level,
+    message,
+    timestamp: new Date().toISOString(),
+  });
+}
+
 /**
  * Singleton instance of the SSE manager
  */
diff --git a/src/dashboard/ui.ts b/src/dashboard/ui.ts
index f96a024..5a0095d 100644
--- a/src/dashboard/ui.ts
+++ b/src/dashboard/ui.ts
@@ -1742,6 +1742,24 @@ export function getDashboardHTML(): string {
        // Just keep connection alive - don't log to reduce noise
      });
 
+      eventSource.addEventListener('server:log', (e) => {
+        const logData = JSON.parse(e.data);
+        const prefix = '%c[server]%c ';
+        const prefixStyle = 'color: #a371f7; font-weight: bold';
+        const msgStyle = 'color: inherit';
+
+        switch (logData.level) {
+          case 'error':
+            console.error(prefix + logData.message, prefixStyle, msgStyle);
+            break;
+          case 'warn':
+            console.warn(prefix + logData.message, prefixStyle, msgStyle);
+            break;
+          default:
+            console.log(prefix + logData.message, prefixStyle, msgStyle);
+        }
+      });
+
      eventSource.onerror = (e) => {
        log.warn('SSE connection error, will reconnect...', e);
        setConnected(false);
diff --git a/src/embeddings/ollama.ts b/src/embeddings/ollama.ts
index 0912d70..9b6ddae 100644
--- a/src/embeddings/ollama.ts
+++ b/src/embeddings/ollama.ts
@@ -1,6 +1,7 @@
 import type { EmbeddingBackend, EmbeddingConfig } from './types.js';
 import { chunkArray } from './types.js';
 import { fetchWithRetry } from './retry.js';
+import { broadcastLog } from '../dashboard/events.js';
 
 /** Default batch size for Ollama (texts per request) */
 const DEFAULT_BATCH_SIZE = 50;
@@ -97,11 +98,12 @@ export class OllamaBackend implements EmbeddingBackend {
     const results: number[][] = new Array(texts.length);
     const totalGroups = Math.ceil(batches.length / this.concurrency);
 
-    console.error(
-      `[lance-context] Ollama: embedding ${texts.length} texts in ${batches.length} batches ` +
-        `(${this.batchSize} texts/batch, ${this.concurrency} parallel, ${totalGroups} groups)`
-    );
-    console.error(`[lance-context] Ollama: using model ${this.model} at ${this.baseUrl}`);
+    const initMsg = `Ollama: embedding ${texts.length} texts in ${batches.length} batches (${this.batchSize} texts/batch, ${this.concurrency} parallel, ${totalGroups} groups)`;
+    const modelMsg = `Ollama: using model ${this.model} at ${this.baseUrl}`;
+    console.error(`[lance-context] ${initMsg}`);
+    console.error(`[lance-context] ${modelMsg}`);
+    broadcastLog('info', initMsg);
+    broadcastLog('info', modelMsg);
 
     // Process batches in parallel groups controlled by concurrency
     for (let i = 0; i < batches.length; i += this.concurrency) {
@@ -109,9 +111,9 @@
       const batchGroup = batches.slice(i, i + this.concurrency);
       const groupNum = Math.floor(i / this.concurrency) + 1;
       const groupStart = Date.now();
-      console.error(
-        `[lance-context] Ollama: starting group ${groupNum}/${totalGroups} (${batchGroup.length} batches)...`
-      );
+      const groupStartMsg = `Ollama: starting group ${groupNum}/${totalGroups} (${batchGroup.length} batches)...`;
+      console.error(`[lance-context] ${groupStartMsg}`);
+      broadcastLog('info', groupStartMsg);
 
       const batchPromises = batchGroup.map(async (batch, groupIndex) => {
         // Create abort controller with timeout
@@ -144,9 +146,9 @@ export class OllamaBackend implements EmbeddingBackend {
       const batchResults = await Promise.all(batchPromises);
       const groupElapsed = ((Date.now() - groupStart) / 1000).toFixed(1);
       const processedSoFar = Math.min((i + this.concurrency) * this.batchSize, texts.length);
-      console.error(
-        `[lance-context] Embedded batch group ${Math.floor(i / this.concurrency) + 1}/${Math.ceil(batches.length / this.concurrency)} (${processedSoFar}/${texts.length} texts) in ${groupElapsed}s`
-      );
+      const groupCompleteMsg = `Embedded batch group ${Math.floor(i / this.concurrency) + 1}/${Math.ceil(batches.length / this.concurrency)} (${processedSoFar}/${texts.length} texts) in ${groupElapsed}s`;
+      console.error(`[lance-context] ${groupCompleteMsg}`);
+      broadcastLog('info', groupCompleteMsg);
 
       // Place results in correct positions
       for (const { batchIndex, embeddings } of batchResults) {
diff --git a/src/search/indexer.ts b/src/search/indexer.ts
index d610bc7..2fba689 100644
--- a/src/search/indexer.ts
+++ b/src/search/indexer.ts
@@ -3,6 +3,7 @@ import * as crypto from 'crypto';
 import * as fs from 'fs/promises';
 import * as path from 'path';
 import type { EmbeddingBackend } from '../embeddings/index.js';
+import { broadcastLog } from '../dashboard/events.js';
 import { ASTChunker } from './ast-chunker.js';
 import { TreeSitterChunker } from './tree-sitter-chunker.js';
 import {
@@ -1438,9 +1439,9 @@ export class CodeIndexer {
     for (let i = 0; i < chunks.length; i += embeddingBatchSize) {
       const batch = chunks.slice(i, i + embeddingBatchSize);
       const texts = batch.map((c) => c.content);
-      console.error(
-        `[lance-context] Sending ${texts.length} texts to embedding backend (batch ${Math.floor(i / embeddingBatchSize) + 1}/${Math.ceil(chunks.length / embeddingBatchSize)})...`
-      );
+      const batchMsg = `Sending ${texts.length} texts to embedding backend (batch ${Math.floor(i / embeddingBatchSize) + 1}/${Math.ceil(chunks.length / embeddingBatchSize)})...`;
+      console.error(`[lance-context] ${batchMsg}`);
+      broadcastLog('info', batchMsg);
       const embeddings = await this.embeddingBackend.embedBatch(texts);
       batch.forEach((chunk, idx) => {
         chunk.embedding = embeddings[idx];
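
Note: the `server:log` event added above is an ordinary named SSE event whose `data` field is the JSON payload built by `broadcastLog`; the dashboard listener in `ui.ts` is the only consumer this diff adds. The sketch below shows how another client could subscribe to the same stream. It is a minimal sketch, not part of the diff: the `/events` endpoint path is an assumption (the route where `SSEManager` serves its stream is not shown here), and JSON serialization of the payload is inferred from the existing listener's `JSON.parse(e.data)`.

```ts
// Hypothetical standalone consumer of the new 'server:log' event.
// '/events' is an assumed endpoint path; point it at wherever SSEManager serves its stream.
const source = new EventSource('/events');

source.addEventListener('server:log', (e) => {
  // broadcastLog sends { level, message, timestamp } as JSON in the event data.
  const { level, message, timestamp } = JSON.parse((e as MessageEvent).data) as {
    level: 'info' | 'warn' | 'error';
    message: string;
    timestamp: string;
  };
  // Route to the console method that matches the level, mirroring the ui.ts listener.
  const emit = level === 'error' ? console.error : level === 'warn' ? console.warn : console.log;
  emit(`[${timestamp}] [server] ${message}`);
});
```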