Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions src/dashboard/events.ts
Original file line number Diff line number Diff line change
Expand Up @@ -188,6 +188,17 @@ export class SSEManager {
}
}

/**
 * Push a log entry to every connected dashboard client over SSE.
 *
 * Emitted as a `server:log` event; the dashboard mirrors it into the
 * browser console.
 *
 * @param level - Severity of the entry.
 * @param message - Human-readable log text.
 */
export function broadcastLog(level: 'info' | 'warn' | 'error', message: string): void {
  const payload = {
    level,
    message,
    // Stamp on the server so all clients agree on ordering.
    timestamp: new Date().toISOString(),
  };
  sseManager.broadcast('server:log', payload);
}

/**
* Singleton instance of the SSE manager
*/
Expand Down
18 changes: 18 additions & 0 deletions src/dashboard/ui.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1742,6 +1742,24 @@ export function getDashboardHTML(): string {
// Just keep connection alive - don't log to reduce noise
});

eventSource.addEventListener('server:log', (e) => {
  // Parse defensively: one malformed SSE payload must not throw inside the
  // event handler and kill server-log mirroring for the session.
  let logData;
  try {
    logData = JSON.parse(e.data);
  } catch {
    return;
  }

  // Pass the message as a %s argument rather than concatenating it into the
  // format string: a message containing '%c'/'%s'/'%o' would otherwise be
  // interpreted as console format directives and consume the style args.
  const format = '%c[server]%c %s';
  const prefixStyle = 'color: #a371f7; font-weight: bold';
  const msgStyle = 'color: inherit';

  // Route to the console method matching the server-side severity.
  switch (logData.level) {
    case 'error':
      console.error(format, prefixStyle, msgStyle, logData.message);
      break;
    case 'warn':
      console.warn(format, prefixStyle, msgStyle, logData.message);
      break;
    default:
      console.log(format, prefixStyle, msgStyle, logData.message);
  }
});

eventSource.onerror = (e) => {
log.warn('SSE connection error, will reconnect...', e);
setConnected(false);
Expand Down
24 changes: 13 additions & 11 deletions src/embeddings/ollama.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import type { EmbeddingBackend, EmbeddingConfig } from './types.js';
import { chunkArray } from './types.js';
import { fetchWithRetry } from './retry.js';
import { broadcastLog } from '../dashboard/events.js';

/** Default batch size for Ollama (texts per request) */
const DEFAULT_BATCH_SIZE = 50;
Expand Down Expand Up @@ -97,21 +98,22 @@ export class OllamaBackend implements EmbeddingBackend {
const results: number[][] = new Array(texts.length);
const totalGroups = Math.ceil(batches.length / this.concurrency);

console.error(
`[lance-context] Ollama: embedding ${texts.length} texts in ${batches.length} batches ` +
`(${this.batchSize} texts/batch, ${this.concurrency} parallel, ${totalGroups} groups)`
);
console.error(`[lance-context] Ollama: using model ${this.model} at ${this.baseUrl}`);
const initMsg = `Ollama: embedding ${texts.length} texts in ${batches.length} batches (${this.batchSize} texts/batch, ${this.concurrency} parallel, ${totalGroups} groups)`;
const modelMsg = `Ollama: using model ${this.model} at ${this.baseUrl}`;
console.error(`[lance-context] ${initMsg}`);
console.error(`[lance-context] ${modelMsg}`);
broadcastLog('info', initMsg);
broadcastLog('info', modelMsg);

// Process batches in parallel groups controlled by concurrency
for (let i = 0; i < batches.length; i += this.concurrency) {
const batchGroup = batches.slice(i, i + this.concurrency);
const groupNum = Math.floor(i / this.concurrency) + 1;
const groupStart = Date.now();

console.error(
`[lance-context] Ollama: starting group ${groupNum}/${totalGroups} (${batchGroup.length} batches)...`
);
const groupStartMsg = `Ollama: starting group ${groupNum}/${totalGroups} (${batchGroup.length} batches)...`;
console.error(`[lance-context] ${groupStartMsg}`);
broadcastLog('info', groupStartMsg);

const batchPromises = batchGroup.map(async (batch, groupIndex) => {
// Create abort controller with timeout
Expand Down Expand Up @@ -144,9 +146,9 @@ export class OllamaBackend implements EmbeddingBackend {
const batchResults = await Promise.all(batchPromises);
const groupElapsed = ((Date.now() - groupStart) / 1000).toFixed(1);
const processedSoFar = Math.min((i + this.concurrency) * this.batchSize, texts.length);
console.error(
`[lance-context] Embedded batch group ${Math.floor(i / this.concurrency) + 1}/${Math.ceil(batches.length / this.concurrency)} (${processedSoFar}/${texts.length} texts) in ${groupElapsed}s`
);
const groupCompleteMsg = `Embedded batch group ${Math.floor(i / this.concurrency) + 1}/${Math.ceil(batches.length / this.concurrency)} (${processedSoFar}/${texts.length} texts) in ${groupElapsed}s`;
console.error(`[lance-context] ${groupCompleteMsg}`);
broadcastLog('info', groupCompleteMsg);

// Place results in correct positions
for (const { batchIndex, embeddings } of batchResults) {
Expand Down
7 changes: 4 additions & 3 deletions src/search/indexer.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ import * as crypto from 'crypto';
import * as fs from 'fs/promises';
import * as path from 'path';
import type { EmbeddingBackend } from '../embeddings/index.js';
import { broadcastLog } from '../dashboard/events.js';
import { ASTChunker } from './ast-chunker.js';
import { TreeSitterChunker } from './tree-sitter-chunker.js';
import {
Expand Down Expand Up @@ -1438,9 +1439,9 @@ export class CodeIndexer {
for (let i = 0; i < chunks.length; i += embeddingBatchSize) {
const batch = chunks.slice(i, i + embeddingBatchSize);
const texts = batch.map((c) => c.content);
console.error(
`[lance-context] Sending ${texts.length} texts to embedding backend (batch ${Math.floor(i / embeddingBatchSize) + 1}/${Math.ceil(chunks.length / embeddingBatchSize)})...`
);
const batchMsg = `Sending ${texts.length} texts to embedding backend (batch ${Math.floor(i / embeddingBatchSize) + 1}/${Math.ceil(chunks.length / embeddingBatchSize)})...`;
console.error(`[lance-context] ${batchMsg}`);
broadcastLog('info', batchMsg);
const embeddings = await this.embeddingBackend.embedBatch(texts);
batch.forEach((chunk, idx) => {
chunk.embedding = embeddings[idx];
Expand Down
Loading