diff --git a/README.md b/README.md
index 7c52b4ea8e..fc2373668e 100644
--- a/README.md
+++ b/README.md
@@ -228,7 +228,7 @@ Official integrations are maintained by companies building production ready MCP
 - **[Linear](https://linear.app/docs/mcp)** - Search, create, and update Linear issues, projects, and comments.
 - **[Lingo.dev](https://github.com/lingodotdev/lingo.dev/blob/main/mcp.md)** - Make your AI agent speak every language on the planet, using [Lingo.dev](https://lingo.dev) Localization Engine.
 - **[LinkedIn MCP Runner](https://github.com/ertiqah/linkedin-mcp-runner)** - Write, edit, and schedule LinkedIn posts right from ChatGPT and Claude with [LiGo](https://ligo.ertiqah.com/).
-- **[Lisply](https://github.com/gornskew/lisply-mcp)** - Flexible frontend for compliant Lisp-speaking backends.
+- **[Lisply](https://github.com/gornskew/lisply-mcp)** - Flexible frontend for compliant Lisp-speaking backends.
 - **[Litmus.io](https://github.com/litmusautomation/litmus-mcp-server)** - Official MCP server for configuring [Litmus](https://litmus.io) Edge for Industrial Data Collection, Edge Analytics & Industrial AI.
 - **[Liveblocks](https://github.com/liveblocks/liveblocks-mcp-server)** - Ready‑made features for AI & human collaboration—use this to develop your [Liveblocks](https://liveblocks.io) app quicker.
 - **[Logfire](https://github.com/pydantic/logfire-mcp)** - Provides access to OpenTelemetry traces and metrics through Logfire.
@@ -408,7 +408,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[Ableton Live](https://github.com/ahujasid/ableton-mcp)** (by ahujasid) - Ableton integration allowing prompt enabled music creation.
- **[Actor Critic Thinking](https://github.com/aquarius-wing/actor-critic-thinking-mcp)** - Actor-critic thinking for performance evaluation
- **[AgentBay](https://github.com/Michael98671/agentbay)** - An MCP server for providing serverless cloud infrastructure for AI agents.
-- **[AgentMode](https://www.agentmode.app) - Connect to dozens of databases, data warehouses, Github & more, from a single MCP server. Run the Docker image locally, in the cloud, or on-premise.
+- **[AgentMode](https://www.agentmode.app)** - Connect to dozens of databases, data warehouses, Github & more, from a single MCP server. Run the Docker image locally, in the cloud, or on-premise.
- **[AI Agent Marketplace Index](https://github.com/AI-Agent-Hub/ai-agent-marketplace-index-mcp)** - MCP server to search more than 5000+ AI agents and tools of various categories from [AI Agent Marketplace Index](http://www.deepnlp.org/store/ai-agent) and monitor traffic of AI Agents.
- **[AI Tasks](https://github.com/jbrinkman/valkey-ai-tasks)** - Let the AI manage complex plans with integrated task management and tracking tools. Supports STDIO, SSE and Streamable HTTP transports.
- **[ai-Bible](https://github.com/AdbC99/ai-bible)** - Search the bible reliably and repeatably [ai-Bible Labs](https://ai-bible.com)
diff --git a/package-lock.json b/package-lock.json
index c785a237fe..6a9bac9316 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -6159,7 +6159,7 @@
"version": "0.6.2",
"license": "MIT",
"dependencies": {
- "@modelcontextprotocol/sdk": "^1.12.3",
+ "@modelcontextprotocol/sdk": "^1.17.0",
"diff": "^5.1.0",
"glob": "^10.3.10",
"minimatch": "^10.0.1",
@@ -6182,9 +6182,9 @@
}
},
"src/filesystem/node_modules/@modelcontextprotocol/sdk": {
- "version": "1.12.3",
- "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.12.3.tgz",
- "integrity": "sha512-DyVYSOafBvk3/j1Oka4z5BWT8o4AFmoNyZY9pALOm7Lh3GZglR71Co4r4dEUoqDWdDazIZQHBe7J2Nwkg6gHgQ==",
+ "version": "1.17.0",
+ "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.17.0.tgz",
+ "integrity": "sha512-qFfbWFA7r1Sd8D697L7GkTd36yqDuTkvz0KfOGkgXR8EUhQn3/EDNIR/qUdQNMT8IjmasBvHWuXeisxtXTQT2g==",
"license": "MIT",
"dependencies": {
"ajv": "^6.12.6",
@@ -6192,6 +6192,7 @@
"cors": "^2.8.5",
"cross-spawn": "^7.0.5",
"eventsource": "^3.0.2",
+ "eventsource-parser": "^3.0.0",
"express": "^5.0.1",
"express-rate-limit": "^7.5.0",
"pkce-challenge": "^5.0.0",
diff --git a/src/filesystem/README.md b/src/filesystem/README.md
index cd6d0a9f06..89bca6908f 100644
--- a/src/filesystem/README.md
+++ b/src/filesystem/README.md
@@ -70,10 +70,19 @@ The server's directory access control follows this flow:
### Tools
-- **read_file**
- - Read complete contents of a file
- - Input: `path` (string)
- - Reads complete file contents with UTF-8 encoding
+- **read_text_file**
+ - Read complete contents of a file as text
+ - Inputs:
+ - `path` (string)
+ - `head` (number, optional): First N lines
+ - `tail` (number, optional): Last N lines
+ - Always treats the file as UTF-8 text regardless of extension
+
+- **read_media_file**
+ - Read an image or audio file
+ - Inputs:
+ - `path` (string)
+ - Streams the file and returns base64 data with the corresponding MIME type
- **read_multiple_files**
- Read multiple files simultaneously
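
For orientation, here is a minimal sketch of how an MCP client might call the two new tools. The client/transport setup and all file paths are illustrative assumptions, not part of this change:

```typescript
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

// Hypothetical setup: launch the filesystem server over stdio with one
// allowed directory.
const transport = new StdioClientTransport({
  command: "npx",
  args: ["-y", "@modelcontextprotocol/server-filesystem", "/home/user/project"],
});
const client = new Client({ name: "example-client", version: "1.0.0" });
await client.connect(transport);

// First 20 lines of a (hypothetical) log file, decoded as UTF-8 text.
const text = await client.callTool({
  name: "read_text_file",
  arguments: { path: "/home/user/project/app.log", head: 20 },
});

// An image comes back as base64 data plus a MIME type inferred from the
// file extension.
const image = await client.callTool({
  name: "read_media_file",
  arguments: { path: "/home/user/project/logo.png" },
});
```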
diff --git a/src/filesystem/index.ts b/src/filesystem/index.ts
index 524c9c2608..6723f43600 100644
--- a/src/filesystem/index.ts
+++ b/src/filesystem/index.ts
@@ -10,6 +10,7 @@ import {
type Root,
} from "@modelcontextprotocol/sdk/types.js";
import fs from "fs/promises";
+import { createReadStream } from "fs";
import path from "path";
import os from 'os';
import { randomBytes } from 'crypto';
@@ -116,12 +117,16 @@ async function validatePath(requestedPath: string): Promise<string> {
}
// Schema definitions
-const ReadFileArgsSchema = z.object({
+const ReadTextFileArgsSchema = z.object({
path: z.string(),
tail: z.number().optional().describe('If provided, returns only the last N lines of the file'),
head: z.number().optional().describe('If provided, returns only the first N lines of the file')
});
+const ReadMediaFileArgsSchema = z.object({
+ path: z.string()
+});
+
const ReadMultipleFilesArgsSchema = z.object({
paths: z.array(z.string()),
});
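
For reference, the JSON Schema that `zodToJsonSchema` derives from `ReadTextFileArgsSchema` looks roughly like this. This is a sketch, not copied from the repository; exact keys such as `$schema` or `additionalProperties` vary by zod-to-json-schema version:

```typescript
// Approximate shape of the generated tool input schema (assumption:
// zod-to-json-schema defaults).
const readTextFileInputSchema = {
  type: "object",
  properties: {
    path: { type: "string" },
    tail: {
      type: "number",
      description: "If provided, returns only the last N lines of the file",
    },
    head: {
      type: "number",
      description: "If provided, returns only the first N lines of the file",
    },
  },
  required: ["path"],
};
```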
@@ -374,10 +379,10 @@ async function applyFileEdits(
function formatSize(bytes: number): string {
const units = ['B', 'KB', 'MB', 'GB', 'TB'];
if (bytes === 0) return '0 B';
-
+
const i = Math.floor(Math.log(bytes) / Math.log(1024));
if (i === 0) return `${bytes} ${units[i]}`;
-
+
return `${(bytes / Math.pow(1024, i)).toFixed(2)} ${units[i]}`;
}
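
The whitespace-only changes above leave `formatSize` untouched; a few worked values show the log-base-1024 bucketing it implements:

```typescript
// i = Math.floor(Math.log(bytes) / Math.log(1024)) picks the unit;
// the values below follow directly from that formula.
formatSize(0);        // "0 B"
formatSize(512);      // "512 B"   (i = 0, raw byte count)
formatSize(1536);     // "1.50 KB" (1536 / 1024 = 1.50)
formatSize(1048576);  // "1.00 MB" (1024^2 bytes)
```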
@@ -386,9 +391,9 @@ async function tailFile(filePath: string, numLines: number): Promise<string> {
const CHUNK_SIZE = 1024; // Read 1KB at a time
const stats = await fs.stat(filePath);
const fileSize = stats.size;
-
+
if (fileSize === 0) return '';
-
+
// Open file for reading
const fileHandle = await fs.open(filePath, 'r');
try {
@@ -397,36 +402,36 @@ async function tailFile(filePath: string, numLines: number): Promise<string> {
let chunk = Buffer.alloc(CHUNK_SIZE);
let linesFound = 0;
let remainingText = '';
-
+
// Read chunks from the end of the file until we have enough lines
while (position > 0 && linesFound < numLines) {
const size = Math.min(CHUNK_SIZE, position);
position -= size;
-
+
const { bytesRead } = await fileHandle.read(chunk, 0, size, position);
if (!bytesRead) break;
-
+
// Get the chunk as a string and prepend any remaining text from previous iteration
const readData = chunk.slice(0, bytesRead).toString('utf-8');
const chunkText = readData + remainingText;
-
+
// Split by newlines and count
const chunkLines = normalizeLineEndings(chunkText).split('\n');
-
+
// If this isn't the end of the file, the first line is likely incomplete
// Save it to prepend to the next chunk
if (position > 0) {
remainingText = chunkLines[0];
chunkLines.shift(); // Remove the first (incomplete) line
}
-
+
// Add lines to our result (up to the number we need)
for (let i = chunkLines.length - 1; i >= 0 && linesFound < numLines; i--) {
lines.unshift(chunkLines[i]);
linesFound++;
}
}
-
+
return lines.join('\n');
} finally {
await fileHandle.close();
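
The hunk above only strips trailing whitespace; the tail algorithm itself is unchanged. A short trace of how it walks a hypothetical 2.5 KB file backwards may help:

```typescript
// tailFile reads fixed 1 KB chunks from the end toward the start,
// carrying any partial first line (remainingText) into the next-earlier
// chunk so lines split across chunk boundaries are reassembled.
//
//   fileSize = 2560, CHUNK_SIZE = 1024
//   read [1536, 2560)  -> count complete lines from the end
//   read [512, 1536)   -> prepend carried partial line, keep counting
//   read [0, 512)      -> stop once linesFound >= numLines
const lastTwenty = await tailFile("/var/log/app.log", 20); // hypothetical path
```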
@@ -441,14 +446,14 @@ async function headFile(filePath: string, numLines: number): Promise<string> {
let buffer = '';
let bytesRead = 0;
const chunk = Buffer.alloc(1024); // 1KB buffer
-
+
// Read chunks and count lines until we have enough or reach EOF
while (lines.length < numLines) {
const result = await fileHandle.read(chunk, 0, chunk.length, bytesRead);
if (result.bytesRead === 0) break; // End of file
bytesRead += result.bytesRead;
buffer += chunk.slice(0, result.bytesRead).toString('utf-8');
-
+
const newLineIndex = buffer.lastIndexOf('\n');
if (newLineIndex !== -1) {
const completeLines = buffer.slice(0, newLineIndex).split('\n');
@@ -459,32 +464,63 @@ async function headFile(filePath: string, numLines: number): Promise {
}
}
}
-
+
// If there is leftover content and we still need lines, add it
if (buffer.length > 0 && lines.length < numLines) {
lines.push(buffer);
}
-
+
return lines.join('\n');
} finally {
await fileHandle.close();
}
}
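
`headFile` is the forward-reading counterpart: it reads 1 KB chunks from the start and returns as soon as it has collected N complete lines, so a large file is never fully loaded. A one-line usage sketch (hypothetical path):

```typescript
const firstTen = await headFile("/var/log/app.log", 10);
```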
+// Reads a file as a stream of buffers, concatenates them, and then encodes
+// the result to a Base64 string. This is a memory-efficient way to handle
+// binary data from a stream before the final encoding.
+async function readFileAsBase64Stream(filePath: string): Promise<string> {
+ return new Promise((resolve, reject) => {
+ const stream = createReadStream(filePath);
+ const chunks: Buffer[] = [];
+ stream.on('data', (chunk) => {
+ chunks.push(chunk as Buffer);
+ });
+ stream.on('end', () => {
+ const finalBuffer = Buffer.concat(chunks);
+ resolve(finalBuffer.toString('base64'));
+ });
+ stream.on('error', (err) => reject(err));
+ });
+}
+
// Tool handlers
server.setRequestHandler(ListToolsRequestSchema, async () => {
return {
tools: [
{
name: "read_file",
+ description: "Read the complete contents of a file as text. DEPRECATED: Use read_text_file instead.",
+ inputSchema: zodToJsonSchema(ReadTextFileArgsSchema) as ToolInput,
+ },
+ {
+ name: "read_text_file",
description:
- "Read the complete contents of a file from the file system. " +
+ "Read the complete contents of a file from the file system as text. " +
"Handles various text encodings and provides detailed error messages " +
"if the file cannot be read. Use this tool when you need to examine " +
"the contents of a single file. Use the 'head' parameter to read only " +
"the first N lines of a file, or the 'tail' parameter to read only " +
- "the last N lines of a file. Only works within allowed directories.",
- inputSchema: zodToJsonSchema(ReadFileArgsSchema) as ToolInput,
+ "the last N lines of a file. Operates on the file as text regardless of extension. " +
+ "Only works within allowed directories.",
+ inputSchema: zodToJsonSchema(ReadTextFileArgsSchema) as ToolInput,
+ },
+ {
+ name: "read_media_file",
+ description:
+ "Read an image or audio file. Returns the base64 encoded data and MIME type. " +
+ "Only works within allowed directories.",
+ inputSchema: zodToJsonSchema(ReadMediaFileArgsSchema) as ToolInput,
},
{
name: "read_multiple_files",
@@ -597,17 +633,18 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
const { name, arguments: args } = request.params;
switch (name) {
- case "read_file": {
- const parsed = ReadFileArgsSchema.safeParse(args);
+ case "read_file":
+ case "read_text_file": {
+ const parsed = ReadTextFileArgsSchema.safeParse(args);
if (!parsed.success) {
- throw new Error(`Invalid arguments for read_file: ${parsed.error}`);
+ throw new Error(`Invalid arguments for read_text_file: ${parsed.error}`);
}
const validPath = await validatePath(parsed.data.path);
-
+
if (parsed.data.head && parsed.data.tail) {
throw new Error("Cannot specify both head and tail parameters simultaneously");
}
-
+
if (parsed.data.tail) {
// Use memory-efficient tail implementation for large files
const tailContent = await tailFile(validPath, parsed.data.tail);
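
Because `read_file` now falls through to the `read_text_file` case, existing clients keep working unchanged; both spellings validate against `ReadTextFileArgsSchema` and return the same content (paths hypothetical):

```typescript
await client.callTool({ name: "read_file", arguments: { path: "/tmp/notes.txt" } });
await client.callTool({ name: "read_text_file", arguments: { path: "/tmp/notes.txt" } });
```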
@@ -615,7 +652,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
content: [{ type: "text", text: tailContent }],
};
}
-
+
if (parsed.data.head) {
// Use memory-efficient head implementation for large files
const headContent = await headFile(validPath, parsed.data.head);
@@ -623,13 +660,45 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
content: [{ type: "text", text: headContent }],
};
}
-
+
const content = await fs.readFile(validPath, "utf-8");
return {
content: [{ type: "text", text: content }],
};
}
+ case "read_media_file": {
+ const parsed = ReadMediaFileArgsSchema.safeParse(args);
+ if (!parsed.success) {
+ throw new Error(`Invalid arguments for read_media_file: ${parsed.error}`);
+ }
+ const validPath = await validatePath(parsed.data.path);
+ const extension = path.extname(validPath).toLowerCase();
+      const mimeTypes: Record<string, string> = {
+ ".png": "image/png",
+ ".jpg": "image/jpeg",
+ ".jpeg": "image/jpeg",
+ ".gif": "image/gif",
+ ".webp": "image/webp",
+ ".bmp": "image/bmp",
+ ".svg": "image/svg+xml",
+ ".mp3": "audio/mpeg",
+ ".wav": "audio/wav",
+ ".ogg": "audio/ogg",
+ ".flac": "audio/flac",
+ };
+ const mimeType = mimeTypes[extension] || "application/octet-stream";
+ const data = await readFileAsBase64Stream(validPath);
+ const type = mimeType.startsWith("image/")
+ ? "image"
+ : mimeType.startsWith("audio/")
+ ? "audio"
+ : "blob";
+ return {
+ content: [{ type, data, mimeType }],
+ };
+ }
+
case "read_multiple_files": {
const parsed = ReadMultipleFilesArgsSchema.safeParse(args);
if (!parsed.success) {
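
For a PNG under an allowed directory, the `read_media_file` handler above returns a content item shaped like this (sketch with data elided; the `type` is derived from the MIME prefix):

```typescript
// Hypothetical response for a logo.png:
const mediaResponse = {
  content: [
    {
      type: "image",           // "audio" for audio/*, "blob" otherwise
      data: "iVBORw0KGgo...",   // base64-encoded file bytes (truncated)
      mimeType: "image/png",    // looked up from the ".png" extension
    },
  ],
};
```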
@@ -734,7 +803,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
}
const validPath = await validatePath(parsed.data.path);
const entries = await fs.readdir(validPath, { withFileTypes: true });
-
+
// Get detailed information for each entry
const detailedEntries = await Promise.all(
entries.map(async (entry) => {
@@ -757,7 +826,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
}
})
);
-
+
// Sort entries based on sortBy parameter
const sortedEntries = [...detailedEntries].sort((a, b) => {
if (parsed.data.sortBy === 'size') {
@@ -766,29 +835,29 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
// Default sort by name
return a.name.localeCompare(b.name);
});
-
+
// Format the output
- const formattedEntries = sortedEntries.map(entry =>
+ const formattedEntries = sortedEntries.map(entry =>
`${entry.isDirectory ? "[DIR]" : "[FILE]"} ${entry.name.padEnd(30)} ${
entry.isDirectory ? "" : formatSize(entry.size).padStart(10)
}`
);
-
+
// Add summary
const totalFiles = detailedEntries.filter(e => !e.isDirectory).length;
const totalDirs = detailedEntries.filter(e => e.isDirectory).length;
const totalSize = detailedEntries.reduce((sum, entry) => sum + (entry.isDirectory ? 0 : entry.size), 0);
-
+
const summary = [
"",
`Total: ${totalFiles} files, ${totalDirs} directories`,
`Combined size: ${formatSize(totalSize)}`
];
-
+
return {
- content: [{
- type: "text",
- text: [...formattedEntries, ...summary].join("\n")
+ content: [{
+ type: "text",
+ text: [...formattedEntries, ...summary].join("\n")
}],
};
}
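
The re-indentation above leaves the `list_directory` output format unchanged; with hypothetical entries it renders roughly as follows (names padded to 30 columns, sizes right-aligned to 10):

```
[FILE] README.md                          1.20 KB
[DIR]  src
[FILE] package.json                        842 B

Total: 2 files, 1 directories
Combined size: 2.02 KB
```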
diff --git a/src/filesystem/package.json b/src/filesystem/package.json
index 482f0cce79..4d3ac3205a 100644
--- a/src/filesystem/package.json
+++ b/src/filesystem/package.json
@@ -20,7 +20,7 @@
"test": "jest --config=jest.config.cjs --coverage"
},
"dependencies": {
- "@modelcontextprotocol/sdk": "^1.12.3",
+ "@modelcontextprotocol/sdk": "^1.17.0",
"diff": "^5.1.0",
"glob": "^10.3.10",
"minimatch": "^10.0.1",
@@ -38,4 +38,4 @@
"ts-node": "^10.9.2",
"typescript": "^5.8.2"
}
-}
\ No newline at end of file
+}