From 4ff4e4d10061db34d02d0de6c91603c523e9a7bf Mon Sep 17 00:00:00 2001 From: Zhe Yu Date: Thu, 4 Sep 2025 10:09:49 +0800 Subject: [PATCH 1/4] docs: documentation updates. --- docs/cli.md | 72 +++++++++++++++++++++---------------------- docs/neovim/README.md | 8 +++-- 2 files changed, 41 insertions(+), 39 deletions(-) diff --git a/docs/cli.md b/docs/cli.md index d1376d64..c5446229 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -580,7 +580,7 @@ following options in the JSON config file: "embedding_params": { "backend": "torch", "device": "cuda" - }, + } } ``` @@ -593,8 +593,8 @@ need to configure `SentenceTransformer` to use `openvino` backend. In your ```json { "embedding_params": { - "backend": "openvino", - }, + "backend": "openvino" + } } ``` This will run the embedding model on your GPU. This is supported even for @@ -618,14 +618,14 @@ fact, this is exactly what I did when I wrote the neovim plugin. For the query command, here's the format printed in the `pipe` mode: ```json [ - { - "path": "path_to_your_code.py", - "document":"import something" - }, - { - "path": "path_to_another_file.py", - "document": "print('hello world')" - } + { + "path": "path_to_your_code.py", + "document": "import something" + }, + { + "path": "path_to_another_file.py", + "document": "print('hello world')" + } ] ``` Basically an array of dictionaries with 2 keys: `"path"` for the path to the @@ -634,20 +634,20 @@ document, and `"document"` for the content of the document. If you used `--include chunk path` parameters, the array will look like this: ```json [ - { - "path": "path_to_your_code.py", - "chunk": "foo", - "start_line": 1, - "end_line": 1, - "chunk_id": "chunk_id_1" - }, - { - "path": "path_to_another_file.py", - "chunk": "bar", - "start_line": 1, - "end_line": 1, - "chunk_id": "chunk_id_2" - } + { + "path": "path_to_your_code.py", + "chunk": "foo", + "start_line": 1, + "end_line": 1, + "chunk_id": "chunk_id_1" + }, + { + "path": "path_to_another_file.py", + "chunk": "bar", + "start_line": 1, + "end_line": 1, + "chunk_id": "chunk_id_2" + } ] ``` Keep in mind that both `start_line` and `end_line` are inclusive. The `chunk_id` @@ -668,13 +668,13 @@ The output is in JSON format. It contains a dictionary with the following fields A JSON array of collection information of the following format will be printed: ```json { - "project_root": str, - "user": str, - "hostname": str, - "collection_name": str, - "size": int, - "num_files": int, - "embedding_function": str + "project_root": str, + "user": str, + "hostname": str, + "collection_name": str, + "size": int, + "num_files": int, + "embedding_function": str } ``` - `"project_root"`: the path to the `project-root`; @@ -711,8 +711,8 @@ pipx inject vectorcode 'vectorcode[lsp]' --force The LSP request for the `workspace/executeCommand` is defined as follows: ``` { - command: str - arguments: list[Any] + command: str + arguments: list[Any] } ``` For the `vectorcode-server`, the only valid value for the `command` key is @@ -721,8 +721,8 @@ command. For example, to execute `vectorcode query -n 10 reranker`, the request would be: ``` { - command: "vectorcode", - arguments: ["query", "-n", "10", "reranker"] + command: "vectorcode", + arguments: ["query", "-n", "10", "reranker"] } ``` diff --git a/docs/neovim/README.md b/docs/neovim/README.md index 685480cc..ab230af1 100644 --- a/docs/neovim/README.md +++ b/docs/neovim/README.md @@ -1,9 +1,11 @@ # NeoVim Plugin > [!NOTE] -> This plugin depends on the CLI tool. 
Please go through -> [the CLI documentation](../cli/README.md) and make sure the VectorCode CLI is working -> before proceeding. +> This plugin depends on the CLI tool. You should follow +> [the CLI documentation](../cli/README.md) to install the CLI and familiarise +> yourself with +> [the basic usage of the CLI interface](../cli/README.md#getting-started) +> before you proceed. > [!NOTE] > When the neovim plugin doesn't work properly, please try upgrading both the CLI From 195268db3409dd0adffecebe20fe2f3cf35ece33 Mon Sep 17 00:00:00 2001 From: Davidyz <30951234+Davidyz@users.noreply.github.com> Date: Thu, 4 Sep 2025 02:10:30 +0000 Subject: [PATCH 2/4] Auto generate docs --- doc/VectorCode-cli.txt | 72 +++++++++++++++++++++--------------------- doc/VectorCode.txt | 7 ++-- 2 files changed, 40 insertions(+), 39 deletions(-) diff --git a/doc/VectorCode-cli.txt b/doc/VectorCode-cli.txt index f20ec83c..ddc0d54b 100644 --- a/doc/VectorCode-cli.txt +++ b/doc/VectorCode-cli.txt @@ -640,7 +640,7 @@ following options in the JSON config file: "embedding_params": { "backend": "torch", "device": "cuda" - }, + } } < @@ -655,8 +655,8 @@ need to configure `SentenceTransformer` to use `openvino` backend. In your >json { "embedding_params": { - "backend": "openvino", - }, + "backend": "openvino" + } } < @@ -687,14 +687,14 @@ For the query command, here’s the format printed in the `pipe` mode: >json [ - { - "path": "path_to_your_code.py", - "document":"import something" - }, - { - "path": "path_to_another_file.py", - "document": "print('hello world')" - } + { + "path": "path_to_your_code.py", + "document": "import something" + }, + { + "path": "path_to_another_file.py", + "document": "print('hello world')" + } ] < @@ -705,20 +705,20 @@ If you used `--include chunk path` parameters, the array will look like this: >json [ - { - "path": "path_to_your_code.py", - "chunk": "foo", - "start_line": 1, - "end_line": 1, - "chunk_id": "chunk_id_1" - }, - { - "path": "path_to_another_file.py", - "chunk": "bar", - "start_line": 1, - "end_line": 1, - "chunk_id": "chunk_id_2" - } + { + "path": "path_to_your_code.py", + "chunk": "foo", + "start_line": 1, + "end_line": 1, + "chunk_id": "chunk_id_1" + }, + { + "path": "path_to_another_file.py", + "chunk": "bar", + "start_line": 1, + "end_line": 1, + "chunk_id": "chunk_id_2" + } ] < @@ -743,13 +743,13 @@ A JSON array of collection information of the following format will be printed: >json { - "project_root": str, - "user": str, - "hostname": str, - "collection_name": str, - "size": int, - "num_files": int, - "embedding_function": str + "project_root": str, + "user": str, + "hostname": str, + "collection_name": str, + "size": int, + "num_files": int, + "embedding_function": str } < @@ -792,8 +792,8 @@ The LSP request for the `workspace/executeCommand` is defined as follows: > { - command: str - arguments: list[Any] + command: str + arguments: list[Any] } < @@ -804,8 +804,8 @@ request would be: > { - command: "vectorcode", - arguments: ["query", "-n", "10", "reranker"] + command: "vectorcode", + arguments: ["query", "-n", "10", "reranker"] } < diff --git a/doc/VectorCode.txt b/doc/VectorCode.txt index 0824b0b0..470fbda4 100644 --- a/doc/VectorCode.txt +++ b/doc/VectorCode.txt @@ -15,9 +15,10 @@ Table of Contents *VectorCode-table-of-contents* 1. NeoVim Plugin *VectorCode-neovim-plugin* - [!NOTE] This plugin depends on the CLI tool. Please go through the CLI - documentation <../cli/README.md> and make sure the VectorCode CLI is working - before proceeding. 
+   [!NOTE] This plugin depends on the CLI tool. You should follow the CLI
+   documentation <../cli/README.md> to install the CLI and familiarise yourself
+   with the basic usage of the CLI interface <../cli/README.md#getting-started>
+   before you proceed.
 
   [!NOTE] When the neovim plugin doesn’t work properly, please try upgrading
   both the CLI and the neovim plugin to the latest version before opening an

From 6008e61b796d78d5a54b5f4bf524f2659297f6cc Mon Sep 17 00:00:00 2001
From: Zhe Yu
Date: Thu, 4 Sep 2025 10:19:32 +0800
Subject: [PATCH 3/4] docs(cli): suggest to pin version to `<1.0.0`

---
 docs/cli.md | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/docs/cli.md b/docs/cli.md
index c5446229..e3d2f53c 100644
--- a/docs/cli.md
+++ b/docs/cli.md
@@ -55,7 +55,7 @@ your system Python or project-local virtual environments. After installing
 `uv`, run:
 
 ```bash
-uv tool install vectorcode
+uv tool install "vectorcode<1.0.0"
 ```
 in your shell. To specify a particular version of Python, use the `--python`
 flag. For example, `uv tool install vectorcode --python python3.11`. For hardware
@@ -63,14 +63,15 @@ accelerated embedding, refer to [the relevant section](#hardware-acceleration).
 If you want a CPU-only installation without CUDA dependencies required by
 default by PyTorch, run:
 ```bash
-uv tool install vectorcode --index https://download.pytorch.org/whl/cpu --index-strategy unsafe-best-match
+uv tool install "vectorcode<1.0.0" --index https://download.pytorch.org/whl/cpu --index-strategy unsafe-best-match
 ```
 
 If you need to install multiple dependency groups (for [LSP](#lsp-mode) or
 [MCP](#mcp-server)), you can use the following syntax:
 ```bash
-uv tool install 'vectorcode[lsp,mcp]'
+uv tool install "vectorcode[lsp,mcp]<1.0.0"
 ```
+
 > [!NOTE]
 > The command only installs VectorCode and `SentenceTransformer`, the default
 > embedding engine. If you need to install an extra dependency, you can use

From d3cf8bdb1b0073345bd54c9e646078f5ee590202 Mon Sep 17 00:00:00 2001
From: Davidyz <30951234+Davidyz@users.noreply.github.com>
Date: Thu, 4 Sep 2025 02:20:20 +0000
Subject: [PATCH 4/4] Auto generate docs

---
 doc/VectorCode-cli.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/doc/VectorCode-cli.txt b/doc/VectorCode-cli.txt
index ddc0d54b..06bbd1f1 100644
--- a/doc/VectorCode-cli.txt
+++ b/doc/VectorCode-cli.txt
@@ -66,7 +66,7 @@ virtual environments. After installing `uv`, run:
 
 >bash
-    uv tool install vectorcode
+    uv tool install "vectorcode<1.0.0"
 <
 
 in your shell. To specify a particular version of Python, use the `--python`
@@ -76,14 +76,14 @@ If you want a CPU-only installation without CUDA dependencies required by
 default by PyTorch, run:
 
 >bash
-    uv tool install vectorcode --index https://download.pytorch.org/whl/cpu --index-strategy unsafe-best-match
+    uv tool install "vectorcode<1.0.0" --index https://download.pytorch.org/whl/cpu --index-strategy unsafe-best-match
 <
 
 If you need to install multiple dependency groups (for |VectorCode-cli-lsp| or
 |VectorCode-cli-mcp|), you can use the following syntax:
 
 >bash
-    uv tool install 'vectorcode[lsp,mcp]'
+    uv tool install "vectorcode[lsp,mcp]<1.0.0"
 <
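
The patches above pin the `uv` installation commands to `"vectorcode<1.0.0"`. For completeness, here is a hedged sketch of how the same ceiling could be expressed for a `pipx`-based setup; it is not part of the patch series, it assumes `pipx install` and `pipx inject` accept standard pip version specifiers (current releases do), and it mirrors the `pipx inject vectorcode 'vectorcode[lsp]' --force` command shown earlier in the CLI docs.

```bash
# Hypothetical pipx equivalent of the pinned uv commands above.
pipx install "vectorcode<1.0.0"

# Inject the optional extras while keeping the same version ceiling.
pipx inject vectorcode "vectorcode[lsp,mcp]<1.0.0" --force
```

Quoting the requirement keeps the shell from treating `<` as a redirection operator, which is the same reason the patched `uv` commands wrap their specifiers in quotes.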