diff --git a/lua/vectorcode/cacher/default.lua b/lua/vectorcode/cacher/default.lua index b10fd7af..5d265bd6 100644 --- a/lua/vectorcode/cacher/default.lua +++ b/lua/vectorcode/cacher/default.lua @@ -2,6 +2,7 @@ local M = {} local vc_config = require("vectorcode.config") local notify_opts = vc_config.notify_opts +local jobrunner = require("vectorcode.jobrunner.cmd") local logger = vc_config.logger @@ -58,37 +59,35 @@ local function async_runner(query_message, buf_nr) ) vim.list_extend(args, { "--project_root", project_root }) end + + if cache.options.single_job then + kill_jobs(buf_nr) + end + CACHE[buf_nr].job_count = CACHE[buf_nr].job_count + 1 logger.debug("vectorcode default cacher job args: ", args) - local job = require("plenary.job"):new({ - command = "vectorcode", - args = args, - detached = true, - on_start = function() - if cache.options.single_job then - kill_jobs(buf_nr) - end - end, - on_exit = function(self, code, signal) + + -- jobrunner is defined at the module level: local jobrunner = require("vectorcode.jobrunner.cmd") + local job_pid + job_pid = jobrunner.run_async( + args, + function(json_result, stderr_error, exit_code, signal) if not M.buf_is_registered(buf_nr) then return end - logger.debug("vectorcode ", buf_name, " default cacher results: ", self:result()) + logger.debug("vectorcode ", buf_name, " default cacher results: ", json_result) CACHE[buf_nr].job_count = CACHE[buf_nr].job_count - 1 - CACHE[buf_nr].jobs[self.pid] = nil - local ok, json = pcall( - vim.json.decode, - table.concat(self:result()) or "[]", - { array = true, object = true } - ) - if not ok or code ~= 0 then + assert(job_pid ~= nil) + CACHE[buf_nr].jobs[job_pid] = nil + + if exit_code ~= 0 then vim.schedule(function() if CACHE[buf_nr].options.notify then if signal == 15 then vim.notify("Retrieval aborted.", vim.log.levels.INFO, notify_opts) else vim.notify( - "Retrieval failed:\n" .. table.concat(self:result()), + "Retrieval failed:\n" .. 
table.concat(stderr_error, "\n"), vim.log.levels.WARN, notify_opts ) @@ -98,7 +97,7 @@ local function async_runner(query_message, buf_nr) return end cache = CACHE[buf_nr] - cache.retrieval = json or {} + cache.retrieval = json_result or {} vim.schedule(function() if cache.options.notify then vim.notify( @@ -109,12 +108,15 @@ local function async_runner(query_message, buf_nr) end end) end, - }) - job:start() + buf_nr + ) + ---@type VectorCode.Cache cache = CACHE[buf_nr] - cache.last_run = vim.uv.clock_gettime("realtime").sec - cache.jobs[job.pid] = vim.uv.clock_gettime("realtime").sec + if job_pid then + cache.last_run = vim.uv.clock_gettime("realtime").sec + cache.jobs[job_pid] = vim.uv.clock_gettime("realtime").sec + end vim.schedule(function() if cache.options.notify then vim.notify( @@ -313,21 +315,14 @@ end ---@param on_success fun(out: vim.SystemCompleted)? ---@param on_failure fun(out: vim.SystemCompleted?)? function M.async_check(check_item, on_success, on_failure) - if not vc_config.has_cli() then - if on_failure ~= nil then - on_failure() - end - return - end - - check_item = check_item or "config" - vim.system({ "vectorcode", "check", check_item }, {}, function(out) - if out.code == 0 and type(on_success) == "function" then - vim.schedule_wrap(on_success)(out) - elseif out.code ~= 0 and type(on_failure) == "function" then - vim.schedule_wrap(on_failure)(out) - end - end) + vim.deprecate( + "vectorcode.cacher.default.async_check", + "vectorcode.cacher.utils.async_check", + "0.7.0", + "VectorCode", + true + ) + require("vectorcode.cacher").utils.async_check(check_item, on_success, on_failure) end ---@param bufnr integer? 
diff --git a/lua/vectorcode/cacher/init.lua b/lua/vectorcode/cacher/init.lua index b264e138..3fb94cae 100644 --- a/lua/vectorcode/cacher/init.lua +++ b/lua/vectorcode/cacher/init.lua @@ -1,4 +1,36 @@ +local vc_config = require("vectorcode.config") +local jobrunner = require("vectorcode.jobrunner.cmd") + return { lsp = require("vectorcode.cacher.lsp"), default = require("vectorcode.cacher.default"), + utils = { + ---Checks if VectorCode has been configured properly for your project. + ---See the CLI manual for details. + ---@param check_item string? + ---@param on_success fun(out: vim.SystemCompleted)? + ---@param on_failure fun(out: vim.SystemCompleted?)? + async_check = function(check_item, on_success, on_failure) + if not vc_config.has_cli() then + if on_failure ~= nil then + on_failure() + end + return + end + check_item = check_item or "config" + jobrunner.run_async({ "check", check_item }, function(result, error, code, signal) + local out = { + stdout = table.concat(vim.iter(result):flatten(math.huge):totable()), + stderr = table.concat(vim.iter(error):flatten(math.huge):totable()), + code = code, + signal = signal, + } + if out.code == 0 and type(on_success) == "function" then + vim.schedule_wrap(on_success)(out) + elseif out.code ~= 0 and type(on_failure) == "function" then + vim.schedule_wrap(on_failure)(out) + end + end, 0) + end, + }, } diff --git a/lua/vectorcode/cacher/lsp.lua b/lua/vectorcode/cacher/lsp.lua index f2f5b756..b7679c84 100644 --- a/lua/vectorcode/cacher/lsp.lua +++ b/lua/vectorcode/cacher/lsp.lua @@ -348,21 +348,14 @@ end ---@param on_success fun(out: vim.SystemCompleted)? ---@param on_failure fun(out: vim.SystemCompleted?)? 
function M.async_check(check_item, on_success, on_failure) - if not vc_config.has_cli() then - if on_failure ~= nil then - on_failure() - end - return - end - - check_item = check_item or "config" - vim.system({ "vectorcode", "check", check_item }, {}, function(out) - if out.code == 0 and type(on_success) == "function" then - vim.schedule_wrap(on_success)(out) - elseif out.code ~= 0 and type(on_failure) == "function" then - vim.schedule_wrap(on_failure)(out) - end - end) + vim.deprecate( + "vectorcode.cacher.lsp.async_check", + "vectorcode.cacher.utils.async_check", + "0.7.0", + "VectorCode", + true + ) + require("vectorcode.cacher").utils.async_check(check_item, on_success, on_failure) end ---@param bufnr integer? diff --git a/lua/vectorcode/init.lua b/lua/vectorcode/init.lua index 373ae79f..d0bbf350 100644 --- a/lua/vectorcode/init.lua +++ b/lua/vectorcode/init.lua @@ -186,14 +186,17 @@ function M.check(check_item, stdout_cb) end check_item = check_item or "config" local return_code - vim - .system({ "vectorcode", "check", check_item }, {}, function(out) - return_code = out.code - if type(stdout_cb) == "function" then - stdout_cb(out) - end - end) - :wait() + -- jobrunner.run blocks until completion, so return_code is set before we return + local result, err, code, signal = jobrunner.run({ "check", check_item }, nil, 0) + return_code = code + if type(stdout_cb) == "function" then + stdout_cb({ + stdout = table.concat(vim.iter(result):flatten(math.huge):totable()), + stderr = table.concat(vim.iter(err):flatten(math.huge):totable()), + code = code, + signal = signal, + }) + end return return_code == 0 end diff --git a/lua/vectorcode/jobrunner/cmd.lua b/lua/vectorcode/jobrunner/cmd.lua index 21ef5fdd..5303c28f 100644 --- a/lua/vectorcode/jobrunner/cmd.lua +++ b/lua/vectorcode/jobrunner/cmd.lua @@ -22,14 +22,14 @@ function runner.run_async(args, callback, bufnr) local job = Job:new({ command = "vectorcode", args = args, - on_exit = function(self, _, _) + on_exit = function(self, code, signal) jobs[self.pid] = nil local result = 
self:result() logger.debug(result) local ok, decoded = pcall(vim.json.decode, table.concat(result, "")) if callback ~= nil then if ok then - callback(decoded or {}, self:stderr_result()) + callback(decoded or {}, self:stderr_result(), code, signal) if vim.islist(result) then logger.debug( "cmd jobrunner result:\n", @@ -43,7 +43,7 @@ function runner.run_async(args, callback, bufnr) ) end else - callback({ result }, self:stderr_result()) + callback({ result }, self:stderr_result(), code, signal) logger.warn("cmd runner: failed to decode result:\n", result) end end @@ -58,20 +58,20 @@ function runner.run(args, timeout_ms, bufnr) if timeout_ms == nil or timeout_ms < 0 then timeout_ms = 2 ^ 31 - 1 end - local res, err - local pid = runner.run_async(args, function(result, error) + local res, err, code, signal + local pid = runner.run_async(args, function(result, error, e_code, s) res = result err = error + code = e_code + signal = s end, bufnr) if pid ~= nil then vim.wait(timeout_ms, function() return res ~= nil or err ~= nil end) jobs[pid] = nil - return res, err - else - return {}, err end + return res or {}, err, code, signal end function runner.is_job_running(job) diff --git a/lua/vectorcode/jobrunner/init.lua b/lua/vectorcode/jobrunner/init.lua index 75a9940c..48d77aff 100644 --- a/lua/vectorcode/jobrunner/init.lua +++ b/lua/vectorcode/jobrunner/init.lua @@ -6,15 +6,23 @@ local utils = require("vectorcode.utils") ---@class VectorCode.JobRunner --- Runs a vectorcode command asynchronously. --- Executes the command specified by `args`. Upon completion, if `callback` is provided, ---- it's invoked with the result table (decoded JSON from stdout) and error table (stderr lines). +--- it's invoked with the following arguments: +--- - `result`: the JSON object of the command execution result. +--- - `error`: error messages, if any. +--- - `code`: exit code (or error code) for the process. +--- - `signal`: _for cmd runner only_, the shell signal sent to the process. 
--- The `bufnr` is used for context, potentially to find the project root or attach LSP clients. --- Returns a job handle (e.g., PID or LSP request ID) or nil if the job couldn't be started. ----@field run_async fun(args: string[], callback:fun(result: table, error: table)?, bufnr: integer):(job_handle:integer?) +---@field run_async fun(args: string[], callback:fun(result: table, error: table, code:integer, signal: integer?)?, bufnr: integer):(job_handle:integer?) --- Runs a vectorcode command synchronously, blocking until completion or timeout. --- Executes the command specified by `args`. Waits for up to `timeout_ms` milliseconds. --- The `bufnr` is used for context, potentially to find the project root or attach LSP clients. ---- Returns the result table (decoded JSON from stdout) and error table (stderr lines). ----@field run fun(args: string[], timeout_ms: integer?, bufnr: integer):(result:table, error:table) +--- Returns the following objects: +--- - `result`: the JSON object of the command execution result. +--- - `error`: error messages, if any. +--- - `code`: exit code (or error code) for the process. +--- - `signal`: _for cmd runner only_, the shell signal sent to the process. +---@field run fun(args: string[], timeout_ms: integer?, bufnr: integer):(result:table, error:table, code:integer, signal: integer?) --- Checks if a job associated with the given handle is currently running. --- Returns true if the job is running, false otherwise. 
---@field is_job_running fun(job_handle: integer):boolean diff --git a/lua/vectorcode/jobrunner/lsp.lua b/lua/vectorcode/jobrunner/lsp.lua index d15bfab7..1703b685 100644 --- a/lua/vectorcode/jobrunner/lsp.lua +++ b/lua/vectorcode/jobrunner/lsp.lua @@ -52,18 +52,16 @@ function jobrunner.run(args, timeout_ms, bufnr) end args = require("vectorcode.jobrunner").find_root(args, bufnr) - local result, err - jobrunner.run_async(args, function(res, err) + local result, err, code + jobrunner.run_async(args, function(res, err, e_code) result = res err = err + code = e_code end, bufnr) vim.wait(timeout_ms, function() return (result ~= nil) or (err ~= nil) end) - if result == nil then - return {}, err - end - return result, err + return result or {}, err, code end function jobrunner.run_async(args, callback, bufnr) @@ -102,7 +100,11 @@ function jobrunner.run_async(args, callback, bufnr) if err ~= nil and err.message ~= nil then err_message = { err.message } end - vim.schedule_wrap(callback)(result, err_message) + local code = 0 + if err and err.code then + code = err.code + end + vim.schedule_wrap(callback)(result, err_message, code) if result then logger.debug( "lsp jobrunner result:\n", diff --git a/src/vectorcode/lsp_main.py b/src/vectorcode/lsp_main.py index 51a56e6a..46d37b5c 100644 --- a/src/vectorcode/lsp_main.py +++ b/src/vectorcode/lsp_main.py @@ -6,6 +6,12 @@ import time import uuid +from pygls.exceptions import ( + JsonRpcException, + JsonRpcInternalError, + JsonRpcInvalidRequest, +) + try: # pragma: nocover from lsprotocol import types from pygls.server import LanguageServer @@ -66,89 +72,101 @@ def get_arg_parser(): @server.command("vectorcode") async def execute_command(ls: LanguageServer, args: list[str]): - global DEFAULT_PROJECT_ROOT - start_time = time.time() - logger.info("Received command arguments: %s", args) - parsed_args = await parse_cli_args(args) - logger.info("Parsed command arguments: %s", parsed_args) - if parsed_args.action not in 
{CliAction.query, CliAction.ls}: - print( - f"Unsupported vectorcode subcommand: {str(parsed_args.action)}", - file=sys.stderr, - ) - return - if parsed_args.project_root is None: - if DEFAULT_PROJECT_ROOT is not None: - parsed_args.project_root = DEFAULT_PROJECT_ROOT - logger.warning("Using DEFAULT_PROJECT_ROOT: %s", DEFAULT_PROJECT_ROOT) - elif DEFAULT_PROJECT_ROOT is None: - logger.warning("Updating DEFAULT_PROJECT_ROOT to %s", parsed_args.project_root) - DEFAULT_PROJECT_ROOT = str(parsed_args.project_root) - - if parsed_args.project_root is not None: - parsed_args.project_root = os.path.abspath(str(parsed_args.project_root)) - await make_caches(parsed_args.project_root) - final_configs = await cached_project_configs[ - parsed_args.project_root - ].merge_from(parsed_args) - final_configs.pipe = True - client = await get_client(final_configs) - collection = await get_collection( - client=client, - configs=final_configs, - make_if_missing=final_configs.action in {CliAction.vectorise}, - ) - else: - final_configs = parsed_args - client = await get_client(parsed_args) - collection = None - logger.info("Merged final configs: %s", final_configs) - progress_token = str(uuid.uuid4()) - - await ls.progress.create_async(progress_token) - match final_configs.action: - case CliAction.query: - ls.progress.begin( - progress_token, - types.WorkDoneProgressBegin( - "VectorCode", - message=f"Querying {cleanup_path(str(final_configs.project_root))}", - ), + try: + global DEFAULT_PROJECT_ROOT + start_time = time.time() + logger.info("Received command arguments: %s", args) + parsed_args = await parse_cli_args(args) + logger.info("Parsed command arguments: %s", parsed_args) + if parsed_args.action not in {CliAction.query, CliAction.ls}: + error_message = ( + f"Unsupported vectorcode subcommand: {str(parsed_args.action)}" ) - final_results = [] - try: - if collection is None: - print("Please specify a project to search in.", file=sys.stderr) - else: - final_results.extend( - await 
build_query_results(collection, final_configs) - ) - finally: - log_message = f"Retrieved {len(final_results)} result{'s' if len(final_results) > 1 else ''} in {round(time.time() - start_time, 2)}s." - ls.progress.end( + logger.error( + error_message, + ) + raise JsonRpcInvalidRequest(error_message) + if parsed_args.project_root is None: + if DEFAULT_PROJECT_ROOT is not None: + parsed_args.project_root = DEFAULT_PROJECT_ROOT + logger.warning("Using DEFAULT_PROJECT_ROOT: %s", DEFAULT_PROJECT_ROOT) + elif DEFAULT_PROJECT_ROOT is None: + logger.warning( + "Updating DEFAULT_PROJECT_ROOT to %s", parsed_args.project_root + ) + DEFAULT_PROJECT_ROOT = str(parsed_args.project_root) + + if parsed_args.project_root is not None: + parsed_args.project_root = os.path.abspath(str(parsed_args.project_root)) + await make_caches(parsed_args.project_root) + final_configs = await cached_project_configs[ + parsed_args.project_root + ].merge_from(parsed_args) + final_configs.pipe = True + client = await get_client(final_configs) + collection = await get_collection( + client=client, + configs=final_configs, + make_if_missing=final_configs.action in {CliAction.vectorise}, + ) + else: + final_configs = parsed_args + client = await get_client(parsed_args) + collection = None + logger.info("Merged final configs: %s", final_configs) + progress_token = str(uuid.uuid4()) + + await ls.progress.create_async(progress_token) + match final_configs.action: + case CliAction.query: + ls.progress.begin( progress_token, - types.WorkDoneProgressEnd(message=log_message), + types.WorkDoneProgressBegin( + "VectorCode", + message=f"Querying {cleanup_path(str(final_configs.project_root))}", + ), ) - logger.info(log_message) - return final_results - case CliAction.ls: - ls.progress.begin( - progress_token, - types.WorkDoneProgressBegin( - "VectorCode", - message="Looking for other projects indexed by VectorCode", - ), - ) - projects: list[dict] = [] - try: - projects.extend(await get_collection_list(client)) - 
finally: - ls.progress.end( + final_results = [] + try: + if collection is None: + print("Please specify a project to search in.", file=sys.stderr) + else: + final_results.extend( + await build_query_results(collection, final_configs) + ) + finally: + log_message = f"Retrieved {len(final_results)} result{'s' if len(final_results) > 1 else ''} in {round(time.time() - start_time, 2)}s." + ls.progress.end( + progress_token, + types.WorkDoneProgressEnd(message=log_message), + ) + logger.info(log_message) + return final_results + case CliAction.ls: + ls.progress.begin( progress_token, - types.WorkDoneProgressEnd(message="List retrieved."), + types.WorkDoneProgressBegin( + "VectorCode", + message="Looking for other projects indexed by VectorCode", + ), ) - logger.info(f"Retrieved {len(projects)} project(s).") - return projects + projects: list[dict] = [] + try: + projects.extend(await get_collection_list(client)) + finally: + ls.progress.end( + progress_token, + types.WorkDoneProgressEnd(message="List retrieved."), + ) + logger.info(f"Retrieved {len(projects)} project(s).") + return projects + except Exception as e: + if isinstance(e, JsonRpcException): + # pygls exception. raise it as is. + raise + else: # pragma: nocover + # wrap non-pygls errors for error codes. 
+ raise JsonRpcInternalError(str(e)) from e async def lsp_start() -> int: diff --git a/tests/test_lsp.py b/tests/test_lsp.py index d953da6c..d2a70f41 100644 --- a/tests/test_lsp.py +++ b/tests/test_lsp.py @@ -1,6 +1,7 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest +from pygls.exceptions import JsonRpcInvalidRequest from pygls.server import LanguageServer from vectorcode import __version__ @@ -245,9 +246,8 @@ async def test_execute_command_unsupported_action( # Mock the merge_from method mock_config.merge_from = AsyncMock(return_value=mock_config) - await execute_command(mock_language_server, ["invalid_action"]) - captured = capsys.readouterr() - assert "Unsupported vectorcode subcommand" in captured.err + with pytest.raises(JsonRpcInvalidRequest): + await execute_command(mock_language_server, ["invalid_action"])