From 60d98160b231a8eba7e5d79be4bb4beb2a0cd495 Mon Sep 17 00:00:00 2001 From: Kelu Date: Mon, 15 Dec 2025 23:34:42 +0700 Subject: [PATCH 1/2] feat: Introduce LLM wait time Previously, the LLM invocation within the compose method was not subject to a specific timeout, which could lead to indefinite blocking if the underlying LLM service failed to respond or was extremely slow. This change adds max_llm_wait_time, which defaults to 10 minutes, to ensure that the LLM composition process will time out after the configured duration, preventing the agent from being stuck indefinitely and allowing for graceful error handling. --- .../common/trading/decision/prompt_based/composer.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/python/valuecell/agents/common/trading/decision/prompt_based/composer.py b/python/valuecell/agents/common/trading/decision/prompt_based/composer.py index 0a9d9fb42..82f9a79d0 100644 --- a/python/valuecell/agents/common/trading/decision/prompt_based/composer.py +++ b/python/valuecell/agents/common/trading/decision/prompt_based/composer.py @@ -1,5 +1,6 @@ from __future__ import annotations +import asyncio import json from typing import Dict @@ -48,10 +49,12 @@ def __init__( *, default_slippage_bps: int = 25, quantity_precision: float = 1e-9, + max_llm_wait_time: float = 600.0, ) -> None: self._request = request self._default_slippage_bps = default_slippage_bps self._quantity_precision = quantity_precision + self._max_llm_wait_time = max_llm_wait_time cfg = self._request.llm_model_config self._model = model_utils.create_model_with_provider( provider=cfg.provider, @@ -200,7 +203,9 @@ async def _call_llm(self, prompt: str) -> TradePlanProposal: agent's `response.content` is returned (or validated) as a `LlmPlanProposal`. """ - response = await self.agent.arun(prompt) + response = await asyncio.wait_for( + self.agent.arun(prompt), timeout=self._max_llm_wait_time ) # Agent may return a raw object or a wrapper with `.content`. 
content = getattr(response, "content", None) or response logger.debug("Received LLM response {}", content) From 16dd8f2fd73a67e95ab23e23400958e8ae5c2922 Mon Sep 17 00:00:00 2001 From: Kelu Diao <1632092+lukecold@users.noreply.github.com> Date: Tue, 16 Dec 2025 13:49:08 +0700 Subject: [PATCH 2/2] refactor: Specify the time unit in parameter name --- .../agents/common/trading/decision/prompt_based/composer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/valuecell/agents/common/trading/decision/prompt_based/composer.py b/python/valuecell/agents/common/trading/decision/prompt_based/composer.py index 82f9a79d0..dee1216ea 100644 --- a/python/valuecell/agents/common/trading/decision/prompt_based/composer.py +++ b/python/valuecell/agents/common/trading/decision/prompt_based/composer.py @@ -49,12 +49,12 @@ def __init__( *, default_slippage_bps: int = 25, quantity_precision: float = 1e-9, - max_llm_wait_time: float = 600.0, + max_llm_wait_time_sec: float = 600.0, ) -> None: self._request = request self._default_slippage_bps = default_slippage_bps self._quantity_precision = quantity_precision - self._max_llm_wait_time = max_llm_wait_time + self._max_llm_wait_time_sec = max_llm_wait_time_sec cfg = self._request.llm_model_config self._model = model_utils.create_model_with_provider( provider=cfg.provider, @@ -204,7 +204,7 @@ async def _call_llm(self, prompt: str) -> TradePlanProposal: `LlmPlanProposal`. """ response = await asyncio.wait_for( - self.agent.arun(prompt), timeout=self._max_llm_wait_time + self.agent.arun(prompt), timeout=self._max_llm_wait_time_sec ) # Agent may return a raw object or a wrapper with `.content`. content = getattr(response, "content", None) or response