Commit d37a353 (parent 5fae7dc)

chore: test work

6 files changed (+157, -252 lines)

pyproject.toml
Lines changed: 1 addition & 0 deletions

@@ -452,4 +452,5 @@ source-exclude = [
 ]
 
 # [tool.uv.sources]
+# exxec = { path = "../exxec", editable = true }
 # schemez = { path = "../schemez", editable = true }

tests/servers/acp_server/test_acp_via_acp_snapshots.py
Lines changed: 66 additions & 68 deletions

@@ -10,17 +10,20 @@
 
 from __future__ import annotations
 
-from dataclasses import dataclass, field
+from dataclasses import asdict, dataclass, field
 from pathlib import Path
 import sys
 import tempfile
 from typing import TYPE_CHECKING, Any
 
+from exxec.configs import MockExecutionEnvironmentConfig
+from exxec.models import ExecutionResult
 import pytest
 from syrupy.extensions.json import JSONSnapshotExtension
 import yaml
 
 from agentpool.agents.acp_agent import ACPAgent
+from agentpool_config.toolsets import ExecutionEnvironmentToolsetConfig
 
 
 if TYPE_CHECKING:
@@ -47,15 +50,15 @@ def create_config_file(
     temp_dir: Path,
     tool_name: str,
     tool_args: dict[str, Any],
-    toolsets: list[dict[str, Any]],
+    toolsets: list[ExecutionEnvironmentToolsetConfig],
 ) -> Path:
     """Create a YAML config file for the subprocess agent.
 
     Args:
         temp_dir: Directory to write config file to
         tool_name: Name of the tool to call
         tool_args: Arguments for the tool
-        toolsets: List of toolset config dicts (can include environment config)
+        toolsets: List of toolset configs
     """
     agent_config: dict[str, Any] = {
         "type": "native",
@@ -64,7 +67,7 @@ def create_config_file(
             "call_tools": [tool_name],
             "tool_args": {tool_name: tool_args},
         },
-        "toolsets": toolsets,
+        "toolsets": [t.model_dump(mode="json") for t in toolsets],
     }
 
     config = {"agents": {"test_agent": agent_config}}
@@ -89,14 +92,14 @@ async def execute_tool(
         self,
         tool_name: str,
         tool_args: dict[str, Any],
-        toolsets: list[dict[str, Any]],
+        toolsets: list[ExecutionEnvironmentToolsetConfig],
     ) -> list[dict[str, Any]]:
         """Execute a tool via ACP subprocess and capture full event details.
 
         Args:
             tool_name: Name of tool to execute
             tool_args: Arguments to pass to tool
-            toolsets: Toolset configurations (can include environment config)
+            toolsets: Toolset configurations
         """
         config_path = create_config_file(
             self.temp_dir,
@@ -120,14 +123,9 @@ async def execute_tool(
             cwd=str(self.temp_dir),
         ) as agent:
             async for event in agent.run_stream("Execute the tool"):
-                # Convert event to dict for snapshot
-                if hasattr(event, "model_dump"):
-                    event_dict = event.model_dump(exclude_none=True)
-                else:
-                    from dataclasses import asdict
-
-                    event_dict = asdict(event)
+                from dataclasses import asdict
 
+                event_dict = asdict(event)
                 event_dict["type"] = type(event).__name__
                 self.recorded_events.append(event_dict)
 
@@ -143,34 +141,33 @@ def harness(temp_dir: Path) -> ACPViaACPHarness:
 class TestExecuteCommandViaACP:
     """Test execute_command tool through ACP subprocess."""
 
-    @pytest.mark.asyncio
     async def test_execute_command_simple(
         self,
         harness: ACPViaACPHarness,
         json_snapshot: SnapshotAssertion,
     ):
         """Test simple command execution via ACP with mock environment."""
-        # Configure mock environment on the toolset itself (not the agent)
-        # The execution toolset has its own environment config
-        mock_env = {
-            "type": "mock",
-            "deterministic_ids": True,
-            "command_results": {
-                "echo hello": {
-                    "result": None,
-                    "stdout": "hello\n",
-                    "stderr": "",
-                    "success": True,
-                    "exit_code": 0,
-                    "duration": 0.01,
-                }
+        mock_env = MockExecutionEnvironmentConfig(
+            deterministic_ids=True,
+            command_results={
+                "echo hello": asdict(
+                    ExecutionResult(
+                        result=None,
+                        stdout="hello\n",
+                        stderr="",
+                        success=True,
+                        exit_code=0,
+                        duration=0.01,
+                    )
+                )
             },
-        }
+        )
+        toolset = ExecutionEnvironmentToolsetConfig(environment=mock_env)
 
         events = await harness.execute_tool(
             tool_name="execute_command",
             tool_args={"command": "echo hello"},
-            toolsets=[{"type": "execution", "environment": mock_env}],
+            toolsets=[toolset],
         )
 
         # Filter to tool call messages for stable comparison
@@ -181,41 +178,42 @@ async def test_execute_command_simple(
         assert tool_events == json_snapshot
 
 
-class TestExecuteCodeViaACP:
-    """Test execute_code tool through ACP subprocess."""
-
-    @pytest.mark.asyncio
-    async def test_execute_code_simple(
-        self,
-        harness: ACPViaACPHarness,
-        json_snapshot: SnapshotAssertion,
-    ):
-        """Test simple code execution via ACP with mock environment."""
-        # Configure mock environment on the toolset itself
-        mock_env = {
-            "type": "mock",
-            "deterministic_ids": True,
-            "code_results": {
-                "print('hello')": {
-                    "result": None,
-                    "stdout": "hello\n",
-                    "stderr": "",
-                    "success": True,
-                    "exit_code": 0,
-                    "duration": 0.01,
-                }
-            },
-        }
-
-        events = await harness.execute_tool(
-            tool_name="execute_code",
-            tool_args={"code": "print('hello')"},
-            toolsets=[{"type": "execution", "environment": mock_env}],
-        )
-
-        # Filter to tool call messages for stable comparison
-        tool_events = [
-            e for e in events if e["type"] in ("ToolCallStartEvent", "ToolCallProgressEvent")
-        ]
-
-        assert tool_events == json_snapshot
+# class TestExecuteCodeViaACP:
+# """Test execute_code tool through ACP subprocess."""
+
+# # async def test_execute_code_simple(
+# self,
+# harness: ACPViaACPHarness,
+# json_snapshot: SnapshotAssertion,
+# ):
+# """Test simple code execution via ACP with mock environment."""
+# mock_env = MockExecutionEnvironmentConfig(
+# deterministic_ids=True,
+# code_results={
+# "print('hello')": {
+# "stdout": "hello\n",
+# "stderr": "",
+# "success": True,
+# "exit_code": 0,
+# "duration": 0.01,
+# }
+# },
+# )
+# toolset = ExecutionEnvironmentToolsetConfig(environment=mock_env)
+
+# events = await harness.execute_tool(
+# tool_name="execute_code",
+# tool_args={"code": "print('hello')"},
+# toolsets=[toolset],
+# )
+
+# # Filter to tool call messages for stable comparison
+# tool_events = [
+# e for e in events if e["type"] in ("ToolCallStartEvent", "ToolCallProgressEvent")
+# ]
+
+# assert tool_events == json_snapshot
+
+
+if __name__ == "__main__":
+    pytest.main([__file__, "-vv"])
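Note on the pattern this file migrates to: hand-written environment dicts are replaced with typed configs (MockExecutionEnvironmentConfig, ExecutionResult, ExecutionEnvironmentToolsetConfig) that are serialized back to plain dicts via model_dump(mode="json") before being written to the subprocess agent's YAML config. The snippet below is a standalone sketch of that round trip, not part of the commit; it only uses names that appear in the diff above, and the printed YAML shape mirrors what create_config_file writes.

from dataclasses import asdict

import yaml
from exxec.configs import MockExecutionEnvironmentConfig
from exxec.models import ExecutionResult
from agentpool_config.toolsets import ExecutionEnvironmentToolsetConfig

# Typed mock environment: a canned ExecutionResult keyed by the exact command string.
mock_env = MockExecutionEnvironmentConfig(
    deterministic_ids=True,
    command_results={
        "echo hello": asdict(
            ExecutionResult(
                result=None,
                stdout="hello\n",
                stderr="",
                success=True,
                exit_code=0,
                duration=0.01,
            )
        )
    },
)
toolset = ExecutionEnvironmentToolsetConfig(environment=mock_env)

# Serialize the typed toolset back to a plain dict for the YAML agent config,
# mirroring [t.model_dump(mode="json") for t in toolsets] in create_config_file.
agent_config = {"type": "native", "toolsets": [toolset.model_dump(mode="json")]}
print(yaml.safe_dump({"agents": {"test_agent": agent_config}}, sort_keys=False))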

tests/servers/acp_server/test_tool_call_snapshots.py
Lines changed: 63 additions & 63 deletions

@@ -108,69 +108,69 @@ async def test_write_file_overwrite(
         assert messages == json_snapshot
 
 
-class TestExecuteCodeSnapshots:
-    """Snapshot tests for execute_code tool."""
-
-    async def test_execute_code_simple(
-        self,
-        harness: ToolCallTestHarness,
-        json_snapshot: SnapshotAssertion,
-    ) -> None:
-        """Test simple code execution produces expected notifications."""
-        harness.mock_env._code_results["print('hello')"] = ExecutionResult(
-            result=None, duration=0.01, success=True, stdout="hello\n", exit_code=0
-        )
-
-        messages = await harness.execute_tool(
-            tool_name="execute_code",
-            tool_args={"code": "print('hello')"},
-            toolsets=[ExecutionEnvironmentToolsetConfig()],
-        )
-
-        assert messages == json_snapshot
-
-    async def test_execute_code_with_error(
-        self,
-        harness: ToolCallTestHarness,
-        json_snapshot: SnapshotAssertion,
-    ) -> None:
-        """Test code execution with error produces expected notifications."""
-        harness.mock_env._code_results["raise ValueError('test error')"] = ExecutionResult(
-            result=None,
-            duration=0.01,
-            success=False,
-            stderr="ValueError: test error\n",
-            exit_code=1,
-            error="ValueError: test error",
-            error_type="ValueError",
-        )
-
-        messages = await harness.execute_tool(
-            tool_name="execute_code",
-            tool_args={"code": "raise ValueError('test error')"},
-            toolsets=[ExecutionEnvironmentToolsetConfig()],
-        )
-
-        assert messages == json_snapshot
-
-    async def test_execute_code_multiline(
-        self,
-        harness: ToolCallTestHarness,
-        json_snapshot: SnapshotAssertion,
-    ) -> None:
-        """Test multiline code execution produces expected notifications."""
-        code = "x = 1\ny = 2\nprint(x + y)"
-        harness.mock_env._code_results[code] = ExecutionResult(
-            result=None, duration=0.01, success=True, stdout="3\n", exit_code=0
-        )
-
-        messages = await harness.execute_tool(
-            tool_name="execute_code",
-            tool_args={"code": code},
-            toolsets=[ExecutionEnvironmentToolsetConfig()],
-        )
-
-        assert messages == json_snapshot
+# class TestExecuteCodeSnapshots:
+# """Snapshot tests for execute_code tool."""
+
+# async def test_execute_code_simple(
+# self,
+# harness: ToolCallTestHarness,
+# json_snapshot: SnapshotAssertion,
+# ) -> None:
+# """Test simple code execution produces expected notifications."""
+# harness.mock_env._code_results["print('hello')"] = ExecutionResult(
+# result=None, duration=0.01, success=True, stdout="hello\n", exit_code=0
+# )
+
+# messages = await harness.execute_tool(
+# tool_name="execute_code",
+# tool_args={"code": "print('hello')"},
+# toolsets=[ExecutionEnvironmentToolsetConfig()],
+# )
+
+# assert messages == json_snapshot
+
+# async def test_execute_code_with_error(
+# self,
+# harness: ToolCallTestHarness,
+# json_snapshot: SnapshotAssertion,
+# ) -> None:
+# """Test code execution with error produces expected notifications."""
+# harness.mock_env._code_results["raise ValueError('test error')"] = ExecutionResult(
+# result=None,
+# duration=0.01,
+# success=False,
+# stderr="ValueError: test error\n",
+# exit_code=1,
+# error="ValueError: test error",
+# error_type="ValueError",
+# )
+
+# messages = await harness.execute_tool(
+# tool_name="execute_code",
+# tool_args={"code": "raise ValueError('test error')"},
+# toolsets=[ExecutionEnvironmentToolsetConfig()],
+# )
+
+# assert messages == json_snapshot
+
+# async def test_execute_code_multiline(
+# self,
+# harness: ToolCallTestHarness,
+# json_snapshot: SnapshotAssertion,
+# ) -> None:
+# """Test multiline code execution produces expected notifications."""
+# code = "x = 1\ny = 2\nprint(x + y)"
+# harness.mock_env._code_results[code] = ExecutionResult(
+# result=None, duration=0.01, success=True, stdout="3\n", exit_code=0
+# )
+
+# messages = await harness.execute_tool(
+# tool_name="execute_code",
+# tool_args={"code": code},
+# toolsets=[ExecutionEnvironmentToolsetConfig()],
+# )
+
+# assert messages == json_snapshot
 
 
 class TestExecuteCommandSnapshots:
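For context, the ACP-via-ACP harness earlier in this commit normalizes streamed events into plain dicts (asdict(event) plus a type tag) and filters to tool-call events before the snapshot assertion. The sketch below shows that normalization step in isolation; ToolCallStartEvent here is a stand-in dataclass for illustration, not the real agentpool event type.

from dataclasses import asdict, dataclass


@dataclass
class ToolCallStartEvent:  # stand-in event for illustration only
    tool_name: str
    tool_call_id: str


recorded_events: list[dict] = []
for event in [ToolCallStartEvent(tool_name="execute_command", tool_call_id="call_1")]:
    event_dict = asdict(event)                   # dataclass event -> plain dict
    event_dict["type"] = type(event).__name__    # tag with the event class name
    recorded_events.append(event_dict)

# Keep only tool-call events so the snapshot stays stable across runs.
tool_events = [
    e for e in recorded_events
    if e["type"] in ("ToolCallStartEvent", "ToolCallProgressEvent")
]
assert tool_events == [
    {"tool_name": "execute_command", "tool_call_id": "call_1", "type": "ToolCallStartEvent"}
]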
