From b4d763bd0d7541bb09e860ed096dfa1c179c9c6c Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Fri, 25 Jul 2025 07:34:19 -0700 Subject: [PATCH 01/15] update for plugins --- openai_agents/run_worker.py | 61 ++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/openai_agents/run_worker.py b/openai_agents/run_worker.py index 9dbaef91..123d4a28 100644 --- a/openai_agents/run_worker.py +++ b/openai_agents/run_worker.py @@ -5,13 +5,11 @@ from temporalio.client import Client from temporalio.contrib.openai_agents import ( - ModelActivity, ModelActivityParameters, - OpenAIAgentsTracingInterceptor, - set_open_ai_agent_temporal_overrides, + OpenAIAgentsPlugin ) -from temporalio.contrib.pydantic import pydantic_data_converter from temporalio.worker import Worker +from temporalio.client import ClientConfig from openai_agents.workflows.agents_as_tools_workflow import AgentsAsToolsWorkflow from openai_agents.workflows.customer_service_workflow import CustomerServiceWorkflow @@ -22,34 +20,33 @@ async def main(): - with set_open_ai_agent_temporal_overrides( - model_params=ModelActivityParameters( - start_to_close_timeout=timedelta(seconds=60), - ), - ): - # Create client connected to server at the given address - client = await Client.connect( - "localhost:7233", - data_converter=pydantic_data_converter, - ) - - worker = Worker( - client, - task_queue="openai-agents-task-queue", - workflows=[ - HelloWorldAgent, - ToolsWorkflow, - ResearchWorkflow, - CustomerServiceWorkflow, - AgentsAsToolsWorkflow, - ], - activities=[ - ModelActivity().invoke_model_activity, - get_weather, - ], - interceptors=[OpenAIAgentsTracingInterceptor()], - ) - await worker.run() + # Create client connected to server at the given address + client = await Client.connect( + "localhost:7233", + plugins=[ + OpenAIAgentsPlugin( + model_params=ModelActivityParameters( + start_to_close_timeout=timedelta(seconds=120) + ) + ), + ] + ) + + worker = Worker( + client, + task_queue="openai-agents-task-queue", + workflows=[ + HelloWorldAgent, + ToolsWorkflow, + ResearchWorkflow, + CustomerServiceWorkflow, + AgentsAsToolsWorkflow, + ], + activities=[ + get_weather, + ], + ) + await worker.run() if __name__ == "__main__": From ef7a00df82e0a6ef0d698b19f6fd8686e81cde66 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Fri, 25 Jul 2025 07:40:16 -0700 Subject: [PATCH 02/15] formatting --- openai_agents/run_worker.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/openai_agents/run_worker.py b/openai_agents/run_worker.py index 123d4a28..0f3f3e5e 100644 --- a/openai_agents/run_worker.py +++ b/openai_agents/run_worker.py @@ -3,13 +3,9 @@ import asyncio from datetime import timedelta -from temporalio.client import Client -from temporalio.contrib.openai_agents import ( - ModelActivityParameters, - OpenAIAgentsPlugin -) +from temporalio.client import Client, ClientConfig +from temporalio.contrib.openai_agents import ModelActivityParameters, OpenAIAgentsPlugin from temporalio.worker import Worker -from temporalio.client import ClientConfig from openai_agents.workflows.agents_as_tools_workflow import AgentsAsToolsWorkflow from openai_agents.workflows.customer_service_workflow import CustomerServiceWorkflow @@ -26,10 +22,10 @@ async def main(): plugins=[ OpenAIAgentsPlugin( model_params=ModelActivityParameters( - start_to_close_timeout=timedelta(seconds=120) + start_to_close_timeout=timedelta(seconds=120) ) ), - ] + ], ) worker = Worker( From 
38375caed75c0cdeb44a18341eb83dc684537f3f Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 10:03:52 -0700 Subject: [PATCH 03/15] reference main branch --- pyproject.toml | 4 +- uv.lock | 212 +++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 189 insertions(+), 27 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 620bbc52..51468517 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,8 +55,8 @@ open-telemetry = [ "opentelemetry-exporter-otlp-proto-grpc", ] openai-agents = [ - "openai-agents >= 0.0.19", - "temporalio[openai-agents] >= 1.14.1", + "openai-agents[litellm] >= 0.2.3", + "temporalio[openai-agents] @ https://github.com/temporalio/sdk-python.git", ] pydantic-converter = ["pydantic>=2.10.6,<3"] sentry = ["sentry-sdk>=1.11.0,<2"] diff --git a/uv.lock b/uv.lock index 99aaf356..a6c60497 100644 --- a/uv.lock +++ b/uv.lock @@ -435,6 +435,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e5/47/d63c60f59a59467fda0f93f46335c9d18526d7071f025cb5b89d5353ea42/fastapi-0.116.1-py3-none-any.whl", hash = "sha256:c46ac7c312df840f0c9e220f7964bada936781bc4e2e6eb71f1c4d7553786565", size = 95631, upload-time = "2025-07-11T16:22:30.485Z" }, ] +[[package]] +name = "filelock" +version = "3.18.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/10/c23352565a6544bdc5353e0b15fc1c563352101f30e24bf500207a54df9a/filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2", size = 18075, upload-time = "2025-03-14T07:11:40.47Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/36/2a115987e2d8c300a974597416d9de88f2444426de9571f4b59b2cca3acc/filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de", size = 16215, upload-time = "2025-03-14T07:11:39.145Z" }, +] + [[package]] name = "frozenlist" version = "1.7.0" @@ -529,6 +538,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ee/45/b82e3c16be2182bff01179db177fe144d58b5dc787a7d4492c6ed8b9317f/frozenlist-1.7.0-py3-none-any.whl", hash = "sha256:9a5af342e34f7e97caf8c995864c7a396418ae2859cc6fdf1b1073020d516a7e", size = 13106, upload-time = "2025-06-09T23:02:34.204Z" }, ] +[[package]] +name = "fsspec" +version = "2025.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8b/02/0835e6ab9cfc03916fe3f78c0956cfcdb6ff2669ffa6651065d5ebf7fc98/fsspec-2025.7.0.tar.gz", hash = "sha256:786120687ffa54b8283d942929540d8bc5ccfa820deb555a2b5d0ed2b737bf58", size = 304432, upload-time = "2025-07-15T16:05:21.19Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/e0/014d5d9d7a4564cf1c40b5039bc882db69fd881111e03ab3657ac0b218e2/fsspec-2025.7.0-py3-none-any.whl", hash = "sha256:8b012e39f63c7d5f10474de957f3ab793b47b45ae7d39f2fb735f8bbe25c0e21", size = 199597, upload-time = "2025-07-15T16:05:19.529Z" }, +] + [[package]] name = "gevent" version = "25.4.2" @@ -709,6 +727,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, ] +[[package]] +name = "hf-xet" +version = "1.1.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/ed/d4/7685999e85945ed0d7f0762b686ae7015035390de1161dcea9d5276c134c/hf_xet-1.1.5.tar.gz", hash = "sha256:69ebbcfd9ec44fdc2af73441619eeb06b94ee34511bbcf57cd423820090f5694", size = 495969, upload-time = "2025-06-20T21:48:38.007Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/89/a1119eebe2836cb25758e7661d6410d3eae982e2b5e974bcc4d250be9012/hf_xet-1.1.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:f52c2fa3635b8c37c7764d8796dfa72706cc4eded19d638331161e82b0792e23", size = 2687929, upload-time = "2025-06-20T21:48:32.284Z" }, + { url = "https://files.pythonhosted.org/packages/de/5f/2c78e28f309396e71ec8e4e9304a6483dcbc36172b5cea8f291994163425/hf_xet-1.1.5-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:9fa6e3ee5d61912c4a113e0708eaaef987047616465ac7aa30f7121a48fc1af8", size = 2556338, upload-time = "2025-06-20T21:48:30.079Z" }, + { url = "https://files.pythonhosted.org/packages/6d/2f/6cad7b5fe86b7652579346cb7f85156c11761df26435651cbba89376cd2c/hf_xet-1.1.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc874b5c843e642f45fd85cda1ce599e123308ad2901ead23d3510a47ff506d1", size = 3102894, upload-time = "2025-06-20T21:48:28.114Z" }, + { url = "https://files.pythonhosted.org/packages/d0/54/0fcf2b619720a26fbb6cc941e89f2472a522cd963a776c089b189559447f/hf_xet-1.1.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dbba1660e5d810bd0ea77c511a99e9242d920790d0e63c0e4673ed36c4022d18", size = 3002134, upload-time = "2025-06-20T21:48:25.906Z" }, + { url = "https://files.pythonhosted.org/packages/f3/92/1d351ac6cef7c4ba8c85744d37ffbfac2d53d0a6c04d2cabeba614640a78/hf_xet-1.1.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ab34c4c3104133c495785d5d8bba3b1efc99de52c02e759cf711a91fd39d3a14", size = 3171009, upload-time = "2025-06-20T21:48:33.987Z" }, + { url = "https://files.pythonhosted.org/packages/c9/65/4b2ddb0e3e983f2508528eb4501288ae2f84963586fbdfae596836d5e57a/hf_xet-1.1.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:83088ecea236d5113de478acb2339f92c95b4fb0462acaa30621fac02f5a534a", size = 3279245, upload-time = "2025-06-20T21:48:36.051Z" }, + { url = "https://files.pythonhosted.org/packages/f0/55/ef77a85ee443ae05a9e9cba1c9f0dd9241eb42da2aeba1dc50f51154c81a/hf_xet-1.1.5-cp37-abi3-win_amd64.whl", hash = "sha256:73e167d9807d166596b4b2f0b585c6d5bd84a26dea32843665a8b58f6edba245", size = 2738931, upload-time = "2025-06-20T21:48:39.482Z" }, +] + [[package]] name = "httpcore" version = "1.0.9" @@ -782,6 +815,25 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/25/0a/6269e3473b09aed2dab8aa1a600c70f31f00ae1349bee30658f7e358a159/httpx_sse-0.4.1-py3-none-any.whl", hash = "sha256:cba42174344c3a5b06f255ce65b350880f962d99ead85e776f23c6618a377a37", size = 8054, upload-time = "2025-06-24T13:21:04.772Z" }, ] +[[package]] +name = "huggingface-hub" +version = "0.34.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "filelock" }, + { name = "fsspec" }, + { name = "hf-xet", marker = "platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" }, + { name = "packaging" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/22/cd/841bc8e0550d69f632a15cdd70004e95ba92cd0fbe13087d6669e2bb5f44/huggingface_hub-0.34.1.tar.gz", hash = "sha256:6978ed89ef981de3c78b75bab100a214843be1cc9d24f8e9c0dc4971808ef1b1", size = 
456783, upload-time = "2025-07-25T14:54:54.758Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8c/cf/dd53c0132f50f258b06dd37a4616817b1f1f6a6b38382c06effd04bb6881/huggingface_hub-0.34.1-py3-none-any.whl", hash = "sha256:60d843dcb7bc335145b20e7d2f1dfe93910f6787b2b38a936fb772ce2a83757c", size = 558788, upload-time = "2025-07-25T14:54:52.957Z" }, +] + [[package]] name = "idna" version = "3.10" @@ -821,6 +873,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d1/b3/8def84f539e7d2289a02f0524b944b15d7c75dab7628bedf1c4f0992029c/isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6", size = 92310, upload-time = "2023-12-13T20:37:23.244Z" }, ] +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + [[package]] name = "jiter" version = "0.10.0" @@ -1054,6 +1118,28 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/de/f0/63b06b99b730b9954f8709f6f7d9b8d076fa0a973e472efe278089bde42b/langsmith-0.1.147-py3-none-any.whl", hash = "sha256:7166fc23b965ccf839d64945a78e9f1157757add228b086141eb03a60d699a15", size = 311812, upload-time = "2024-11-27T17:32:39.569Z" }, ] +[[package]] +name = "litellm" +version = "1.74.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "click" }, + { name = "httpx" }, + { name = "importlib-metadata" }, + { name = "jinja2" }, + { name = "jsonschema" }, + { name = "openai" }, + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "tiktoken" }, + { name = "tokenizers" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/16/89c5123c808cbc51e398afc2f1a56da1d75d5e8ef7be417895a3794f0416/litellm-1.74.8.tar.gz", hash = "sha256:6e0a18aecf62459d465ee6d9a2526fcb33719a595b972500519abe95fe4906e0", size = 9639701, upload-time = "2025-07-23T23:38:02.903Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/4a/eba1b617acb7fa597d169cdd1b5ce98502bd179138f130721a2367d2deb8/litellm-1.74.8-py3-none-any.whl", hash = "sha256:f9433207d1e12e545495e5960fe02d93e413ecac4a28225c522488e1ab1157a1", size = 8713698, upload-time = "2025-07-23T23:38:00.708Z" }, +] + [[package]] name = "markdown-it-py" version = "3.0.0" @@ -1066,6 +1152,64 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528, upload-time = "2023-06-03T06:41:11.019Z" }, ] +[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = 
"sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537, upload-time = "2024-10-18T15:21:54.129Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/90/d08277ce111dd22f77149fd1a5d4653eeb3b3eaacbdfcbae5afb2600eebd/MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", size = 14357, upload-time = "2024-10-18T15:20:51.44Z" }, + { url = "https://files.pythonhosted.org/packages/04/e1/6e2194baeae0bca1fae6629dc0cbbb968d4d941469cbab11a3872edff374/MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", size = 12393, upload-time = "2024-10-18T15:20:52.426Z" }, + { url = "https://files.pythonhosted.org/packages/1d/69/35fa85a8ece0a437493dc61ce0bb6d459dcba482c34197e3efc829aa357f/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", size = 21732, upload-time = "2024-10-18T15:20:53.578Z" }, + { url = "https://files.pythonhosted.org/packages/22/35/137da042dfb4720b638d2937c38a9c2df83fe32d20e8c8f3185dbfef05f7/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", size = 20866, upload-time = "2024-10-18T15:20:55.06Z" }, + { url = "https://files.pythonhosted.org/packages/29/28/6d029a903727a1b62edb51863232152fd335d602def598dade38996887f0/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", size = 20964, upload-time = "2024-10-18T15:20:55.906Z" }, + { url = "https://files.pythonhosted.org/packages/cc/cd/07438f95f83e8bc028279909d9c9bd39e24149b0d60053a97b2bc4f8aa51/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", size = 21977, upload-time = "2024-10-18T15:20:57.189Z" }, + { url = "https://files.pythonhosted.org/packages/29/01/84b57395b4cc062f9c4c55ce0df7d3108ca32397299d9df00fedd9117d3d/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", size = 21366, upload-time = "2024-10-18T15:20:58.235Z" }, + { url = "https://files.pythonhosted.org/packages/bd/6e/61ebf08d8940553afff20d1fb1ba7294b6f8d279df9fd0c0db911b4bbcfd/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", size = 21091, upload-time = "2024-10-18T15:20:59.235Z" }, + { url = "https://files.pythonhosted.org/packages/11/23/ffbf53694e8c94ebd1e7e491de185124277964344733c45481f32ede2499/MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50", size = 15065, upload-time = "2024-10-18T15:21:00.307Z" }, + { url = "https://files.pythonhosted.org/packages/44/06/e7175d06dd6e9172d4a69a72592cb3f7a996a9c396eee29082826449bbc3/MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", size = 15514, upload-time = "2024-10-18T15:21:01.122Z" }, + { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353, upload-time = "2024-10-18T15:21:02.187Z" }, + { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392, upload-time = "2024-10-18T15:21:02.941Z" }, + { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984, upload-time = "2024-10-18T15:21:03.953Z" }, + { url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120, upload-time = "2024-10-18T15:21:06.495Z" }, + { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032, upload-time = "2024-10-18T15:21:07.295Z" }, + { url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057, upload-time = "2024-10-18T15:21:08.073Z" }, + { url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359, upload-time = "2024-10-18T15:21:09.318Z" }, + { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306, upload-time = "2024-10-18T15:21:10.185Z" }, + { url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094, upload-time = "2024-10-18T15:21:11.005Z" }, + { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521, upload-time = "2024-10-18T15:21:12.911Z" }, + { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274, upload-time = "2024-10-18T15:21:13.777Z" }, + { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348, upload-time = "2024-10-18T15:21:14.822Z" }, + { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149, upload-time = "2024-10-18T15:21:15.642Z" }, + { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118, upload-time = "2024-10-18T15:21:17.133Z" }, + { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993, upload-time = "2024-10-18T15:21:18.064Z" }, + { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178, upload-time = "2024-10-18T15:21:18.859Z" }, + { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319, upload-time = "2024-10-18T15:21:19.671Z" }, + { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352, upload-time = "2024-10-18T15:21:20.971Z" }, + { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097, upload-time = "2024-10-18T15:21:22.646Z" }, + { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601, upload-time = "2024-10-18T15:21:23.499Z" }, + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274, upload-time = "2024-10-18T15:21:24.577Z" }, + { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352, upload-time = "2024-10-18T15:21:25.382Z" }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122, upload-time = "2024-10-18T15:21:26.199Z" }, + { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085, upload-time = "2024-10-18T15:21:27.029Z" }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978, upload-time = "2024-10-18T15:21:27.846Z" }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208, upload-time = "2024-10-18T15:21:28.744Z" }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357, upload-time = "2024-10-18T15:21:29.545Z" }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344, upload-time = "2024-10-18T15:21:30.366Z" }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101, upload-time = "2024-10-18T15:21:31.207Z" }, + { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603, upload-time = "2024-10-18T15:21:32.032Z" }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510, upload-time = "2024-10-18T15:21:33.625Z" }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486, upload-time = "2024-10-18T15:21:34.611Z" }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480, upload-time = "2024-10-18T15:21:35.398Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914, upload-time = "2024-10-18T15:21:36.231Z" }, + { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796, upload-time = "2024-10-18T15:21:37.073Z" }, + { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473, upload-time = "2024-10-18T15:21:37.932Z" }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114, upload-time = "2024-10-18T15:21:39.799Z" }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098, upload-time = "2024-10-18T15:21:40.813Z" }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208, upload-time = "2024-10-18T15:21:41.814Z" }, + { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739, upload-time = "2024-10-18T15:21:42.784Z" }, +] + [[package]] name = "marshmallow" version = "3.26.1" @@ -1262,14 +1406,10 @@ wheels = [ [[package]] name = "nexus-rpc" version = "1.1.0" -source = { registry = "https://pypi.org/simple" } +source = { git = "https://github.com/nexus-rpc/sdk-python.git?rev=35f574c711193a6e2560d3e6665732a5bb7ae92c#35f574c711193a6e2560d3e6665732a5bb7ae92c" } dependencies = [ { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ef/66/540687556bd28cf1ec370cc6881456203dfddb9dab047b8979c6865b5984/nexus_rpc-1.1.0.tar.gz", hash = "sha256:d65ad6a2f54f14e53ebe39ee30555eaeb894102437125733fb13034a04a44553", size = 77383, upload-time = "2025-07-07T19:03:58.368Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/2f/9e9d0dcaa4c6ffa22b7aa31069a8a264c753ff8027b36af602cce038c92f/nexus_rpc-1.1.0-py3-none-any.whl", hash = "sha256:d1b007af2aba186a27e736f8eaae39c03aed05b488084ff6c3d1785c9ba2ad38", size = 27743, upload-time = "2025-07-07T19:03:57.556Z" }, -] [[package]] name = "nodeenv" @@ -1314,7 +1454,7 @@ wheels = [ [[package]] name = "openai" -version = "1.95.0" +version = "1.97.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1326,14 +1466,14 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ef/2f/0c6f509a1585545962bfa6e201d7fb658eb2a6f52fb8c26765632d91706c/openai-1.95.0.tar.gz", hash = 
"sha256:54bc42df9f7142312647dd485d34cca5df20af825fa64a30ca55164be2cf4cc9", size = 488144, upload-time = "2025-07-10T18:35:49.946Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a6/57/1c471f6b3efb879d26686d31582997615e969f3bb4458111c9705e56332e/openai-1.97.1.tar.gz", hash = "sha256:a744b27ae624e3d4135225da9b1c89c107a2a7e5bc4c93e5b7b5214772ce7a4e", size = 494267, upload-time = "2025-07-22T13:10:12.607Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/19/a5/57d0bb58b938a3e3f352ff26e645da1660436402a6ad1b29780d261cc5a5/openai-1.95.0-py3-none-any.whl", hash = "sha256:a7afc9dca7e7d616371842af8ea6dbfbcb739a85d183f5f664ab1cc311b9ef18", size = 755572, upload-time = "2025-07-10T18:35:47.507Z" }, + { url = "https://files.pythonhosted.org/packages/ee/35/412a0e9c3f0d37c94ed764b8ac7adae2d834dbd20e69f6aca582118e0f55/openai-1.97.1-py3-none-any.whl", hash = "sha256:4e96bbdf672ec3d44968c9ea39d2c375891db1acc1794668d8149d5fa6000606", size = 764380, upload-time = "2025-07-22T13:10:10.689Z" }, ] [[package]] name = "openai-agents" -version = "0.1.0" +version = "0.2.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "griffe" }, @@ -1344,9 +1484,14 @@ dependencies = [ { name = "types-requests" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/99/f8/a292d8f506997355755d88db619966539ec838ce18f070c5a101e5a430ec/openai_agents-0.1.0.tar.gz", hash = "sha256:a697a4fdd881a7a16db8c0dcafba0f17d9e90b6236a4b79923bd043b6ae86d80", size = 1379588, upload-time = "2025-06-27T20:58:03.186Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/17/1f9eefb99fde956e5912a00fbdd03d50ebc734cc45a80b8fe4007d3813c2/openai_agents-0.2.3.tar.gz", hash = "sha256:95d4ad194c5c0cf1a40038cb701eee8ecdaaf7698d87bb13e3c2c5cff80c4b4d", size = 1464947, upload-time = "2025-07-21T19:34:20.595Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/31/5b/326e6b1b661dbef718977a8379f9702a4eec1df772450517870beeb3af35/openai_agents-0.1.0-py3-none-any.whl", hash = "sha256:6a8ef71d3f20aecba0f01bca2e059590d1c23f5adc02d780cb5921ea8a7ca774", size = 130620, upload-time = "2025-06-27T20:58:01.461Z" }, + { url = "https://files.pythonhosted.org/packages/eb/a7/d6bdf69a54c15d237a2be979981f33dab8f5da53f9bc2e734fb2b58592ca/openai_agents-0.2.3-py3-none-any.whl", hash = "sha256:15c5602de7076a5df6d11f07a18ffe0cf4f6811f6135b301acdd1998398a6d5c", size = 161393, upload-time = "2025-07-21T19:34:18.883Z" }, +] + +[package.optional-dependencies] +litellm = [ + { name = "litellm" }, ] [[package]] @@ -2448,7 +2593,7 @@ wheels = [ [[package]] name = "temporalio" version = "1.14.1" -source = { registry = "https://pypi.org/simple" } +source = { git = "https://github.com/temporalio/sdk-python.git#fc564c4cf5e3dd090f19624512276393a2ec2096" } dependencies = [ { name = "nexus-rpc" }, { name = "protobuf" }, @@ -2456,14 +2601,6 @@ dependencies = [ { name = "types-protobuf" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/40/23/ef5ed581d26112e21c4a6d4ddc2c4eaa5700c0d70b53b07566553e9b7d90/temporalio-1.14.1.tar.gz", hash = "sha256:b240cf56f64add65beb75bd18aa854ac35bdc2505097af5af1e235d611190a9d", size = 1607639, upload-time = "2025-07-10T20:56:43.485Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/bd/66/6dc4f5a647a9901cf19e012c442173574babdc879ccaf4cb166662a23ef0/temporalio-1.14.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:ebde00b59af72e512e5837445e4b5b8aa445431d57a71bbeb57a5ba8a93ac8be", size = 12508009, 
upload-time = "2025-07-10T20:56:30.653Z" }, - { url = "https://files.pythonhosted.org/packages/bb/dc/654ebcc92c658180576127ac6dc047fab43b7730f39df4439645e91577fb/temporalio-1.14.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:3c21cff8fdc60fbcc9acd91e6c119b0b5f9de7671fe806459f00d68bd4ecae78", size = 12091653, upload-time = "2025-07-10T20:56:33.199Z" }, - { url = "https://files.pythonhosted.org/packages/8a/58/7fc3a7bde275c059e42d0279c54e8e66642b67be8eda21b31347f4277186/temporalio-1.14.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f984b503ae741213fe71128d6193076f3267691561ff3c55dbe798f92e6ee1b", size = 12451995, upload-time = "2025-07-10T20:56:36.055Z" }, - { url = "https://files.pythonhosted.org/packages/98/12/14f6a7a1f4aebb7d846469f5c1cd165cce55b793ded6ce5fc315bd83e28f/temporalio-1.14.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:830cb1a820624a5e64f6c874b5aca6ad9eb841295407dd2011074159a2d28bdb", size = 12688904, upload-time = "2025-07-10T20:56:38.501Z" }, - { url = "https://files.pythonhosted.org/packages/b4/ed/c09f1ca41d5ed9f9a777a0ddd5bc225f8300bab8b42bc6751195566706fb/temporalio-1.14.1-cp39-abi3-win_amd64.whl", hash = "sha256:ad4e6a16b42bb34aebec62fb8bbe8f64643d8268ed6d7db337dfe98a76799bb0", size = 12758696, upload-time = "2025-07-10T20:56:41.266Z" }, -] [package.optional-dependencies] openai-agents = [ @@ -2533,7 +2670,7 @@ open-telemetry = [ { name = "temporalio", extra = ["opentelemetry"] }, ] openai-agents = [ - { name = "openai-agents" }, + { name = "openai-agents", extra = ["litellm"] }, { name = "temporalio", extra = ["openai-agents"] }, ] pydantic-converter = [ @@ -2554,8 +2691,8 @@ requires-dist = [{ name = "temporalio", specifier = ">=1.14.1,<2" }] bedrock = [{ name = "boto3", specifier = ">=1.34.92,<2" }] cloud-export-to-parquet = [ { name = "boto3", specifier = ">=1.34.89,<2" }, - { name = "numpy", marker = "python_full_version >= '3.9' and python_full_version < '3.13'", specifier = ">=1.26.0,<2" }, - { name = "pandas", marker = "python_full_version >= '3.9' and python_full_version < '4'", specifier = ">=2.2.2,<3" }, + { name = "numpy", marker = "python_full_version >= '3.10' and python_full_version < '3.13'", specifier = ">=1.26.0,<2" }, + { name = "pandas", marker = "python_full_version >= '3.10' and python_full_version < '4'", specifier = ">=2.2.2,<3" }, { name = "pyarrow", specifier = ">=19.0.1" }, ] dev = [ @@ -2595,8 +2732,8 @@ open-telemetry = [ { name = "temporalio", extras = ["opentelemetry"] }, ] openai-agents = [ - { name = "openai-agents", specifier = ">=0.0.19" }, - { name = "temporalio", extras = ["openai-agents"], specifier = ">=1.14.1" }, + { name = "openai-agents", extras = ["litellm"], specifier = ">=0.2.3" }, + { name = "temporalio", extras = ["openai-agents"], git = "https://github.com/temporalio/sdk-python.git" }, ] pydantic-converter = [{ name = "pydantic", specifier = ">=2.10.6,<3" }] sentry = [{ name = "sentry-sdk", specifier = ">=1.11.0,<2" }] @@ -2650,6 +2787,31 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/de/a8/8f499c179ec900783ffe133e9aab10044481679bb9aad78436d239eee716/tiktoken-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:5ea0edb6f83dc56d794723286215918c1cde03712cbbafa0348b33448faf5b95", size = 894669, upload-time = "2025-02-14T06:02:47.341Z" }, ] +[[package]] +name = "tokenizers" +version = "0.21.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "huggingface-hub" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/ab/2d/b0fce2b8201635f60e8c95990080f58461cc9ca3d5026de2e900f38a7f21/tokenizers-0.21.2.tar.gz", hash = "sha256:fdc7cffde3e2113ba0e6cc7318c40e3438a4d74bbc62bf04bcc63bdfb082ac77", size = 351545, upload-time = "2025-06-24T10:24:52.449Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/cc/2936e2d45ceb130a21d929743f1e9897514691bec123203e10837972296f/tokenizers-0.21.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:342b5dfb75009f2255ab8dec0041287260fed5ce00c323eb6bab639066fef8ec", size = 2875206, upload-time = "2025-06-24T10:24:42.755Z" }, + { url = "https://files.pythonhosted.org/packages/6c/e6/33f41f2cc7861faeba8988e7a77601407bf1d9d28fc79c5903f8f77df587/tokenizers-0.21.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:126df3205d6f3a93fea80c7a8a266a78c1bd8dd2fe043386bafdd7736a23e45f", size = 2732655, upload-time = "2025-06-24T10:24:41.56Z" }, + { url = "https://files.pythonhosted.org/packages/33/2b/1791eb329c07122a75b01035b1a3aa22ad139f3ce0ece1b059b506d9d9de/tokenizers-0.21.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a32cd81be21168bd0d6a0f0962d60177c447a1aa1b1e48fa6ec9fc728ee0b12", size = 3019202, upload-time = "2025-06-24T10:24:31.791Z" }, + { url = "https://files.pythonhosted.org/packages/05/15/fd2d8104faa9f86ac68748e6f7ece0b5eb7983c7efc3a2c197cb98c99030/tokenizers-0.21.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8bd8999538c405133c2ab999b83b17c08b7fc1b48c1ada2469964605a709ef91", size = 2934539, upload-time = "2025-06-24T10:24:34.567Z" }, + { url = "https://files.pythonhosted.org/packages/a5/2e/53e8fd053e1f3ffbe579ca5f9546f35ac67cf0039ed357ad7ec57f5f5af0/tokenizers-0.21.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5e9944e61239b083a41cf8fc42802f855e1dca0f499196df37a8ce219abac6eb", size = 3248665, upload-time = "2025-06-24T10:24:39.024Z" }, + { url = "https://files.pythonhosted.org/packages/00/15/79713359f4037aa8f4d1f06ffca35312ac83629da062670e8830917e2153/tokenizers-0.21.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:514cd43045c5d546f01142ff9c79a96ea69e4b5cda09e3027708cb2e6d5762ab", size = 3451305, upload-time = "2025-06-24T10:24:36.133Z" }, + { url = "https://files.pythonhosted.org/packages/38/5f/959f3a8756fc9396aeb704292777b84f02a5c6f25c3fc3ba7530db5feb2c/tokenizers-0.21.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b1b9405822527ec1e0f7d8d2fdb287a5730c3a6518189c968254a8441b21faae", size = 3214757, upload-time = "2025-06-24T10:24:37.784Z" }, + { url = "https://files.pythonhosted.org/packages/c5/74/f41a432a0733f61f3d21b288de6dfa78f7acff309c6f0f323b2833e9189f/tokenizers-0.21.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fed9a4d51c395103ad24f8e7eb976811c57fbec2af9f133df471afcd922e5020", size = 3121887, upload-time = "2025-06-24T10:24:40.293Z" }, + { url = "https://files.pythonhosted.org/packages/3c/6a/bc220a11a17e5d07b0dfb3b5c628621d4dcc084bccd27cfaead659963016/tokenizers-0.21.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2c41862df3d873665ec78b6be36fcc30a26e3d4902e9dd8608ed61d49a48bc19", size = 9091965, upload-time = "2025-06-24T10:24:44.431Z" }, + { url = "https://files.pythonhosted.org/packages/6c/bd/ac386d79c4ef20dc6f39c4706640c24823dca7ebb6f703bfe6b5f0292d88/tokenizers-0.21.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:ed21dc7e624e4220e21758b2e62893be7101453525e3d23264081c9ef9a6d00d", size = 9053372, upload-time = 
"2025-06-24T10:24:46.455Z" }, + { url = "https://files.pythonhosted.org/packages/63/7b/5440bf203b2a5358f074408f7f9c42884849cd9972879e10ee6b7a8c3b3d/tokenizers-0.21.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:0e73770507e65a0e0e2a1affd6b03c36e3bc4377bd10c9ccf51a82c77c0fe365", size = 9298632, upload-time = "2025-06-24T10:24:48.446Z" }, + { url = "https://files.pythonhosted.org/packages/a4/d2/faa1acac3f96a7427866e94ed4289949b2524f0c1878512516567d80563c/tokenizers-0.21.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:106746e8aa9014a12109e58d540ad5465b4c183768ea96c03cbc24c44d329958", size = 9470074, upload-time = "2025-06-24T10:24:50.378Z" }, + { url = "https://files.pythonhosted.org/packages/d8/a5/896e1ef0707212745ae9f37e84c7d50269411aef2e9ccd0de63623feecdf/tokenizers-0.21.2-cp39-abi3-win32.whl", hash = "sha256:cabda5a6d15d620b6dfe711e1af52205266d05b379ea85a8a301b3593c60e962", size = 2330115, upload-time = "2025-06-24T10:24:55.069Z" }, + { url = "https://files.pythonhosted.org/packages/13/c3/cc2755ee10be859c4338c962a35b9a663788c0c0b50c0bdd8078fb6870cf/tokenizers-0.21.2-cp39-abi3-win_amd64.whl", hash = "sha256:58747bb898acdb1007f37a7bbe614346e98dc28708ffb66a3fd50ce169ac6c98", size = 2509918, upload-time = "2025-06-24T10:24:53.71Z" }, +] + [[package]] name = "tomli" version = "2.2.1" From 92286fd4ec9dd3d5022e2d8920970a3c71469331 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 10:10:17 -0700 Subject: [PATCH 04/15] cleanup --- openai_agents/run_worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openai_agents/run_worker.py b/openai_agents/run_worker.py index 0f3f3e5e..b17fbd0f 100644 --- a/openai_agents/run_worker.py +++ b/openai_agents/run_worker.py @@ -3,7 +3,7 @@ import asyncio from datetime import timedelta -from temporalio.client import Client, ClientConfig +from temporalio.client import Client from temporalio.contrib.openai_agents import ModelActivityParameters, OpenAIAgentsPlugin from temporalio.worker import Worker From 1b6b0260bef3a511a041dca44438ac9e9c0c9f17 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 10:24:19 -0700 Subject: [PATCH 05/15] switch to plugins on the runners --- openai_agents/run_agents_as_tools_workflow.py | 6 ++++-- openai_agents/run_customer_service_client.py | 6 ++++-- openai_agents/run_hello_world_workflow.py | 6 ++++-- openai_agents/run_research_workflow.py | 6 ++++-- openai_agents/run_tools_workflow.py | 6 ++++-- 5 files changed, 20 insertions(+), 10 deletions(-) diff --git a/openai_agents/run_agents_as_tools_workflow.py b/openai_agents/run_agents_as_tools_workflow.py index 9c37beb0..822ca709 100644 --- a/openai_agents/run_agents_as_tools_workflow.py +++ b/openai_agents/run_agents_as_tools_workflow.py @@ -1,7 +1,7 @@ import asyncio from temporalio.client import Client -from temporalio.contrib.pydantic import pydantic_data_converter +from temporalio.contrib.openai_agents import OpenAIAgentsPlugin from openai_agents.workflows.agents_as_tools_workflow import AgentsAsToolsWorkflow @@ -10,7 +10,9 @@ async def main(): # Create client connected to server at the given address client = await Client.connect( "localhost:7233", - data_converter=pydantic_data_converter, + plugins=[ + OpenAIAgentsPlugin(), + ], ) # Execute a workflow diff --git a/openai_agents/run_customer_service_client.py b/openai_agents/run_customer_service_client.py index be081c1e..ef8aaf62 100644 --- a/openai_agents/run_customer_service_client.py +++ b/openai_agents/run_customer_service_client.py @@ -7,7 +7,7 @@ 
WorkflowUpdateFailedError, ) from temporalio.common import QueryRejectCondition -from temporalio.contrib.pydantic import pydantic_data_converter +from temporalio.contrib.openai_agents import OpenAIAgentsPlugin from temporalio.service import RPCError, RPCStatusCode from openai_agents.workflows.customer_service_workflow import ( @@ -24,7 +24,9 @@ async def main(): # Create client connected to server at the given address client = await Client.connect( "localhost:7233", - data_converter=pydantic_data_converter, + plugins=[ + OpenAIAgentsPlugin(), + ], ) handle = client.get_workflow_handle(args.conversation_id) diff --git a/openai_agents/run_hello_world_workflow.py b/openai_agents/run_hello_world_workflow.py index 566d525a..9ec8833f 100644 --- a/openai_agents/run_hello_world_workflow.py +++ b/openai_agents/run_hello_world_workflow.py @@ -1,7 +1,7 @@ import asyncio from temporalio.client import Client -from temporalio.contrib.pydantic import pydantic_data_converter +from temporalio.contrib.openai_agents import OpenAIAgentsPlugin from openai_agents.workflows.hello_world_workflow import HelloWorldAgent @@ -10,7 +10,9 @@ async def main(): # Create client connected to server at the given address client = await Client.connect( "localhost:7233", - data_converter=pydantic_data_converter, + plugins=[ + OpenAIAgentsPlugin(), + ], ) # Execute a workflow diff --git a/openai_agents/run_research_workflow.py b/openai_agents/run_research_workflow.py index 136874db..5279648e 100644 --- a/openai_agents/run_research_workflow.py +++ b/openai_agents/run_research_workflow.py @@ -1,7 +1,7 @@ import asyncio from temporalio.client import Client -from temporalio.contrib.pydantic import pydantic_data_converter +from temporalio.contrib.openai_agents import OpenAIAgentsPlugin from openai_agents.workflows.research_bot_workflow import ResearchWorkflow @@ -10,7 +10,9 @@ async def main(): # Create client connected to server at the given address client = await Client.connect( "localhost:7233", - data_converter=pydantic_data_converter, + plugins=[ + OpenAIAgentsPlugin(), + ], ) # Execute a workflow diff --git a/openai_agents/run_tools_workflow.py b/openai_agents/run_tools_workflow.py index 2635be6f..cfb3d811 100644 --- a/openai_agents/run_tools_workflow.py +++ b/openai_agents/run_tools_workflow.py @@ -1,7 +1,7 @@ import asyncio from temporalio.client import Client -from temporalio.contrib.pydantic import pydantic_data_converter +from temporalio.contrib.openai_agents import OpenAIAgentsPlugin from openai_agents.workflows.tools_workflow import ToolsWorkflow @@ -10,7 +10,9 @@ async def main(): # Create client connected to server at the given address client = await Client.connect( "localhost:7233", - data_converter=pydantic_data_converter, + plugins=[ + OpenAIAgentsPlugin(), + ], ) # Execute a workflow From c1579413620424a1433cf39f466deb151c459966 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 11:03:07 -0700 Subject: [PATCH 06/15] move around samples --- openai_agents/README.md | 42 +++++++++++++++---- .../run_agents_as_tools_workflow.py | 4 +- openai_agents/agent_patterns/run_worker.py | 39 +++++++++++++++++ .../workflows/agents_as_tools_workflow.py | 0 .../activities}/get_weather_activity.py | 0 .../{ => basic}/run_hello_world_workflow.py | 2 +- .../{ => basic}/run_tools_workflow.py | 2 +- openai_agents/{ => basic}/run_worker.py | 14 ++----- .../workflows/hello_world_workflow.py | 0 .../{ => basic}/workflows/tools_workflow.py | 2 +- .../customer_service.py | 0 .../run_customer_service_client.py | 2 +- 
openai_agents/customer_service/run_worker.py | 39 +++++++++++++++++ .../workflows/customer_service_workflow.py | 2 +- .../agents}/planner_agent.py | 0 .../agents}/research_manager.py | 6 +-- .../agents}/search_agent.py | 0 .../agents}/writer_agent.py | 0 .../run_research_workflow.py | 2 +- openai_agents/research_bot/run_worker.py | 37 ++++++++++++++++ .../workflows/research_bot_workflow.py | 2 +- openai_agents/workflows/__init__.py | 0 .../workflows/research_agents/__init__.py | 0 23 files changed, 165 insertions(+), 30 deletions(-) rename openai_agents/{ => agent_patterns}/run_agents_as_tools_workflow.py (85%) create mode 100644 openai_agents/agent_patterns/run_worker.py rename openai_agents/{ => agent_patterns}/workflows/agents_as_tools_workflow.py (100%) rename openai_agents/{workflows => basic/activities}/get_weather_activity.py (100%) rename openai_agents/{ => basic}/run_hello_world_workflow.py (89%) rename openai_agents/{ => basic}/run_tools_workflow.py (89%) rename openai_agents/{ => basic}/run_worker.py (58%) rename openai_agents/{ => basic}/workflows/hello_world_workflow.py (100%) rename openai_agents/{ => basic}/workflows/tools_workflow.py (90%) rename openai_agents/{workflows => customer_service}/customer_service.py (100%) rename openai_agents/{ => customer_service}/run_customer_service_client.py (96%) create mode 100644 openai_agents/customer_service/run_worker.py rename openai_agents/{ => customer_service}/workflows/customer_service_workflow.py (98%) rename openai_agents/{workflows/research_agents => research_bot/agents}/planner_agent.py (100%) rename openai_agents/{workflows/research_agents => research_bot/agents}/research_manager.py (91%) rename openai_agents/{workflows/research_agents => research_bot/agents}/search_agent.py (100%) rename openai_agents/{workflows/research_agents => research_bot/agents}/writer_agent.py (100%) rename openai_agents/{ => research_bot}/run_research_workflow.py (88%) create mode 100644 openai_agents/research_bot/run_worker.py rename openai_agents/{ => research_bot}/workflows/research_bot_workflow.py (68%) delete mode 100644 openai_agents/workflows/__init__.py delete mode 100644 openai_agents/workflows/research_agents/__init__.py diff --git a/openai_agents/README.md b/openai_agents/README.md index 1eceb747..84af1e82 100644 --- a/openai_agents/README.md +++ b/openai_agents/README.md @@ -25,7 +25,7 @@ This approach ensures that AI agent workflows are durable, observable, and can h 1. **Start the worker** (supports all samples): ```bash - uv run openai_agents/run_worker.py + uv run openai_agents/basic/run_worker.py ``` 2. 
**Run individual samples** in separate terminals: @@ -34,29 +34,53 @@ This approach ensures that AI agent workflows are durable, observable, and can h - **Hello World Agent** - Simple agent that responds in haikus: ```bash - uv run openai_agents/run_hello_world_workflow.py + uv run openai_agents/basic/run_hello_world_workflow.py ``` - **Tools Agent** - Agent with access to external tools (weather API): ```bash - uv run openai_agents/run_tools_workflow.py + uv run openai_agents/basic/run_tools_workflow.py + ``` + +### Agent Patterns + +Start the worker: + + ```bash + uv run openai_agents/agent_patterns/run_worker.py + ``` + +- **Agents as Tools** - Demonstrate using agents as tools within other agents: + + ```bash + uv run openai_agents/agent_patterns/run_agents_as_tools_workflow.py ``` ### Advanced Multi-Agent Examples - **Research Workflow** - Multi-agent research system with specialized roles: + + Start the worker: + + ```bash + uv run openai_agents/research_bot/run_worker.py + ``` + ```bash - uv run openai_agents/run_research_workflow.py + uv run openai_agents/research_bot/run_research_workflow.py ``` Features a planner agent, search agent, and writer agent working together. - **Customer Service Workflow** - Customer service agent with escalation capabilities (interactive): - ```bash - uv run openai_agents/run_customer_service_client.py --conversation-id my-conversation-123 - ``` -- **Agents as Tools** - Demonstrate using agents as tools within other agents: + Start the worker: + + ```bash + uv run openai_agents/customer_service/run_worker.py + ``` + ```bash - uv run openai_agents/run_agents_as_tools_workflow.py + uv run openai_agents/customer_service/run_customer_service_client.py --conversation-id my-conversation-123 ``` + diff --git a/openai_agents/run_agents_as_tools_workflow.py b/openai_agents/agent_patterns/run_agents_as_tools_workflow.py similarity index 85% rename from openai_agents/run_agents_as_tools_workflow.py rename to openai_agents/agent_patterns/run_agents_as_tools_workflow.py index 822ca709..a42d3aac 100644 --- a/openai_agents/run_agents_as_tools_workflow.py +++ b/openai_agents/agent_patterns/run_agents_as_tools_workflow.py @@ -3,7 +3,9 @@ from temporalio.client import Client from temporalio.contrib.openai_agents import OpenAIAgentsPlugin -from openai_agents.workflows.agents_as_tools_workflow import AgentsAsToolsWorkflow +from openai_agents.agent_patterns.workflows.agents_as_tools_workflow import ( + AgentsAsToolsWorkflow, +) async def main(): diff --git a/openai_agents/agent_patterns/run_worker.py b/openai_agents/agent_patterns/run_worker.py new file mode 100644 index 00000000..3dc336f1 --- /dev/null +++ b/openai_agents/agent_patterns/run_worker.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +import asyncio +from datetime import timedelta + +from temporalio.client import Client +from temporalio.contrib.openai_agents import ModelActivityParameters, OpenAIAgentsPlugin +from temporalio.worker import Worker + +from openai_agents.agent_patterns.workflows.agents_as_tools_workflow import ( + AgentsAsToolsWorkflow, +) + + +async def main(): + # Create client connected to server at the given address + client = await Client.connect( + "localhost:7233", + plugins=[ + OpenAIAgentsPlugin( + model_params=ModelActivityParameters( + start_to_close_timeout=timedelta(seconds=120) + ) + ), + ], + ) + + worker = Worker( + client, + task_queue="openai-agents-task-queue", + workflows=[ + AgentsAsToolsWorkflow, + ], + ) + await worker.run() + + +if __name__ == "__main__": + 
asyncio.run(main()) diff --git a/openai_agents/workflows/agents_as_tools_workflow.py b/openai_agents/agent_patterns/workflows/agents_as_tools_workflow.py similarity index 100% rename from openai_agents/workflows/agents_as_tools_workflow.py rename to openai_agents/agent_patterns/workflows/agents_as_tools_workflow.py diff --git a/openai_agents/workflows/get_weather_activity.py b/openai_agents/basic/activities/get_weather_activity.py similarity index 100% rename from openai_agents/workflows/get_weather_activity.py rename to openai_agents/basic/activities/get_weather_activity.py diff --git a/openai_agents/run_hello_world_workflow.py b/openai_agents/basic/run_hello_world_workflow.py similarity index 89% rename from openai_agents/run_hello_world_workflow.py rename to openai_agents/basic/run_hello_world_workflow.py index 9ec8833f..412dbb58 100644 --- a/openai_agents/run_hello_world_workflow.py +++ b/openai_agents/basic/run_hello_world_workflow.py @@ -3,7 +3,7 @@ from temporalio.client import Client from temporalio.contrib.openai_agents import OpenAIAgentsPlugin -from openai_agents.workflows.hello_world_workflow import HelloWorldAgent +from openai_agents.basic.workflows.hello_world_workflow import HelloWorldAgent async def main(): diff --git a/openai_agents/run_tools_workflow.py b/openai_agents/basic/run_tools_workflow.py similarity index 89% rename from openai_agents/run_tools_workflow.py rename to openai_agents/basic/run_tools_workflow.py index cfb3d811..eb6adcc1 100644 --- a/openai_agents/run_tools_workflow.py +++ b/openai_agents/basic/run_tools_workflow.py @@ -3,7 +3,7 @@ from temporalio.client import Client from temporalio.contrib.openai_agents import OpenAIAgentsPlugin -from openai_agents.workflows.tools_workflow import ToolsWorkflow +from openai_agents.basic.workflows.tools_workflow import ToolsWorkflow async def main(): diff --git a/openai_agents/run_worker.py b/openai_agents/basic/run_worker.py similarity index 58% rename from openai_agents/run_worker.py rename to openai_agents/basic/run_worker.py index b17fbd0f..ec94e907 100644 --- a/openai_agents/run_worker.py +++ b/openai_agents/basic/run_worker.py @@ -7,12 +7,9 @@ from temporalio.contrib.openai_agents import ModelActivityParameters, OpenAIAgentsPlugin from temporalio.worker import Worker -from openai_agents.workflows.agents_as_tools_workflow import AgentsAsToolsWorkflow -from openai_agents.workflows.customer_service_workflow import CustomerServiceWorkflow -from openai_agents.workflows.get_weather_activity import get_weather -from openai_agents.workflows.hello_world_workflow import HelloWorldAgent -from openai_agents.workflows.research_bot_workflow import ResearchWorkflow -from openai_agents.workflows.tools_workflow import ToolsWorkflow +from openai_agents.basic.activities.get_weather_activity import get_weather +from openai_agents.basic.workflows.hello_world_workflow import HelloWorldAgent +from openai_agents.basic.workflows.tools_workflow import ToolsWorkflow async def main(): @@ -22,7 +19,7 @@ async def main(): plugins=[ OpenAIAgentsPlugin( model_params=ModelActivityParameters( - start_to_close_timeout=timedelta(seconds=120) + start_to_close_timeout=timedelta(seconds=30) ) ), ], @@ -34,9 +31,6 @@ async def main(): workflows=[ HelloWorldAgent, ToolsWorkflow, - ResearchWorkflow, - CustomerServiceWorkflow, - AgentsAsToolsWorkflow, ], activities=[ get_weather, diff --git a/openai_agents/workflows/hello_world_workflow.py b/openai_agents/basic/workflows/hello_world_workflow.py similarity index 100% rename from 
openai_agents/workflows/hello_world_workflow.py rename to openai_agents/basic/workflows/hello_world_workflow.py diff --git a/openai_agents/workflows/tools_workflow.py b/openai_agents/basic/workflows/tools_workflow.py similarity index 90% rename from openai_agents/workflows/tools_workflow.py rename to openai_agents/basic/workflows/tools_workflow.py index c9f80e9f..70964dc0 100644 --- a/openai_agents/workflows/tools_workflow.py +++ b/openai_agents/basic/workflows/tools_workflow.py @@ -6,7 +6,7 @@ from temporalio import workflow from temporalio.contrib import openai_agents as temporal_agents -from openai_agents.workflows.get_weather_activity import get_weather +from openai_agents.basic.activities.get_weather_activity import get_weather @workflow.defn diff --git a/openai_agents/workflows/customer_service.py b/openai_agents/customer_service/customer_service.py similarity index 100% rename from openai_agents/workflows/customer_service.py rename to openai_agents/customer_service/customer_service.py diff --git a/openai_agents/run_customer_service_client.py b/openai_agents/customer_service/run_customer_service_client.py similarity index 96% rename from openai_agents/run_customer_service_client.py rename to openai_agents/customer_service/run_customer_service_client.py index ef8aaf62..e66419e4 100644 --- a/openai_agents/run_customer_service_client.py +++ b/openai_agents/customer_service/run_customer_service_client.py @@ -10,7 +10,7 @@ from temporalio.contrib.openai_agents import OpenAIAgentsPlugin from temporalio.service import RPCError, RPCStatusCode -from openai_agents.workflows.customer_service_workflow import ( +from openai_agents.customer_service.workflows.customer_service_workflow import ( CustomerServiceWorkflow, ProcessUserMessageInput, ) diff --git a/openai_agents/customer_service/run_worker.py b/openai_agents/customer_service/run_worker.py new file mode 100644 index 00000000..4062148e --- /dev/null +++ b/openai_agents/customer_service/run_worker.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +import asyncio +from datetime import timedelta + +from temporalio.client import Client +from temporalio.contrib.openai_agents import ModelActivityParameters, OpenAIAgentsPlugin +from temporalio.worker import Worker + +from openai_agents.customer_service.workflows.customer_service_workflow import ( + CustomerServiceWorkflow, +) + + +async def main(): + # Create client connected to server at the given address + client = await Client.connect( + "localhost:7233", + plugins=[ + OpenAIAgentsPlugin( + model_params=ModelActivityParameters( + start_to_close_timeout=timedelta(seconds=120) + ) + ), + ], + ) + + worker = Worker( + client, + task_queue="openai-agents-task-queue", + workflows=[ + CustomerServiceWorkflow, + ], + ) + await worker.run() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/openai_agents/workflows/customer_service_workflow.py b/openai_agents/customer_service/workflows/customer_service_workflow.py similarity index 98% rename from openai_agents/workflows/customer_service_workflow.py rename to openai_agents/customer_service/workflows/customer_service_workflow.py index 3186c5ab..c816d868 100644 --- a/openai_agents/workflows/customer_service_workflow.py +++ b/openai_agents/customer_service/workflows/customer_service_workflow.py @@ -14,7 +14,7 @@ ) from temporalio import workflow -from openai_agents.workflows.customer_service import ( +from openai_agents.customer_service.customer_service import ( AirlineAgentContext, ProcessUserMessageInput, init_agents, diff --git 
a/openai_agents/workflows/research_agents/planner_agent.py b/openai_agents/research_bot/agents/planner_agent.py similarity index 100% rename from openai_agents/workflows/research_agents/planner_agent.py rename to openai_agents/research_bot/agents/planner_agent.py diff --git a/openai_agents/workflows/research_agents/research_manager.py b/openai_agents/research_bot/agents/research_manager.py similarity index 91% rename from openai_agents/workflows/research_agents/research_manager.py rename to openai_agents/research_bot/agents/research_manager.py index 356da1d7..62a77a07 100644 --- a/openai_agents/workflows/research_agents/research_manager.py +++ b/openai_agents/research_bot/agents/research_manager.py @@ -8,13 +8,13 @@ # TODO: Restore progress updates from agents import RunConfig, Runner, custom_span, trace - from openai_agents.workflows.research_agents.planner_agent import ( + from openai_agents.research_bot.agents.planner_agent import ( WebSearchItem, WebSearchPlan, new_planner_agent, ) - from openai_agents.workflows.research_agents.search_agent import new_search_agent - from openai_agents.workflows.research_agents.writer_agent import ( + from openai_agents.research_bot.agents.search_agent import new_search_agent + from openai_agents.research_bot.agents.writer_agent import ( ReportData, new_writer_agent, ) diff --git a/openai_agents/workflows/research_agents/search_agent.py b/openai_agents/research_bot/agents/search_agent.py similarity index 100% rename from openai_agents/workflows/research_agents/search_agent.py rename to openai_agents/research_bot/agents/search_agent.py diff --git a/openai_agents/workflows/research_agents/writer_agent.py b/openai_agents/research_bot/agents/writer_agent.py similarity index 100% rename from openai_agents/workflows/research_agents/writer_agent.py rename to openai_agents/research_bot/agents/writer_agent.py diff --git a/openai_agents/run_research_workflow.py b/openai_agents/research_bot/run_research_workflow.py similarity index 88% rename from openai_agents/run_research_workflow.py rename to openai_agents/research_bot/run_research_workflow.py index 5279648e..e2739ef5 100644 --- a/openai_agents/run_research_workflow.py +++ b/openai_agents/research_bot/run_research_workflow.py @@ -3,7 +3,7 @@ from temporalio.client import Client from temporalio.contrib.openai_agents import OpenAIAgentsPlugin -from openai_agents.workflows.research_bot_workflow import ResearchWorkflow +from openai_agents.research_bot.workflows.research_bot_workflow import ResearchWorkflow async def main(): diff --git a/openai_agents/research_bot/run_worker.py b/openai_agents/research_bot/run_worker.py new file mode 100644 index 00000000..fd6827d6 --- /dev/null +++ b/openai_agents/research_bot/run_worker.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +import asyncio +from datetime import timedelta + +from temporalio.client import Client +from temporalio.contrib.openai_agents import ModelActivityParameters, OpenAIAgentsPlugin +from temporalio.worker import Worker + +from openai_agents.research_bot.workflows.research_bot_workflow import ResearchWorkflow + + +async def main(): + # Create client connected to server at the given address + client = await Client.connect( + "localhost:7233", + plugins=[ + OpenAIAgentsPlugin( + model_params=ModelActivityParameters( + start_to_close_timeout=timedelta(seconds=120) + ) + ), + ], + ) + + worker = Worker( + client, + task_queue="openai-agents-task-queue", + workflows=[ + ResearchWorkflow, + ], + ) + await worker.run() + + +if __name__ == "__main__": + 
asyncio.run(main()) diff --git a/openai_agents/workflows/research_bot_workflow.py b/openai_agents/research_bot/workflows/research_bot_workflow.py similarity index 68% rename from openai_agents/workflows/research_bot_workflow.py rename to openai_agents/research_bot/workflows/research_bot_workflow.py index c0779c02..c2523c8c 100644 --- a/openai_agents/workflows/research_bot_workflow.py +++ b/openai_agents/research_bot/workflows/research_bot_workflow.py @@ -1,6 +1,6 @@ from temporalio import workflow -from openai_agents.workflows.research_agents.research_manager import ResearchManager +from openai_agents.research_bot.agents.research_manager import ResearchManager @workflow.defn diff --git a/openai_agents/workflows/__init__.py b/openai_agents/workflows/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/openai_agents/workflows/research_agents/__init__.py b/openai_agents/workflows/research_agents/__init__.py deleted file mode 100644 index e69de29b..00000000 From 815c14be146a479e36d7c68dad4211413639b1be Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 11:22:29 -0700 Subject: [PATCH 07/15] update README files --- openai_agents/README.md | 66 ++++------------------- openai_agents/agent_patterns/README.md | 68 ++++++++++++++++++++++++ openai_agents/basic/README.md | 25 +++++++++ openai_agents/customer_service/README.md | 21 ++++++++ openai_agents/research_bot/README.md | 35 ++++++++++++ 5 files changed, 159 insertions(+), 56 deletions(-) create mode 100644 openai_agents/agent_patterns/README.md create mode 100644 openai_agents/basic/README.md create mode 100644 openai_agents/customer_service/README.md create mode 100644 openai_agents/research_bot/README.md diff --git a/openai_agents/README.md b/openai_agents/README.md index 84af1e82..4eba715a 100644 --- a/openai_agents/README.md +++ b/openai_agents/README.md @@ -21,66 +21,20 @@ This approach ensures that AI agent workflows are durable, observable, and can h - Required dependencies installed via `uv sync --group openai-agents` - OpenAI API key set as environment variable: `export OPENAI_API_KEY=your_key_here` -## Running the Examples +## Examples -1. **Start the worker** (supports all samples): - ```bash - uv run openai_agents/basic/run_worker.py - ``` +Each directory contains a complete example with its own README for detailed instructions: -2. **Run individual samples** in separate terminals: +### [Basic Examples](./basic/README.md) +Simple agent examples including a hello world agent and a tools-enabled agent that can access external APIs like weather services. -### Basic Agent Examples +### [Agent Patterns](./agent_patterns/README.md) +Advanced patterns for agent composition, including using agents as tools within other agents. -- **Hello World Agent** - Simple agent that responds in haikus: - ```bash - uv run openai_agents/basic/run_hello_world_workflow.py - ``` +### [Research Bot](./research_bot/README.md) +Multi-agent research system with specialized roles: a planner agent, search agent, and writer agent working together to conduct comprehensive research. 
-- **Tools Agent** - Agent with access to external tools (weather API): - ```bash - uv run openai_agents/basic/run_tools_workflow.py - ``` - -### Agent Patterns - -Start the worker: - - ```bash - uv run openai_agents/agent_patterns/run_worker.py - ``` - -- **Agents as Tools** - Demonstrate using agents as tools within other agents: - - ```bash - uv run openai_agents/agent_patterns/run_agents_as_tools_workflow.py - ``` - -### Advanced Multi-Agent Examples - -- **Research Workflow** - Multi-agent research system with specialized roles: - - Start the worker: - - ```bash - uv run openai_agents/research_bot/run_worker.py - ``` - - ```bash - uv run openai_agents/research_bot/run_research_workflow.py - ``` - Features a planner agent, search agent, and writer agent working together. - -- **Customer Service Workflow** - Customer service agent with escalation capabilities (interactive): - - Start the worker: - - ```bash - uv run openai_agents/customer_service/run_worker.py - ``` - - ```bash - uv run openai_agents/customer_service/run_customer_service_client.py --conversation-id my-conversation-123 - ``` +### [Customer Service](./customer_service/README.md) +Interactive customer service agent with escalation capabilities, demonstrating conversational workflows. diff --git a/openai_agents/agent_patterns/README.md b/openai_agents/agent_patterns/README.md new file mode 100644 index 00000000..26867f10 --- /dev/null +++ b/openai_agents/agent_patterns/README.md @@ -0,0 +1,68 @@ +# Agent Patterns + +Common agentic patterns extended with Temporal's durable execution capabilities. + +*Adapted from [OpenAI Agents SDK agent patterns](https://github.com/openai/openai-agents-python/tree/main/examples/agent_patterns)* + +## Running the Examples + +First, start the worker (supports all patterns): +```bash +uv run openai_agents/agent_patterns/run_worker.py +``` + +Then run individual examples in separate terminals: + +## Deterministic Flows + +**TODO** + +A common tactic is to break down a task into a series of smaller steps. Each task can be performed by an agent, and the output of one agent is used as input to the next. For example, if your task was to generate a story, you could break it down into the following steps: + +1. Generate an outline +2. Generate the story +3. Generate the ending + +Each of these steps can be performed by an agent. The output of one agent is used as input to the next. + +## Handoffs and Routing + +**TODO** + +In many situations, you have specialized sub-agents that handle specific tasks. You can use handoffs to route the task to the right agent. + +For example, you might have a frontline agent that receives a request, and then hands off to a specialized agent based on the language of the request. + +## Agents as Tools + +The mental model for handoffs is that the new agent "takes over". It sees the previous conversation history, and owns the conversation from that point onwards. However, this is not the only way to use agents. You can also use agents as a tool - the tool agent goes off and runs on its own, and then returns the result to the original agent. + +For example, you could model a translation task as tool calls instead: rather than handing over to the language-specific agent, you could call the agent as a tool, and then use the result in the next step. This enables things like translating multiple languages at once. 
+ +```bash +uv run openai_agents/agent_patterns/run_agents_as_tools_workflow.py +``` + +## LLM-as-a-Judge + +**TODO** + +LLMs can often improve the quality of their output if given feedback. A common pattern is to generate a response using a model, and then use a second model to provide feedback. You can even use a small model for the initial generation and a larger model for the feedback, to optimize cost. + +For example, you could use an LLM to generate an outline for a story, and then use a second LLM to evaluate the outline and provide feedback. You can then use the feedback to improve the outline, and repeat until the LLM is satisfied with the outline. + +## Parallelization + +**TODO** + +Running multiple agents in parallel is a common pattern. This can be useful for both latency (e.g. if you have multiple steps that don't depend on each other) and also for other reasons e.g. generating multiple responses and picking the best one. + +## Guardrails + +**TODO** + +Related to parallelization, you often want to run input guardrails to make sure the inputs to your agents are valid. For example, if you have a customer support agent, you might want to make sure that the user isn't trying to ask for help with a math problem. + +You can definitely do this without any special Agents SDK features by using parallelization, but we support a special guardrail primitive. Guardrails can have a "tripwire" - if the tripwire is triggered, the agent execution will immediately stop and a `GuardrailTripwireTriggered` exception will be raised. + +This is really useful for latency: for example, you might have a very fast model that runs the guardrail and a slow model that runs the actual agent. You wouldn't want to wait for the slow model to finish, so guardrails let you quickly reject invalid inputs. \ No newline at end of file diff --git a/openai_agents/basic/README.md b/openai_agents/basic/README.md new file mode 100644 index 00000000..56d9f528 --- /dev/null +++ b/openai_agents/basic/README.md @@ -0,0 +1,25 @@ +# Basic Agent Examples + +Simple examples to get started with OpenAI Agents SDK integrated with Temporal workflows. + +*Adapted from [OpenAI Agents SDK basic examples](https://github.com/openai/openai-agents-python/tree/main/examples/basic)* + +## Running the Examples + +First, start the worker (supports all basic examples): +```bash +uv run openai_agents/basic/run_worker.py +``` + +Then run individual examples in separate terminals: + +### Hello World Agent +```bash +uv run openai_agents/basic/run_hello_world_workflow.py +``` + +### Tools Agent +Agent with access to external tools (weather API): +```bash +uv run openai_agents/basic/run_tools_workflow.py +``` \ No newline at end of file diff --git a/openai_agents/customer_service/README.md b/openai_agents/customer_service/README.md new file mode 100644 index 00000000..34b9504d --- /dev/null +++ b/openai_agents/customer_service/README.md @@ -0,0 +1,21 @@ +# Customer Service + +Interactive customer service agent with escalation capabilities, extended with Temporal's durable conversational workflows. + +*Adapted from [OpenAI Agents SDK customer service](https://github.com/openai/openai-agents-python/tree/main/examples/customer_service)* + +This example demonstrates how to build persistent, stateful conversations where each conversation maintains state across multiple interactions and can survive system restarts and failures. 
+ +## Running the Example + +First, start the worker: +```bash +uv run openai_agents/customer_service/run_worker.py +``` + +Then start a customer service conversation: +```bash +uv run openai_agents/customer_service/run_customer_service_client.py --conversation-id my-conversation-123 +``` + +You can start a new conversation with any unique conversation ID, or resume existing conversations by using the same conversation ID. The conversation state is persisted in the Temporal workflow, allowing you to resume conversations even after restarting the client. \ No newline at end of file diff --git a/openai_agents/research_bot/README.md b/openai_agents/research_bot/README.md new file mode 100644 index 00000000..f4f4a074 --- /dev/null +++ b/openai_agents/research_bot/README.md @@ -0,0 +1,35 @@ +# Research Bot + +Multi-agent research system with specialized roles, extended with Temporal's durable execution. + +*Adapted from [OpenAI Agents SDK research bot](https://github.com/openai/openai-agents-python/tree/main/examples/research_bot)* + +## Architecture + +The flow is: + +1. User enters their research topic +2. `planner_agent` comes up with a plan to search the web for information. The plan is a list of search queries, with a search term and a reason for each query. +3. For each search item, we run a `search_agent`, which uses the Web Search tool to search for that term and summarize the results. These all run in parallel. +4. Finally, the `writer_agent` receives the search summaries, and creates a written report. + +## Running the Example + +First, start the worker: +```bash +uv run openai_agents/research_bot/run_worker.py +``` + +Then run the research workflow: +```bash +uv run openai_agents/research_bot/run_research_workflow.py +``` + +## Suggested Improvements + +If you're building your own research bot, some ideas to add to this are: + +1. Retrieval: Add support for fetching relevant information from a vector store. You could use the File Search tool for this. +2. Image and file upload: Allow users to attach PDFs or other files, as baseline context for the research. +3. More planning and thinking: Models often produce better results given more time to think. Improve the planning process to come up with a better plan, and add an evaluation step so that the model can choose to improve its results, search for more stuff, etc. +4. Code execution: Allow running code, which is useful for data analysis. \ No newline at end of file From 6c9213f07c8c56b0d40ef3a95002064f9987f70a Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 11:27:22 -0700 Subject: [PATCH 08/15] formatting update --- openai_agents/README.md | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/openai_agents/README.md b/openai_agents/README.md index 4eba715a..72433b1a 100644 --- a/openai_agents/README.md +++ b/openai_agents/README.md @@ -25,16 +25,12 @@ This approach ensures that AI agent workflows are durable, observable, and can h Each directory contains a complete example with its own README for detailed instructions: -### [Basic Examples](./basic/README.md) -Simple agent examples including a hello world agent and a tools-enabled agent that can access external APIs like weather services. +- **[Basic Examples](./basic/README.md)** - Simple agent examples including a hello world agent and a tools-enabled agent that can access external APIs like weather services. 
-### [Agent Patterns](./agent_patterns/README.md) -Advanced patterns for agent composition, including using agents as tools within other agents. +- **[Agent Patterns](./agent_patterns/README.md)** - Advanced patterns for agent composition, including using agents as tools within other agents. -### [Research Bot](./research_bot/README.md) -Multi-agent research system with specialized roles: a planner agent, search agent, and writer agent working together to conduct comprehensive research. +- **[Research Bot](./research_bot/README.md)** - Multi-agent research system with specialized roles: a planner agent, search agent, and writer agent working together to conduct comprehensive research. -### [Customer Service](./customer_service/README.md) -Interactive customer service agent with escalation capabilities, demonstrating conversational workflows. +- **[Customer Service](./customer_service/README.md)** - Interactive customer service agent with escalation capabilities, demonstrating conversational workflows. From 923a91961a0493f35bc1da067e05bd0099351453 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 11:27:58 -0700 Subject: [PATCH 09/15] formatting --- openai_agents/README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/openai_agents/README.md b/openai_agents/README.md index 72433b1a..96975ec2 100644 --- a/openai_agents/README.md +++ b/openai_agents/README.md @@ -26,11 +26,8 @@ This approach ensures that AI agent workflows are durable, observable, and can h Each directory contains a complete example with its own README for detailed instructions: - **[Basic Examples](./basic/README.md)** - Simple agent examples including a hello world agent and a tools-enabled agent that can access external APIs like weather services. - - **[Agent Patterns](./agent_patterns/README.md)** - Advanced patterns for agent composition, including using agents as tools within other agents. - - **[Research Bot](./research_bot/README.md)** - Multi-agent research system with specialized roles: a planner agent, search agent, and writer agent working together to conduct comprehensive research. - - **[Customer Service](./customer_service/README.md)** - Interactive customer service agent with escalation capabilities, demonstrating conversational workflows. 
From 396fb5c8714db053e572bcdcb1099f247c5595e5 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 11:32:32 -0700 Subject: [PATCH 10/15] timeout adjustments --- openai_agents/agent_patterns/run_worker.py | 2 +- openai_agents/customer_service/run_worker.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/openai_agents/agent_patterns/run_worker.py b/openai_agents/agent_patterns/run_worker.py index 3dc336f1..3ca0eda2 100644 --- a/openai_agents/agent_patterns/run_worker.py +++ b/openai_agents/agent_patterns/run_worker.py @@ -19,7 +19,7 @@ async def main(): plugins=[ OpenAIAgentsPlugin( model_params=ModelActivityParameters( - start_to_close_timeout=timedelta(seconds=120) + start_to_close_timeout=timedelta(seconds=30) ) ), ], diff --git a/openai_agents/customer_service/run_worker.py b/openai_agents/customer_service/run_worker.py index 4062148e..b82f6919 100644 --- a/openai_agents/customer_service/run_worker.py +++ b/openai_agents/customer_service/run_worker.py @@ -19,7 +19,7 @@ async def main(): plugins=[ OpenAIAgentsPlugin( model_params=ModelActivityParameters( - start_to_close_timeout=timedelta(seconds=120) + start_to_close_timeout=timedelta(seconds=30) ) ), ], From 5e4e4864a22d672cbba38c3caad54fb5463ab6b1 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 16:18:25 -0700 Subject: [PATCH 11/15] Port financial research example and reasoning_content example --- .../financial_research_agent/README.md | 61 ++++++++ .../agents/financials_agent.py | 23 +++ .../agents/planner_agent.py | 35 +++++ .../agents/risk_agent.py | 22 +++ .../agents/search_agent.py | 20 +++ .../agents/verifier_agent.py | 27 ++++ .../agents/writer_agent.py | 34 ++++ .../financial_research_manager.py | 145 ++++++++++++++++++ .../run_financial_research_workflow.py | 40 +++++ .../financial_research_agent/run_worker.py | 32 ++++ .../workflows/financial_research_workflow.py | 13 ++ openai_agents/reasoning_content/README.md | 37 +++++ .../activities/reasoning_activities.py | 52 +++++++ .../run_reasoning_content_workflow.py | 54 +++++++ openai_agents/reasoning_content/run_worker.py | 37 +++++ .../workflows/reasoning_content_workflow.py | 32 ++++ 16 files changed, 664 insertions(+) create mode 100644 openai_agents/financial_research_agent/README.md create mode 100644 openai_agents/financial_research_agent/agents/financials_agent.py create mode 100644 openai_agents/financial_research_agent/agents/planner_agent.py create mode 100644 openai_agents/financial_research_agent/agents/risk_agent.py create mode 100644 openai_agents/financial_research_agent/agents/search_agent.py create mode 100644 openai_agents/financial_research_agent/agents/verifier_agent.py create mode 100644 openai_agents/financial_research_agent/agents/writer_agent.py create mode 100644 openai_agents/financial_research_agent/financial_research_manager.py create mode 100644 openai_agents/financial_research_agent/run_financial_research_workflow.py create mode 100644 openai_agents/financial_research_agent/run_worker.py create mode 100644 openai_agents/financial_research_agent/workflows/financial_research_workflow.py create mode 100644 openai_agents/reasoning_content/README.md create mode 100644 openai_agents/reasoning_content/activities/reasoning_activities.py create mode 100644 openai_agents/reasoning_content/run_reasoning_content_workflow.py create mode 100644 openai_agents/reasoning_content/run_worker.py create mode 100644 openai_agents/reasoning_content/workflows/reasoning_content_workflow.py diff --git 
a/openai_agents/financial_research_agent/README.md b/openai_agents/financial_research_agent/README.md new file mode 100644 index 00000000..fed8e5b2 --- /dev/null +++ b/openai_agents/financial_research_agent/README.md @@ -0,0 +1,61 @@ +# Financial Research Agent + +Multi-agent financial research system with specialized roles, extended with Temporal's durable execution. + +*Adapted from [OpenAI Agents SDK financial research agent](https://github.com/openai/openai-agents-python/tree/main/examples/financial_research_agent)* + +## Architecture + +This example shows how you might compose a richer financial research agent using the Agents SDK. The pattern is similar to the `research_bot` example, but with more specialized sub-agents and a verification step. + +The flow is: + +1. **Planning**: A planner agent turns the end user's request into a list of search terms relevant to financial analysis – recent news, earnings calls, corporate filings, industry commentary, etc. +2. **Search**: A search agent uses the built-in `WebSearchTool` to retrieve terse summaries for each search term. (You could also add `FileSearchTool` if you have indexed PDFs or 10-Ks.) +3. **Sub-analysts**: Additional agents (e.g. a fundamentals analyst and a risk analyst) are exposed as tools so the writer can call them inline and incorporate their outputs. +4. **Writing**: A senior writer agent brings together the search snippets and any sub-analyst summaries into a long-form markdown report plus a short executive summary. +5. **Verification**: A final verifier agent audits the report for obvious inconsistencies or missing sourcing. + +## Running the Example + +First, start the worker: +```bash +uv run openai_agents/financial_research_agent/run_worker.py +``` + +Then run the financial research workflow: +```bash +uv run openai_agents/financial_research_agent/run_financial_research_workflow.py +``` + +Enter a query like: +``` +Write up an analysis of Apple Inc.'s most recent quarter. +``` + +You can also just hit enter to run this query, which is provided as the default. + +## Components + +### Agents + +- **Planner Agent**: Creates a search plan with 5-15 relevant search terms +- **Search Agent**: Uses web search to gather financial information +- **Financials Agent**: Analyzes company fundamentals (revenue, profit, margins) +- **Risk Agent**: Identifies potential red flags and risk factors +- **Writer Agent**: Synthesizes information into a comprehensive report +- **Verifier Agent**: Audits the final report for consistency and accuracy + +### Writer Agent Tools + +The writer agent has access to tools that invoke the specialist analysts: +- `fundamentals_analysis`: Get financial performance analysis +- `risk_analysis`: Get risk factor assessment + +## Temporal Integration + +The example demonstrates several Temporal patterns: +- Durable execution of multi-step research workflows +- Parallel execution of web searches using `asyncio.create_task` +- Use of `workflow.as_completed` for handling concurrent tasks +- Proper import handling with `workflow.unsafe.imports_passed_through()` diff --git a/openai_agents/financial_research_agent/agents/financials_agent.py b/openai_agents/financial_research_agent/agents/financials_agent.py new file mode 100644 index 00000000..72a2be95 --- /dev/null +++ b/openai_agents/financial_research_agent/agents/financials_agent.py @@ -0,0 +1,23 @@ +from agents import Agent +from pydantic import BaseModel + +# A sub-agent focused on analyzing a company's fundamentals. 
+FINANCIALS_PROMPT = ( + "You are a financial analyst focused on company fundamentals such as revenue, " + "profit, margins and growth trajectory. Given a collection of web (and optional file) " + "search results about a company, write a concise analysis of its recent financial " + "performance. Pull out key metrics or quotes. Keep it under 2 paragraphs." +) + + +class AnalysisSummary(BaseModel): + summary: str + """Short text summary for this aspect of the analysis.""" + + +def new_financials_agent() -> Agent: + return Agent( + name="FundamentalsAnalystAgent", + instructions=FINANCIALS_PROMPT, + output_type=AnalysisSummary, + ) diff --git a/openai_agents/financial_research_agent/agents/planner_agent.py b/openai_agents/financial_research_agent/agents/planner_agent.py new file mode 100644 index 00000000..8c7ffcb9 --- /dev/null +++ b/openai_agents/financial_research_agent/agents/planner_agent.py @@ -0,0 +1,35 @@ +from agents import Agent +from pydantic import BaseModel + +# Generate a plan of searches to ground the financial analysis. +# For a given financial question or company, we want to search for +# recent news, official filings, analyst commentary, and other +# relevant background. +PROMPT = ( + "You are a financial research planner. Given a request for financial analysis, " + "produce a set of web searches to gather the context needed. Aim for recent " + "headlines, earnings calls or 10-K snippets, analyst commentary, and industry background. " + "Output between 5 and 15 search terms to query for." +) + + +class FinancialSearchItem(BaseModel): + reason: str + """Your reasoning for why this search is relevant.""" + + query: str + """The search term to feed into a web (or file) search.""" + + +class FinancialSearchPlan(BaseModel): + searches: list[FinancialSearchItem] + """A list of searches to perform.""" + + +def new_planner_agent() -> Agent: + return Agent( + name="FinancialPlannerAgent", + instructions=PROMPT, + model="o3-mini", + output_type=FinancialSearchPlan, + ) diff --git a/openai_agents/financial_research_agent/agents/risk_agent.py b/openai_agents/financial_research_agent/agents/risk_agent.py new file mode 100644 index 00000000..c73e94ef --- /dev/null +++ b/openai_agents/financial_research_agent/agents/risk_agent.py @@ -0,0 +1,22 @@ +from agents import Agent +from pydantic import BaseModel + +# A sub-agent specializing in identifying risk factors or concerns. +RISK_PROMPT = ( + "You are a risk analyst looking for potential red flags in a company's outlook. " + "Given background research, produce a short analysis of risks such as competitive threats, " + "regulatory issues, supply chain problems, or slowing growth. Keep it under 2 paragraphs." +) + + +class AnalysisSummary(BaseModel): + summary: str + """Short text summary for this aspect of the analysis.""" + + +def new_risk_agent() -> Agent: + return Agent( + name="RiskAnalystAgent", + instructions=RISK_PROMPT, + output_type=AnalysisSummary, + ) diff --git a/openai_agents/financial_research_agent/agents/search_agent.py b/openai_agents/financial_research_agent/agents/search_agent.py new file mode 100644 index 00000000..e40e357e --- /dev/null +++ b/openai_agents/financial_research_agent/agents/search_agent.py @@ -0,0 +1,20 @@ +from agents import Agent, WebSearchTool +from agents.model_settings import ModelSettings + +# Given a search term, use web search to pull back a brief summary. +# Summaries should be concise but capture the main financial points. 
+INSTRUCTIONS = ( + "You are a research assistant specializing in financial topics. " + "Given a search term, use web search to retrieve up-to-date context and " + "produce a short summary of at most 300 words. Focus on key numbers, events, " + "or quotes that will be useful to a financial analyst." +) + + +def new_search_agent() -> Agent: + return Agent( + name="FinancialSearchAgent", + instructions=INSTRUCTIONS, + tools=[WebSearchTool()], + model_settings=ModelSettings(tool_choice="required"), + ) diff --git a/openai_agents/financial_research_agent/agents/verifier_agent.py b/openai_agents/financial_research_agent/agents/verifier_agent.py new file mode 100644 index 00000000..9d3f0a01 --- /dev/null +++ b/openai_agents/financial_research_agent/agents/verifier_agent.py @@ -0,0 +1,27 @@ +from agents import Agent +from pydantic import BaseModel + +# Agent to sanity-check a synthesized report for consistency and recall. +# This can be used to flag potential gaps or obvious mistakes. +VERIFIER_PROMPT = ( + "You are a meticulous auditor. You have been handed a financial analysis report. " + "Your job is to verify the report is internally consistent, clearly sourced, and makes " + "no unsupported claims. Point out any issues or uncertainties." +) + + +class VerificationResult(BaseModel): + verified: bool + """Whether the report seems coherent and plausible.""" + + issues: str + """If not verified, describe the main issues or concerns.""" + + +def new_verifier_agent() -> Agent: + return Agent( + name="VerificationAgent", + instructions=VERIFIER_PROMPT, + model="gpt-4o", + output_type=VerificationResult, + ) diff --git a/openai_agents/financial_research_agent/agents/writer_agent.py b/openai_agents/financial_research_agent/agents/writer_agent.py new file mode 100644 index 00000000..4f38c1c9 --- /dev/null +++ b/openai_agents/financial_research_agent/agents/writer_agent.py @@ -0,0 +1,34 @@ +from agents import Agent +from pydantic import BaseModel + +# Writer agent brings together the raw search results and optionally calls out +# to sub-analyst tools for specialized commentary, then returns a cohesive markdown report. +WRITER_PROMPT = ( + "You are a senior financial analyst. You will be provided with the original query and " + "a set of raw search summaries. Your task is to synthesize these into a long-form markdown " + "report (at least several paragraphs) including a short executive summary and follow-up " + "questions. If needed, you can call the available analysis tools (e.g. fundamentals_analysis, " + "risk_analysis) to get short specialist write-ups to incorporate." +) + + +class FinancialReportData(BaseModel): + short_summary: str + """A short 2-3 sentence executive summary.""" + + markdown_report: str + """The full markdown report.""" + + follow_up_questions: list[str] + """Suggested follow-up questions for further research.""" + + +# Note: We will attach tools to specialist analyst agents at runtime in the manager. +# This shows how an agent can use tools to delegate to specialized subagents. 
+def new_writer_agent() -> Agent: + return Agent( + name="FinancialWriterAgent", + instructions=WRITER_PROMPT, + model="gpt-4.1-mini", + output_type=FinancialReportData, + ) diff --git a/openai_agents/financial_research_agent/financial_research_manager.py b/openai_agents/financial_research_agent/financial_research_manager.py new file mode 100644 index 00000000..8405c75e --- /dev/null +++ b/openai_agents/financial_research_agent/financial_research_manager.py @@ -0,0 +1,145 @@ +from __future__ import annotations + +import asyncio +from collections.abc import Sequence + +from temporalio import workflow + +from agents import RunConfig, Runner, RunResult, custom_span, trace + +from openai_agents.financial_research_agent.agents.financials_agent import ( + new_financials_agent, +) +from openai_agents.financial_research_agent.agents.planner_agent import ( + FinancialSearchItem, + FinancialSearchPlan, + new_planner_agent, +) +from openai_agents.financial_research_agent.agents.risk_agent import new_risk_agent +from openai_agents.financial_research_agent.agents.search_agent import ( + new_search_agent, +) +from openai_agents.financial_research_agent.agents.verifier_agent import ( + VerificationResult, + new_verifier_agent, +) +from openai_agents.financial_research_agent.agents.writer_agent import ( + FinancialReportData, + new_writer_agent, +) + + +async def _summary_extractor(run_result: RunResult) -> str: + """Custom output extractor for sub-agents that return an AnalysisSummary.""" + # The financial/risk analyst agents emit an AnalysisSummary with a `summary` field. + # We want the tool call to return just that summary text so the writer can drop it inline. + return str(run_result.final_output.summary) + + +class FinancialResearchManager: + """ + Orchestrates the full flow: planning, searching, sub-analysis, writing, and verification. 
+ """ + + def __init__(self) -> None: + self.run_config = RunConfig() + self.planner_agent = new_planner_agent() + self.search_agent = new_search_agent() + self.financials_agent = new_financials_agent() + self.risk_agent = new_risk_agent() + self.writer_agent = new_writer_agent() + self.verifier_agent = new_verifier_agent() + + async def run(self, query: str) -> str: + with trace("Financial research trace"): + search_plan = await self._plan_searches(query) + search_results = await self._perform_searches(search_plan) + report = await self._write_report(query, search_results) + verification = await self._verify_report(report) + + # Return formatted output + result = f"""=====REPORT===== + +{report.markdown_report} + +=====FOLLOW UP QUESTIONS===== + +{chr(10).join(report.follow_up_questions)} + +=====VERIFICATION===== + +Verified: {verification.verified} +Issues: {verification.issues}""" + + return result + + async def _plan_searches(self, query: str) -> FinancialSearchPlan: + result = await Runner.run( + self.planner_agent, + f"Query: {query}", + run_config=self.run_config, + ) + return result.final_output_as(FinancialSearchPlan) + + async def _perform_searches( + self, search_plan: FinancialSearchPlan + ) -> Sequence[str]: + with custom_span("Search the web"): + tasks = [ + asyncio.create_task(self._search(item)) for item in search_plan.searches + ] + results: list[str] = [] + for task in workflow.as_completed(tasks): + result = await task + if result is not None: + results.append(result) + return results + + async def _search(self, item: FinancialSearchItem) -> str | None: + input_data = f"Search term: {item.query}\nReason: {item.reason}" + try: + result = await Runner.run( + self.search_agent, + input_data, + run_config=self.run_config, + ) + return str(result.final_output) + except Exception: + return None + + async def _write_report( + self, query: str, search_results: Sequence[str] + ) -> FinancialReportData: + # Expose the specialist analysts as tools so the writer can invoke them inline + # and still produce the final FinancialReportData output. 
+ fundamentals_tool = self.financials_agent.as_tool( + tool_name="fundamentals_analysis", + tool_description="Use to get a short write-up of key financial metrics", + custom_output_extractor=_summary_extractor, + ) + risk_tool = self.risk_agent.as_tool( + tool_name="risk_analysis", + tool_description="Use to get a short write-up of potential red flags", + custom_output_extractor=_summary_extractor, + ) + writer_with_tools = self.writer_agent.clone( + tools=[fundamentals_tool, risk_tool] + ) + + input_data = ( + f"Original query: {query}\nSummarized search results: {search_results}" + ) + result = await Runner.run( + writer_with_tools, + input_data, + run_config=self.run_config, + ) + return result.final_output_as(FinancialReportData) + + async def _verify_report(self, report: FinancialReportData) -> VerificationResult: + result = await Runner.run( + self.verifier_agent, + report.markdown_report, + run_config=self.run_config, + ) + return result.final_output_as(VerificationResult) diff --git a/openai_agents/financial_research_agent/run_financial_research_workflow.py b/openai_agents/financial_research_agent/run_financial_research_workflow.py new file mode 100644 index 00000000..7105686b --- /dev/null +++ b/openai_agents/financial_research_agent/run_financial_research_workflow.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 + +import asyncio + +from temporalio.client import Client +from temporalio.contrib.openai_agents import OpenAIAgentsPlugin + +from openai_agents.financial_research_agent.workflows.financial_research_workflow import ( + FinancialResearchWorkflow, +) + + +async def main(): + # Get the query from user input + query = input("Enter a financial research query: ") + if not query.strip(): + query = "Write up an analysis of Apple Inc.'s most recent quarter." 
+        print(f"Using default query: {query}")
+
+    client = await Client.connect("localhost:7233",
+        plugins=[
+            OpenAIAgentsPlugin(),
+        ],
+    )
+
+    print(f"Starting financial research for: {query}")
+    print("This may take several minutes to complete...\n")
+
+    result = await client.execute_workflow(
+        FinancialResearchWorkflow.run,
+        query,
+        id=f"financial-research-{hash(query)}",
+        task_queue="financial-research-task-queue",
+    )
+
+    print(result)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/openai_agents/financial_research_agent/run_worker.py b/openai_agents/financial_research_agent/run_worker.py
new file mode 100644
index 00000000..e2439ccc
--- /dev/null
+++ b/openai_agents/financial_research_agent/run_worker.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+
+import asyncio
+
+from temporalio.client import Client
+from temporalio.worker import Worker
+
+from temporalio.contrib.openai_agents import OpenAIAgentsPlugin
+
+from openai_agents.financial_research_agent.workflows.financial_research_workflow import (
+    FinancialResearchWorkflow,
+)
+
+async def main():
+    client = await Client.connect("localhost:7233",
+        plugins=[
+            OpenAIAgentsPlugin(),
+        ],
+    )
+
+    worker = Worker(
+        client,
+        task_queue="financial-research-task-queue",
+        workflows=[FinancialResearchWorkflow],
+    )
+
+    print("Starting financial research worker...")
+    await worker.run()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/openai_agents/financial_research_agent/workflows/financial_research_workflow.py b/openai_agents/financial_research_agent/workflows/financial_research_workflow.py
new file mode 100644
index 00000000..487e3fd5
--- /dev/null
+++ b/openai_agents/financial_research_agent/workflows/financial_research_workflow.py
@@ -0,0 +1,13 @@
+from temporalio import workflow
+
+from openai_agents.financial_research_agent.financial_research_manager import (
+    FinancialResearchManager,
+)
+
+
+@workflow.defn
+class FinancialResearchWorkflow:
+    @workflow.run
+    async def run(self, query: str) -> str:
+        manager = FinancialResearchManager()
+        return await manager.run(query)
diff --git a/openai_agents/reasoning_content/README.md b/openai_agents/reasoning_content/README.md
new file mode 100644
index 00000000..c654d266
--- /dev/null
+++ b/openai_agents/reasoning_content/README.md
@@ -0,0 +1,37 @@
+# Reasoning Content
+
+Example demonstrating how to use the reasoning content feature with models that support it, running in the context of Temporal's durable execution.
+
+*Adapted from [OpenAI Agents SDK reasoning content](https://github.com/openai/openai-agents-python/tree/main/examples/reasoning_content)*
+
+## Overview
+
+Some models, like deepseek-reasoner, provide a reasoning_content field in addition to the regular content. This example shows how to access and use this reasoning content within Temporal workflows. The reasoning content contains the model's step-by-step thinking process before providing the final answer.
+
+## Architecture
+
+This example uses an activity to handle the OpenAI model calls. The workflow orchestrates the process by calling the `get_reasoning_response` activity, which uses the OpenAI provider to get a response from a reasoning-capable model and extracts both reasoning content and regular content.
+
+The model calls run in an activity rather than directly in the workflow because they involve I/O, which Temporal workflows cannot perform directly.
+ +## Running the Example + +First, start the worker: +```bash +uv run openai_agents/reasoning_content/run_worker.py +``` + +Then run the reasoning content workflow: +```bash +uv run openai_agents/reasoning_content/run_reasoning_content_workflow.py +``` + +## Requirements + +- Set your `OPENAI_API_KEY` environment variable +- Use a model that supports reasoning content (e.g., `deepseek-reasoner`) +- Optionally set `EXAMPLE_MODEL_NAME` environment variable to specify the model + +## Note on Streaming + +The original OpenAI Agents SDK example includes streaming capabilities, but since Temporal workflows do not support streaming yet, this example contains only the non-streaming approach. \ No newline at end of file diff --git a/openai_agents/reasoning_content/activities/reasoning_activities.py b/openai_agents/reasoning_content/activities/reasoning_activities.py new file mode 100644 index 00000000..b111a742 --- /dev/null +++ b/openai_agents/reasoning_content/activities/reasoning_activities.py @@ -0,0 +1,52 @@ +import os +from typing import Any, cast + +from agents import ModelSettings +from agents.models.interface import ModelTracing +from agents.models.openai_provider import OpenAIProvider +from openai.types.responses import ResponseOutputRefusal, ResponseOutputText +from temporalio import activity + + +@activity.defn +async def get_reasoning_response( + prompt: str, model_name: str | None = None +) -> tuple[str | None, str | None]: + """ + Activity to get response from a reasoning-capable model. + Returns tuple of (reasoning_content, regular_content). + """ + model_name = model_name or os.getenv("EXAMPLE_MODEL_NAME") or "deepseek-reasoner" + + provider = OpenAIProvider() + model = provider.get_model(model_name) + + response = await model.get_response( + system_instructions="You are a helpful assistant that explains your reasoning step by step.", + input=prompt, + model_settings=ModelSettings(), + tools=[], + output_schema=None, + handoffs=[], + tracing=ModelTracing.DISABLED, + previous_response_id=None, + prompt=None, + ) + + # Extract reasoning content and regular content from the response + reasoning_content = None + regular_content = None + + for item in response.output: + if hasattr(item, "type") and item.type == "reasoning": + reasoning_content = item.summary[0].text + elif hasattr(item, "type") and item.type == "message": + if item.content and len(item.content) > 0: + content_item = item.content[0] + if isinstance(content_item, ResponseOutputText): + regular_content = content_item.text + elif isinstance(content_item, ResponseOutputRefusal): + refusal_item = cast(Any, content_item) + regular_content = refusal_item.refusal + + return reasoning_content, regular_content diff --git a/openai_agents/reasoning_content/run_reasoning_content_workflow.py b/openai_agents/reasoning_content/run_reasoning_content_workflow.py new file mode 100644 index 00000000..a6a64318 --- /dev/null +++ b/openai_agents/reasoning_content/run_reasoning_content_workflow.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 + +import asyncio +import os + +from temporalio.client import Client +from temporalio.contrib.openai_agents import OpenAIAgentsPlugin + +from openai_agents.reasoning_content.workflows.reasoning_content_workflow import ( + ReasoningContentWorkflow, + ReasoningResult, +) + + +async def main(): + client = await Client.connect( + "localhost:7233", + plugins=[ + OpenAIAgentsPlugin(), + ], + ) + + # Demo prompts that benefit from reasoning + demo_prompts = [ + "What is the square root of 841? 
Please explain your reasoning.", + "Explain the concept of recursion in programming", + "Write a haiku about recursion in programming", + ] + + model_name = os.getenv("EXAMPLE_MODEL_NAME") or "deepseek-reasoner" + print(f"Using model: {model_name}") + print("Note: This example requires a model that supports reasoning content.") + print("You may need to use a specific model like deepseek-reasoner or similar.\n") + + for i, prompt in enumerate(demo_prompts, 1): + print(f"=== Example {i}: {prompt} ===") + + result: ReasoningResult = await client.execute_workflow( + ReasoningContentWorkflow.run, + args=[prompt, model_name], + id=f"reasoning-content-{i}", + task_queue="reasoning-content-task-queue", + ) + + print(f"\nPrompt: {result.prompt}") + print(f"\nReasoning Content:") + print(result.reasoning_content or "No reasoning content provided") + print(f"\nRegular Content:") + print(result.regular_content or "No regular content provided") + print("-" * 50 + "\n") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/openai_agents/reasoning_content/run_worker.py b/openai_agents/reasoning_content/run_worker.py new file mode 100644 index 00000000..51393b2e --- /dev/null +++ b/openai_agents/reasoning_content/run_worker.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 + +import asyncio + +from temporalio.client import Client +from temporalio.contrib.openai_agents import OpenAIAgentsPlugin +from temporalio.worker import Worker + +from openai_agents.reasoning_content.activities.reasoning_activities import ( + get_reasoning_response, +) +from openai_agents.reasoning_content.workflows.reasoning_content_workflow import ( + ReasoningContentWorkflow, +) + + +async def main(): + client = await Client.connect( + "localhost:7233", + plugins=[ + OpenAIAgentsPlugin(), + ], + ) + + worker = Worker( + client, + task_queue="reasoning-content-task-queue", + workflows=[ReasoningContentWorkflow], + activities=[get_reasoning_response], + ) + + print("Starting reasoning content worker...") + await worker.run() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/openai_agents/reasoning_content/workflows/reasoning_content_workflow.py b/openai_agents/reasoning_content/workflows/reasoning_content_workflow.py new file mode 100644 index 00000000..0a9f0a15 --- /dev/null +++ b/openai_agents/reasoning_content/workflows/reasoning_content_workflow.py @@ -0,0 +1,32 @@ +from dataclasses import dataclass + +from temporalio import workflow + +from openai_agents.reasoning_content.activities.reasoning_activities import ( + get_reasoning_response, +) + + +@dataclass +class ReasoningResult: + reasoning_content: str | None + regular_content: str | None + prompt: str + + +@workflow.defn +class ReasoningContentWorkflow: + @workflow.run + async def run(self, prompt: str, model_name: str | None = None) -> ReasoningResult: + # Call the activity to get the reasoning response + reasoning_content, regular_content = await workflow.execute_activity( + get_reasoning_response, + args=[prompt, model_name], + start_to_close_timeout=workflow.timedelta(minutes=5), + ) + + return ReasoningResult( + reasoning_content=reasoning_content, + regular_content=regular_content, + prompt=prompt, + ) From 8a028b3b55903b74819a90365d34e8638770a2cf Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 17:10:16 -0700 Subject: [PATCH 12/15] formatting --- .../financial_research_manager.py | 7 ++----- .../run_financial_research_workflow.py | 5 +++-- openai_agents/financial_research_agent/run_worker.py | 9 +++++---- 
.../reasoning_content/run_reasoning_content_workflow.py | 4 ++-- 4 files changed, 12 insertions(+), 13 deletions(-) diff --git a/openai_agents/financial_research_agent/financial_research_manager.py b/openai_agents/financial_research_agent/financial_research_manager.py index 8405c75e..9a437a45 100644 --- a/openai_agents/financial_research_agent/financial_research_manager.py +++ b/openai_agents/financial_research_agent/financial_research_manager.py @@ -3,9 +3,8 @@ import asyncio from collections.abc import Sequence -from temporalio import workflow - from agents import RunConfig, Runner, RunResult, custom_span, trace +from temporalio import workflow from openai_agents.financial_research_agent.agents.financials_agent import ( new_financials_agent, @@ -16,9 +15,7 @@ new_planner_agent, ) from openai_agents.financial_research_agent.agents.risk_agent import new_risk_agent -from openai_agents.financial_research_agent.agents.search_agent import ( - new_search_agent, -) +from openai_agents.financial_research_agent.agents.search_agent import new_search_agent from openai_agents.financial_research_agent.agents.verifier_agent import ( VerificationResult, new_verifier_agent, diff --git a/openai_agents/financial_research_agent/run_financial_research_workflow.py b/openai_agents/financial_research_agent/run_financial_research_workflow.py index 7105686b..80adc86b 100644 --- a/openai_agents/financial_research_agent/run_financial_research_workflow.py +++ b/openai_agents/financial_research_agent/run_financial_research_workflow.py @@ -17,8 +17,9 @@ async def main(): query = "Write up an analysis of Apple Inc.'s most recent quarter." print(f"Using default query: {query}") - client = await Client.connect("localhost:7233", - plugins=[ + client = await Client.connect( + "localhost:7233", + plugins=[ OpenAIAgentsPlugin(), ], ) diff --git a/openai_agents/financial_research_agent/run_worker.py b/openai_agents/financial_research_agent/run_worker.py index e2439ccc..507bd77c 100644 --- a/openai_agents/financial_research_agent/run_worker.py +++ b/openai_agents/financial_research_agent/run_worker.py @@ -3,17 +3,18 @@ import asyncio from temporalio.client import Client -from temporalio.worker import Worker - from temporalio.contrib.openai_agents import OpenAIAgentsPlugin +from temporalio.worker import Worker from openai_agents.financial_research_agent.workflows.financial_research_workflow import ( FinancialResearchWorkflow, ) + async def main(): - client = await Client.connect("localhost:7233", - plugins=[ + client = await Client.connect( + "localhost:7233", + plugins=[ OpenAIAgentsPlugin(), ], ) diff --git a/openai_agents/reasoning_content/run_reasoning_content_workflow.py b/openai_agents/reasoning_content/run_reasoning_content_workflow.py index a6a64318..79e5d7ba 100644 --- a/openai_agents/reasoning_content/run_reasoning_content_workflow.py +++ b/openai_agents/reasoning_content/run_reasoning_content_workflow.py @@ -43,9 +43,9 @@ async def main(): ) print(f"\nPrompt: {result.prompt}") - print(f"\nReasoning Content:") + print("\nReasoning Content:") print(result.reasoning_content or "No reasoning content provided") - print(f"\nRegular Content:") + print("\nRegular Content:") print(result.regular_content or "No regular content provided") print("-" * 50 + "\n") From 4dea6acea2f0b20b3d6310abc03d3dd82a9e2f80 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 18:41:49 -0700 Subject: [PATCH 13/15] switch model --- openai_agents/financial_research_agent/agents/writer_agent.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/openai_agents/financial_research_agent/agents/writer_agent.py b/openai_agents/financial_research_agent/agents/writer_agent.py index 4f38c1c9..047535e7 100644 --- a/openai_agents/financial_research_agent/agents/writer_agent.py +++ b/openai_agents/financial_research_agent/agents/writer_agent.py @@ -29,6 +29,6 @@ def new_writer_agent() -> Agent: return Agent( name="FinancialWriterAgent", instructions=WRITER_PROMPT, - model="gpt-4.1-mini", + model="gpt-4.1", output_type=FinancialReportData, ) From 66bf1868d55afe9d05233c8113513e5a84092a03 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 18:51:48 -0700 Subject: [PATCH 14/15] pin model snapshot --- openai_agents/financial_research_agent/agents/writer_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openai_agents/financial_research_agent/agents/writer_agent.py b/openai_agents/financial_research_agent/agents/writer_agent.py index 047535e7..9accc202 100644 --- a/openai_agents/financial_research_agent/agents/writer_agent.py +++ b/openai_agents/financial_research_agent/agents/writer_agent.py @@ -29,6 +29,6 @@ def new_writer_agent() -> Agent: return Agent( name="FinancialWriterAgent", instructions=WRITER_PROMPT, - model="gpt-4.1", + model="gpt-4.1-2025-04-14", output_type=FinancialReportData, ) From 7094e0fcc8f0fde2220113b14760f02600a0fc2d Mon Sep 17 00:00:00 2001 From: Tim Conley Date: Tue, 29 Jul 2025 15:56:01 -0700 Subject: [PATCH 15/15] Revert uv.lock --- uv.lock | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/uv.lock b/uv.lock index 64fd68ae..8fc1c4b4 100644 --- a/uv.lock +++ b/uv.lock @@ -1406,10 +1406,14 @@ wheels = [ [[package]] name = "nexus-rpc" version = "1.1.0" -source = { git = "https://github.com/nexus-rpc/sdk-python.git?rev=35f574c711193a6e2560d3e6665732a5bb7ae92c#35f574c711193a6e2560d3e6665732a5bb7ae92c" } +source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions" }, ] +sdist = { url = "https://files.pythonhosted.org/packages/ef/66/540687556bd28cf1ec370cc6881456203dfddb9dab047b8979c6865b5984/nexus_rpc-1.1.0.tar.gz", hash = "sha256:d65ad6a2f54f14e53ebe39ee30555eaeb894102437125733fb13034a04a44553", size = 77383, upload-time = "2025-07-07T19:03:58.368Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/2f/9e9d0dcaa4c6ffa22b7aa31069a8a264c753ff8027b36af602cce038c92f/nexus_rpc-1.1.0-py3-none-any.whl", hash = "sha256:d1b007af2aba186a27e736f8eaae39c03aed05b488084ff6c3d1785c9ba2ad38", size = 27743, upload-time = "2025-07-07T19:03:57.556Z" }, +] [[package]] name = "nodeenv"