diff --git a/openai_agents/model_providers/README.md b/openai_agents/model_providers/README.md
new file mode 100644
index 00000000..097dc525
--- /dev/null
+++ b/openai_agents/model_providers/README.md
@@ -0,0 +1,38 @@
+# Model Providers Examples
+
+Custom LLM provider integration examples for OpenAI Agents SDK with Temporal workflows.
+
+*Adapted from [OpenAI Agents SDK model providers examples](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers)*
+
+Before running these examples, be sure to review the [prerequisites and background on the integration](../README.md).
+
+## Running the Examples
+
+### Currently Implemented
+
+#### LiteLLM Auto
+Uses built-in LiteLLM support to connect to various model providers.
+
+Start the LiteLLM provider worker:
+```bash
+# Set the required environment variable for your chosen provider
+export ANTHROPIC_API_KEY="your_anthropic_api_key" # For Anthropic
+
+uv run openai_agents/model_providers/run_worker_litellm_provider.py
+```
+
+Then run the example in a separate terminal:
+```bash
+uv run openai_agents/model_providers/run_litellm_auto_workflow.py
+```
+
+The example uses Anthropic Claude by default but can be modified to use other LiteLLM-supported providers.
+
+Find more LiteLLM providers at: https://docs.litellm.ai/docs/providers
+
+## Not Yet Implemented
+
+- **Custom Example Agent** - Custom OpenAI client integration
+- **Custom Example Global** - Global default client configuration
+- **Custom Example Provider** - Custom ModelProvider pattern
+- **LiteLLM Provider** - Interactive model/API key input
diff --git a/openai_agents/model_providers/run_litellm_auto_workflow.py b/openai_agents/model_providers/run_litellm_auto_workflow.py
new file mode 100644
index 00000000..0e4e05f1
--- /dev/null
+++ b/openai_agents/model_providers/run_litellm_auto_workflow.py
@@ -0,0 +1,30 @@
+import asyncio
+
+from temporalio.client import Client
+from temporalio.contrib.openai_agents import OpenAIAgentsPlugin
+
+from openai_agents.model_providers.workflows.litellm_auto_workflow import (
+    LitellmAutoWorkflow,
+)
+
+
+async def main():
+    """Connect to Temporal, run LitellmAutoWorkflow once, and print its result."""
+    client = await Client.connect(
+        "localhost:7233",
+        plugins=[
+            OpenAIAgentsPlugin(),
+        ],
+    )
+
+    result = await client.execute_workflow(
+        LitellmAutoWorkflow.run,
+        "What's the weather in Tokyo?",
+        id="litellm-auto-workflow-id",
+        task_queue="openai-agents-model-providers-task-queue",
+    )
+    print(f"Result: {result}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/openai_agents/model_providers/run_worker_litellm_provider.py b/openai_agents/model_providers/run_worker_litellm_provider.py
new file mode 100644
index 00000000..5eb8b8b2
--- /dev/null
+++ b/openai_agents/model_providers/run_worker_litellm_provider.py
@@ -0,0 +1,40 @@
+import asyncio
+from datetime import timedelta
+
+from agents.extensions.models.litellm_provider import LitellmProvider
+from temporalio.client import Client
+from temporalio.contrib.openai_agents import ModelActivityParameters, OpenAIAgentsPlugin
+from temporalio.worker import Worker
+
+from openai_agents.model_providers.workflows.litellm_auto_workflow import (
+    LitellmAutoWorkflow,
+)
+
+
+async def main():
+    """Run a Temporal worker that routes agent model calls through LiteLLM."""
+    # Create client connected to server at the given address
+    client = await Client.connect(
+        "localhost:7233",
+        plugins=[
+            OpenAIAgentsPlugin(
+                model_params=ModelActivityParameters(
+                    start_to_close_timeout=timedelta(seconds=30)
+                ),
+                model_provider=LitellmProvider(),
+            ),
+        ],
+    )
+
+    worker = Worker(
+        client,
+        task_queue="openai-agents-model-providers-task-queue",
+        workflows=[
+            LitellmAutoWorkflow,
+        ],
+    )
+    await worker.run()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/openai_agents/model_providers/workflows/litellm_auto_workflow.py b/openai_agents/model_providers/workflows/litellm_auto_workflow.py
new file mode 100644
index 00000000..4a67ded4
--- /dev/null
+++ b/openai_agents/model_providers/workflows/litellm_auto_workflow.py
@@ -0,0 +1,26 @@
+from __future__ import annotations
+
+from agents import Agent, Runner, function_tool, set_tracing_disabled
+from temporalio import workflow
+
+
+@workflow.defn
+class LitellmAutoWorkflow:
+    @workflow.run
+    async def run(self, prompt: str) -> str:
+        """Answer ``prompt`` with a haiku-speaking agent backed by LiteLLM."""
+        set_tracing_disabled(disabled=True)
+
+        @function_tool
+        def get_weather(city: str) -> str:
+            return f"The weather in {city} is sunny."
+
+        agent = Agent(
+            name="Assistant",
+            instructions="You only respond in haikus.",
+            model="anthropic/claude-3-5-sonnet-20240620",
+            tools=[get_weather],
+        )
+
+        result = await Runner.run(agent, prompt)
+        return result.final_output