Merged
33 changes: 31 additions & 2 deletions openai_agents/model_providers/README.md
@@ -18,7 +18,7 @@ Start the LiteLLM provider worker:
# Set the required environment variable for your chosen provider
export ANTHROPIC_API_KEY="your_anthropic_api_key" # For Anthropic

-uv run openai_agents/model_providers/run_worker_litellm_provider.py
+uv run openai_agents/model_providers/run_litellm_provider_worker.py
```

Then run the example in a separate terminal:
@@ -30,7 +30,36 @@ The example uses Anthropic Claude by default but can be modified to use other LiteLLM providers.

Find more LiteLLM providers at: https://docs.litellm.ai/docs/providers

-## Not Yet Implemented
+### Extra

#### GPT-OSS with Ollama

This example demonstrates tool calling using the gpt-oss reasoning model with a local Ollama server.
Running this example requires sufficiently powerful hardware (and involves a 14 GB model download).
It is adapted from the [OpenAI Cookbook example](https://cookbook.openai.com/articles/gpt-oss/run-locally-ollama#agents-sdk-integration).


Make sure you have [Ollama](https://ollama.com/) installed, then start the server:
```bash
ollama serve
```

Download the `gpt-oss` model:
```bash
ollama pull gpt-oss:20b
```
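
Optionally, sanity-check that Ollama's OpenAI-compatible endpoint is reachable before starting the worker (this assumes the default port 11434, the same `base_url` the worker uses):
```bash
curl http://localhost:11434/v1/models
```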

Start the gpt-oss worker:
```bash
uv run openai_agents/model_providers/run_gpt_oss_worker.py
```

Then run the example in a separate terminal:
```bash
uv run openai_agents/model_providers/run_gpt_oss_workflow.py
```

### Not Yet Implemented

- **Custom Example Agent** - Custom OpenAI client integration
- **Custom Example Global** - Global default client configuration
58 changes: 58 additions & 0 deletions openai_agents/model_providers/run_gpt_oss_worker.py
@@ -0,0 +1,58 @@
import asyncio
import logging
from datetime import timedelta
from typing import Optional

from agents import Model, ModelProvider, OpenAIChatCompletionsModel
from openai import AsyncOpenAI
from temporalio.client import Client
from temporalio.contrib.openai_agents import ModelActivityParameters, OpenAIAgentsPlugin
from temporalio.worker import Worker

from openai_agents.model_providers.workflows.gpt_oss_workflow import GptOssWorkflow

ollama_client = AsyncOpenAI(
    base_url="http://localhost:11434/v1",  # Local Ollama API endpoint
    api_key="ollama",  # Ignored by Ollama
)

**Review comment (Contributor Author):** Client needs to be created outside of CustomModelProvider as otherwise we have sandboxing issues:

future: <Task finished name='Task-47 (workflow: GptOssWorkflow, id: litellm-gpt-oss-workflow-id, run: 019881a0-034a-7a48-a1e0-9412fa0b9bbb)' coro=<AsyncClient.aclose() done, defined at /Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/httpx/_client.py:1978> exception=RestrictedWorkflowAccessError('Cannot access threading.local.__mro_entries__ from inside a workflow. If this is code from a module not used in a workflow or known to only be used deterministically from a workflow, mark the import as pass through.')>
Traceback (most recent call last):
  File "/Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/httpx/_client.py", line 1985, in aclose
    await self._transport.aclose()
  File "/Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/httpx/_transports/default.py", line 406, in aclose
    await self._pool.aclose()
  File "/Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/httpcore/_async/connection_pool.py", line 353, in aclose
    await self._close_connections(closing_connections)
  File "/Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/httpcore/_async/connection_pool.py", line 343, in _close_connections
    with AsyncShieldCancellation():
         ^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/httpcore/_synchronization.py", line 203, in __init__
    self._backend = current_async_library()
                    ^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/httpcore/_synchronization.py", line 26, in current_async_library
    import sniffio
  File "/Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/temporalio/worker/workflow_sandbox/_importer.py", line 497, in __call__
    return self.current(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/temporalio/worker/workflow_sandbox/_importer.py", line 248, in _import
    mod = importlib.__import__(name, globals, locals, fromlist, level)
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "<frozen importlib._bootstrap>", line 1466, in __import__
  File "<frozen importlib._bootstrap>", line 1387, in _gcd_import
  File "<frozen importlib._bootstrap>", line 1360, in _find_and_load
  File "<frozen importlib._bootstrap>", line 1331, in _find_and_load_unlocked
  File "<frozen importlib._bootstrap>", line 935, in _load_unlocked
  File "<frozen importlib._bootstrap_external>", line 999, in exec_module
  File "<frozen importlib._bootstrap>", line 488, in _call_with_frames_removed
  File "/Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/sniffio/__init__.py", line 12, in <module>
    from ._impl import (
  File "/Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/temporalio/worker/workflow_sandbox/_importer.py", line 497, in __call__
    return self.current(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/temporalio/worker/workflow_sandbox/_importer.py", line 248, in _import
    mod = importlib.__import__(name, globals, locals, fromlist, level)
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "<frozen importlib._bootstrap>", line 1470, in __import__
  File "<frozen importlib._bootstrap>", line 1387, in _gcd_import
  File "<frozen importlib._bootstrap>", line 1360, in _find_and_load
  File "<frozen importlib._bootstrap>", line 1331, in _find_and_load_unlocked
  File "<frozen importlib._bootstrap>", line 935, in _load_unlocked
  File "<frozen importlib._bootstrap_external>", line 999, in exec_module
  File "<frozen importlib._bootstrap>", line 488, in _call_with_frames_removed
  File "/Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/sniffio/_impl.py", line 11, in <module>
    class _ThreadLocal(threading.local):
  File "/Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/temporalio/worker/workflow_sandbox/_restrictions.py", line 1016, in __getattribute__
    state.assert_child_not_restricted(__name)
  File "/Users/jssmith/t/samples-python-2/.venv/lib/python3.12/site-packages/temporalio/worker/workflow_sandbox/_restrictions.py", line 852, in assert_child_not_restricted
    raise RestrictedWorkflowAccessError(
temporalio.worker.workflow_sandbox._restrictions.RestrictedWorkflowAccessError: Cannot access threading.local.__mro_entries__ from inside a workflow. If this is code from a module not used in a workflow or known to only be used deterministically from a workflow, mark the import as pass through.

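The error message itself points at the other possible fix: marking the non-deterministic import as a sandbox pass-through instead of moving the client to module level. A minimal sketch of that approach, assuming the standard `temporalio` sandbox API (`sniffio` is just the module flagged in the traceback above):

```python
from temporalio import workflow

# Sketch only: have the sandbox reuse the host's already-imported copy of the
# module instead of re-importing (and re-restricting) it inside the workflow.
with workflow.unsafe.imports_passed_through():
    import sniffio
```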


class CustomModelProvider(ModelProvider):
    def get_model(self, model_name: Optional[str]) -> Model:
        model = OpenAIChatCompletionsModel(
            model=model_name if model_name else "gpt-oss:20b",
            openai_client=ollama_client,
        )
        return model


async def main():
    # Configure logging to show workflow debug messages
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("temporalio.workflow").setLevel(logging.DEBUG)

    # Create client connected to server at the given address
    client = await Client.connect(
        "localhost:7233",
        plugins=[
            OpenAIAgentsPlugin(
                model_params=ModelActivityParameters(
                    start_to_close_timeout=timedelta(seconds=30)
                ),
                model_provider=CustomModelProvider(),
            ),
        ],
    )

    worker = Worker(
        client,
        task_queue="openai-agents-model-providers-task-queue",
        workflows=[
            GptOssWorkflow,
        ],
    )
    await worker.run()


if __name__ == "__main__":
    asyncio.run(main())
27 changes: 27 additions & 0 deletions openai_agents/model_providers/run_gpt_oss_workflow.py
@@ -0,0 +1,27 @@
import asyncio

from temporalio.client import Client
from temporalio.contrib.openai_agents import OpenAIAgentsPlugin

from openai_agents.model_providers.workflows.gpt_oss_workflow import GptOssWorkflow


async def main():
    client = await Client.connect(
        "localhost:7233",
        plugins=[
            OpenAIAgentsPlugin(),
        ],
    )

    result = await client.execute_workflow(
        GptOssWorkflow.run,
        "What's the weather in Tokyo?",
        id="litellm-gpt-oss-workflow-id",
        task_queue="openai-agents-model-providers-task-queue",
    )
    print(f"Result: {result}")


if __name__ == "__main__":
    asyncio.run(main())
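
`execute_workflow` blocks until the workflow completes. If you would rather start it and collect the result later, a sketch of the non-blocking variant using the standard `temporalio` client API:

```python
# Sketch: start the workflow without blocking, then await the result separately.
handle = await client.start_workflow(
    GptOssWorkflow.run,
    "What's the weather in Tokyo?",
    id="litellm-gpt-oss-workflow-id",
    task_queue="openai-agents-model-providers-task-queue",
)
result = await handle.result()
```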
26 changes: 26 additions & 0 deletions openai_agents/model_providers/workflows/gpt_oss_workflow.py
@@ -0,0 +1,26 @@
from __future__ import annotations

from agents import Agent, Runner, function_tool, set_tracing_disabled
from temporalio import workflow


@workflow.defn
class GptOssWorkflow:
    @workflow.run
    async def run(self, prompt: str) -> str:
        set_tracing_disabled(disabled=True)

        @function_tool
        def get_weather(city: str) -> str:
            workflow.logger.debug(f"Getting weather for {city}")
            return f"The weather in {city} is sunny."

        agent = Agent(
            name="Assistant",
            instructions="You only respond in haikus. When asked about the weather, always use the tool to get the current weather.",
            model="gpt-oss:20b",
            tools=[get_weather],
        )

        result = await Runner.run(agent, prompt)
        return result.final_output
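
The sample's `get_weather` tool is deterministic, so it can safely run inline in workflow code. A tool that performs real I/O (an actual weather API, say) should instead delegate to an activity; a hedged sketch, where `fetch_weather` is a hypothetical helper and not part of this PR:

```python
from datetime import timedelta

from agents import function_tool
from temporalio import activity, workflow


@activity.defn
async def fetch_weather(city: str) -> str:
    # Hypothetical: real HTTP calls and other I/O belong in activities.
    return f"The weather in {city} is sunny."


# Inside the workflow, the tool just schedules the activity:
@function_tool
async def get_weather(city: str) -> str:
    return await workflow.execute_activity(
        fetch_weather,
        city,
        start_to_close_timeout=timedelta(seconds=10),
    )
```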