diff --git a/02-samples/14-agentic-ai-at-the-edge/main.py b/02-samples/14-agentic-ai-at-the-edge/main.py index 7d7dd811..8e5f4655 100644 --- a/02-samples/14-agentic-ai-at-the-edge/main.py +++ b/02-samples/14-agentic-ai-at-the-edge/main.py @@ -33,6 +33,8 @@ # Disable FAISS GPU warnings globally - we only use CPU os.environ["CUDA_VISIBLE_DEVICES"] = "-1" +from opentelemetry import baggage, context + from strands import Agent from strands.models import BedrockModel from strands.models.llamacpp import LlamaCppModel @@ -644,6 +646,13 @@ def display_welcome_message(): print() +def set_session_context(session_id=None): + """Set the session ID in OpenTelemetry baggage for trace correlation""" + ctx = baggage.set_baggage("session.id", session_id) + token = context.attach(ctx) + return token + + def main(): """Main entry point""" global USE_API_CLIENT @@ -677,6 +686,8 @@ def main(): display_welcome_message() + set_session_context(SESSION_ID) + if USE_RICH_UI and console: console.print("🚀 [bold green]Assistant is ready![/bold green]") console.print() diff --git a/02-samples/14-agentic-ai-at-the-edge/pyproject.toml b/02-samples/14-agentic-ai-at-the-edge/pyproject.toml index 0a5e264a..65d73df8 100644 --- a/02-samples/14-agentic-ai-at-the-edge/pyproject.toml +++ b/02-samples/14-agentic-ai-at-the-edge/pyproject.toml @@ -26,7 +26,7 @@ classifiers = [ dependencies = [ # Core Framework - "strands-agents @ git+https://github.com/westonbrown/sdk-python.git@main", + "strands-agents[otel] @ git+https://github.com/westonbrown/sdk-python.git@main", # Configuration & Environment "python-dotenv>=1.0.0,<2.0.0", @@ -52,6 +52,9 @@ dependencies = [ "markdownify>=0.11.0,<1.0.0", "readabilipy>=0.2.0,<1.0.0", + # Observability + "aws-opentelemetry-distro~=0.10.1", + # System & Utilities "psutil>=5.9.0,<8.0.0", "py-cpuinfo>=9.0.0,<10.0.0", diff --git a/02-samples/14-agentic-ai-at-the-edge/src/agents/tools/model_selector.py b/02-samples/14-agentic-ai-at-the-edge/src/agents/tools/model_selector.py 
index fac570ff..64482a27 100644 --- a/02-samples/14-agentic-ai-at-the-edge/src/agents/tools/model_selector.py +++ b/02-samples/14-agentic-ai-at-the-edge/src/agents/tools/model_selector.py @@ -10,7 +10,7 @@ from strands.models.llamacpp import LlamaCppModel import logging import json -from config import BEDROCK_MODEL_ID, LLAMACPP_URL +from config import BEDROCK_MODEL_ID, LLAMACPP_URL, SESSION_ID logger = logging.getLogger(__name__) @@ -93,7 +93,10 @@ def select_model(query: str, context: Optional[str] = None) -> Dict[str, Any]: # Create analysis agent analysis_agent = Agent( - model=local_model, system_prompt=analysis_prompt, callback_handler=None + model=local_model, + system_prompt=analysis_prompt, + callback_handler=None, + trace_attributes={"session.id": SESSION_ID}, ) # Prepare analysis query