@@ -33,9 +33,8 @@ def __init__(
3333 model_name : str = "claude-3-7-sonnet-latest" ,
3434 memory : bool = True ,
3535 tools : Optional [list [BaseTool ]] = None ,
36- run_id : Optional [str ] = None ,
37- instance_id : Optional [str ] = None ,
38- difficulty : Optional [int ] = None ,
36+ tags : Optional [list [str ]] = [],
37+ metadata : Optional [dict ] = {},
3938 ** kwargs ,
4039 ):
4140 """Initialize a CodeAgent.
@@ -46,6 +45,8 @@ def __init__(
4645 model_name: Name of the model to use
4746 memory: Whether to let LLM keep track of the conversation history
4847 tools: Additional tools to use
48+ tags: Tags to add to the agent trace. All tags must be strings.
49+ metadata: Metadata to use for the agent. Must be a dictionary.
4950 **kwargs: Additional LLM configuration options. Supported options:
5051 - temperature: Temperature parameter (0-1)
5152 - top_p: Top-p sampling parameter (0-1)
@@ -63,14 +64,21 @@ def __init__(
6364 )
6465 self .model_name = model_name
6566 self .langsmith_client = Client ()
66- self .run_id = run_id
67- self .instance_id = instance_id
68- self .difficulty = difficulty
6967
7068 # Get project name from environment variable or use a default
7169 self .project_name = os .environ .get ("LANGCHAIN_PROJECT" , "RELACE" )
7270 print (f"Using LangSmith project: { self .project_name } " )
7371
72+ # Initialize tags for agent trace
73+ self .tags = [* tags , self .model_name ]
74+
75+ # Initialize metadata for agent trace
76+ self .metadata = {
77+ "project" : self .project_name ,
78+ "model" : self .model_name ,
79+ ** metadata ,
80+ }
81+
7482 def run (self , prompt : str , thread_id : Optional [str ] = None ) -> str :
7583 """Run the agent with a prompt.
7684
@@ -95,18 +103,8 @@ def run(self, prompt: str, thread_id: Optional[str] = None) -> str:
95103 # this message has a reducer which appends the current message to the existing history
96104 # see more https://langchain-ai.github.io/langgraph/concepts/low_level/#reducers
97105 input = {"query" : prompt }
98- metadata = {"project" : self .project_name }
99- tags = []
100- # Add SWEBench run ID and instance ID to the metadata and tags for filtering
101- if self .run_id is not None :
102- metadata ["swebench_run_id" ] = self .run_id
103- tags .append (self .run_id )
104-
105- if self .instance_id is not None :
106- metadata ["swebench_instance_id" ] = self .instance_id
107- tags .append (self .instance_id )
108106
109- config = RunnableConfig (configurable = {"thread_id" : thread_id }, tags = tags , metadata = metadata , recursion_limit = 100 )
107+ config = RunnableConfig (configurable = {"thread_id" : thread_id }, tags = self . tags , metadata = self . metadata , recursion_limit = 100 )
110108 # we stream the steps instead of invoke because it allows us to access intermediate nodes
111109 stream = self .agent .stream (input , config = config , stream_mode = "values" )
112110