Harrison/fix logging api #1

base: master
Changes from all commits: 2bef195, 7ec2107, c2580cf, ad85f3b, bf8bed4, ec842b7, 64ea17b, ec65ca0
langchain/__init__.py:

```diff
@@ -13,6 +13,7 @@
 )
 from langchain.docstore import InMemoryDocstore, Wikipedia
 from langchain.llms import Cohere, HuggingFaceHub, OpenAI
+from langchain.logger import BaseLogger
 from langchain.prompts import (
     BasePromptTemplate,
     FewShotPromptTemplate,
@@ -23,6 +24,8 @@
 from langchain.sql_database import SQLDatabase
 from langchain.vectorstores import FAISS, ElasticVectorSearch

+logger = BaseLogger()
+
 __all__ = [
     "LLMChain",
     "LLMMathChain",
```

Review comment (on `logger = BaseLogger()`): The code instantiates a BaseLogger which is likely meant to be an abstract base class. This could lead to missing implementations of required logging methods. Should use a concrete logger implementation instead.
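As defined in langchain/logger.py later in this diff, BaseLogger's methods are actually concrete no-ops, so the instantiation does run; the practical effect of the reviewer's suggestion would be making logging visible by default. A minimal sketch of that swap, using the StOutLogger this PR introduces:

```python
# Sketch of the reviewer's suggestion: use the concrete stdout logger
# from this PR instead of the silent base class.
from langchain.logger import StOutLogger

logger = StOutLogger()
```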
langchain/agents/agent.py:

```diff
@@ -2,24 +2,18 @@
 from __future__ import annotations

 from abc import ABC, abstractmethod
-from typing import Any, ClassVar, Dict, List, NamedTuple, Optional, Tuple
+from typing import Any, ClassVar, Dict, List, Optional, Tuple

 from pydantic import BaseModel

+from langchain.agents.input import ChainedInput
 from langchain.agents.tools import Tool
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
-from langchain.input import ChainedInput, get_color_mapping
+from langchain.input import get_color_mapping
 from langchain.llms.base import LLM
 from langchain.prompts.base import BasePromptTemplate
-
-
-class Action(NamedTuple):
-    """Action to take."""
-
-    tool: str
-    tool_input: str
-    log: str
+from langchain.schema import AgentAction


 class Agent(Chain, BaseModel, ABC):
```

Review comment (on `from langchain.input import get_color_mapping`): Incorrect import path. The 'input' module seems incorrect - should likely be importing from a different module like langchain.utils or similar, since input handling related utilities are typically not in a top-level 'input' module.
```diff
@@ -99,7 +93,7 @@ def from_llm_and_tools(cls, llm: LLM, tools: List[Tool], **kwargs: Any) -> Agent
         llm_chain = LLMChain(llm=llm, prompt=cls.create_prompt(tools))
         return cls(llm_chain=llm_chain, tools=tools, **kwargs)

-    def get_action(self, text: str) -> Action:
+    def get_action(self, text: str) -> AgentAction:
         """Given input, decided what to do.

         Args:
@@ -119,7 +113,7 @@ def get_action(self, text: str) -> Action:
             full_output += output
         parsed_output = self._extract_tool_and_input(full_output)
         tool, tool_input = parsed_output
-        return Action(tool, tool_input, full_output)
+        return AgentAction(tool, tool_input, full_output)

     def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
         """Run text through and get agent response."""
```

Review comment (on `full_output += output`): Potential infinite loop risk. If _extract_tool_and_input keeps returning None, this will keep appending output indefinitely. Should add a maximum retry limit or other exit condition.
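The loop the reviewer refers to re-calls the LLM whenever _extract_tool_and_input returns None. A self-contained sketch of the bounded variant being suggested; the function and parameter names here are illustrative, not part of the PR:

```python
from typing import Callable, Optional, Tuple

def parse_with_retry(
    generate: Callable[[str], str],
    extract: Callable[[str], Optional[Tuple[str, str]]],
    prompt: str,
    max_retries: int = 5,
) -> Tuple[str, str, str]:
    """Keep prompting until `extract` parses a (tool, tool_input) pair,
    but give up after max_retries rather than looping forever."""
    full_output = ""
    for _ in range(max_retries):
        full_output += generate(prompt + full_output)
        parsed = extract(full_output)
        if parsed is not None:
            tool, tool_input = parsed
            return tool, tool_input, full_output
    raise ValueError(f"Could not parse agent output after {max_retries} attempts")
```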
```diff
@@ -145,7 +139,7 @@ def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
         # Call the LLM to see what to do.
         output = self.get_action(chained_input.input)
         # Add the log to the Chained Input.
-        chained_input.add(output.log, color="green")
+        chained_input.add_action(output, color="green")
         # If the tool chosen is the finishing tool, then we end and return.
         if output.tool == self.finish_tool_name:
             return {self.output_key: output.tool_input}
@@ -154,8 +148,9 @@ def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
         # We then call the tool on the tool input to get an observation
         observation = chain(output.tool_input)
         # We then log the observation
-        chained_input.add(f"\n{self.observation_prefix}")
-        chained_input.add(observation, color=color_mapping[output.tool])
-        # We then add the LLM prefix into the prompt to get the LLM to start
-        # thinking, and start the loop all over.
-        chained_input.add(f"\n{self.llm_prefix}")
+        chained_input.add_observation(
+            observation,
+            self.observation_prefix,
+            self.llm_prefix,
+            color=color_mapping[output.tool],
+        )
```
langchain/agents/input.py (new file):

```diff
@@ -0,0 +1,44 @@
+"""Input manager for agents."""
+from typing import Optional
+
+import langchain
+from langchain.schema import AgentAction
+
+
+class ChainedInput:
+    """Class for working with input that is the result of chains."""
+
+    def __init__(self, text: str, verbose: bool = False):
+        """Initialize with verbose flag and initial text."""
+        self._verbose = verbose
+        if self._verbose:
+            langchain.logger.log_agent_start(text)
+        self._input = text
+
+    def add_action(self, action: AgentAction, color: Optional[str] = None) -> None:
+        """Add text to input, print if in verbose mode."""
+        if self._verbose:
+            langchain.logger.log_agent_action(action, color=color)
+        self._input += action.log
+
+    def add_observation(
+        self,
+        observation: str,
+        observation_prefix: str,
+        llm_prefix: str,
+        color: Optional[str],
+    ) -> None:
+        """Add observation to input, print if in verbose mode."""
+        if self._verbose:
+            langchain.logger.log_agent_observation(
+                observation,
+                color=color,
+                observation_prefix=observation_prefix,
+                llm_prefix=llm_prefix,
+            )
+        self._input += f"\n{observation_prefix}{observation}\n{llm_prefix}"
+
+    @property
+    def input(self) -> str:
+        """Return the accumulated input."""
+        return self._input
```

Review comment (on the `color` parameter of add_action): The color parameter is unused in the method implementation. Either remove the unused parameter or implement the color functionality in the method.

Review comment (on `self._input += action.log`): Attempting to add strings without first ensuring a newline or proper spacing. This could cause text to run together. Should include proper separators like: `self._input += f"\n{action.log}"`.
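A minimal sketch of the separator fix suggested for add_action; whether a leading newline is actually desirable depends on the prompt format the agent expects, so treat this as illustrative rather than a drop-in change:

```python
from typing import Optional

class ChainedInputSketch:
    """Toy stand-in for ChainedInput showing the separator guard."""

    def __init__(self, text: str):
        self._input = text

    def add_log(self, log: str, color: Optional[str] = None) -> None:
        # Insert a newline only when the accumulated text doesn't already
        # end with one, so consecutive entries can't run together.
        if self._input and not self._input.endswith("\n"):
            self._input += "\n"
        self._input += log
```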
langchain/chains/api/base.py:

```diff
@@ -80,7 +80,7 @@ def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
-        print_text(api_url, color="green", end="\n")
         api_response = self.requests_wrapper.run(api_url)
         if self.verbose:
+            print_text(api_url, color="yellow", end="\n")
             print_text(api_response, color="yellow", end="\n")
         answer = self.api_answer_chain.predict(
             question=question,
             api_docs=self.api_docs,
```

Review comment (on `api_response = self.requests_wrapper.run(api_url)`): API response text is processed without checking the response status code. Should verify response.status_code is in the 200-299 range before processing the text.
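The wrapper's return type isn't shown in this diff, so here is a sketch of the reviewer's suggestion assuming a plain `requests` call stands in for self.requests_wrapper.run:

```python
import requests

def fetch_api_response(api_url: str, timeout: float = 10.0) -> str:
    """Fetch a URL and fail fast on error statuses instead of passing an
    error page on to the answer chain."""
    response = requests.get(api_url, timeout=timeout)
    # Raises requests.HTTPError for any 4xx/5xx status, which approximates
    # the reviewer's "check for 200-299" suggestion.
    response.raise_for_status()
    return response.text
```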
langchain/chains/llm.py:

```diff
@@ -3,8 +3,8 @@
 from pydantic import BaseModel, Extra

+import langchain
 from langchain.chains.base import Chain
-from langchain.input import print_text
 from langchain.llms.base import LLM
 from langchain.prompts.base import BasePromptTemplate

@@ -55,12 +55,13 @@ def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
         selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
         prompt = self.prompt.format(**selected_inputs)
         if self.verbose:
-            print("Prompt after formatting:")
-            print_text(prompt, color="green", end="\n")
+            langchain.logger.log_llm_inputs(selected_inputs, prompt)
         kwargs = {}
         if "stop" in inputs:
             kwargs["stop"] = inputs["stop"]
         response = self.llm(prompt, **kwargs)
+        if self.verbose:
+            langchain.logger.log_llm_response(response)
         return {self.output_key: response}

     def predict(self, **kwargs: Any) -> str:
```

Review comment (on `if "stop" in inputs:`): The code checks for 'stop' in the main inputs dictionary, but this parameter should be handled separately from prompt inputs. This could lead to 'stop' being passed to the prompt template if it's included in input_variables. Should separate LLM-specific parameters from prompt parameters.
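A self-contained sketch of the separation the reviewer suggests; LLM_ONLY_KEYS and split_inputs are illustrative names, not part of the PR:

```python
from typing import Any, Dict, List, Tuple

# Hypothetical set of parameters meant for the LLM call, never the prompt.
LLM_ONLY_KEYS = {"stop"}

def split_inputs(
    inputs: Dict[str, Any], prompt_variables: List[str]
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """Partition chain inputs into prompt variables and LLM kwargs."""
    llm_kwargs = {k: inputs[k] for k in LLM_ONLY_KEYS if k in inputs}
    prompt_inputs = {
        k: inputs[k] for k in prompt_variables if k not in LLM_ONLY_KEYS
    }
    return prompt_inputs, llm_kwargs
```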
langchain/input.py:

```diff
@@ -27,25 +27,3 @@ def print_text(text: str, color: Optional[str] = None, end: str = "") -> None:
     else:
         color_str = _TEXT_COLOR_MAPPING[color]
     print(f"\u001b[{color_str}m\033[1;3m{text}\u001b[0m", end=end)
-
-
-class ChainedInput:
-    """Class for working with input that is the result of chains."""
-
-    def __init__(self, text: str, verbose: bool = False):
-        """Initialize with verbose flag and initial text."""
-        self._verbose = verbose
-        if self._verbose:
-            print_text(text, color=None)
-        self._input = text
-
-    def add(self, text: str, color: Optional[str] = None) -> None:
-        """Add text to input, print if in verbose mode."""
-        if self._verbose:
-            print_text(text, color=color)
-        self._input += text
-
-    @property
-    def input(self) -> str:
-        """Return the accumulated input."""
-        return self._input
```

Review comment (on `color_str = _TEXT_COLOR_MAPPING[color]`): Direct dictionary access without checking if the color exists in _TEXT_COLOR_MAPPING. This could raise a KeyError if an invalid color is provided. Should add a check or use the .get() method with a default value.
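A sketch of the defensive lookup the reviewer suggests; the color mapping shown is an illustrative subset, not the full table from langchain.input:

```python
from typing import Optional

# Illustrative subset of the ANSI color table used by print_text.
_TEXT_COLOR_MAPPING = {"green": "32;1", "yellow": "33;1"}

def print_text_safe(text: str, color: Optional[str] = None, end: str = "") -> None:
    """Print colored text, degrading to plain output for unknown colors."""
    color_str = _TEXT_COLOR_MAPPING.get(color) if color is not None else None
    if color_str is None:
        print(text, end=end)
    else:
        print(f"\u001b[{color_str}m\033[1;3m{text}\u001b[0m", end=end)
```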
langchain/logger.py (new file):

```diff
@@ -0,0 +1,56 @@
+from typing import Any, Optional
+
+from langchain.input import print_text
+from langchain.schema import AgentAction
+
+
+class BaseLogger:
+    def log_agent_start(self, text: str, **kwargs: Any):
+        pass
+
+    def log_agent_end(self, text: str, **kwargs: Any):
+        pass
+
+    def log_agent_action(self, action: AgentAction, **kwargs: Any):
+        pass
+
+    def log_agent_observation(self, observation: str, **kwargs: Any):
+        pass
+
+    def log_llm_inputs(self, inputs: dict, prompt: str, **kwargs):
+        pass
+
+    def log_llm_response(self, output: str, **kwargs):
+        pass
+
+
+class StOutLogger(BaseLogger):
+    def log_agent_start(self, text: str, **kwargs: Any):
+        print_text(text)
+
+    def log_agent_end(self, text: str, **kwargs: Any):
+        pass
+
+    def log_agent_action(
+        self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
+    ):
+        print_text(action.log, color=color)
+
+    def log_llm_inputs(self, inputs: dict, prompt: str, **kwargs):
+        print("Prompt after formatting:")
+        print_text(prompt, color="green", end="\n")
+
+    def log_llm_response(self, output: str, **kwargs):
+        pass
+
+    def log_agent_observation(
+        self,
+        observation: str,
+        color: Optional[str] = None,
+        observation_prefix: Optional[str] = None,
+        llm_prefix: Optional[str] = None,
+        **kwargs: Any,
+    ):
+        print_text(f"\n{observation_prefix}")
+        print_text(observation, color=color)
+        print_text(f"\n{llm_prefix}")
```

Review comment (on log_agent_start, log_agent_end, log_llm_inputs, and log_llm_response): Missing return type annotations; each of these methods should be annotated -> None for consistency and to follow proper typing practices.

Review comment (on `print_text(f"\n{observation_prefix}")` and `print_text(f"\n{llm_prefix}")`): observation_prefix and llm_prefix could be None, which would print the literal string 'None'. Should check that each prefix exists before printing.
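A sketch combining the two fixes the reviewers ask for in StOutLogger: explicit -> None annotations and a guard so a missing prefix is skipped instead of printed as 'None'. Plain print stands in for langchain.input.print_text to keep the sketch self-contained:

```python
from typing import Any, Optional

class StdOutLoggerSketch:
    """Illustrative variant of StOutLogger.log_agent_observation."""

    def log_agent_observation(
        self,
        observation: str,
        color: Optional[str] = None,
        observation_prefix: Optional[str] = None,
        llm_prefix: Optional[str] = None,
        **kwargs: Any,
    ) -> None:  # explicit return annotation, as the review asks
        if observation_prefix is not None:
            print(f"\n{observation_prefix}", end="")
        print(observation)  # color handling elided in this sketch
        if llm_prefix is not None:
            print(f"\n{llm_prefix}", end="")
```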
langchain/schema.py (new file):

```diff
@@ -0,0 +1,11 @@
+from __future__ import annotations
+
+from typing import NamedTuple
+
+
+class AgentAction(NamedTuple):
+    """Agent's action to take."""
+
+    tool: str
+    tool_input: str
+    log: str
```

Review comment (on `from __future__ import annotations`): This import is unneeded here: the module contains no forward references, so postponed evaluation of annotations (PEP 563) buys nothing. While not strictly a bug, the unused future import could be removed for cleaner code.
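For reference, a quick usage example of the new named tuple; the values here are invented for demonstration:

```python
from typing import NamedTuple

class AgentAction(NamedTuple):
    """Agent's action to take (as defined in langchain/schema.py above)."""

    tool: str
    tool_input: str
    log: str

action = AgentAction(tool="Search", tool_input="weather in SF",
                     log="Thought: I should look this up.")
print(action.tool, action.tool_input)
```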
pyproject.toml:

```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain"
-version = "0.0.28"
+version = "0.0.29"
 description = "Building applications with LLMs through composability"
 authors = []
 license = "MIT"
@@ -21,7 +21,6 @@ manifest-ml = {version = "^0.0.1", optional = true}
 spacy = {version = "^3", optional = true}
 nltk = {version = "^3", optional = true}
 transformers = {version = "^4", optional = true}
-types-toml = "^0.10.8.1"

 [tool.poetry.group.test.dependencies]
 pytest = "^7.2.0"
@@ -37,6 +36,7 @@ flake8 = "^6.0.0"
 mypy = "^0.991"
 types-pyyaml = "^6.0.12.2"
 types-requests = "^2.28.11.5"
+types-toml = "^0.10.8.1"

 [tool.poetry.group.dev]
 optional = true
```

Review comment (on `authors = []`): Empty authors list. While technically valid, it's recommended to include at least one author for proper package metadata.
Review comment (on `from langchain.docstore import InMemoryDocstore, Wikipedia` in langchain/__init__.py): Importing Wikipedia directly from langchain.docstore is deprecated. It should be imported from langchain.docstore.wikipedia to follow the package's structure.
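If the submodule layout the reviewer assumes exists, the change would be a one-line import swap:

```python
# Reviewer's suggested import path (assumes langchain.docstore.wikipedia
# exposes the Wikipedia docstore):
from langchain.docstore.wikipedia import Wikipedia
```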