Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions hatchling/config/languages/en.toml
Original file line number Diff line number Diff line change
Expand Up @@ -242,6 +242,19 @@ quit_name = "quit"
quit_description = "End the chat session (alias for exit)"
clear_name = "clear"
clear_description = "Clear the chat history"
show_logs_description = "Display session logs"
set_log_level_description = "Change log level"
version_description = "Display the current version of Hatchling"

# History commands
[commands.history]
delete_description = "Delete the last N messages from the chat history"
keep_description = "Keep only the last N messages in the chat history"
show_description = "Display the formatted chat history"
export_description = "Export the formatted chat history to a file"
save_description = "Save the current chat history to a file"
load_description = "Load chat history from a file"
clear_description = "Clear the entire chat history"

# Additional base commands
show_logs_name = "show_logs"
Expand Down Expand Up @@ -393,6 +406,11 @@ force_removal_description = "Force removal without confirmation"
cmd_description = "Command to run in the shell (optional)"
tag_description = "Git tag/branch reference for wrapper installation (e.g., 'dev', 'v0.1.0')"
all_settings_description = "Include read-only settings in the export"
history_delete_count_description = "Number of messages to delete from the end of the history (default: 1)"
history_keep_count_description = "Number of most recent messages to keep"
history_show_count_description = "Number of latest messages to display (optional)"
history_export_file_path_description = "Path to the file to export the history to"
history_file_path_description = "Path to the history file"

# Error messages
[errors]
Expand Down
147 changes: 143 additions & 4 deletions hatchling/core/chat/message_history.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,14 @@
"""

from typing import List, Dict, Any, Optional
import json
from hatchling.core.logging.logging_manager import logging_manager
from hatchling.core.llm.providers.registry import ProviderRegistry
from hatchling.core.llm.event_system import EventSubscriber, Event, EventType
from hatchling.config.llm_settings import ELLMProvider

from hatchling.core.llm.data_structures import ToolCallParsedResult, ToolCallExecutionResult
from hatchling.config.settings import AppSettings

class MessageHistory(EventSubscriber):
"""Event-driven message history manager with canonical and provider-specific histories.
Expand All @@ -19,8 +21,9 @@ class MessageHistory(EventSubscriber):
provider-specific histories based on the current LLM provider.
"""

def __init__(self):
def __init__(self, settings: AppSettings = None):
"""Initialize an empty message history with dual-history support."""
self.settings = settings or AppSettings.get_instance()
# Canonical history storing all events in normalized format
self.canonical_history: List[Dict[str, Any]] = []

Expand Down Expand Up @@ -196,6 +199,12 @@ def _regenerate_provider_history(self) -> None:
"""Regenerate provider-specific history from canonical history."""
self.provider_history = []

# Determine the provider to use for formatting
provider_to_use = self._current_provider
if provider_to_use is None:
provider_to_use = self.settings.llm.provider_enum
self.logger.debug(f"_current_provider is None, using default provider from settings: {provider_to_use}")

for entry in self.canonical_history:
entry_type = entry["type"]

Expand All @@ -207,13 +216,13 @@ def _regenerate_provider_history(self) -> None:
tool_call = entry["data"]
provider_entry = {
"role": "assistant",
"tool_calls": [ProviderRegistry.get_provider(self._current_provider).hatchling_to_llm_tool_call(tool_call)]
"tool_calls": [ProviderRegistry.get_provider(provider_to_use).hatchling_to_llm_tool_call(tool_call)]
}
elif entry_type == "tool_result":
tool_result = entry["data"]
provider_entry = {
"role": "tool",
**ProviderRegistry.get_provider(self._current_provider).hatchling_to_provider_tool_result(tool_result)
**ProviderRegistry.get_provider(provider_to_use).hatchling_to_provider_tool_result(tool_result)
}
else:
continue # Skip unknown entry types
Expand Down Expand Up @@ -315,10 +324,140 @@ def clear(self) -> None:

self.logger.info("MessageHistory - Cleared!")

def delete_last_n_messages(self, n: int) -> None:
"""Delete the last 'n' messages from the history.

Args:
n (int): The number of messages to delete from the end of the history.
"""
if n <= 0:
self.logger.warning(f"Attempted to delete {n} messages. 'n' must be a positive integer.")
return

if len(self.canonical_history) < n:
self.logger.warning(f"Attempted to delete {n} messages, but only {len(self.canonical_history)} exist. Clearing history.")
self.canonical_history = []
else:
self.canonical_history = self.canonical_history[:-n]

self._regenerate_provider_history()
self.logger.info(f"Deleted last {n} messages. Current history length: {len(self.canonical_history)}")

def delete_last_message(self) -> None:
"""Delete the last message from the history."""
self.delete_last_n_messages(1)
self.logger.info("Deleted last message.")

def keep_last_n_messages(self, n: int) -> None:
"""Keep only the last 'n' messages in the history, deleting older ones.

Args:
n (int): The number of most recent messages to keep.
"""
if n <= 0:
self.logger.warning(f"Attempted to keep {n} messages. 'n' must be a positive integer. Clearing history.")
self.canonical_history = []
elif len(self.canonical_history) > n:
self.canonical_history = self.canonical_history[-n:]

self._regenerate_provider_history()
self.logger.info(f"Kept last {n} messages. Current history length: {len(self.canonical_history)}")

def __len__(self) -> int:
"""Get the number of entries in canonical history.

Returns:
int: The number of entries in the canonical history.
"""
return len(self.canonical_history)
return len(self.canonical_history)

def get_formatted_history(self, n: Optional[int] = None) -> str:
"""Get a formatted string representation of the canonical history.

Args:
n (Optional[int]): If provided, return only the last 'n' messages.

Returns:
str: A multi-line string with formatted history entries.
"""
history_to_format = self.canonical_history
if n is not None and n > 0:
history_to_format = self.canonical_history[-n:]

formatted_output = []
for i, entry in enumerate(history_to_format):
entry_type = entry["type"]
data = entry["data"]

# Use the enumerate index for display
display_index = i + 1

if entry_type == "user":
formatted_output.append(f"[{display_index}] User: {data.get("content", "")}")
elif entry_type == "assistant":
formatted_output.append(f"[{display_index}] Assistant: {data.get("content", "")}")
elif entry_type == "tool_call":
tool_call = data
formatted_output.append(f"[{display_index}] Tool Call: {tool_call.function_name}({tool_call.arguments})")
elif entry_type == "tool_result":
tool_result = data
formatted_output.append(f"[{display_index}] Tool Result ({tool_result.function_name}): {tool_result.result or tool_result.error}")
else:
formatted_output.append(f"[{display_index}] Unknown Entry Type: {entry_type} - {data}")

if not formatted_output:
return "(History is empty)"

return "\n".join(formatted_output)

def save_history_to_file(self, file_path: str) -> None:
"""Save the canonical history to a specified file in JSON format.

Args:
file_path (str): The absolute path to the file where the history will be saved.
"""
try:
serializable_history = []
for entry in self.canonical_history:
serializable_entry = entry.copy()
if "data" in serializable_entry and hasattr(serializable_entry["data"], "to_dict"):
serializable_entry["data"] = serializable_entry["data"].to_dict()
serializable_history.append(serializable_entry)

with open(file_path, 'w', encoding='utf-8') as f:
json.dump(serializable_history, f, ensure_ascii=False, indent=4)
self.logger.info(f"History saved to {file_path}")
except Exception as e:
self.logger.error(f"Failed to save history to {file_path}: {e}")

def load_history_from_file(self, file_path: str) -> None:
"""Load canonical history from a specified JSON file.

Args:
file_path (str): The absolute path to the file from which the history will be loaded.
"""
try:
with open(file_path, 'r', encoding='utf-8') as f:
loaded_history = json.load(f)

deserialized_history = []
for entry in loaded_history:
if entry["type"] == "tool_call":
entry["data"] = ToolCallParsedResult(**entry["data"])
elif entry["type"] == "tool_result":
entry["data"] = ToolCallExecutionResult(**entry["data"])
deserialized_history.append(entry)

self.canonical_history = deserialized_history
# After loading, ensure the current provider is set for history regeneration
# This prevents issues where _current_provider might be None after loading
# and _regenerate_provider_history tries to use it.
self._current_provider = self.settings.llm.provider_enum
self._regenerate_provider_history()
self.logger.info(f"History loaded from {file_path}")
except FileNotFoundError:
self.logger.error(f"History file not found: {file_path}")
except json.JSONDecodeError as e:
self.logger.error(f"Failed to decode JSON from {file_path}: {e}")
except Exception as e:
self.logger.error(f"Failed to load history from {file_path}: {e}")
6 changes: 4 additions & 2 deletions hatchling/core/chat/message_history_registry.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
from typing import Dict, List, Optional

from hatchling.core.logging.logging_manager import logging_manager
from hatchling.config.settings import AppSettings
from .message_history import MessageHistory

logger = logging_manager.get_session("MessageHistoryRegistry")
Expand Down Expand Up @@ -64,19 +65,20 @@ def get_history(cls, uid: str) -> Optional[MessageHistory]:


@classmethod
def get_or_create_history(cls, uid: str, settings: Optional[AppSettings] = None) -> MessageHistory:
    """Get a MessageHistory instance for the given UID, or create one if it doesn't exist.

    Args:
        uid (str): The UID to get or create a history for.
        settings (Optional[AppSettings]): Application settings to pass to the
            MessageHistory constructor when a new history is created.

    Returns:
        MessageHistory: The existing or newly created MessageHistory instance.
    """
    # Fix: the file contained stale old-version lines interleaved with the
    # new ones (a duplicate `def` signature and a duplicate constructor
    # call, leftover diff artifacts) — collapsed to the current version.
    history = cls._histories.get(uid)
    if history is not None:
        return history
    history = MessageHistory(settings=settings)
    cls._histories[uid] = history
    logger.debug(f"Created and registered new history for UID '{uid}'")
    return history
Expand Down
3 changes: 2 additions & 1 deletion hatchling/core/llm/chat_session.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ def __init__(self, settings: AppSettings = None):
self.tool_execution = MCPToolExecution()
# Initialize message components
self.session_id = str(uuid.uuid4())
self.history = MessageHistoryRegistry.get_or_create_history(self.session_id)
self.history = MessageHistoryRegistry.get_or_create_history(self.session_id, settings=self.settings)

# Create tool chaining subscriber for automatic tool calling chains
self._tool_chaining_subscriber = ToolChainingSubscriber(self.settings, self.tool_execution, self.session_id)
Expand Down Expand Up @@ -106,5 +106,6 @@ async def send_message(self, user_message: str) -> None:
# In the future, we must allow users to specify tools directly in the query.
payload = provider.add_tools_to_payload(payload)

self.logger.debug(f"Sending payload to LLM: {payload}")
# Stream the response using provider abstraction
await provider.stream_chat_response(payload)
2 changes: 1 addition & 1 deletion hatchling/core/llm/data_structures.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ class ToolCallExecutionResult:
tool_call_id: str
function_name: str
arguments: Dict[str, Any]
result: Any
result: Dict[str, Any]
error: Optional[str] = None

def to_dict(self) -> Dict[str, Any]:
Expand Down
8 changes: 6 additions & 2 deletions hatchling/core/llm/providers/ollama_provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -474,6 +474,10 @@ def hatchling_to_provider_tool_result(self, tool_result: ToolCallExecutionResult
"""

return {
"content": str(tool_result.result.content[0].text) if tool_result.result.content[0].text else "No result",
"tool_name": tool_result.function_name
"content": json.dumps({
"tool_name": tool_result.function_name,
"content": str(tool_result.result["content"][0]["text"]) if tool_result.result and "content" in tool_result.result and tool_result.result["content"] and tool_result.result["content"][0] and "text" in tool_result.result["content"][0] else "No result",
"structuredContent": tool_result.result.get("structuredContent"),
"isError": tool_result.result.get("isError", False)
})
}
7 changes: 6 additions & 1 deletion hatchling/core/llm/providers/openai_provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -545,5 +545,10 @@ def hatchling_to_provider_tool_result(self, tool_result: ToolCallExecutionResult
"""
return {
"tool_call_id": tool_result.tool_call_id,
"content": str(tool_result.result.content[0].text) if tool_result.result.content[0].text else "No result",
"content": json.dumps({
"tool_name": tool_result.function_name,
"content": str(tool_result.result["content"][0]["text"]) if tool_result.result and "content" in tool_result.result and tool_result.result["content"] and tool_result.result["content"][0] and "text" in tool_result.result["content"][0] else "No result",
"structuredContent": tool_result.result.get("structuredContent"),
"isError": tool_result.result.get("isError", False)
})
}
34 changes: 28 additions & 6 deletions hatchling/mcp_utils/mcp_tool_execution.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import logging
import time
import asyncio
import json
from mcp.types import CallToolResult

from hatchling.mcp_utils.manager import mcp_manager
Expand Down Expand Up @@ -88,28 +89,49 @@ async def execute_tool(self, parsed_tool_call: ToolCallParsedResult) -> None:
self.logger.debug(f"Tool {parsed_tool_call.function_name} executed with responses: {tool_response}")

if tool_response and not tool_response.isError:
# Convert CallToolResult to a serializable dictionary
serializable_tool_response = tool_response.__dict__.copy()
if "content" in serializable_tool_response and isinstance(serializable_tool_response["content"], list):
serializable_tool_response["content"] = [
item.text if hasattr(item, "text") else str(item)
for item in serializable_tool_response["content"]
]

result_obj = ToolCallExecutionResult(
**parsed_tool_call.to_dict(),
result=tool_response,
result=serializable_tool_response,
error=None
)
self._event_publisher.publish(EventType.MCP_TOOL_CALL_RESULT, result_obj.to_dict())
else:
# Convert CallToolResult to a serializable dictionary for error case as well
serializable_tool_response = tool_response.__dict__.copy()
if "content" in serializable_tool_response and isinstance(serializable_tool_response["content"], list):
serializable_tool_response["content"] = [
item.text if hasattr(item, "text") else str(item)
for item in serializable_tool_response["content"]
]

result_obj = ToolCallExecutionResult(
**parsed_tool_call.to_dict(),
result=tool_response,
result=serializable_tool_response,
error="Tool execution failed or returned no valid response"
)
self._event_publisher.publish(EventType.MCP_TOOL_CALL_ERROR, result_obj.to_dict())

except Exception as e:
self.logger.error(f"Error executing tool: {e}")
# For error case, create a serializable representation of the error result
error_content = [{"type": "text", "text": f"{e}"}]
serializable_error_response = {
"meta": None,
"content": [item["text"] if isinstance(item, dict) and "text" in item else str(item) for item in error_content],
"structuredContent": None,
"isError": True,
}
result_obj = ToolCallExecutionResult(
**parsed_tool_call.to_dict(),
result=CallToolResult(
content=[{"type": "text", "text": f"{e}"}],
isError=True,
),
result=serializable_error_response,
error=str(e)
)
self._event_publisher.publish(EventType.MCP_TOOL_CALL_ERROR, result_obj.to_dict())
Expand Down
Loading