From c877d50dcb95302c97abb51105c323483c0d2610 Mon Sep 17 00:00:00 2001 From: Cyrus Ye Date: Mon, 8 Sep 2025 23:47:24 -0500 Subject: [PATCH 1/3] feat(examples):social graph demo --- examples/social_graph_demo/README.md | 112 +++++ examples/social_graph_demo/__init__.py | 20 + .../demo_knowledge_evaluator.py | 460 ++++++++++++++++++ .../social_graph_demo/knowledge_agents.py | 128 +++++ .../knowledge_propagation_demo.py | 194 ++++++++ .../private_conversation_environment.py | 177 +++++++ .../social_network_knowledge_demo.py | 262 ++++++++++ 7 files changed, 1353 insertions(+) create mode 100644 examples/social_graph_demo/README.md create mode 100644 examples/social_graph_demo/__init__.py create mode 100644 examples/social_graph_demo/demo_knowledge_evaluator.py create mode 100644 examples/social_graph_demo/knowledge_agents.py create mode 100644 examples/social_graph_demo/knowledge_propagation_demo.py create mode 100644 examples/social_graph_demo/private_conversation_environment.py create mode 100644 examples/social_graph_demo/social_network_knowledge_demo.py diff --git a/examples/social_graph_demo/README.md b/examples/social_graph_demo/README.md new file mode 100644 index 0000000..8fea8de --- /dev/null +++ b/examples/social_graph_demo/README.md @@ -0,0 +1,112 @@ +# Social Graph Knowledge Propagation Demo + +这个demo实现了一个多智能体社交网络中的知识传播基准测试。 + +## 功能特点 + +- **知识传播模拟**: 模拟知识在社交网络中的传播过程 +- **私有对话环境**: 智能体只能看到邻居的消息,模拟真实的社交网络 +- **LLM语义评估**: 使用大语言模型进行语义理解和知识评估 +- **完整的传播链追踪**: 追踪知识从源头到目标的完整传播路径 + +## 文件说明 + +### 核心文件 + +- `social_network_knowledge_demo.py` - 主要演示文件,展示GPT-5发布信息传播 +- `demo_knowledge_evaluator.py` - LLM驱动的知识传播评估器 +- `knowledge_agents.py` - 具有知识存储和学习能力的智能体 +- `private_conversation_environment.py` - 私有对话环境实现 + +### 额外演示 + +- `knowledge_propagation_demo.py` - 使用自定义知识智能体的传播演示 + +## 场景设置 + +### 智能体角色 + +1. **Sam Altman** (知识源) - OpenAI CEO,拥有GPT-5发布信息 +2. **Alice Chen** (研究员) - 从Sam获取信息,传递给Bob +3. **Bob Wilson** (记者) - 从Alice获取信息,传递给Reporter +4. 
**Sarah Reporter** (报告者) - 最终接收者,询问发布细节
+""" + +from .demo_knowledge_evaluator import DemoKnowledgeEvaluator +from .knowledge_agents import KnowledgeAgent +from .private_conversation_environment import ( + PrivateConversationEnvironment, + PrivateConversationTinyChatEnvironment, +) + +__version__ = '1.0.0' +__all__ = [ + 'DemoKnowledgeEvaluator', + 'KnowledgeAgent', + 'PrivateConversationEnvironment', + 'PrivateConversationTinyChatEnvironment', +] diff --git a/examples/social_graph_demo/demo_knowledge_evaluator.py b/examples/social_graph_demo/demo_knowledge_evaluator.py new file mode 100644 index 0000000..3b4c68a --- /dev/null +++ b/examples/social_graph_demo/demo_knowledge_evaluator.py @@ -0,0 +1,460 @@ +""" +LLM-based knowledge propagation evaluator using semantic understanding. +""" + +import json +import re +from typing import Any + +from tiny_chat.evaluator import Evaluator +from tiny_chat.messages import Message +from tiny_chat.server.config import ModelProviderConfig +from tiny_chat.server.providers import ModelProviderFactory + + +class DemoKnowledgeEvaluator(Evaluator): + """ + LLM-based evaluator that uses semantic understanding to assess + knowledge propagation in conversations. + """ + + def __init__( + self, + target_knowledge: str = 'GPT-5 will be released in May 2025', + knowledge_source: str = 'Sam Altman', + target_agents: list[str] | None = None, + model_name: str = 'gpt-4o-mini', + ): + """ + Initialize the knowledge evaluator. 
+ + Args: + target_knowledge: The specific knowledge to track + knowledge_source: The original source agent + target_agents: List of agents that should receive the knowledge + model_name: LLM model to use for evaluation + """ + self.target_knowledge = target_knowledge + self.knowledge_source = knowledge_source + self.target_agents = target_agents or [] + + model_config = ModelProviderConfig( + name=model_name, type='openai', temperature=0.1 + ) + self.model_provider = ModelProviderFactory.create_provider(model_config) + + self.agents_with_knowledge = set() + self._evaluation_cache = {} + + async def _analyze_message_for_knowledge( + self, agent_name: str, message_text: str + ) -> dict[str, Any]: + """Analyze a message for knowledge content using LLM.""" + cache_key = f'{agent_name}:{hash(message_text)}' + if cache_key in self._evaluation_cache: + return self._evaluation_cache[cache_key] + + if len(message_text.strip()) < 10 or 'did nothing' in message_text.lower(): + result = { + 'agent': agent_name, + 'has_knowledge': False, + 'completeness': 0.0, + 'accuracy': 0.0, + 'confidence': 0.0, + 'reasoning': 'Message too short or no action taken', + 'key_information': [], + 'message_snippet': message_text[:50] + '...' + if len(message_text) > 50 + else message_text, + } + self._evaluation_cache[cache_key] = result + return result + + evaluation_prompt = f""" +Analyze whether the following message demonstrates knowledge about this specific information: +TARGET KNOWLEDGE: "{self.target_knowledge}" +MESSAGE FROM {agent_name}: +{message_text} + +Determine if this message shows that {agent_name} has learned or possesses the target knowledge. +Consider these factors: +1. Does the message explicitly mention the target knowledge? +2. Does the message show understanding of the key concepts? +3. Does the message indicate the agent has learned this information? +4. Even if not explicitly stated, does the context suggest knowledge acquisition? 
+ +Respond ONLY with a JSON object in this exact format: +{{ + "has_knowledge": true/false, + "completeness": 0-100 (percentage of target knowledge demonstrated), + "accuracy": 0-100 (accuracy of the information shared), + "confidence": 0-100 (confidence in this evaluation), + "reasoning": "detailed explanation of why this agent does/doesn't have the knowledge", + "key_information_found": ["list", "of", "key", "pieces", "found"] +}} +""" + + try: + from litellm import acompletion + + print(f' Evaluating {agent_name} message with LLM...') + response = await acompletion( + model=self.model_provider._get_agenerate_model_name(), + messages=[{'role': 'user', 'content': evaluation_prompt}], + temperature=0.1, + ) + print(f' LLM response received for {agent_name}') + + response_content = response.choices[0].message.content + + json_match = re.search(r'\{.*\}', response_content, re.DOTALL) + if json_match: + result = json.loads(json_match.group()) + + def normalize_percentage(value): + if value is None: + return 0.0 + if value <= 1.0: + return value * 100 + return value + + llm_result = { + 'agent': agent_name, + 'has_knowledge': result.get('has_knowledge', False), + 'completeness': normalize_percentage( + result.get('completeness', 0.0) + ), + 'accuracy': normalize_percentage(result.get('accuracy', 0.0)), + 'confidence': normalize_percentage(result.get('confidence', 0.0)), + 'reasoning': result.get('reasoning', ''), + 'key_information': result.get('key_information_found', []), + 'message_snippet': message_text[:100] + '...' 
+ if len(message_text) > 100 + else message_text, + } + self._evaluation_cache[cache_key] = llm_result + return llm_result + else: + return self._fallback_analysis(agent_name, message_text) + + except Exception as e: + print(f'LLM evaluation failed for {agent_name}: {e}') + return self._fallback_analysis(agent_name, message_text) + + def _fallback_analysis(self, agent_name: str, message_text: str) -> dict[str, Any]: + """Fallback keyword-based analysis if LLM evaluation fails.""" + knowledge_keywords = ['gpt-5', 'gpt5', 'may 2025', 'release'] + + message_lower = message_text.lower() + keywords_found = [kw for kw in knowledge_keywords if kw in message_lower] + + has_knowledge = len(keywords_found) >= 2 + + return { + 'agent': agent_name, + 'has_knowledge': has_knowledge, + 'completeness': len(keywords_found) * 25.0, + 'accuracy': 80.0 if has_knowledge else 0.0, + 'confidence': 60.0, + 'reasoning': f'Fallback keyword analysis. Found keywords: {keywords_found}', + 'key_information': keywords_found, + 'message_snippet': message_text[:100] + '...' 
+ if len(message_text) > 100 + else message_text, + } + + def __call__( + self, turn_number: int, messages: list[tuple[str, Message]] + ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]: + """Synchronous evaluation using fallback analysis.""" + knowledge_analysis = [] + + for source, message in messages: + if source == 'Environment': + continue + + message_text = message.to_natural_language() + analysis = self._fallback_analysis(source, message_text) + + if analysis['has_knowledge']: + knowledge_analysis.append(analysis) + if source != self.knowledge_source: + self.agents_with_knowledge.add(source) + + total_target_agents = ( + len(self.target_agents) + if self.target_agents + else len( + set( + msg[0] + for msg in messages + if msg[0] != 'Environment' and msg[0] != self.knowledge_source + ) + ) + ) + agents_reached = len(self.agents_with_knowledge) + + if total_target_agents > 0: + propagation_rate = agents_reached / total_target_agents + else: + propagation_rate = 0.0 + + if knowledge_analysis: + accuracy_rate = sum(a['accuracy'] for a in knowledge_analysis) / len( + knowledge_analysis + ) + if accuracy_rate <= 1.0: + accuracy_rate = accuracy_rate * 100 + else: + accuracy_rate = 0.0 + + analysis_details = { + 'total_knowledge_mentions': len(knowledge_analysis), + 'agents_with_knowledge': list(self.agents_with_knowledge), + 'propagation_rate': propagation_rate, + 'accuracy_rate': accuracy_rate, + 'knowledge_analysis': knowledge_analysis, + } + + accuracy_for_score = ( + accuracy_rate / 100 if accuracy_rate > 1.0 else accuracy_rate + ) + overall_score = (propagation_rate * 0.6 + accuracy_for_score * 0.4) * 10 + + comments = self._generate_evaluation_comments(analysis_details) + + return [('environment', (('knowledge_propagation', overall_score), comments))] + + async def __acall__( + self, turn_number: int, messages: list[tuple[str, Message]] + ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]: + """Async version using LLM evaluation 
- analyzes both speaking and listening.""" + + knowledge_analysis = [] + + knowledge_messages = [] + + for source, message in messages: + if source == 'Environment': + continue + + message_text = message.to_natural_language() + analysis = await self._analyze_message_for_knowledge(source, message_text) + + if analysis['has_knowledge']: + knowledge_analysis.append(analysis) + knowledge_messages.append((source, message_text, analysis)) + if source != self.knowledge_source: + self.agents_with_knowledge.add(source) + + await self._analyze_knowledge_reception( + messages, knowledge_messages, knowledge_analysis + ) + + total_target_agents = ( + len(self.target_agents) + if self.target_agents + else len( + set( + msg[0] + for msg in messages + if msg[0] != 'Environment' and msg[0] != self.knowledge_source + ) + ) + ) + agents_reached = len(self.agents_with_knowledge) + + if total_target_agents > 0: + propagation_rate = agents_reached / total_target_agents + else: + propagation_rate = 0.0 + + if knowledge_analysis: + accuracy_rate = sum(a['accuracy'] for a in knowledge_analysis) / len( + knowledge_analysis + ) + if accuracy_rate <= 1.0: + accuracy_rate = accuracy_rate * 100 + else: + accuracy_rate = 0.0 + + analysis_details = { + 'total_knowledge_mentions': len(knowledge_analysis), + 'agents_with_knowledge': list(self.agents_with_knowledge), + 'propagation_rate': propagation_rate, + 'accuracy_rate': accuracy_rate, + 'knowledge_analysis': knowledge_analysis, + } + + accuracy_for_score = ( + accuracy_rate / 100 if accuracy_rate > 1.0 else accuracy_rate + ) + overall_score = (propagation_rate * 0.6 + accuracy_for_score * 0.4) * 10 + + comments = self._generate_evaluation_comments(analysis_details) + + return [('environment', (('knowledge_propagation', overall_score), comments))] + + def _generate_evaluation_comments(self, details: dict[str, Any]) -> str: + """Generate human-readable evaluation comments.""" + comments = [] + + comments.append('Knowledge Propagation Analysis:') 
+ comments.append(f"- Knowledge mentions: {details['total_knowledge_mentions']}") + comments.append(f"- Agents reached: {len(details['agents_with_knowledge'])}") + comments.append(f"- Propagation rate: {details['propagation_rate']:.1%}") + comments.append(f"- Accuracy rate: {details['accuracy_rate']:.1f}%") + + if details['agents_with_knowledge']: + comments.append( + f"- Agents with knowledge: {', '.join(details['agents_with_knowledge'])}" + ) + else: + comments.append('- No knowledge propagation detected') + + if details['knowledge_analysis']: + comments.append('\nDetailed LLM Analysis:') + for i, analysis in enumerate(details['knowledge_analysis'][:3], 1): + comments.append(f"{i}. {analysis['agent']}:") + comments.append( + f" Has Knowledge: {analysis.get('has_knowledge', 'N/A')}" + ) + comments.append( + f" Completeness: {analysis.get('completeness', 0):.1f}%" + ) + comments.append(f" Accuracy: {analysis.get('accuracy', 0):.1f}%") + comments.append(f" Confidence: {analysis.get('confidence', 0):.1f}%") + if analysis.get('reasoning'): + comments.append(f" Reasoning: {analysis['reasoning']}") + if analysis.get('key_information'): + comments.append(f" Key Info: {analysis['key_information']}") + if analysis.get('message_snippet'): + comments.append(f" Message: \"{analysis['message_snippet']}\"") + + return '\n'.join(comments) + + async def _analyze_knowledge_reception( + self, messages, knowledge_messages, knowledge_analysis + ): + """Analyze if agents received knowledge by listening to others.""" + + conversation_flow = [] + for source, message in messages: + if source != 'Environment': + conversation_flow.append((source, message.to_natural_language())) + + for knowledge_source, knowledge_text, knowledge_info in knowledge_messages: + potential_listeners = set( + msg[0] + for msg in messages + if msg[0] != 'Environment' and msg[0] != knowledge_source + ) + + for listener in potential_listeners: + if listener not in self.agents_with_knowledge: + reception_analysis = 
( + await self._analyze_knowledge_reception_for_agent( + listener, + knowledge_source, + knowledge_text, + conversation_flow, + ) + ) + + if reception_analysis.get('received_knowledge', False): + knowledge_analysis.append(reception_analysis) + self.agents_with_knowledge.add(listener) + + async def _analyze_knowledge_reception_for_agent( + self, listener, knowledge_source, knowledge_text, conversation_flow + ): + """Analyze if a specific agent received knowledge by listening.""" + + knowledge_index = -1 + for i, (speaker, text) in enumerate(conversation_flow): + if speaker == knowledge_source and knowledge_text[:50] in text: + knowledge_index = i + break + + if knowledge_index == -1: + return {'received_knowledge': False} + + listener_responses = [] + for i in range(knowledge_index + 1, len(conversation_flow)): + speaker, text = conversation_flow[i] + if speaker == listener: + listener_responses.append(text) + + if not listener_responses: + return { + 'agent': listener, + 'received_knowledge': True, + 'completeness': 80.0, + 'accuracy': 90.0, + 'confidence': 70.0, + 'reasoning': f'{listener} was present when {knowledge_source} shared knowledge', + 'key_information': ['GPT-5', 'heard from ' + knowledge_source], + 'message_snippet': f'(Heard from {knowledge_source})', + } + + response_text = ' '.join(listener_responses) + + reception_prompt = f""" +Analyze if {listener} received and understood knowledge from {knowledge_source}. + +KNOWLEDGE SHARED BY {knowledge_source}: +{knowledge_text} + +{listener}'S SUBSEQUENT RESPONSES: +{response_text} + +Determine if {listener}'s responses show they received and understood the shared information. +Look for: +1. Acknowledgments like "thanks", "got it", "I'll share this" +2. References to GPT-5 or release information +3. Plans to pass information to others +4. 
Any indication they heard and processed the information + +Respond ONLY with a JSON object: +{{ + "received_knowledge": true/false, + "completeness": 0-100, + "accuracy": 0-100, + "confidence": 0-100, + "reasoning": "explanation", + "key_information_found": ["list", "of", "indicators"] +}} +""" + + try: + from litellm import acompletion + + response = await acompletion( + model=self.model_provider._get_agenerate_model_name(), + messages=[{'role': 'user', 'content': reception_prompt}], + temperature=0.1, + ) + + response_content = response.choices[0].message.content + json_match = re.search(r'\{.*\}', response_content, re.DOTALL) + + if json_match: + result = json.loads(json_match.group()) + return { + 'agent': listener, + 'received_knowledge': result.get('received_knowledge', False), + 'completeness': result.get('completeness', 0.0), + 'accuracy': result.get('accuracy', 0.0), + 'confidence': result.get('confidence', 0.0), + 'reasoning': result.get('reasoning', ''), + 'key_information': result.get('key_information_found', []), + 'message_snippet': response_text[:100] + '...' + if len(response_text) > 100 + else response_text, + } + else: + return {'received_knowledge': False} + + except Exception as e: + print(f'Reception analysis failed for {listener}: {e}') + return {'received_knowledge': False} diff --git a/examples/social_graph_demo/knowledge_agents.py b/examples/social_graph_demo/knowledge_agents.py new file mode 100644 index 0000000..d6b2554 --- /dev/null +++ b/examples/social_graph_demo/knowledge_agents.py @@ -0,0 +1,128 @@ +""" +Knowledge-aware agents that can store, share, and propagate information through social networks. 
+""" + +from typing import Any + +from tiny_chat import AgentAction, BaseAgentProfile, LLMAgent, Observation + + +class KnowledgeAgent(LLMAgent): + """An agent capable of storing and sharing knowledge through memory.""" + + def __init__( + self, + agent_name: str | None = None, + uuid_str: str | None = None, + agent_profile: BaseAgentProfile | dict[str, Any] | None = None, + profile_jsonl_path: str | None = None, + model_provider: Any = None, + script_like: bool = False, + initial_knowledge: list[str] | None = None, + ) -> None: + super().__init__( + agent_name=agent_name, + uuid_str=uuid_str, + agent_profile=agent_profile, + profile_jsonl_path=profile_jsonl_path, + model_provider=model_provider, + script_like=script_like, + ) + + self.memory: list[str] = initial_knowledge or [] + + def add_knowledge(self, knowledge: str) -> None: + """Add new knowledge to agent's memory.""" + if knowledge and knowledge not in self.memory: + self.memory.append(knowledge) + + def get_all_knowledge(self) -> list[str]: + """Get all knowledge stored in memory.""" + return self.memory.copy() + + def search_knowledge(self, query: str) -> list[str]: + """Search for relevant knowledge based on query.""" + relevant_knowledge = [] + query_lower = query.lower() + + for knowledge in self.memory: + if any(keyword in knowledge.lower() for keyword in query_lower.split()): + relevant_knowledge.append(knowledge) + + return relevant_knowledge + + async def act(self, obs: Observation) -> AgentAction: + """Enhanced act method that considers knowledge sharing and learning.""" + self.recv_message('Environment', obs) + await self._ensure_goal() + + if self._only_none_action(obs.available_actions): + return AgentAction(action_type='none', argument='') + + await self._learn_from_observation(obs) + + enhanced_history = self._build_enhanced_history() + + action = await self._model_provider.agenerate_action( + history=enhanced_history, + turn_number=obs.turn_number, + action_types=obs.available_actions, + 
agent=self.agent_name, + goal=self.goal, + script_like=self.script_like, + ) + + await self._learn_from_own_action(action) + + return action + + def _build_enhanced_history(self) -> str: + """Build conversation history enhanced with agent's knowledge.""" + base_history = self._history_text(self.inbox) + + if self.memory: + knowledge_context = 'My current knowledge: ' + '; '.join(self.memory) + return f'{knowledge_context}\n\n{base_history}' + + return base_history + + def _extract_knowledge_from_conversation(self, conversation: str) -> list[str]: + """Extract potential knowledge from conversation text.""" + knowledge_indicators = [ + 'will be released', + 'is scheduled for', + 'announced that', + 'confirmed that', + 'stated that', + ] + + extracted = [] + sentences = conversation.split('.') + + for sentence in sentences: + sentence = sentence.strip() + if any(indicator in sentence.lower() for indicator in knowledge_indicators): + if len(sentence) > 20: + extracted.append(sentence) + + return extracted + + async def _learn_from_observation(self, obs: Observation) -> None: + """Learn knowledge from observation (what other agents said).""" + obs_text = obs.to_natural_language() + new_knowledge = self._extract_knowledge_from_conversation(obs_text) + + for knowledge in new_knowledge: + print(f'[{self.agent_name}] Learned: {knowledge}') + self.add_knowledge(knowledge) + + async def _learn_from_own_action(self, action: AgentAction) -> None: + """Learn knowledge from own action (what I just said).""" + if action.action_type == 'speak': + action_text = action.to_natural_language() + new_knowledge = self._extract_knowledge_from_conversation(action_text) + + for knowledge in new_knowledge: + if knowledge not in self.memory: + print(f'[{self.agent_name}] Reinforced knowledge: {knowledge}') + self.add_knowledge(knowledge) diff --git a/examples/social_graph_demo/knowledge_propagation_demo.py b/examples/social_graph_demo/knowledge_propagation_demo.py new file mode 100644 index 
0000000..d616301 --- /dev/null +++ b/examples/social_graph_demo/knowledge_propagation_demo.py @@ -0,0 +1,194 @@ +""" +Knowledge Propagation Demo with Private Conversations and True Knowledge Updates + +This demo demonstrates true knowledge propagation where: +1. Agents have persistent memory that gets updated +2. Conversations are private (only participants know the content) +3. Knowledge spreads through the social network via direct interactions +""" + +import asyncio +import os +import sys +from pathlib import Path + +sys.path.append(str(Path(__file__).parent)) + +from demo_knowledge_evaluator import DemoKnowledgeEvaluator +from knowledge_agents import KnowledgeAgent +from private_conversation_environment import PrivateConversationEnvironment + +from tiny_chat import BaseAgentProfile +from tiny_chat.server.config import ModelProviderConfig +from tiny_chat.server.providers import ModelProviderFactory + + +async def create_knowledge_agent( + name: str, + age: int, + occupation: str, + initial_knowledge: list[str] = None, + goal: str = None, +) -> KnowledgeAgent: + """Create a knowledge agent with proper model provider.""" + + model_config = ModelProviderConfig( + name='gpt-4o-mini', type='openai', temperature=0.7 + ) + model_provider = ModelProviderFactory.create_provider(model_config) + + profile = BaseAgentProfile( + first_name=name.split()[0], + last_name=name.split()[-1] if len(name.split()) > 1 else '', + age=age, + occupation=occupation, + speaking_id=0, + personality_and_values=f'Curious {occupation.lower()} interested in AI developments', + ) + + agent = KnowledgeAgent( + agent_name=name, + agent_profile=profile, + model_provider=model_provider, + initial_knowledge=initial_knowledge or [], + ) + + if goal: + agent.goal = goal + + return agent + + +async def setup_sam_altman_scenario(): + """Set up the Sam Altman GPT-5 knowledge propagation scenario.""" + + print('Setting up Sam Altman GPT-5 Knowledge Propagation Scenario') + print('=' * 60) + + sam = await 
create_knowledge_agent( + name='Sam Altman', + age=39, + occupation='CEO of OpenAI', + initial_knowledge=['GPT-5 will be released in May 2025'], + goal='Share information about GPT-5 release with researchers when appropriate', + ) + + alice = await create_knowledge_agent( + name='Alice Chen', + age=32, + occupation='AI Researcher', + goal='Learn about upcoming AI releases and share with colleagues', + ) + + bob = await create_knowledge_agent( + name='Bob Wilson', + age=28, + occupation='Tech Journalist', + goal='Gather information about AI releases for reporting', + ) + + reporter = await create_knowledge_agent( + name='Sarah Reporter', + age=35, + occupation='Science Reporter', + goal='Investigate AI release timelines for news stories', + ) + + social_network = { + 'Sam Altman': ['Alice Chen'], + 'Alice Chen': ['Sam Altman', 'Bob Wilson'], + 'Bob Wilson': ['Alice Chen', 'Sarah Reporter'], + 'Sarah Reporter': ['Bob Wilson'], + } + + agents = { + 'Sam Altman': sam, + 'Alice Chen': alice, + 'Bob Wilson': bob, + 'Sarah Reporter': reporter, + } + + return agents, social_network + + +async def run_evaluation(agents: dict, conversation_logs: list): + """Run GPT-5 knowledge evaluation on the results.""" + + evaluator = DemoKnowledgeEvaluator( + target_knowledge='GPT-5 will be released in May 2025', + knowledge_source='Sam Altman', + target_agents=['Alice Chen', 'Bob Wilson', 'Sarah Reporter'], + ) + + print('\n' + '=' * 50) + print('KNOWLEDGE PROPAGATION EVALUATION') + print('=' * 50) + + for agent_name, agent in agents.items(): + status = agent.get_knowledge_status() + + print(f'\n{agent_name}:') + print(f" Total Knowledge Items: {status['total_knowledge']}") + + if status['knowledge_items']: + for item in status['knowledge_items']: + print(f' - {item}') + else: + print(' - No knowledge items') + + has_gpt5_knowledge = [] + for agent_name, agent in agents.items(): + if agent_name == 'Sam Altman': + continue + + knowledge_items = agent.get_all_knowledge() + has_knowledge 
= any( + 'gpt-5' in item.lower() or 'may 2025' in item.lower() + for item in knowledge_items + ) + + if has_knowledge: + has_gpt5_knowledge.append(agent_name) + + total_target_agents = len(agents) - 1 + propagation_rate = ( + len(has_gpt5_knowledge) / total_target_agents if total_target_agents > 0 else 0 + ) + + print('\nPropagation Summary:') + print(' Knowledge source: Sam Altman') + print(f' Target agents: {total_target_agents}') + print(f' Agents with knowledge: {len(has_gpt5_knowledge)} ({has_gpt5_knowledge})') + print(f' Propagation rate: {propagation_rate:.1%}') + + return { + 'agents_with_knowledge': has_gpt5_knowledge, + 'propagation_rate': propagation_rate, + 'conversation_logs': conversation_logs, + } + + +async def main(): + """Main demo function.""" + + api_key = os.getenv('OPENAI_API_KEY') + if not api_key: + print('Error: OPENAI_API_KEY environment variable not set') + return + + agents, social_network = await setup_sam_altman_scenario() + + environment = PrivateConversationEnvironment( + agents=agents, social_network=social_network, max_rounds=5 + ) + + results = await environment.run_knowledge_propagation_simulation() + + evaluation_results = await run_evaluation(agents, results['conversation_logs']) + + print(f"\nSimulation completed in {results['total_rounds']} rounds") + print(f"Final propagation rate: {evaluation_results['propagation_rate']:.1%}") + + +if __name__ == '__main__': + asyncio.run(main()) diff --git a/examples/social_graph_demo/private_conversation_environment.py b/examples/social_graph_demo/private_conversation_environment.py new file mode 100644 index 0000000..7cd18f1 --- /dev/null +++ b/examples/social_graph_demo/private_conversation_environment.py @@ -0,0 +1,177 @@ +""" +Private conversation environment that supports knowledge propagation through direct agent interactions. +Only participants in a conversation can see the messages. 
+""" + +import asyncio +from typing import Any + +from knowledge_agents import KnowledgeAgent + +from tiny_chat import TinyChatEnvironment + + +class PrivateConversationEnvironment: + """ + Environment that manages private conversations between agents. + Knowledge propagates only through direct interactions. + """ + + def __init__( + self, + agents: dict[str, KnowledgeAgent], + social_network: dict[str, list[str]], + max_rounds: int = 10, + ): + self.agents = agents + self.social_network = social_network + self.max_rounds = max_rounds + self.current_round = 0 + self.conversation_logs = [] + + async def run_knowledge_propagation_simulation(self) -> dict[str, Any]: + """ + Run the knowledge propagation simulation through private conversations. + """ + print('Starting knowledge propagation simulation') + print(f'Social network: {self.social_network}') + + print('\n=== Initial Knowledge State ===') + for agent_name, agent in self.agents.items(): + status = agent.get_knowledge_status() + print(f"{agent_name}: {status['total_knowledge']} items") + for item in status['knowledge_items']: + print(f' - {item}') + + for round_num in range(self.max_rounds): + self.current_round = round_num + 1 + print(f'\n=== Round {self.current_round} ===') + + any_new_knowledge = await self._conduct_round() + + if not any_new_knowledge: + print('No new knowledge shared this round. 
Ending simulation.') + break + + print('\n=== Final Knowledge State ===') + final_state = {} + for agent_name, agent in self.agents.items(): + status = agent.get_knowledge_status() + final_state[agent_name] = status + print(f"{agent_name}: {status['total_knowledge']} items") + for item in status['knowledge_items']: + print(f' - {item}') + + return { + 'final_knowledge_state': final_state, + 'total_rounds': self.current_round, + 'conversation_logs': self.conversation_logs, + } + + async def _conduct_round(self) -> bool: + """Conduct one round of private conversations.""" + any_new_knowledge = False + round_conversations = [] + + for agent_name, connected_agents in self.social_network.items(): + if agent_name not in self.agents: + continue + + agent = self.agents[agent_name] + + for connected_agent_name in connected_agents: + if connected_agent_name not in self.agents: + continue + + connected_agent = self.agents[connected_agent_name] + + knowledge_shared = await self._private_conversation( + agent, connected_agent, agent_name, connected_agent_name + ) + + if knowledge_shared: + any_new_knowledge = True + round_conversations.append( + { + 'from': agent_name, + 'to': connected_agent_name, + 'knowledge_shared': knowledge_shared, + } + ) + + self.conversation_logs.append( + {'round': self.current_round, 'conversations': round_conversations} + ) + + return any_new_knowledge + + async def _private_conversation( + self, + agent1: KnowledgeAgent, + agent2: KnowledgeAgent, + agent1_name: str, + agent2_name: str, + ) -> bool: + """Simulate a private conversation between two agents.""" + + agent1_knowledge = set(agent1.get_all_knowledge()) + agent2_knowledge = set(agent2.get_all_knowledge()) + + knowledge_to_share_1_to_2 = agent1_knowledge - agent2_knowledge + knowledge_to_share_2_to_1 = agent2_knowledge - agent1_knowledge + + knowledge_shared = False + + if knowledge_to_share_1_to_2: + shared_item = next(iter(knowledge_to_share_1_to_2)) + agent2.add_knowledge(shared_item) + 
print(f'{agent1_name} shared with {agent2_name}: {shared_item}') + knowledge_shared = True + + if knowledge_to_share_2_to_1: + shared_item = next(iter(knowledge_to_share_2_to_1)) + agent1.add_knowledge(shared_item) + print(f'{agent2_name} shared with {agent1_name}: {shared_item}') + knowledge_shared = True + + await asyncio.sleep(0.1) + + return knowledge_shared + + +class PrivateConversationTinyChatEnvironment(TinyChatEnvironment): + """ + TinyChatEnvironment that enforces private conversations based on neighbor_map. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # 确保设置neighbor_map + self.neighbor_map = getattr(self, 'neighbor_map', {}) + + def _get_visible_agents(self, agent_name: str) -> list[str]: + """Get list of agents visible to the given agent based on neighbor_map.""" + if hasattr(self, 'neighbor_map') and self.neighbor_map: + return self.neighbor_map.get(agent_name, []) + else: + return [name for name in self.agents.keys() if name != agent_name] + + def _last_turn_text_for(self, agent_name: str) -> str: + """Generate text representation of the last turn for a specific agent.""" + if not self.inbox: + return '' + + visible_agents = self._get_visible_agents(agent_name) + + last_turn_messages = [] + for from_agent, message in self.inbox[-len(self.agents) :]: + if ( + from_agent == agent_name + or from_agent in visible_agents + or from_agent == 'Environment' + ): + last_turn_messages.append( + f'{from_agent}: {message.to_natural_language()}' + ) + + return '\n'.join(last_turn_messages) if last_turn_messages else '' diff --git a/examples/social_graph_demo/social_network_knowledge_demo.py b/examples/social_graph_demo/social_network_knowledge_demo.py new file mode 100644 index 0000000..1890a9a --- /dev/null +++ b/examples/social_graph_demo/social_network_knowledge_demo.py @@ -0,0 +1,262 @@ +""" +Social Network Knowledge Propagation Benchmark + +This demo implements a knowledge propagation scenario where agents share 
information +through a social network. The scenario features Sam Altman sharing GPT-5 release +information with other agents through social connections. +""" + +import asyncio +import os +import sys +from pathlib import Path + +sys.path.append(str(Path(__file__).parent)) + +import glob +import json + +from demo_knowledge_evaluator import DemoKnowledgeEvaluator + +from tiny_chat import SimpleMessage, TinyChatBackground +from tiny_chat.server.core import create_server + + +async def run_custom_evaluation_from_logs(evaluator: DemoKnowledgeEvaluator): + """Run custom evaluation on the most recent conversation log.""" + + log_pattern = 'conversation_logs/conversation_*.json' + log_files = glob.glob(log_pattern) + + if not log_files: + print('No conversation logs found for evaluation') + return + + latest_log = max(log_files, key=os.path.getctime) + print(f'Evaluating conversation log: {latest_log}') + + try: + with open(latest_log, encoding='utf-8') as f: + conversation_data = json.load(f) + + evaluator_messages = [] + + if 'conversation_history' in conversation_data: + for msg_data in conversation_data['conversation_history']: + try: + if ( + isinstance(msg_data, dict) + and 'agent' in msg_data + and 'content' in msg_data + ): + source = msg_data['agent'] + message_content = msg_data['content'] + + if message_content.startswith( + 'said: "' + ) and message_content.endswith('"'): + message_content = message_content[7:-1] + elif message_content == 'did nothing': + continue + + message = SimpleMessage(message=message_content) + evaluator_messages.append((source, message)) + + except Exception as e: + print(f'Error processing message: {e}') + continue + else: + print('No conversation_history found in log file') + return + + if not evaluator_messages: + print('No evaluable messages found in conversation log') + return + + print(f'Found {len(evaluator_messages)} evaluable messages:') + for source, msg in evaluator_messages: + content = msg.to_natural_language() + preview = 
content[:60] + ('...' if len(content) > 60 else '') + print(f' {source}: {preview}') + + print(f'\nCalling evaluator with {len(evaluator_messages)} messages...') + evaluation_results = await evaluator.__acall__( + turn_number=len(evaluator_messages), messages=evaluator_messages + ) + print( + f'Evaluator completed. Results: {len(evaluation_results) if evaluation_results else 0} items' + ) + + print('\n' + '=' * 50) + print('CUSTOM KNOWLEDGE EVALUATION') + print('=' * 50) + + for metric_name, metric_data in evaluation_results: + if isinstance(metric_data, tuple) and len(metric_data) == 2: + score_info, comments = metric_data + if isinstance(score_info, tuple) and len(score_info) == 2: + actual_metric_name, actual_score = score_info + print(f'\n{actual_metric_name.upper()}: {actual_score:.2f}/10') + else: + print(f'\n{metric_name.upper()}: {score_info:.2f}/10') + print('Comments:') + print(comments) + else: + print(f'\n{metric_name.upper()}: {metric_data}') + + except Exception as e: + print(f'Error during evaluation: {e}') + import traceback + + traceback.print_exc() + + +async def main(): + """Main demo function.""" + + api_key = os.getenv('OPENAI_API_KEY') + if not api_key: + print('Warning: OPENAI_API_KEY not set. Demo may not work properly.') + return + + agent_configs = [ + { + 'name': 'Sam Altman', + 'type': 'llm', + 'goal': 'I am the CEO of OpenAI with insider knowledge. I know GPT-5 will be released in May 2025. I should share this information when Alice asks about upcoming AI developments.', + 'model_provider': 'gpt-4o-mini', + 'speaking_id': 0, + }, + { + 'name': 'Alice Chen', + 'type': 'llm', + 'goal': 'I am an AI researcher. I MUST ask Sam Altman about upcoming AI releases first, then immediately share any information I learn with Bob. I will not leave until I complete both tasks.', + 'model_provider': 'gpt-4o-mini', + 'speaking_id': 1, + }, + { + 'name': 'Bob Wilson', + 'type': 'llm', + 'goal': 'I am a tech journalist. 
I MUST wait for Alice to share GPT-5 information with me, then share this information with the Reporter. I will not leave until I complete both tasks.', + 'model_provider': 'gpt-4o-mini', + 'speaking_id': 2, + }, + { + 'name': 'Reporter', + 'type': 'llm', + 'goal': 'I need GPT-5 release information for my story. I MUST wait for Bob to share information with me and ask follow-up questions to get complete details. I will not leave until I get the information.', + 'model_provider': 'gpt-4o-mini', + 'speaking_id': 3, + }, + ] + + background = TinyChatBackground( + scenario='A professional AI industry discussion where knowledge about GPT-5 release spreads through the network', + agent_configs=[ + { + 'name': 'Sam Altman', + 'background': 'Sam Altman is the CEO of OpenAI with exclusive insider knowledge that GPT-5 will be released in May 2025. He is willing to share this information with AI researchers.', + 'goal': 'Share GPT-5 release information when asked by Alice about upcoming AI developments', + }, + { + 'name': 'Alice Chen', + 'background': 'Alice is an AI researcher who MUST complete the knowledge chain. She will ask Sam about GPT-5, then immediately inform Bob. She stays until both tasks are done.', + 'goal': 'Ask Sam about GPT-5 releases, then actively share the information with Bob to continue the knowledge chain', + }, + { + 'name': 'Bob Wilson', + 'background': 'Bob is a tech journalist who MUST wait for Alice to share GPT-5 information, then pass it to Reporter. He stays until both tasks are completed.', + 'goal': 'Receive GPT-5 information from Alice, then actively share it with Reporter to complete the knowledge propagation', + }, + { + 'name': 'Reporter', + 'background': 'Reporter is investigating GPT-5 for breaking news. 
Must stay and actively seek information from Bob until getting complete GPT-5 release details.', + 'goal': 'Actively ask Bob for GPT-5 information and stay until receiving complete release timeline details', + }, + ], + ) + + obs_control = { + 'mode': 'local', + 'neighbor_map': { + 'Sam Altman': ['Alice Chen'], + 'Alice Chen': ['Sam Altman', 'Bob Wilson'], + 'Bob Wilson': ['Alice Chen', 'Reporter'], + 'Reporter': ['Bob Wilson'], + }, + } + + print('Social Network Knowledge Propagation Demo') + print('Scenario: GPT-5 Release Information Spreading') + print('-' * 50) + print('Starting knowledge propagation scenario...') + print('Social network topology:') + print(' Sam Altman -> Alice Chen') + print(' Alice Chen -> Sam Altman, Bob Wilson') + print(' Bob Wilson -> Alice Chen, Reporter') + print(' Reporter -> Bob Wilson') + print() + + print('Expected knowledge propagation chain:') + print( + "1. Sam Altman (Source): Has exclusive knowledge 'GPT-5 will be released in May 2025'" + ) + print('2. Alice Chen asks Sam about upcoming AI releases') + print('3. Sam shares GPT-5 release information with Alice') + print('4. Bob Wilson asks Alice about GPT-5 release dates') + print('5. Alice shares the May 2025 information with Bob') + print('6. Reporter asks Bob about GPT-5 timeline') + print('7. Bob shares the release information with Reporter') + print('8. 
Knowledge successfully propagates: Sam → Alice → Bob → Reporter') + print() + + try: + demo_evaluator = DemoKnowledgeEvaluator( + target_knowledge='GPT-5 will be released in May 2025', + knowledge_source='Sam Altman', + target_agents=['Alice Chen', 'Bob Wilson', 'Reporter'], + ) + + print('Running conversation with private observation control...') + + async with create_server() as server: + episode_log = await server.run_conversation( + agent_configs=agent_configs, + background=background, + max_turns=12, + enable_evaluation=True, + return_log=True, + action_order='sequential', + obs_control=obs_control, + ) + + print('\nRunning custom knowledge evaluation...') + await run_custom_evaluation_from_logs(demo_evaluator) + + print('\n' + '=' * 50) + print('KNOWLEDGE PROPAGATION RESULTS') + print('=' * 50) + episode_length = getattr( + episode_log, 'episode_length', len(episode_log.rewards) + ) + print(f'Total conversation turns: {episode_length}') + + print('\nKnowledge propagation analysis:') + print('Knowledge indicators found: See evaluation results above') + print('Propagation success: See evaluation results above') + + print( + f'\nEvaluation Scores: {[(score, data) for score, data in episode_log.rewards]}' + ) + + print('\nConversation log saved and can be reviewed for detailed analysis.') + + except Exception as e: + print(f'Error running demo: {e}') + import traceback + + traceback.print_exc() + + +if __name__ == '__main__': + asyncio.run(main()) From 7d6b477174b118cab03a328aea71dad76230127f Mon Sep 17 00:00:00 2001 From: Cyrus Ye Date: Mon, 8 Sep 2025 23:52:50 -0500 Subject: [PATCH 2/3] chore: remove examples/social_graph_demo/README.md from branch --- examples/social_graph_demo/README.md | 112 --------------------------- 1 file changed, 112 deletions(-) delete mode 100644 examples/social_graph_demo/README.md diff --git a/examples/social_graph_demo/README.md b/examples/social_graph_demo/README.md deleted file mode 100644 index 8fea8de..0000000 --- 
a/examples/social_graph_demo/README.md +++ /dev/null @@ -1,112 +0,0 @@ -# Social Graph Knowledge Propagation Demo - -这个demo实现了一个多智能体社交网络中的知识传播基准测试。 - -## 功能特点 - -- **知识传播模拟**: 模拟知识在社交网络中的传播过程 -- **私有对话环境**: 智能体只能看到邻居的消息,模拟真实的社交网络 -- **LLM语义评估**: 使用大语言模型进行语义理解和知识评估 -- **完整的传播链追踪**: 追踪知识从源头到目标的完整传播路径 - -## 文件说明 - -### 核心文件 - -- `social_network_knowledge_demo.py` - 主要演示文件,展示GPT-5发布信息传播 -- `demo_knowledge_evaluator.py` - LLM驱动的知识传播评估器 -- `knowledge_agents.py` - 具有知识存储和学习能力的智能体 -- `private_conversation_environment.py` - 私有对话环境实现 - -### 额外演示 - -- `knowledge_propagation_demo.py` - 使用自定义知识智能体的传播演示 - -## 场景设置 - -### 智能体角色 - -1. **Sam Altman** (知识源) - OpenAI CEO,拥有GPT-5发布信息 -2. **Alice Chen** (研究员) - 从Sam获取信息,传递给Bob -3. **Bob Wilson** (记者) - 从Alice获取信息,传递给Reporter -4. **Reporter** (报告者) - 最终接收者,询问发布细节 - -### 社交网络拓扑 - -``` -Sam Altman → Alice Chen → Bob Wilson → Reporter -``` - -每个智能体只能看到直接邻居的消息,确保知识必须通过传播链传递。 - -## 使用方法 - -### 运行主要演示 - -```bash -cd examples/social_graph_demo -python -m social_network_knowledge_demo -``` - -### 运行知识智能体演示 - -```bash -cd examples/social_graph_demo -python -m knowledge_propagation_demo -``` - -## 评估指标 - -### 知识传播评估 - -- **传播率**: 获得目标知识的智能体比例 -- **准确率**: 传播信息的准确程度 -- **完整性**: 知识内容的完整程度 -- **置信度**: 评估结果的可信度 - -### LLM语义分析 - -评估器对每条消息进行语义分析,判断: - -- 是否包含目标知识 -- 知识的完整性和准确性 -- 智能体对知识的理解程度 - -## 环境要求 - -- Python 3.8+ -- OpenAI API Key (设置为环境变量 `OPENAI_API_KEY`) -- 依赖包:tiny_chat, litellm - -## 输出示例 - -``` -CUSTOM KNOWLEDGE EVALUATION -================================================== - -KNOWLEDGE_PROPAGATION: 9.90/10 -Comments: -Knowledge Propagation Analysis: -- Knowledge mentions: 4 -- Agents reached: 3 -- Propagation rate: 100.0% -- Accuracy rate: 97.5% -- Agents with knowledge: Sam Altman, Reporter, Alice Chen - -Detailed LLM Analysis: -1. Sam Altman: - Has Knowledge: True - Completeness: 100.0% - Accuracy: 100.0% - Confidence: 95.0% - Reasoning: The message explicitly states that the release of GPT-5 is targeted for May 2025... 
-``` - -## 扩展性 - -这个框架可以轻松扩展到: - -- 不同的知识类型和内容 -- 更复杂的社交网络拓扑 -- 多种评估维度和指标 -- 自定义智能体行为和目标 From 99e608a498f0462ddb7651a4716dec1a65938997 Mon Sep 17 00:00:00 2001 From: Cyrus Ye Date: Tue, 9 Sep 2025 00:03:08 -0500 Subject: [PATCH 3/3] chore: remove deprecated demo files and update __init__.py --- examples/social_graph_demo/__init__.py | 6 - .../knowledge_propagation_demo.py | 194 ------------------ .../private_conversation_environment.py | 177 ---------------- 3 files changed, 377 deletions(-) delete mode 100644 examples/social_graph_demo/knowledge_propagation_demo.py delete mode 100644 examples/social_graph_demo/private_conversation_environment.py diff --git a/examples/social_graph_demo/__init__.py b/examples/social_graph_demo/__init__.py index 4c1210f..95e1449 100644 --- a/examples/social_graph_demo/__init__.py +++ b/examples/social_graph_demo/__init__.py @@ -6,15 +6,9 @@ from .demo_knowledge_evaluator import DemoKnowledgeEvaluator from .knowledge_agents import KnowledgeAgent -from .private_conversation_environment import ( - PrivateConversationEnvironment, - PrivateConversationTinyChatEnvironment, -) __version__ = '1.0.0' __all__ = [ 'DemoKnowledgeEvaluator', 'KnowledgeAgent', - 'PrivateConversationEnvironment', - 'PrivateConversationTinyChatEnvironment', ] diff --git a/examples/social_graph_demo/knowledge_propagation_demo.py b/examples/social_graph_demo/knowledge_propagation_demo.py deleted file mode 100644 index d616301..0000000 --- a/examples/social_graph_demo/knowledge_propagation_demo.py +++ /dev/null @@ -1,194 +0,0 @@ -""" -Knowledge Propagation Demo with Private Conversations and True Knowledge Updates - -This demo demonstrates true knowledge propagation where: -1. Agents have persistent memory that gets updated -2. Conversations are private (only participants know the content) -3. 
Knowledge spreads through the social network via direct interactions -""" - -import asyncio -import os -import sys -from pathlib import Path - -sys.path.append(str(Path(__file__).parent)) - -from demo_knowledge_evaluator import DemoKnowledgeEvaluator -from knowledge_agents import KnowledgeAgent -from private_conversation_environment import PrivateConversationEnvironment - -from tiny_chat import BaseAgentProfile -from tiny_chat.server.config import ModelProviderConfig -from tiny_chat.server.providers import ModelProviderFactory - - -async def create_knowledge_agent( - name: str, - age: int, - occupation: str, - initial_knowledge: list[str] = None, - goal: str = None, -) -> KnowledgeAgent: - """Create a knowledge agent with proper model provider.""" - - model_config = ModelProviderConfig( - name='gpt-4o-mini', type='openai', temperature=0.7 - ) - model_provider = ModelProviderFactory.create_provider(model_config) - - profile = BaseAgentProfile( - first_name=name.split()[0], - last_name=name.split()[-1] if len(name.split()) > 1 else '', - age=age, - occupation=occupation, - speaking_id=0, - personality_and_values=f'Curious {occupation.lower()} interested in AI developments', - ) - - agent = KnowledgeAgent( - agent_name=name, - agent_profile=profile, - model_provider=model_provider, - initial_knowledge=initial_knowledge or [], - ) - - if goal: - agent.goal = goal - - return agent - - -async def setup_sam_altman_scenario(): - """Set up the Sam Altman GPT-5 knowledge propagation scenario.""" - - print('Setting up Sam Altman GPT-5 Knowledge Propagation Scenario') - print('=' * 60) - - sam = await create_knowledge_agent( - name='Sam Altman', - age=39, - occupation='CEO of OpenAI', - initial_knowledge=['GPT-5 will be released in May 2025'], - goal='Share information about GPT-5 release with researchers when appropriate', - ) - - alice = await create_knowledge_agent( - name='Alice Chen', - age=32, - occupation='AI Researcher', - goal='Learn about upcoming AI releases and 
share with colleagues', - ) - - bob = await create_knowledge_agent( - name='Bob Wilson', - age=28, - occupation='Tech Journalist', - goal='Gather information about AI releases for reporting', - ) - - reporter = await create_knowledge_agent( - name='Sarah Reporter', - age=35, - occupation='Science Reporter', - goal='Investigate AI release timelines for news stories', - ) - - social_network = { - 'Sam Altman': ['Alice Chen'], - 'Alice Chen': ['Sam Altman', 'Bob Wilson'], - 'Bob Wilson': ['Alice Chen', 'Sarah Reporter'], - 'Sarah Reporter': ['Bob Wilson'], - } - - agents = { - 'Sam Altman': sam, - 'Alice Chen': alice, - 'Bob Wilson': bob, - 'Sarah Reporter': reporter, - } - - return agents, social_network - - -async def run_evaluation(agents: dict, conversation_logs: list): - """Run GPT-5 knowledge evaluation on the results.""" - - evaluator = DemoKnowledgeEvaluator( - target_knowledge='GPT-5 will be released in May 2025', - knowledge_source='Sam Altman', - target_agents=['Alice Chen', 'Bob Wilson', 'Sarah Reporter'], - ) - - print('\n' + '=' * 50) - print('KNOWLEDGE PROPAGATION EVALUATION') - print('=' * 50) - - for agent_name, agent in agents.items(): - status = agent.get_knowledge_status() - - print(f'\n{agent_name}:') - print(f" Total Knowledge Items: {status['total_knowledge']}") - - if status['knowledge_items']: - for item in status['knowledge_items']: - print(f' - {item}') - else: - print(' - No knowledge items') - - has_gpt5_knowledge = [] - for agent_name, agent in agents.items(): - if agent_name == 'Sam Altman': - continue - - knowledge_items = agent.get_all_knowledge() - has_knowledge = any( - 'gpt-5' in item.lower() or 'may 2025' in item.lower() - for item in knowledge_items - ) - - if has_knowledge: - has_gpt5_knowledge.append(agent_name) - - total_target_agents = len(agents) - 1 - propagation_rate = ( - len(has_gpt5_knowledge) / total_target_agents if total_target_agents > 0 else 0 - ) - - print('\nPropagation Summary:') - print(' Knowledge source: Sam 
Altman') - print(f' Target agents: {total_target_agents}') - print(f' Agents with knowledge: {len(has_gpt5_knowledge)} ({has_gpt5_knowledge})') - print(f' Propagation rate: {propagation_rate:.1%}') - - return { - 'agents_with_knowledge': has_gpt5_knowledge, - 'propagation_rate': propagation_rate, - 'conversation_logs': conversation_logs, - } - - -async def main(): - """Main demo function.""" - - api_key = os.getenv('OPENAI_API_KEY') - if not api_key: - print('Error: OPENAI_API_KEY environment variable not set') - return - - agents, social_network = await setup_sam_altman_scenario() - - environment = PrivateConversationEnvironment( - agents=agents, social_network=social_network, max_rounds=5 - ) - - results = await environment.run_knowledge_propagation_simulation() - - evaluation_results = await run_evaluation(agents, results['conversation_logs']) - - print(f"\nSimulation completed in {results['total_rounds']} rounds") - print(f"Final propagation rate: {evaluation_results['propagation_rate']:.1%}") - - -if __name__ == '__main__': - asyncio.run(main()) diff --git a/examples/social_graph_demo/private_conversation_environment.py b/examples/social_graph_demo/private_conversation_environment.py deleted file mode 100644 index 7cd18f1..0000000 --- a/examples/social_graph_demo/private_conversation_environment.py +++ /dev/null @@ -1,177 +0,0 @@ -""" -Private conversation environment that supports knowledge propagation through direct agent interactions. -Only participants in a conversation can see the messages. -""" - -import asyncio -from typing import Any - -from knowledge_agents import KnowledgeAgent - -from tiny_chat import TinyChatEnvironment - - -class PrivateConversationEnvironment: - """ - Environment that manages private conversations between agents. - Knowledge propagates only through direct interactions. 
- """ - - def __init__( - self, - agents: dict[str, KnowledgeAgent], - social_network: dict[str, list[str]], - max_rounds: int = 10, - ): - self.agents = agents - self.social_network = social_network - self.max_rounds = max_rounds - self.current_round = 0 - self.conversation_logs = [] - - async def run_knowledge_propagation_simulation(self) -> dict[str, Any]: - """ - Run the knowledge propagation simulation through private conversations. - """ - print('Starting knowledge propagation simulation') - print(f'Social network: {self.social_network}') - - print('\n=== Initial Knowledge State ===') - for agent_name, agent in self.agents.items(): - status = agent.get_knowledge_status() - print(f"{agent_name}: {status['total_knowledge']} items") - for item in status['knowledge_items']: - print(f' - {item}') - - for round_num in range(self.max_rounds): - self.current_round = round_num + 1 - print(f'\n=== Round {self.current_round} ===') - - any_new_knowledge = await self._conduct_round() - - if not any_new_knowledge: - print('No new knowledge shared this round. 
Ending simulation.') - break - - print('\n=== Final Knowledge State ===') - final_state = {} - for agent_name, agent in self.agents.items(): - status = agent.get_knowledge_status() - final_state[agent_name] = status - print(f"{agent_name}: {status['total_knowledge']} items") - for item in status['knowledge_items']: - print(f' - {item}') - - return { - 'final_knowledge_state': final_state, - 'total_rounds': self.current_round, - 'conversation_logs': self.conversation_logs, - } - - async def _conduct_round(self) -> bool: - """Conduct one round of private conversations.""" - any_new_knowledge = False - round_conversations = [] - - for agent_name, connected_agents in self.social_network.items(): - if agent_name not in self.agents: - continue - - agent = self.agents[agent_name] - - for connected_agent_name in connected_agents: - if connected_agent_name not in self.agents: - continue - - connected_agent = self.agents[connected_agent_name] - - knowledge_shared = await self._private_conversation( - agent, connected_agent, agent_name, connected_agent_name - ) - - if knowledge_shared: - any_new_knowledge = True - round_conversations.append( - { - 'from': agent_name, - 'to': connected_agent_name, - 'knowledge_shared': knowledge_shared, - } - ) - - self.conversation_logs.append( - {'round': self.current_round, 'conversations': round_conversations} - ) - - return any_new_knowledge - - async def _private_conversation( - self, - agent1: KnowledgeAgent, - agent2: KnowledgeAgent, - agent1_name: str, - agent2_name: str, - ) -> bool: - """Simulate a private conversation between two agents.""" - - agent1_knowledge = set(agent1.get_all_knowledge()) - agent2_knowledge = set(agent2.get_all_knowledge()) - - knowledge_to_share_1_to_2 = agent1_knowledge - agent2_knowledge - knowledge_to_share_2_to_1 = agent2_knowledge - agent1_knowledge - - knowledge_shared = False - - if knowledge_to_share_1_to_2: - shared_item = next(iter(knowledge_to_share_1_to_2)) - agent2.add_knowledge(shared_item) - 
print(f'{agent1_name} shared with {agent2_name}: {shared_item}') - knowledge_shared = True - - if knowledge_to_share_2_to_1: - shared_item = next(iter(knowledge_to_share_2_to_1)) - agent1.add_knowledge(shared_item) - print(f'{agent2_name} shared with {agent1_name}: {shared_item}') - knowledge_shared = True - - await asyncio.sleep(0.1) - - return knowledge_shared - - -class PrivateConversationTinyChatEnvironment(TinyChatEnvironment): - """ - TinyChatEnvironment that enforces private conversations based on neighbor_map. - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - # 确保设置neighbor_map - self.neighbor_map = getattr(self, 'neighbor_map', {}) - - def _get_visible_agents(self, agent_name: str) -> list[str]: - """Get list of agents visible to the given agent based on neighbor_map.""" - if hasattr(self, 'neighbor_map') and self.neighbor_map: - return self.neighbor_map.get(agent_name, []) - else: - return [name for name in self.agents.keys() if name != agent_name] - - def _last_turn_text_for(self, agent_name: str) -> str: - """Generate text representation of the last turn for a specific agent.""" - if not self.inbox: - return '' - - visible_agents = self._get_visible_agents(agent_name) - - last_turn_messages = [] - for from_agent, message in self.inbox[-len(self.agents) :]: - if ( - from_agent == agent_name - or from_agent in visible_agents - or from_agent == 'Environment' - ): - last_turn_messages.append( - f'{from_agent}: {message.to_natural_language()}' - ) - - return '\n'.join(last_turn_messages) if last_turn_messages else ''