Skip to content

Commit 135bd8a

Browse files
authored
feat: Update document_toolkit.py (#485)
2 parents 37bc18d + 9735f8f commit 135bd8a

File tree

3 files changed

+374
-62
lines changed

3 files changed

+374
-62
lines changed
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
{
2+
"mcpServers": {
3+
"edgeone-pages-mcp": {
4+
"type": "sse",
5+
"url": "https://mcp.api-inference.modelscope.cn/sse/fcbc9ff4e9704d"
6+
},
7+
"playwright": {
8+
"command": "npx",
9+
"args": [
10+
"@playwright/mcp@latest"
11+
]
12+
}
13+
}
14+
}
Lines changed: 332 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,332 @@
1+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2+
# Licensed under the Apache License, Version 2.0 (the "License");
3+
# you may not use this file except in compliance with the License.
4+
# You may obtain a copy of the License at
5+
#
6+
# http://www.apache.org/licenses/LICENSE-2.0
7+
#
8+
# Unless required by applicable law or agreed to in writing, software
9+
# distributed under the License is distributed on an "AS IS" BASIS,
10+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11+
# See the License for the specific language governing permissions and
12+
# limitations under the License.
13+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14+
import asyncio
15+
import sys
16+
import contextlib
17+
import time
18+
from pathlib import Path
19+
from typing import List, Dict, Any
20+
import os
21+
22+
from colorama import Fore, init
23+
from dotenv import load_dotenv
24+
25+
from camel.agents.chat_agent import ToolCallingRecord
26+
from camel.models import ModelFactory
27+
from camel.toolkits import FunctionTool, MCPToolkit
28+
from camel.types import ModelPlatformType, ModelType
29+
from camel.logger import set_log_level
30+
from camel.utils import print_text_animated
31+
32+
from owl.utils.enhanced_role_playing import OwlRolePlaying, arun_society
33+
34+
import pathlib
35+
36+
# Initialize colorama so ANSI colour codes render on every platform
# (including Windows terminals).
init()

# Load API keys and settings from <repo root>/owl/.env before anything
# in camel/owl reads the environment.
base_dir = pathlib.Path(__file__).parent.parent
env_path = base_dir / "owl" / ".env"
load_dotenv(dotenv_path=str(env_path))

set_log_level(level="INFO")
44+
45+
46+
async def construct_society(
    question: str,
    tools: List[FunctionTool],
) -> OwlRolePlaying:
    r"""Assemble a two-agent OwlRolePlaying society for the given task.

    Args:
        question (str): The task to perform.
        tools (List[FunctionTool]): The MCP tools to use for interaction.

    Returns:
        OwlRolePlaying: The configured society of agents.
    """

    def _make_qwen_model():
        # Both roles share the same deterministic (temperature 0) Qwen
        # Plus configuration; build a fresh model instance per role.
        return ModelFactory.create(
            model_platform=ModelPlatformType.QWEN,
            model_type=ModelType.QWEN_PLUS_LATEST,
            model_config_dict={"temperature": 0},
        )

    # Only the assistant gets the MCP tools; the user agent just drives
    # the conversation.
    return OwlRolePlaying(
        task_prompt=question,
        with_task_specify=False,
        user_role_name="user",
        user_agent_kwargs={"model": _make_qwen_model()},
        assistant_role_name="assistant",
        assistant_agent_kwargs={
            "model": _make_qwen_model(),
            "tools": tools,
        },
    )
91+
92+
93+
def create_md_file(task: str) -> str:
    """Create a timestamped markdown log file for the conversation.

    The file is placed in ``conversation_logs/`` (created on demand) and
    initialised with a header section describing the task.

    Args:
        task (str): The task being performed.

    Returns:
        str: Path to the created markdown file.
    """
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    # Create the logs directory if it doesn't exist.
    logs_dir = Path("conversation_logs")
    logs_dir.mkdir(exist_ok=True)

    # Sanitize the task for use in a filename: keep alphanumerics, dash
    # and underscore; everything else (spaces, slashes, colons, newlines,
    # "?", ...) becomes "_". The previous version only replaced " " and
    # "/", which produced invalid filenames on Windows for tasks
    # containing e.g. ":" or "?".
    task_short = "".join(
        c if c.isalnum() or c in "-_" else "_" for c in task[:30]
    )
    filename = str(logs_dir / f"conversation_{timestamp}_{task_short}.md")

    # Initialize the file with a header block.
    with open(filename, "w", encoding="utf-8") as f:
        f.write(f"# Agent Conversation: {task}\n\n")
        f.write(f"*Generated on: {time.strftime('%Y-%m-%d %H:%M:%S')}*\n\n")
        f.write("## Task Details\n\n")
        f.write(f"**Task:** {task}\n\n")
        f.write("## Conversation\n\n")

    return filename
120+
121+
122+
def write_to_md(filename: str, content: Dict[str, Any]) -> None:
    """Append a structured conversation entry to the markdown log.

    Recognised keys in ``content``: ``system_info`` (mapping of label to
    value), ``assistant`` (message text, optionally accompanied by
    ``tool_calls``), ``user`` (message text), ``summary`` (text) and
    ``token_count`` (int). Unknown keys are ignored.

    Args:
        filename (str): Path to the markdown file.
        content (Dict[str, Any]): Content to write to the file.
    """
    with open(filename, "a", encoding="utf-8") as f:
        if "system_info" in content:
            f.write("### System Information\n\n")
            for label, value in content["system_info"].items():
                f.write(f"**{label}:** {value}\n\n")

        if "assistant" in content:
            f.write("### 🤖 Assistant\n\n")
            # Tool calls are rendered before the assistant's text, each in
            # its own fenced code block.
            if "tool_calls" in content:
                f.write("**Tool Calls:**\n\n")
                for record in content["tool_calls"]:
                    f.write(f"```\n{record}\n```\n\n")
            f.write(f"{content['assistant']}\n\n")

        if "user" in content:
            f.write("### 👤 User\n\n")
            f.write(f"{content['user']}\n\n")

        if "summary" in content:
            f.write("## Summary\n\n")
            f.write(f"{content['summary']}\n\n")

        if "token_count" in content:
            f.write(f"**Total tokens used:** {content['token_count']}\n\n")
153+
154+
155+
async def run_society_with_formatted_output(society: OwlRolePlaying, md_filename: str, round_limit: int = 15):
    """Run the society with colourised terminal output and a markdown log.

    Drives the user/assistant conversation loop, mirroring each turn both
    to the terminal (colorama-coloured) and to ``md_filename`` via
    ``write_to_md``. The loop stops when either agent terminates, the user
    emits "TASK_DONE", or ``round_limit`` rounds have run.

    Args:
        society (OwlRolePlaying): The society to run.
        md_filename (str): Path to the markdown file for output.
        round_limit (int, optional): Maximum number of conversation rounds.
            Defaults to 15.

    Returns:
        tuple: ``(answer, chat_history, token_count)`` where ``answer`` is
        the assistant's final message content (empty string if no round
        produced one), ``chat_history`` is a list of
        ``{"assistant", "user"}`` dicts, and ``token_count`` is
        ``{"total": int}``.
    """
    print(Fore.GREEN + f"AI Assistant sys message:\n{society.assistant_sys_msg}\n")
    print(Fore.BLUE + f"AI User sys message:\n{society.user_sys_msg}\n")

    print(Fore.YELLOW + f"Original task prompt:\n{society.task_prompt}\n")
    print(Fore.CYAN + "Specified task prompt:" + f"\n{society.specified_task_prompt}\n")
    print(Fore.RED + f"Final task prompt:\n{society.task_prompt}\n")

    # Write system information to markdown
    write_to_md(md_filename, {
        "system_info": {
            "AI Assistant System Message": society.assistant_sys_msg,
            "AI User System Message": society.user_sys_msg,
            "Original Task Prompt": society.task_prompt,
            "Specified Task Prompt": society.specified_task_prompt,
            "Final Task Prompt": society.task_prompt
        }
    })

    input_msg = society.init_chat()
    chat_history = []
    token_count = {"total": 0}
    # BUGFIX: initialise so the final-answer extraction below is safe even
    # when the loop body never runs (e.g. round_limit <= 0). Previously
    # assistant_response was unbound in that case and the trailing
    # `if assistant_response` guard raised NameError instead of guarding.
    assistant_response = None
    n = 0

    while n < round_limit:
        n += 1
        assistant_response, user_response = await society.astep(input_msg)

        md_content = {}

        if assistant_response.terminated:
            termination_msg = f"AI Assistant terminated. Reason: {assistant_response.info['termination_reasons']}."
            print(Fore.GREEN + termination_msg)
            md_content["summary"] = termination_msg
            write_to_md(md_filename, md_content)
            break

        if user_response.terminated:
            termination_msg = f"AI User terminated. Reason: {user_response.info['termination_reasons']}."
            print(Fore.GREEN + termination_msg)
            md_content["summary"] = termination_msg
            write_to_md(md_filename, md_content)
            break

        # Handle tool calls for both terminal and markdown
        if "tool_calls" in assistant_response.info:
            tool_calls: List[ToolCallingRecord] = [
                ToolCallingRecord(**call.as_dict())
                for call in assistant_response.info['tool_calls']
            ]
            md_content["tool_calls"] = tool_calls

            # Print to terminal
            print(Fore.GREEN + "AI Assistant:")
            for func_record in tool_calls:
                print(f"{func_record}")
        else:
            print(Fore.GREEN + "AI Assistant:")

        # Print assistant response to terminal
        print(f"{assistant_response.msg.content}\n")

        # Print user response to terminal
        print(Fore.BLUE + f"AI User:\n\n{user_response.msg.content}\n")

        # Build content for markdown file
        md_content["assistant"] = assistant_response.msg.content
        md_content["user"] = user_response.msg.content

        # Write to markdown
        write_to_md(md_filename, md_content)

        # Update chat history
        chat_history.append({
            "assistant": assistant_response.msg.content,
            "user": user_response.msg.content,
        })

        # Update token count (key presence depends on the model backend —
        # guarded so backends without usage reporting still work)
        if "token_count" in assistant_response.info:
            token_count["total"] += assistant_response.info["token_count"]

        # "TASK_DONE" in the user's message is the society's completion signal
        if "TASK_DONE" in user_response.msg.content:
            task_done_msg = "Task completed successfully!"
            print(Fore.YELLOW + task_done_msg + "\n")
            write_to_md(md_filename, {"summary": task_done_msg})
            break

        input_msg = assistant_response.msg

    # Write token count information
    write_to_md(md_filename, {"token_count": token_count["total"]})

    # Extract final answer (empty when no round produced a message)
    answer = assistant_response.msg.content if assistant_response and assistant_response.msg else ""

    return answer, chat_history, token_count
262+
263+
264+
@contextlib.asynccontextmanager
async def mcp_toolkit_context(config_path):
    """Async context manager that connects and disconnects an MCPToolkit.

    Args:
        config_path (str): Path to the MCP configuration file.

    Yields:
        MCPToolkit: The connected MCPToolkit instance.
    """
    toolkit = MCPToolkit(config_path=str(config_path))
    try:
        await toolkit.connect()
        print(Fore.GREEN + "Successfully connected to SSE server")
        yield toolkit
    finally:
        # Best-effort cleanup: log (but swallow) disconnect failures so
        # they never mask an exception raised inside the ``with`` body.
        try:
            await toolkit.disconnect()
        except Exception as exc:
            print(Fore.RED + f"Warning: Error during disconnect: {exc}")
        else:
            print(Fore.GREEN + "Successfully disconnected from SSE server")
287+
288+
289+
async def main():
    """Entry point: load MCP config, run the society, log the conversation."""
    # Load SSE server configuration (sits next to this script)
    config_path = Path(__file__).parent / "mcp_sse_config.json"

    # Default task — a simple example query.
    # BUGFIX: the adjacent string literals previously concatenated with no
    # separators, yielding a garbled prompt like "...repository.Write..."
    # and "...features:A clear introduction to Qwen3Well-organized...".
    default_task = (
        "Visit the Qwen3 GitHub repository, summarize the introduction of the repository. "
        "Write a comprehensive HTML documentation site with the following features: "
        "A clear introduction to Qwen3. "
        "Well-organized sections of the technical documentation. "
        "Practical code examples. "
        "A visually appealing purple technology theme (e.g. modern, clean, purple-accented design). "
        "Finally, deploy the HTML site and open it in the browser."
    )

    # Use command line argument if provided, otherwise use default task
    task = sys.argv[1] if len(sys.argv) > 1 else default_task

    try:
        # Create markdown file for conversation export
        md_filename = create_md_file(task)
        print(Fore.CYAN + f"Conversation will be saved to: {md_filename}")

        async with mcp_toolkit_context(config_path) as mcp_toolkit:
            # Get available tools
            tools = [*mcp_toolkit.get_tools()]

            # Build and run society
            print(Fore.YELLOW + f"Starting task: {task}\n")
            society = await construct_society(task, tools)
            answer, chat_history, token_count = await run_society_with_formatted_output(society, md_filename)

            print(Fore.GREEN + f"\nFinal Result: {answer}")
            print(Fore.CYAN + f"Total tokens used: {token_count['total']}")
            print(Fore.CYAN + f"Full conversation log saved to: {md_filename}")

    except KeyboardInterrupt:
        print(Fore.YELLOW + "\nReceived exit signal, shutting down...")
    except Exception as e:
        # Top-level boundary: report and exit cleanly rather than traceback.
        print(Fore.RED + f"Error occurred: {e}")
329+
330+
331+
if __name__ == "__main__":
332+
asyncio.run(main())

0 commit comments

Comments
 (0)