Skip to content

Commit b02cae9

Browse files
committed
update
1 parent 12ea90f commit b02cae9

File tree

2 files changed

+114
-52
lines changed

2 files changed

+114
-52
lines changed

camel/agents/_utils.py

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,44 @@
2525
logger = logging.getLogger(__name__)
2626

2727

28+
def build_default_summary_prompt(conversation_text: str) -> str:
    r"""Build the default summarization prompt for a conversation.

    Args:
        conversation_text (str): The conversation to be summarized.

    Returns:
        str: A formatted prompt instructing the model to produce a structured
            markdown summary.
    """
    # The scaffold is dedented *before* the conversation is substituted;
    # substituting first would defeat textwrap.dedent, because conversation
    # lines at column 0 would reduce the common leading whitespace to "".
    prompt_template = textwrap.dedent(
        """\
        Summarize the conversation below.
        Produce markdown that strictly follows this outline and numbering:

        Summary:
        1. **Primary Request and Intent**:
        2. **Key Concepts**:
        3. **Errors and Fixes**:
        4. **Problem Solving**:
        5. **Pending Tasks**:
        6. **Current Work**:
        7. **Optional Next Step**:

        Requirements:
        - Use bullet lists under each section (`- item`). If a section has no
          information, output `- None noted`.
        - Keep the ordering, headings, and formatting as written above.
        - Focus on concrete actions, findings, and decisions.
        - Do not invent details that are not supported by the conversation.

        Conversation:
        {conversation_text}
        """
    )
    # str.format only interprets braces in the template itself, so a
    # conversation containing "{" or "}" is substituted verbatim.
    return prompt_template.format(conversation_text=conversation_text)
64+
65+
2866
def generate_tool_prompt(tool_schema_list: List[Dict[str, Any]]) -> str:
2967
r"""Generates a tool prompt based on the provided tool schema list.
3068

camel/agents/chat_agent.py

Lines changed: 76 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,7 @@
5656

5757
from camel.agents._types import ModelResponse, ToolCallRequest
5858
from camel.agents._utils import (
59+
build_default_summary_prompt,
5960
convert_to_function_tool,
6061
convert_to_schema,
6162
get_info_dict,
@@ -981,6 +982,36 @@ def _format_tool_limit_notice(self) -> Optional[str]:
981982

982983
return "\n".join(notice_lines)
983984

985+
@staticmethod
def _append_user_messages_section(
    summary_content: str, user_messages: List[str]
) -> str:
    r"""Append (or refresh) an "All User Messages" section in a summary.

    Each user message is flattened onto a single line and rendered as a
    markdown bullet. Any previously appended "All User Messages" section is
    removed first, so repeated summarization does not duplicate the section.

    Args:
        summary_content (str): Existing markdown summary text.
        user_messages (List[str]): Raw user messages to include; non-string
            entries are coerced with ``str()``.

    Returns:
        str: The summary with exactly one trailing user-messages section.
    """
    section_title = "- **All User Messages**:"
    sanitized_messages: List[str] = []
    for msg in user_messages:
        if not isinstance(msg, str):
            msg = str(msg)
        # Collapse internal newlines so each message stays on one bullet.
        cleaned = " ".join(msg.strip().splitlines())
        if cleaned:
            sanitized_messages.append(cleaned)

    bullet_block = (
        "\n".join(f"- {m}" for m in sanitized_messages)
        if sanitized_messages
        else "- None noted"
    )
    user_section = f"{section_title}\n{bullet_block}"

    # Strip any previously appended section. NOTE: re.DOTALL must NOT be
    # used here — with DOTALL the greedy `.*` in `(?:\n- .*)*` spans
    # newlines, and because the `\Z` branch of the lookahead always
    # succeeds at end-of-string, the match would swallow (and delete)
    # every line after the section title, including unrelated trailing
    # content. Without DOTALL, `.` stops at newlines, so only the
    # section's consecutive bullet lines are removed.
    pattern_existing = re.compile(
        r"(?:\n\n)?- \*\*All User Messages\*\*:"
        r"(?:\n- .*)*(?=\n\n- \*\*|\Z)"
    )
    summary_clean = pattern_existing.sub("", summary_content).rstrip()

    separator = "\n\n" if summary_clean else ""
    return f"{summary_clean}{separator}{user_section}"
1014+
9841015
def _reset_summary_state(self) -> None:
9851016
self._summary_token_count = 0 # Total tokens in summary messages
9861017

@@ -1019,15 +1050,15 @@ def _calculate_next_summary_threshold(self) -> int:
10191050
return threshold
10201051

10211052
def _update_memory_with_summary(
1022-
self, summary: Dict[str, Any], include_summaries: bool = False
1053+
self, summary: str, include_summaries: bool = False
10231054
) -> None:
10241055
r"""Update memory with summary result.
10251056
10261057
This method handles memory clearing and restoration of summaries based
10271058
on whether it's a progressive or full compression.
10281059
"""
10291060

1030-
summary_content: str = summary.get("summary", "")
1061+
summary_content: str = summary
10311062

10321063
existing_summaries = []
10331064
if not include_summaries:
@@ -1047,26 +1078,26 @@ def _update_memory_with_summary(
10471078
content = old_summary.get('content', '')
10481079
if not isinstance(content, str):
10491080
content = str(content)
1050-
summary_msg = BaseMessage.make_user_message(
1081+
summary_msg = BaseMessage.make_assistant_message(
10511082
role_name="assistant", content=content
10521083
)
10531084
self.update_memory(summary_msg, OpenAIBackendRole.ASSISTANT)
10541085

10551086
# Add new summary
1056-
new_summary_msg = BaseMessage.make_user_message(
1087+
new_summary_msg = BaseMessage.make_assistant_message(
10571088
role_name="assistant", content=summary_content
10581089
)
10591090
self.update_memory(new_summary_msg, OpenAIBackendRole.ASSISTANT)
1060-
input_message = BaseMessage.make_user_message(
1061-
role_name="user",
1091+
input_message = BaseMessage.make_assistant_message(
1092+
role_name="assistant",
10621093
content=(
10631094
"Please continue the conversation from "
10641095
"where we left it off without asking the user any further "
10651096
"questions. Continue with the last task that you were "
10661097
"asked to work on."
10671098
),
10681099
)
1069-
self.update_memory(input_message, OpenAIBackendRole.USER)
1100+
self.update_memory(input_message, OpenAIBackendRole.ASSISTANT)
10701101
# Update token count
10711102
try:
10721103
summary_tokens = (
@@ -1256,6 +1287,7 @@ def summarize(
12561287
response_format: Optional[Type[BaseModel]] = None,
12571288
working_directory: Optional[Union[str, Path]] = None,
12581289
include_summaries: bool = False,
1290+
add_user_messages: bool = True,
12591291
) -> Dict[str, Any]:
12601292
r"""Summarize the agent's current conversation context and persist it
12611293
to a markdown file.
@@ -1283,7 +1315,8 @@ def summarize(
12831315
working_directory (Optional[str|Path]): Optional directory to save
12841316
the markdown summary file. If provided, overrides the default
12851317
directory used by ContextUtility.
1286-
1318+
add_user_messages (bool): Whether to add user messages to the summary.
1319+
(default: :obj:`True`)
12871320
Returns:
12881321
Dict[str, Any]: A dictionary containing the summary text, file
12891322
path, status message, and optionally structured_summary if
@@ -1329,6 +1362,7 @@ def summarize(
13291362

13301363
# Convert messages to conversation text
13311364
conversation_lines = []
1365+
user_messages: List[str] = []
13321366
for message in messages:
13331367
role = message.get('role', 'unknown')
13341368
content = message.get('content', '')
@@ -1390,6 +1424,9 @@ def summarize(
13901424

13911425
# Handle regular content messages (user/assistant/system)
13921426
elif content:
1427+
content = str(content)
1428+
if role == 'user':
1429+
user_messages.append(content)
13931430
conversation_lines.append(f"{role}: {content}")
13941431

13951432
conversation_text = "\n".join(conversation_lines).strip()
@@ -1420,12 +1457,7 @@ def summarize(
14201457
f"{conversation_text}"
14211458
)
14221459
else:
1423-
prompt_text = (
1424-
"Summarize the context information in concise markdown "
1425-
"bullet points highlighting key decisions, action items, "
1426-
"user's intent.\n\nContext information:\n"
1427-
f"{conversation_text}"
1428-
)
1460+
prompt_text = build_default_summary_prompt(conversation_text)
14291461

14301462
try:
14311463
# Use structured output if response_format is provided
@@ -1495,6 +1527,10 @@ def summarize(
14951527
summary_content = context_util.structured_output_to_markdown(
14961528
structured_data=structured_output, metadata=metadata
14971529
)
1530+
if add_user_messages:
1531+
summary_content = self._append_user_messages_section(
1532+
summary_content, user_messages
1533+
)
14981534

14991535
# Save the markdown (either custom structured or default)
15001536
save_status = context_util.save_markdown_file(
@@ -1538,6 +1574,7 @@ async def asummarize(
15381574
response_format: Optional[Type[BaseModel]] = None,
15391575
working_directory: Optional[Union[str, Path]] = None,
15401576
include_summaries: bool = False,
1577+
add_user_messages: bool = True,
15411578
) -> Dict[str, Any]:
15421579
r"""Asynchronously summarize the agent's current conversation context
15431580
and persist it to a markdown file.
@@ -1565,7 +1602,8 @@ async def asummarize(
15651602
only non-summary messages will be summarized. If True, all
15661603
messages including previous summaries will be summarized
15671604
(full compression). (default: :obj:`False`)
1568-
1605+
add_user_messages (bool): Whether to add user messages to the summary.
1606+
(default: :obj:`True`)
15691607
Returns:
15701608
Dict[str, Any]: A dictionary containing the summary text, file
15711609
path, status message, and optionally structured_summary if
@@ -1601,6 +1639,7 @@ async def asummarize(
16011639

16021640
# Convert messages to conversation text
16031641
conversation_lines = []
1642+
user_messages: List[str] = []
16041643
for message in messages:
16051644
role = message.get('role', 'unknown')
16061645
content = message.get('content', '')
@@ -1662,6 +1701,9 @@ async def asummarize(
16621701

16631702
# Handle regular content messages (user/assistant/system)
16641703
elif content:
1704+
content = str(content)
1705+
if role == 'user':
1706+
user_messages.append(content)
16651707
conversation_lines.append(f"{role}: {content}")
16661708

16671709
conversation_text = "\n".join(conversation_lines).strip()
@@ -1692,12 +1734,7 @@ async def asummarize(
16921734
f"{conversation_text}"
16931735
)
16941736
else:
1695-
prompt_text = (
1696-
"Summarize the context information in concise markdown "
1697-
"bullet points highlighting key decisions, action items, "
1698-
"user's intent.\n\nContext information:\n"
1699-
f"{conversation_text}"
1700-
)
1737+
prompt_text = build_default_summary_prompt(conversation_text)
17011738

17021739
try:
17031740
# Use structured output if response_format is provided
@@ -1776,6 +1813,10 @@ async def asummarize(
17761813
summary_content = context_util.structured_output_to_markdown(
17771814
structured_data=structured_output, metadata=metadata
17781815
)
1816+
if add_user_messages:
1817+
summary_content = self._append_user_messages_section(
1818+
summary_content, user_messages
1819+
)
17791820

17801821
# Save the markdown (either custom structured or default)
17811822
save_status = context_util.save_markdown_file(
@@ -2359,7 +2400,8 @@ def _step_impl(
23592400
# Summarize everything (including summaries)
23602401
summary = self.summarize(include_summaries=True)
23612402
self._update_memory_with_summary(
2362-
summary, include_summaries=True
2403+
summary.get("summary", ""),
2404+
include_summaries=True,
23632405
)
23642406
elif num_tokens > threshold:
23652407
logger.info(
@@ -2369,7 +2411,8 @@ def _step_impl(
23692411
# Only summarize non-summary content
23702412
summary = self.summarize(include_summaries=False)
23712413
self._update_memory_with_summary(
2372-
summary, include_summaries=False
2414+
summary.get("summary", ""),
2415+
include_summaries=False,
23732416
)
23742417
accumulated_context_tokens += num_tokens
23752418
except RuntimeError as e:
@@ -2441,25 +2484,15 @@ def _step_impl(
24412484
)
24422485
self.memory.remove_records_by_indices(indices_to_remove)
24432486

2444-
summary = self.summarize()
2487+
summary = self.summarize(include_summaries=False)
24452488
tool_notice = self._format_tool_limit_notice()
24462489
summary_messages = summary.get("summary", "")
24472490

24482491
if tool_notice:
24492492
summary_messages += "\n\n" + tool_notice
2450-
help_message = (
2451-
"Please continue the conversation from "
2452-
"where we left it off without asking the user any "
2453-
"further questions. Continue with the last task "
2454-
"that you were asked to work on."
2455-
)
2456-
summary_messages += "\n\n" + help_message
2457-
self.clear_memory()
2458-
summary_messages = BaseMessage.make_assistant_message(
2459-
role_name="assistant", content=summary_messages
2460-
)
2461-
self.update_memory(
2462-
summary_messages, OpenAIBackendRole.ASSISTANT
2493+
2494+
self._update_memory_with_summary(
2495+
summary_messages, include_summaries=False
24632496
)
24642497
self._last_token_limit_tool_signature = tool_signature
24652498
return self._step_impl(input_message, response_format)
@@ -2690,7 +2723,8 @@ async def _astep_non_streaming_task(
26902723
include_summaries=True
26912724
)
26922725
self._update_memory_with_summary(
2693-
summary, include_summaries=True
2726+
summary.get("summary", ""),
2727+
include_summaries=True,
26942728
)
26952729
elif num_tokens > threshold:
26962730
logger.info(
@@ -2702,7 +2736,8 @@ async def _astep_non_streaming_task(
27022736
include_summaries=False
27032737
)
27042738
self._update_memory_with_summary(
2705-
summary, include_summaries=False
2739+
summary.get("summary", ""),
2740+
include_summaries=False,
27062741
)
27072742
accumulated_context_tokens += num_tokens
27082743
except RuntimeError as e:
@@ -2781,19 +2816,8 @@ async def _astep_non_streaming_task(
27812816

27822817
if tool_notice:
27832818
summary_messages += "\n\n" + tool_notice
2784-
help_message = (
2785-
"Please continue the conversation from "
2786-
"where we left it off without asking the user any "
2787-
"further questions. Continue with the last task "
2788-
"that you were asked to work on."
2789-
)
2790-
summary_messages += "\n\n" + help_message
2791-
self.clear_memory()
2792-
summary_messages = BaseMessage.make_assistant_message(
2793-
role_name="assistant", content=summary_messages
2794-
)
2795-
self.update_memory(
2796-
summary_messages, OpenAIBackendRole.ASSISTANT
2819+
self._update_memory_with_summary(
2820+
summary_messages, include_summaries=False
27972821
)
27982822
self._last_token_limit_tool_signature = tool_signature
27992823
return await self._astep_non_streaming_task(

0 commit comments

Comments
 (0)