Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
68 changes: 66 additions & 2 deletions camel/agents/chat_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -2915,6 +2915,7 @@ def _execute_tool(
args = tool_call_request.args
tool_call_id = tool_call_request.tool_call_id
tool = self._internal_tools[func_name]

try:
raw_result = tool(**args)
if self.mask_tool_output:
Expand All @@ -2936,7 +2937,11 @@ def _execute_tool(
logger.warning(f"{error_msg} with result: {result}")

return self._record_tool_calling(
func_name, args, result, tool_call_id, mask_output=mask_flag
func_name,
args,
result,
tool_call_id,
mask_output=mask_flag,
)

async def _aexecute_tool(
Expand Down Expand Up @@ -2978,6 +2983,7 @@ async def _aexecute_tool(
error_msg = f"Error executing async tool '{func_name}': {e!s}"
result = f"Tool execution failed: {error_msg}"
logger.warning(error_msg)

return self._record_tool_calling(func_name, args, result, tool_call_id)

def _record_tool_calling(
Expand Down Expand Up @@ -3042,16 +3048,74 @@ def _record_tool_calling(
timestamp=base_timestamp + 1e-6,
)

# Record information about this tool call
# Calculate tool cost and token usage
cost_info = self._calculate_tool_cost(assist_msg, func_msg)
# Record information about this tool call with cost tracking
tool_record = ToolCallingRecord(
tool_name=func_name,
args=args,
result=result,
tool_call_id=tool_call_id,
token_usage={
"prompt_tokens": int(cost_info["prompt_tokens"]),
"completion_tokens": int(cost_info["completion_tokens"]),
"total_tokens": int(cost_info["total_tokens"]),
},
)

return tool_record

def _calculate_tool_cost(
    self, assist_msg: OpenAIMessage, func_msg: OpenAIMessage
) -> Dict[str, int]:
    r"""Calculate the token usage for a single tool call.

    Counts tokens for the assistant message (the tool-call input) and the
    function message (the tool-call output) using the model backend's
    token counter when one is available. If no counter exists, or counting
    fails, falls back to a rough whitespace word-count estimate.

    Args:
        assist_msg (OpenAIMessage): The assistant message acting as the
            tool call input.
        func_msg (OpenAIMessage): The function message acting as the
            tool call output.

    Returns:
        Dict[str, int]: Dictionary with ``prompt_tokens``,
            ``completion_tokens``, and ``total_tokens`` entries.
    """

    def _estimate_tokens() -> tuple:
        # Crude fallback: whitespace word count. `content` may be None
        # for assistant messages that carry only tool calls — TODO confirm
        # against BaseMessage — so guard with `or ""`.
        return (
            len((assist_msg.content or "").split()),
            len((func_msg.content or "").split()),
        )

    if hasattr(self.model_backend, 'token_counter'):
        try:
            input_message = assist_msg.to_openai_message(
                OpenAIBackendRole.ASSISTANT
            )
            output_message = func_msg.to_openai_message(
                OpenAIBackendRole.FUNCTION
            )
            counter = self.model_backend.token_counter
            input_tokens = counter.count_tokens_from_messages(
                [input_message]
            )
            output_tokens = counter.count_tokens_from_messages(
                [output_message]
            )
        except Exception as e:
            # Counting can fail for backends whose counter does not
            # support these message shapes; degrade to the estimate.
            logger.error(f"Error calculating tool call token usage: {e}")
            input_tokens, output_tokens = _estimate_tokens()
    else:
        logger.warning(
            "Token counter not available. "
            "Using content word count to estimate token usage."
        )
        input_tokens, output_tokens = _estimate_tokens()

    return {
        "prompt_tokens": input_tokens,
        "completion_tokens": output_tokens,
        "total_tokens": input_tokens + output_tokens,
    }

def _stream(
self,
input_message: Union[BaseMessage, str],
Expand Down
15 changes: 13 additions & 2 deletions camel/types/agents/tool_calling_record.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from __future__ import annotations

from typing import Any, Dict, List, Optional

from pydantic import BaseModel
Expand All @@ -26,27 +28,36 @@ class ToolCallingRecord(BaseModel):
tool_call_id (str): The ID of the tool call, if available.
images (Optional[List[str]]): List of base64-encoded images returned
by the tool, if any.
token_usage (Optional[Dict[str, int]]): Token usage breakdown for this
tool call. Contains 'prompt_tokens', 'completion_tokens', and
'total_tokens'.
"""

tool_name: str
args: Dict[str, Any]
result: Any
tool_call_id: str
images: Optional[List[str]] = None
token_usage: Optional[Dict[str, int]] = None

def __str__(self) -> str:
    r"""Overridden version of the string function.

    Returns:
        str: Human-readable representation of the tool call, including
            the token usage breakdown when it was recorded.
    """
    base_str = (
        f"Tool Execution: {self.tool_name}\n"
        f"\tArgs: {self.args}\n"
        f"\tResult: {self.result}\n"
    )

    # Only append usage when the record actually carries one; older
    # records (and failed cost calculations) leave token_usage as None.
    if self.token_usage:
        base_str += f"\tToken Usage: {self.token_usage}\n"

    return base_str

def as_dict(self) -> Dict[str, Any]:
r"""Returns the tool calling record as a dictionary.

Returns:
Expand Down
Loading