Skip to content

Commit d1999d6

Browse files
committed
Update
2 parents d78ab8f + 69f7778 commit d1999d6

File tree

13 files changed

+179
-55
lines changed

13 files changed

+179
-55
lines changed
Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
"""
2+
This example shows how to use complex input types with tools.
3+
4+
Recommendations:
5+
- Specify fields with descriptions, these will be used in the JSON schema sent to the model and will increase accuracy.
6+
- Try not to nest the structures too deeply, the model will have a hard time understanding them.
7+
"""
8+
9+
from datetime import datetime
10+
from enum import Enum
11+
from typing import List, Optional
12+
13+
from agno.agent import Agent
14+
from agno.tools.decorator import tool
15+
from pydantic import BaseModel, Field
16+
17+
18+
# Define Pydantic models for our tools
19+
class UserProfile(BaseModel):
    """User profile information."""

    # Field descriptions below are surfaced in the JSON schema sent to the
    # model, so they directly improve tool-call accuracy.
    name: str = Field(..., description="Full name of the user")
    email: str = Field(..., description="Valid email address")
    # Age is constrained to a plausible human range.
    age: int = Field(..., ge=0, le=120, description="Age of the user")
    interests: List[str] = Field(default_factory=list, description="List of user interests")
    # NOTE(review): datetime.now yields a naive local timestamp — confirm UTC is not required.
    created_at: datetime = Field(default_factory=datetime.now, description="Account creation timestamp")
31+
32+
33+
class TaskPriority(str, Enum):
    """Priority levels for tasks."""

    # str mixin makes each member JSON-serializable as its plain value.
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    URGENT = "urgent"
40+
41+
42+
class Task(BaseModel):
    """Task information."""

    # Title length is bounded so the model cannot emit unbounded text.
    title: str = Field(..., min_length=1, max_length=100, description="Task title")
    description: Optional[str] = Field(None, description="Detailed task description")
    priority: TaskPriority = Field(default=TaskPriority.MEDIUM, description="Task priority level")
    due_date: Optional[datetime] = Field(None, description="Task due date")
    # Single level of nesting only — deep structures hurt model accuracy.
    assigned_to: Optional[UserProfile] = Field(None, description="User assigned to the task")
54+
55+
56+
# Custom tools using Pydantic models
57+
@tool
def create_user(user_data: UserProfile) -> str:
    """Create a new user profile with validated information."""
    # Placeholder: a real application would persist the profile to a database.
    confirmation = f"Created user profile for {user_data.name} with email {user_data.email}"
    return confirmation
62+
63+
64+
@tool
def create_task(task_data: Task) -> str:
    """Create a new task with priority and assignment."""
    # Placeholder: a real application would persist the task to a database.
    confirmation = f"Created task '{task_data.title}' with priority {task_data.priority}"
    return confirmation
69+
70+
71+
# Build the demo agent with both validated tools registered.
agent = Agent(
    name="task_manager",
    description="An agent that manages users and tasks with proper validation",
    tools=[create_user, create_task],
)

if __name__ == "__main__":
    # Example 1: the model must fill a nested UserProfile from free text.
    agent.print_response(
        "Create a new user named John Doe with email john@example.com, age 30, and interests in Python and AI"
    )

    # Example 2: the model must pick a TaskPriority enum value and a due date.
    agent.print_response(
        "Create a high priority task titled 'Implement API endpoints' due tomorrow"
    )

cookbook/apps/playground/mcp_demo.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,12 @@
1+
"""This example shows how to run an Agent using our MCP integration in the Agno Playground.
2+
3+
For this example to run you need:
4+
- Create a GitHub personal access token following these steps:
5+
- https://github.com/modelcontextprotocol/servers/tree/main/src/github#setup
6+
- Set the GITHUB_TOKEN environment variable: `export GITHUB_TOKEN=<Your GitHub access token>`
7+
- Run: `pip install agno mcp openai` to install the dependencies
8+
"""
9+
110
import asyncio
211
from os import getenv
312
from textwrap import dedent

cookbook/models/anthropic/prompt_caching.py

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -34,14 +34,10 @@
3434
response = agent.run(
3535
"Explain the difference between REST and GraphQL APIs with examples"
3636
)
37-
print(
38-
f"First run cache creation input tokens = {response.metrics['cache_creation_input_tokens']}"
39-
) # type: ignore
37+
print(f"First run cache write tokens = {response.metrics['cache_write_tokens']}") # type: ignore
4038

4139
# Second run - this will use the cached system prompt
4240
response = agent.run(
4341
"What are the key principles of clean code and how do I apply them in Python?"
4442
)
45-
print(
46-
f"Second run cache read input tokens = {response.metrics['cache_read_input_tokens']}"
47-
) # type: ignore
43+
print(f"Second run cache read tokens = {response.metrics['cached_tokens']}") # type: ignore

cookbook/models/anthropic/prompt_caching_extended.py

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -35,14 +35,10 @@
3535
response = agent.run(
3636
"Explain the difference between REST and GraphQL APIs with examples"
3737
)
38-
print(
39-
f"First run cache creation input tokens = {response.metrics['cache_creation_input_tokens']}"
40-
) # type: ignore
38+
print(f"First run cache write tokens = {response.metrics['cache_write_tokens']}") # type: ignore
4139

4240
# Second run - this will use the cached system prompt
4341
response = agent.run(
4442
"What are the key principles of clean code and how do I apply them in Python?"
4543
)
46-
print(
47-
f"Second run cache read input tokens = {response.metrics['cache_read_input_tokens']}"
48-
) # type: ignore
44+
print(f"Second run cache read tokens = {response.metrics['cached_tokens']}") # type: ignore

libs/agno/agno/agent/metrics.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ class SessionMetrics:
1515
input_audio_tokens: int = 0
1616
output_audio_tokens: int = 0
1717
cached_tokens: int = 0
18+
cache_write_tokens: int = 0
1819
reasoning_tokens: int = 0
1920
prompt_tokens: int = 0
2021
completion_tokens: int = 0
@@ -55,6 +56,7 @@ def __add__(self, other: Union["SessionMetrics", "MessageMetrics"]) -> "SessionM
5556
input_audio_tokens=self.input_audio_tokens + other.input_audio_tokens,
5657
output_audio_tokens=self.output_audio_tokens + other.output_audio_tokens,
5758
cached_tokens=self.cached_tokens + other.cached_tokens,
59+
cache_write_tokens=self.cache_write_tokens + other.cache_write_tokens,
5860
reasoning_tokens=self.reasoning_tokens + other.reasoning_tokens,
5961
)
6062

libs/agno/agno/app/playground/async_router.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -616,15 +616,15 @@ async def get_teams():
616616
if teams is None:
617617
return []
618618

619-
return [TeamGetResponse.from_team(team) for team in teams]
619+
return [TeamGetResponse.from_team(team, async_mode=True) for team in teams]
620620

621621
@playground_router.get("/teams/{team_id}")
622622
async def get_team(team_id: str):
623623
team = get_team_by_id(team_id, teams)
624624
if team is None:
625625
raise HTTPException(status_code=404, detail="Team not found")
626626

627-
return TeamGetResponse.from_team(team)
627+
return TeamGetResponse.from_team(team, async_mode=True)
628628

629629
@playground_router.post("/teams/{team_id}/runs")
630630
async def create_team_run(

libs/agno/agno/app/playground/schemas.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ class AgentGetResponse(BaseModel):
3232
instructions: Optional[Union[List[str], str, Callable]] = None
3333

3434
@classmethod
35-
def from_agent(self, agent: Agent) -> "AgentGetResponse":
35+
def from_agent(self, agent: Agent, async_mode: bool = False) -> "AgentGetResponse":
3636
if agent.memory:
3737
memory_dict: Optional[Dict[str, Any]] = {}
3838
if isinstance(agent.memory, AgentMemory) and agent.memory.db:
@@ -52,7 +52,7 @@ def from_agent(self, agent: Agent) -> "AgentGetResponse":
5252
memory_dict = None
5353
else:
5454
memory_dict = None
55-
tools = agent.get_tools(session_id=str(uuid4()))
55+
tools = agent.get_tools(session_id=str(uuid4()), async_mode=async_mode)
5656
return AgentGetResponse(
5757
agent_id=agent.agent_id,
5858
name=agent.name,
@@ -151,9 +151,10 @@ class TeamGetResponse(BaseModel):
151151
response_model: Optional[str] = None
152152
storage: Optional[Dict[str, Any]] = None
153153
memory: Optional[Dict[str, Any]] = None
154+
async_mode: bool = False
154155

155156
@classmethod
156-
def from_team(self, team: Team) -> "TeamGetResponse":
157+
def from_team(self, team: Team, async_mode: bool = False) -> "TeamGetResponse":
157158
import json
158159

159160
memory_dict: Optional[Dict[str, Any]] = {}
@@ -191,9 +192,9 @@ def from_team(self, team: Team) -> "TeamGetResponse":
191192
storage={"name": team.storage.__class__.__name__} if team.storage else None,
192193
memory=memory_dict,
193194
members=[
194-
AgentGetResponse.from_agent(member)
195+
AgentGetResponse.from_agent(member, async_mode=async_mode)
195196
if isinstance(member, Agent)
196-
else TeamGetResponse.from_team(member)
197+
else TeamGetResponse.from_team(member, async_mode=async_mode)
197198
if isinstance(member, Team)
198199
else None
199200
for member in team.members

libs/agno/agno/models/anthropic/claude.py

Lines changed: 12 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -463,12 +463,12 @@ def parse_provider_response(self, response: AnthropicMessage, **kwargs) -> Model
463463

464464
# Add usage metrics
465465
if response.usage is not None:
466-
model_response.response_usage = response.usage
467-
if response.usage.cache_creation_input_tokens is not None:
468-
model_response.response_usage.cache_creation_input_tokens = response.usage.cache_creation_input_tokens
469-
if response.usage.cache_read_input_tokens is not None:
470-
model_response.response_usage.cache_read_input_tokens += response.usage.cache_read_input_tokens
471-
466+
model_response.response_usage = {
467+
"cache_write_tokens": response.usage.cache_creation_input_tokens,
468+
"cached_tokens": response.usage.cache_read_input_tokens,
469+
"input_tokens": response.usage.input_tokens,
470+
"output_tokens": response.usage.output_tokens,
471+
}
472472
return model_response
473473

474474
def parse_provider_response_delta(
@@ -543,14 +543,11 @@ def parse_provider_response_delta(
543543
)
544544

545545
if response.message.usage is not None:
546-
model_response.response_usage = response.message.usage
547-
if response.message.usage.cache_creation_input_tokens is not None:
548-
model_response.response_usage.cache_creation_input_tokens = (
549-
response.message.usage.cache_creation_input_tokens
550-
)
551-
if response.message.usage.cache_read_input_tokens is not None:
552-
model_response.response_usage.cache_read_input_tokens += (
553-
response.message.usage.cache_read_input_tokens
554-
)
546+
model_response.response_usage = {
547+
"cache_write_tokens": response.usage.cache_creation_input_tokens,
548+
"cached_tokens": response.usage.cache_read_input_tokens,
549+
"input_tokens": response.usage.input_tokens,
550+
"output_tokens": response.usage.output_tokens,
551+
}
555552

556553
return model_response

libs/agno/agno/models/base.py

Lines changed: 4 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,8 @@ def _add_usage_metrics_to_assistant_message(assistant_message: Message, response
6868
assistant_message.metrics.total_tokens = response_usage.get("total_tokens", 0)
6969
if "cached_tokens" in response_usage:
7070
assistant_message.metrics.cached_tokens = response_usage.get("cached_tokens", 0)
71+
if "cache_write_tokens" in response_usage:
72+
assistant_message.metrics.cache_write_tokens = response_usage.get("cache_write_tokens", 0)
7173
else:
7274
assistant_message.metrics.total_tokens = (
7375
assistant_message.metrics.input_tokens + assistant_message.metrics.output_tokens
@@ -87,21 +89,8 @@ def _add_usage_metrics_to_assistant_message(assistant_message: Message, response
8789
assistant_message.metrics.total_tokens = response_usage.total_tokens
8890
if hasattr(response_usage, "cached_tokens") and response_usage.cached_tokens is not None:
8991
assistant_message.metrics.cached_tokens = response_usage.cached_tokens
90-
91-
# Anthropic prompt caching specific metric
92-
if (
93-
hasattr(response_usage, "cache_creation_input_tokens")
94-
and response_usage.cache_creation_input_tokens is not None
95-
):
96-
assistant_message.metrics.cache_creation_input_tokens = response_usage.cache_creation_input_tokens
97-
98-
# Anthropic prompt caching specific metric
99-
if hasattr(response_usage, "cache_read_input_tokens") and response_usage.cache_read_input_tokens is not None:
100-
assistant_message.metrics.cache_read_input_tokens = response_usage.cache_read_input_tokens
101-
else:
102-
assistant_message.metrics.total_tokens = (
103-
assistant_message.metrics.input_tokens + assistant_message.metrics.output_tokens
104-
)
92+
if hasattr(response_usage, "cache_write_tokens") and response_usage.cache_write_tokens is not None:
93+
assistant_message.metrics.cache_write_tokens = response_usage.cache_write_tokens
10594

10695
# If you didn't capture any total tokens
10796
if (

libs/agno/agno/models/message.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -59,8 +59,7 @@ class MessageMetrics:
5959
input_audio_tokens: int = 0
6060
output_audio_tokens: int = 0
6161
cached_tokens: int = 0
62-
cache_creation_input_tokens: int = 0
63-
cache_read_input_tokens: int = 0
62+
cache_write_tokens: int = 0
6463
reasoning_tokens: int = 0
6564
prompt_tokens: int = 0
6665
completion_tokens: int = 0
@@ -111,6 +110,7 @@ def __add__(self, other: "MessageMetrics") -> "MessageMetrics":
111110
input_audio_tokens=self.input_audio_tokens + other.input_audio_tokens,
112111
output_audio_tokens=self.output_audio_tokens + other.output_audio_tokens,
113112
cached_tokens=self.cached_tokens + other.cached_tokens,
113+
cache_write_tokens=self.cache_write_tokens + other.cache_write_tokens,
114114
reasoning_tokens=self.reasoning_tokens + other.reasoning_tokens,
115115
)
116116

@@ -367,6 +367,8 @@ def log(self, metrics: bool = True, level: Optional[str] = None):
367367
token_metrics.append(f"total={self.metrics.total_tokens}")
368368
if self.metrics.cached_tokens:
369369
token_metrics.append(f"cached={self.metrics.cached_tokens}")
370+
if self.metrics.cache_write_tokens:
371+
token_metrics.append(f"cache_write_tokens={self.metrics.cache_write_tokens}")
370372
if self.metrics.reasoning_tokens:
371373
token_metrics.append(f"reasoning={self.metrics.reasoning_tokens}")
372374
if self.metrics.audio_tokens:

0 commit comments

Comments
 (0)