@@ -35,7 +35,7 @@ def grab_customer_profile_hook(
         raise ValueError(f"Customer profile for {cust_id} not found")
     customer_profile = agent.session_state["customer_profiles"][cust_id]

-    # Replace the customer_id with the customer_profile
+    # Replace the customer with the customer_profile
     arguments["customer"] = json.dumps(customer_profile)
     # Call the function with the updated arguments
     result = function_call(**arguments)
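For context, a self-contained sketch of the hook pattern this hunk touches. The parameter list below is an assumption (the real signature sits above the displayed lines), but the body mirrors the diff: the hook swaps the bare customer id for the full profile from session state before invoking the wrapped tool.

import json
from typing import Any, Callable, Dict

from agno.agent import Agent


def grab_customer_profile_hook(
    agent: Agent, function_name: str, function_call: Callable, arguments: Dict[str, Any]
):
    # Look up the full profile for the id the model passed as "customer".
    cust_id = arguments.get("customer")
    if cust_id not in agent.session_state["customer_profiles"]:
        raise ValueError(f"Customer profile for {cust_id} not found")
    customer_profile = agent.session_state["customer_profiles"][cust_id]

    # Replace the customer with the customer_profile
    arguments["customer"] = json.dumps(customer_profile)
    # Call the function with the updated arguments
    return function_call(**arguments)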
@@ -1,5 +1,6 @@
 import asyncio
 import random
+import uuid

 from agno.agent import Agent
 from agno.eval.performance import PerformanceEval
@@ -8,6 +9,7 @@
 from agno.models.openai import OpenAIChat
 from agno.storage.postgres import PostgresStorage
 from agno.team.team import Team
+from agno.tools.reasoning import ReasoningTools

 users = [
     "abel@example.com",
@@ -68,8 +70,8 @@ def get_activities(city: str) -> str:
     return f"The activities in {city} are {', '.join(selected_activities)}."


-agent_1 = Agent(
-    agent_id="agent_1",
+weather_agent = Agent(
+    agent_id="weather_agent",
     model=OpenAIChat(id="gpt-4o-mini"),
     description="You are a helpful assistant that can answer questions about the weather.",
     instructions="Be concise, reply with one sentence.",
@@ -79,8 +81,8 @@ def get_activities(city: str) -> str:
     add_history_to_messages=True,
 )

-agent_2 = Agent(
-    agent_id="agent_2",
+activities_agent = Agent(
+    agent_id="activities_agent",
     model=OpenAIChat(id="gpt-4o-mini"),
     description="You are a helpful assistant that can answer questions about activities in a city.",
     instructions="Be concise, reply with one sentence.",
@@ -91,11 +93,12 @@ def get_activities(city: str) -> str:
 )

 team = Team(
-    members=[agent_1, agent_2],
+    members=[weather_agent, activities_agent],
     model=OpenAIChat(id="gpt-4o-mini"),
     instructions="Be concise, reply with one sentence.",
     memory=memory,
     storage=team_storage,
+    tools=[ReasoningTools()],
     markdown=True,
     enable_user_memories=True,
     add_history_to_messages=True,
@@ -108,7 +111,7 @@ async def run_team_for_user(user: str):
     await team.arun(
         message=f"I love {random_city}! What activities and weather can I expect in {random_city}?",
         user_id=user,
-        session_id=f"session_{user}",
+        session_id=f"session_{uuid.uuid4()}",
     )

 tasks = []
@@ -132,11 +135,11 @@ async def run_team_for_user(user: str):
     warmup_runs=0,
     measure_runtime=False,
     debug_mode=True,
+    memory_growth_tracking=True,
+    top_n_memory_allocations=10,
 )

 if __name__ == "__main__":
     asyncio.run(
-        team_response_with_memory_impact.arun(
-            print_results=True, print_summary=True, with_growth_tracking=True
-        )
+        team_response_with_memory_impact.arun(print_results=True, print_summary=True)
     )
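As a side note on the `session_id` change above, a minimal sketch (assuming `team` is the Team configured in this cookbook): deriving the session id from `uuid.uuid4()` gives every call its own session instead of reusing one per user, which keeps repeated eval iterations from appending to the same stored history.

import uuid

from agno.team.team import Team

team: Team = ...  # the Team configured as in this cookbook


async def run_team_for_user(user: str) -> None:
    await team.arun(
        message="What activities and weather can I expect in Tokyo?",
        user_id=user,
        session_id=f"session_{uuid.uuid4()}",  # fresh session per call
    )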
@@ -85,11 +85,10 @@ async def run_team():
     warmup_runs=0,
     measure_runtime=False,
     debug_mode=True,
+    memory_growth_tracking=True,
 )

 if __name__ == "__main__":
     asyncio.run(
-        team_response_with_memory_impact.arun(
-            print_results=True, print_summary=True, with_growth_tracking=True
-        )
+        team_response_with_memory_impact.arun(print_results=True, print_summary=True)
     )
libs/agno/agno/eval/performance.py (11 changes: 5 additions & 6 deletions)
@@ -206,8 +206,7 @@ class PerformanceEval:
     # Print detailed results
     print_results: bool = False
     # Print detailed memory growth analysis
-    with_growth_tracking: bool = False
-
+    memory_growth_tracking: bool = False
     # Number of memory allocations to track
     top_n_memory_allocations: int = 5

@@ -468,7 +467,7 @@ async def _async_measure_memory_with_growth_tracking(
         return adjusted_usage, current_snapshot

     def run(
-        self, *, print_summary: bool = False, print_results: bool = False, with_growth_tracking: bool = False
+        self, *, print_summary: bool = False, print_results: bool = False, memory_growth_tracking: bool = False
     ) -> PerformanceResult:
         """
         Main method to run the performance evaluation.
@@ -537,7 +536,7 @@ def run(
             live_log.update(status)

             # Measure memory
-            if self.with_growth_tracking or with_growth_tracking:
+            if self.memory_growth_tracking or memory_growth_tracking:
                 usage, current_snapshot = self._measure_memory_with_growth_tracking(
                     memory_baseline, previous_snapshot
                 )
@@ -584,7 +583,7 @@ def run(
         return self.result

     async def arun(
-        self, *, print_summary: bool = False, print_results: bool = False, with_growth_tracking: bool = False
+        self, *, print_summary: bool = False, print_results: bool = False, memory_growth_tracking: bool = False
     ) -> PerformanceResult:
         """
         Async method to run the performance evaluation of async functions.
@@ -659,7 +658,7 @@ async def arun(
             live_log.update(status)

             # Measure memory
-            if self.with_growth_tracking or with_growth_tracking:
+            if self.memory_growth_tracking or memory_growth_tracking:
                 usage, current_snapshot = await self._async_measure_memory_with_growth_tracking(
                     memory_baseline, previous_snapshot
                 )
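With the rename in place, growth tracking is enabled when either the instance attribute or the per-call keyword is True (`self.memory_growth_tracking or memory_growth_tracking`). A minimal sketch of both ways; `func` and `num_iterations` are assumed constructor parameters not shown in this hunk:

import asyncio

from agno.eval.performance import PerformanceEval


async def workload() -> None:
    _ = [0] * 10_000  # stand-in for the coroutine being measured


evaluation = PerformanceEval(
    func=workload,                # assumed parameter name
    num_iterations=3,             # assumed parameter name
    memory_growth_tracking=True,  # enable on the eval itself...
    top_n_memory_allocations=10,
)

if __name__ == "__main__":
    # ...or leave the attribute at its default (False) and opt in per call:
    #   evaluation.arun(print_results=True, memory_growth_tracking=True)
    asyncio.run(evaluation.arun(print_results=True, print_summary=True))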
libs/agno/agno/memory/v2/db/redis.py (3 changes: 3 additions & 0 deletions)
@@ -20,6 +20,7 @@ def __init__(
         port: int = 6379,
         db: int = 0,
         password: Optional[str] = None,
+        ssl: Optional[bool] = False,
         expire: Optional[int] = None,
     ):
         """
@@ -31,6 +32,7 @@ def __init__(
             port (int): Redis port number
             db (int): Redis database number
             password (Optional[str]): Redis password if authentication is required
+            ssl (Optional[bool]): Whether to use SSL for Redis connection
             expire (Optional[int]): TTL (time to live) in seconds for Redis keys. None means no expiration.
         """
         self.prefix = prefix
@@ -41,6 +43,7 @@ def __init__(
             db=db,
             password=password,
             decode_responses=True,  # Automatically decode responses to str
+            ssl=ssl,
         )
         log_debug(f"Created RedisMemoryDb with prefix: '{self.prefix}'")

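A minimal usage sketch of the new flag, assuming the constructor also takes `prefix` and `host` as the surrounding code suggests; the host, port, and password below are placeholders. The flag is forwarded to the underlying `redis.Redis` client.

from agno.memory.v2.db.redis import RedisMemoryDb

memory_db = RedisMemoryDb(
    prefix="agno_memory",      # key prefix, as logged by the constructor
    host="redis.example.com",  # placeholder host
    port=6380,                 # placeholder TLS port
    password="change-me",      # placeholder credential
    ssl=True,                  # new: connect over SSL/TLS
)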
libs/agno/pyproject.toml (4 changes: 2 additions & 2 deletions)
@@ -1,6 +1,6 @@
 [project]
 name = "agno"
-version = "1.7.1"
+version = "1.7.2"
 description = "Agno: a lightweight library for building Multi-Agent Systems"
 requires-python = ">=3.7,<4"
 readme = "README.md"
@@ -434,4 +434,4 @@ module = [
     "zep_cloud.*",
     "oxylabs.*"
 ]
-ignore_missing_imports = true
+ignore_missing_imports = true
@@ -311,7 +311,9 @@ async def get_the_weather(city: str):
     assert response.tools[0].tool_args == {"city": "Tokyo"}

     # Mark the tool as confirmed
-    response.tools[0].confirmed = True
+    for tool_response in response.tools:
+        if tool_response.requires_confirmation:
+            tool_response.confirmed = True
     found_confirmation = True
     assert found_confirmation, "No tools were found to require confirmation"

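For context, a hedged sketch of the human-in-the-loop confirmation pattern this test now exercises: every tool that asks for confirmation is confirmed, then the paused run is resumed. The helper name is hypothetical; `agent` and the paused `response` are assumed to come from an earlier `arun` call as in the test.

from agno.agent import Agent


async def confirm_and_resume(agent: Agent, response):
    """Confirm every tool that requires confirmation, then resume the paused run."""
    for tool_response in response.tools:
        if tool_response.requires_confirmation:
            tool_response.confirmed = True
    return await agent.acontinue_run(response)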
@@ -119,7 +119,9 @@ async def get_the_weather(city: str):
     assert response.tools[0].tool_args == {"city": "Tokyo"}

     # Provide user input
-    response.tools[0].user_input_schema[0].value = "Tokyo"
+    for tool_response in response.tools:
+        if tool_response.requires_user_input:
+            tool_response.user_input_schema[0].value = "Tokyo"

     response = await agent.acontinue_run(response)
     assert response.is_paused is False
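Similarly, a hedged sketch of the user-input counterpart, assuming the paused `response` exposes `requires_user_input` and a `user_input_schema` list of fields as in the test above; the helper name and the idea of filling every field with the same value are illustrative only.

from agno.agent import Agent


async def supply_input_and_resume(agent: Agent, response, value: str = "Tokyo"):
    """Fill in every requested user-input field, then resume the paused run."""
    for tool_response in response.tools:
        if tool_response.requires_user_input:
            for field in tool_response.user_input_schema:
                field.value = value
    return await agent.acontinue_run(response)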
libs/agno/tests/integration/teams/test_memory_impact.py (2 changes: 1 addition & 1 deletion)
@@ -279,7 +279,7 @@ def simple_function(input_text: str) -> str:
        mode="route",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[agent],
-       storage=agent_storage,
+       storage=team_storage,
        memory=memory,
        enable_user_memories=True,
    )