ADK Interaction with local Ollama model with Weather tool Integration not working #442
Unanswered
santhosh-mp
asked this question in
Q&A
Replies: 1 comment
-
@selcukgun could you take a look? |
Beta Was this translation helpful? Give feedback.
0 replies
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Uh oh!
There was an error while loading. Please reload this page.
Uh oh!
There was an error while loading. Please reload this page.
-
I am trying to build an ADK Agent that interacts with the Ollama/llama3:instruct model running locally, with tools integration to pull weather data. It seems like there is an issue with the ADK library — kindly help.
ERROR I AM GETTING IS -
=== Final Response ===
An error occurred during agent execution: 1 validation error for InvocationContext
user_content
Input should be a valid dictionary or object to extract fields from [type=model_attributes_type, input_value='What is the weather in Leh?', input_type=str]
For further information visit https://errors.pydantic.dev/2.11/v/model_attributes_type
CODE THAT I HAVE -
`
import os
import asyncio
import requests
import json
import inspect
from docstring_parser import parse # Install: pip install docstring_parser
# --- ADK Imports ---
from google.adk.agents import Agent # Base Agent type
from google.adk.agents.llm_agent import LlmAgent # Using LlmAgent explicitly
from google.adk.models.lite_llm import LiteLlm
from google.adk.sessions import InMemorySessionService
from google.adk.runners import Runner
# --- Type Imports (Safely) ---
try:
# Import types needed for creating messages and potentially for FunctionCall/Response if needed later
from google.generativeai.types import Content, Part, FunctionCall, FunctionResponse, ExecutableCode, CodeExecutionResult
# Import Enums needed to potentially set None explicitly if needed (though Pydantic usually handles None)
# from google.ai.generativelanguage import Language, Outcome # Import specific enums if direct setting is needed
except ImportError:
print("WARN: Could not import specific types from google.generativeai.types. Using glm fallback.")
try:
import google.ai.generativelanguage as glm
Content = glm.Content # type: ignore
Part = glm.Part # type: ignore
FunctionCall = glm.FunctionCall # type: ignore
FunctionResponse = glm.FunctionResponse # type: ignore
ExecutableCode = glm.ExecutableCode # type: ignore
CodeExecutionResult = glm.CodeExecutionResult # type: ignore
print("INFO: Using Content/Part/etc. from google.ai.generativelanguage (glm).")
except (ImportError, AttributeError):
print("CRITICAL ERROR: Could not find necessary ADK/GenerativeAI types. Aborting.")
exit()
import litellm

litellm.set_verbose = True  # Optional for debugging

import warnings
warnings.filterwarnings("ignore")

import logging
# BUG FIX: logging.basicConfig() only configures the root logger on its
# FIRST call; the original invoked it twice (INFO, then ERROR), so the
# intended ERROR level never took effect. Configure once at ERROR, which is
# what the original comments asked for ("cleaner output").
logging.basicConfig(level=logging.ERROR)
logging.getLogger("LiteLLM").setLevel(logging.ERROR)

print("Libraries imported successfully.")
# --- API Key ---
# Read the key from the environment; the placeholder value forces an
# explicit configuration step before the script can run.
WEATHER_KEY = os.environ.get("WEATHER_API_KEY", "YOUR_VALID_API_KEY_HERE")  # <-- REPLACE or set env var
if WEATHER_KEY == "YOUR_VALID_API_KEY_HERE" or not WEATHER_KEY:
    print("CRITICAL ERROR: Weather API Key not configured.")
    # FIX: raise SystemExit instead of relying on the site-module exit().
    raise SystemExit(1)
# --- Model Config ---
# Local Ollama model identifier and endpoint (Ollama's default port is 11434).
MODEL_NAME = "llama3:instruct"
MODEL_URL = "http://localhost:11434"
litellm.api_base = MODEL_URL # Set if not default or using env var
# --- Tool Definition (RENAME function to WeatherTool) ---
def WeatherTool(city: str) -> dict:
    """
    Fetches the current weather for a given city using the WeatherAPI. (Function name: WeatherTool)
    Args:
        city: The name of the city.
    Returns:
        On success: {"status": "success", "city", "temperature_celsius", "condition"}.
        On failure: {"status": "error", "message": <human-readable reason>}.
    """
    if not WEATHER_KEY or WEATHER_KEY == "YOUR_VALID_API_KEY_HERE":
        return {"status": "error", "message": "Weather API Key is not configured."}
    # BUG FIX: pass query arguments via `params` so requests URL-encodes them.
    # The original interpolated `city` straight into the URL, which breaks for
    # names containing spaces or non-ASCII characters (e.g. "New Delhi").
    url = "http://api.weatherapi.com/v1/current.json"
    params = {"key": WEATHER_KEY, "q": city, "aqi": "no"}
    try:
        response = requests.get(url, params=params, timeout=10)
        response.raise_for_status()
        weather_data = response.json()
        loc = weather_data.get("location", {})
        curr = weather_data.get("current", {})
        cond = curr.get("condition", {})
        return {
            "status": "success",
            "city": loc.get("name", city),
            "temperature_celsius": curr.get("temp_c"),
            "condition": cond.get("text", "N/A"),
        }
    except requests.exceptions.HTTPError as e:
        status = e.response.status_code
        msg = f"HTTP Error {status}"
        # Map the common WeatherAPI failure codes to actionable messages.
        if status == 400:
            msg = "City not found/invalid request."
        elif status == 401:
            msg = "Invalid API key."
        elif status == 403:
            msg = "API key disabled/quota exceeded."
        return {"status": "error", "message": f"Could not get weather: {msg}"}
    except Exception as e:
        # Deliberate catch-all: the tool must return a dict, never raise,
        # so the agent can surface the failure to the user.
        msg = f"Unexpected error in tool: {e}"
        print(f"--- TOOL ERROR: {msg} for {city} ---")  # Keep unexpected error print
        return {"status": "error", "message": msg}
# --- Helper Function to Format Tools (Needed if passing tools directly to LLM later, but not strictly for ADK Agent) ---
# Keep it for potential future use, but ADK Agent usually handles internal formatting
def format_tools_for_openai(tools_list: list) -> list:
    """Convert plain Python functions into OpenAI-style tool/function schemas.

    Args:
        tools_list: Callables whose signatures and docstrings describe the tool.

    Returns:
        A list of {"type": "function", "function": {...}} schema dicts; any
        tool that cannot be introspected is skipped with a WARN message.
    """
    formatted_tools = []
    type_mapping = {"str": "string", "int": "integer", "float": "number", "bool": "boolean"}
    for func in tools_list:
        try:
            sig = inspect.signature(func)
            docstring = parse(inspect.getdoc(func))
            # BUG FIX: plain functions expose __name__, not .name — the original
            # `func.name` raised AttributeError on every tool, which the broad
            # except swallowed, so the helper always returned [].
            func_name = func.__name__
            func_desc = docstring.short_description if docstring.short_description else f"Call {func_name}"
            properties = {}
            required_params = []
            for param in sig.parameters.values():
                # Map the Python annotation's repr (e.g. "<class 'str'>") to a JSON type.
                param_type = str(param.annotation).replace("<class '", "").replace("'>", "")
                json_type = type_mapping.get(param_type, "string")
                param_desc = next((dp.description for dp in docstring.params if dp.arg_name == param.name), "")
                properties[param.name] = {"type": json_type, "description": param_desc}
                if param.default is inspect.Parameter.empty:
                    required_params.append(param.name)
            formatted_tools.append({
                "type": "function",
                "function": {
                    "name": func_name,
                    "description": func_desc,
                    "parameters": {"type": "object", "properties": properties, "required": required_params},
                },
            })
        except Exception as e:
            # Same fix here: '__name__', not 'name'.
            print(f"WARN: Error formatting tool '{getattr(func, '__name__', 'Unknown')}': {e}")
    return formatted_tools
# --- Agent Definition ---
# Use minimal instruction and ensure force_ollama_tool_call=True
# NOTE(review): the original instruction literal was shattered across lines by
# markdown formatting; reconstructed here as adjacent string literals (the
# stray "" markers in the mangled text indicate that was the original form).
minimal_instruction = (
    "You are an assistant that uses tools. Your primary tool is WeatherTool "
    "for getting weather information. "
    "When asked for the weather in a specific city, call the WeatherTool "
    "tool with the city name. "
    "Return the direct output from the tool."  # Explicitly state raw output expectation
)
# Define model adapter separately
# LiteLlm adapter that routes the ADK agent's model calls to the local
# Ollama server via litellm.
# NOTE(review): confirm `force_ollama_tool_call` is an accepted parameter of
# google.adk.models.lite_llm.LiteLlm in your installed ADK version — an
# unrecognized kwarg may be rejected or silently ignored. TODO confirm.
model_adapter = LiteLlm(
    model=f"ollama/{MODEL_NAME}",
    api_base=MODEL_URL,
    # *** ENSURE force_ollama_tool_call IS TRUE ***
    force_ollama_tool_call=True,
    # tool_choice="auto" # Usually handled by Agent/Runner when tools are present
)
# Define agent instance
try:
    weather_agent = LlmAgent(  # Use LlmAgent
        # NOTE(review): the agent intentionally shares its name with the tool
        # function (the original author's workaround); rename independently if
        # that coupling is not required.
        name="WeatherTool",
        model=model_adapter,  # Pass the adapter instance
        description="An agent that calls the WeatherTool function.",
        instruction=minimal_instruction,  # Use the minimal instruction
        tools=[WeatherTool],  # Pass tool function object in a list
    )
    print(f"Weather agent created: Name='{weather_agent.name}'")
    if weather_agent.tools and isinstance(weather_agent.tools, list):
        # BUG FIX: plain functions expose __name__, not .name — the original
        # getattr(tool, 'name', ...) always fell through to str(tool).
        registered_tool_names = [getattr(tool, '__name__', str(tool)) for tool in weather_agent.tools]
        print(f"Agent Tools Registered (as list): {registered_tool_names}")
    else:
        print("Agent Tools Registered: None")
except Exception as agent_e:
    print(f"CRITICAL ERROR: Failed to create Agent object: {agent_e}")
    # FIX: raise SystemExit instead of the site-module exit() helper.
    raise SystemExit(1)
# --- Session and Runner Setup (Use Runner again) ---
# Session service and conversation identifiers.
# NOTE(review): no session is ever created here (e.g. via
# session_service.create_session(app_name=..., user_id=..., session_id=...)).
# ADK runners typically require the session to exist before run/run_async is
# called with SESSION_ID — confirm against your ADK version; this may
# contribute to the runtime failure reported above.
try:
    session_service = InMemorySessionService()
    APP_NAME = "WeatherAppADK" # App name
    USER_ID = "adk_user" # Example user/session IDs
    SESSION_ID = "adk_session"
except Exception as session_e:
    print(f"CRITICAL ERROR: Failed to create Session Service/Session: {session_e}")
    exit()
# Create runner again
# Construct the Runner that drives the agent against the session service.
try:
    runner = Runner(
        agent=weather_agent, # Pass the created agent instance
        session_service=session_service,
        app_name=APP_NAME,
    )
    print(f"Runner created for agent: {runner.agent.name}")
except Exception as runner_e:
    print(f"CRITICAL ERROR: Failed to create Runner object: {runner_e}")
    exit()
# --- Agent Interaction Function (Using Runner + External Formatting + Explicit Part Init) ---
async def agent_interaction(query: str):
    """Send one user query through the ADK Runner and print the final reply.

    BUG FIX for the reported pydantic error ("user_content ...
    input_type=str"): ADK's InvocationContext expects the user message as a
    Content object (with a list of Parts), not a raw string. Wrap the query
    before handing it to the runner.

    NOTE(review): the function body appeared truncated in the posted snippet;
    this implementation follows the standard ADK run_async event loop —
    confirm event API names against your installed ADK version.
    """
    print(f"\n--- User Query: {query} ---")
    # Wrap the plain string so pydantic validation of user_content succeeds.
    content = Content(role="user", parts=[Part(text=query)])
    final_response = "No final response received."
    try:
        # run_async yields a stream of events; the final-response event
        # carries the agent's answer in its content parts.
        async for event in runner.run_async(
            user_id=USER_ID, session_id=SESSION_ID, new_message=content
        ):
            if event.is_final_response() and event.content and event.content.parts:
                final_response = event.content.parts[0].text
    except Exception as e:
        final_response = f"An error occurred during agent execution: {e}"
    print(f"=== Final Response ===\n{final_response}")
# --- Run Conversation ---
async def run_conversation():
    """Drive a short scripted conversation through the agent and print results."""
    print("\nStarting conversation...")
    await agent_interaction("What is the weather in Leh?")
    print("-" * 50)
    # Additional sample queries, left disabled:
    #await agent_interaction("What is the capital of France?")
    #print("-" * 50)
    #await agent_interaction("Tell me the weather in NonExistentCityAbc?")
    print("\nConversation ended.")
# --- Main block ---
# BUG FIX: the guard must compare the __name__ dunder; the posted
# `if name == "main":` (underscores eaten by markdown) raises NameError.
if __name__ == "__main__":
    if not WEATHER_KEY or WEATHER_KEY == "YOUR_VALID_API_KEY_HERE":
        print("\nCRITICAL: Weather API Key not set.")
    else:
        print(f"Using Weather API Key ending with: ...{WEATHER_KEY[-4:]}")
        try:
            import docstring_parser  # type: ignore  # verify dependency is importable
        except ImportError:
            # Best-effort self-install. NOTE(review): largely unreachable —
            # the top-level `from docstring_parser import parse` would have
            # failed before this point if the package were missing.
            print("\nINFO: Installing 'docstring-parser'...")
            import subprocess
            import sys
            subprocess.check_call([sys.executable, "-m", "pip", "install", "docstring-parser"])
        # Run the main async function
        asyncio.run(run_conversation())
`
Beta Was this translation helpful? Give feedback.
All reactions