diff --git a/sdk/evaluation/azure-ai-evaluation/CHANGELOG.md b/sdk/evaluation/azure-ai-evaluation/CHANGELOG.md
index 198709a706d6..f86d13ca69f0 100644
--- a/sdk/evaluation/azure-ai-evaluation/CHANGELOG.md
+++ b/sdk/evaluation/azure-ai-evaluation/CHANGELOG.md
@@ -25,6 +25,8 @@
### Bugs Fixed
- Significant improvements to IntentResolution evaluator. New version has less variance, is nearly 2x faster and consumes fewer tokens.
+
+- Fixes and improvements to ToolCallAccuracy evaluator. The new version has less variance and now evaluates all tool calls made in a turn at once. Previously, each tool call was evaluated independently, without context on the other tool calls in the same turn, and the results were aggregated into a score in the range [0-1]. The score range is now [1-5].
- Fixed MeteorScoreEvaluator and other threshold-based evaluators returning incorrect binary results due to integer conversion of decimal scores. Previously, decimal scores like 0.9375 were incorrectly converted to integers (0) before threshold comparison, causing them to fail even when above the threshold. [#41415](https://github.com/Azure/azure-sdk-for-python/issues/41415)
- Added a new enum `ADVERSARIAL_QA_DOCUMENTS` which moves all the "file_content" type prompts away from `ADVERSARIAL_QA` to the new enum
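
To make the new behavior concrete for readers upgrading, here is a minimal, hedged sketch of invoking the revised evaluator. It assumes the package's usual top-level exports (`ToolCallAccuracyEvaluator`, `AzureOpenAIModelConfiguration`) and uses placeholder endpoint/deployment values; the commented output keys follow the `tool_call_accuracy*` naming introduced by this change, and actual scores depend on the judge model.

```python
# Illustrative usage of the revised ToolCallAccuracyEvaluator (placeholder credentials).
from azure.ai.evaluation import AzureOpenAIModelConfiguration, ToolCallAccuracyEvaluator

model_config = AzureOpenAIModelConfiguration(
    azure_endpoint="https://<your-endpoint>.openai.azure.com",
    api_key="<api-key>",
    azure_deployment="<deployment-name>",
)
evaluator = ToolCallAccuracyEvaluator(model_config=model_config)

result = evaluator(
    query="Where is the Eiffel Tower?",
    tool_calls=[
        {
            "type": "tool_call",
            "tool_call_id": "call_1",
            "name": "fetch_weather",
            "arguments": {"location": "Paris"},
        }
    ],
    tool_definitions=[
        {
            "name": "fetch_weather",
            "type": "function",
            "description": "Fetches the weather information for the specified location.",
            "parameters": {
                "type": "object",
                "properties": {"location": {"type": "string"}},
            },
        }
    ],
)

# All tool calls in the turn are judged together on the 1-5 rubric, e.g.:
# result["tool_call_accuracy"]           -> 5.0
# result["tool_call_accuracy_result"]    -> "pass"
# result["tool_call_accuracy_threshold"] -> 3
# result["tool_call_accuracy_reason"]    -> the judge model's chain of thought
```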
diff --git a/sdk/evaluation/azure-ai-evaluation/TROUBLESHOOTING.md b/sdk/evaluation/azure-ai-evaluation/TROUBLESHOOTING.md
index 7d8a17a549cd..a48a4973fdc5 100644
--- a/sdk/evaluation/azure-ai-evaluation/TROUBLESHOOTING.md
+++ b/sdk/evaluation/azure-ai-evaluation/TROUBLESHOOTING.md
@@ -46,9 +46,6 @@ This guide walks you through how to investigate failures, common errors in the `
- Risk and safety evaluators depend on the Azure AI Studio safety evaluation backend service. For a list of supported regions, please refer to the documentation [here](https://aka.ms/azureaisafetyeval-regionsupport).
- If you encounter a 403 Unauthorized error when using safety evaluators, verify that you have the `Contributor` role assigned to your Azure AI project. `Contributor` role is currently required to run safety evaluations.
-### Troubleshoot Quality Evaluator Issues
-- For `ToolCallAccuracyEvaluator`, if your input did not have a tool to evaluate, the current behavior is to output `null`.
-
## Handle Simulation Errors
### Adversarial Simulation Supported Regions
diff --git a/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_common/_base_eval.py b/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_common/_base_eval.py
index 19c800024d4e..f6890b6aceb5 100644
--- a/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_common/_base_eval.py
+++ b/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_common/_base_eval.py
@@ -4,14 +4,34 @@
import inspect
from abc import ABC, abstractmethod
-from typing import Any, Callable, Dict, Generic, List, TypedDict, TypeVar, Union, cast, final, Optional
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Generic,
+ List,
+ TypedDict,
+ TypeVar,
+ Union,
+ cast,
+ final,
+ Optional,
+)
from azure.ai.evaluation._legacy._adapters.utils import async_run_allowing_running_loop
from typing_extensions import ParamSpec, TypeAlias, get_overloads
-from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
+from azure.ai.evaluation._exceptions import (
+ ErrorBlame,
+ ErrorCategory,
+ ErrorTarget,
+ EvaluationException,
+)
from azure.ai.evaluation._common.utils import remove_optional_singletons
-from azure.ai.evaluation._constants import _AggregationType, EVALUATION_PASS_FAIL_MAPPING
+from azure.ai.evaluation._constants import (
+ _AggregationType,
+ EVALUATION_PASS_FAIL_MAPPING,
+)
from azure.ai.evaluation._model_configurations import Conversation
from azure.ai.evaluation._common._experimental import experimental
@@ -176,7 +196,9 @@ def _derive_singleton_inputs(self) -> List[str]:
singletons.extend([p for p in params if p != "self"])
return singletons
- def _derive_conversation_converter(self) -> Callable[[Dict], List[DerivedEvalInput]]:
+ def _derive_conversation_converter(
+ self,
+ ) -> Callable[[Dict], List[DerivedEvalInput]]:
"""Produce the function that will be used to convert conversations to a list of evaluable inputs.
This uses the inputs derived from the _derive_singleton_inputs function to determine which
aspects of a conversation ought to be extracted.
@@ -235,7 +257,9 @@ def converter(conversation: Dict) -> List[DerivedEvalInput]:
return converter
- def _derive_multi_modal_conversation_converter(self) -> Callable[[Dict], List[Dict[str, Any]]]:
+ def _derive_multi_modal_conversation_converter(
+ self,
+ ) -> Callable[[Dict], List[Dict[str, Any]]]:
"""Produce the function that will be used to convert multi-modal conversations to a list of evaluable inputs.
This uses the inputs derived from the _derive_singleton_inputs function to determine which
aspects of a conversation ought to be extracted.
@@ -288,7 +312,7 @@ def multi_modal_converter(conversation: Dict) -> List[Dict[str, Any]]:
return multi_modal_converter
- def _convert_kwargs_to_eval_input(self, **kwargs) -> Union[List[Dict], List[DerivedEvalInput]]:
+ def _convert_kwargs_to_eval_input(self, **kwargs) -> Union[List[Dict], List[DerivedEvalInput], Dict[str, Any]]:
"""Convert an arbitrary input into a list of inputs for evaluators.
It is assumed that evaluators generally make use of their inputs in one of two ways.
Either they receive a collection of keyname inputs that are all single values
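
The widened return annotation above reflects that a subclass converter may now hand back a single dict (for example, an early-exit payload) instead of a list of per-turn inputs. Below is a minimal sketch of the two return shapes from a hypothetical subclass; only the method name and the broad contract come from the source.

```python
from typing import Any, Dict, List, Union


def _convert_kwargs_to_eval_input(**kwargs) -> Union[List[Dict], Dict[str, Any]]:
    """Hypothetical converter illustrating the two return shapes the base class now allows."""
    tool_calls = kwargs.get("tool_calls")
    if not tool_calls:
        # Single dict: a whole-turn payload / early-exit marker the caller can inspect.
        return {"error_message": "No tool calls found in response or provided tool_calls."}
    # List of dicts: one evaluable input per item (the pre-existing contract).
    return [{"tool_call": tc} for tc in tool_calls]
```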
diff --git a/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py b/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py
index b29f2cc7590f..8799fd422f0a 100644
--- a/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py
+++ b/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py
@@ -8,9 +8,13 @@
from typing import Dict, List, Union, TypeVar, cast
from typing_extensions import overload, override
from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase
-from azure.ai.evaluation._common.utils import remove_optional_singletons, parse_quality_evaluator_reason_score
-from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
-from azure.ai.evaluation._common.constants import PROMPT_BASED_REASON_EVALUATORS
+from azure.ai.evaluation._exceptions import (
+ ErrorBlame,
+ ErrorCategory,
+ ErrorTarget,
+ EvaluationException,
+)
+from ..._common.utils import check_score_is_valid
from azure.ai.evaluation._common._experimental import experimental
logger = logging.getLogger(__name__)
@@ -21,13 +25,16 @@
@experimental
class ToolCallAccuracyEvaluator(PromptyEvaluatorBase[Union[str, float]]):
"""The Tool Call Accuracy evaluator assesses how accurately an AI uses tools by examining:
- - Relevance to the conversation
- - Parameter correctness according to tool definitions
- - Parameter value extraction from the conversation
+ - Relevance to the conversation.
+ - Parameter correctness according to tool definitions.
+ - Parameter value extraction from the conversation.
- The evaluator uses a binary scoring system (0 or 1):
- - Score 0: The tool call is irrelevant or contains information not in the conversation/definition
- - Score 1: The tool call is relevant with properly extracted parameters from the conversation
+ The evaluator uses a scoring rubric of 1 to 5:
+    - Score 1: The tool calls are irrelevant.
+    - Score 2: The tool calls are partially relevant, but not enough tools were called or the parameters were not correctly passed.
+    - Score 3: The tool calls are relevant, but there were unnecessary, excessive tool calls made.
+    - Score 4: The tool calls are relevant, but some tools returned errors and the agent retried calling them and succeeded.
+    - Score 5: The tool calls are relevant, and all parameters were correctly passed.
This evaluation focuses on measuring whether tool calls meaningfully contribute to addressing
user needs while properly following tool definitions and using information present in the
@@ -64,12 +71,18 @@ class ToolCallAccuracyEvaluator(PromptyEvaluatorBase[Union[str, float]]):
"""
_PROMPTY_FILE = "tool_call_accuracy.prompty"
- _RESULT_KEY = "tool_call_accurate"
- _AGGREGATE_RESULT_KEY = "tool_call_accuracy"
+ _RESULT_KEY = "tool_call_accuracy"
- _MAX_TOOL_CALL_ACCURACY_SCORE = 1.0
- _MIN_TOOL_CALL_ACCURACY_SCORE = 0.0
- _DEFAULT_TOOL_CALL_ACCURACY_SCORE = 0.8
+ _MAX_TOOL_CALL_ACCURACY_SCORE = 5
+ _MIN_TOOL_CALL_ACCURACY_SCORE = 1
+ _DEFAULT_TOOL_CALL_ACCURACY_SCORE = 3
+
+ _NO_TOOL_CALLS_MESSAGE = "No tool calls found in response or provided tool_calls."
+ _NO_TOOL_DEFINITIONS_MESSAGE = "Tool definitions must be provided."
+ _TOOL_DEFINITIONS_MISSING_MESSAGE = "Tool definitions for all tool calls must be provided."
+ _INVALID_SCORE_MESSAGE = "Tool call accuracy score must be between 1 and 5."
+
+ _LLM_SCORE_KEY = "tool_calls_success_level"
id = "id"
"""Evaluator identifier, experimental and to be used only with evaluation in cloud."""
@@ -79,7 +92,12 @@ def __init__(self, model_config, *, threshold=_DEFAULT_TOOL_CALL_ACCURACY_SCORE,
current_dir = os.path.dirname(__file__)
prompty_path = os.path.join(current_dir, self._PROMPTY_FILE)
self.threshold = threshold
- super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self._RESULT_KEY, **kwargs)
+ super().__init__(
+ model_config=model_config,
+ prompty_file=prompty_path,
+ result_key=self._RESULT_KEY,
+ **kwargs,
+ )
@overload
def __call__(
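
Since the defaults above now sit on a 1-5 scale with a pass threshold of 3, callers who want a stricter bar can override `threshold` at construction time. A brief sketch with placeholder model configuration values:

```python
from azure.ai.evaluation import AzureOpenAIModelConfiguration, ToolCallAccuracyEvaluator

model_config = AzureOpenAIModelConfiguration(
    azure_endpoint="https://<your-endpoint>.openai.azure.com",
    api_key="<api-key>",
    azure_deployment="<deployment-name>",
)

# Only scores of 4 or 5 on the 1-5 rubric will be reported as "pass".
strict_evaluator = ToolCallAccuracyEvaluator(model_config=model_config, threshold=4)
```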
@@ -134,84 +152,43 @@ def _convert_kwargs_to_eval_input(self, **kwargs):
"""
# TODO add warning that only tool calls of type function are supported
# Collect inputs
- tool_calls = kwargs.get("tool_calls", None)
+ tool_calls = kwargs.get("tool_calls")
tool_definitions = kwargs.get("tool_definitions")
- query = kwargs.get("query", None)
- response = kwargs.get("response", None)
-
- if response is None and tool_calls is None:
- raise EvaluationException(
- message="Either response or tool_calls must be provided.",
- blame=ErrorBlame.USER_ERROR,
- category=ErrorCategory.MISSING_FIELD,
- target=ErrorTarget.TOOL_CALL_ACCURACY_EVALUATOR,
- )
-
- if tool_definitions is None:
- raise EvaluationException(
- message="Tool definitions must be provided.",
- blame=ErrorBlame.USER_ERROR,
- category=ErrorCategory.MISSING_FIELD,
- target=ErrorTarget.TOOL_CALL_ACCURACY_EVALUATOR,
- )
+ query = kwargs.get("query")
+ response = kwargs.get("response")
# TODO : Support classes that represents tool calls, messages etc once client side definitions are available
- if tool_calls is None:
- # Extract tool calls from response if not provided
- tool_calls = []
- if isinstance(response, list):
- for message in response:
- if message.get("role") == "assistant":
- tool_calls.extend(
- [content for content in message.get("content") if content.get("type") == "tool_call"]
- )
- if len(tool_calls) == 0:
- raise EvaluationException(
- message="response does not have tool calls. Either provide tool_calls or response with tool calls.",
- blame=ErrorBlame.USER_ERROR,
- category=ErrorCategory.MISSING_FIELD,
- target=ErrorTarget.TOOL_CALL_ACCURACY_EVALUATOR,
- )
+ if response:
+ parsed_tool_calls = self._parse_tools_from_response(response)
+ if parsed_tool_calls:
+ tool_calls = parsed_tool_calls
+
+ if not tool_calls:
+ return {"error_message": self._NO_TOOL_CALLS_MESSAGE}
+ if not tool_definitions or len(tool_definitions) == 0:
+ return {"error_message": self._NO_TOOL_DEFINITIONS_MESSAGE}
if not isinstance(tool_calls, list):
tool_calls = [tool_calls]
-
if not isinstance(tool_definitions, list):
tool_definitions = [tool_definitions]
- eval_inputs = []
- # TODO : When evaluating an agent tool that depends on the output of a previous tool call,
- # we need to provide the output of the previous tool call as part of messages.
- for tool_call in tool_calls:
- if (
- isinstance(tool_call, dict) and tool_call.get("type") == "tool_call"
- ): # TODO assuming dict here but it can be a class
- function_name = tool_call.get("name")
- tool_definition = [tool for tool in tool_definitions if tool.get("name") == function_name]
- if len(tool_definition) > 0:
- tool_definition = tool_definition
- else:
- raise EvaluationException(
- message="Tool definition not found",
- blame=ErrorBlame.USER_ERROR,
- category=ErrorCategory.INVALID_VALUE,
- target=ErrorTarget.TOOL_CALL_ACCURACY_EVALUATOR,
- )
- eval_inputs.append({"query": query, "tool_call": tool_call, "tool_definition": tool_definition})
- else:
- raise EvaluationException(
- message="Tool definition not found",
- blame=ErrorBlame.USER_ERROR,
- category=ErrorCategory.INVALID_VALUE,
- target=ErrorTarget.TOOL_CALL_ACCURACY_EVALUATOR,
- )
+ try:
+ needed_tool_definitions = self._extract_needed_tool_definitions(tool_calls, tool_definitions)
+    except EvaluationException:
+ return {"error_message": self._TOOL_DEFINITIONS_MISSING_MESSAGE}
+ if len(needed_tool_definitions) == 0:
+ return {"error_message": self._TOOL_DEFINITIONS_MISSING_MESSAGE}
- return eval_inputs
+ return {
+ "query": query,
+ "tool_calls": tool_calls,
+ "tool_definitions": needed_tool_definitions,
+ }
@override
async def _do_eval(self, eval_input: Dict) -> Dict[str, Union[float, str]]: # type: ignore[override]
- """Do a relevance evaluation.
-
+ """Do a tool call accuracy evaluation.
:param eval_input: The input to the evaluator. Expected to contain
whatever inputs are needed for the _flow method, including context
and other fields depending on the child class.
@@ -219,23 +196,43 @@ async def _do_eval(self, eval_input: Dict) -> Dict[str, Union[float, str]]: # t
:return: The evaluation result.
:rtype: Dict
"""
+ # Single LLM call for all tool calls
llm_output = await self._flow(timeout=self._LLM_CALL_TIMEOUT, **eval_input)
- score = math.nan
- if llm_output:
- score, reason = parse_quality_evaluator_reason_score(llm_output, valid_score_range="[0-1]")
- if score >= 0 and score <= 1:
- return {
- self._result_key: bool(float(score)),
- f"{self._result_key}_reason": reason,
- "tool_call_id": eval_input.get("tool_call").get("tool_call_id"),
- }
- raise EvaluationException(
- message="Tool call accuracy evaluator: Invalid score returned from LLM.",
- blame=ErrorBlame.SYSTEM_ERROR,
- category=ErrorCategory.INVALID_VALUE,
- target=ErrorTarget.TOOL_CALL_ACCURACY_EVALUATOR,
- )
+ if isinstance(llm_output, dict):
+ score = llm_output.get(self._LLM_SCORE_KEY, None)
+ if not score or not check_score_is_valid(
+ score,
+ ToolCallAccuracyEvaluator._MIN_TOOL_CALL_ACCURACY_SCORE,
+ ToolCallAccuracyEvaluator._MAX_TOOL_CALL_ACCURACY_SCORE,
+ ):
+ raise EvaluationException(
+ message=f"Invalid score value: {score}. Expected a number in range [{ToolCallAccuracyEvaluator._MIN_TOOL_CALL_ACCURACY_SCORE}, {ToolCallAccuracyEvaluator._MAX_TOOL_CALL_ACCURACY_SCORE}].",
+ internal_message="Invalid score value.",
+ category=ErrorCategory.FAILED_EXECUTION,
+ blame=ErrorBlame.SYSTEM_ERROR,
+ )
+
+ # Format the output
+ reason = llm_output.get("chain_of_thought", "")
+ score = float(score)
+ score_result = "pass" if score >= self.threshold else "fail"
+ response_dict = {
+ self._result_key: score,
+ f"{self._result_key}_result": score_result,
+ f"{self._result_key}_threshold": self.threshold,
+ f"{self._result_key}_reason": reason,
+ "details": llm_output.get("details", {}),
+ }
+ return response_dict
+
+ else:
+ raise EvaluationException(
+ message="Tool call accuracy evaluator returned invalid output.",
+ blame=ErrorBlame.SYSTEM_ERROR,
+ category=ErrorCategory.FAILED_EXECUTION,
+ target=ErrorTarget.TOOL_CALL_ACCURACY_EVALUATOR,
+ )
async def _real_call(self, **kwargs):
"""The asynchronous call where real end-to-end evaluation logic is performed.
@@ -246,106 +243,92 @@ async def _real_call(self, **kwargs):
:rtype: Union[DoEvalResult[T_EvalValue], AggregateResult[T_EvalValue]]
"""
# Convert inputs into list of evaluable inputs.
- eval_input_list = self._convert_kwargs_to_eval_input(**kwargs)
- if len(eval_input_list) == 0:
- return {
- self._AGGREGATE_RESULT_KEY: self._NOT_APPLICABLE_RESULT,
- f"{self._AGGREGATE_RESULT_KEY}_result": self._NOT_APPLICABLE_RESULT,
- f"{self._AGGREGATE_RESULT_KEY}_threshold": self.threshold,
- f"{self._AGGREGATE_RESULT_KEY}_reason": "No tool calls were made.",
- "per_tool_call_details": [],
- }
-
- per_turn_results = []
- # Evaluate all inputs.
- for eval_input in eval_input_list:
- if self._is_applicable_tool(eval_input):
- per_turn_results.append(await self._do_eval(eval_input))
- else:
- per_turn_results.append(self._not_applicable_result(eval_input))
-
- return self._aggregate_results(per_turn_results=per_turn_results)
-
- def _is_applicable_tool(self, eval_input):
- """Determine if a given tool should be evaluated, since we only evaluate tools that
- have sufficient context available.
-
- :type eval_input: Dict
- :return: True if the tool call should be evaluated
- :rtype: bool
- """
- tool_definition = eval_input.get("tool_definition")
- if tool_definition is None or len(tool_definition) != 1:
- return False
- tool_type = tool_definition[0].get("type")
- if tool_type is None or tool_type != "function":
- return False
- return True
-
- def _not_applicable_result(self, eval_input):
+ eval_input = self._convert_kwargs_to_eval_input(**kwargs)
+ if isinstance(eval_input, dict) and eval_input.get("error_message"):
+ # If there is an error message, return not applicable result
+ return self._not_applicable_result(eval_input.get("error_message"))
+ # Do the evaluation
+ result = await self._do_eval(eval_input)
+ # Return the result
+ return result
+
+ def _not_applicable_result(self, error_message):
"""Return a result indicating that the tool call is not applicable for evaluation.
-
-        :param eval_input: The input to the evaluator.
-        :type eval_input: Dict
+        :param error_message: The error message explaining why the evaluation is not applicable.
+        :type error_message: str
:return: A dictionary containing the result of the evaluation.
:rtype: Dict[str, Union[str, float]]
"""
+ # If no tool calls were made or tool call type is not supported, return not applicable result
return {
- f"{self._result_key}": self._NOT_APPLICABLE_RESULT,
- f"{self._result_key}_reason": "Tool call not supported for evaluation",
- "tool_call_id": eval_input.get("tool_call").get("tool_call_id"),
+ self._result_key: self._NOT_APPLICABLE_RESULT,
+ f"{self._result_key}_result": "pass",
+ f"{self._result_key}_threshold": self.threshold,
+ f"{self._result_key}_reason": error_message,
+ "details": {},
}
- def _aggregate_results(self, per_turn_results):
- """Aggregate the evaluation results of each conversation turn into a single result.
-
- Exact implementation might need to vary slightly depending on the results produced.
- Default behavior is to average the all number-based outputs.
-
- :param per_turn_results: List of evaluation results for each turn in the conversation.
- :type per_turn_results: List[Dict]
- :return: A dictionary containing aggregated results, with numeric metrics having their
- means as top-level values in the dictionary, and all original
- values (including non-numerics) located in under the "evaluation_per_turn" key,
- which each sub-key being a metric and each sub-value being a the list of that metric's
- per-turn values.
- :rtype: AggregateResult[T_EvalValue]
+ def _parse_tools_from_response(self, response):
+ """Parse the response to extract tool calls and results.
+ :param response: The response to parse.
+ :type response: Union[str, List[dict]]
+ :return: List of tool calls extracted from the response.
+ :rtype: List[dict]
"""
-
- aggregated: Dict[str, Union[float, Dict[str, List[T_EvalValue]]]] = {}
- evaluation_per_turn: Dict[str, List[T_EvalValue]] = {}
-
- # Go over each turn, and rotate the results into a
- # metric: List[values] format for the evals_per_turn dictionary.
-
- num_evaluated = len(
- [
- per_turn_result
- for per_turn_result in per_turn_results
- if per_turn_result.get(self._result_key) != self._NOT_APPLICABLE_RESULT
- ]
- )
- if num_evaluated == 0:
- # None of the invoked tools were applicable, return not applicable result
- # (If a tool fails evaluation, we'll throw an exception)
- return {
- self._AGGREGATE_RESULT_KEY: self._NOT_APPLICABLE_RESULT,
- f"{self._AGGREGATE_RESULT_KEY}_result": self._NOT_APPLICABLE_RESULT,
- f"{self._AGGREGATE_RESULT_KEY}_threshold": self.threshold,
- f"{self._AGGREGATE_RESULT_KEY}_reason": "Tool call accuracy evaluation is not yet supported for the invoked tools.",
- "per_tool_call_details": [],
- }
- # ignore not_applicable results, where the _result_key will be "not applicable"
- score = (
- sum([per_turn_result.get(self._result_key) == True for per_turn_result in per_turn_results]) / num_evaluated
- )
- aggregated[self._AGGREGATE_RESULT_KEY] = score
- aggregated[f"{self._AGGREGATE_RESULT_KEY}_result"] = (
- self._PASS_RESULT if score >= self.threshold else self._FAIL_RESULT
- )
- aggregated[f"{self._AGGREGATE_RESULT_KEY}_threshold"] = self.threshold
- aggregated["per_tool_call_details"] = per_turn_results
- return aggregated
+ tool_calls = []
+ tool_results_map = {}
+ if isinstance(response, list):
+ for message in response:
+ # Extract tool calls from assistant messages
+ if message.get("role") == "assistant" and isinstance(message.get("content"), list):
+ for content_item in message.get("content"):
+ if isinstance(content_item, dict) and content_item.get("type") == "tool_call":
+ tool_calls.append(content_item)
+
+ # Extract tool results from tool messages
+ elif message.get("role") == "tool" and message.get("tool_call_id"):
+ tool_call_id = message.get("tool_call_id")
+ if isinstance(message.get("content"), list) and len(message.get("content")) > 0:
+ result_content = message.get("content")[0]
+ if isinstance(result_content, dict) and result_content.get("type") == "tool_result":
+ tool_results_map[tool_call_id] = result_content
+
+ # Attach results to their corresponding calls
+ for tool_call in tool_calls:
+ tool_call_id = tool_call.get("tool_call_id")
+ if tool_call_id in tool_results_map:
+ tool_call["tool_result"] = tool_results_map[tool_call_id]["tool_result"]
+
+ return tool_calls
+
+ def _extract_needed_tool_definitions(self, tool_calls, tool_definitions):
+ """Extract the tool definitions that are needed for the provided tool calls.
+ :param tool_calls: List of tool calls to evaluate.
+ :type tool_calls: List[dict]
+ :param tool_definitions: List of tool definitions to use for evaluation.
+ :type tool_definitions: List[dict]
+ :return: List of tool definitions that are needed for the provided tool calls.
+ :rtype: List[dict]
+ """
+ needed_tool_definitions = []
+ for tool_call in tool_calls:
+ if isinstance(tool_call, dict) and tool_call.get("type") == "tool_call":
+ tool_name = tool_call.get("name")
+ tool_definition = [
+ tool
+ for tool in tool_definitions
+ if tool.get("name") == tool_name and tool.get("type", "function") == "function"
+ ]
+ if len(tool_definition) > 0:
+ needed_tool_definitions.extend(tool_definition)
+ else:
+ raise EvaluationException(
+ message=f"Tool definition for {tool_name} not found",
+ blame=ErrorBlame.USER_ERROR,
+ category=ErrorCategory.INVALID_VALUE,
+ target=ErrorTarget.TOOL_CALL_ACCURACY_EVALUATOR,
+ )
+ return needed_tool_definitions
@override
def __call__( # pylint: disable=docstring-missing-param
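
The `_parse_tools_from_response` helper above pairs each assistant `tool_call` content item with the `tool` message that shares its `tool_call_id`. Below is a hedged, self-contained illustration of the message shape it expects and the attachment it performs; the message contents are invented.

```python
# An agent response in the shape _parse_tools_from_response expects (illustrative).
response = [
    {
        "role": "assistant",
        "content": [
            {
                "type": "tool_call",
                "tool_call_id": "call_1",
                "name": "fetch_weather",
                "arguments": {"location": "Paris"},
            }
        ],
    },
    {
        "role": "tool",
        "tool_call_id": "call_1",
        "content": [
            {"type": "tool_result", "tool_result": {"temperature": "18C", "condition": "cloudy"}}
        ],
    },
]

# Parsing yields the tool calls with their results attached under "tool_result":
# [
#     {
#         "type": "tool_call",
#         "tool_call_id": "call_1",
#         "name": "fetch_weather",
#         "arguments": {"location": "Paris"},
#         "tool_result": {"temperature": "18C", "condition": "cloudy"},
#     }
# ]
```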
diff --git a/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_tool_call_accuracy/tool_call_accuracy.prompty b/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_tool_call_accuracy/tool_call_accuracy.prompty
index 6b964cc54c0c..193d2174e9bb 100644
--- a/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_tool_call_accuracy/tool_call_accuracy.prompty
+++ b/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_tool_call_accuracy/tool_call_accuracy.prompty
@@ -5,20 +5,20 @@ model:
api: chat
parameters:
temperature: 0.0
- max_tokens: 800
+ max_tokens: 3000
top_p: 1.0
presence_penalty: 0
frequency_penalty: 0
response_format:
- type: text
+ type: json_object
inputs:
query:
- type: array
- tool_call:
- type: object
- tool_definition:
- type: object
+ type: List
+ tool_calls:
+ type: List
+ tool_definitions:
+ type: Dict
---
system:
@@ -27,7 +27,7 @@ system:
### Your are an expert in evaluating the accuracy of a tool call considering relevance and potential usefulness including syntactic and semantic correctness of a proposed tool call from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
- **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
- **Data**: Your input data include CONVERSATION , TOOL CALL and TOOL DEFINITION.
-- **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+- **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways, and you need to be very precise in your evaluation.
user:
# Definition
@@ -42,30 +42,108 @@ user:
# Ratings
-## [Tool Call Accuracy: 0] (Irrelevant)
+## [Tool Call Accuracy: 1] (Irrelevant)
**Definition:**
- 1. The TOOL CALL is not relevant and will not help resolve the user's need.
- 2. TOOL CALL include parameters values that are not present or inferred from CONVERSATION.
- 3. TOOL CALL has parameters that is not present in TOOL DEFINITION.
+Tool calls were not relevant to the user's query, resulting in an irrelevant or unhelpful final output.
+This level is a 'fail'.
-## [Tool Call Accuracy: 1] (Relevant)
+**Example:**
+ The user's query asks for the most popular hotels in New York, but the agent calls a tool that searches local files on a machine. This tool is not relevant to the user query, so this case is a Level 1 'fail'.
+
+
+## [Tool Call Accuracy: 2] (Partially Relevant - No correct output)
+**Definition:**
+Tool calls were somewhat related to the user's query, but the agent was not able to reach a final output that addresses the user query due to one or more of the following:
+• Tools returned errors, and no retrials for the tool call were successful.
+• Parameters passed to the tool were incorrect.
+• Not enough tools were called to fully address the query (missing tool calls).
+This level is a 'fail'.
+
+**Example:**
+ The user asks for the coordinates of Chicago. The agent calls the correct tool that retrieves the coordinates (the relevant tool for the user query) but passes 'New York' instead of 'Chicago' as the parameter to the tool. So this is a Level 2 'fail'.
+
+**Example:**
+ The user asks for the coordinates of Chicago. The agent calls the correct tool that retrieves the coordinates (the relevant tool for the user query) and passes 'Chicago' as the parameter, which is also correct, but the tool returns an error, so the agent can't reach the correct answer to the user's query. This is a Level 2 'fail'.
+
+**Example:**
+ The user asks a question that needs 3 tool calls for it to be answered. The agent calls only one of the three required tool calls. So this case is a Level 2 'fail'.
+
+
+## [Tool Call Accuracy: 3] (Slightly Correct - Reached Output)
**Definition:**
- 1. The TOOL CALL is directly relevant and very likely to help resolve the user's need.
- 2. TOOL CALL include parameters values that are present or inferred from CONVERSATION.
- 3. TOOL CALL has parameters that is present in TOOL DEFINITION.
+Tool calls were relevant, and correct, grounded parameters were passed, so the agent reached a correct output. However, multiple excessive, unnecessary tool calls were made.
+This level is a 'pass'.
+
+**Example:**
+ The user asked to do a modification in the database. The agent called the tool multiple times, resulting in multiple modifications in the database instead of one. This is a level 3 'pass'.
+
+**Example:**
+ The user asked for popular hotels in a certain place. The agent calls the same tool with the same parameters multiple times, even though a single tool call that returns an output is sufficient. So there were unnecessary tool calls. This is a Level 3 'pass'.
+
+
+## [Tool Call Accuracy: 4] (Mostly Correct - Reached output)
+**Definition:**
+Tool calls were fully relevant and efficient:
+• Correct tools were called with the correct and grounded parameters, whether they are extracted from the conversation history or the current user query.
+• A tool returned an error, but the agent retried calling the tool and successfully got an output.
+This level is a 'pass'.
+
+**Example:**
+ The user asks for the weather forecast in a certain place. The agent calls the correct tool that retrieves the weather forecast with the correct parameters, but the tool returns an error. The agent re-calls the tool once again and it returns the correct output. This is a Level 4 'pass'.
+
+
+## [Tool Call Accuracy: 5] (Optimal Solution - Reached output)
+**Definition:**
+Tool calls were fully relevant and efficient:
+• Correct tools were called with the correct and grounded parameters, whether they are extracted from the conversation history or the current user query.
+• No unnecessary or excessive tool calls were made.
+• No errors occurred in any of the tools.
+• The agent was able to reach the final output that addresses the user's query without facing any issues.
+This level is a 'pass'.
+
+**Example:**
+ The user asks for the distance between two places. The agent correctly calls the tools that retrieve the coordinates for the two places respectively, then calls the tool that calculates the distance between the two sets of coordinates, passing the correct arguments to all the tools, without calling other tools excessively or unnecessarily. This is the optimal solution for the user's query. This is a Level 5 'pass'.
+
+**Example:**
+ The user asks for the distance between two places. The agent retrieves the needed coordinates from the outputs of the tool calls in the conversation history, and then correctly passes these coordinates to the tool that calculates the distance to output it to the user. This is also an optimal solution for the user's query. This is a Level 5 'pass'.
+
+
+
+# IMPORTANT NOTES
+- There is a clear distinction between 'pass' levels and 'fail' levels: in a 'pass', the tools are called correctly and the required output is reached. If the agent was not able to reach a final output that addresses the user query, it cannot receive any of the 'pass' levels, and vice versa. Ensure you rate the agent's response with the correct level based on the tool calls made to address the user's query.
+- "Correct output" means correct tool with the correct, grounded parameters. You are NOT concerned with the correctness of the result of the tool. As long as the parameters passed were correct and the tool did not return an error, then the tool output is correct and accurate.
+- Ensure that every single parameter that is passed to the tools is correct and grounded from the user query or the conversation history. If the agent passes incorrect parameters or completely makes them up, then this is a fail, even if somehow the agent reaches a correct result.
# Data
CONVERSATION : {{query}}
-TOOL CALL: {{tool_call}}
+TOOL CALLS: {{tool_calls}}
-TOOL DEFINITION: {{tool_definition}}
+TOOL DEFINITIONS: {{tool_definitions}}
# Tasks
-## Please provide your assessment Score for the previous CONVERSATION , TOOL CALL and TOOL DEFINITION based on the Definitions above. Your output should include the following information:
-- **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
-- **Explanation**: a very short explanation of why you think the input Data should get that Score.
-- **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "0", "1") based on the levels of the definitions.
-
+## Please provide your evaluation for the assistant RESPONSE in relation to the user QUERY and tool definitions based on the Definitions and examples above.
+Your output should consist only of a JSON object, as provided in the examples, that has the following keys:
+ - chain_of_thought: a string that explains your thought process to decide on the tool call accuracy level. Start this string with 'Let's think step by step:', and think deeply and precisely about which level should be chosen based on the agent's tool calls and how they were able to address the user's query.
+ - tool_calls_success_level: an integer value between 1 and 5 that represents the level of tool call success, based on the level definitions mentioned before. You need to be very precise when deciding on this level. Ensure you are correctly following the rating system based on the description of each level.
+ - details: a dictionary that contains the following keys:
+ - tool_calls_made_by_agent: total number of tool calls made by the agent
+ - correct_tool_calls_made_by_agent: total number of correct tool calls made by the agent
+ - per_tool_call_details: a list of dictionaries, each containing:
+ - tool_name: name of the tool
+ - total_calls_required: total number of calls required for the tool
+ - correct_calls_made_by_agent: number of correct calls made by the agent
+ - correct_tool_percentage: percentage of correct calls made by the agent for this tool. It is a value between 0.0 and 1.0
+ - tool_call_errors: number of errors encountered during the tool call
+ - tool_success_result: 'pass' or 'fail' based on the evaluation of the tool call accuracy for this tool
+ - excess_tool_calls: a dictionary with the following keys:
+ - total: total number of excess, unnecessary tool calls made by the agent
+ - details: a list of dictionaries, each containing:
+ - tool_name: name of the tool
+ - excess_count: number of excess calls made for this query
+ - missing_tool_calls: a dictionary with the following keys:
+ - total: total number of missing tool calls that should have been made by the agent to be able to answer the query
+ - details: a list of dictionaries, each containing:
+ - tool_name: name of the tool
+ - missing_count: number of missing calls for this query
-## Please provide your answers between the tags: your chain of thoughts, your explanation, your Score.
# Output
\ No newline at end of file
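
For reference, here is a hedged example of the JSON object the updated prompt asks the judge model to emit, written as a Python literal. The counts and tool name are invented; only the key names come from the task description above.

```python
# Example of the JSON structure the prompt requests (illustrative values only).
example_llm_output = {
    "chain_of_thought": (
        "Let's think step by step: the agent called fetch_weather once with a grounded "
        "location and reached the answer without extra or missing calls."
    ),
    "tool_calls_success_level": 5,
    "details": {
        "tool_calls_made_by_agent": 1,
        "correct_tool_calls_made_by_agent": 1,
        "per_tool_call_details": [
            {
                "tool_name": "fetch_weather",
                "total_calls_required": 1,
                "correct_calls_made_by_agent": 1,
                "correct_tool_percentage": 1.0,
                "tool_call_errors": 0,
                "tool_success_result": "pass",
            }
        ],
        "excess_tool_calls": {"total": 0, "details": []},
        "missing_tool_calls": {"total": 0, "details": []},
    },
}
```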
diff --git a/sdk/evaluation/azure-ai-evaluation/samples/agent_evaluators/tool_call_accuracy.ipynb b/sdk/evaluation/azure-ai-evaluation/samples/agent_evaluators/tool_call_accuracy.ipynb
index bf8695d2122c..c08365505d6f 100644
--- a/sdk/evaluation/azure-ai-evaluation/samples/agent_evaluators/tool_call_accuracy.ipynb
+++ b/sdk/evaluation/azure-ai-evaluation/samples/agent_evaluators/tool_call_accuracy.ipynb
@@ -13,7 +13,7 @@
"source": [
"### Getting Started\n",
"\n",
- "This sample demonstrates how to use Intent Resolution Evaluator\n",
+ "This sample demonstrates how to use Tool Call Accuracy Evaluator\n",
"Before running the sample:\n",
"```bash\n",
"pip install azure-ai-projects azure-identity azure-ai-evaluation\n",
@@ -39,9 +39,12 @@
"- Parameter value extraction from the conversation\n",
"- Potential usefulness of the tool call\n",
"\n",
- "The evaluator uses a binary scoring system (0 or 1):\n",
- " - Score 0: The tool call is irrelevant or contains information not in the conversation/definition\n",
- " - Score 1: The tool call is relevant with properly extracted parameters from the conversation\n",
+ "The evaluator uses a scoring rubric of 1 to 5:\n",
+ " - Score 1: The tool calls are irrelevant\n",
+ " - Score 2: The tool calls are partially relevant, but not enough tools were called or the parameters were not correctly passed\n",
+    " - Score 3: The tool calls are relevant, but there were unnecessary, excessive tool calls made\n",
+    " - Score 4: The tool calls are relevant, but some tools returned errors and the agent retried calling them and succeeded\n",
+ " - Score 5: The tool calls are relevant, and all parameters were correctly passed and no excessive calls were made.\n",
"\n",
"This evaluation focuses on measuring whether tool calls meaningfully contribute to addressing query while properly following tool definitions and using information present in the conversation history."
]
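
To complement the notebook's rubric summary, here is a hedged sketch of letting the evaluator pull tool calls straight out of an agent `response` instead of passing `tool_calls` explicitly; the messages and model configuration values are placeholders.

```python
from azure.ai.evaluation import AzureOpenAIModelConfiguration, ToolCallAccuracyEvaluator

model_config = AzureOpenAIModelConfiguration(
    azure_endpoint="https://<your-endpoint>.openai.azure.com",
    api_key="<api-key>",
    azure_deployment="<deployment-name>",
)
evaluator = ToolCallAccuracyEvaluator(model_config=model_config)

# Tool calls are extracted from the assistant messages inside `response`,
# so no separate tool_calls argument is required.
result = evaluator(
    query="What is the weather in Paris?",
    response=[
        {
            "role": "assistant",
            "content": [
                {
                    "type": "tool_call",
                    "tool_call_id": "call_1",
                    "name": "fetch_weather",
                    "arguments": {"location": "Paris"},
                }
            ],
        }
    ],
    tool_definitions=[
        {
            "name": "fetch_weather",
            "type": "function",
            "description": "Fetches the weather information for the specified location.",
            "parameters": {"type": "object", "properties": {"location": {"type": "string"}}},
        }
    ],
)
print(result["tool_call_accuracy"], result["tool_call_accuracy_result"])
```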
diff --git a/sdk/evaluation/azure-ai-evaluation/tests/unittests/test_agent_evaluators.py b/sdk/evaluation/azure-ai-evaluation/tests/unittests/test_agent_evaluators.py
index 3614fdcf7e14..3b3580817eb5 100644
--- a/sdk/evaluation/azure-ai-evaluation/tests/unittests/test_agent_evaluators.py
+++ b/sdk/evaluation/azure-ai-evaluation/tests/unittests/test_agent_evaluators.py
@@ -9,94 +9,97 @@ class TestEvaluate:
def test_tool_call_accuracy_evaluator_missing_inputs(self, mock_model_config):
tool_call_accuracy = ToolCallAccuracyEvaluator(model_config=mock_model_config)
- # Test tool_calls provided but missing response
- with pytest.raises(EvaluationException) as exc_info:
- tool_call_accuracy(
- query="Where is the Eiffel Tower?",
- response="The Eiffel Tower is in Paris.",
- tool_calls="Test",
- tool_definitions={
+ # Test with missing tool_calls and response
+ result = tool_call_accuracy(
+ query="Where is the Eiffel Tower?",
+ tool_definitions=[
+ {
"name": "fetch_weather",
"description": "Fetches the weather information for the specified location.",
"parameters": {
"type": "object",
"properties": {
- "location": {"type": "string", "description": "The location to fetch weather for."}
+ "location": {
+ "type": "string",
+ "description": "The location to fetch weather for.",
+ }
},
},
- },
- )
+ }
+ ],
+ )
+ assert result[ToolCallAccuracyEvaluator._RESULT_KEY] == ToolCallAccuracyEvaluator._NOT_APPLICABLE_RESULT
+ assert (
+ ToolCallAccuracyEvaluator._NO_TOOL_CALLS_MESSAGE
+ in result[f"{ToolCallAccuracyEvaluator._RESULT_KEY}_reason"]
+ )
# Test with missing tool_definitions
- with pytest.raises(EvaluationException) as exc_info:
- tool_call_accuracy(
- query="Where is the Eiffel Tower?",
- tool_calls={
+ result = tool_call_accuracy(
+ query="Where is the Eiffel Tower?",
+ tool_definitions=[],
+ tool_calls=[
+ {
"type": "tool_call",
- "tool_call": {
- "id": "call_K21dwOxgCN2syn4qjutMVV7Z",
- "type": "function",
- "function": {"name": "fetch_weather", "arguments": {"location": "Tokyo"}},
- },
- },
- )
- assert "Tool definitions must be provided." in str(exc_info.value)
-
- # Test with missing tool_cools
- with pytest.raises(EvaluationException) as exc_info:
- tool_call_accuracy(
- query="Where is the Eiffel Tower?",
- tool_definitions={
"name": "fetch_weather",
- "description": "Fetches the weather information for the specified location.",
- "parameters": {
- "type": "object",
- "properties": {
- "location": {"type": "string", "description": "The location to fetch weather for."}
- },
- },
- },
- )
-
- assert "Either response or tool_calls must be provided." in str(exc_info.value)
+ "arguments": {"location": "Tokyo"},
+ }
+ ],
+ )
+ assert result[ToolCallAccuracyEvaluator._RESULT_KEY] == ToolCallAccuracyEvaluator._NOT_APPLICABLE_RESULT
+ assert (
+ ToolCallAccuracyEvaluator._NO_TOOL_DEFINITIONS_MESSAGE
+ in result[f"{ToolCallAccuracyEvaluator._RESULT_KEY}_reason"]
+ )
- # Test response provided but missing tool_calls
- with pytest.raises(EvaluationException) as exc_info:
- tool_call_accuracy(
- query="Where is the Eiffel Tower?",
- response="The Eiffel Tower is in Paris.",
- tool_definitions={
+ # Test with response that has no tool calls
+ result = tool_call_accuracy(
+ query="Where is the Eiffel Tower?",
+ response="The Eiffel Tower is in Paris.",
+ tool_definitions=[
+ {
"name": "fetch_weather",
"description": "Fetches the weather information for the specified location.",
"parameters": {
"type": "object",
"properties": {
- "location": {"type": "string", "description": "The location to fetch weather for."}
+ "location": {
+ "type": "string",
+ "description": "The location to fetch weather for.",
+ }
},
},
- },
- )
-
- assert "response does not have tool calls. Either provide tool_calls or response with tool calls." in str(
- exc_info.value
+ }
+ ],
+ )
+ assert result[ToolCallAccuracyEvaluator._RESULT_KEY] == ToolCallAccuracyEvaluator._NOT_APPLICABLE_RESULT
+ assert (
+ ToolCallAccuracyEvaluator._NO_TOOL_CALLS_MESSAGE
+ in result[f"{ToolCallAccuracyEvaluator._RESULT_KEY}_reason"]
)
- # Test tool_calls provided but missing response
- with pytest.raises(EvaluationException) as exc_info:
- tool_call_accuracy(
- query="Where is the Eiffel Tower?",
- response="The Eiffel Tower is in Paris.",
- tool_calls="Test",
- tool_definitions={
+ # Test with tool call for which definition is not provided
+ result = tool_call_accuracy(
+ query="Where is the Eiffel Tower?",
+ tool_calls=[{"type": "tool_call", "name": "some_other_tool", "arguments": {}}],
+ tool_definitions=[
+ {
"name": "fetch_weather",
"description": "Fetches the weather information for the specified location.",
"parameters": {
"type": "object",
"properties": {
- "location": {"type": "string", "description": "The location to fetch weather for."}
+ "location": {
+ "type": "string",
+ "description": "The location to fetch weather for.",
+ }
},
},
- },
- )
-
- assert "Tool definition not found" in str(exc_info.value)
+ }
+ ],
+ )
+ assert result[ToolCallAccuracyEvaluator._RESULT_KEY] == ToolCallAccuracyEvaluator._NOT_APPLICABLE_RESULT
+ assert (
+ ToolCallAccuracyEvaluator._TOOL_DEFINITIONS_MISSING_MESSAGE
+ in result[f"{ToolCallAccuracyEvaluator._RESULT_KEY}_reason"]
+ )
diff --git a/sdk/evaluation/azure-ai-evaluation/tests/unittests/test_tool_call_accuracy_evaluator.py b/sdk/evaluation/azure-ai-evaluation/tests/unittests/test_tool_call_accuracy_evaluator.py
index 00ac32ef5442..a82577a96bd1 100644
--- a/sdk/evaluation/azure-ai-evaluation/tests/unittests/test_tool_call_accuracy_evaluator.py
+++ b/sdk/evaluation/azure-ai-evaluation/tests/unittests/test_tool_call_accuracy_evaluator.py
@@ -5,14 +5,39 @@
from azure.ai.evaluation._exceptions import EvaluationException
-# Use tool_call_id convenience to specify whether eval result is good, bad, or invalid
+# This mock should return a dictionary that mimics the output of the prompty (the _flow call),
+# which is then processed by the _do_eval method.
async def flow_side_effect(timeout, **kwargs):
- if "good" in kwargs.get("tool_call").get("tool_call_id"):
- return """Let's think step by step. You're totally right! Tool is the best ever. 1"""
- elif "bad" in kwargs.get("tool_call").get("tool_call_id"):
- return """Let's think step by step. You're wrong! Tool is not good. 0"""
- else:
- return """Let's think Or not. Tool is...who knows. hello"""
+ tool_calls = kwargs.get("tool_calls", [])
+
+ good_calls = sum(1 for tc in tool_calls if "good" in tc.get("tool_call_id", ""))
+ bad_calls = sum(1 for tc in tool_calls if "bad" in tc.get("tool_call_id", ""))
+ invalid_calls = sum(1 for tc in tool_calls if "invalid" in tc.get("tool_call_id", ""))
+ total_calls = len(tool_calls)
+
+ if invalid_calls > 0:
+        # Return an out-of-range score (25) to trigger an exception in the evaluator's check_score_is_valid
+ return {
+ "chain_of_thought": "The tool calls were very correct that I returned a huge number!",
+ "tool_calls_success_level": 25,
+ "additional_details": {},
+ }
+
+ score = 1 # Default score for "all bad"
+ if total_calls > 0:
+ if good_calls == total_calls:
+ score = 5 # All good
+ elif good_calls > 0:
+ score = 3 # Mixed good and bad
+
+ return {
+ "chain_of_thought": f"Evaluated {total_calls} tool calls with {good_calls} correct calls.",
+ "tool_calls_success_level": score,
+ "additional_details": {
+ "tool_calls_made_by_agent": total_calls,
+ "correct_tool_calls_made_by_agent": good_calls,
+ },
+ }
@pytest.mark.usefixtures("mock_model_config")
@@ -22,15 +47,14 @@ def test_evaluate_tools_valid1(self, mock_model_config):
evaluator = ToolCallAccuracyEvaluator(model_config=mock_model_config)
evaluator._flow = MagicMock(side_effect=flow_side_effect)
- # Test evaluation with valid input, one good tool call and one bad
+ # Test evaluation with one good and one bad tool call
query = "Where is the Eiffel Tower?"
- response = "The Eiffel Tower is in Paris."
tool_calls = [
{
"type": "tool_call",
"tool_call_id": "call_good",
"name": "fetch_weather",
- "arguments": {"location": "Tokyo"},
+ "arguments": {"location": "Paris"},
},
{
"type": "tool_call",
@@ -46,7 +70,12 @@ def test_evaluate_tools_valid1(self, mock_model_config):
"description": "Fetches the weather information for the specified location.",
"parameters": {
"type": "object",
- "properties": {"location": {"type": "string", "description": "The location to fetch weather for."}},
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The location to fetch weather for.",
+ }
+ },
},
},
{
@@ -55,40 +84,33 @@ def test_evaluate_tools_valid1(self, mock_model_config):
"description": "Buy a jacket of the given type.",
"parameters": {
"type": "object",
- "properties": {"type": {"type": "string", "description": "The type of jacket to buy."}},
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "The type of jacket to buy.",
+ }
+ },
},
},
]
- result = evaluator(query=query, response=response, tool_calls=tool_calls, tool_definitions=tool_definitions)
+ result = evaluator(query=query, tool_calls=tool_calls, tool_definitions=tool_definitions)
- key = ToolCallAccuracyEvaluator._AGGREGATE_RESULT_KEY
+ key = ToolCallAccuracyEvaluator._RESULT_KEY
assert result is not None
assert key in result and f"{key}_result" in result and f"{key}_threshold" in result
- assert result[key] == 0.5
- assert result[f"{key}_result"] == "fail"
+ assert result[key] == 3.0 # Mixed good/bad gets score 3
+ assert result[f"{key}_result"] == "pass"
assert result[f"{key}_threshold"] == ToolCallAccuracyEvaluator._DEFAULT_TOOL_CALL_ACCURACY_SCORE
- assert "per_tool_call_details" in result
- assert len(result["per_tool_call_details"]) == 2
- for tool_call in result["per_tool_call_details"]:
- assert "tool_call_accurate" in tool_call
- assert "tool_call_accurate_reason" in tool_call
- assert "tool_call_id" in tool_call
- if tool_call["tool_call_id"] == "call_good":
- assert tool_call["tool_call_accurate"] is True
- assert len(tool_call["tool_call_accurate_reason"]) > 0
- elif tool_call["tool_call_id"] == "call_bad":
- assert tool_call["tool_call_accurate"] is False
- assert len(tool_call["tool_call_accurate_reason"]) > 0
- else:
- pytest.fail()
+ assert f"{key}_reason" in result
+ assert result[f"{key}_reason"] == "Evaluated 2 tool calls with 1 correct calls."
+ assert "details" in result
def test_evaluate_tools_valid2(self, mock_model_config):
evaluator = ToolCallAccuracyEvaluator(model_config=mock_model_config)
evaluator._flow = MagicMock(side_effect=flow_side_effect)
- # Test evaluation with valid input, one good tool call and one bad
+ # Test evaluation with two bad tool calls
query = "Where is the Eiffel Tower?"
- response = "The Eiffel Tower is in Paris."
tool_calls = [
{
"type": "tool_call",
@@ -110,7 +132,12 @@ def test_evaluate_tools_valid2(self, mock_model_config):
"description": "Fetches the weather information for the specified location.",
"parameters": {
"type": "object",
- "properties": {"location": {"type": "string", "description": "The location to fetch weather for."}},
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The location to fetch weather for.",
+ }
+ },
},
},
{
@@ -119,52 +146,45 @@ def test_evaluate_tools_valid2(self, mock_model_config):
"description": "Buy a jacket of the given type.",
"parameters": {
"type": "object",
- "properties": {"type": {"type": "string", "description": "The type of jacket to buy."}},
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "The type of jacket to buy.",
+ }
+ },
},
},
]
- result = evaluator(query=query, response=response, tool_calls=tool_calls, tool_definitions=tool_definitions)
+ result = evaluator(query=query, tool_calls=tool_calls, tool_definitions=tool_definitions)
- key = ToolCallAccuracyEvaluator._AGGREGATE_RESULT_KEY
+ key = ToolCallAccuracyEvaluator._RESULT_KEY
assert result is not None
assert key in result and f"{key}_result" in result and f"{key}_threshold" in result
- assert result[key] == 0.0
+ assert result[key] == 1.0 # All bad gets score 1
assert result[f"{key}_result"] == "fail"
assert result[f"{key}_threshold"] == ToolCallAccuracyEvaluator._DEFAULT_TOOL_CALL_ACCURACY_SCORE
- assert "per_tool_call_details" in result
- assert len(result["per_tool_call_details"]) == 2
- for tool_call in result["per_tool_call_details"]:
- assert "tool_call_accurate" in tool_call
- assert "tool_call_accurate_reason" in tool_call
- assert "tool_call_id" in tool_call
- if tool_call["tool_call_id"] == "call_good":
- assert tool_call["tool_call_accurate"] is False
- assert len(tool_call["tool_call_accurate_reason"]) > 0
- elif tool_call["tool_call_id"] == "call_bad":
- assert tool_call["tool_call_accurate"] is False
- assert len(tool_call["tool_call_accurate_reason"]) > 0
- else:
- pytest.fail()
+ assert f"{key}_reason" in result
+ assert result[f"{key}_reason"] == "Evaluated 2 tool calls with 0 correct calls."
+ assert "details" in result
def test_evaluate_tools_valid3(self, mock_model_config):
evaluator = ToolCallAccuracyEvaluator(model_config=mock_model_config)
evaluator._flow = MagicMock(side_effect=flow_side_effect)
- # Test evaluation with valid input, one good tool call and one bad
+ # Test evaluation with two good tool calls
query = "Where is the Eiffel Tower?"
- response = "The Eiffel Tower is in Paris."
tool_calls = [
{
"type": "tool_call",
"tool_call_id": "call_good",
"name": "fetch_weather",
- "arguments": {"location": "Tokyo"},
+ "arguments": {"location": "Paris"},
},
{
"type": "tool_call",
"tool_call_id": "call_good",
"name": "buy_jacket",
- "arguments": {"type": "raincoat"},
+ "arguments": {"type": "jacket"},
},
]
tool_definitions = [
@@ -174,7 +194,12 @@ def test_evaluate_tools_valid3(self, mock_model_config):
"description": "Fetches the weather information for the specified location.",
"parameters": {
"type": "object",
- "properties": {"location": {"type": "string", "description": "The location to fetch weather for."}},
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The location to fetch weather for.",
+ }
+ },
},
},
{
@@ -183,55 +208,41 @@ def test_evaluate_tools_valid3(self, mock_model_config):
"description": "Buy a jacket of the given type.",
"parameters": {
"type": "object",
- "properties": {"type": {"type": "string", "description": "The type of jacket to buy."}},
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "The type of jacket to buy.",
+ }
+ },
},
},
]
- result = evaluator(query=query, response=response, tool_calls=tool_calls, tool_definitions=tool_definitions)
+ result = evaluator(query=query, tool_calls=tool_calls, tool_definitions=tool_definitions)
- key = ToolCallAccuracyEvaluator._AGGREGATE_RESULT_KEY
+ key = ToolCallAccuracyEvaluator._RESULT_KEY
assert result is not None
assert key in result and f"{key}_result" in result and f"{key}_threshold" in result
- assert result[key] == 1.0
+ assert result[key] == 5.0 # All good gets score 5
assert result[f"{key}_result"] == "pass"
assert result[f"{key}_threshold"] == ToolCallAccuracyEvaluator._DEFAULT_TOOL_CALL_ACCURACY_SCORE
- assert "per_tool_call_details" in result
- assert len(result["per_tool_call_details"]) == 2
- for tool_call in result["per_tool_call_details"]:
- assert "tool_call_accurate" in tool_call
- assert "tool_call_accurate_reason" in tool_call
- assert "tool_call_id" in tool_call
- if tool_call["tool_call_id"] == "call_good":
- assert tool_call["tool_call_accurate"] is True
- assert len(tool_call["tool_call_accurate_reason"]) > 0
- elif tool_call["tool_call_id"] == "call_bad":
- assert tool_call["tool_call_accurate"] is True
- assert len(tool_call["tool_call_accurate_reason"]) > 0
- else:
- pytest.fail()
+ assert f"{key}_reason" in result
+ assert result[f"{key}_reason"] == "Evaluated 2 tool calls with 2 correct calls."
+ assert "details" in result
def test_evaluate_tools_one_eval_fails(self, mock_model_config):
with pytest.raises(EvaluationException) as exc_info:
-
evaluator = ToolCallAccuracyEvaluator(model_config=mock_model_config)
evaluator._flow = MagicMock(side_effect=flow_side_effect)
- # Test evaluation with valid input, one good tool call and one bad
+ # Test evaluation with an invalid tool call ID to trigger failure
query = "Where is the Eiffel Tower?"
- response = "The Eiffel Tower is in Paris."
tool_calls = [
{
"type": "tool_call",
- "tool_call_id": "call_good",
+ "tool_call_id": "call_invalid",
"name": "fetch_weather",
"arguments": {"location": "Tokyo"},
},
- {
- "type": "tool_call",
- "tool_call_id": "call_invalid",
- "name": "buy_jacket",
- "arguments": {"type": "raincoat"},
- },
]
tool_definitions = [
{
@@ -241,31 +252,24 @@ def test_evaluate_tools_one_eval_fails(self, mock_model_config):
"parameters": {
"type": "object",
"properties": {
- "location": {"type": "string", "description": "The location to fetch weather for."}
+ "location": {
+ "type": "string",
+ "description": "The location to fetch weather for.",
+ }
},
},
},
- {
- "name": "buy_jacket",
- "type": "function",
- "description": "Buy a jacket of the given type.",
- "parameters": {
- "type": "object",
- "properties": {"type": {"type": "string", "description": "The type of jacket to buy."}},
- },
- },
]
- result = evaluator(query=query, response=response, tool_calls=tool_calls, tool_definitions=tool_definitions)
- # if one tool call evaluation fails, we'll fail the whole thing
- assert "Tool call accuracy evaluator" in str(exc_info.value)
+ evaluator(query=query, tool_calls=tool_calls, tool_definitions=tool_definitions)
+
+ assert "Invalid score value" in str(exc_info.value)
def test_evaluate_tools_some_not_applicable(self, mock_model_config):
evaluator = ToolCallAccuracyEvaluator(model_config=mock_model_config)
evaluator._flow = MagicMock(side_effect=flow_side_effect)
- # Test evaluation with valid input, one good tool call and one bad
+ # Test with one function tool and one non-function tool
query = "Where is the Eiffel Tower?"
- response = "The Eiffel Tower is in Paris."
tool_calls = [
{
"type": "tool_call",
@@ -287,49 +291,45 @@ def test_evaluate_tools_some_not_applicable(self, mock_model_config):
"description": "Fetches the weather information for the specified location.",
"parameters": {
"type": "object",
- "properties": {"location": {"type": "string", "description": "The location to fetch weather for."}},
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The location to fetch weather for.",
+ }
+ },
},
},
{
"name": "buy_jacket",
- "type": "another_built_in",
+ "type": "another_built_in", # This tool will be filtered out
"description": "Buy a jacket of the given type.",
"parameters": {
"type": "object",
- "properties": {"type": {"type": "string", "description": "The type of jacket to buy."}},
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "The type of jacket to buy.",
+ }
+ },
},
},
]
- result = evaluator(query=query, response=response, tool_calls=tool_calls, tool_definitions=tool_definitions)
+ result = evaluator(query=query, tool_calls=tool_calls, tool_definitions=tool_definitions)
- key = ToolCallAccuracyEvaluator._AGGREGATE_RESULT_KEY
+ key = ToolCallAccuracyEvaluator._RESULT_KEY
assert result is not None
- assert key in result and f"{key}_result" in result and f"{key}_threshold" in result
- assert result[key] == 1.0
+ assert result[key] == ToolCallAccuracyEvaluator._NOT_APPLICABLE_RESULT
assert result[f"{key}_result"] == "pass"
assert result[f"{key}_threshold"] == ToolCallAccuracyEvaluator._DEFAULT_TOOL_CALL_ACCURACY_SCORE
- assert "per_tool_call_details" in result
- assert len(result["per_tool_call_details"]) == 2
- for tool_call in result["per_tool_call_details"]:
- assert "tool_call_accurate" in tool_call
- assert "tool_call_accurate_reason" in tool_call
- assert "tool_call_id" in tool_call
- if tool_call["tool_call_id"] == "call_good":
- assert tool_call["tool_call_accurate"] is True
- assert len(tool_call["tool_call_accurate_reason"]) > 0
- elif tool_call["tool_call_id"] == "call_bad":
- assert tool_call["tool_call_accurate"] == "not applicable"
- assert tool_call["tool_call_accurate_reason"] == "Tool call not supported for evaluation"
- else:
- pytest.fail()
+ assert result[f"{key}_reason"] == ToolCallAccuracyEvaluator._TOOL_DEFINITIONS_MISSING_MESSAGE
+ assert result["details"] == {}
def test_evaluate_tools_all_not_applicable(self, mock_model_config):
evaluator = ToolCallAccuracyEvaluator(model_config=mock_model_config)
evaluator._flow = MagicMock(side_effect=flow_side_effect)
- # Test evaluation with valid input, one good tool call and one bad
+ # Test with only non-function tools
query = "Where is the Eiffel Tower?"
- response = "The Eiffel Tower is in Paris."
tool_calls = [
{
"type": "tool_call",
@@ -337,52 +337,39 @@ def test_evaluate_tools_all_not_applicable(self, mock_model_config):
"name": "fetch_weather",
"arguments": {"location": "Tokyo"},
},
- {
- "type": "tool_call",
- "tool_call_id": "call_good",
- "name": "buy_jacket",
- "arguments": {"type": "raincoat"},
- },
]
tool_definitions = [
{
"name": "fetch_weather",
- "type": "some_built_in",
+ "type": "some_built_in", # Not a 'function' type
"description": "Fetches the weather information for the specified location.",
"parameters": {
"type": "object",
- "properties": {"location": {"type": "string", "description": "The location to fetch weather for."}},
- },
- },
- {
- "name": "buy_jacket",
- "type": "another_built_in",
- "description": "Buy a jacket of the given type.",
- "parameters": {
- "type": "object",
- "properties": {"type": {"type": "string", "description": "The type of jacket to buy."}},
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The location to fetch weather for.",
+ }
+ },
},
},
]
- result = evaluator(query=query, response=response, tool_calls=tool_calls, tool_definitions=tool_definitions)
+ result = evaluator(query=query, tool_calls=tool_calls, tool_definitions=tool_definitions)
- key = ToolCallAccuracyEvaluator._AGGREGATE_RESULT_KEY
+ key = ToolCallAccuracyEvaluator._RESULT_KEY
assert result is not None
- assert key in result and f"{key}_result" in result and f"{key}_threshold" in result
- assert result[key] == "not applicable"
- assert result[f"{key}_result"] == "not applicable"
+ assert result[key] == ToolCallAccuracyEvaluator._NOT_APPLICABLE_RESULT
+ assert result[f"{key}_result"] == "pass"
assert result[f"{key}_threshold"] == ToolCallAccuracyEvaluator._DEFAULT_TOOL_CALL_ACCURACY_SCORE
- assert "per_tool_call_details" in result
- assert len(result["per_tool_call_details"]) == 0
- assert result[f"{key}_reason"] == "Tool call accuracy evaluation is not yet supported for the invoked tools."
+ assert result[f"{key}_reason"] == ToolCallAccuracyEvaluator._TOOL_DEFINITIONS_MISSING_MESSAGE
+ assert result["details"] == {}
def test_evaluate_tools_no_tools(self, mock_model_config):
evaluator = ToolCallAccuracyEvaluator(model_config=mock_model_config)
evaluator._flow = MagicMock(side_effect=flow_side_effect)
- # Test evaluation with valid input, one good tool call and one bad
+ # Test with no tool calls provided
query = "Where is the Eiffel Tower?"
- response = "The Eiffel Tower is in Paris."
tool_calls = []
tool_definitions = [
{
@@ -391,27 +378,21 @@ def test_evaluate_tools_no_tools(self, mock_model_config):
"description": "Fetches the weather information for the specified location.",
"parameters": {
"type": "object",
- "properties": {"location": {"type": "string", "description": "The location to fetch weather for."}},
- },
- },
- {
- "name": "buy_jacket",
- "type": "another_built_in",
- "description": "Buy a jacket of the given type.",
- "parameters": {
- "type": "object",
- "properties": {"type": {"type": "string", "description": "The type of jacket to buy."}},
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The location to fetch weather for.",
+ }
+ },
},
},
]
- result = evaluator(query=query, response=response, tool_calls=tool_calls, tool_definitions=tool_definitions)
+ result = evaluator(query=query, tool_calls=tool_calls, tool_definitions=tool_definitions)
- key = ToolCallAccuracyEvaluator._AGGREGATE_RESULT_KEY
+ key = ToolCallAccuracyEvaluator._RESULT_KEY
assert result is not None
- assert key in result and f"{key}_result" in result and f"{key}_threshold" in result
- assert result[key] == "not applicable"
- assert result[f"{key}_result"] == "not applicable"
+ assert result[key] == ToolCallAccuracyEvaluator._NOT_APPLICABLE_RESULT
+ assert result[f"{key}_result"] == "pass"
assert result[f"{key}_threshold"] == ToolCallAccuracyEvaluator._DEFAULT_TOOL_CALL_ACCURACY_SCORE
- assert "per_tool_call_details" in result
- assert len(result["per_tool_call_details"]) == 0
- assert result[f"{key}_reason"] == "No tool calls were made."
+ assert result[f"{key}_reason"] == ToolCallAccuracyEvaluator._NO_TOOL_CALLS_MESSAGE
+ assert result["details"] == {}