Skip to content

Commit 44814b4

Browse files
committed
Adjusts the evaluation metrics config.
1 parent ac541e4 commit 44814b4

File tree

2 files changed

+20
-15
lines changed

2 files changed

+20
-15
lines changed

ads/aqua/evaluation/evaluation.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -904,7 +904,9 @@ def get_status(self, eval_id: str) -> dict:
904904

905905
def get_supported_metrics(self) -> dict:
906906
"""Gets a list of supported metrics for evaluation."""
907-
return [item.to_dict() for item in evaluation_service_config().metrics]
907+
return [
908+
item.to_dict() for item in evaluation_service_config().ui_config.metrics
909+
]
908910

909911
@telemetry(entry_point="plugin=evaluation&action=load_metrics", name="aqua")
910912
def load_metrics(self, eval_id: str) -> AquaEvalMetrics:

tests/unitary/with_extras/aqua/test_evaluation.py

Lines changed: 17 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
from ads.aqua.config.evaluation.evaluation_service_config import (
2626
EvaluationServiceConfig,
2727
MetricConfig,
28+
UIConfig,
2829
)
2930
from ads.aqua.constants import EVALUATION_REPORT_JSON, EVALUATION_REPORT_MD, UNKNOWN
3031
from ads.aqua.evaluation import AquaEvaluationApp
@@ -886,25 +887,27 @@ def test_get_supported_metrics(self, mock_evaluation_service_config):
886887
"""
887888

888889
test_evaluation_service_config = EvaluationServiceConfig(
889-
metrics=[
890-
MetricConfig(
891-
**{
892-
"args": {},
893-
"description": "BERT Score.",
894-
"key": "bertscore",
895-
"name": "BERT Score",
896-
"tags": [],
897-
"task": ["text-generation"],
898-
},
899-
)
900-
]
890+
ui_config=UIConfig(
891+
metrics=[
892+
MetricConfig(
893+
**{
894+
"args": {},
895+
"description": "BERT Score.",
896+
"key": "bertscore",
897+
"name": "BERT Score",
898+
"tags": [],
899+
"task": ["text-generation"],
900+
},
901+
)
902+
]
903+
)
901904
)
902905
mock_evaluation_service_config.return_value = test_evaluation_service_config
903906
response = self.app.get_supported_metrics()
904907
assert isinstance(response, list)
905-
assert len(response) == len(test_evaluation_service_config.metrics)
908+
assert len(response) == len(test_evaluation_service_config.ui_config.metrics)
906909
assert response == [
907-
item.to_dict() for item in test_evaluation_service_config.metrics
910+
item.to_dict() for item in test_evaluation_service_config.ui_config.metrics
908911
]
909912

910913
def test_load_evaluation_config(self):

0 commit comments

Comments (0)