
Commit 9f69f37

ODSC-61986: Get evaluation shapes list from the service config.
1 parent e5710ba commit 9f69f37

File tree

10 files changed: +150 −117 lines


ads/aqua/config/config.py

Lines changed: 2 additions & 1 deletion

```diff
@@ -15,7 +15,7 @@
 DEFAULT_EVALUATION_CONTAINER = "odsc-llm-evaluate"
 
 
-@cached(cache=TTLCache(maxsize=1, ttl=timedelta(hours=5), timer=datetime.now))
+@cached(cache=TTLCache(maxsize=1, ttl=timedelta(hours=1), timer=datetime.now))
 def evaluation_service_config(
     container: Optional[str] = DEFAULT_EVALUATION_CONTAINER,
 ) -> EvaluationServiceConfig:
@@ -27,6 +27,7 @@ def evaluation_service_config(
         EvaluationServiceConfig: The evaluation common config.
     """
 
+    container = container or DEFAULT_EVALUATION_CONTAINER
     return EvaluationServiceConfig(
         **get_container_config()
         .get(ContainerSpec.CONTAINER_SPEC, {})
```
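
The decorator above is standard cachetools usage: with `timer=datetime.now`, `TTLCache` accepts a `timedelta` ttl, so the loaded config expires one hour after it is fetched. A minimal, self-contained sketch of the same pattern (`load_service_config` is an illustrative stand-in, not the repo's function):

```python
from datetime import datetime, timedelta

from cachetools import TTLCache, cached


@cached(cache=TTLCache(maxsize=1, ttl=timedelta(hours=1), timer=datetime.now))
def load_service_config(container: str = "odsc-llm-evaluate") -> dict:
    # Stand-in for the real remote fetch; runs at most once per hour per key.
    print(f"fetching config for {container} ...")
    return {"container": container}


first = load_service_config()   # prints: fetching config for odsc-llm-evaluate ...
second = load_service_config()  # served from cache; nothing printed
assert first is second
```

Note that the cache is keyed on the call arguments, so with `maxsize=1` a call for a different container evicts the single cached entry.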

ads/aqua/config/evaluation/evaluation_service_config.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -250,7 +250,7 @@ class EvaluationServiceConfig(Serializable):
     """
 
     version: Optional[str] = "1.0"
-    kind: Optional[str] = "evaluation"
+    kind: Optional[str] = "evaluation_service_config"
     report_params: Optional[ReportParams] = Field(default_factory=ReportParams)
     inference_params: Optional[InferenceParamsConfig] = Field(
         default_factory=InferenceParamsConfig
```

ads/aqua/evaluation/evaluation.py

Lines changed: 33 additions & 46 deletions

```diff
@@ -11,7 +11,7 @@
 from datetime import datetime, timedelta
 from pathlib import Path
 from threading import Lock
-from typing import Any, Dict, List, Union
+from typing import Any, Dict, List, Optional, Union
 
 import oci
 from cachetools import TTLCache
@@ -45,6 +45,7 @@
     is_valid_ocid,
     upload_local_to_os,
 )
+from ads.aqua.config.config import evaluation_service_config
 from ads.aqua.constants import (
     CONSOLE_LINK_RESOURCE_TYPE_MAPPING,
     EVALUATION_REPORT,
@@ -171,7 +172,7 @@ def create(
                 "Specify either a model or model deployment id."
             )
         evaluation_source = None
-        eval_inference_configuration = None
+        eval_inference_configuration: Dict = {}
         if (
             DataScienceResource.MODEL_DEPLOYMENT
             in create_aqua_evaluation_details.evaluation_source_id
@@ -187,17 +188,26 @@ def create(
                 runtime = ModelDeploymentContainerRuntime.from_dict(
                     evaluation_source.runtime.to_dict()
                 )
-                inference_config = AquaContainerConfig.from_container_index_json(
+                container_config = AquaContainerConfig.from_container_index_json(
                     enable_spec=True
-                ).inference
-                for container in inference_config.values():
-                    if container.name == runtime.image[:runtime.image.rfind(":")]:
+                )
+                for (
+                    inference_container_family,
+                    inference_container_info,
+                ) in container_config.inference.items():
+                    if (
+                        inference_container_info.name
+                        == runtime.image[: runtime.image.rfind(":")]
+                    ):
                         eval_inference_configuration = (
-                            container.spec.evaluation_configuration
+                            evaluation_service_config()
+                            .get_merged_inference_params(inference_container_family)
+                            .to_dict()
                         )
+
             except Exception:
                 logger.debug(
-                    f"Could not load inference config details for the evaluation id: "
+                    f"Could not load inference config details for the evaluation source id: "
                     f"{create_aqua_evaluation_details.evaluation_source_id}. Please check if the container"
                     f" runtime has the correct SMC image information."
                 )
@@ -416,9 +426,7 @@ def create(
                 report_path=create_aqua_evaluation_details.report_path,
                 model_parameters=create_aqua_evaluation_details.model_parameters,
                 metrics=create_aqua_evaluation_details.metrics,
-                inference_configuration=eval_inference_configuration.to_filtered_dict()
-                if eval_inference_configuration
-                else {},
+                inference_configuration=eval_inference_configuration or {},
             )
         ).create(**kwargs)  ## TODO: decide what parameters will be needed
         logger.debug(
@@ -1225,45 +1233,24 @@ def _delete_job_and_model(job, model):
                 f"Exception message: {ex}"
             )
 
-    def load_evaluation_config(self, eval_id):
+    def load_evaluation_config(self, container: Optional[str] = None) -> Dict:
         """Loads evaluation config."""
+
+        # retrieve the evaluation config by container family name
+        evaluation_config = evaluation_service_config(container)
+
+        # convert the new config representation to the old one
         return {
-            "model_params": {
-                "max_tokens": 500,
-                "temperature": 0.7,
-                "top_p": 1.0,
-                "top_k": 50,
-                "presence_penalty": 0.0,
-                "frequency_penalty": 0.0,
-                "stop": [],
-            },
+            "model_params": evaluation_config.ui_config.model_params.default,
             "shape": {
-                "VM.Standard.E3.Flex": {
-                    "ocpu": 8,
-                    "memory_in_gbs": 128,
-                    "block_storage_size": 200,
-                },
-                "VM.Standard.E4.Flex": {
-                    "ocpu": 8,
-                    "memory_in_gbs": 128,
-                    "block_storage_size": 200,
-                },
-                "VM.Standard3.Flex": {
-                    "ocpu": 8,
-                    "memory_in_gbs": 128,
-                    "block_storage_size": 200,
-                },
-                "VM.Optimized3.Flex": {
-                    "ocpu": 8,
-                    "memory_in_gbs": 128,
-                    "block_storage_size": 200,
-                },
-            },
-            "default": {
-                "ocpu": 8,
-                "memory_in_gbs": 128,
-                "block_storage_size": 200,
+                shape.name: shape.to_dict()
+                for shape in evaluation_config.ui_config.shapes
             },
+            "default": (
+                evaluation_config.ui_config.shapes[0].to_dict()
+                if len(evaluation_config.ui_config.shapes) > 0
+                else {}
+            ),
        }
 
     def _get_attribute_from_model_metadata(
```
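
The net effect of the `load_evaluation_config` rewrite: the hardcoded shape table is replaced by a flattening of the service config's shape list, with the first shape doubling as the default. A minimal sketch of that conversion, using a stand-in dataclass in place of the service's `ShapeConfig` model (illustrative names only):

```python
from dataclasses import asdict, dataclass
from typing import Dict, List


@dataclass
class Shape:  # stand-in for the service's ShapeConfig model
    name: str
    ocpu: int
    memory_in_gbs: int
    block_storage_size: int


def to_legacy_config(shapes: List[Shape]) -> Dict:
    """Flatten a shape list into the dict layout the UI already expects."""
    return {
        "shape": {shape.name: asdict(shape) for shape in shapes},
        # the first shape doubles as the default, mirroring the change above
        "default": asdict(shapes[0]) if shapes else {},
    }


print(to_legacy_config([Shape("VM.Standard.E3.Flex", 8, 128, 200)]))
```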

ads/aqua/extension/evaluation_handler.py

Lines changed: 4 additions & 3 deletions

```diff
@@ -2,6 +2,7 @@
 # Copyright (c) 2024 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
+from typing import Optional
 from urllib.parse import urlparse
 
 from tornado.web import HTTPError
@@ -30,7 +31,7 @@ def get(self, eval_id=""):
         return self.read(eval_id)
 
     @handle_exceptions
-    def post(self, *args, **kwargs):
+    def post(self, *args, **kwargs):  # noqa
         """Handles post request for the evaluation APIs
 
         Raises
@@ -117,10 +118,10 @@ class AquaEvaluationConfigHandler(AquaAPIhandler):
     """Handler for Aqua Evaluation Config REST APIs."""
 
     @handle_exceptions
-    def get(self, model_id):
+    def get(self, container: Optional[str] = None, **kwargs):  # noqa
         """Handle GET request."""
 
-        return self.finish(AquaEvaluationApp().load_evaluation_config(model_id))
+        return self.finish(AquaEvaluationApp().load_evaluation_config(container))
 
 
 __handlers__ = [
```
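
The handler contract changes accordingly: the GET path segment is now an optional container family rather than a model OCID. Assuming an environment where the ads package is installed and OCI auth is configured, the call it forwards to reduces to:

```python
from ads.aqua.evaluation import AquaEvaluationApp

# None falls back to DEFAULT_EVALUATION_CONTAINER inside
# evaluation_service_config(); a family name selects that container's config.
config = AquaEvaluationApp().load_evaluation_config("odsc-llm-evaluate")
print(config["model_params"])  # default model parameters for the UI
print(config["default"])       # default evaluation shape, first in the list
```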

ads/aqua/ui.py

Lines changed: 27 additions & 25 deletions

```diff
@@ -84,9 +84,6 @@ class AquaContainerConfigSpec(DataClassSerializable):
     health_check_port: str = None
     env_vars: List[dict] = None
     restricted_params: List[str] = None
-    evaluation_configuration: AquaContainerEvaluationConfig = field(
-        default_factory=AquaContainerEvaluationConfig
-    )
 
 
 @dataclass(repr=False)
@@ -184,32 +181,37 @@ def from_container_index_json(
                     family=container_type,
                     platforms=platforms,
                     model_formats=model_formats,
-                    spec=AquaContainerConfigSpec(
-                        cli_param=container_spec.get(ContainerSpec.CLI_PARM, ""),
-                        server_port=container_spec.get(
-                            ContainerSpec.SERVER_PORT, ""
-                        ),
-                        health_check_port=container_spec.get(
-                            ContainerSpec.HEALTH_CHECK_PORT, ""
-                        ),
-                        env_vars=container_spec.get(ContainerSpec.ENV_VARS, []),
-                        restricted_params=container_spec.get(
-                            ContainerSpec.RESTRICTED_PARAMS, []
-                        ),
-                        evaluation_configuration=AquaContainerEvaluationConfig.from_config(
-                            container_spec.get(
-                                ContainerSpec.EVALUATION_CONFIGURATION, {}
-                            )
-                        ),
-                    )
-                    if container_spec
-                    else None,
+                    spec=(
+                        AquaContainerConfigSpec(
+                            cli_param=container_spec.get(
+                                ContainerSpec.CLI_PARM, ""
+                            ),
+                            server_port=container_spec.get(
+                                ContainerSpec.SERVER_PORT, ""
+                            ),
+                            health_check_port=container_spec.get(
+                                ContainerSpec.HEALTH_CHECK_PORT, ""
+                            ),
+                            env_vars=container_spec.get(ContainerSpec.ENV_VARS, []),
+                            restricted_params=container_spec.get(
+                                ContainerSpec.RESTRICTED_PARAMS, []
+                            ),
+                        )
+                        if container_spec
+                        else None
+                    ),
                 )
                 if container.get("type") == "inference":
                     inference_items[container_type] = container_item
-                elif container_type == "odsc-llm-fine-tuning":
+                elif (
+                    container.get("type") == "fine-tune"
+                    or container_type == "odsc-llm-fine-tuning"
+                ):
                     finetune_items[container_type] = container_item
-                elif container_type == "odsc-llm-evaluate":
+                elif (
+                    container.get("type") == "evaluate"
+                    or container_type == "odsc-llm-evaluate"
+                ):
                     evaluate_items[container_type] = container_item
 
         return AquaContainerConfig(
```
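
A minimal sketch of the classification fallback introduced above, with illustrative data (the real code works on entries from the container index JSON): containers are bucketed by their declared `type`, falling back to the well-known family names for older index entries that lack one.

```python
def classify(container: dict, container_type: str) -> str:
    """Bucket a container by declared type, falling back to family name."""
    declared = container.get("type")
    if declared == "inference":
        return "inference"
    if declared == "fine-tune" or container_type == "odsc-llm-fine-tuning":
        return "finetune"
    if declared == "evaluate" or container_type == "odsc-llm-evaluate":
        return "evaluate"
    return "unknown"


# a legacy entry without a "type" field still lands in the evaluate bucket
assert classify({}, "odsc-llm-evaluate") == "evaluate"
assert classify({"type": "fine-tune"}, "odsc-llm-ft") == "finetune"
```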

tests/unitary/with_extras/aqua/test_data/config/evaluation_config.json

Lines changed: 1 addition & 1 deletion

```diff
@@ -102,7 +102,7 @@
       "inference_timeout": 120
     }
   },
-  "kind": "evaluation",
+  "kind": "evaluation_service_config",
   "report_params": {
     "default": {}
   },
```

tests/unitary/with_extras/aqua/test_data/config/evaluation_config_with_default_params.json

Lines changed: 1 addition & 1 deletion

```diff
@@ -14,7 +14,7 @@
       "inference_timeout": 120
     }
   },
-  "kind": "evaluation",
+  "kind": "evaluation_service_config",
   "report_params": {
     "default": {}
   },
```

tests/unitary/with_extras/aqua/test_evaluation.py

Lines changed: 81 additions & 6 deletions

```diff
@@ -22,6 +22,12 @@
     AquaMissingKeyError,
     AquaRuntimeError,
 )
+from ads.aqua.config.evaluation.evaluation_service_config import (
+    EvaluationServiceConfig,
+    ModelParamsConfig,
+    ShapeConfig,
+    UIConfig,
+)
 from ads.aqua.constants import EVALUATION_REPORT_JSON, EVALUATION_REPORT_MD, UNKNOWN
 from ads.aqua.evaluation import AquaEvaluationApp
 from ads.aqua.evaluation.entities import (
@@ -887,16 +893,85 @@ def test_get_supported_metrics(self):
         for metric in response:
             assert check(metric_schema, metric)
 
-    def test_load_evaluation_config(self):
-        """Tests loading default config for evaluation.
+    @patch("ads.aqua.evaluation.evaluation.evaluation_service_config")
+    def test_load_evaluation_config(self, mock_evaluation_service_config):
+        """
+        Tests loading default config for evaluation.
         This method currently hardcoded the return value.
         """
-        from .utils import EvaluationConfigFormat as config_schema
-        from .utils import check
 
-        response = self.app.load_evaluation_config(eval_id=TestDataset.EVAL_ID)
+        test_evaluation_service_config = EvaluationServiceConfig(
+            ui_config=UIConfig(
+                model_params=ModelParamsConfig(
+                    **{
+                        "default": {
+                            "model": "odsc-llm",
+                            "max_tokens": 500,
+                            "temperature": 0.7,
+                            "top_p": 0.9,
+                            "top_k": 50,
+                            "presence_penalty": 0.0,
+                            "frequency_penalty": 0.0,
+                            "stop": [],
+                        }
+                    }
+                ),
+                shapes=[
+                    ShapeConfig(
+                        **{
+                            "name": "VM.Standard.E3.Flex",
+                            "ocpu": 8,
+                            "memory_in_gbs": 128,
+                            "block_storage_size": 200,
+                            "filter": {
+                                "evaluation_container": ["odsc-llm-evaluate"],
+                                "evaluation_target": ["datasciencemodeldeployment"],
+                            },
+                        }
+                    )
+                ],
+            )
+        )
+        mock_evaluation_service_config.return_value = test_evaluation_service_config
+
+        expected_result = {
+            "model_params": {
+                "model": "odsc-llm",
+                "max_tokens": 500,
+                "temperature": 0.7,
+                "top_p": 0.9,
+                "top_k": 50,
+                "presence_penalty": 0.0,
+                "frequency_penalty": 0.0,
+                "stop": [],
+            },
+            "shape": {
+                "VM.Standard.E3.Flex": {
+                    "name": "VM.Standard.E3.Flex",
+                    "ocpu": 8,
+                    "memory_in_gbs": 128,
+                    "block_storage_size": 200,
+                    "filter": {
+                        "evaluation_container": ["odsc-llm-evaluate"],
+                        "evaluation_target": ["datasciencemodeldeployment"],
+                    },
+                }
+            },
+            "default": {
+                "name": "VM.Standard.E3.Flex",
+                "ocpu": 8,
+                "memory_in_gbs": 128,
+                "block_storage_size": 200,
+                "filter": {
+                    "evaluation_container": ["odsc-llm-evaluate"],
+                    "evaluation_target": ["datasciencemodeldeployment"],
+                },
+            },
+        }
+
+        response = self.app.load_evaluation_config()
         assert isinstance(response, dict)
-        assert check(config_schema, response)
+        assert response == expected_result
 
 
 class TestAquaEvaluationList(unittest.TestCase):
```
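
One detail worth noting in the test above: the `@patch` target is `ads.aqua.evaluation.evaluation.evaluation_service_config`, the name as imported into the module under test, not its definition site in `ads.aqua.config.config`. `unittest.mock` must replace a name where it is looked up at call time. A minimal sketch of that rule (assumes the ads package is importable):

```python
from unittest import mock

import ads.aqua.evaluation.evaluation as evaluation_module

# Patch the importing module's reference; the original definition in
# ads.aqua.config.config is left untouched.
with mock.patch(
    "ads.aqua.evaluation.evaluation.evaluation_service_config"
) as mocked:
    assert evaluation_module.evaluation_service_config is mocked
```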
