Commit 4a6bb03 — bugfix/aqua.evaluation (#761)

Parent commit: 389006a

File tree: 2 files changed (+16, −29 lines)

ads/aqua/evaluation.py

Lines changed: 12 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -833,13 +833,13 @@ def get(self, eval_id) -> AquaEvaluationDetail:
833833
logger.info(f"Fetching evaluation: {eval_id} details ...")
834834

835835
resource = utils.query_resource(eval_id)
836-
model_provenance = self.ds_client.get_model_provenance(eval_id).data
837-
838836
if not resource:
839837
raise AquaRuntimeError(
840838
f"Failed to retrieve evalution {eval_id}."
841839
"Please check if the OCID is correct."
842840
)
841+
model_provenance = self.ds_client.get_model_provenance(eval_id).data
842+
843843
jobrun_id = model_provenance.training_id
844844
job_run_details = self._fetch_jobrun(
845845
resource, use_rqs=False, jobrun_id=jobrun_id
@@ -1038,14 +1038,14 @@ def get_status(self, eval_id: str) -> dict:
10381038
"""
10391039
eval = utils.query_resource(eval_id)
10401040

1041-
# TODO: add job_run_id as input param to skip the query below
1042-
model_provenance = self.ds_client.get_model_provenance(eval_id).data
1043-
10441041
if not eval:
10451042
raise AquaRuntimeError(
10461043
f"Failed to retrieve evalution {eval_id}."
10471044
"Please check if the OCID is correct."
10481045
)
1046+
1047+
model_provenance = self.ds_client.get_model_provenance(eval_id).data
1048+
10491049
jobrun_id = model_provenance.training_id
10501050
job_run_details = self._fetch_jobrun(eval, use_rqs=False, jobrun_id=jobrun_id)
10511051

@@ -1295,7 +1295,10 @@ def cancel(self, eval_id) -> dict:
12951295
raise AquaRuntimeError(
12961296
f"Failed to get evaluation details for model {eval_id}"
12971297
)
1298-
job_run_id = model.provenance_metadata.training_id
1298+
1299+
job_run_id = (
1300+
model.provenance_metadata.training_id if model.provenance_metadata else None
1301+
)
12991302
if not job_run_id:
13001303
raise AquaMissingKeyError(
13011304
"Model provenance is missing job run training_id key"
@@ -1358,7 +1361,7 @@ def delete(self, eval_id):
13581361
job_id = model.custom_metadata_list.get(
13591362
EvaluationCustomMetadata.EVALUATION_JOB_ID.value
13601363
).value
1361-
except ValueError:
1364+
except Exception:
13621365
raise AquaMissingKeyError(
13631366
f"Custom metadata is missing {EvaluationCustomMetadata.EVALUATION_JOB_ID.value} key"
13641367
)
@@ -1390,7 +1393,7 @@ def _delete_job_and_model(job, model):
13901393
)
13911394

13921395
def load_evaluation_config(self, eval_id):
1393-
# TODO
1396+
"""Loads evaluation config."""
13941397
return {
13951398
"model_params": {
13961399
"max_tokens": 500,
@@ -1568,20 +1571,6 @@ def _build_resource_identifier(
15681571
)
15691572
return AquaResourceIdentifier()
15701573

1571-
def _get_jobrun(
1572-
self, model: oci.resource_search.models.ResourceSummary, mapping: dict = {}
1573-
) -> Union[
1574-
oci.resource_search.models.ResourceSummary, oci.data_science.models.JobRun
1575-
]:
1576-
jobrun_id = self._get_attribute_from_model_metadata(
1577-
model, EvaluationCustomMetadata.EVALUATION_JOB_RUN_ID.value
1578-
)
1579-
job_run = mapping.get(jobrun_id)
1580-
1581-
if not job_run:
1582-
job_run = self._fetch_jobrun(model, use_rqs=True, jobrun_id=jobrun_id)
1583-
return job_run
1584-
15851574
def _fetch_jobrun(
15861575
self,
15871576
resource: oci.resource_search.models.ResourceSummary,
@@ -1758,7 +1747,7 @@ def _extract_job_lifecycle_details(self, lifecycle_details: str) -> str:
17581747
Examples
17591748
--------
17601749
>>> _extract_job_lifecycle_details("Job run artifact execution failed with exit code 16")
1761-
'The evaluation configuration is invalid due to content validation errors.'
1750+
'Validation errors in the evaluation config. Exit code: 16.'
17621751
17631752
>>> _extract_job_lifecycle_details("Job completed successfully.")
17641753
'Job completed successfully.'

tests/unitary/with_extras/aqua/test_evaluation.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -569,7 +569,6 @@ def test_get(self, mock_get_model_provenance_response, mock_get_job_run_response
569569
def test_get_fail(self, mock_query_resource):
570570
"""Tests get evaluation details failed because of invalid eval id."""
571571
mock_query_resource.return_value = None
572-
self.app.ds_client.get_model_provenance = MagicMock()
573572
with self.assertRaises(AquaRuntimeError) as context:
574573
self.app.get(TestDataset.INVALID_EVAL_ID)
575574

@@ -677,10 +676,10 @@ def test_cancel_evaluation(
677676
@parameterized.expand(
678677
[
679678
(None, AquaRuntimeError),
680-
# (
681-
# DataScienceModel(),
682-
# AquaMissingKeyError,
683-
# ),
679+
(
680+
DataScienceModel(),
681+
AquaMissingKeyError,
682+
),
684683
]
685684
)
686685
@patch.object(DataScienceModel, "from_id")
@@ -822,7 +821,6 @@ def test_get_status_when_missing_jobrun(
822821
def test_get_status_failed(self, mock_query_resource):
823822
"""Tests when no correct evaluation found."""
824823
mock_query_resource.return_value = None
825-
self.app.ds_client.get_model_provenance = MagicMock()
826824
with self.assertRaises(AquaRuntimeError) as context:
827825
self.app.get_status(TestDataset.INVALID_EVAL_ID)
828826

Comments (0)