Skip to content

Commit f05379b

Browse files

Committed: Merge branch 'feature/aquav1.0.1' of https://github.com/oracle/accelerated-data-science into ODSC-55768/update_error

2 parents 1d778e6 + 4a6bb03 · commit f05379b

File tree

8 files changed

+42
-35
lines changed

8 files changed

+42
-35
lines changed

.github/workflows/run-unittests-default_setup.yml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,12 @@ on:
44
workflow_dispatch:
55
pull_request:
66
branches: [ "main" ]
7+
paths:
8+
- "ads/**"
9+
- "!ads/opctl/operator/**"
10+
- "!ads/feature_store/**"
11+
- "pyproject.toml"
12+
713

814
# Cancel in progress workflows on pull_requests.
915
# https://docs.github.com/en/actions/using-jobs/using-concurrency#example-using-a-fallback-value

.github/workflows/run-unittests-py38-cov-report.yml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,11 @@ on:
44
workflow_dispatch:
55
pull_request:
66
branches: [ "main" ]
7+
paths:
8+
- "ads/**"
9+
- "!ads/opctl/operator/**"
10+
- "!ads/feature_store/**"
11+
- "pyproject.toml"
712

813
# Cancel in progress workflows on pull_requests.
914
# https://docs.github.com/en/actions/using-jobs/using-concurrency#example-using-a-fallback-value

.github/workflows/run-unittests-py39-py310.yml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,12 @@ name: "[Py3.9][Py3.10] - tests/unitary/**"
33
on:
44
workflow_dispatch:
55
pull_request:
6+
branches: [ "main" ]
7+
paths:
8+
- "ads/**"
9+
- "!ads/opctl/operator/**"
10+
- "!ads/feature_store/**"
11+
- "pyproject.toml"
612

713
# Cancel in progress workflows on pull_requests.
814
# https://docs.github.com/en/actions/using-jobs/using-concurrency#example-using-a-fallback-value

ads/aqua/evaluation.py

Lines changed: 12 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -833,13 +833,13 @@ def get(self, eval_id) -> AquaEvaluationDetail:
833833
logger.info(f"Fetching evaluation: {eval_id} details ...")
834834

835835
resource = utils.query_resource(eval_id)
836-
model_provenance = self.ds_client.get_model_provenance(eval_id).data
837-
838836
if not resource:
839837
raise AquaRuntimeError(
840838
f"Failed to retrieve evalution {eval_id}."
841839
"Please check if the OCID is correct."
842840
)
841+
model_provenance = self.ds_client.get_model_provenance(eval_id).data
842+
843843
jobrun_id = model_provenance.training_id
844844
job_run_details = self._fetch_jobrun(
845845
resource, use_rqs=False, jobrun_id=jobrun_id
@@ -1038,14 +1038,14 @@ def get_status(self, eval_id: str) -> dict:
10381038
"""
10391039
eval = utils.query_resource(eval_id)
10401040

1041-
# TODO: add job_run_id as input param to skip the query below
1042-
model_provenance = self.ds_client.get_model_provenance(eval_id).data
1043-
10441041
if not eval:
10451042
raise AquaRuntimeError(
10461043
f"Failed to retrieve evalution {eval_id}."
10471044
"Please check if the OCID is correct."
10481045
)
1046+
1047+
model_provenance = self.ds_client.get_model_provenance(eval_id).data
1048+
10491049
jobrun_id = model_provenance.training_id
10501050
job_run_details = self._fetch_jobrun(eval, use_rqs=False, jobrun_id=jobrun_id)
10511051

@@ -1295,7 +1295,10 @@ def cancel(self, eval_id) -> dict:
12951295
raise AquaRuntimeError(
12961296
f"Failed to get evaluation details for model {eval_id}"
12971297
)
1298-
job_run_id = model.provenance_metadata.training_id
1298+
1299+
job_run_id = (
1300+
model.provenance_metadata.training_id if model.provenance_metadata else None
1301+
)
12991302
if not job_run_id:
13001303
raise AquaMissingKeyError(
13011304
"Model provenance is missing job run training_id key"
@@ -1358,7 +1361,7 @@ def delete(self, eval_id):
13581361
job_id = model.custom_metadata_list.get(
13591362
EvaluationCustomMetadata.EVALUATION_JOB_ID.value
13601363
).value
1361-
except ValueError:
1364+
except Exception:
13621365
raise AquaMissingKeyError(
13631366
f"Custom metadata is missing {EvaluationCustomMetadata.EVALUATION_JOB_ID.value} key"
13641367
)
@@ -1390,7 +1393,7 @@ def _delete_job_and_model(job, model):
13901393
)
13911394

13921395
def load_evaluation_config(self, eval_id):
1393-
# TODO
1396+
"""Loads evaluation config."""
13941397
return {
13951398
"model_params": {
13961399
"max_tokens": 500,
@@ -1568,20 +1571,6 @@ def _build_resource_identifier(
15681571
)
15691572
return AquaResourceIdentifier()
15701573

1571-
def _get_jobrun(
1572-
self, model: oci.resource_search.models.ResourceSummary, mapping: dict = {}
1573-
) -> Union[
1574-
oci.resource_search.models.ResourceSummary, oci.data_science.models.JobRun
1575-
]:
1576-
jobrun_id = self._get_attribute_from_model_metadata(
1577-
model, EvaluationCustomMetadata.EVALUATION_JOB_RUN_ID.value
1578-
)
1579-
job_run = mapping.get(jobrun_id)
1580-
1581-
if not job_run:
1582-
job_run = self._fetch_jobrun(model, use_rqs=True, jobrun_id=jobrun_id)
1583-
return job_run
1584-
15851574
def _fetch_jobrun(
15861575
self,
15871576
resource: oci.resource_search.models.ResourceSummary,
@@ -1758,7 +1747,7 @@ def _extract_job_lifecycle_details(self, lifecycle_details: str) -> str:
17581747
Examples
17591748
--------
17601749
>>> _extract_job_lifecycle_details("Job run artifact execution failed with exit code 16")
1761-
'The evaluation configuration is invalid due to content validation errors.'
1750+
'Validation errors in the evaluation config. Exit code: 16.'
17621751
17631752
>>> _extract_job_lifecycle_details("Job completed successfully.")
17641753
'Job completed successfully.'

ads/aqua/extension/common_handler.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,10 +7,10 @@
77
from importlib import metadata
88

99
from ads.aqua import ODSC_MODEL_COMPARTMENT_OCID
10-
from ads.aqua.exception import AquaResourceAccessError
11-
from ads.aqua.utils import known_realm
1210
from ads.aqua.decorator import handle_exceptions
11+
from ads.aqua.exception import AquaResourceAccessError
1312
from ads.aqua.extension.base_handler import AquaAPIhandler
13+
from ads.aqua.utils import known_realm
1414

1515

1616
class ADSVersionHandler(AquaAPIhandler):

docs/source/release_notes.rst

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,10 @@ Release Notes
44

55
2.11.7
66
------
7-
Release date: April 8, 2024
7+
Release date: April 18, 2024
88

9-
* Fixed bugs and introduced enhancements following our recent release, which included internal adjustments for future features and updates for the Jupyter Lab 3 upgrade.
9+
* Fixed the bug in ``ADSDataset.show_in_notebook()``.
10+
* Updated langchain version.
1011

1112

1213
2.11.6
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1 +1,2 @@
11
This is a sample evaluation report.html.
2+
Standard deviation (σ)

tests/unitary/with_extras/aqua/test_evaluation.py

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -569,7 +569,6 @@ def test_get(self, mock_get_model_provenance_response, mock_get_job_run_response
569569
def test_get_fail(self, mock_query_resource):
570570
"""Tests get evaluation details failed because of invalid eval id."""
571571
mock_query_resource.return_value = None
572-
self.app.ds_client.get_model_provenance = MagicMock()
573572
with self.assertRaises(AquaRuntimeError) as context:
574573
self.app.get(TestDataset.INVALID_EVAL_ID)
575574

@@ -624,9 +623,10 @@ def test_download_report(
624623
mock_dsc_model_from_id.assert_called_with(TestDataset.EVAL_ID)
625624
self.print_expected_response(response, "DOWNLOAD REPORT")
626625
self.assert_payload(response, AquaEvalReport)
627-
read_content = base64.b64decode(response.content)
626+
read_content = base64.b64decode(response.content).decode()
628627
assert (
629-
read_content == b"This is a sample evaluation report.html.\n"
628+
read_content
629+
== "This is a sample evaluation report.html.\nStandard deviation (σ)\n"
630630
), read_content
631631
assert self.app._report_cache.currsize == 1
632632

@@ -676,10 +676,10 @@ def test_cancel_evaluation(
676676
@parameterized.expand(
677677
[
678678
(None, AquaRuntimeError),
679-
# (
680-
# DataScienceModel(),
681-
# AquaMissingKeyError,
682-
# ),
679+
(
680+
DataScienceModel(),
681+
AquaMissingKeyError,
682+
),
683683
]
684684
)
685685
@patch.object(DataScienceModel, "from_id")
@@ -821,7 +821,6 @@ def test_get_status_when_missing_jobrun(
821821
def test_get_status_failed(self, mock_query_resource):
822822
"""Tests when no correct evaluation found."""
823823
mock_query_resource.return_value = None
824-
self.app.ds_client.get_model_provenance = MagicMock()
825824
with self.assertRaises(AquaRuntimeError) as context:
826825
self.app.get_status(TestDataset.INVALID_EVAL_ID)
827826

0 commit comments

Comments (0)