
Commit a030882

[ready to review] ODSC-68841: API Returns Data Directly (#1048)
2 parents 1e99804 + 583b3db commit a030882

File tree: 12 files changed (+292 -103 lines changed)

.github/workflows/run-forecast-unit-tests.yml

Lines changed: 1 addition & 1 deletion

@@ -56,6 +56,6 @@ jobs:
           $CONDA/bin/conda init
           source /home/runner/.bashrc
           pip install -r test-requirements-operators.txt
-          pip install "oracle-automlx[forecasting]>=24.4.1"
+          pip install "oracle-automlx[forecasting]>=25.1.1"
           pip install pandas>=2.2.0
           python -m pytest -v -p no:warnings --durations=5 tests/operators/forecast

ads/opctl/anomaly_detection.py

Lines changed: 11 additions & 0 deletions

@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2025 Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+from ads.opctl.operator.lowcode.anomaly.__main__ import operate
+from ads.opctl.operator.lowcode.anomaly.operator_config import AnomalyOperatorConfig
+
+if __name__ == "__main__":
+    config = AnomalyOperatorConfig()
+    operate(config)

ads/opctl/forecast.py

Lines changed: 11 additions & 0 deletions

@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2025 Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+from ads.opctl.operator.lowcode.forecast.__main__ import operate
+from ads.opctl.operator.lowcode.forecast.operator_config import ForecastOperatorConfig
+
+if __name__ == "__main__":
+    config = ForecastOperatorConfig()
+    operate(config)
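These two new modules are thin, script-style entry points: each builds a default operator config and hands it straight to the corresponding operate() function. Below is a minimal, hedged sketch of driving the forecast operator programmatically with an explicit spec file; the from_yaml loader is assumed from ADS's serializable config base classes and is not part of this diff, so verify it against your ADS version.

# Hedged sketch, not part of this commit: run the forecast operator with an
# explicit spec instead of the bare default config used by the script above.
# ForecastOperatorConfig.from_yaml is an assumed loader (ADS configs are
# serializable); substitute whatever loader your ADS version provides.
from ads.opctl.operator.lowcode.forecast.__main__ import operate
from ads.opctl.operator.lowcode.forecast.operator_config import ForecastOperatorConfig

config = ForecastOperatorConfig.from_yaml(uri="forecast.yaml")  # assumed loader
operate(config)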

ads/opctl/operator/lowcode/forecast/__main__.py

Lines changed: 5 additions & 5 deletions

@@ -1,7 +1,6 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*--
 
-# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
+# Copyright (c) 2023, 2025 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
 import json
@@ -15,17 +14,17 @@
 from ads.opctl.operator.common.const import ENV_OPERATOR_ARGS
 from ads.opctl.operator.common.utils import _parse_input_args
 
+from .model.forecast_datasets import ForecastDatasets, ForecastResults
 from .operator_config import ForecastOperatorConfig
-from .model.forecast_datasets import ForecastDatasets
 from .whatifserve import ModelDeploymentManager
 
 
-def operate(operator_config: ForecastOperatorConfig) -> None:
+def operate(operator_config: ForecastOperatorConfig) -> ForecastResults:
     """Runs the forecasting operator."""
     from .model.factory import ForecastOperatorModelFactory
 
     datasets = ForecastDatasets(operator_config)
-    ForecastOperatorModelFactory.get_model(
+    results = ForecastOperatorModelFactory.get_model(
         operator_config, datasets
     ).generate_report()
     # saving to model catalog
@@ -36,6 +35,7 @@ def operate(operator_config: ForecastOperatorConfig) -> None:
         if spec.what_if_analysis.model_deployment:
             mdm.create_deployment()
         mdm.save_deployment_info()
+    return results
 
 
 def verify(spec: Dict, **kwargs: Dict) -> bool:
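The signature change above is the heart of this commit: operate() now returns the ForecastResults object assembled during report generation instead of None. A short, hedged caller sketch follows, reusing the config from the earlier sketch; note that this diff only shows the set_* methods on ForecastResults, so how the stored frames are read back is an assumption.

# Hedged caller sketch, not from this commit. Reuses `config` from the earlier
# sketch. operate() previously returned None; with this change it returns the
# ForecastResults populated inside _save_report, so outputs can be consumed in
# memory instead of re-reading the CSVs written to the output directory.
results = operate(config)
print(type(results).__name__)  # expected: ForecastResults
# Only set_forecast/set_metrics/... appear in this diff; the corresponding
# read accessors (attributes or get_* methods) are an assumption.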

ads/opctl/operator/lowcode/forecast/model/automlx.py

Lines changed: 20 additions & 11 deletions

@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
+# Copyright (c) 2023, 2025 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 import logging
 import os
@@ -66,8 +66,7 @@ def preprocess(self, data, series_id): # TODO: re-use self.le for explanations
     @runtime_dependency(
         module="automlx",
         err_msg=(
-            "Please run `pip3 install oracle-automlx>=23.4.1` and "
-            "`pip3 install oracle-automlx[forecasting]>=23.4.1` "
+            "Please run `pip3 install oracle-automlx[forecasting]>=25.1.1` "
            "to install the required dependencies for automlx."
         ),
     )
@@ -105,7 +104,7 @@ def _build_model(self) -> pd.DataFrame:
         engine_opts = (
             None
             if engine_type == "local"
-            else ({"ray_setup": {"_temp_dir": "/tmp/ray-temp"}},)
+            else {"ray_setup": {"_temp_dir": "/tmp/ray-temp"}}
         )
         init(
             engine=engine_type,
@@ -272,11 +271,15 @@ def _generate_report(self):
         self.formatted_local_explanation = aggregate_local_explanations
 
         if not self.target_cat_col:
-            self.formatted_global_explanation = self.formatted_global_explanation.rename(
-                {"Series 1": self.original_target_column},
-                axis=1,
+            self.formatted_global_explanation = (
+                self.formatted_global_explanation.rename(
+                    {"Series 1": self.original_target_column},
+                    axis=1,
+                )
+            )
+            self.formatted_local_explanation.drop(
+                "Series", axis=1, inplace=True
             )
-            self.formatted_local_explanation.drop("Series", axis=1, inplace=True)
 
         # Create a markdown section for the global explainability
         global_explanation_section = rc.Block(
@@ -436,7 +439,9 @@ def explain_model(self):
 
                 # Generate explanations for the forecast
                 explanations = explainer.explain_prediction(
-                    X=self.datasets.additional_data.get_data_for_series(series_id=s_id)
+                    X=self.datasets.additional_data.get_data_for_series(
+                        series_id=s_id
+                    )
                     .drop(self.spec.datetime_column.name, axis=1)
                     .tail(self.spec.horizon)
                     if self.spec.additional_data
@@ -448,7 +453,9 @@
                 explanations_df = pd.concat(
                     [exp.to_dataframe() for exp in explanations]
                 )
-                explanations_df["row"] = explanations_df.groupby("Feature").cumcount()
+                explanations_df["row"] = explanations_df.groupby(
+                    "Feature"
+                ).cumcount()
                 explanations_df = explanations_df.pivot(
                     index="row", columns="Feature", values="Attribution"
                 )
@@ -460,5 +467,7 @@
                 # Fall back to the default explanation generation method
                 super().explain_model()
             except Exception as e:
-                logger.warning(f"Failed to generate explanations for series {s_id} with error: {e}.")
+                logger.warning(
+                    f"Failed to generate explanations for series {s_id} with error: {e}."
+                )
                 logger.debug(f"Full Traceback: {traceback.format_exc()}")

ads/opctl/operator/lowcode/forecast/model/base_model.py

Lines changed: 55 additions & 21 deletions

@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
+# Copyright (c) 2023, 2025 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
 import logging
@@ -19,6 +19,7 @@
 from ads.common.decorator.runtime_dependency import runtime_dependency
 from ads.common.object_storage_details import ObjectStorageDetails
 from ads.opctl import logger
+from ads.opctl.operator.lowcode.common.const import DataColumns
 from ads.opctl.operator.lowcode.common.utils import (
     datetime_to_seconds,
     disable_print,
@@ -28,7 +29,6 @@
     seconds_to_datetime,
     write_data,
 )
-from ads.opctl.operator.lowcode.common.const import DataColumns
 from ads.opctl.operator.lowcode.forecast.model.forecast_datasets import TestData
 from ads.opctl.operator.lowcode.forecast.utils import (
     _build_metrics_df,
@@ -49,10 +49,9 @@
     SpeedAccuracyMode,
     SupportedMetrics,
     SupportedModels,
-    BACKTEST_REPORT_NAME,
 )
 from ..operator_config import ForecastOperatorConfig, ForecastOperatorSpec
-from .forecast_datasets import ForecastDatasets
+from .forecast_datasets import ForecastDatasets, ForecastResults
 
 logging.getLogger("report_creator").setLevel(logging.WARNING)
 
@@ -127,8 +126,9 @@ def generate_report(self):
         if self.spec.generate_report or self.spec.generate_metrics:
             self.eval_metrics = self.generate_train_metrics()
             if not self.target_cat_col:
-                self.eval_metrics.rename({"Series 1": self.original_target_column},
-                    axis=1, inplace=True)
+                self.eval_metrics.rename(
+                    {"Series 1": self.original_target_column}, axis=1, inplace=True
+                )
 
         if self.spec.test_data:
             try:
@@ -140,8 +140,11 @@ def generate_report(self):
                     elapsed_time=elapsed_time,
                 )
                 if not self.target_cat_col:
-                    self.test_eval_metrics.rename({"Series 1": self.original_target_column},
-                        axis=1, inplace=True)
+                    self.test_eval_metrics.rename(
+                        {"Series 1": self.original_target_column},
+                        axis=1,
+                        inplace=True,
+                    )
             except Exception:
                 logger.warn("Unable to generate Test Metrics.")
                 logger.debug(f"Full Traceback: {traceback.format_exc()}")
@@ -223,17 +226,23 @@
             rc.Block(
                 first_10_title,
                 # series_subtext,
-                rc.Select(blocks=first_5_rows_blocks) if self.target_cat_col else first_5_rows_blocks[0],
+                rc.Select(blocks=first_5_rows_blocks)
+                if self.target_cat_col
+                else first_5_rows_blocks[0],
             ),
             rc.Block(
                 last_10_title,
                 # series_subtext,
-                rc.Select(blocks=last_5_rows_blocks) if self.target_cat_col else last_5_rows_blocks[0],
+                rc.Select(blocks=last_5_rows_blocks)
+                if self.target_cat_col
+                else last_5_rows_blocks[0],
             ),
             rc.Block(
                 summary_title,
                 # series_subtext,
-                rc.Select(blocks=data_summary_blocks) if self.target_cat_col else data_summary_blocks[0],
+                rc.Select(blocks=data_summary_blocks)
+                if self.target_cat_col
+                else data_summary_blocks[0],
             ),
             rc.Separator(),
         )
@@ -308,7 +317,7 @@
                 horizon=self.spec.horizon,
                 test_data=test_data,
                 ci_interval_width=self.spec.confidence_interval_width,
-                target_category_column=self.target_cat_col
+                target_category_column=self.target_cat_col,
             )
             if (
                 series_name is not None
@@ -341,11 +350,12 @@
         )
 
         # save the report and result CSV
-        self._save_report(
+        return self._save_report(
             report_sections=report_sections,
             result_df=result_df,
             metrics_df=self.eval_metrics,
             test_metrics_df=self.test_eval_metrics,
+            test_data=test_data,
         )
 
     def _test_evaluate_metrics(self, elapsed_time=0):
@@ -462,10 +472,12 @@ def _save_report(
         result_df: pd.DataFrame,
         metrics_df: pd.DataFrame,
         test_metrics_df: pd.DataFrame,
+        test_data: pd.DataFrame,
     ):
         """Saves resulting reports to the given folder."""
 
         unique_output_dir = self.spec.output_directory.url
+        results = ForecastResults()
 
         if ObjectStorageDetails.is_oci_path(unique_output_dir):
             storage_options = default_signer()
@@ -491,13 +503,22 @@
                 f2.write(f1.read())
 
         # forecast csv report
-        result_df = result_df if self.target_cat_col else result_df.drop(DataColumns.Series, axis=1)
+        # if self.spec.test_data is not None:
+        #     test_data_dict = test_data.get_dict_by_series()
+        #     for series_id, test_data_values in test_data_dict.items():
+        #         result_df[DataColumns.Series] = test_data_values[]
+        result_df = (
+            result_df
+            if self.target_cat_col
+            else result_df.drop(DataColumns.Series, axis=1)
+        )
         write_data(
             data=result_df,
             filename=os.path.join(unique_output_dir, self.spec.forecast_filename),
             format="csv",
             storage_options=storage_options,
         )
+        results.set_forecast(result_df)
 
         # metrics csv report
         if self.spec.generate_metrics:
@@ -507,17 +528,19 @@
             else "Series 1"
         )
         if metrics_df is not None:
+            metrics_df_formatted = metrics_df.reset_index().rename(
+                {"index": "metrics", "Series 1": metrics_col_name}, axis=1
+            )
             write_data(
-                data=metrics_df.reset_index().rename(
-                    {"index": "metrics", "Series 1": metrics_col_name}, axis=1
-                ),
+                data=metrics_df_formatted,
                 filename=os.path.join(
                     unique_output_dir, self.spec.metrics_filename
                 ),
                 format="csv",
                 storage_options=storage_options,
                 index=False,
             )
+            results.set_metrics(metrics_df_formatted)
         else:
             logger.warn(
                 f"Attempted to generate the {self.spec.metrics_filename} file with the training metrics, however the training metrics could not be properly generated."
@@ -526,17 +549,19 @@
         # test_metrics csv report
         if self.spec.test_data is not None:
             if test_metrics_df is not None:
+                test_metrics_df_formatted = test_metrics_df.reset_index().rename(
+                    {"index": "metrics", "Series 1": metrics_col_name}, axis=1
+                )
                 write_data(
-                    data=test_metrics_df.reset_index().rename(
-                        {"index": "metrics", "Series 1": metrics_col_name}, axis=1
-                    ),
+                    data=test_metrics_df_formatted,
                     filename=os.path.join(
                        unique_output_dir, self.spec.test_metrics_filename
                    ),
                    format="csv",
                    storage_options=storage_options,
                    index=False,
                )
+                results.set_test_metrics(test_metrics_df_formatted)
            else:
                logger.warn(
                    f"Attempted to generate the {self.spec.test_metrics_filename} file with the test metrics, however the test metrics could not be properly generated."
@@ -554,6 +579,7 @@
                    storage_options=storage_options,
                    index=True,
                )
+                results.set_global_explanations(self.formatted_global_explanation)
            else:
                logger.warn(
                    f"Attempted to generate global explanations for the {self.spec.global_explanation_filename} file, but an issue occured in formatting the explanations."
@@ -569,6 +595,7 @@
                    storage_options=storage_options,
                    index=True,
                )
+                results.set_local_explanations(self.formatted_local_explanation)
            else:
                logger.warn(
                    f"Attempted to generate local explanations for the {self.spec.local_explanation_filename} file, but an issue occured in formatting the explanations."
@@ -589,10 +616,12 @@
                index=True,
                indent=4,
            )
+            results.set_model_parameters(self.model_parameters)
 
        # model pickle
        if self.spec.generate_model_pickle:
            self._save_model(unique_output_dir, storage_options)
+            results.set_models(self.models)
 
        logger.info(
            f"The outputs have been successfully "
@@ -612,8 +641,10 @@
                index=True,
                indent=4,
            )
+            results.set_errors_dict(self.errors_dict)
        else:
            logger.info("All modeling completed successfully.")
+        return results
 
    def preprocess(self, df, series_id):
        """The method that needs to be implemented on the particular model level."""
@@ -667,7 +698,10 @@ def _save_model(self, output_dir, storage_options):
        )
 
    def _validate_automlx_explanation_mode(self):
-        if self.spec.model != SupportedModels.AutoMLX and self.spec.explanations_accuracy_mode == SpeedAccuracyMode.AUTOMLX:
+        if (
+            self.spec.model != SupportedModels.AutoMLX
+            and self.spec.explanations_accuracy_mode == SpeedAccuracyMode.AUTOMLX
+        ):
            raise ValueError(
                "AUTOMLX explanation accuracy mode is only supported for AutoMLX models. "
                "Please select mode other than AUTOMLX from the available explanations_accuracy_mode options"
