@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
+# Copyright (c) 2023, 2025 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
 import logging
@@ -19,6 +19,7 @@
 from ads.common.decorator.runtime_dependency import runtime_dependency
 from ads.common.object_storage_details import ObjectStorageDetails
 from ads.opctl import logger
+from ads.opctl.operator.lowcode.common.const import DataColumns
 from ads.opctl.operator.lowcode.common.utils import (
     datetime_to_seconds,
     disable_print,
@@ -28,7 +29,6 @@
     seconds_to_datetime,
     write_data,
 )
-from ads.opctl.operator.lowcode.common.const import DataColumns
 from ads.opctl.operator.lowcode.forecast.model.forecast_datasets import TestData
 from ads.opctl.operator.lowcode.forecast.utils import (
     _build_metrics_df,
@@ -49,10 +49,9 @@
     SpeedAccuracyMode,
     SupportedMetrics,
     SupportedModels,
-    BACKTEST_REPORT_NAME,
 )
 from ..operator_config import ForecastOperatorConfig, ForecastOperatorSpec
-from .forecast_datasets import ForecastDatasets
+from .forecast_datasets import ForecastDatasets, ForecastResults
 
 
 logging.getLogger("report_creator").setLevel(logging.WARNING)
@@ -127,8 +126,9 @@ def generate_report(self):
         if self.spec.generate_report or self.spec.generate_metrics:
             self.eval_metrics = self.generate_train_metrics()
             if not self.target_cat_col:
-                self.eval_metrics.rename({"Series 1": self.original_target_column},
-                                         axis=1, inplace=True)
+                self.eval_metrics.rename(
+                    {"Series 1": self.original_target_column}, axis=1, inplace=True
+                )
 
         if self.spec.test_data:
             try:
@@ -140,8 +140,11 @@ def generate_report(self):
                     elapsed_time=elapsed_time,
                 )
                 if not self.target_cat_col:
-                    self.test_eval_metrics.rename({"Series 1": self.original_target_column},
-                                                  axis=1, inplace=True)
+                    self.test_eval_metrics.rename(
+                        {"Series 1": self.original_target_column},
+                        axis=1,
+                        inplace=True,
+                    )
             except Exception:
                 logger.warn("Unable to generate Test Metrics.")
                 logger.debug(f"Full Traceback: {traceback.format_exc()}")
@@ -223,17 +226,23 @@ def generate_report(self):
             rc.Block(
                 first_10_title,
                 # series_subtext,
-                rc.Select(blocks=first_5_rows_blocks) if self.target_cat_col else first_5_rows_blocks[0],
+                rc.Select(blocks=first_5_rows_blocks)
+                if self.target_cat_col
+                else first_5_rows_blocks[0],
             ),
             rc.Block(
                 last_10_title,
                 # series_subtext,
-                rc.Select(blocks=last_5_rows_blocks) if self.target_cat_col else last_5_rows_blocks[0],
+                rc.Select(blocks=last_5_rows_blocks)
+                if self.target_cat_col
+                else last_5_rows_blocks[0],
             ),
             rc.Block(
                 summary_title,
                 # series_subtext,
-                rc.Select(blocks=data_summary_blocks) if self.target_cat_col else data_summary_blocks[0],
+                rc.Select(blocks=data_summary_blocks)
+                if self.target_cat_col
+                else data_summary_blocks[0],
             ),
             rc.Separator(),
         )
@@ -308,7 +317,7 @@ def generate_report(self):
                 horizon=self.spec.horizon,
                 test_data=test_data,
                 ci_interval_width=self.spec.confidence_interval_width,
-                target_category_column=self.target_cat_col
+                target_category_column=self.target_cat_col,
             )
             if (
                 series_name is not None
@@ -341,11 +350,12 @@ def generate_report(self):
         )
 
         # save the report and result CSV
-        self._save_report(
+        return self._save_report(
             report_sections=report_sections,
             result_df=result_df,
             metrics_df=self.eval_metrics,
             test_metrics_df=self.test_eval_metrics,
+            test_data=test_data,
         )
 
     def _test_evaluate_metrics(self, elapsed_time=0):
@@ -462,10 +472,12 @@ def _save_report(
         result_df: pd.DataFrame,
         metrics_df: pd.DataFrame,
         test_metrics_df: pd.DataFrame,
+        test_data: pd.DataFrame,
     ):
         """Saves resulting reports to the given folder."""
 
         unique_output_dir = self.spec.output_directory.url
+        results = ForecastResults()
 
         if ObjectStorageDetails.is_oci_path(unique_output_dir):
             storage_options = default_signer()
@@ -491,13 +503,22 @@ def _save_report(
                         f2.write(f1.read())
 
         # forecast csv report
-        result_df = result_df if self.target_cat_col else result_df.drop(DataColumns.Series, axis=1)
+        # if self.spec.test_data is not None:
+        #     test_data_dict = test_data.get_dict_by_series()
+        #     for series_id, test_data_values in test_data_dict.items():
+        #         result_df[DataColumns.Series] = test_data_values[]
+        result_df = (
+            result_df
+            if self.target_cat_col
+            else result_df.drop(DataColumns.Series, axis=1)
+        )
         write_data(
             data=result_df,
             filename=os.path.join(unique_output_dir, self.spec.forecast_filename),
             format="csv",
             storage_options=storage_options,
         )
+        results.set_forecast(result_df)
 
         # metrics csv report
         if self.spec.generate_metrics:
@@ -507,17 +528,19 @@ def _save_report(
                 else "Series 1"
             )
             if metrics_df is not None:
+                metrics_df_formatted = metrics_df.reset_index().rename(
+                    {"index": "metrics", "Series 1": metrics_col_name}, axis=1
+                )
                 write_data(
-                    data=metrics_df.reset_index().rename(
-                        {"index": "metrics", "Series 1": metrics_col_name}, axis=1
-                    ),
+                    data=metrics_df_formatted,
                     filename=os.path.join(
                         unique_output_dir, self.spec.metrics_filename
                     ),
                     format="csv",
                     storage_options=storage_options,
                     index=False,
                 )
+                results.set_metrics(metrics_df_formatted)
             else:
                 logger.warn(
                     f"Attempted to generate the {self.spec.metrics_filename} file with the training metrics, however the training metrics could not be properly generated."
@@ -526,17 +549,19 @@ def _save_report(
         # test_metrics csv report
         if self.spec.test_data is not None:
             if test_metrics_df is not None:
+                test_metrics_df_formatted = test_metrics_df.reset_index().rename(
+                    {"index": "metrics", "Series 1": metrics_col_name}, axis=1
+                )
                 write_data(
-                    data=test_metrics_df.reset_index().rename(
-                        {"index": "metrics", "Series 1": metrics_col_name}, axis=1
-                    ),
+                    data=test_metrics_df_formatted,
                     filename=os.path.join(
                         unique_output_dir, self.spec.test_metrics_filename
                     ),
                     format="csv",
                     storage_options=storage_options,
                     index=False,
                 )
+                results.set_test_metrics(test_metrics_df_formatted)
             else:
                 logger.warn(
                     f"Attempted to generate the {self.spec.test_metrics_filename} file with the test metrics, however the test metrics could not be properly generated."
@@ -554,6 +579,7 @@ def _save_report(
                     storage_options=storage_options,
                     index=True,
                 )
+                results.set_global_explanations(self.formatted_global_explanation)
             else:
                 logger.warn(
                     f"Attempted to generate global explanations for the {self.spec.global_explanation_filename} file, but an issue occured in formatting the explanations."
@@ -569,6 +595,7 @@ def _save_report(
                     storage_options=storage_options,
                     index=True,
                 )
+                results.set_local_explanations(self.formatted_local_explanation)
             else:
                 logger.warn(
                     f"Attempted to generate local explanations for the {self.spec.local_explanation_filename} file, but an issue occured in formatting the explanations."
@@ -589,10 +616,12 @@ def _save_report(
                 index=True,
                 indent=4,
             )
+            results.set_model_parameters(self.model_parameters)
 
         # model pickle
         if self.spec.generate_model_pickle:
             self._save_model(unique_output_dir, storage_options)
+            results.set_models(self.models)
 
         logger.info(
             f"The outputs have been successfully "
@@ -612,8 +641,10 @@ def _save_report(
                 index=True,
                 indent=4,
             )
+            results.set_errors_dict(self.errors_dict)
         else:
             logger.info("All modeling completed successfully.")
+        return results
 
     def preprocess(self, df, series_id):
         """The method that needs to be implemented on the particular model level."""
@@ -667,7 +698,10 @@ def _save_model(self, output_dir, storage_options):
         )
 
     def _validate_automlx_explanation_mode(self):
-        if self.spec.model != SupportedModels.AutoMLX and self.spec.explanations_accuracy_mode == SpeedAccuracyMode.AUTOMLX:
+        if (
+            self.spec.model != SupportedModels.AutoMLX
+            and self.spec.explanations_accuracy_mode == SpeedAccuracyMode.AUTOMLX
+        ):
             raise ValueError(
                 "AUTOMLX explanation accuracy mode is only supported for AutoMLX models. "
                 "Please select mode other than AUTOMLX from the available explanations_accuracy_mode options"
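The net effect of this diff is that `_save_report` now collects every artifact it writes to disk (forecast CSV, train and test metrics, global and local explanations, model parameters, pickled models, and the errors dict) into a `ForecastResults` object and returns it, and `generate_report` propagates that object to the caller instead of returning `None`. The class itself is imported from `forecast_datasets.py` and is not shown in this diff; the sketch below is a hypothetical reconstruction in which only the `set_*` method names are taken from the calls above, while the field names, types, and dataclass shape are assumptions.

# Hypothetical sketch of the ForecastResults container returned above.
# Only the set_* method names appear in the diff; everything else here
# is an assumption -- the real class is defined in forecast_datasets.py.
from dataclasses import dataclass
from typing import Any, Dict, Optional

import pandas as pd


@dataclass
class ForecastResults:
    forecast: Optional[pd.DataFrame] = None            # full forecast frame
    metrics: Optional[pd.DataFrame] = None             # training metrics
    test_metrics: Optional[pd.DataFrame] = None        # holdout metrics
    global_explanations: Optional[Any] = None          # formatted explanations
    local_explanations: Optional[Any] = None
    model_parameters: Optional[Dict[str, Any]] = None
    models: Optional[Any] = None                       # fitted model objects
    errors_dict: Optional[Dict[str, Any]] = None       # per-series failures

    def set_forecast(self, df: pd.DataFrame) -> None:
        self.forecast = df

    def set_metrics(self, df: pd.DataFrame) -> None:
        self.metrics = df

    def set_test_metrics(self, df: pd.DataFrame) -> None:
        self.test_metrics = df

    def set_global_explanations(self, explanations: Any) -> None:
        self.global_explanations = explanations

    def set_local_explanations(self, explanations: Any) -> None:
        self.local_explanations = explanations

    def set_model_parameters(self, params: Dict[str, Any]) -> None:
        self.model_parameters = params

    def set_models(self, models: Any) -> None:
        self.models = models

    def set_errors_dict(self, errors: Dict[str, Any]) -> None:
        self.errors_dict = errors

Assuming attribute access mirrors these setters, a caller could run `results = operator_model.generate_report()` and then inspect `results.forecast` or `results.test_metrics` in memory rather than re-reading the CSV files written to the output directory.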