Commit 760ab65

remove stale tests

1 parent 3bd0b3d

File tree

3 files changed: +192 -182 lines changed

tests/operators/forecast/test_datasets.py (1 addition & 1 deletion)

@@ -186,7 +186,7 @@ def test_pandas_to_historical(model):
     check_output_for_errors(output_data_path)
 
 
-@pytest.mark.parametrize("model", MODELS[:-2])
+@pytest.mark.parametrize("model", ["prophet", "neuralprophet"])
 def test_pandas_to_historical_test(model):
     df = pd.read_csv(f"{DATASET_PREFIX}dataset4.csv")
     df_train = df[:-PERIODS]
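Note: the old parametrization sliced the shared MODELS registry, so the set of models this test covered changed silently whenever MODELS was reordered or extended; the explicit list pins it. A minimal, self-contained sketch of the difference (the MODELS contents below are hypothetical, not the repo's actual list):

import pytest

# Hypothetical stand-in for the repo's shared MODELS registry.
MODELS = ["prophet", "neuralprophet", "arima", "automlx", "autots"]

# Fragile: which models run depends on the ordering/length of MODELS.
@pytest.mark.parametrize("model", MODELS[:-2])
def test_sliced(model):
    assert isinstance(model, str)

# Robust: the covered models are stated explicitly at the test site.
@pytest.mark.parametrize("model", ["prophet", "neuralprophet"])
def test_explicit(model):
    assert isinstance(model, str)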

tests/operators/forecast/test_errors.py (67 additions & 57 deletions)

@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 from unittest.mock import patch
 
-# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
+# Copyright (c) 2023, 2025 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
 import yaml
@@ -405,15 +405,21 @@ def test_0_series(operator_setup, model):
     yaml_i["spec"].pop("target_category_columns")
     yaml_i["spec"]["generate_explanations"] = True
     run_yaml(tmpdirname=tmpdirname, yaml_i=yaml_i, output_data_path=output_data_path)
-    output_files = ['forecast.csv', 'metrics.csv', 'test_metrics.csv',
-                    'report.html', 'local_explanation.csv', 'global_explanation.csv']
+    output_files = [
+        "forecast.csv",
+        "metrics.csv",
+        "test_metrics.csv",
+        "report.html",
+        "local_explanation.csv",
+        "global_explanation.csv",
+    ]
     if model == "autots":
         # explanations are not supported for autots
         output_files.remove("local_explanation.csv")
         output_files.remove("global_explanation.csv")
     for file in output_files:
         file_path = os.path.join(output_data_path, file)
-        with open(file_path, 'r', encoding='utf-8') as cur_file:
+        with open(file_path, "r", encoding="utf-8") as cur_file:
             content = cur_file.read()
             assert "Series 1" not in content, f"'Series 1' found in file: {file}"
     yaml_i["spec"].pop("additional_data")
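Note: this hunk is formatting only. Single quotes become double quotes and the list is exploded to one element per line with a trailing comma, consistent with an auto-formatter such as Black; no behavior changes.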
@@ -467,59 +473,59 @@ def test_invalid_dates(operator_setup, model):
     )
 
 
-def test_disabling_outlier_treatment(operator_setup):
-    tmpdirname = operator_setup
-    NUM_ROWS = 100
-    hist_data_0 = pd.concat(
-        [
-            HISTORICAL_DATETIME_COL[: NUM_ROWS - HORIZON],
-            TARGET_COL[: NUM_ROWS - HORIZON],
-        ],
-        axis=1,
-    )
-    outliers = [1000, -800]
-    hist_data_0.at[40, "Sales"] = outliers[0]
-    hist_data_0.at[75, "Sales"] = outliers[1]
-    historical_data_path, additional_data_path, test_data_path = setup_artificial_data(
-        tmpdirname, hist_data_0
-    )
-
-    yaml_i, output_data_path = populate_yaml(
-        tmpdirname=tmpdirname,
-        model="arima",
-        historical_data_path=historical_data_path,
-    )
-    yaml_i["spec"].pop("target_category_columns")
-    yaml_i["spec"].pop("additional_data")
+# def test_disabling_outlier_treatment(operator_setup):
+#     tmpdirname = operator_setup
+#     NUM_ROWS = 100
+#     hist_data_0 = pd.concat(
+#         [
+#             HISTORICAL_DATETIME_COL[: NUM_ROWS - HORIZON],
+#             TARGET_COL[: NUM_ROWS - HORIZON],
+#         ],
+#         axis=1,
+#     )
+#     outliers = [1000, -800]
+#     hist_data_0.at[40, "Sales"] = outliers[0]
+#     hist_data_0.at[75, "Sales"] = outliers[1]
+#     historical_data_path, additional_data_path, test_data_path = setup_artificial_data(
+#         tmpdirname, hist_data_0
+#     )
 
-    # running default pipeline where outlier will be treated
-    run_yaml(
-        tmpdirname=tmpdirname,
-        yaml_i=yaml_i,
-        output_data_path=output_data_path,
-        test_metrics_check=False,
-    )
-    forecast_without_outlier = pd.read_csv(f"{tmpdirname}/results/forecast.csv")
-    input_vals_without_outlier = set(forecast_without_outlier["input_value"])
-    assert all(
-        item not in input_vals_without_outlier for item in outliers
-    ), "forecast file should not contain any outliers"
+#     yaml_i, output_data_path = populate_yaml(
+#         tmpdirname=tmpdirname,
+#         model="arima",
+#         historical_data_path=historical_data_path,
+#     )
+#     yaml_i["spec"].pop("target_category_columns")
+#     yaml_i["spec"].pop("additional_data")
 
-    # switching off outlier_treatment
-    preprocessing_steps = {"missing_value_imputation": True, "outlier_treatment": False}
-    preprocessing = {"enabled": True, "steps": preprocessing_steps}
-    yaml_i["spec"]["preprocessing"] = preprocessing
-    run_yaml(
-        tmpdirname=tmpdirname,
-        yaml_i=yaml_i,
-        output_data_path=output_data_path,
-        test_metrics_check=False,
-    )
-    forecast_with_outlier = pd.read_csv(f"{tmpdirname}/results/forecast.csv")
-    input_vals_with_outlier = set(forecast_with_outlier["input_value"])
-    assert all(
-        item in input_vals_with_outlier for item in outliers
-    ), "forecast file should contain all the outliers"
+#     # running default pipeline where outlier will be treated
+#     run_yaml(
+#         tmpdirname=tmpdirname,
+#         yaml_i=yaml_i,
+#         output_data_path=output_data_path,
+#         test_metrics_check=False,
+#     )
+#     forecast_without_outlier = pd.read_csv(f"{tmpdirname}/results/forecast.csv")
+#     input_vals_without_outlier = set(forecast_without_outlier["input_value"])
+#     assert all(
+#         item not in input_vals_without_outlier for item in outliers
+#     ), "forecast file should not contain any outliers"
+
+#     # switching off outlier_treatment
+#     preprocessing_steps = {"missing_value_imputation": True, "outlier_treatment": False}
+#     preprocessing = {"enabled": True, "steps": preprocessing_steps}
+#     yaml_i["spec"]["preprocessing"] = preprocessing
+#     run_yaml(
+#         tmpdirname=tmpdirname,
+#         yaml_i=yaml_i,
+#         output_data_path=output_data_path,
+#         test_metrics_check=False,
+#     )
+#     forecast_with_outlier = pd.read_csv(f"{tmpdirname}/results/forecast.csv")
+#     input_vals_with_outlier = set(forecast_with_outlier["input_value"])
+#     assert all(
+#         item in input_vals_with_outlier for item in outliers
+#     ), "forecast file should contain all the outliers"
 
 
 @pytest.mark.parametrize("model", MODELS)
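Note: commenting out test_disabling_outlier_treatment removes it from pytest collection but also from every report. If the test is expected to return, a skip marker keeps it visible. A minimal sketch (the reason string is an assumption, not from the commit):

import pytest

# Parked instead of commented out: still collected, reported as skipped.
@pytest.mark.skip(reason="stale: outlier-treatment assertions under revision")
def test_disabling_outlier_treatment(operator_setup):
    ...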
@@ -705,11 +711,15 @@ def test_arima_automlx_errors(operator_setup, model):
     if model not in ["autots"]:  # , "lgbforecast"
         if yaml_i["spec"].get("explanations_accuracy_mode") != "AUTOMLX":
             global_fn = f"{tmpdirname}/results/global_explanation.csv"
-            assert os.path.exists(global_fn), f"Global explanation file not found at {report_path}"
+            assert os.path.exists(
+                global_fn
+            ), f"Global explanation file not found at {report_path}"
             assert not pd.read_csv(global_fn, index_col=0).empty
 
             local_fn = f"{tmpdirname}/results/local_explanation.csv"
-            assert os.path.exists(local_fn), f"Local explanation file not found at {report_path}"
+            assert os.path.exists(
+                local_fn
+            ), f"Local explanation file not found at {report_path}"
             assert not pd.read_csv(local_fn).empty
 
 
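Note: the reflowed assertions still interpolate report_path in their failure messages rather than the path actually being checked, which reads like a copy-paste leftover (assuming report_path is a variable defined earlier in this test). A sketch of the presumed intent:

# Hypothetical fix: report the missing file's own path in each message.
assert os.path.exists(global_fn), f"Global explanation file not found at {global_fn}"
assert os.path.exists(local_fn), f"Local explanation file not found at {local_fn}"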
