@@ -32,9 +32,9 @@
 MODELS = [
     "arima",
-    # "automlx",
+    "automlx",
     "prophet",
     "neuralprophet",
-    # "autots",
+    "autots",
     # "lgbforecast",
     "auto-select",
 ]
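With "automlx" and "autots" re-enabled, the `MODELS[:-1]` slice used in the parametrize decorators below covers every listed model except the final "auto-select" entry. A minimal sketch of what that slice expands to (illustration only, not part of the commit):

```python
MODELS = [
    "arima",
    "automlx",
    "prophet",
    "neuralprophet",
    "autots",
    # "lgbforecast",  # still disabled in this commit
    "auto-select",
]

# MODELS[:-1] drops only the last element, "auto-select"
print(MODELS[:-1])
# ['arima', 'automlx', 'prophet', 'neuralprophet', 'autots']
```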
@@ -156,7 +156,7 @@ def test_load_datasets(model, data_details):
         verify_explanations(
             tmpdirname=tmpdirname,
             additional_cols=additional_cols,
-            target_category_columns=yaml_i["spec"]['target_category_columns']
+            target_category_columns=yaml_i["spec"]["target_category_columns"],
         )
     if include_test_data:
         test_metrics = pd.read_csv(f"{tmpdirname}/results/test_metrics.csv")
@@ -165,7 +165,7 @@ def test_load_datasets(model, data_details):
         print(train_metrics)


-@pytest.mark.parametrize("model", MODELS[:1])
+@pytest.mark.parametrize("model", MODELS[:-1])
 def test_pandas_to_historical(model):
     df = pd.read_csv(f"{DATASET_PREFIX}dataset1.csv")
@@ -184,7 +184,7 @@ def test_pandas_to_historical(model):
     check_output_for_errors(output_data_path)


-@pytest.mark.parametrize("model", ["neuralprophet"])
+@pytest.mark.parametrize("model", MODELS[:-1])
 def test_pandas_to_historical_test(model):
     df = pd.read_csv(f"{DATASET_PREFIX}dataset4.csv")
     df_train = df[:-PERIODS]
@@ -207,26 +207,33 @@ def test_pandas_to_historical_test(model):
     test_metrics = pd.read_csv(f"{output_data_path}/metrics.csv")
     print(test_metrics)

+
 def check_output_for_errors(output_data_path):
     # try:
     # List files in the directory
-    result = subprocess.run(f"ls -a {output_data_path}", shell=True, check=True, text=True, capture_output=True)
+    result = subprocess.run(
+        f"ls -a {output_data_path}",
+        shell=True,
+        check=True,
+        text=True,
+        capture_output=True,
+    )
     files = result.stdout.splitlines()

     # Check if errors.json is in the directory
     if "errors.json" in files:
         errors_file_path = os.path.join(output_data_path, "errors.json")
-
+
         # Read the errors.json file
         with open(errors_file_path, "r") as f:
             errors_content = json.load(f)
-
+
         # Extract and raise the error message
         # error_message = errors_content.get("message", "An error occurred.")
         raise Exception(errors_content)

     print("No errors.json file found. Directory is clear.")
-
+
     # except subprocess.CalledProcessError as e:
     #     print(f"Error listing files in directory: {e}")
     # except FileNotFoundError:
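The reformatted helper shells out to `ls -a` only to list the output directory. For the `"errors.json"` membership check, a pure-Python `os.listdir` would behave the same without the subprocess dependency. A minimal sketch, not part of the commit, using a hypothetical name to avoid clashing with the real helper:

```python
import json
import os


def check_output_for_errors_py(output_data_path):
    # os.listdir stands in for `ls -a`; it omits "." and "..",
    # which does not matter for this membership check
    files = os.listdir(output_data_path)
    if "errors.json" in files:
        # Surface the recorded operator errors as a test failure
        with open(os.path.join(output_data_path, "errors.json"), "r") as f:
            raise Exception(json.load(f))
    print("No errors.json file found. Directory is clear.")
```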
@@ -236,6 +243,7 @@ def check_output_for_errors(output_data_path):
     # except Exception as e:
     #     print(f"Raised error: {e}")

+
 def run_operator(
     historical_data_path,
     additional_data_path,