Skip to content

Commit 205276f

Browse files
committed
clean up dp section
1 parent 78329fb commit 205276f

File tree

4 files changed

+69
-60
lines changed

4 files changed

+69
-60
lines changed

ads/opctl/operator/lowcode/forecast/model/base_model.py

Lines changed: 63 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -147,37 +147,39 @@ def generate_report(self):
147147
[f"{s_id} \n" for s_id in self.datasets.list_series_ids()]
148148
)
149149

150-
header_section = [
151-
dp.Text(f"You selected the **`{self.spec.model}`** model."),
152-
model_description,
153-
dp.Text(
154-
"Based on your dataset, you could have also selected "
155-
f"any of the models: `{'`, `'.join(SupportedModels.keys())}`."
156-
),
157-
dp.Group(
158-
dp.BigNumber(
159-
heading="Analysis was completed in ",
160-
value=human_time_friendly(elapsed_time),
150+
header_section = dp.Blocks(
151+
blocks=[
152+
dp.Text(f"You selected the **`{self.spec.model}`** model."),
153+
model_description,
154+
dp.Text(
155+
"Based on your dataset, you could have also selected "
156+
f"any of the models: `{'`, `'.join(SupportedModels.keys())}`."
161157
),
162-
dp.BigNumber(
163-
heading="Starting time index",
164-
value=self.datasets.get_earliest_timestamp().strftime(
165-
"%B %d, %Y"
158+
dp.Group(
159+
dp.BigNumber(
160+
heading="Analysis was completed in ",
161+
value=human_time_friendly(elapsed_time),
166162
),
167-
),
168-
dp.BigNumber(
169-
heading="Ending time index",
170-
value=self.datasets.get_latest_timestamp().strftime(
171-
"%B %d, %Y"
163+
dp.BigNumber(
164+
heading="Starting time index",
165+
value=self.datasets.get_earliest_timestamp().strftime(
166+
"%B %d, %Y"
167+
),
172168
),
169+
dp.BigNumber(
170+
heading="Ending time index",
171+
value=self.datasets.get_latest_timestamp().strftime(
172+
"%B %d, %Y"
173+
),
174+
),
175+
dp.BigNumber(
176+
heading="Num series",
177+
value=len(self.datasets.list_series_ids()),
178+
),
179+
columns=4,
173180
),
174-
dp.BigNumber(
175-
heading="Num series",
176-
value=len(self.datasets.list_series_ids()),
177-
),
178-
columns=4,
179-
),
180-
]
181+
]
182+
)
181183

182184
first_10_rows_blocks = [
183185
dp.DataTable(
@@ -214,33 +216,39 @@ def generate_report(self):
214216
last_10_title = dp.Text("### Last 10 Rows of Data")
215217
summary_title = dp.Text("### Data Summary Statistics")
216218

217-
if series_name is not None:
218-
first_10_section = [
219-
first_10_title,
220-
series_subtext,
221-
dp.Select(blocks=first_10_rows_blocks),
222-
]
223-
last_10_section = [
224-
last_10_title,
225-
series_subtext,
226-
dp.Select(blocks=last_10_rows_blocks),
227-
]
228-
summary_section = [
229-
summary_title,
230-
series_subtext,
231-
dp.Select(blocks=data_summary_blocks),
232-
]
219+
if series_name is not None and len(self.datasets.list_series_ids()) > 1:
220+
data_summary_sec = dp.Blocks(
221+
blocks=[
222+
first_10_title,
223+
series_subtext,
224+
dp.Select(blocks=first_10_rows_blocks),
225+
last_10_title,
226+
series_subtext,
227+
dp.Select(blocks=last_10_rows_blocks),
228+
summary_title,
229+
series_subtext,
230+
dp.Select(blocks=data_summary_blocks),
231+
dp.Text("----"),
232+
]
233+
)
233234
else:
234-
first_10_section = [first_10_title, first_10_rows_blocks[0]]
235-
last_10_section = [last_10_title, last_10_rows_blocks[0]]
236-
summary_section = [summary_title, data_summary_blocks[0]]
237-
238-
summary = dp.Blocks(
239-
blocks=header_section
240-
+ first_10_section
241-
+ last_10_section
242-
+ summary_section
243-
+ [dp.Text("----")]
235+
data_summary_sec = dp.Blocks(
236+
blocks=[
237+
first_10_title,
238+
first_10_rows_blocks[0],
239+
last_10_title,
240+
last_10_rows_blocks[0],
241+
summary_title,
242+
data_summary_blocks[0],
243+
dp.Text("----"),
244+
]
245+
)
246+
247+
summary = dp.Group(
248+
blocks=[
249+
header_section,
250+
data_summary_sec,
251+
]
244252
)
245253

246254
test_metrics_sections = []
@@ -275,7 +283,8 @@ def generate_report(self):
275283
yaml_appendix_title = dp.Text(f"## Reference: YAML File")
276284
yaml_appendix = dp.Code(code=self.config.to_yaml(), language="yaml")
277285
report_sections = (
278-
[title_text, summary]
286+
[title_text]
287+
+ [summary]
279288
+ forecast_plots
280289
+ other_sections
281290
+ test_metrics_sections

tests/operators/anomaly/test_anomaly_simple.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,7 @@ def test_artificial_big(model):
9191
yaml_i["spec"]["target_category_columns"] = [TARGET_CATEGORY_COLUMN]
9292
yaml_i["spec"]["datetime_column"]["name"] = DATETIME_COLUMN
9393

94-
run(yaml_i, backend="local", debug=False)
94+
run(yaml_i, backend="operator.local", debug=False)
9595

9696
# with open(anomaly_yaml_filename, "w") as f:
9797
# f.write(yaml.dump(yaml_i))
@@ -131,7 +131,7 @@ def test_artificial_small(model):
131131
yaml_i["spec"]["output_directory"]["url"] = output_dirname
132132
yaml_i["spec"]["contamination"] = 0.3
133133

134-
run(yaml_i, backend="local", debug=False)
134+
run(yaml_i, backend="operator.local", debug=False)
135135

136136
# with open(anomaly_yaml_filename, "w") as f:
137137
# f.write(yaml.dump(yaml_i))
@@ -185,7 +185,7 @@ def test_validation(model):
185185
yaml_i["spec"]["output_directory"]["url"] = output_dirname
186186
yaml_i["spec"]["contamination"] = 0.05
187187

188-
run(yaml_i, backend="local", debug=False)
188+
run(yaml_i, backend="operator.local", debug=False)
189189
# with open(anomaly_yaml_filename, "w") as f:
190190
# f.write(yaml.dump(yaml_i))
191191
# sleep(0.1)
@@ -209,7 +209,7 @@ def test_load_datasets(model, data_dict):
209209
yaml_i["spec"]["datetime_column"]["name"] = data_dict["dt_col"]
210210
yaml_i["spec"]["output_directory"]["url"] = output_dirname
211211

212-
run(yaml_i, backend="local", debug=False)
212+
run(yaml_i, backend="operator.local", debug=False)
213213

214214
# with open(f"{tmpdirname}/anomaly.yaml", "w") as f:
215215
# f.write(yaml.dump(yaml_i))

tests/operators/forecast/test_datasets.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -162,7 +162,7 @@ def test_load_datasets(model, dataset_name):
162162
# f"ads operator run -f {forecast_yaml_filename} --debug", shell=True # --debug
163163
# )
164164
# sleep(0.1)
165-
run(yaml_i, backend="local", debug=False)
165+
run(yaml_i, backend="operator.local", debug=False)
166166
subprocess.run(f"ls -a {output_data_path}", shell=True)
167167
if yaml_i["spec"]["generate_explanations"] and model != "autots":
168168
glb_expl = pd.read_csv(f"{tmpdirname}/results/global_explanation.csv")

tests/operators/forecast/test_errors.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -169,7 +169,7 @@ def operator_setup():
169169

170170

171171
def run_yaml(tmpdirname, yaml_i, output_data_path):
172-
run(yaml_i, backend="local", debug=True)
172+
run(yaml_i, backend="operator.local", debug=True)
173173
subprocess.run(f"ls -a {output_data_path}", shell=True)
174174

175175
test_metrics = pd.read_csv(f"{tmpdirname}/results/test_metrics.csv")

0 commit comments

Comments (0)