Commit c943c0a

154 ruff apply unsafe fixes to unit test (#155)
1 parent fbd8b5a commit c943c0a

File tree

9 files changed: +24 -26 lines changed

test_extras/test_chemprop/test_models.py

Lines changed: 1 addition & 1 deletion
@@ -75,7 +75,7 @@ def test_get_params(self) -> None:
         chemprop_model.set_params(**new_params)
         model_params = chemprop_model.get_params(deep=True)
         for param_name, param in new_params.items():
-            if param_name in ["model__agg"]:
+            if param_name in {"model__agg"}:
                 self.assertIsInstance(model_params[param_name], type(param))
                 continue
             self.assertEqual(param, model_params[param_name])
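
Note: the change above swaps a list literal for a set literal in a membership test. This is the kind of rewrite ruff only applies when unsafe fixes are explicitly enabled (presumably something like "ruff check --fix --unsafe-fixes"; the exact invocation is not recorded in the commit). A minimal sketch of why the two spellings are interchangeable here, reusing the value from the diff purely for illustration:

# Membership against a literal collection: a set literal is the idiomatic
# choice and, for larger collections, avoids a linear scan. The rewrite only
# preserves behaviour because the listed elements are hashable strings.
param_name = "model__agg"
assert (param_name in ["model__agg"]) == (param_name in {"model__agg"})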

test_extras/test_notebooks/test_notebooks.py

Lines changed: 0 additions & 2 deletions
@@ -52,7 +52,6 @@ def get_notebook_paths_from_dir(notebook_dir: Path) -> list[Path]:
     # Find all Jupyter notebook files in the directory
     notebooks_paths = []
     for notebook_path in notebook_dir.rglob("*.ipynb"):
-
         if ".ipynb_checkpoints" in str(notebook_path.resolve()):
             # skip jetbrains checkpoints
             continue
@@ -115,7 +114,6 @@ def run_notebooks(
     nof_errors = 0
     # Loop through each notebook
     for notebooks_path in notebooks_paths:
-
         # Execute the notebook and capture the error code
         cmd = [
             "jupyter",

tests/test_elements/test_mol2any/test_mol2concatenated.py

Lines changed: 0 additions & 1 deletion
@@ -52,7 +52,6 @@ def test_generation(self) -> None:
         ]
 
         for fp_output_type in fingerprint_morgan_output_types:
-
             concat_vector_element = MolToConcatenatedVector(
                 [
                     (

tests/test_estimators/test_leader_picker_clustering.py

Lines changed: 0 additions & 1 deletion
@@ -87,7 +87,6 @@ def test_leader_picker_pipeline(self) -> None:
         for dist, exp_labels, exp_centroids in zip(
             distances, expected_labels, expected_centroids
         ):
-
             leader_picker = LeaderPickerClustering(distance_threshold=dist)
             pipeline = Pipeline(
                 [

tests/test_estimators/test_murcko_scaffold_clustering.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
     "c1ccccc1",
 ]
 
-SCAFFOLD_SMILES_TEST_GENERIC: list[str] = SCAFFOLD_SMILES + ["c1ncccc1"]
+SCAFFOLD_SMILES_TEST_GENERIC: list[str] = [*SCAFFOLD_SMILES, "c1ncccc1"]
 
 LINEAR_SMILES: list[str] = ["CC", "CCC", "CCCN"]
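
This is the concatenation-to-unpacking pattern that recurs in the diffs below (test_nearest_neighbors.py and test_similarity_transformation.py). A minimal sketch of the equivalence, using an illustrative stand-in for the module constant:

# Both expressions build a new list with the extra element appended; the
# unpacking form additionally accepts any iterable, not just another list.
scaffold_smiles = ["c1ccccc1"]  # illustrative stand-in, not the real constant
via_concat = scaffold_smiles + ["c1ncccc1"]
via_unpack = [*scaffold_smiles, "c1ncccc1"]
assert via_concat == via_unpack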

tests/test_estimators/test_nearest_neighbors.py

Lines changed: 4 additions & 4 deletions
@@ -154,7 +154,7 @@ def test_fit_predict_precomputed(self) -> None:
 
     def test_fit_predict_invalid(self) -> None:
         """Test the fit_predict method with invalid smiles."""
-        with_invald_smiles = ["CC1CC"] + TEST_SMILES
+        with_invald_smiles = ["CC1CC", *TEST_SMILES]
 
         error_filter = ErrorFilter(filter_everything=True)
         model = Pipeline(
@@ -174,14 +174,14 @@ def test_fit_predict_invalid(self) -> None:
             ]
         )
         result = model.fit_predict(with_invald_smiles, with_invald_smiles).tolist()
-        self.assertListEqual(result, [["invalid", "invalid"]] + TWO_NN)
+        self.assertListEqual(result, [["invalid", "invalid"], *TWO_NN])
 
         result_only_valid = model.predict(TEST_SMILES).tolist()
         self.assertListEqual(result_only_valid, TWO_NN)
 
     def test_fit_and_predict_invalid_with_distance(self) -> None:
         """Test the fit_predict method with invalid smiles and distance."""
-        with_invald_smiles = ["CC1CC"] + TEST_SMILES
+        with_invald_smiles = ["CC1CC", *TEST_SMILES]
 
         error_filter = ErrorFilter(filter_everything=True)
         model = Pipeline(
@@ -204,7 +204,7 @@ def test_fit_and_predict_invalid_with_distance(self) -> None:
         result = model.predict(with_invald_smiles, **{"return_distance": True})
         neighbors = result[:, :, 0]
         distances = result[:, :, 1]
-        self.assertListEqual(neighbors.tolist(), [["invalid", "invalid"]] + TWO_NN)
+        self.assertListEqual(neighbors.tolist(), [["invalid", "invalid"], *TWO_NN])
         self.assertTrue(
             1 - np.allclose(distances[1:, :].astype(np.float64), TWO_NN_SIMILARITIES)
         )

tests/test_estimators/test_similarity_transformation.py

Lines changed: 2 additions & 2 deletions
@@ -212,8 +212,8 @@ def test_error_handling(self) -> None:
                 ("error_replacer", PostPredictionWrapper(error_replacer)),
             ]
         )
-        full_pipeline.fit(COMPOUND_LIST + ["C#C#C"], IS_AROMATIC + [False])
-        prediction = full_pipeline.predict(COMPOUND_LIST + ["C#C#C"]).tolist()
+        full_pipeline.fit([*COMPOUND_LIST, "C#C#C"], [*IS_AROMATIC, False])
+        prediction = full_pipeline.predict([*COMPOUND_LIST, "C#C#C"]).tolist()
         self.assertListEqual(prediction[:-1], IS_AROMATIC)
         self.assertTrue(np.isnan(prediction[-1]))

tests/test_experimental/test_explainability/test_shap_explainers.py

Lines changed: 16 additions & 12 deletions
@@ -87,7 +87,8 @@ def _test_valid_explanation(
 
         self.assertIsInstance(explanation.feature_vector, np.ndarray)
         self.assertEqual(
-            (nof_features,), explanation.feature_vector.shape  # type: ignore[union-attr]
+            (nof_features,),
+            explanation.feature_vector.shape,  # type: ignore[union-attr]
         )
 
         # feature names should be a list of not empty strings
@@ -98,7 +99,8 @@ def _test_valid_explanation(
             )
         )
         self.assertEqual(
-            len(explanation.feature_names), explanation.feature_vector.shape[0]  # type: ignore
+            len(explanation.feature_names),  # type: ignore
+            explanation.feature_vector.shape[0],  # type: ignore
         )
 
         self.assertIsInstance(explanation.molecule, RDKitMol)
@@ -112,7 +114,8 @@ def _test_valid_explanation(
         if is_regressor(estimator):
             self.assertTrue((1,), explanation.prediction.shape)  # type: ignore[union-attr]
             self.assertEqual(
-                (nof_features,), explanation.feature_weights.shape  # type: ignore[union-attr]
+                (nof_features,),
+                explanation.feature_weights.shape,  # type: ignore[union-attr]
             )
         elif is_classifier(estimator):
             self.assertTrue((2,), explanation.prediction.shape)  # type: ignore[union-attr]
@@ -123,21 +126,25 @@ def _test_valid_explanation(
                 # https://github.com/shap/shap/issues/3177 returning only one feature weight
                 # which is also based on log odds. This check is a workaround until the bug is fixed.
                 self.assertEqual(
-                    (nof_features,), explanation.feature_weights.shape  # type: ignore[union-attr]
+                    (nof_features,),
+                    explanation.feature_weights.shape,  # type: ignore[union-attr]
                 )
             elif isinstance(estimator, SVC):
                 # SVC seems to be handled differently by SHAP. It returns only a one dimensional
                 # feature array for binary classification.
                 self.assertTrue(
-                    (1,), explanation.prediction.shape  # type: ignore[union-attr]
+                    (1,),
+                    explanation.prediction.shape,  # type: ignore[union-attr]
                 )
                 self.assertEqual(
-                    (nof_features,), explanation.feature_weights.shape  # type: ignore[union-attr]
+                    (nof_features,),
+                    explanation.feature_weights.shape,  # type: ignore[union-attr]
                 )
             else:
                 # normal binary classification case
                 self.assertEqual(
-                    (nof_features, 2), explanation.feature_weights.shape  # type: ignore[union-attr]
+                    (nof_features, 2),
+                    explanation.feature_weights.shape,  # type: ignore[union-attr]
                 )
         else:
             raise ValueError("Error in unittest. Unsupported estimator.")
@@ -176,7 +183,6 @@ def test_explanations_fingerprint_pipeline(  # pylint: disable=too-many-locals
         for estimators, explainer_type in zip(
             explainer_estimators, explainer_types, strict=True
         ):
-
             # test explanations with different estimators
             for estimator in estimators:
                 pipeline = Pipeline(
@@ -241,7 +247,6 @@ def test_explanations_pipeline_with_invalid_inputs(self) -> None:
 
         for estimator in estimators:
             for fill_value in fill_values:
-
                 # pipeline with ErrorFilter
                 error_filter1 = ErrorFilter()
                 pipeline1 = Pipeline(
@@ -271,11 +276,10 @@ def test_explanations_pipeline_with_invalid_inputs(self) -> None:
                 )
 
                 for pipeline in [pipeline1, pipeline2]:
-
                     pipeline.fit(TEST_SMILES_WITH_BAD_SMILES, CONTAINS_OX_BAD_SMILES)
 
                     explainer = SHAPTreeExplainer(pipeline)
-                    log_block = rdBase.BlockLogs()  # pylint: disable=unused-variable
+                    log_block = rdBase.BlockLogs()
                     explanations = explainer.explain(TEST_SMILES_WITH_BAD_SMILES)
                     del log_block
                     self.assertEqual(
@@ -289,7 +293,7 @@ def test_explanations_pipeline_with_invalid_inputs(self) -> None:
                     self.assertIsNotNone(mol_reader_subpipeline)
 
                     for i, explanation in enumerate(explanations):
-                        if i in [3, 7]:
+                        if i in {3, 7}:
                             self.assertFalse(explanation.is_valid())
                             continue
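
One change in this file is not a pure blank-line or literal fix: the pylint suppression on the rdBase.BlockLogs() assignment was dropped, presumably because it is no longer needed; the object is explicitly consumed by del log_block two lines later. For comparison, a minimal sketch of an alternative way to silence RDKit logging around a single call, assuming an RDKit version in which BlockLogs supports the context-manager protocol (this is an illustration, not what the commit does):

from rdkit import Chem, rdBase

# Sketch: silence RDKit log output only inside the block, so no throwaway
# variable and no del are needed (assumes BlockLogs works as a context
# manager in the installed RDKit version).
with rdBase.BlockLogs():
    mol = Chem.MolFromSmiles("CC1CC")  # invalid SMILES from the diffs; the parse error is suppressed
assert mol is None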

tests/test_pipeline.py

Lines changed: 0 additions & 2 deletions
@@ -233,7 +233,6 @@ def test_caching(self) -> None:
         for cache_activated in [False, True]:
             pipeline = get_exec_counted_rf_regressor(_RANDOM_STATE)
             with tempfile.TemporaryDirectory() as temp_dir:
-
                 if cache_activated:
                     cache_dir = Path(temp_dir) / ".cache"
                     mem = Memory(location=cache_dir, verbose=0)
@@ -300,7 +299,6 @@ def test_gridsearchcv(self) -> None:
         ]
 
         for test_data_dict in descriptor_elements_to_test:
-
             name = test_data_dict["name"]
             element = test_data_dict["element"]
             param_grid = test_data_dict["param_grid"]
