Commit 9eea5b7

MAINT unpack 0-dim NumPy array instead of implicit conversion (scikit-learn#26345)
Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
Parent: 66733c4

File tree: 7 files changed, +18 −10 lines
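The pattern applied across all seven files: wherever a size-1 NumPy array was converted to a Python scalar implicitly (via float(), int(), or assignment into a float array), the conversion is now made explicit with ndarray.item(). A minimal sketch of the before/after, assuming the NumPy >= 1.25 deprecation of implicit ndim > 0 conversions is the motivation (values are illustrative):

import numpy as np

arr = np.array([3.14])       # shape (1,): size 1, but ndim > 0

# Implicit conversion, deprecated in NumPy 1.25:
#   float(arr)  ->  DeprecationWarning: Conversion of an array with
#                   ndim > 0 to a scalar is deprecated
val = arr.item()             # explicit unpacking of any size-1 array
print(type(val), val)        # <class 'float'> 3.14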

sklearn/linear_model/_logistic.py

Lines changed: 3 additions & 0 deletions
@@ -506,6 +506,9 @@ def _logistic_regression_path(
                 w0 = np.concatenate([coef_.ravel(), intercept_])
             else:
                 w0 = coef_.ravel()
+            # n_iter_i is an array for each class. However, `target` is always encoded
+            # in {-1, 1}, so we only take the first element of n_iter_i.
+            n_iter_i = n_iter_i.item()
 
         elif solver in ["sag", "saga"]:
             if multi_class == "multinomial":
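A short sketch of why the unpacking is safe here, with a hypothetical iteration count: for a {-1, 1}-encoded target the solver reports a single per-class count, and .item() fails loudly if that assumption ever breaks:

import numpy as np

n_iter_i = np.array([7])    # hypothetical: one count for the binary problem
print(n_iter_i.item())      # 7, a plain Python int

# np.array([7, 9]).item() would raise ValueError: can only convert an
# array of size 1 to a Python scalar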

sklearn/linear_model/tests/test_sag.py

Lines changed: 2 additions & 2 deletions
@@ -95,7 +95,7 @@ def sag(
 
     for epoch in range(n_iter):
         for k in range(n_samples):
-            idx = int(rng.rand(1) * n_samples)
+            idx = int(rng.rand() * n_samples)
             # idx = k
             entry = X[idx]
             seen.add(idx)
@@ -167,7 +167,7 @@ def sag_sparse(
     for epoch in range(n_iter):
         for k in range(n_samples):
             # idx = k
-            idx = int(rng.rand(1) * n_samples)
+            idx = int(rng.rand() * n_samples)
             entry = X[idx]
             seen.add(idx)
 
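This test fix removes the array at its source instead of unpacking it: rng.rand(1) returns a shape-(1,) ndarray, so int(...) relied on the deprecated implicit conversion, while rng.rand() returns a plain Python float. A quick sketch:

import numpy as np

rng = np.random.RandomState(42)
a = rng.rand(1)     # ndarray, shape (1,); int(a * 10) hits the deprecation
b = rng.rand()      # plain float; int(b * 10) is always fine
print(type(a), type(b))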

sklearn/metrics/tests/test_pairwise.py

Lines changed: 2 additions & 1 deletion
@@ -298,7 +298,8 @@ def test_pairwise_precomputed_non_negative():
 def callable_rbf_kernel(x, y, **kwds):
     # Callable version of pairwise.rbf_kernel.
     K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
-    return K
+    # unpack the output since this is a scalar packed in a 0-dim array
+    return K.item()
 
 
 @pytest.mark.parametrize(
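When the kernel is evaluated on a single pair of samples, rbf_kernel returns a size-1 Gram matrix rather than a bare scalar; that is what the .item() call unwraps. A runnable sketch with illustrative inputs:

import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

x, y = np.array([1.0, 2.0]), np.array([2.0, 3.0])
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y))
print(K.shape)      # (1, 1): a size-1 matrix, not a scalar
print(K.item())     # plain Python float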

sklearn/mixture/tests/test_bayesian_mixture.py

Lines changed: 1 addition & 1 deletion
@@ -58,7 +58,7 @@ def test_log_wishart_norm():
                 ),
                 0,
             )
-        )
+        ).item()
     predected_norm = _log_wishart_norm(
         degrees_of_freedom, log_det_precisions_chol, n_features
     )
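The same deprecation fires when a size-1 array is assigned into a slot of a float array, which is the situation here: the np.sum(gammaln(...), 0) reduction leaves a shape-(1,) result, so the full expression stored in expected_norm[k] was still an array. A minimal sketch of that failure mode:

import numpy as np

expected = np.empty(3)
partial = np.sum(np.ones((4, 1)), axis=0)   # shape (1,): size-1 reduction
# expected[0] = partial                     # DeprecationWarning on NumPy >= 1.25
expected[0] = partial.item()                # explicit scalar, warning-free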

sklearn/tests/test_discriminant_analysis.py

Lines changed: 1 addition & 1 deletion
@@ -187,7 +187,7 @@ def generate_dataset(n_samples, centers, covariances, random_state=None):
     sample = np.array([[-22, 22]])
 
     def discriminant_func(sample, coef, intercept, clazz):
-        return np.exp(intercept[clazz] + np.dot(sample, coef[clazz]))
+        return np.exp(intercept[clazz] + np.dot(sample, coef[clazz])).item()
 
     prob = np.array(
         [
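With the (1, 2)-shaped sample, np.dot(sample, coef[clazz]) produces a shape-(1,) array and np.exp preserves that shape, so the helper was returning a size-1 array into np.array([...]). A sketch with hypothetical coefficients:

import numpy as np

sample = np.array([[-22, 22]])      # shape (1, 2), as in the test
coef_k = np.array([0.5, -0.25])     # hypothetical class coefficients
intercept_k = 0.1                   # hypothetical intercept

raw = np.exp(intercept_k + np.dot(sample, coef_k))
print(raw.shape)                    # (1,): still an array
print(raw.item())                   # plain Python float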

sklearn/tree/_export.py

Lines changed: 7 additions & 4 deletions
@@ -11,6 +11,7 @@
 # Li Li <aiki.nogard@gmail.com>
 # Giuseppe Vettigli <vettigli@gmail.com>
 # License: BSD 3 clause
+from collections.abc import Iterable
 from io import StringIO
 from numbers import Integral
 
@@ -247,7 +248,7 @@ def get_color(self, value):
             color = list(self.colors["rgb"][np.argmax(value)])
             sorted_values = sorted(value, reverse=True)
             if len(sorted_values) == 1:
-                alpha = 0
+                alpha = 0.0
             else:
                 alpha = (sorted_values[0] - sorted_values[1]) / (1 - sorted_values[1])
         else:
@@ -256,8 +257,6 @@ def get_color(self, value):
             alpha = (value - self.colors["bounds"][0]) / (
                 self.colors["bounds"][1] - self.colors["bounds"][0]
             )
-            # unpack numpy scalars
-            alpha = float(alpha)
         # compute the color as alpha against white
         color = [int(round(alpha * c + (1 - alpha) * 255, 0)) for c in color]
         # Return html color code in #RRGGBB format
@@ -277,8 +276,12 @@ def get_fill_color(self, tree, node_id):
         if tree.n_outputs == 1:
             node_val = tree.value[node_id][0, :] / tree.weighted_n_node_samples[node_id]
             if tree.n_classes[0] == 1:
-                # Regression
+                # Regression or degraded classification with single class
                 node_val = tree.value[node_id][0, :]
+                if isinstance(node_val, Iterable) and self.colors["bounds"] is not None:
+                    # Only unpack the float for the regression tree case.
+                    # Classification tree requires an Iterable in `get_color`.
+                    node_val = node_val.item()
         else:
             # If multi-output color node by impurity
             node_val = -tree.impurity[node_id]
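This is the one place where unpacking is conditional: regression nodes feed a scalar alpha computed from self.colors["bounds"] into get_color, while classification nodes must stay iterable because get_color calls np.argmax(value) and sorts the per-class values. A simplified sketch of the branch (stand-in names; the real code checks isinstance(node_val, Iterable)):

import numpy as np

bounds = (0.0, 1.0)             # stand-in for self.colors["bounds"]
node_val = np.array([0.73])     # single-output regression node value

if node_val.size == 1 and bounds is not None:
    node_val = node_val.item()  # scalar for the regression colormap
# a classification node keeps e.g. np.array([0.2, 0.8]) intact, since
# the color lookup iterates over the per-class values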

sklearn/utils/random.py

Lines changed: 2 additions & 1 deletion
@@ -75,7 +75,8 @@ def _random_choice_csc(n_samples, classes, class_probability=None, random_state=
         # If there are nonzero classes choose randomly using class_probability
         rng = check_random_state(random_state)
         if classes[j].shape[0] > 1:
-            p_nonzero = 1 - class_prob_j[classes[j] == 0]
+            index_class_0 = np.flatnonzero(classes[j] == 0).item()
+            p_nonzero = 1 - class_prob_j[index_class_0]
             nnz = int(n_samples * p_nonzero)
             ind_sample = sample_without_replacement(
                 n_population=n_samples, n_samples=nnz, random_state=random_state
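Boolean-mask indexing always returns an array, so 1 - class_prob_j[classes[j] == 0] kept p_nonzero packed in a shape-(1,) array; resolving the scalar index first yields a plain float. A sketch with illustrative values:

import numpy as np

classes_j = np.array([0, 1, 2])
class_prob_j = np.array([0.5, 0.3, 0.2])

masked = class_prob_j[classes_j == 0]          # array([0.5]): still 1-D
idx = np.flatnonzero(classes_j == 0).item()    # 0, a plain Python int
p_nonzero = 1 - class_prob_j[idx]              # scalar 0.5, no deprecation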
