Skip to content

Commit 5d1a36d

Browse files
committed
higher rtol
1 parent bcb5cbe commit 5d1a36d

File tree

1 file changed

+7
-7
lines changed

sklearn/linear_model/_glm/tests/test_glm.py

Lines changed: 7 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -8,7 +8,6 @@
 import numpy as np
 import pytest
 import scipy
-from numpy.testing import assert_allclose
 from scipy import linalg
 from scipy.optimize import minimize, root

@@ -28,6 +27,7 @@
 from sklearn.linear_model._linear_loss import LinearModelLoss
 from sklearn.metrics import d2_tweedie_score, mean_poisson_deviance
 from sklearn.model_selection import train_test_split
+from sklearn.utils._testing import assert_allclose

 SOLVERS = ["lbfgs", "newton-cholesky"]


@@ -636,11 +636,11 @@ def test_glm_identity_regression(fit_intercept):
     )
     if fit_intercept:
         glm.fit(X[:, 1:], y)
-        assert_allclose(glm.coef_, coef[1:], rtol=1e-10)
-        assert_allclose(glm.intercept_, coef[0], rtol=1e-10)
+        assert_allclose(glm.coef_, coef[1:])
+        assert_allclose(glm.intercept_, coef[0])
     else:
         glm.fit(X, y)
-        assert_allclose(glm.coef_, coef, rtol=1e-12)
+        assert_allclose(glm.coef_, coef)


 @pytest.mark.parametrize("fit_intercept", [False, True])
@@ -663,12 +663,12 @@ def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator):
     # sample_weight=np.ones(..) should be equivalent to sample_weight=None
     sample_weight = np.ones(y.shape)
     glm.fit(X, y, sample_weight=sample_weight)
-    assert_allclose(glm.coef_, coef, rtol=1e-12)
+    assert_allclose(glm.coef_, coef)

     # sample_weight are normalized to 1 so, scaling them has no effect
     sample_weight = 2 * np.ones(y.shape)
     glm.fit(X, y, sample_weight=sample_weight)
-    assert_allclose(glm.coef_, coef, rtol=1e-12)
+    assert_allclose(glm.coef_, coef)

     # setting one element of sample_weight to 0 is equivalent to removing
     # the corresponding sample
@@ -677,7 +677,7 @@ def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator):
     glm.fit(X, y, sample_weight=sample_weight)
     coef1 = glm.coef_.copy()
     glm.fit(X[:-1], y[:-1])
-    assert_allclose(glm.coef_, coef1, rtol=1e-12)
+    assert_allclose(glm.coef_, coef1)

     # check that multiplying sample_weight by 2 is equivalent
     # to repeating corresponding samples twice

0 commit comments

Comments
 (0)