Commit 0ac76c4

Adjust tols in test_glm_sample_weight_consistency to avoid platform-specific failures caused by rounding errors
1 parent 7e74797 commit 0ac76c4

File tree

1 file changed: +5 -5 lines changed

sklearn/linear_model/_glm/tests/test_glm.py

Lines changed: 5 additions & 5 deletions
@@ -656,19 +656,20 @@ def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator):
     X = rng.rand(n_samples, n_features)
     y = rng.rand(n_samples)
     glm_params = dict(alpha=alpha, fit_intercept=fit_intercept)
+    tols = dict(rtol=1e-12, atol=1e-14)

     glm = GLMEstimator(**glm_params).fit(X, y)
     coef = glm.coef_.copy()

     # sample_weight=np.ones(..) should be equivalent to sample_weight=None
     sample_weight = np.ones(y.shape)
     glm.fit(X, y, sample_weight=sample_weight)
-    assert_allclose(glm.coef_, coef, rtol=1e-12)
+    assert_allclose(glm.coef_, coef, **tols)

     # sample_weight are normalized to 1 so, scaling them has no effect
     sample_weight = 2 * np.ones(y.shape)
     glm.fit(X, y, sample_weight=sample_weight)
-    assert_allclose(glm.coef_, coef, rtol=1e-12)
+    assert_allclose(glm.coef_, coef, **tols)

     # setting one element of sample_weight to 0 is equivalent to removing
     # the corresponding sample
@@ -677,7 +678,7 @@ def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator):
     glm.fit(X, y, sample_weight=sample_weight)
     coef1 = glm.coef_.copy()
     glm.fit(X[:-1], y[:-1])
-    assert_allclose(glm.coef_, coef1, rtol=1e-12)
+    assert_allclose(glm.coef_, coef1, **tols)

     # check that multiplying sample_weight by 2 is equivalent
     # to repeating corresponding samples twice
@@ -687,9 +688,8 @@ def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator):
     sample_weight_1[: n_samples // 2] = 2

     glm1 = GLMEstimator(**glm_params).fit(X, y, sample_weight=sample_weight_1)
-
     glm2 = GLMEstimator(**glm_params).fit(X2, y2, sample_weight=None)
-    assert_allclose(glm1.coef_, glm2.coef_)
+    assert_allclose(glm1.coef_, glm2.coef_, **tols)


 @pytest.mark.parametrize("solver", SOLVERS)
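
For background on why the added atol matters, here is a minimal sketch (not part of the commit; the coefficient values below are invented for illustration). numpy.testing.assert_allclose checks abs(actual - desired) <= atol + rtol * abs(desired), so with the default atol=0 a fitted coefficient that is essentially zero leaves almost no room for platform-dependent rounding noise, which is what caused the spurious failures.

import numpy as np
from numpy.testing import assert_allclose

# Hypothetical coefficient vectors: identical except for a tiny
# rounding difference on an entry that is essentially zero.
coef = np.array([0.5, 1e-15])
coef_refit = np.array([0.5, 1e-15 + 1e-16])

# With rtol only, the allowed error on the near-zero entry is
# 1e-12 * 1e-15 = 1e-27, far below the 1e-16 difference, so the
# check fails even though both fits are numerically equivalent.
try:
    assert_allclose(coef_refit, coef, rtol=1e-12)
except AssertionError:
    print("rtol-only comparison rejects the near-zero coefficient")

# A small absolute tolerance absorbs rounding noise around zero while
# keeping the comparison tight for well-scaled coefficients.
assert_allclose(coef_refit, coef, rtol=1e-12, atol=1e-14)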
