@@ -43,10 +43,10 @@ and will store the coefficients :math:`w` of the linear model in its
``coef_`` member::

    >>> from sklearn import linear_model
-   >>> clf = linear_model.LinearRegression()
-   >>> clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
+   >>> reg = linear_model.LinearRegression()
+   >>> reg.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
    LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False)
-   >>> clf.coef_
+   >>> reg.coef_
    array([ 0.5, 0.5])
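Once fitted, the estimator can also be used for prediction. The following is only a rough sketch: the query point ``[3, 3]`` is an assumption, and the call is skipped in doctests because the exact floating-point formatting may differ::

    >>> reg.predict([[3, 3]])    # doctest: +SKIP
    array([ 3.])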
However, coefficient estimates for Ordinary Least Squares rely on the
@@ -101,13 +101,13 @@ arrays X, y and will store the coefficients :math:`w` of the linear model in
its ``coef_`` member::

    >>> from sklearn import linear_model
-   >>> clf = linear_model.Ridge(alpha=.5)
-   >>> clf.fit([[0, 0], [0, 0], [1, 1]], [0, .1, 1]) # doctest: +NORMALIZE_WHITESPACE
+   >>> reg = linear_model.Ridge(alpha=.5)
+   >>> reg.fit([[0, 0], [0, 0], [1, 1]], [0, .1, 1]) # doctest: +NORMALIZE_WHITESPACE
    Ridge(alpha=0.5, copy_X=True, fit_intercept=True, max_iter=None,
          normalize=False, random_state=None, solver='auto', tol=0.001)
-   >>> clf.coef_
+   >>> reg.coef_
    array([ 0.34545455, 0.34545455])
-   >>> clf.intercept_ #doctest: +ELLIPSIS
+   >>> reg.intercept_ #doctest: +ELLIPSIS
    0.13636...
@@ -138,11 +138,11 @@ as GridSearchCV except that it defaults to Generalized Cross-Validation
(GCV), an efficient form of leave-one-out cross-validation::

    >>> from sklearn import linear_model
-   >>> clf = linear_model.RidgeCV(alphas=[0.1, 1.0, 10.0])
-   >>> clf.fit([[0, 0], [0, 0], [1, 1]], [0, .1, 1]) # doctest: +SKIP
+   >>> reg = linear_model.RidgeCV(alphas=[0.1, 1.0, 10.0])
+   >>> reg.fit([[0, 0], [0, 0], [1, 1]], [0, .1, 1]) # doctest: +SKIP
    RidgeCV(alphas=[0.1, 1.0, 10.0], cv=None, fit_intercept=True, scoring=None,
        normalize=False)
-   >>> clf.alpha_ # doctest: +SKIP
+   >>> reg.alpha_ # doctest: +SKIP
    0.1
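For comparison, roughly the same search can be spelled out with ``GridSearchCV`` on a ``Ridge`` estimator. This is only a sketch: the toy data, the 3-fold split and the availability of ``sklearn.model_selection`` are assumptions, so the calls are skipped in doctests::

    >>> import numpy as np
    >>> from sklearn.linear_model import Ridge
    >>> from sklearn.model_selection import GridSearchCV
    >>> X = np.array([[0, 0], [0, 0], [1, 1], [2, 2], [3, 3], [4, 4]])
    >>> y = np.array([0, .1, 1, 2, 3, 4])
    >>> # exhaustive search over the same alpha grid, but with explicit CV folds
    >>> search = GridSearchCV(Ridge(), {'alpha': [0.1, 1.0, 10.0]}, cv=3)
    >>> best_alpha = search.fit(X, y).best_params_['alpha']    # doctest: +SKIP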

.. topic:: References
@@ -182,12 +182,12 @@ the algorithm to fit the coefficients. See :ref:`least_angle_regression`
for another implementation::

    >>> from sklearn import linear_model
-   >>> clf = linear_model.Lasso(alpha=0.1)
-   >>> clf.fit([[0, 0], [1, 1]], [0, 1])
+   >>> reg = linear_model.Lasso(alpha=0.1)
+   >>> reg.fit([[0, 0], [1, 1]], [0, 1])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute=False, random_state=None,
       selection='cyclic', tol=0.0001, warm_start=False)
-   >>> clf.predict([[1, 1]])
+   >>> reg.predict([[1, 1]])
    array([ 0.8])

Also useful for lower-level tasks is the function :func:`lasso_path` that
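A minimal sketch of calling :func:`lasso_path` on a toy problem (the data is an assumption, and the grid of regularization strengths is left to the solver, so the calls are skipped in doctests)::

    >>> import numpy as np
    >>> from sklearn.linear_model import lasso_path
    >>> X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
    >>> y = np.array([0., 1., 2., 3.])
    >>> # coefficients along a path of 5 decreasing regularization strengths;
    >>> # coefs has shape (n_features, n_alphas)
    >>> alphas, coefs, _ = lasso_path(X, y, n_alphas=5)    # doctest: +SKIP
    >>> coefs.shape                                        # doctest: +SKIP
    (2, 5)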
@@ -441,12 +441,12 @@ function of the norm of its coefficients.

::

    >>> from sklearn import linear_model
-   >>> clf = linear_model.LassoLars(alpha=.1)
-   >>> clf.fit([[0, 0], [1, 1]], [0, 1]) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
+   >>> reg = linear_model.LassoLars(alpha=.1)
+   >>> reg.fit([[0, 0], [1, 1]], [0, 1]) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLars(alpha=0.1, copy_X=True, eps=..., fit_intercept=True,
         fit_path=True, max_iter=500, normalize=True, positive=False,
         precompute='auto', verbose=False)
-   >>> clf.coef_ # doctest: +ELLIPSIS
+   >>> reg.coef_ # doctest: +ELLIPSIS
    array([ 0.717157..., 0. ])

.. topic:: Examples:
@@ -604,21 +604,21 @@ Bayesian Ridge Regression is used for regression::

    >>> from sklearn import linear_model
    >>> X = [[0., 0.], [1., 1.], [2., 2.], [3., 3.]]
    >>> Y = [0., 1., 2., 3.]
-   >>> clf = linear_model.BayesianRidge()
-   >>> clf.fit(X, Y)
+   >>> reg = linear_model.BayesianRidge()
+   >>> reg.fit(X, Y)
    BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False, copy_X=True,
           fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06, n_iter=300,
           normalize=False, tol=0.001, verbose=False)

After being fitted, the model can then be used to predict new values::

-   >>> clf.predict([[1, 0.]])
+   >>> reg.predict([[1, 0.]])
    array([ 0.50000013])

The weights :math:`w` of the model can be accessed::

-   >>> clf.coef_
+   >>> reg.coef_
    array([ 0.49999993, 0.49999993])
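In recent scikit-learn versions, ``BayesianRidge`` also exposes the estimated precision of the noise and of the weights as ``alpha_`` and ``lambda_`` (an assumption not shown in the snippet above). Their exact values depend on the iterative estimation, so this sketch leaves the outputs unchecked::

    >>> reg.alpha_     # doctest: +SKIP
    >>> reg.lambda_    # doctest: +SKIP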
Due to the Bayesian framework, the weights found are slightly different to the
@@ -1233,12 +1233,19 @@ This way, we can solve the XOR problem with a linear classifier::

    >>> import numpy as np
    >>> X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    >>> y = X[:, 0] ^ X[:, 1]
-   >>> X = PolynomialFeatures(interaction_only=True).fit_transform(X)
+   >>> y
+   array([0, 1, 1, 0])
+   >>> X = PolynomialFeatures(interaction_only=True).fit_transform(X).astype(int)
    >>> X
-   array([[ 1.,  0.,  0.,  0.],
-          [ 1.,  0.,  1.,  0.],
-          [ 1.,  1.,  0.,  0.],
-          [ 1.,  1.,  1.,  1.]])
+   array([[1, 0, 0, 0],
+          [1, 0, 1, 0],
+          [1, 1, 0, 0],
+          [1, 1, 1, 1]])
    >>> clf = Perceptron(fit_intercept=False, n_iter=10, shuffle=False).fit(X, y)
+
+ And the classifier "predictions" are perfect::
+
+   >>> clf.predict(X)
+   array([0, 1, 1, 0])
    >>> clf.score(X, y)
    1.0
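It can also be instructive to inspect the learned weights: for any weight vector that classifies these four points perfectly, the interaction column must carry a negative coefficient, which is what lets the expanded representation encode XOR. The exact values depend on the perceptron's update schedule, so this sketch leaves the output unchecked::

    >>> # columns correspond to [1, x1, x2, x1*x2]
    >>> clf.coef_    # doctest: +SKIP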