Commit 5dd5811

jeremiedbb authored
DOC Release highlights for 1.3 (scikit-learn#26526)
Co-authored-by: adrinjalali <adrin.jalali@gmail.com>
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
1 parent 693a3ac commit 5dd5811

File tree

2 files changed: +159 -0 lines changed

doc/whats_new/v1.3.rst

Lines changed: 3 additions & 0 deletions
@@ -9,6 +9,9 @@ Version 1.3.0
 
 **June 2023**
 
+For a short description of the main highlights of the release, please refer to
+:ref:`sphx_glr_auto_examples_release_highlights_plot_release_highlights_1_3_0.py`.
+
 .. include:: changelog_legend.inc
 
 Changed models
examples/release_highlights/plot_release_highlights_1_3_0.py

Lines changed: 156 additions & 0 deletions
@@ -0,0 +1,156 @@
# flake8: noqa
"""
=======================================
Release Highlights for scikit-learn 1.3
=======================================

.. currentmodule:: sklearn

We are pleased to announce the release of scikit-learn 1.3! Many bug fixes
and improvements were added, as well as some new key features. We detail
below a few of the major features of this release. **For an exhaustive list of
all the changes**, please refer to the :ref:`release notes <changes_1_3>`.

To install the latest version (with pip)::

    pip install --upgrade scikit-learn

or with conda::

    conda install -c conda-forge scikit-learn

"""

# %%
# Metadata Routing
# ----------------
# We are in the process of introducing a new way to route metadata such as
# ``sample_weight`` throughout the codebase, which affects how meta-estimators
# such as :class:`pipeline.Pipeline` and :class:`model_selection.GridSearchCV`
# route metadata. While the infrastructure for this feature is already included
# in this release, the work is ongoing: the feature is still under development
# and most meta-estimators do not support it yet. You can read more about it
# in the :ref:`Metadata Routing User Guide <metadata_routing>`.
#
# Third party developers can already start incorporating this into their
# meta-estimators. For more details, see the
# :ref:`metadata routing developer guide
# <sphx_glr_auto_examples_miscellaneous_plot_metadata_routing.py>`.

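# %%
# As a minimal sketch of the request mechanism described above: once the
# still-experimental routing machinery is switched on via
# ``sklearn.set_config``, an estimator that consumes ``sample_weight`` can
# declare that the metadata should be routed to its ``fit`` method.
import sklearn
from sklearn.linear_model import LogisticRegression

# Opt in to the experimental routing infrastructure.
sklearn.set_config(enable_metadata_routing=True)

# Request that ``sample_weight`` passed to a meta-estimator be routed to
# this estimator's ``fit``.
log_reg = LogisticRegression().set_fit_request(sample_weight=True)

# Restore the default configuration for the rest of this example.
sklearn.set_config(enable_metadata_routing=False)
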
# %%
# HDBSCAN: hierarchical density-based clustering
# ----------------------------------------------
# Originally hosted in the scikit-learn-contrib repository, :class:`cluster.HDBSCAN`
# has been adopted into scikit-learn. It's missing a few features from the original
# implementation which will be added in future releases.
# By performing a modified version of :class:`cluster.DBSCAN` over multiple epsilon
# values simultaneously, :class:`cluster.HDBSCAN` finds clusters of varying densities,
# making it more robust to parameter selection than :class:`cluster.DBSCAN`.
# More details in the :ref:`User Guide <hdbscan>`.
import numpy as np
from sklearn.cluster import HDBSCAN
from sklearn.datasets import load_digits
from sklearn.metrics import v_measure_score

X, true_labels = load_digits(return_X_y=True)
print(f"number of digits: {len(np.unique(true_labels))}")

hdbscan = HDBSCAN(min_cluster_size=15).fit(X)
non_noisy_labels = hdbscan.labels_[hdbscan.labels_ != -1]
print(f"number of clusters found: {len(np.unique(non_noisy_labels))}")

print(v_measure_score(true_labels[hdbscan.labels_ != -1], non_noisy_labels))

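# %%
# For a quick sketch of the parameter sensitivity mentioned above, compare a
# plain :class:`cluster.DBSCAN` run at a few single ``eps`` values (the values
# below are illustrative guesses, not tuned settings): the label structure
# changes markedly with ``eps``, whereas HDBSCAN needed no such choice.
from sklearn.cluster import DBSCAN

for eps in (15, 20, 25):  # illustrative, untuned epsilon values
    dbscan = DBSCAN(eps=eps).fit(X)
    print(f"eps={eps}: {len(np.unique(dbscan.labels_))} unique labels (noise included)")
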
# %%
# TargetEncoder: a new category encoding strategy
# -----------------------------------------------
# Well suited for categorical features with high cardinality,
# :class:`preprocessing.TargetEncoder` encodes the categories based on a shrunk
# estimate of the average target values for observations belonging to that category.
# More details in the :ref:`User Guide <target_encoder>`.
import numpy as np
from sklearn.preprocessing import TargetEncoder

X = np.array([["cat"] * 30 + ["dog"] * 20 + ["snake"] * 38], dtype=object).T
y = [90.3] * 30 + [20.4] * 20 + [21.2] * 38

enc = TargetEncoder(random_state=0)
X_trans = enc.fit_transform(X, y)

enc.encodings_

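# %%
# Note that :meth:`~preprocessing.TargetEncoder.fit_transform` uses an internal
# cross-fitting scheme on the training data to limit target leakage, so its
# output generally differs from calling ``transform`` on the same rows after
# fitting:
print(np.allclose(X_trans, enc.transform(X)))
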
# %%
# Missing values support in decision trees
# ----------------------------------------
# The classes :class:`tree.DecisionTreeClassifier` and
# :class:`tree.DecisionTreeRegressor` now support missing values. For each potential
# threshold on the non-missing data, the splitter will evaluate the split with all the
# missing values going to the left node or the right node.
# More details in the :ref:`User Guide <tree_missing_value_support>`.
import numpy as np
from sklearn.tree import DecisionTreeClassifier

X = np.array([0, 1, 6, np.nan]).reshape(-1, 1)
y = [0, 0, 1, 1]

tree = DecisionTreeClassifier(random_state=0).fit(X, y)
tree.predict(X)

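# %%
# At predict time, a sample whose split feature is missing follows the branch
# that the missing values were assigned to during training; for instance:
print(tree.predict(np.array([[np.nan]])))
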
# %%
# New display `model_selection.ValidationCurveDisplay`
# ----------------------------------------------------
# :class:`model_selection.ValidationCurveDisplay` is now available to plot results
# from :func:`model_selection.validation_curve`.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ValidationCurveDisplay

X, y = make_classification(1000, 10, random_state=0)

_ = ValidationCurveDisplay.from_estimator(
    LogisticRegression(),
    X,
    y,
    param_name="C",
    param_range=np.geomspace(1e-5, 1e3, num=9),
    score_type="both",
    score_name="Accuracy",
)

# %%
# Gamma loss for gradient boosting
# --------------------------------
# The class :class:`ensemble.HistGradientBoostingRegressor` supports the
# Gamma deviance loss function via `loss="gamma"`. This loss function is useful for
# modeling strictly positive targets with a right-skewed distribution.
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_low_rank_matrix
from sklearn.ensemble import HistGradientBoostingRegressor

n_samples, n_features = 500, 10
rng = np.random.RandomState(0)
X = make_low_rank_matrix(n_samples, n_features, random_state=rng)
coef = rng.uniform(low=-10, high=20, size=n_features)
y = rng.gamma(shape=2, scale=np.exp(X @ coef) / 2)
gbdt = HistGradientBoostingRegressor(loss="gamma")
cross_val_score(gbdt, X, y).mean()

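# %%
# For comparison, an illustrative aside: the same cross-validation with the
# default ``loss="squared_error"`` on this strictly positive, right-skewed
# target (both runs report R^2, the default regression score):
gbdt_ls = HistGradientBoostingRegressor(loss="squared_error")
print(cross_val_score(gbdt_ls, X, y).mean())
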
# %%
# Grouping infrequent categories in :class:`preprocessing.OrdinalEncoder`
# -----------------------------------------------------------------------
# Similarly to :class:`preprocessing.OneHotEncoder`, the class
# :class:`preprocessing.OrdinalEncoder` now supports aggregating infrequent categories
# into a single output for each feature. The parameters to enable the gathering of
# infrequent categories are `min_frequency` and `max_categories`.
# See the :ref:`User Guide <encoder_infrequent_categories>` for more details.
from sklearn.preprocessing import OrdinalEncoder
import numpy as np

X = np.array(
    [["dog"] * 5 + ["cat"] * 20 + ["rabbit"] * 10 + ["snake"] * 3], dtype=object
).T
enc = OrdinalEncoder(min_frequency=6).fit(X)
enc.infrequent_categories_
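
# %%
# The infrequent categories ("dog" and "snake", each seen fewer than
# ``min_frequency=6`` times) are mapped to one shared ordinal code:
print(enc.transform([["dog"], ["snake"], ["cat"], ["rabbit"]]))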
