Skip to content

Commit 3fe3aa5

Browse files
Borda and SkafteNicki authored
releasing 1.7.0 (#3023)
* releasing `1.7.0` * fix tests for tracker * fix docs --------- Co-authored-by: Nicki Skafte <skaftenicki@gmail.com>
1 parent 9f4a001 commit 3fe3aa5

File tree

7 files changed

+26
-62
lines changed

7 files changed

+26
-62
lines changed

CHANGELOG.md

Lines changed: 10 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -6,47 +6,34 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
66

77
**Note: we move fast, but still we preserve 0.1 version (one feature release) back compatibility.**
88

9+
910
---
1011

11-
## [UnReleased] - 2024-MM-DD
12+
## [1.7.0] - 2025-03-20
1213

1314
### Added
1415

16+
- Additions to image domain:
17+
- Added `ARNIQA` metric ([#2953](https://github.com/Lightning-AI/torchmetrics/pull/2953))
18+
- Added `DeepImageStructureAndTextureSimilarity` ([#2993](https://github.com/Lightning-AI/torchmetrics/pull/2993))
19+
- Added support for more models and processors in `CLIPScore` ([#2978](https://github.com/Lightning-AI/torchmetrics/pull/2978))
1520
- Added `JensenShannonDivergence` metric to regression package ([#2992](https://github.com/Lightning-AI/torchmetrics/pull/2992))
16-
17-
1821
- Added `ClusterAccuracy` metric to cluster package ([#2777](https://github.com/Lightning-AI/torchmetrics/pull/2777))
19-
20-
21-
- Added `ARNIQA` metric to image domain ([#2953](https://github.com/PyTorchLightning/metrics/pull/2953))
22-
23-
24-
- Added support for more models and processors in `CLIPScore` ([#2978](https://github.com/PyTorchLightning/metrics/pull/2978))
25-
26-
27-
- Added `DeepImageStructureAndTextureSimilarity` to image package ([#2993](https://github.com/PyTorchLightning/metrics/pull/2993))
28-
29-
30-
- Added `Equal Error Rate (EER)` to classification package ([#3013](https://github.com/PyTorchLightning/metrics/pull/3013))
31-
32-
22+
- Added `Equal Error Rate (EER)` to classification package ([#3013](https://github.com/Lightning-AI/torchmetrics/pull/3013))
3323
- Added functional interface to `MeanAveragePrecision` metric ([#3011](https://github.com/Lightning-AI/torchmetrics/pull/3011))
3424

35-
3625
### Changed
3726

3827
- Making `num_classes` optional for `one-hot` inputs in `MeanIoU` ([#3012](https://github.com/Lightning-AI/torchmetrics/pull/3012))
3928

40-
4129
### Removed
4230

43-
- Removed `Dice` from classification ([#3017](https://github.com/PyTorchLightning/metrics/pull/3017))
44-
31+
- Removed `Dice` from classification ([#3017](https://github.com/Lightning-AI/torchmetrics/pull/3017))
4532

4633
### Fixed
4734

48-
- Fix edge case in integration between classwise wrapper and metric tracker ([#3008](https://github.com/Lightning-AI/torchmetrics/pull/3008))
49-
- Fix IndexError in MultiClassAccuracy when using top_k with single sample ([#3021](https://github.com/Lightning-AI/torchmetrics/pull/3021))
35+
- Fixed edge case in integration between class-wise wrapper and metric tracker ([#3008](https://github.com/Lightning-AI/torchmetrics/pull/3008))
36+
- Fixed `IndexError` in `MultiClassAccuracy` when using `top_k` with single sample ([#3021](https://github.com/Lightning-AI/torchmetrics/pull/3021))
5037

5138
---
5239

docs/source/pages/plotting.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -224,7 +224,7 @@ to rely on ``MetricTracker`` to keep track of the metrics over multiple steps.
224224
)
225225
226226
# Define tracker over the collection to easy keep track of the metrics over multiple steps
227-
tracker = torchmetrics.wrappers.MetricTracker(collection)
227+
tracker = torchmetrics.wrappers.MetricTracker(collection, maximize=True)
228228
229229
# Run "training" loop
230230
for step in range(num_steps):

docs/source/pyplots/tracker_binary.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,8 @@
2020
torchmetrics.Precision(task="binary"),
2121
confmat,
2222
roc,
23-
)
23+
),
24+
maximize=True,
2425
)
2526

2627
fig = plt.figure(layout="constrained", figsize=(6.8, 4.8), dpi=500)

src/torchmetrics/__about__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
__version__ = "1.7.0dev"
1+
__version__ = "1.7.0"
22
__author__ = "Lightning-AI et al."
33
__author_email__ = "name@pytorchlightning.ai"
44
__license__ = "Apache-2.0"

src/torchmetrics/clustering/davies_bouldin_score.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@ def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_
119119
>>> import torch
120120
>>> from torchmetrics.clustering import DaviesBouldinScore
121121
>>> metric = DaviesBouldinScore()
122-
>>> metric.update(torch.randn(10, 3), torch.randint(0, 2, (10,)))
122+
>>> metric.update(torch.randn(20, 3), torch.randint(0, 2, (20,)))
123123
>>> fig_, ax_ = metric.plot(metric.compute())
124124
125125
.. plot::
@@ -131,7 +131,7 @@ def plot(self, val: Union[Tensor, Sequence[Tensor], None] = None, ax: Optional[_
131131
>>> metric = DaviesBouldinScore()
132132
>>> values = [ ]
133133
>>> for _ in range(10):
134-
... values.append(metric(torch.randn(10, 3), torch.randint(0, 2, (10,))))
134+
... values.append(metric(torch.randn(20, 3), torch.randint(0, 2, (20,))))
135135
>>> fig_, ax_ = metric.plot(values)
136136
137137
"""

src/torchmetrics/wrappers/tracker.py

Lines changed: 6 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -108,9 +108,7 @@ class MetricTracker(ModuleList):
108108
maximize: Union[bool, list[bool]]
109109
_base_metric: Union[Metric, MetricCollection]
110110

111-
def __init__(
112-
self, metric: Union[Metric, MetricCollection], maximize: Optional[Union[bool, list[bool]]] = True
113-
) -> None:
111+
def __init__(self, metric: Union[Metric, MetricCollection], maximize: Union[bool, list[bool], None] = None) -> None:
114112
super().__init__()
115113
if not isinstance(metric, (Metric, MetricCollection)):
116114
raise TypeError(
@@ -140,14 +138,10 @@ def __init__(
140138
m_higher_is_better = [m.higher_is_better]
141139
self.maximize.extend(m_higher_is_better) # type: ignore[arg-type] # this is false alarm
142140
else:
143-
rank_zero_warn(
144-
"The default value for `maximize` will be changed from `True` to `None` in v1.7.0 of TorchMetrics,"
145-
"will automatically infer the value based on the `higher_is_better` attribute of the metric"
146-
" (if such attribute exists) or raise an error if it does not. If you are explicitly setting the"
147-
" `maximize` argument to either `True` or `False` already, you can ignore this warning.",
148-
FutureWarning,
149-
)
150-
141+
# The default value for `maximize` has been changed from `True` to `None` in v1.7.0 of TorchMetrics,
142+
# will automatically infer the value based on the `higher_is_better` attribute of the metric
143+
# (if such attribute exists) or raise an error if it does not. If you are explicitly setting the
144+
# `maximize` argument to either `True` or `False` already, you can ignore this warning.
151145
if not isinstance(maximize, (bool, list)):
152146
raise ValueError("Argument `maximize` should either be a single bool or list of bool")
153147
if isinstance(maximize, list) and not all(isinstance(m, bool) for m in maximize):
@@ -342,7 +336,7 @@ def plot(
342336
>>> import torch
343337
>>> from torchmetrics.wrappers import MetricTracker
344338
>>> from torchmetrics.classification import BinaryAccuracy
345-
>>> tracker = MetricTracker(BinaryAccuracy())
339+
>>> tracker = MetricTracker(BinaryAccuracy(), maximize=True)
346340
>>> for epoch in range(5):
347341
... tracker.increment()
348342
... for batch_idx in range(5):

tests/unittests/wrappers/test_tracker.py

Lines changed: 4 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
import warnings
1615

1716
import pytest
1817
import torch
@@ -25,7 +24,6 @@
2524
MulticlassRecall,
2625
)
2726
from torchmetrics.regression import MeanAbsoluteError, MeanSquaredError
28-
from torchmetrics.utilities.imports import _TORCHMETRICS_GREATER_EQUAL_1_6
2927
from torchmetrics.wrappers import ClasswiseWrapper, MetricTracker, MultioutputWrapper
3028
from unittests._helpers import seed_all
3129

@@ -154,8 +152,8 @@ def test_tracker(base_metric, metric_input, maximize):
154152
@pytest.mark.parametrize(
155153
"base_metric",
156154
[
157-
MulticlassConfusionMatrix(3),
158-
MetricCollection([MulticlassConfusionMatrix(3), MulticlassAccuracy(3)]),
155+
pytest.param(MulticlassConfusionMatrix(3), id="Multiclass-confusion-matrix"),
156+
pytest.param(MetricCollection([MulticlassConfusionMatrix(3), MulticlassAccuracy(3)]), id="Metric-collection"),
159157
],
160158
)
161159
def test_best_metric_for_not_well_defined_metric_collection(base_metric):
@@ -165,7 +163,7 @@ def test_best_metric_for_not_well_defined_metric_collection(base_metric):
165163
warning and return None.
166164
167165
"""
168-
tracker = MetricTracker(base_metric)
166+
tracker = MetricTracker(base_metric, maximize=True)
169167
for _ in range(3):
170168
tracker.increment()
171169
for _ in range(5):
@@ -207,7 +205,7 @@ def test_best_metric_for_not_well_defined_metric_collection(base_metric):
207205
)
208206
def test_metric_tracker_and_collection_multioutput(input_to_tracker, assert_type):
209207
"""Check that MetricTracker support wrapper inputs and nested structures."""
210-
tracker = MetricTracker(input_to_tracker)
208+
tracker = MetricTracker(input_to_tracker, maximize=False)
211209
for _ in range(5):
212210
tracker.increment()
213211
for _ in range(5):
@@ -226,22 +224,6 @@ def test_metric_tracker_and_collection_multioutput(input_to_tracker, assert_type
226224
assert which_epoch is None
227225

228226

229-
def test_tracker_futurewarning():
230-
"""Check that future warning is raised for the maximize argument.
231-
232-
Also to make sure that we remove it in future versions of TM.
233-
234-
"""
235-
if _TORCHMETRICS_GREATER_EQUAL_1_6:
236-
# Check that for future versions that we remove the warning
237-
with warnings.catch_warnings():
238-
warnings.simplefilter("error")
239-
MetricTracker(MeanSquaredError(), maximize=True)
240-
else:
241-
with pytest.warns(FutureWarning, match="The default value for `maximize` will be changed from `True` to.*"):
242-
MetricTracker(MeanSquaredError(), maximize=True)
243-
244-
245227
@pytest.mark.parametrize(
246228
"base_metric",
247229
[

0 commit comments

Comments
 (0)