🔧 Maintenance and small improvements #114

Merged · 9 commits · Sep 24, 2024
13 changes: 12 additions & 1 deletion docs/source/api.rst
@@ -259,7 +259,6 @@ Others
:nosignatures:
:template: class.rst

AUSE
GroupingLoss

Regression
@@ -294,6 +293,18 @@ Segmentation

MeanIntersectionOverUnion

Others
^^^^^^

.. currentmodule:: torch_uncertainty.metrics

.. autosummary::
:toctree: generated/
:nosignatures:
:template: class.rst

AUSE

Losses
------

2 changes: 1 addition & 1 deletion docs/source/conf.py
@@ -15,7 +15,7 @@
f"{datetime.now().year!s}, Adrien Lafage and Olivier Laurent"
)
author = "Adrien Lafage and Olivier Laurent"
release = "0.2.2"
release = "0.2.2.post0"

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
5 changes: 3 additions & 2 deletions docs/source/quickstart.rst
@@ -160,10 +160,11 @@ backbone with the following code:

.. code:: python

from torch_uncertainty.models.resnet import packed_resnet18
from torch_uncertainty.models.resnet import packed_resnet

model = packed_resnet18(
model = packed_resnet(
in_channels = 3,
arch=18,
num_estimators = 4,
alpha = 2,
gamma = 2,
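For reference, a complete call under the renamed factory could look like the sketch below; `num_classes` is an assumption, since the snippet above is truncated.

```python
# Minimal sketch of the renamed factory. Only in_channels, arch,
# num_estimators, alpha, and gamma appear in the diff above;
# num_classes is an assumption added for completeness.
from torch_uncertainty.models.resnet import packed_resnet

model = packed_resnet(
    in_channels=3,
    arch=18,  # replaces the arch-specific packed_resnet18 entry point
    num_classes=10,  # assumption: not shown in the truncated snippet
    num_estimators=4,
    alpha=2,
    gamma=2,
)
```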
2 changes: 1 addition & 1 deletion docs/source/references.rst
@@ -254,7 +254,7 @@ For the conflictual loss, consider citing:
**On the Calibration of Epistemic Uncertainty: Principles, Paradoxes and Conflictual Loss**

* Authors: *Mohammed Fellaji, Frédéric Pennerath, Brieuc Conan-Guez, and Miguel Couceiro*
* Paper: `ArXiv 2024 <https://arxiv.org/pdf/2407.12211`__.
* Paper: `ArXiv 2024 <https://arxiv.org/pdf/2407.12211>`__.

Metrics
-------
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "flit_core.buildapi"

[project]
name = "torch_uncertainty"
version = "0.2.2"
version = "0.2.2.post0"
authors = [
{ name = "ENSTA U2IS", email = "olivier.laurent@ensta-paris.fr" },
{ name = "Adrien Lafage", email = "adrienlafage@outlook.com" },
13 changes: 0 additions & 13 deletions pyrightconfig.json

This file was deleted.

2 changes: 1 addition & 1 deletion tests/test_utils.py
@@ -2,7 +2,7 @@

import pytest
import torch
from huggingface_hub.utils._errors import (
from huggingface_hub.errors import (
HfHubHTTPError,
RepositoryNotFoundError,
)
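The same relocation applies to `EntryNotFoundError` in `torch_uncertainty/utils/hub.py` further down. A minimal sketch of the new public import path; the repo id and filename are placeholders:

```python
from huggingface_hub import hf_hub_download
from huggingface_hub.errors import (
    EntryNotFoundError,
    HfHubHTTPError,
    RepositoryNotFoundError,
)

try:
    # Placeholder repo id and filename, for illustration only.
    path = hf_hub_download(repo_id="user/repo", filename="config.yaml")
except (RepositoryNotFoundError, EntryNotFoundError, HfHubHTTPError):
    path = None
```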
3 changes: 3 additions & 0 deletions torch_uncertainty/baselines/classification/resnet.py
@@ -1,6 +1,7 @@
from typing import Literal

from torch import nn
from torch.optim import Optimizer

from torch_uncertainty.models import mc_dropout
from torch_uncertainty.models.resnet import (
@@ -55,6 +56,7 @@ def __init__(
normalization_layer: type[nn.Module] = nn.BatchNorm2d,
num_estimators: int = 1,
dropout_rate: float = 0.0,
optim_recipe: dict | Optimizer | None = None,
mixup_params: dict | None = None,
last_layer_dropout: bool = False,
width_multiplier: float = 1.0,
@@ -229,6 +231,7 @@ def __init__(
model=model,
loss=loss,
is_ensemble=version in ENSEMBLE_METHODS,
optim_recipe=optim_recipe,
format_batch_fn=format_batch_fn,
mixup_params=mixup_params,
eval_ood=eval_ood,
7 changes: 7 additions & 0 deletions torch_uncertainty/baselines/classification/vgg.py
@@ -1,6 +1,7 @@
from typing import Literal

from torch import nn
from torch.optim import Optimizer

from torch_uncertainty.models import mc_dropout
from torch_uncertainty.models.vgg import (
@@ -32,6 +33,7 @@ def __init__(
num_estimators: int = 1,
dropout_rate: float = 0.0,
last_layer_dropout: bool = False,
optim_recipe: dict | Optimizer | None = None,
mixup_params: dict | None = None,
groups: int = 1,
alpha: int | None = None,
@@ -52,6 +54,10 @@ def __init__(
num_classes (int): Number of classes to predict.
in_channels (int): Number of input channels.
loss (nn.Module): Training loss.
optim_recipe (Any): optimization recipe, corresponding to what the
`LightningModule.configure_optimizers()
<https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#configure-optimizers>`_
method expects.
version (str):
Determines which VGG version to use:

@@ -164,6 +170,7 @@ def __init__(
loss=loss,
is_ensemble=version in ENSEMBLE_METHODS,
format_batch_fn=format_batch_fn,
optim_recipe=optim_recipe,
mixup_params=mixup_params,
eval_ood=eval_ood,
ood_criterion=ood_criterion,
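As the docstring above notes, `optim_recipe` mirrors what `LightningModule.configure_optimizers()` returns. A minimal sketch of the dict form, with a hypothetical stand-in model and hyperparameters:

```python
from torch import nn, optim

# Hypothetical stand-in; the baselines build their own backbone internally.
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))

optimizer = optim.SGD(model.parameters(), lr=0.05, momentum=0.9)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=100)

# A dict of this shape (or a bare Optimizer, or None) can be passed as
# optim_recipe, matching the `dict | Optimizer | None` annotation above.
optim_recipe = {"optimizer": optimizer, "lr_scheduler": scheduler}
```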
3 changes: 3 additions & 0 deletions torch_uncertainty/baselines/classification/wideresnet.py
@@ -1,6 +1,7 @@
from typing import Literal

from torch import nn
from torch.optim import Optimizer

from torch_uncertainty.models import mc_dropout
from torch_uncertainty.models.wideresnet import (
@@ -39,6 +40,7 @@ def __init__(
style: str = "imagenet",
num_estimators: int = 1,
dropout_rate: float = 0.0,
optim_recipe: dict | Optimizer | None = None,
mixup_params: dict | None = None,
groups: int = 1,
last_layer_dropout: bool = False,
@@ -186,6 +188,7 @@ def __init__(
loss=loss,
is_ensemble=version in ENSEMBLE_METHODS,
format_batch_fn=format_batch_fn,
optim_recipe=optim_recipe,
mixup_params=mixup_params,
eval_ood=eval_ood,
eval_grouping_loss=eval_grouping_loss,
1 change: 0 additions & 1 deletion torch_uncertainty/datamodules/segmentation/cityscapes.py
@@ -105,7 +105,6 @@ def __init__(
pin_memory=pin_memory,
persistent_workers=persistent_workers,
)

self.dataset = Cityscapes
self.mode = "fine"
self.crop_size = _pair(crop_size)
45 changes: 43 additions & 2 deletions torch_uncertainty/datasets/segmentation/cityscapes.py
@@ -1,3 +1,4 @@
from collections.abc import Callable
from typing import Any

import torch
@@ -10,7 +11,35 @@


class Cityscapes(TVCityscapes):
def encode_target(self, target: Image.Image) -> Image.Image:
def __init__(
self,
root: str,
split: str = "train",
mode: str = "fine",
target_type: list[str] | str = "instance",
transform: Callable[..., Any] | None = None,
target_transform: Callable[..., Any] | None = None,
transforms: Callable[..., Any] | None = None,
) -> None:
super().__init__(
root,
split,
mode,
target_type,
transform,
target_transform,
transforms,
)
train_id_to_color = [
c.color
for c in self.classes
if (c.train_id != -1 and c.train_id != 255)
]
train_id_to_color.append([0, 0, 0])
self.train_id_to_color = torch.tensor(train_id_to_color)

@classmethod
def encode_target(cls, target: Image.Image) -> Image.Image:
"""Encode target image to tensor.

Args:
@@ -23,7 +52,7 @@ def encode_target(self, target: Image.Image) -> Image.Image:
colored_target = rearrange(colored_target, "c h w -> h w c")
target = torch.zeros_like(colored_target[..., :1])
# convert target color to index
for cityscapes_class in self.classes:
for cityscapes_class in cls.classes:
target[
(
colored_target
@@ -33,6 +62,18 @@

return F.to_pil_image(rearrange(target, "h w c -> c h w"))

def decode_target(self, target: torch.Tensor) -> torch.Tensor:
"""Decode target tensor to RGB tensor.

Args:
target (torch.Tensor): Target tensor of train ids.

Returns:
torch.Tensor: Decoded RGB target.
"""
target[target == 255] = -1
return self.train_id_to_color[target]

def __getitem__(self, index: int) -> tuple[Any, Any]:
"""Get the sample at the given index.

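A hedged usage sketch of the new `decode_target`: it maps a train-id map to RGB colors, sending the 255 ignore index to the appended black entry. The import path is inferred from the file location, and the root is a placeholder that requires the dataset on disk:

```python
import torch
from torch_uncertainty.datasets.segmentation import Cityscapes

# Placeholder root; the Cityscapes files must be available locally.
dataset = Cityscapes(root="data/cityscapes", split="val", target_type="semantic")

pred = torch.randint(0, 19, (512, 1024))  # fake train-id prediction map
pred[0, 0] = 255  # an ignored pixel, mapped to the black entry
rgb = dataset.decode_target(pred)  # -> (512, 1024, 3) color tensor
```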
2 changes: 1 addition & 1 deletion torch_uncertainty/metrics/__init__.py
@@ -2,7 +2,6 @@
from .classification import (
AUGRC,
AURC,
AUSE,
FPR95,
AdaptiveCalibrationError,
BrierScore,
@@ -29,3 +28,4 @@
SILog,
ThresholdAccuracy,
)
from .sparsification import AUSE
1 change: 0 additions & 1 deletion torch_uncertainty/metrics/classification/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,5 +17,4 @@
RiskAt80Cov,
RiskAtxCov,
)
from .sparsification import AUSE
from .variation_ratio import VariationRatio
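Net effect of the two `__init__.py` edits: the public import path is unchanged while the submodule moves out of `classification`. A quick sketch:

```python
# Both imports resolve to the same metric after this PR; the first is the
# stable public path, the second the new submodule location.
from torch_uncertainty.metrics import AUSE
from torch_uncertainty.metrics.sparsification import AUSE  # noqa: F811
```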
4 changes: 2 additions & 2 deletions torch_uncertainty/metrics/classification/risk_coverage.py
@@ -146,7 +146,7 @@ def plot(
ax.set_xlabel("Coverage (%)", fontsize=16)
ax.set_ylabel("Risk - Error Rate (%)", fontsize=16)
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
ax.set_ylim(0, min(100, np.ceil(error_rates.max() * 100)))
ax.set_aspect("equal", "box")
ax.legend(loc="upper right")
fig.tight_layout()
@@ -269,7 +269,7 @@ def plot(
ax.set_xlabel("Coverage (%)", fontsize=16)
ax.set_ylabel("Generalized Risk (%)", fontsize=16)
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
ax.set_ylim(0, min(100, np.ceil(error_rates.max() * 100)))
ax.set_aspect("equal", "box")
ax.legend(loc="upper right")
fig.tight_layout()
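The y-axis is now clamped to the largest observed error rate rather than a fixed 0–100 span; a one-line sketch of the expression with fake data:

```python
import numpy as np

error_rates = np.array([0.02, 0.11, 0.35])  # fake per-coverage risks
upper = min(100, np.ceil(error_rates.max() * 100))  # -> 35.0 instead of 100
```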
torch_uncertainty/metrics/sparsification.py (moved from torch_uncertainty/metrics/classification/sparsification.py)
@@ -36,7 +36,7 @@ def __init__(self, **kwargs) -> None:
Inputs:
- :attr:`scores`: Uncertainty scores of shape :math:`(B,)`. A higher
score means a higher uncertainty.
- :attr:`errors`: Binary errors of shape :math:`(B,)`,
- :attr:`errors`: Errors of shape :math:`(B,)`,

where :math:`B` is the batch size.

@@ -52,7 +52,7 @@ def update(self, scores: Tensor, errors: Tensor) -> None:

Args:
scores (Tensor): uncertainty scores of shape :math:`(B,)`
errors (Tensor): binary errors of shape :math:`(B,)`
errors (Tensor): errors of shape :math:`(B,)`
"""
self.scores.append(scores)
self.errors.append(errors)
@@ -149,7 +149,7 @@ def _ause_rejection_rate_compute(

Args:
scores (Tensor): uncertainty scores of shape :math:`(B,)`
errors (Tensor): binary errors of shape :math:`(B,)`
errors (Tensor): errors of shape :math:`(B,)`
"""
num_samples = errors.size(0)

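The docstring change above (binary errors to plain errors) suggests AUSE now also accepts real-valued per-sample errors, e.g. absolute regression errors. A minimal sketch, assuming the default constructor:

```python
import torch
from torch_uncertainty.metrics import AUSE

ause = AUSE()
scores = torch.rand(128)  # uncertainty scores; higher means more uncertain
errors = torch.rand(128)  # real-valued per-sample errors, no longer binary
ause.update(scores, errors)
print(ause.compute())
```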
12 changes: 10 additions & 2 deletions torch_uncertainty/routines/classification.py
@@ -392,7 +392,7 @@ def training_step(
loss = self.loss(logits, target, self.current_epoch)
if self.needs_step_update:
self.model.update_wrapper(self.current_epoch)
self.log("train_loss", loss)
self.log("train_loss", loss, prog_bar=True, logger=True)
return loss

def validation_step(
@@ -501,7 +501,15 @@ def test_step(
self.ood_logit_storage.append(logits.detach().cpu())

def on_validation_epoch_end(self) -> None:
self.log_dict(self.val_cls_metrics.compute(), sync_dist=True)
self.log_dict(
self.val_cls_metrics.compute(), logger=True, sync_dist=True
)
self.log(
"Acc%",
self.val_cls_metrics["cls/Acc"].compute() * 100,
prog_bar=True,
logger=False,
)
self.val_cls_metrics.reset()

if self.eval_grouping_loss:
2 changes: 1 addition & 1 deletion torch_uncertainty/routines/pixel_regression.py
@@ -191,7 +191,7 @@ def training_step(

if self.needs_step_update:
self.model.update_wrapper(self.current_epoch)
self.log("train_loss", loss)
self.log("train_loss", loss, prog_bar=True, logger=True)
return loss

def validation_step(
2 changes: 1 addition & 1 deletion torch_uncertainty/routines/regression.py
@@ -167,7 +167,7 @@ def training_step(

if self.needs_step_update:
self.model.update_wrapper(self.current_epoch)
self.log("train_loss", loss)
self.log("train_loss", loss, prog_bar=True, logger=True)
return loss

def validation_step(
11 changes: 9 additions & 2 deletions torch_uncertainty/routines/segmentation.py
@@ -170,7 +170,7 @@ def training_step(
loss = self.loss(logits[valid_mask], target[valid_mask])
if self.needs_step_update:
self.model.update_wrapper(self.current_epoch)
self.log("train_loss", loss)
self.log("train_loss", loss, prog_bar=True, logger=True)
return loss

def validation_step(
@@ -214,7 +214,14 @@ def test_step(self, batch: tuple[Tensor, Tensor], batch_idx: int) -> None:
self.test_sbsmpl_seg_metrics.update(*self.subsample(probs, targets))

def on_validation_epoch_end(self) -> None:
self.log_dict(self.val_seg_metrics.compute(), sync_dist=True)
self.log_dict(
self.val_seg_metrics.compute(), logger=True, sync_dist=True
)
self.log(
"mIoU%",
self.val_seg_metrics["seg/mIoU"].compute() * 100,
prog_bar=True,
)
self.log_dict(self.val_sbsmpl_seg_metrics.compute(), sync_dist=True)
self.val_seg_metrics.reset()
self.val_sbsmpl_seg_metrics.reset()
2 changes: 1 addition & 1 deletion torch_uncertainty/utils/hub.py
@@ -3,7 +3,7 @@
import torch
import yaml
from huggingface_hub import hf_hub_download
from huggingface_hub.utils._errors import EntryNotFoundError
from huggingface_hub.errors import EntryNotFoundError
from safetensors.torch import load_file

