Skip to content

🔨 Update Regression support #180

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 8 commits into
base: dev
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
# lightning.pytorch==2.1.3
# BatchEnsemble MLP regression on Boston housing with a normal predictive
# distribution (torch-uncertainty, LightningCLI config).
# NOTE(review): indentation reconstructed to standard LightningCLI nesting —
# the pasted source had all leading whitespace stripped (invalid YAML).
seed_everything: false
eval_after_fit: true
trainer:
  accelerator: gpu
  devices: 1
  precision: 16-mixed
  max_epochs: 40
  logger:
    class_path: lightning.pytorch.loggers.TensorBoardLogger
    init_args:
      save_dir: logs/boston/mlp/normal
      name: batch_ensembles
      default_hp_metric: false
  callbacks:
    - class_path: torch_uncertainty.callbacks.TURegCheckpoint
      init_args:
        probabilistic: true
    - class_path: lightning.pytorch.callbacks.LearningRateMonitor
      init_args:
        logging_interval: step
    - class_path: lightning.pytorch.callbacks.EarlyStopping
      init_args:
        monitor: val/reg/MSE
        # effectively disables early stopping; kept for check_finite
        patience: 1000
        check_finite: true
model:
  model:
    class_path: torch_uncertainty.models.mlp.batched_mlp
    init_args:
      in_features: 13  # Boston housing has 13 features
      num_outputs: 1
      hidden_dims:
        - 50
      dist_family: normal
      num_estimators: 5
  output_dim: 1
  loss: torch_uncertainty.losses.DistributionNLLLoss
  dist_family: normal
  save_in_csv: true
  format_batch_fn:
    class_path: torch_uncertainty.transforms.RepeatTarget
    init_args:
      num_repeats: 5  # must match num_estimators above
data:
  root: ./data
  batch_size: 128
  dataset_name: boston
optimizer:
  class_path: torch.optim.Adam
  init_args:
    # written as a decimal: plain "5e-3" is a *string* under YAML 1.1 resolvers
    lr: 0.005
    weight_decay: 0
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
# lightning.pytorch==2.1.3
# Variational Bayesian MLP regression on Boston housing, trained with the
# ELBO loss (torch-uncertainty, LightningCLI config).
# NOTE(review): indentation reconstructed to standard LightningCLI nesting —
# the pasted source had all leading whitespace stripped (invalid YAML).
seed_everything: false
eval_after_fit: true
trainer:
  accelerator: gpu
  devices: 1
  precision: 16-mixed
  max_epochs: 40
  logger:
    class_path: lightning.pytorch.loggers.TensorBoardLogger
    init_args:
      save_dir: logs/boston/mlp/normal
      name: bayesian
      default_hp_metric: false
  callbacks:
    - class_path: torch_uncertainty.callbacks.TURegCheckpoint
      init_args:
        probabilistic: true
    - class_path: lightning.pytorch.callbacks.LearningRateMonitor
      init_args:
        logging_interval: step
    - class_path: lightning.pytorch.callbacks.EarlyStopping
      init_args:
        monitor: val/reg/MSE
        # effectively disables early stopping; kept for check_finite
        patience: 1000
        check_finite: true
model:
  model:
    class_path: torch_uncertainty.models.mlp.bayesian_mlp
    init_args:
      in_features: 13  # Boston housing has 13 features
      num_outputs: 1
      hidden_dims:
        - 50
      dist_family: normal
      num_samples: 10  # posterior samples at evaluation
  output_dim: 1
  loss:
    class_path: torch_uncertainty.losses.ELBOLoss
    init_args:
      kl_weight: 0.00002
      inner_loss: torch_uncertainty.losses.DistributionNLLLoss
      num_samples: 3  # MC samples per training step
      # dist_family of the inner NLL loss (distinct from the routine's below)
      dist_family: normal
  dist_family: normal
  save_in_csv: true
data:
  root: ./data
  batch_size: 128
  dataset_name: boston
optimizer:
  class_path: torch.optim.Adam
  init_args:
    # written as a decimal: plain "5e-3" is a *string* under YAML 1.1 resolvers
    lr: 0.005
    weight_decay: 0
49 changes: 49 additions & 0 deletions experiments/regression/uci_datasets/configs/boston/mlp/cauchy.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# lightning.pytorch==2.1.3
# Single MLP regression on Boston housing with a Cauchy predictive
# distribution (torch-uncertainty, LightningCLI config).
# NOTE(review): indentation reconstructed to standard LightningCLI nesting —
# the pasted source had all leading whitespace stripped (invalid YAML).
seed_everything: false
eval_after_fit: true
trainer:
  accelerator: gpu
  devices: 1
  precision: 16-mixed
  max_epochs: 40
  logger:
    class_path: lightning.pytorch.loggers.TensorBoardLogger
    init_args:
      save_dir: logs/boston/mlp/cauchy
      name: standard
      default_hp_metric: false
  callbacks:
    - class_path: torch_uncertainty.callbacks.TURegCheckpoint
      init_args:
        probabilistic: true
    - class_path: lightning.pytorch.callbacks.LearningRateMonitor
      init_args:
        logging_interval: step
    - class_path: lightning.pytorch.callbacks.EarlyStopping
      init_args:
        monitor: val/reg/MSE
        # effectively disables early stopping; kept for check_finite
        patience: 1000
        check_finite: true
model:
  model:
    class_path: torch_uncertainty.models.mlp.mlp
    init_args:
      in_features: 13  # Boston housing has 13 features
      num_outputs: 1
      hidden_dims:
        - 50
      dist_family: cauchy
  output_dim: 1
  loss: torch_uncertainty.losses.DistributionNLLLoss
  dist_family: cauchy
  # Cauchy has no mean — point predictions use the mode
  dist_estimate: mode
  save_in_csv: true
data:
  root: ./data
  batch_size: 128
  dataset_name: boston
optimizer:
  class_path: torch.optim.Adam
  init_args:
    # written as a decimal: plain "5e-3" is a *string* under YAML 1.1 resolvers
    lr: 0.005
    weight_decay: 0
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# lightning.pytorch==2.1.3
# Deep-ensemble of MLPs for regression on Boston housing with a normal
# predictive distribution (torch-uncertainty, LightningCLI config).
# NOTE(review): indentation reconstructed to standard LightningCLI nesting —
# the pasted source had all leading whitespace stripped (invalid YAML).
seed_everything: false
eval_after_fit: true
trainer:
  accelerator: gpu
  devices: 1
  precision: 16-mixed
  max_epochs: 40
  logger:
    class_path: lightning.pytorch.loggers.TensorBoardLogger
    init_args:
      save_dir: logs/boston/mlp/normal
      name: deep_ensembles
      default_hp_metric: false
  callbacks:
    - class_path: torch_uncertainty.callbacks.TURegCheckpoint
      init_args:
        probabilistic: true
    - class_path: lightning.pytorch.callbacks.LearningRateMonitor
      init_args:
        logging_interval: step
    - class_path: lightning.pytorch.callbacks.EarlyStopping
      init_args:
        monitor: val/reg/MSE
        # effectively disables early stopping; kept for check_finite
        patience: 1000
        check_finite: true
model:
  model:
    class_path: torch_uncertainty.models.deep_ensembles
    init_args:
      # template network replicated num_estimators times
      models:
        class_path: torch_uncertainty.models.mlp.mlp
        init_args:
          in_features: 13  # Boston housing has 13 features
          num_outputs: 1
          hidden_dims:
            - 50
          dist_family: normal
      num_estimators: 5
      task: regression
      probabilistic: true
  output_dim: 1
  loss: torch_uncertainty.losses.DistributionNLLLoss
  dist_family: normal
  save_in_csv: true
  format_batch_fn:
    class_path: torch_uncertainty.transforms.RepeatTarget
    init_args:
      num_repeats: 5  # must match num_estimators above
data:
  root: ./data
  batch_size: 128
  dataset_name: boston
optimizer:
  class_path: torch.optim.Adam
  init_args:
    # written as a decimal: plain "5e-3" is a *string* under YAML 1.1 resolvers
    lr: 0.005
    weight_decay: 0
Original file line number Diff line number Diff line change
Expand Up @@ -21,21 +21,28 @@ trainer:
logging_interval: step
- class_path: lightning.pytorch.callbacks.EarlyStopping
init_args:
monitor: val/reg/NLL
monitor: val/reg/MSE
patience: 1000
check_finite: true
model:
model:
class_path: torch_uncertainty.models.mlp.mlp
init_args:
in_features: 13
num_outputs: 1
hidden_dims:
- 50
dist_family: laplace
output_dim: 1
in_features: 13
hidden_dims:
- 50
loss: torch_uncertainty.losses.DistributionNLLLoss
version: std
distribution: laplace
dist_family: laplace
save_in_csv: true
data:
root: ./data
batch_size: 128
dataset_name: boston
optimizer:
lr: 5e-3
weight_decay: 0
class_path: torch.optim.Adam
init_args:
lr: 5e-3
weight_decay: 0
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
# lightning.pytorch==2.1.3
# Masksembles MLP regression on Boston housing with a normal predictive
# distribution (torch-uncertainty, LightningCLI config).
# NOTE(review): indentation reconstructed to standard LightningCLI nesting —
# the pasted source had all leading whitespace stripped (invalid YAML).
seed_everything: false
eval_after_fit: true
trainer:
  accelerator: gpu
  devices: 1
  precision: 16-mixed
  max_epochs: 40
  logger:
    class_path: lightning.pytorch.loggers.TensorBoardLogger
    init_args:
      save_dir: logs/boston/mlp/normal
      name: masksemble
      default_hp_metric: false
  callbacks:
    - class_path: torch_uncertainty.callbacks.TURegCheckpoint
      init_args:
        probabilistic: true
    - class_path: lightning.pytorch.callbacks.LearningRateMonitor
      init_args:
        logging_interval: step
    - class_path: lightning.pytorch.callbacks.EarlyStopping
      init_args:
        monitor: val/reg/MSE
        # effectively disables early stopping; kept for check_finite
        patience: 1000
        check_finite: true
model:
  model:
    class_path: torch_uncertainty.models.mlp.masked_mlp
    init_args:
      in_features: 13  # Boston housing has 13 features
      num_outputs: 1
      hidden_dims:
        - 50
      dist_family: normal
      num_estimators: 5
      scale: 2.0  # mask overlap scale of Masksembles
  output_dim: 1
  loss: torch_uncertainty.losses.DistributionNLLLoss
  dist_family: normal
  save_in_csv: true
  format_batch_fn:
    class_path: torch_uncertainty.transforms.RepeatTarget
    init_args:
      num_repeats: 5  # must match num_estimators above
data:
  root: ./data
  batch_size: 128
  dataset_name: boston
optimizer:
  class_path: torch.optim.Adam
  init_args:
    # written as a decimal: plain "5e-3" is a *string* under YAML 1.1 resolvers
    lr: 0.005
    weight_decay: 0
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
# lightning.pytorch==2.1.3
# Monte-Carlo-dropout MLP regression on Boston housing with a normal
# predictive distribution (torch-uncertainty, LightningCLI config).
# NOTE(review): indentation reconstructed to standard LightningCLI nesting —
# the pasted source had all leading whitespace stripped (invalid YAML).
seed_everything: false
eval_after_fit: true
trainer:
  accelerator: gpu
  devices: 1
  precision: 16-mixed
  max_epochs: 40
  logger:
    class_path: lightning.pytorch.loggers.TensorBoardLogger
    init_args:
      save_dir: logs/boston/mlp/normal
      name: mc_dropout
      default_hp_metric: false
  callbacks:
    - class_path: torch_uncertainty.callbacks.TURegCheckpoint
      init_args:
        probabilistic: true
    - class_path: lightning.pytorch.callbacks.LearningRateMonitor
      init_args:
        logging_interval: step
    - class_path: lightning.pytorch.callbacks.EarlyStopping
      init_args:
        monitor: val/reg/MSE
        # effectively disables early stopping; kept for check_finite
        patience: 1000
        check_finite: true
model:
  model:
    class_path: torch_uncertainty.models.mc_dropout
    init_args:
      # wrapped network must define a non-zero dropout_rate
      model:
        class_path: torch_uncertainty.models.mlp.mlp
        init_args:
          in_features: 13  # Boston housing has 13 features
          num_outputs: 1
          hidden_dims:
            - 50
          dist_family: normal
          dropout_rate: 0.1
      num_estimators: 10  # stochastic forward passes at evaluation
      on_batch: false
      task: regression
      probabilistic: true
  output_dim: 1
  loss: torch_uncertainty.losses.DistributionNLLLoss
  dist_family: normal
  save_in_csv: true
data:
  root: ./data
  batch_size: 128
  dataset_name: boston
optimizer:
  class_path: torch.optim.Adam
  init_args:
    # written as a decimal: plain "5e-3" is a *string* under YAML 1.1 resolvers
    lr: 0.005
    weight_decay: 0
Loading