How to reshape target tensor #748
Unanswered
aykutcayir34 asked this question in Q&A
Replies: 1 comment
- Hi @aykutcayir34, thanks for the question! I don't have a great answer here. Perhaps we could just call squeeze on the model output so that its shape matches the target. Let me know if you think the squeezing idea would work. Hope that helps 😃
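A minimal sketch of what that squeezing idea could look like (my own illustration, not code from the reply): a custom loss_fn that squeezes the (batch, 1) logits down to (batch,) and casts the 0/1 integer targets to float before computing the loss. The name bce_logits_squeezed is made up for the example.

import torch
import torch.nn.functional as F

def bce_logits_squeezed(y_hat: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # y_hat: (batch, 1) logits from the single-unit head
    # y:     (batch,)   0/1 integer targets
    # Drop the trailing channel dim and cast the targets to float so both
    # tensors are (batch,), which is what BCE-with-logits expects.
    return F.binary_cross_entropy_with_logits(y_hat.squeeze(-1), y.float())

Passing loss_fn=bce_logits_squeezed to ImageClassifier instead of torch.nn.BCEWithLogitsLoss() should sidestep the shape mismatch without changing the datamodule.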
- I have 0 and 1 values as targets, so the number of classes for my model is one. However, I get an error about the target shape. How can I reshape my target tensor?
The code snippet I use:
import torch
from flash.image import ImageClassifier

model = ImageClassifier(
    backbone="efficientnet_b3",
    num_classes=1,
    backbone_kwargs={"in_chans": 1},
    pretrained=True,
    learning_rate=1e-4,
    loss_fn=torch.nn.BCEWithLogitsLoss(),
)
The error message I get:
ValueError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_11756/4058120394.py in <module>
1 trainer = flash.Trainer(max_epochs=3, gpus=torch.cuda.device_count())
----> 2 trainer.finetune(model, datamodule=datamodule, strategy=FreezeUnfreeze(unfreeze_epoch=1))
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\flash\core\trainer.py in finetune(self, model, train_dataloader, val_dataloaders, datamodule, strategy)
163 """
164 self._resolve_callbacks(model, strategy)
--> 165 return super().fit(model, train_dataloader, val_dataloaders, datamodule)
166
167 def _resolve_callbacks(self, model, strategy):
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\trainer\trainer.py in fit(self, model, train_dataloaders, val_dataloaders, datamodule, train_dataloader)
550 self.checkpoint_connector.resume_start()
551
--> 552 self._run(model)
553
554 assert self.state.stopped
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\trainer\trainer.py in _run(self, model)
915
916 # dispatch `start_training` or `start_evaluating` or `start_predicting`
--> 917 self._dispatch()
918
919 # plugin will finalized fitting (e.g. ddp_spawn will load trained model)
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\trainer\trainer.py in _dispatch(self)
983 self.accelerator.start_predicting(self)
984 else:
--> 985 self.accelerator.start_training(self)
986
987 def run_stage(self):
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in start_training(self, trainer)
90
91 def start_training(self, trainer: "pl.Trainer") -> None:
---> 92 self.training_type_plugin.start_training(trainer)
93
94 def start_evaluating(self, trainer: "pl.Trainer") -> None:
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\plugins\training_type\training_type_plugin.py in start_training(self, trainer)
159 def start_training(self, trainer: "pl.Trainer") -> None:
160 # double dispatch to initiate the training loop
--> 161 self._results = trainer.run_stage()
162
163 def start_evaluating(self, trainer: "pl.Trainer") -> None:
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\trainer\trainer.py in run_stage(self)
993 if self.predicting:
994 return self._run_predict()
--> 995 return self._run_train()
996
997 def _pre_training_routine(self):
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\trainer\trainer.py in _run_train(self)
1028 self.progress_bar_callback.disable()
1029
-> 1030 self._run_sanity_check(self.lightning_module)
1031
1032 # enable train mode
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\flash\core\trainer.py in _run_sanity_check(self, ref_model)
91 def _run_sanity_check(self, ref_model):
92 if hasattr(super(), "_run_sanity_check"):
---> 93 super()._run_sanity_check(ref_model)
94
95 self.run_sanity_check(ref_model)
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\trainer\trainer.py in _run_sanity_check(self, ref_model)
1112 # run eval step
1113 with torch.no_grad():
-> 1114 self._evaluation_loop.run()
1115
1116 self.on_sanity_check_end()
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\loops\base.py in run(self, *args, **kwargs)
109 try:
110 self.on_advance_start(*args, **kwargs)
--> 111 self.advance(*args, **kwargs)
112 self.on_advance_end()
113 self.iteration_count += 1
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\loops\dataloader\evaluation_loop.py in advance(self, *args, **kwargs)
108 dl_max_batches = self._max_batches[self.current_dataloader_idx]
109
--> 110 dl_outputs = self.epoch_loop.run(
111 dataloader_iter, self.current_dataloader_idx, dl_max_batches, self.num_dataloaders
112 )
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\loops\base.py in run(self, *args, **kwargs)
109 try:
110 self.on_advance_start(*args, **kwargs)
--> 111 self.advance(*args, **kwargs)
112 self.on_advance_end()
113 self.iteration_count += 1
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\loops\epoch\evaluation_epoch_loop.py in advance(self, dataloader_iter, dataloader_idx, dl_max_batches, num_dataloaders)
108 # lightning module methods
109 with self.trainer.profiler.profile("evaluation_step_and_end"):
--> 110 output = self.evaluation_step(batch, batch_idx, dataloader_idx)
111 output = self.evaluation_step_end(output)
112
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\loops\epoch\evaluation_epoch_loop.py in evaluation_step(self, batch, batch_idx, dataloader_idx)
152 self.trainer.lightning_module._current_fx_name = "validation_step"
153 with self.trainer.profiler.profile("validation_step"):
--> 154 output = self.trainer.accelerator.validation_step(step_kwargs)
155
156 return output
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in validation_step(self, step_kwargs)
209 """
210 with self.precision_plugin.val_step_context(), self.training_type_plugin.val_step_context():
--> 211 return self.training_type_plugin.validation_step(*step_kwargs.values())
212
213 def test_step(self, step_kwargs: Dict[str, Union[Any, int]]) -> Optional[STEP_OUTPUT]:
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\pytorch_lightning\plugins\training_type\training_type_plugin.py in validation_step(self, *args, **kwargs)
176
177 def validation_step(self, *args, **kwargs):
--> 178 return self.model.validation_step(*args, **kwargs)
179
180 def test_step(self, *args, **kwargs):
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\flash\image\classification\model.py in validation_step(self, batch, batch_idx)
124 def validation_step(self, batch: Any, batch_idx: int) -> Any:
125 batch = (batch[DefaultDataKeys.INPUT], batch[DefaultDataKeys.TARGET])
--> 126 return super().validation_step(batch, batch_idx)
127
128 def test_step(self, batch: Any, batch_idx: int) -> Any:
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\flash\core\model.py in validation_step(self, batch, batch_idx)
385
386 def validation_step(self, batch: Any, batch_idx: int) -> None:
--> 387 output = self.step(batch, batch_idx, self.val_metrics)
388 self.log_dict({f"val_{k}": v for k, v in output["logs"].items()}, on_step=False, on_epoch=True, prog_bar=True)
389
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\flash\core\model.py in step(self, batch, batch_idx, metrics)
334 output = {"y_hat": y_hat}
335 y_hat = self.to_loss_format(output["y_hat"])
--> 336 losses = {name: l_fn(y_hat, y) for name, l_fn in self.loss_fn.items()}
337
338 y_hat = self.to_metrics_format(output["y_hat"])
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\flash\core\model.py in <dictcomp>(.0)
334 output = {"y_hat": y_hat}
335 y_hat = self.to_loss_format(output["y_hat"])
--> 336 losses = {name: l_fn(y_hat, y) for name, l_fn in self.loss_fn.items()}
337
338 y_hat = self.to_metrics_format(output["y_hat"])
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
887 result = self._slow_forward(*input, **kwargs)
888 else:
--> 889 result = self.forward(*input, **kwargs)
890 for hook in itertools.chain(
891 _global_forward_hooks.values(),
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\torch\nn\modules\loss.py in forward(self, input, target)
712 assert self.weight is None or isinstance(self.weight, Tensor)
713 assert self.pos_weight is None or isinstance(self.pos_weight, Tensor)
--> 714 return F.binary_cross_entropy_with_logits(input, target,
715 self.weight,
716 pos_weight=self.pos_weight,
~\anaconda3\envs\flashlightningimgaud\lib\site-packages\torch\nn\functional.py in binary_cross_entropy_with_logits(input, target, weight, size_average, reduce, reduction, pos_weight)
2825
2826 if not (target.size() == input.size()):
-> 2827 raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
2828
2829 return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum)
ValueError: Target size (torch.Size([4])) must be the same as input size (torch.Size([4, 1]))
4 is my batch size, which is the default in Lightning Flash.
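For reference, here is a minimal standalone reproduction of that mismatch with made-up tensors, together with the two obvious ways to make the shapes agree (squeeze the logits or unsqueeze the targets):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 1)                    # model output: (batch, 1)
targets = torch.tensor([0.0, 1.0, 1.0, 0.0])  # targets: (batch,)

# F.binary_cross_entropy_with_logits(logits, targets)  # raises the ValueError above

# Option 1: squeeze the logits down to (batch,)
loss_a = F.binary_cross_entropy_with_logits(logits.squeeze(-1), targets)

# Option 2: reshape the targets up to (batch, 1)
loss_b = F.binary_cross_entropy_with_logits(logits, targets.unsqueeze(1))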