26
26
27
27
28
28
def _requires_closure (optimizer ):
29
- return inspect .signature (optimizer .step ).parameters .get ('closure' ).default == inspect ._empty
29
+ # starting from torch v1.13, simple optimizers no longer have a `closure` argument
30
+ closure_param = inspect .signature (optimizer .step ).parameters .get ('closure' )
31
+ return closure_param and closure_param .default == inspect ._empty
30
32
31
33
32
34
class BaseSolver (ABC , PretrainedSolver ):
@@ -60,7 +62,7 @@ class BaseSolver(ABC, PretrainedSolver):
60
62
:param optimizer:
61
63
The optimizer to be used for training.
62
64
:type optimizer: `torch.optim.Optimizer`, optional
63
- :param criterion :
65
+ :param loss_fn :
64
66
The loss function used for training.
65
67
66
68
- If a str, must be present in the keys of `neurodiffeq.losses._losses`.
@@ -72,7 +74,7 @@ class BaseSolver(ABC, PretrainedSolver):
72
74
to a tensor of empty shape (i.e. a scalar). The returned tensor must be connected to the computational graph,
73
75
so that backpropagation can be performed.
74
76
75
- :type criterion :
77
+ :type loss_fn :
76
78
str or `torch.nn.modules.loss._Loss` or callable
77
79
:param n_batches_train:
78
80
Number of batches to train in every epoch, where batch-size equals ``train_generator.size``.
@@ -107,9 +109,10 @@ class BaseSolver(ABC, PretrainedSolver):
107
109
:type shuffle: bool
108
110
"""
109
111
112
+ @deprecated_alias (criterion = 'loss_fn' )
110
113
def __init__ (self , diff_eqs , conditions ,
111
114
nets = None , train_generator = None , valid_generator = None , analytic_solutions = None ,
112
- optimizer = None , criterion = None , n_batches_train = 1 , n_batches_valid = 4 ,
115
+ optimizer = None , loss_fn = None , n_batches_train = 1 , n_batches_valid = 4 ,
113
116
metrics = None , n_input_units = None , n_output_units = None ,
114
117
# deprecated arguments are listed below
115
118
shuffle = None , batch_size = None ):
@@ -176,7 +179,7 @@ def analytic_mse(*args):
176
179
self .metrics_history .update ({'valid__' + name : [] for name in self .metrics_fn })
177
180
178
181
self .optimizer = optimizer if optimizer else Adam (set (chain .from_iterable (n .parameters () for n in self .nets )))
179
- self ._set_criterion ( criterion )
182
+ self ._set_loss_fn ( loss_fn )
180
183
181
184
def make_pair_dict (train = None , valid = None ):
182
185
return {'train' : train , 'valid' : valid }
@@ -203,15 +206,15 @@ def make_pair_dict(train=None, valid=None):
203
206
# the _phase variable is registered for callback functions to access
204
207
self ._phase = None
205
208
206
- def _set_criterion (self , criterion ):
209
+ def _set_loss_fn (self , criterion ):
207
210
if criterion is None :
208
- self .criterion = lambda r , f , x : (r ** 2 ).mean ()
211
+ self .loss_fn = lambda r , f , x : (r ** 2 ).mean ()
209
212
elif isinstance (criterion , nn .modules .loss ._Loss ):
210
- self .criterion = lambda r , f , x : criterion (r , torch .zeros_like (r ))
213
+ self .loss_fn = lambda r , f , x : criterion (r , torch .zeros_like (r ))
211
214
elif isinstance (criterion , str ):
212
- self .criterion = _losses [criterion .lower ()]
215
+ self .loss_fn = _losses [criterion .lower ()]
213
216
elif callable (criterion ):
214
- self .criterion = criterion
217
+ self .loss_fn = criterion
215
218
else :
216
219
raise TypeError (f"Unknown type of criterion { type (criterion )} " )
217
220
@@ -236,6 +239,24 @@ def _batch_examples(self):
236
239
)
237
240
return self ._batch
238
241
242
@property
def criterion(self):
    """Deprecated alias for ``loss_fn``; warns on every read access."""
    # Fix: the original message put the closing backtick before `.criterion`
    # and had no space between the first two concatenated sentences.
    warnings.warn(
        f'`{self.__class__.__name__}.criterion` is a deprecated alias for '
        f'`{self.__class__.__name__}.loss_fn`. '
        f'The alias is only meant to be accessed by certain functions in `neurodiffeq.solver_utils` '
        f'until proper fixes are made; by which time this alias will be removed.'
    )
    return self.loss_fn

@criterion.setter
def criterion(self, loss_fn):
    """Deprecated alias for ``loss_fn``; forwards the assignment and warns."""
    warnings.warn(
        f'`{self.__class__.__name__}.criterion` is a deprecated alias for '
        f'`{self.__class__.__name__}.loss_fn`. '
        f'The alias is only meant to be accessed by certain functions in `neurodiffeq.solver_utils` '
        f'until proper fixes are made; by which time this alias will be removed.'
    )
    self.loss_fn = loss_fn
239
260
def compute_func_val (self , net , cond , * coordinates ):
240
261
r"""Compute the function value evaluated on the points specified by ``coordinates``.
241
262
@@ -352,7 +373,7 @@ def closure(zero_grad=True):
352
373
residuals = self .diff_eqs (* funcs , * batch )
353
374
residuals = torch .cat (residuals , dim = 1 )
354
375
try :
355
- loss = self .criterion (residuals , funcs , batch ) + self .additional_loss (residuals , funcs , batch )
376
+ loss = self .loss_fn (residuals , funcs , batch ) + self .additional_loss (residuals , funcs , batch )
356
377
except TypeError as e :
357
378
warnings .warn (
358
379
"You might need to update your code. "
@@ -507,7 +528,8 @@ def _get_internal_variables(self):
507
528
"metrics" : self .metrics_fn ,
508
529
"n_batches" : self .n_batches ,
509
530
"best_nets" : self .best_nets ,
510
- "criterion" : self .criterion ,
531
+ "criterion" : self .loss_fn ,
532
+ "loss_fn" : self .loss_fn ,
511
533
"conditions" : self .conditions ,
512
534
"global_epoch" : self .global_epoch ,
513
535
"lowest_loss" : self .lowest_loss ,
@@ -766,7 +788,7 @@ class SolverSpherical(BaseSolver):
766
788
Optimizer to be used for training.
767
789
Defaults to a ``torch.optim.Adam`` instance that trains on all parameters of ``nets``.
768
790
:type optimizer: ``torch.optim.Optimizer``, optional
769
- :param criterion :
791
+ :param loss_fn :
770
792
The loss function used for training.
771
793
772
794
- If a str, must be present in the keys of `neurodiffeq.losses._losses`.
@@ -778,7 +800,7 @@ class SolverSpherical(BaseSolver):
778
800
to a tensor of empty shape (i.e. a scalar). The returned tensor must be connected to the computational graph,
779
801
so that backpropagation can be performed.
780
802
781
- :type criterion :
803
+ :type loss_fn :
782
804
str or `torch.nn.modules.loss._Loss` or callable
783
805
:param n_batches_train:
784
806
Number of batches to train in every epoch, where batch-size equals ``train_generator.size``.
@@ -820,7 +842,7 @@ class SolverSpherical(BaseSolver):
820
842
821
843
def __init__ (self , pde_system , conditions , r_min = None , r_max = None ,
822
844
nets = None , train_generator = None , valid_generator = None , analytic_solutions = None ,
823
- optimizer = None , criterion = None , n_batches_train = 1 , n_batches_valid = 4 , metrics = None , enforcer = None ,
845
+ optimizer = None , loss_fn = None , n_batches_train = 1 , n_batches_valid = 4 , metrics = None , enforcer = None ,
824
846
n_output_units = 1 ,
825
847
# deprecated arguments are listed below
826
848
shuffle = None , batch_size = None ):
@@ -848,7 +870,7 @@ def __init__(self, pde_system, conditions, r_min=None, r_max=None,
848
870
valid_generator = valid_generator ,
849
871
analytic_solutions = analytic_solutions ,
850
872
optimizer = optimizer ,
851
- criterion = criterion ,
873
+ loss_fn = loss_fn ,
852
874
n_batches_train = n_batches_train ,
853
875
n_batches_valid = n_batches_valid ,
854
876
metrics = metrics ,
@@ -1025,7 +1047,7 @@ class Solver1D(BaseSolver):
1025
1047
Optimizer to be used for training.
1026
1048
Defaults to a ``torch.optim.Adam`` instance that trains on all parameters of ``nets``.
1027
1049
:type optimizer: ``torch.optim.Optimizer``, optional
1028
- :param criterion :
1050
+ :param loss_fn :
1029
1051
The loss function used for training.
1030
1052
1031
1053
- If a str, must be present in the keys of `neurodiffeq.losses._losses`.
@@ -1037,7 +1059,7 @@ class Solver1D(BaseSolver):
1037
1059
to a tensor of empty shape (i.e. a scalar). The returned tensor must be connected to the computational graph,
1038
1060
so that backpropagation can be performed.
1039
1061
1040
- :type criterion :
1062
+ :type loss_fn :
1041
1063
str or `torch.nn.modules.loss._Loss` or callable
1042
1064
:param n_batches_train:
1043
1065
Number of batches to train in every epoch, where batch-size equals ``train_generator.size``.
@@ -1073,7 +1095,7 @@ class Solver1D(BaseSolver):
1073
1095
1074
1096
def __init__ (self , ode_system , conditions , t_min = None , t_max = None ,
1075
1097
nets = None , train_generator = None , valid_generator = None , analytic_solutions = None , optimizer = None ,
1076
- criterion = None , n_batches_train = 1 , n_batches_valid = 4 , metrics = None , n_output_units = 1 ,
1098
+ loss_fn = None , n_batches_train = 1 , n_batches_valid = 4 , metrics = None , n_output_units = 1 ,
1077
1099
# deprecated arguments are listed below
1078
1100
batch_size = None , shuffle = None ):
1079
1101
@@ -1098,7 +1120,7 @@ def __init__(self, ode_system, conditions, t_min=None, t_max=None,
1098
1120
valid_generator = valid_generator ,
1099
1121
analytic_solutions = analytic_solutions ,
1100
1122
optimizer = optimizer ,
1101
- criterion = criterion ,
1123
+ loss_fn = loss_fn ,
1102
1124
n_batches_train = n_batches_train ,
1103
1125
n_batches_valid = n_batches_valid ,
1104
1126
metrics = metrics ,
@@ -1209,7 +1231,7 @@ class BundleSolver1D(BaseSolver):
1209
1231
Optimizer to be used for training.
1210
1232
Defaults to a ``torch.optim.Adam`` instance that trains on all parameters of ``nets``.
1211
1233
:type optimizer: ``torch.optim.Optimizer``, optional
1212
- :param criterion :
1234
+ :param loss_fn :
1213
1235
The loss function used for training.
1214
1236
1215
1237
- If a str, must be present in the keys of `neurodiffeq.losses._losses`.
@@ -1221,7 +1243,7 @@ class BundleSolver1D(BaseSolver):
1221
1243
to a tensor of empty shape (i.e. a scalar). The returned tensor must be connected to the computational graph,
1222
1244
so that backpropagation can be performed.
1223
1245
1224
- :type criterion :
1246
+ :type loss_fn :
1225
1247
str or `torch.nn.modules.loss._Loss` or callable
1226
1248
:param n_batches_train:
1227
1249
Number of batches to train in every epoch, where batch-size equals ``train_generator.size``.
@@ -1258,7 +1280,7 @@ class BundleSolver1D(BaseSolver):
1258
1280
def __init__ (self , ode_system , conditions , t_min , t_max ,
1259
1281
theta_min = None , theta_max = None ,
1260
1282
nets = None , train_generator = None , valid_generator = None , analytic_solutions = None , optimizer = None ,
1261
- criterion = None , n_batches_train = 1 , n_batches_valid = 4 , metrics = None , n_output_units = 1 ,
1283
+ loss_fn = None , n_batches_train = 1 , n_batches_valid = 4 , metrics = None , n_output_units = 1 ,
1262
1284
# deprecated arguments are listed below
1263
1285
batch_size = None , shuffle = None ):
1264
1286
@@ -1319,7 +1341,7 @@ def non_var_filter(*variables):
1319
1341
valid_generator = valid_generator ,
1320
1342
analytic_solutions = analytic_solutions ,
1321
1343
optimizer = optimizer ,
1322
- criterion = criterion ,
1344
+ loss_fn = loss_fn ,
1323
1345
n_batches_train = n_batches_train ,
1324
1346
n_batches_valid = n_batches_valid ,
1325
1347
metrics = metrics ,
@@ -1420,7 +1442,7 @@ class Solver2D(BaseSolver):
1420
1442
Optimizer to be used for training.
1421
1443
Defaults to a ``torch.optim.Adam`` instance that trains on all parameters of ``nets``.
1422
1444
:type optimizer: ``torch.optim.Optimizer``, optional
1423
- :param criterion :
1445
+ :param loss_fn :
1424
1446
The loss function used for training.
1425
1447
1426
1448
- If a str, must be present in the keys of `neurodiffeq.losses._losses`.
@@ -1432,7 +1454,7 @@ class Solver2D(BaseSolver):
1432
1454
to a tensor of empty shape (i.e. a scalar). The returned tensor must be connected to the computational graph,
1433
1455
so that backpropagation can be performed.
1434
1456
1435
- :type criterion :
1457
+ :type loss_fn :
1436
1458
str or `torch.nn.modules.loss._Loss` or callable
1437
1459
:param n_batches_train:
1438
1460
Number of batches to train in every epoch, where batch-size equals ``train_generator.size``.
@@ -1468,7 +1490,7 @@ class Solver2D(BaseSolver):
1468
1490
1469
1491
def __init__ (self , pde_system , conditions , xy_min = None , xy_max = None ,
1470
1492
nets = None , train_generator = None , valid_generator = None , analytic_solutions = None , optimizer = None ,
1471
- criterion = None , n_batches_train = 1 , n_batches_valid = 4 , metrics = None , n_output_units = 1 ,
1493
+ loss_fn = None , n_batches_train = 1 , n_batches_valid = 4 , metrics = None , n_output_units = 1 ,
1472
1494
# deprecated arguments are listed below
1473
1495
batch_size = None , shuffle = None ):
1474
1496
@@ -1493,7 +1515,7 @@ def __init__(self, pde_system, conditions, xy_min=None, xy_max=None,
1493
1515
valid_generator = valid_generator ,
1494
1516
analytic_solutions = analytic_solutions ,
1495
1517
optimizer = optimizer ,
1496
- criterion = criterion ,
1518
+ loss_fn = loss_fn ,
1497
1519
n_batches_train = n_batches_train ,
1498
1520
n_batches_valid = n_batches_valid ,
1499
1521
metrics = metrics ,
0 commit comments