feat: add general checkpoint #8

Closed · wants to merge 1 commit
21 changes: 16 additions & 5 deletions pytorchtools.py
@@ -3,7 +3,7 @@
 
 class EarlyStopping:
     """Early stops the training if validation loss doesn't improve after a given patience."""
-    def __init__(self, patience=7, verbose=False, delta=0):
+    def __init__(self, patience=7, verbose=False, delta=0, general_checkpoint=False):
         """
         Args:
             patience (int): How long to wait after last time validation loss improved.
@@ -12,6 +12,8 @@ def __init__(self, patience=7, verbose=False, delta=0):
                             Default: False
             delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                             Default: 0
+            general_checkpoint (bool): Saves additional information that can be used to resume training.
+                            Default: False
         """
         self.patience = patience
         self.verbose = verbose
@@ -20,8 +22,9 @@
         self.early_stop = False
         self.val_loss_min = np.Inf
         self.delta = delta
+        self.general_checkpoint = general_checkpoint
 
-    def __call__(self, val_loss, model):
+    def __call__(self, val_loss, model, epoch=None, optimizer=None):
 
         score = -val_loss
 
@@ -35,12 +38,20 @@ def __call__(self, val_loss, model):
                 self.early_stop = True
         else:
             self.best_score = score
-            self.save_checkpoint(val_loss, model)
+            self.save_checkpoint(val_loss, model, epoch=epoch, optimizer=optimizer)
             self.counter = 0
 
-    def save_checkpoint(self, val_loss, model):
+    def save_checkpoint(self, val_loss, model, epoch=None, optimizer=None):
         '''Saves model when validation loss decreases.'''
         if self.verbose:
             print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
-        torch.save(model.state_dict(), 'checkpoint.pt')
+        if self.general_checkpoint and epoch is not None and optimizer is not None:
+            torch.save({
+                'epoch': epoch,
+                'model_state_dict': model.state_dict(),
+                'optimizer_state_dict': optimizer.state_dict(),
+                'loss': val_loss
+            }, 'checkpoint.tar')
+        else:
+            torch.save(model.state_dict(), 'checkpoint.pt')
         self.val_loss_min = val_loss
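
For reviewers, a minimal usage sketch of the new flag. This is an illustration only, not part of the PR: the model, optimizer, and validation-loss values are hypothetical placeholders, and only the EarlyStopping calls reflect the changed code.

import torch
import torch.nn as nn
from pytorchtools import EarlyStopping

# Hypothetical model and optimizer, for illustration only.
model = nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# With general_checkpoint=True, save_checkpoint writes a resumable
# checkpoint.tar (epoch, model state, optimizer state, loss) instead of
# the weights-only checkpoint.pt, provided epoch and optimizer are
# passed to __call__; otherwise it falls back to checkpoint.pt.
early_stopping = EarlyStopping(patience=7, verbose=True, general_checkpoint=True)

for epoch in range(100):
    # ... training step would go here ...
    val_loss = 1.0 / (epoch + 1)  # placeholder validation loss
    early_stopping(val_loss, model, epoch=epoch, optimizer=optimizer)
    if early_stopping.early_stop:
        break

# Resuming later: restore both model and optimizer state from the
# general checkpoint, using the keys save_checkpoint wrote.
checkpoint = torch.load('checkpoint.tar')
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch'] + 1
last_val_loss = checkpoint['loss']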