pytorch early stop

Honestly, PyTorch is not as convenient as TensorFlow/Keras here: early stopping has to be written by hand.
This article is a modification of a GitHub project, adding a mode parameter that selects whether the monitored metric counts as improving when it increases or when it decreases. For example, with accuracy as the metric, a larger value means the model is doing better, so mode should be 'max'. With loss as the metric, a smaller value means the model is doing better, so mode should be 'min'.

import numpy as np
import torch

class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""
    def __init__(self, patience=7, verbose=False, delta=0, mode='max', path='checkpoint.pt', trace_func=print):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
            mode (str): Whether an increase ('max') or a decrease ('min') of the metric counts as an improvement.
                            Default: 'max'
            path (str): Path for the checkpoint to be saved to.
                            Default: 'checkpoint.pt'
            trace_func (function): trace print function.
                            Default: print
        """
        self.mode_dict = {'min': np.argmin, 'max': np.argmax}
        self.fun = self.mode_dict[mode]
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        self.delta = delta
        self.path = path
        self.trace_func = trace_func
    def __call__(self, score, model):
        if self.best_score is None:
            # First call: take the initial score as the best and save a checkpoint.
            self.best_score = score
            self.save_checkpoint(score, model)
        elif self.fun([score, self.best_score]) == 0 and abs(score - self.best_score) >= self.delta:
            # The new score is the better of the two (index 0) and differs from the
            # best by at least delta: this is an improvement, so save the model
            # and reset the patience counter.
            self.best_score = score
            self.save_checkpoint(score, model)
            self.counter = 0
        else:
            # No improvement: increase the counter and stop once patience runs out.
            self.counter += 1
            self.trace_func(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True

    def save_checkpoint(self, score, model):
        '''Saves the model when the monitored score improves.'''
        if self.verbose:
            self.trace_func('Validation score improved, saving model ...')
        torch.save(model.state_dict(), self.path)
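
A minimal usage sketch: call the EarlyStopping instance with the validation metric after every epoch and break out of the training loop once early_stop becomes True. The model, optimizer, train_one_epoch and evaluate names below are placeholders and not part of the original post.

# Minimal usage sketch (assumes `model`, `optimizer`, `train_one_epoch` and
# `evaluate` already exist -- they are placeholders, not from the original post).
early_stopping = EarlyStopping(patience=5, verbose=True, mode='max', path='checkpoint.pt')

for epoch in range(100):
    train_one_epoch(model, optimizer)   # your own training step
    val_acc = evaluate(model)           # accuracy: larger is better, so mode='max'
    early_stopping(val_acc, model)      # checks for improvement and saves a checkpoint
    if early_stopping.early_stop:
        print(f'Early stopping at epoch {epoch}')
        break

# Reload the best weights written by save_checkpoint.
model.load_state_dict(torch.load('checkpoint.pt'))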

Reposted from blog.csdn.net/qtzbxjg/article/details/131323218