from torch.optim.lr_scheduler import _LRScheduler


class ConstantLR(_LRScheduler):
    def __init__(self, optimizer, last_epoch=-1):
        super(ConstantLR, self).__init__(optimizer, last_epoch)  # initialize via _LRScheduler.__init__

    def get_lr(self):
        return [base_lr for base_lr in self.base_lrs]  # list comprehension: return the base learning rates unchanged

class PolynomialLR(_LRScheduler):  # polynomial ("poly") learning-rate decay
    def __init__(self, optimizer, max_iter, decay_iter=1, gamma=0.9, last_epoch=-1):
        self.decay_iter = decay_iter
        self.max_iter = max_iter
        self.gamma = gamma
        super(PolynomialLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):  # adjust the learning rate according to decay_iter and the current epoch
        if self.last_epoch % self.decay_iter or self.last_epoch % self.max_iter:
            return [base_lr for base_lr in self.base_lrs]  # not a decay step: keep the base learning rates
        else:
            factor = (1 - self.last_epoch / float(self.max_iter)) ** self.gamma
            return [base_lr * factor for base_lr in self.base_lrs]  # lr = base_lr * (1 - epoch / max_iter) ** gamma
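
# For intuition, an illustrative calculation (the numbers below are assumptions, not from the original):
# halfway through training (last_epoch = max_iter / 2) the decay branch would give
# factor = (1 - 0.5) ** 0.9 ≈ 0.536, so a base lr of 0.01 becomes roughly 0.0054.
# Note that the guard above only reaches the decay branch when last_epoch is a multiple of
# both decay_iter and max_iter; in all other iterations the base learning rates are returned unchanged.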

class WarmUpLR(_LRScheduler):  # learning-rate warm-up wrapper around another scheduler
    def __init__(
        self, optimizer, scheduler, mode="linear", warmup_iters=100, gamma=0.2, last_epoch=-1
    ):
        self.mode = mode
        self.scheduler = scheduler
        self.warmup_iters = warmup_iters
        self.gamma = gamma
        super(WarmUpLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        cold_lrs = self.scheduler.get_lr()
        # before warmup_iters the learning rate grows gradually from a small value;
        # afterwards the wrapped scheduler's learning rate is used unchanged
        if self.last_epoch < self.warmup_iters:
            if self.mode == "linear":
                alpha = self.last_epoch / float(self.warmup_iters)
                factor = self.gamma * (1 - alpha) + alpha  # ramps linearly from gamma up to 1
            elif self.mode == "constant":
                factor = self.gamma
            else:
                raise KeyError("WarmUp type {} not implemented".format(self.mode))
            return [factor * base_lr for base_lr in cold_lrs]
        return cold_lrs
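
A minimal usage sketch of combining the warm-up wrapper with the poly scheduler. The throwaway parameter, SGD settings, and iteration counts below are illustrative assumptions, not part of the original snippet; the point is that WarmUpLR shares the optimizer with the scheduler it wraps and is the one whose step() is called in the training loop.

import torch

params = [torch.nn.Parameter(torch.zeros(1))]  # throwaway parameter, just to build an optimizer
optimizer = torch.optim.SGD(params, lr=0.01)
poly = PolynomialLR(optimizer, max_iter=1000, decay_iter=1, gamma=0.9)
scheduler = WarmUpLR(optimizer, poly, mode="linear", warmup_iters=100, gamma=0.2)
for it in range(200):
    optimizer.step()
    scheduler.step()  # lr ramps from 0.2 * base_lr toward base_lr over the first 100 iterations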
Reference: torch.optim.lr_scheduler: adjusting the learning rate