My memory isn't great, so I'm writing this down for easy reference.
...
import torch.optim as optim

optimizer = optim.SGD(para_optim, lr=0.001, momentum=0.9, weight_decay=0.0002)

def adjust_learning_rate(optimizer, epoch):
    lr = optimizer.param_groups[0]['lr'] * 0.1  # multiply the current learning rate by 0.1 every epoch
    # lr = opt.lr * (0.1 ** (epoch // opt.step))  # multiply the initial learning rate opt.lr by 0.1 every opt.step epochs
    return lr
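Note that the first formula compounds every time it is called: starting from 0.001, the learning rate becomes 1e-4 after one epoch, 1e-5 after two, and 1e-6 after three, so it drops off very quickly. The commented-out variant instead recomputes from the initial opt.lr and only decays once every opt.step epochs.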
def train(net, optimizer):
    for epoch in range(epoch_nums):
        # recompute the learning rate and write it back into every param group
        lr = adjust_learning_rate(optimizer, epoch - 1)
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr
        print('-----------learning rate', optimizer.param_groups[0]["lr"])
        net.train()
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            ...
Besides doing it by hand like this, PyTorch learning rate adjustment can also be implemented through the torch.optim.lr_scheduler interface; see https://blog.csdn.net/goodxin_ie/article/details/90704600
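As a rough sketch of the lr_scheduler route (StepLR with step_size=10 and gamma=0.1 is just an assumed example schedule, not taken from the code above), the per-epoch bookkeeping collapses to a single scheduler.step() call:

import torch.optim as optim
from torch.optim.lr_scheduler import StepLR

# Sketch only: net, epoch_nums and trainloader are assumed to exist as in the snippet above
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9, weight_decay=0.0002)
scheduler = StepLR(optimizer, step_size=10, gamma=0.1)  # multiply lr by 0.1 every 10 epochs

for epoch in range(epoch_nums):
    net.train()
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        ...  # forward, loss, backward, optimizer.step() as usual
    scheduler.step()  # advance the schedule once per epoch, after the optimizer updates
    print('-----------learning rate', optimizer.param_groups[0]["lr"])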