若在 PyTorch 训练网络时出现以下错误警告,解决办法如下:
UserWarning: Detected call of `lr_scheduler.step()` before `optimizer.step()`. In PyTorch 1.1.0 and later, you should call them in the opposite order: `optimizer.step()` before `lr_scheduler.step()`. Failure to do this will result in PyTorch skipping the first value of the learning rate schedule. See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
错误原因:在“optimizer.step()”之前检测到对“lr_scheduler.step()”的调用。在 PyTorch 1.1.0 及更高版本中,应以相反的顺序调用它们:先调用“optimizer.step()”,再调用“lr_scheduler.step()”。如果不这样做,PyTorch 将跳过学习率计划的第一个值。
例如代码修改前:
for epoch in range(1 + opt.continueEpochs, opt.nEpochs + 1 + opt.continueEpochs):
    print("Training...")
    # NOTE(review): scheduler.step() runs at the top of the epoch, i.e. before
    # any optimizer.step() call -- this ordering is exactly what triggers the
    # PyTorch 1.1.0+ UserWarning described above.
    scheduler.step()
    epoch_loss = 0
    psnr_list = []
    for iteration, inputs in enumerate(train_dataloader, 1):
        # Wrap the hazy/clear image pair and move them to the GPU.
        haze, gt = Variable(inputs['hazy_image']), Variable(inputs['clear_image'])
        haze = haze.cuda()
        gt = gt.cuda()
        # --- Zero the parameter gradients --- #
        optimizer.zero_grad()
        # --- Forward + Backward + Optimize --- #
        model.train()
        dehaze = model(haze)
        # Total loss: attention loss plus a small perceptual-loss term.
        Loss1 = loss_function_at(dehaze, gt)
        perceptual_loss = loss_network(dehaze, gt)
        Loss = Loss1 + 0.01 * perceptual_loss  # + 0.2*msssim_loss_
        epoch_loss += Loss
        Loss.backward()
        optimizer.step()
代码修改后消除警告
for epoch in range(1 + opt.continueEpochs, opt.nEpochs + 1 + opt.continueEpochs):
    print("Training...")
    epoch_loss = 0
    psnr_list = []
    for iteration, inputs in enumerate(train_dataloader, 1):
        # Wrap the hazy/clear image pair and move them to the target device.
        haze, gt = Variable(inputs['hazy_image']), Variable(inputs['clear_image'])
        haze = haze.to(device)
        gt = gt.to(device)
        # --- Zero the parameter gradients --- #
        optimizer.zero_grad()
        # --- Forward + Backward + Optimize --- #
        model.train()
        dehaze = model(haze)
        # Total loss: attention loss plus a small perceptual-loss term.
        Loss1 = loss_function_at(dehaze, gt)
        perceptual_loss = loss_network(dehaze, gt)
        Loss = Loss1 + 0.01 * perceptual_loss  # + 0.2*msssim_loss_
        # Accumulate a plain Python float via .item(); accumulating the loss
        # tensor itself would keep every batch's autograd graph alive for the
        # whole epoch and steadily grow GPU memory use.
        epoch_loss += Loss.item()
        Loss.backward()
        optimizer.step()
    # Step the LR scheduler once per epoch, AFTER the epoch's optimizer.step()
    # calls -- the order required since PyTorch 1.1.0; this removes the warning.
    scheduler.step()
消除警告