def optimizer_step(self, current_epoch, batch_idx, optimizer, optimizer_idx,
                   optimizer_closure, using_native_amp, using_lbfgs):
    # warm up lr
    step = self.trainer.global_step + 1
    lr = self.optimizer_conf['lr'] * self.d_model ** (-0.5) * \
        min(step ** (-0.5), step * self.optimizer_conf['warmup_steps'] ** (-1.5))
    for pg in optimizer.param_groups:
        pg['lr'] = lr
    optimizer.step()
    optimizer.zero_grad()
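As an aside, the schedule above is the inverse-square-root warmup from "Attention Is All You Need" (the "Noam" schedule). A standalone sketch of its shape; the d_model and warmup_steps values here are made-up examples, not taken from the config above:

def noam_lr(step, base_lr=1.0, d_model=512, warmup_steps=4000):
    # linear ramp for the first warmup_steps steps, then step ** (-0.5) decay
    return base_lr * d_model ** (-0.5) * min(step ** (-0.5),
                                             step * warmup_steps ** (-1.5))

print(noam_lr(1))      # ~1.7e-7: lr ramps up linearly during warmup
print(noam_lr(4000))   # ~7.0e-4: peak, where the two min() branches meet
print(noam_lr(16000))  # ~3.5e-4: decays as step ** (-0.5) afterwards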
After overriding optimizer_step() like this, the loss shown in the console stays nan, while the loss returned by the model and printed directly is a normal value, as shown below:
Epoch 0: 0%| | 0/1422 [00:00<?, ?it/s] tensor(2.5710, device='cuda:0', grad_fn=<NllLossBackward>)
Epoch 0: 0%| | 1/1422 [00:00<20:44, 1.14it/s, loss=nan, v_num=20] tensor(2.5092, device='cuda:0', grad_fn=<NllLossBackward>)
Epoch 0: 0%| | 2/1422 [00:01<15:05, 1.57it/s, loss=nan, v_num=20] tensor(2.5047, device='cuda:0', grad_fn=<NllLossBackward>)
Epoch 0: 0%| | 4/1422 [00:01<10:14, 2.31it/s, loss=nan, v_num=20] tensor(1.8983, device='cuda:0', grad_fn=<NllLossBackward>)
Epoch 0: 0%| | 5/1422 [00:01<08:46, 2.69it/s, loss=nan, v_num=20] tensor(1.5652, device='cuda:0', grad_fn=<NllLossBackward>)
Epoch 0: 0%| | 6/1422 [00:02<09:03, 2.61it/s, loss=nan, v_num=20] tensor(1.7140, device='cuda:0', grad_fn=<NllLossBackward>)
Epoch 0: 1%| | 8/1422 [00:02<08:07, 2.90it/s, loss=nan, v_num=20] tensor(1.9435, device='cuda:0', grad_fn=<NllLossBackward>)
Epoch 0: 1%| | 9/1422 [00:02<07:32, 3.12it/s, loss=nan, v_num=20] tensor(1.7388, device='cuda:0', grad_fn=<NllLossBackward>)
Epoch 0: 1%| | 10/1422 [00:03<07:59, 2.94it/s, loss=nan, v_num=20] tensor(1.8826, device='cuda:0', grad_fn=<NllLossBackward>)
Epoch 0: 1%| | 12/1422 [00:03<07:22, 3.18it/s, loss=nan, v_num=20] tensor(1.6767, device='cuda:0', grad_fn=<NllLossBackward>)
Epoch 0: 1%| | 13/1422 [00:03<07:05, 3.31it/s, loss=nan, v_num=20] tensor(1.9654, device='cuda:0', grad_fn=<NllLossBackward>)
Epoch 0: 1%| | 14/1422 [00:04<07:26, 3.15it/s, loss=nan, v_num=20] tensor(2.0091, device='cuda:0', grad_fn=<NllLossBackward>)
Cause:
Before the override, when train_loop.run_training_batch() reaches optimizer_step(), the closure it passes in wraps training_step_and_backward(), and training_step_and_backward() is what assigns _curr_step_result and feeds the running loss. The override above never invokes the closure, so training_step_and_backward() never runs, and the loss the ProgressBar reads out of tqdm_dict comes back as nan.
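For comparison, the default optimizer_step() (simplified here; the exact signature differs between pytorch-lightning versions) just hands the closure to the optimizer, so training_step_and_backward() still runs inside optimizer.step():

def optimizer_step(self, current_epoch, batch_idx, optimizer, optimizer_idx,
                   optimizer_closure, using_native_amp, using_lbfgs):
    # simplified: the real default also special-cases TPU / native amp / LBFGS
    optimizer.step(closure=optimizer_closure)

And this is the get_progress_bar_dict() the ProgressBar queries, from pytorch-lightning's LightningModule: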
def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]:
    running_train_loss = self.trainer.train_loop.running_loss.mean()
    avg_training_loss = (
        running_train_loss.cpu().item()
        if running_train_loss is not None
        else float("NaN")
    )
    tqdm_dict = {"loss": "{:.3f}".format(avg_training_loss)}
    ...
    return tqdm_dict
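The nan comes from the is-not-None fallback above: running_loss is only appended to when training_step_and_backward() runs, and an empty accumulator's mean() yields nothing. A minimal sketch of that path, assuming pytorch-lightning ~1.0, where running_loss is a TensorRunningAccum:

from pytorch_lightning.trainer.supporters import TensorRunningAccum

acc = TensorRunningAccum(window_length=20)   # nothing appended yet
print(acc.mean())  # None -> get_progress_bar_dict() falls back to float("NaN")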
Fix:
Pass closure=optimizer_closure when calling optimizer.step(), i.e.:
def optimizer_step(self, current_epoch, batch_idx, optimizer, optimizer_idx,
                   optimizer_closure, using_native_amp, using_lbfgs):
    # warm up lr
    step = self.trainer.global_step + 1
    lr = self.optimizer_conf['lr'] * self.d_model ** (-0.5) * \
        min(step ** (-0.5), step * self.optimizer_conf['warmup_steps'] ** (-1.5))
    for pg in optimizer.param_groups:
        pg['lr'] = lr
    optimizer.step(closure=optimizer_closure)
    optimizer.zero_grad()
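This works because torch.optim optimizers invoke the closure themselves inside step(), recomputing the loss (in Lightning's case, running training_step + backward, which also updates running_loss for the progress bar). A standalone, non-Lightning illustration of the closure contract:

import torch

model = torch.nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
x, y = torch.randn(8, 4), torch.randn(8, 1)

def closure():
    # the optimizer calls this inside step(); it must recompute loss and grads
    opt.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    return loss

loss = opt.step(closure)  # step() runs closure() and returns its loss
print(loss.item())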
The figure below shows the call relationships between some of the modules in pytorch-lightning (using gpu_accelerator as the example accelerator_backend).