optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)   # optimize all rnn parameters
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.9)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.9)
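# ReduceLROnPlateau multiplies the lr by `factor` (new_lr = lr * 0.9 here) when the metric
# passed to scheduler.step() has stopped improving for `patience` epochs (default 10);
# with mode='min' "improving" means decreasing. The commented-out StepLR above would instead
# decay the lr unconditionally by gamma=0.9 every 30 epochs.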
loss_func = nn.CrossEntropyLoss().cuda() # the target label is not one-hotted
# train
for epoch in range(EPOCH):
    epoch_loss = 0
    for step, (b_x, b_y) in enumerate(train_loader):            # gives batch data
        b_x = b_x.view(-1, TIME_STEP, INPUT_SIZE).float()       # reshape x to (batch, time_step, input_size)
        output = rnn(b_x.cuda())                                 # rnn output
        loss = loss_func(output, b_y.cuda())                     # cross entropy loss: output is a vector of logits, target is a class index
        optimizer.zero_grad()                                    # clear gradients for this training step
        loss.backward()                                          # backpropagation, compute gradients
        optimizer.step()                                         # apply gradients
        epoch_loss += loss.item()                                # accumulate the batch loss as a Python float
    scheduler.step(epoch_loss)                                   # step the plateau scheduler once per epoch on the accumulated loss
    print(optimizer.state_dict()['param_groups'][0]['lr'])      # print the (possibly reduced) learning rate
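The snippet above assumes that rnn, train_loader, and the hyperparameters EPOCH, LR, TIME_STEP, and INPUT_SIZE are defined earlier in the post. A minimal sketch of such definitions might look like the following; the LSTM architecture, the MNIST-style shapes, and the dummy dataset are illustrative assumptions, not taken from the original code.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

# Hypothetical hyperparameters (values assumed for illustration)
EPOCH = 10          # number of passes over the training set
LR = 0.001          # initial learning rate handed to Adam
TIME_STEP = 28      # sequence length fed to the RNN
INPUT_SIZE = 28     # features per time step
NUM_CLASSES = 10    # size of the output layer

class RNN(nn.Module):
    def __init__(self):
        super().__init__()
        # batch_first=True so the input is (batch, time_step, input_size)
        self.lstm = nn.LSTM(INPUT_SIZE, hidden_size=64, num_layers=1, batch_first=True)
        self.out = nn.Linear(64, NUM_CLASSES)

    def forward(self, x):
        r_out, _ = self.lstm(x)           # r_out: (batch, time_step, hidden_size)
        return self.out(r_out[:, -1, :])  # classify from the last time step

rnn = RNN().cuda()

# Dummy data standing in for the real dataset (shapes assumed)
x = torch.randn(1000, TIME_STEP, INPUT_SIZE)
y = torch.randint(0, NUM_CLASSES, (1000,))
train_loader = DataLoader(TensorDataset(x, y), batch_size=64, shuffle=True)

Note that optimizer.param_groups[0]['lr'] returns the same value as the optimizer.state_dict() lookup used in the loop and is a slightly more direct way to inspect the current learning rate.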