In PyTorch, you can read the learning rate directly from the optimizer to print or visualize how it evolves during training with Adam. Note that Adam adapts per-parameter step sizes internally through its moment estimates; the lr value stored on the optimizer only changes if you or a learning rate scheduler changes it. The examples below show how to record and plot that value during training.
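At its core this is just a dictionary lookup: every optimizer stores its hyperparameters in optimizer.param_groups. A minimal sketch, assuming a freshly constructed optimizer with a single parameter group:

import torch
import torch.optim as optim

# Any parameters will do for illustration
params = [torch.zeros(1, requires_grad=True)]
optimizer = optim.Adam(params, lr=0.001)

# With a single parameter group, the current learning rate is:
current_lr = optimizer.param_groups[0]['lr']
print(current_lr)  # 0.001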
Printing the learning rate
You can print the current learning rate every epoch or every batch. The following example prints it at the end of each epoch:
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

# Define a simple neural network
class SimpleNet(nn.Module):
    def __init__(self):
        super(SimpleNet, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = x.view(-1, 28 * 28)
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x

# Load the dataset
transform = transforms.Compose([transforms.ToTensor()])
trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True, num_workers=2)

# Initialize the model, loss function, and optimizer
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = SimpleNet().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training loop
for epoch in range(5):  # train for 5 epochs
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 100 == 99:  # print the average loss every 100 mini-batches
            print(f'[Epoch {epoch + 1}, Batch {i + 1}] loss: {running_loss / 100:.3f}')
            running_loss = 0.0
    # Print the current learning rate
    for param_group in optimizer.param_groups:
        print(f'Epoch {epoch + 1} learning rate: {param_group["lr"]}')
print('Finished Training')
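If you would rather not repeat the param_groups loop everywhere, a small helper can be called from either the epoch or the batch loop. A sketch; get_lr is our own name here, not a PyTorch API:

def get_lr(optimizer):
    """Return the current learning rate of every parameter group."""
    return [group['lr'] for group in optimizer.param_groups]

# e.g. after optimizer.step():
# print(f'current lr: {get_lr(optimizer)}')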
Visualizing the learning rate
To visualize the learning rate, record it at each epoch and then plot the curve with matplotlib. The following example continues from the setup above; since no scheduler is attached yet, the curve will be a flat line at 0.001:
import matplotlib.pyplot as plt

# List for recording the learning rate
learning_rates = []

# Training loop
for epoch in range(5):  # train for 5 epochs
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 100 == 99:  # print the average loss every 100 mini-batches
            print(f'[Epoch {epoch + 1}, Batch {i + 1}] loss: {running_loss / 100:.3f}')
            running_loss = 0.0
    # Record the current learning rate
    for param_group in optimizer.param_groups:
        learning_rates.append(param_group['lr'])

# Visualize the learning rate
plt.plot(learning_rates)
plt.xlabel('Epoch')
plt.ylabel('Learning Rate')
plt.title('Learning Rate Schedule')
plt.show()
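As an alternative to collecting values for matplotlib, you can stream the learning rate to TensorBoard as training runs. A minimal sketch using torch.utils.tensorboard (assumes the tensorboard package is installed; the training batches are elided):

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter()  # logs to ./runs by default
for epoch in range(5):
    # ... run the training batches as above ...
    writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
writer.close()

Then run tensorboard --logdir runs to view the curve in the browser.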
Using a learning rate scheduler
If you use a learning rate scheduler (such as one from torch.optim.lr_scheduler), you can record and visualize the changing learning rate in the same way. The following example attaches a StepLR scheduler and records the learning rate each epoch:
from torch.optim.lr_scheduler import StepLR

# Initialize the scheduler: multiply the learning rate by 0.1 every 2 epochs
scheduler = StepLR(optimizer, step_size=2, gamma=0.1)
learning_rates = []  # reset the list from the previous example

# Training loop
for epoch in range(5):  # train for 5 epochs
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 100 == 99:  # print the average loss every 100 mini-batches
            print(f'[Epoch {epoch + 1}, Batch {i + 1}] loss: {running_loss / 100:.3f}')
            running_loss = 0.0
    # Record the current learning rate before the scheduler updates it
    for param_group in optimizer.param_groups:
        learning_rates.append(param_group['lr'])
    # Update the learning rate once per epoch
    scheduler.step()

# Visualize the learning rate
plt.plot(learning_rates)
plt.xlabel('Epoch')
plt.ylabel('Learning Rate')
plt.title('Learning Rate Schedule')
plt.show()
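Schedulers also expose scheduler.get_last_lr(), which returns the most recently computed learning rate for each parameter group. This lets you preview a schedule without training at all by stepping a scheduler attached to a throwaway optimizer. A sketch (PyTorch may warn that optimizer.step() was never called; that is harmless for a preview):

import torch
import matplotlib.pyplot as plt
from torch.optim.lr_scheduler import StepLR

# A dummy optimizer is enough to trace the schedule
dummy_opt = torch.optim.Adam([torch.zeros(1, requires_grad=True)], lr=0.001)
dummy_sched = StepLR(dummy_opt, step_size=2, gamma=0.1)

lrs = []
for epoch in range(5):
    lrs.append(dummy_sched.get_last_lr()[0])
    dummy_sched.step()

plt.plot(lrs)
plt.xlabel('Epoch')
plt.ylabel('Learning Rate')
plt.title('StepLR Preview')
plt.show()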
These examples show how to record and visualize the learning rate during training; adapt them as needed for your own setup.