目录
训练模块
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25,
                filename='best.pt', scheduler=None, device=None):
    """Train and validate a model, checkpointing the best validation accuracy.

    Args:
        model: network to train.
        dataloaders: dict with 'train' and 'valid' DataLoader objects.
        criterion: loss function.
        optimizer: optimizer updating the model's trainable parameters.
        num_epochs: number of epochs to run.
        filename: path where the best-model checkpoint is saved.
        scheduler: optional LR scheduler stepped once per epoch. When None,
            falls back to a module-level ``scheduler`` if one exists
            (preserves the original notebook behavior of using a global).
        device: optional torch.device. When None, falls back to a
            module-level ``device`` or auto-detects CUDA.

    Returns:
        (model, val_acc_history, train_acc_history, valid_losses,
        train_losses, LRs) — the model is reloaded with the weights of the
        best validation epoch before being returned.
    """
    # Resolve the implicit globals the original code depended on.
    if device is None:
        device = globals().get(
            "device", torch.device("cuda" if torch.cuda.is_available() else "cpu"))
    if scheduler is None:
        scheduler = globals().get("scheduler")  # original stepped a global scheduler

    since = time.time()
    best_acc = 0.0
    model.to(device)

    # Per-epoch metric history, returned for later plotting.
    val_acc_history = []
    train_acc_history = []
    train_losses = []
    valid_losses = []
    LRs = [optimizer.param_groups[0]['lr']]

    # Best weights seen so far; start with the current ones.
    best_model_wts = copy.deepcopy(model.state_dict())

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # One training pass and one validation pass per epoch.
        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_corrects = 0

            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                optimizer.zero_grad()
                # FIX: only build the autograd graph during training; the
                # original ran validation with gradients enabled, wasting memory.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)
                    _, preds = torch.max(outputs, 1)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # loss.item() is the batch mean; weight by batch size so the
                # epoch average is correct even with a ragged last batch.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            # .item() so histories hold plain floats, not device tensors.
            epoch_acc = (running_corrects.double() / len(dataloaders[phase].dataset)).item()

            time_elapsed = time.time() - since
            print('Time elapsed {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            # Checkpoint whenever validation accuracy improves.
            if phase == 'valid' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                state = {
                    'state_dict': model.state_dict(),
                    'best_acc': best_acc,
                    'optimizer': optimizer.state_dict(),
                }
                torch.save(state, filename)

            if phase == 'valid':
                val_acc_history.append(epoch_acc)
                valid_losses.append(epoch_loss)
            else:
                train_acc_history.append(epoch_acc)
                train_losses.append(epoch_loss)

        # Once per epoch: record LR, then let the scheduler decay it.
        print('Optimizer learning rate : {:.7f}'.format(optimizer.param_groups[0]['lr']))
        LRs.append(optimizer.param_groups[0]['lr'])
        print()
        if scheduler is not None:
            scheduler.step()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # Hand back the best-validation weights, not the last epoch's.
    model.load_state_dict(best_model_wts)
    return model, val_acc_history, train_acc_history, valid_losses, train_losses, LRs
开始训练!
- 我们现在只训练了输出层
# Stage 1: train only the unfrozen output layer and collect metric histories.
# NOTE(review): relies on model_ft, dataloaders, criterion and optimizer_ft
# defined in earlier notebook cells not visible here.
model_ft, val_acc_history, train_acc_history, valid_losses, train_losses, LRs = train_model(model_ft, dataloaders, criterion, optimizer_ft, num_epochs=20)
Output exceeds the size limit. Open the full output data in a text editor
Epoch 0/19 ---------- Time elapsed 0m 39s train Loss: 4.0874 Acc: 0.2355 Time elapsed 0m 43s valid Loss: 3.5746 Acc: 0.2531 Optimizer learning rate : 0.0100000 Epoch 1/19 ---------- Time elapsed 1m 22s train Loss: 2.8185 Acc: 0.3953 Time elapsed 1m 26s valid Loss: 3.5450 Acc: 0.3142 Optimizer learning rate : 0.0100000 Epoch 2/19 ---------- Time elapsed 2m 5s train Loss: 2.7673 Acc: 0.4174 Time elapsed 2m 9s valid Loss: 3.9110 Acc: 0.2653 Optimizer learning rate : 0.0100000 Epoch 3/19
...
Optimizer learning rate : 0.0000100 Training complete in 14m 20s Best val Acc: 0.367971
再继续训练所有层
# Stage 2: unfreeze every layer so the whole network is fine-tuned.
for param in model_ft.parameters():
    param.requires_grad = True

# Fine-tune all parameters with a smaller learning rate.
optimizer = optim.Adam(model_ft.parameters(), lr=1e-3)
# BUG FIX: the scheduler must wrap the *new* optimizer, not the stage-1
# optimizer_ft — otherwise the fine-tuning learning rate never decays
# (the pasted log shows the LR stuck at 0.0010000 across epochs).
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
# Loss function (same as stage 1).
criterion = nn.CrossEntropyLoss()

# Resume from the best checkpoint saved during stage 1.
checkpoint = torch.load(filename)
best_acc = checkpoint['best_acc']
model_ft.load_state_dict(checkpoint['state_dict'])
Epoch 0/9 ---------- Time elapsed 1m 32s train Loss: 2.2451 Acc: 0.4846 Time elapsed 1m 36s valid Loss: 2.3190 Acc: 0.4633 Optimizer learning rate : 0.0010000 Epoch 1/9 ---------- Time elapsed 2m 54s train Loss: 1.2920 Acc: 0.6505 Time elapsed 2m 58s valid Loss: 2.2263 Acc: 0.4670 Optimizer learning rate : 0.0010000 Epoch 2/9 ---------- Time elapsed 4m 15s train Loss: 1.1026 Acc: 0.6993 Time elapsed 4m 19s valid Loss: 1.8115 Acc: 0.5452 Optimizer learning rate : 0.0010000 Epoch 3/9
...
Optimizer learning rate : 0.0010000 Training complete in 13m 45s Best val Acc: 0.640587
加载训练好的模型
# Rebuild the architecture (102 flower classes) with pretrained weights.
# NOTE(review): initialize_model, model_name, feature_extract and device come
# from earlier notebook cells — confirm they are in scope.
model_ft, input_size = initialize_model(model_name, 102, feature_extract, use_pretrained=True)
# Move to GPU (or CPU) for inference.
model_ft = model_ft.to(device)
# Checkpoint file written during training.
filename='best.pt'
# Restore the best weights saved by train_model.
checkpoint = torch.load(filename)
best_acc = checkpoint['best_acc']
model_ft.load_state_dict(checkpoint['state_dict'])
测试数据预处理
- 测试数据处理方法需要跟训练时一致才可以
- crop操作的目的是保证输入的大小是一致的
- 标准化操作也是必须的,用跟训练数据相同的mean和std,但是需要注意一点训练数据是在0-1上进行标准化,所以测试数据也需要先归一化
- 最后一点,PyTorch中颜色通道是第一个维度,跟很多工具包都不一样,需要转换
# 得到一个batch的测试数据
dataiter = iter(dataloaders['valid'])
images, labels = dataiter.next()
model_ft.eval()
if train_on_gpu:
output = model_ft(images.cuda())
else:
output = model_ft(images)
output表示对一个batch中每一个数据得到其属于各个类别的可能性
output.shape
torch.Size([128, 102])
得到概率最大的那个
# Arg-max over the class dimension gives the predicted label per sample.
_, preds_tensor = torch.max(output, 1)
# On GPU the tensor must be moved to host memory before numpy conversion;
# squeeze drops any singleton dimension.
if train_on_gpu:
    preds = np.squeeze(preds_tensor.cpu().numpy())
else:
    preds = np.squeeze(preds_tensor.numpy())
preds
展示预测结果
def im_convert(tensor):
    """Turn a normalized (C, H, W) image tensor into a displayable
    (H, W, C) numpy array in [0, 1], undoing the ImageNet normalization."""
    mean = np.array((0.485, 0.456, 0.406))
    std = np.array((0.229, 0.224, 0.225))
    # Detach from the graph and move to host memory before numpy conversion.
    arr = tensor.to("cpu").clone().detach().numpy().squeeze()
    # Channels-first (PyTorch) -> channels-last (matplotlib).
    arr = arr.transpose(1, 2, 0)
    # De-normalize, then clamp to the valid display range.
    return (arr * std + mean).clip(0, 1)
# Show a 2x4 grid of images titled "predicted (actual)";
# green title means the prediction matched the label, red means it missed.
fig = plt.figure(figsize=(20, 20))
columns = 4
rows = 2
for idx in range(rows * columns):
    ax = fig.add_subplot(rows, columns, idx + 1, xticks=[], yticks=[])
    plt.imshow(im_convert(images[idx]))
    pred_name = cat_to_name[str(preds[idx])]
    true_name = cat_to_name[str(labels[idx].item())]
    ax.set_title("{} ({})".format(pred_name, true_name),
                 color=("green" if pred_name == true_name else "red"))
plt.show()