Problems you may run into when training a CNN model:

- With little training data, the model will overfit
- Hyperparameter tuning takes a lot of time
- Large models take a long time to train

⇊

How can we make training a model simpler?

⇊

Transfer learning: train on your own data starting from a model someone else has already trained (i.e., initialize your own parameters with their trained weights).

⇊

What it takes to reuse someone else's model: the network architecture and the input/output data formats must match theirs.

⇊

Transfer learning strategies (both are sketched in the code just below):
- A: use the pretrained parameters as an initialization and keep iterating on them
- B: take the pretrained parameters directly as the final parameters

⇊

Where is transfer learning applied?
To the layers before the fully connected layer; the fully connected layer itself has to be redesigned with parameters that fit your own task.
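A minimal sketch of the two strategies in PyTorch (using torchvision's resnet18 as a stand-in here; the post itself uses resnet152):

import torchvision.models as models

# Strategy A: use the pretrained weights only as an initialization, then keep training everything.
model_a = models.resnet18(pretrained=True)   # all parameters still have requires_grad=True

# Strategy B: treat the pretrained weights as final; freeze them so they are never updated.
model_b = models.resnet18(pretrained=True)
for p in model_b.parameters():
    p.requires_grad = False                  # frozen: the pretrained parameters are the final ones

In practice, strategy B still replaces the final classification layer with a new, trainable one, which is exactly what the code below does.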
How to build the net

def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    # Pick the right model; initialization differs slightly between models
    model_ft = None
    input_size = 0
    if model_name == "resnet":
        """ Resnet152
        """
        model_ft = models.resnet152(pretrained=use_pretrained)  # pretrained=True downloads the model weights for us
        set_parameter_requires_grad(model_ft, feature_extract)  # decide which layers to freeze during training
        num_ftrs = model_ft.fc.in_features  # grab the input size of the final fully connected layer
        model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, 102),  # replace the fc layer with a new (2048, 102) one
                                    nn.LogSoftmax(dim=1))  # log_softmax along dim 1 turns the outputs into log-probabilities for the loss, so cross-entropy is not used
        input_size = 224

Step by step:

- Load the net; with pretrained=True we take someone else's trained weights as our starting point:

  model_ft = models.resnet152(pretrained=use_pretrained)

- Specify which layers to freeze (their parameters will not be updated):

  set_parameter_requires_grad(model_ft, feature_extract)

  def set_parameter_requires_grad(model, feature_extracting):  # optionally mark layers with requires_grad=False so they are not trained
      if feature_extracting:
          for param in model.parameters():
              param.requires_grad = False  # disable gradient updates, i.e. freeze these parameters

- Replace the final fully connected layer with one whose parameters match our own task:

  model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, 102),  # new fc layer: (2048, 102)
                              nn.LogSoftmax(dim=1))  # outputs log-probabilities; pair it with NLLLoss rather than cross-entropy (see the check below)
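Because the new head already ends in nn.LogSoftmax, the matching loss is nn.NLLLoss; nn.CrossEntropyLoss fuses log-softmax and NLL into one step, so using it here would apply log-softmax twice. A quick sketch verifying the equivalence on random numbers:

import torch
import torch.nn as nn

logits = torch.randn(4, 102)            # raw scores for a batch of 4 images
targets = torch.randint(0, 102, (4,))   # ground-truth class indices

# CrossEntropyLoss == LogSoftmax followed by NLLLoss
ce = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), targets)
print(torch.allclose(ce, nll))  # True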
Full code walkthrough:

Load one of the models provided in torchvision.models and use its trained weights directly as the initialization.

model_name = 'resnet'  # quite a few options: ['resnet', 'alexnet', 'vgg', 'squeezenet', 'densenet', 'inception']
# Whether to reuse the pretrained features
feature_extract = True

# Whether to train on the GPU
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
    print('CUDA is not available. Training on CPU ...')
else:
    print('CUDA is available! Training on GPU ...')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def set_parameter_requires_grad(model, feature_extracting):  # optionally freeze layers so their parameters are not updated
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False

model_ft = models.resnet152()  # the 152-layer model
model_ft

The fully connected layer here is 2048 × 1000, i.e. a 1000-class task; in a moment we will change it to 2048 × 102 for our 102-class task.
Based on the example from the official PyTorch website:

def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    # Arguments: model name, number of classes, whether to freeze the pretrained features, whether to download the pretrained weights
    # Pick the right model; initialization differs slightly between models
    model_ft = None
    input_size = 0

    if model_name == "resnet":
        """ Resnet152
        """
        model_ft = models.resnet152(pretrained=use_pretrained)  # pretrained=True downloads the weights for us
        set_parameter_requires_grad(model_ft, feature_extract)  # decide which layers to freeze during training
        num_ftrs = model_ft.fc.in_features  # input size of the final fully connected layer
        # nn.Sequential() is an nn.Module container that wraps a group of layers in order.
        model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, 102),  # new fc layer: (2048, 102)
                                    nn.LogSoftmax(dim=1))  # log_softmax along dim 1; outputs log-probabilities for the loss, so cross-entropy is not used
        input_size = 224

    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "vgg":
        """ VGG16
        """
        model_ft = models.vgg16(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "squeezenet":
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == "densenet":
        """ Densenet
        """
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "inception":
        """ Inception v3
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299

    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size
Setting which layers to train

model_ft, input_size = initialize_model(model_name, 102, feature_extract, use_pretrained=True)  # model name, number of classes, ...

# GPU computation
model_ft = model_ft.to(device)

# Name under which the model checkpoint is saved
filename = 'checkpoint.pth'

# Whether to train all layers
params_to_update = model_ft.parameters()  # the parameters are the weights; parameters() returns a generator over them
print("Params to learn:")
if feature_extract:
    params_to_update = []
    for name, param in model_ft.named_parameters():
        if param.requires_grad == True:
            params_to_update.append(param)
            print("\t", name)
else:
    for name, param in model_ft.named_parameters():
        if param.requires_grad == True:
            print("\t", name)
Downloading: "https://download.pytorch.org/models/resnet152-b121ed2d.pth" to C:\Users\Administrator/.cache\torch\checkpoints\resnet152-b121ed2d.pth
100%|███████████████████████████████████████████████████████████████████████████████| 230M/230M [01:38<00:00, 2.44MB/s]
Params to learn:
fc.0.weight
fc.0.bias
model_ft
… (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
(fc): Sequential(
(0): Linear(in_features=2048, out_features=102, bias=True)
(1): LogSoftmax()
)
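As a sanity check (not in the original notebook), you can confirm that freezing worked by counting trainable parameters; with feature_extract=True only the new 2048 → 102 head should remain trainable:

# Count total vs. trainable parameters. With the backbone frozen, this should
# report roughly 209k trainable (2048*102 weights + 102 biases) out of ~60M total.
total = sum(p.numel() for p in model_ft.parameters())
trainable = sum(p.numel() for p in model_ft.parameters() if p.requires_grad)
print('trainable: {:,} / total: {:,}'.format(trainable, total))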
Optimizer setup

# Optimizer setup
optimizer_ft = optim.Adam(params_to_update, lr=1e-2)  # learning rate 1e-2
# Learning-rate decay: step_size=7, gamma=0.1 means every 7 epochs the learning rate decays to a tenth of its value
scheduler = optim.lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
# Loss function
# The last layer is already LogSoftmax(), so nn.CrossEntropyLoss() cannot be used to compute the loss: nn.CrossEntropyLoss() is equivalent to LogSoftmax() combined with nn.NLLLoss()
criterion = nn.NLLLoss()
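To see what step_size=7, gamma=0.1 actually does, here is a small sketch tracing the schedule on a throwaway optimizer (so the real optimizer_ft is left untouched):

import torch
import torch.optim as optim

dummy_opt = optim.Adam([torch.zeros(1, requires_grad=True)], lr=1e-2)
dummy_sched = optim.lr_scheduler.StepLR(dummy_opt, step_size=7, gamma=0.1)
for epoch in range(21):
    print(epoch, dummy_opt.param_groups[0]['lr'])  # 1e-2 for epochs 0-6, 1e-3 for 7-13, 1e-4 for 14-20
    dummy_opt.step()    # in PyTorch >= 1.1, optimizer.step() must come before scheduler.step()
    dummy_sched.step()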
Training module

def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False, filename=filename):
    # Arguments: model, dataloaders over the batches, loss function, optimizer, number of epochs, whether the model is Inception, checkpoint filename
    since = time.time()  # time.time() returns wall-clock time (seconds since the 1970 epoch), so the difference between two calls is the total elapsed time
    best_acc = 0
    """
    checkpoint = torch.load(filename)
    best_acc = checkpoint['best_acc']
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    model.class_to_idx = checkpoint['mapping']
    """
    model.to(device)

    val_acc_history = []
    train_acc_history = []
    train_losses = []
    valid_losses = []
    LRs = [optimizer.param_groups[0]['lr']]

    best_model_wts = copy.deepcopy(model.state_dict())  # keep a copy of the best model seen on the validation set

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training phase and a validation phase
        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()  # training mode
            else:
                model.eval()  # evaluation mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over all the data
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)  # move the batch to the GPU
                labels = labels.to(device)

                # Zero the gradients
                optimizer.zero_grad()
                # Compute and update gradients only during training
                with torch.set_grad_enabled(phase == 'train'):
                    if is_inception and phase == 'train':  # resnet does not take this branch
                        outputs, aux_outputs = model(inputs)
                        loss1 = criterion(outputs, labels)
                        loss2 = criterion(aux_outputs, labels)
                        loss = loss1 + 0.4 * loss2
                    else:  # this is the branch resnet takes
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)

                    _, preds = torch.max(outputs, 1)  # torch.max(a, 1) returns, for each row, the maximum value and its column index

                    # Update the weights in the training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # Accumulate the loss
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)

            time_elapsed = time.time() - since
            print('Time elapsed {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            # Keep track of the best model
            if phase == 'valid' and epoch_acc > best_acc:  # on the validation set, if the current accuracy beats the best so far:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())  # copy over the current best weights
                # Save the current state as a checkpoint dictionary
                state = {
                    'state_dict': model.state_dict(),
                    'best_acc': best_acc,
                    'optimizer': optimizer.state_dict(),
                }
                torch.save(state, filename)  # save the best model so far to filename
            if phase == 'valid':
                val_acc_history.append(epoch_acc)
                valid_losses.append(epoch_loss)
                scheduler.step()  # StepLR decays by epoch count and takes no metric (the original passed epoch_loss here, which StepLR would misread as an epoch number)
            if phase == 'train':
                train_acc_history.append(epoch_acc)
                train_losses.append(epoch_loss)

        print('Optimizer learning rate : {:.7f}'.format(optimizer.param_groups[0]['lr']))
        LRs.append(optimizer.param_groups[0]['lr'])
        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # After training, take the best epoch's weights as the final model
    model.load_state_dict(best_model_wts)
    return model, val_acc_history, train_acc_history, valid_losses, train_losses, LRs
Start training!

model_ft, val_acc_history, train_acc_history, valid_losses, train_losses, LRs = \
    train_model(model_ft, dataloaders, criterion, optimizer_ft, num_epochs=20,
                is_inception=(model_name == "inception"))
Time elapsed 35m 20s
train Loss: 2.5828 Acc: 0.8205
Time elapsed 35m 29s
valid Loss: 7.2196 Acc: 0.6968
Optimizer learning rate : 0.0010000
Epoch 19/19
Time elapsed 37m 11s
train Loss: 2.4293 Acc: 0.8248
Time elapsed 37m 21s
valid Loss: 5.7857 Acc: 0.7213
Optimizer learning rate : 0.0100000
Training complete in 37m 21s
Best val Acc: 0.721271
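Since train_model returns the loss histories, you can also look at the same numbers graphically; a small optional sketch (assuming matplotlib, which the plotting code later in this post uses anyway):

import matplotlib.pyplot as plt

plt.plot(train_losses, label='train loss')
plt.plot(valid_losses, label='valid loss')
plt.xlabel('epoch')
plt.ylabel('NLL loss')
plt.legend()
plt.show()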
Continue by training all the layers

- Previously only the fully connected layer was trained; now also train all the layers before it and see how the results compare.

Every parameter in the network should now be trained, so set requires_grad back to True everywhere:

for param in model_ft.parameters():
    param.requires_grad = True

# Now train all of the parameters, with a smaller learning rate
# (note: pass all parameters here; the original passed params_to_update, which still held only the fc parameters)
optimizer = optim.Adam(model_ft.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)  # same decay policy as before; it must be attached to the new optimizer, not optimizer_ft

# The loss function stays the same
criterion = nn.NLLLoss()

# Load the checkpoint
# Continue training from the best model saved earlier
checkpoint = torch.load(filename)
best_acc = checkpoint['best_acc']
model_ft.load_state_dict(checkpoint['state_dict'])  # load all the weight parameters into the model
# optimizer.load_state_dict(checkpoint['optimizer'])  # the saved optimizer state only covers the fc parameters, so it cannot be loaded into the new all-parameter optimizer
# model_ft.class_to_idx = checkpoint['mapping']

model_ft, val_acc_history, train_acc_history, valid_losses, train_losses, LRs = \
    train_model(model_ft, dataloaders, criterion, optimizer, num_epochs=10,
                is_inception=(model_name == "inception"))
Epoch 0/9
Time elapsed 3m 29s
train Loss: 2.3541 Acc: 0.8291
Time elapsed 3m 38s
valid Loss: 7.1179 Acc: 0.6895
E:\ProgramData\Anaconda3\envs\python36\lib\site-packages\torch\optim\lr_scheduler.py:82: UserWarning: Detected call of `lr_scheduler.step()` before `optimizer.step()`. In PyTorch 1.1.0 and later, you should call them in the opposite order: `optimizer.step()` before `lr_scheduler.step()`. Failure to do this will result in PyTorch skipping the first value of the learning rate schedule. See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
Optimizer learning rate : 0.0010000
Epoch 1/9
Time elapsed 7m 7s
train Loss: 2.2214 Acc: 0.8318
Time elapsed 7m 16s
valid Loss: 6.3605 Acc: 0.7188
Optimizer learning rate : 0.0010000
…
Epoch 9/9
Time elapsed 36m 21s
train Loss: 1.9166 Acc: 0.8422
Time elapsed 36m 31s
valid Loss: 5.4032 Acc: 0.7298
Optimizer learning rate : 0.0010000
Training complete in 36m 32s
Best val Acc: 0.729829
Testing the network (one forward pass)

Feed in one test image and see what the network returns:

probs, classes = predict(image_path, model)  # pass in the image path and the model
print(probs)    # the probability values
print(classes)  # the predicted class indices

> [ 0.01558163  0.01541934  0.01452626  0.01443549  0.01407339]
> ['70', '3', '45', '62', '55']  # look up the class names for these indices in the .json file

Note that the preprocessing must be the same as during training.
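The predict helper used above is not defined in this section. A hypothetical sketch of what it might look like, assuming the process_image function shown below and a top-5 result (the returned index strings would still need the .json mapping to become names):

# Hypothetical sketch of predict(); not the original post's definition.
def predict(image_path, model, topk=5):
    img = torch.from_numpy(process_image(image_path)).float().unsqueeze(0)  # (1, 3, 224, 224)
    model.eval()
    with torch.no_grad():
        log_probs = model(img.to(device))       # the head ends in LogSoftmax
    probs = torch.exp(log_probs)                # back to ordinary probabilities
    top_p, top_idx = probs.topk(topk, dim=1)
    return top_p.cpu().numpy()[0], [str(i) for i in top_idx.cpu().numpy()[0]]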
Loading the trained model

model_ft, input_size = initialize_model(model_name, 102, feature_extract, use_pretrained=True)

# GPU mode
model_ft = model_ft.to(device)

# Name of the saved checkpoint file
filename = 'seriouscheckpoint.pth'

# Load the model
checkpoint = torch.load(filename)
best_acc = checkpoint['best_acc']
model_ft.load_state_dict(checkpoint['state_dict'])
Test-data preprocessing

- The test data must be preprocessed exactly the same way as the training data.
- The crop is what guarantees that all inputs have the same size.
- Normalization is also required, using the same mean and std as the training data. Note that the training data was standardized on values in [0, 1], so the test data must first be scaled to [0, 1] as well.
- Finally, PyTorch puts the color channel in the first dimension, unlike many other toolkits, so the axes need to be transposed.
def process_image(image_path):
    # Read the test image
    img = Image.open(image_path)
    # Resize: scale the shorter side to 256 (thumbnail can only shrink, hence the aspect-ratio check)
    if img.size[0] > img.size[1]:
        img.thumbnail((10000, 256))
    else:
        img.thumbnail((256, 10000))
    # Crop the center 224x224 region
    left_margin = (img.width - 224) / 2
    bottom_margin = (img.height - 224) / 2
    right_margin = left_margin + 224
    top_margin = bottom_margin + 224
    img = img.crop((left_margin, bottom_margin, right_margin, top_margin))
    # Same preprocessing as during training
    img = np.array(img) / 255  # scale to [0, 1]
    mean = np.array([0.485, 0.456, 0.406])  # provided mean
    std = np.array([0.229, 0.224, 0.225])   # provided std
    img = (img - mean) / std
    # The color channel must come first
    img = img.transpose((2, 0, 1))
    return img
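For reference, the same resize / center-crop / normalize pipeline can be written with torchvision.transforms; this is a sketch of the equivalent, not the code the post actually ran:

from PIL import Image
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize(256),          # shorter side -> 256
    transforms.CenterCrop(224),      # center 224x224 crop
    transforms.ToTensor(),           # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
img_tensor = preprocess(Image.open('image_06621.jpg'))  # shape (3, 224, 224)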
def imshow(image, ax=None, title=None):
    """Display an image"""
    if ax is None:
        fig, ax = plt.subplots()
    # Put the color channel back last
    image = np.array(image).transpose((1, 2, 0))
    # Undo the preprocessing
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image = std * image + mean
    image = np.clip(image, 0, 1)
    ax.imshow(image)
    ax.set_title(title)
    return ax

image_path = 'image_06621.jpg'
img = process_image(image_path)
imshow(img)

img.shape

(3, 224, 224)
Testing one batch of data

# Get one batch of test data
dataiter = iter(dataloaders['valid'])
images, labels = next(dataiter)  # next(dataiter) is the idiomatic form; dataiter.next() only works on older versions

model_ft.eval()

if train_on_gpu:
    output = model_ft(images.cuda())  # run the batch on the GPU
else:
    output = model_ft(images)

output holds, for each image in the batch, its score for every class:

output.shape

# the batch has 8 images, and each image gets 102 class scores
torch.Size([8, 102])
Taking the class with the highest probability
_, preds_tensor = torch.max(output, 1)
preds = np.squeeze(preds_tensor.numpy()) if not train_on_gpu else np.squeeze(preds_tensor.cpu().numpy())
preds
array([77, 22, 46, 46, 64, 93, 28, 48], dtype=int64)
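The cat_to_name lookup used in the plotting code below maps class-index strings to flower names; its loading is not shown in this section. A sketch, assuming the dataset's usual cat_to_name.json file:

import json

# Hypothetical loading step; the original notebook loads this mapping elsewhere.
with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)  # e.g. {'21': 'fire lily', ...}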
Displaying the predictions

fig = plt.figure(figsize=(20, 20))
columns = 4
rows = 2

for idx in range(columns * rows):
    ax = fig.add_subplot(rows, columns, idx + 1, xticks=[], yticks=[])
    plt.imshow(im_convert(images[idx]))
    ax.set_title("{} ({})".format(cat_to_name[str(preds[idx])], cat_to_name[str(labels[idx].item())]),
                 color=("green" if cat_to_name[str(preds[idx])] == cat_to_name[str(labels[idx].item())] else "red"))
plt.show()
# green titles are correct predictions, red titles are wrong ones
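im_convert above is also defined elsewhere in the original notebook; a minimal sketch, mirroring the de-normalization that imshow performs:

# Hypothetical sketch of im_convert(): tensor -> displayable numpy image.
def im_convert(tensor):
    image = tensor.cpu().clone().detach().numpy()
    image = image.transpose(1, 2, 0)  # CHW -> HWC
    image = image * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])
    return np.clip(image, 0, 1)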
(No idea why the image still would not upload to CSDN even though it is under 5 MB; please make do without it...)

Link to the dataset used