LeNet model: training and single-image prediction.
Prediction classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
predict.py
import torch
import torchvision.transforms as transforms
from model import LeNet
import torch
from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt
import cv2
def main():
    """Load the trained LeNet weights and classify one image from disk.

    The image is preprocessed to the CIFAR-10 input format (3x32x32,
    normalized to [-1, 1]), run through the network, and the predicted
    class name is printed and drawn on an enlarged copy of the image.
    """
    transform = transforms.Compose(
        [transforms.Resize((32, 32)),   # CIFAR-10-trained model expects 32x32
         transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    classes = ('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

    net = LeNet()
    net.load_state_dict(torch.load('Lenet.pth'))
    net.eval()  # inference mode (good hygiene even though LeNet has no dropout/BN)

    # Forward slashes work on every OS, including Windows, and avoid
    # backslash-escape pitfalls.
    img_path = 'data/cifar-10-batches-py/NO.45class7horse.jpg'
    im = Image.open(img_path)       # PIL copy fed to the network
    img = cv2.imread(img_path)      # BGR copy used only for display

    im = transform(im)                  # [C, H, W]
    im = torch.unsqueeze(im, dim=0)     # add batch dimension -> [N, C, H, W]

    with torch.no_grad():  # no gradients needed at inference time
        outputs = net(im)
        predict = torch.max(outputs, dim=1)[1].numpy()
        print(classes[int(predict)])  # map argmax index to class name
        # BUG FIX: the original line ended with a comma, which wrapped the
        # result in a 1-tuple; softmax yields per-class probabilities.
        pre = torch.softmax(outputs, dim=1)
        print(pre)

    # Enlarge 5x for visibility and overlay the predicted label.
    data = cv2.resize(img, dsize=None, fx=5, fy=5,
                      interpolation=cv2.INTER_LINEAR)
    cv2.putText(data, classes[int(predict)], (0, 40),
                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 1)
    cv2.imshow('1', data)
    cv2.waitKey(0)


if __name__ == '__main__':
    main()
model.py
import torch.nn as nn
import torch.nn.functional as F
class LeNet(nn.Module):
    """Classic LeNet-5-style CNN for CIFAR-10 (3x32x32 RGB in, 10 logits out).

    Two conv+max-pool stages feed three fully connected layers.  The final
    layer returns raw logits: training pairs it with nn.CrossEntropyLoss,
    which applies log-softmax internally, so no softmax is added here.
    """

    def __init__(self):
        super(LeNet, self).__init__()
        # Feature extractor: 3-channel input, 16 then 32 filters of size 5x5.
        self.conv1 = nn.Conv2d(3, 16, 5)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, 5)
        self.pool2 = nn.MaxPool2d(2, 2)
        # Classifier head: 32*5*5 flattened features -> 120 -> 84 -> 10 classes.
        self.fc1 = nn.Linear(32 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass: x is (N, 3, 32, 32); returns (N, 10) raw logits."""
        out = self.pool1(F.relu(self.conv1(x)))    # (N,16,28,28) -> (N,16,14,14)
        out = self.pool2(F.relu(self.conv2(out)))  # (N,32,10,10) -> (N,32,5,5)
        out = out.view(-1, 32 * 5 * 5)             # flatten per-sample features
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)                       # logits; no softmax here
train.py
import torch
import torchvision
import torch.nn as nn
from model import LeNet
import torch.optim as optim
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Train LeNet on CIFAR-10 for 5 epochs and save the weights.

    Downloads CIFAR-10 on first run, trains with Adam + cross-entropy,
    reports validation accuracy every 500 mini-batches, and writes the
    final state dict to ./Lenet.pth.
    """
    transform = transforms.Compose(
        [transforms.ToTensor(),  # [0,255] HWC uint8 -> [0,1] CHW float tensor
         # output[c] = (input[c] - mean[c]) / std[c]  -> range [-1, 1]
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    # 50,000 training images; set download=True on first use to fetch the data.
    train_set = torchvision.datasets.CIFAR10(root='./data', train=True,
                                             download=True, transform=transform)
    # num_workers must stay 0 on Windows; >0 is fine on Linux.
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=36,
                                               shuffle=True, num_workers=0)

    # 10,000 validation images (train=False), loaded as a single big batch so
    # accuracy can be computed in one forward pass.
    val_set = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=False, transform=transform)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=10000,
                                             shuffle=False, num_workers=0)
    val_data_iter = iter(val_loader)
    # BUG FIX: iterator.next() does not exist in Python 3; use builtin next().
    val_image, val_label = next(val_data_iter)

    net = LeNet()
    # CrossEntropyLoss combines LogSoftmax and NLLLoss, so the network
    # emits raw logits and no softmax layer is needed in the model.
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.001)

    for epoch in range(5):  # loop over the dataset multiple times
        running_loss = 0.0  # accumulated training loss for the reporting window
        for step, data in enumerate(train_loader, start=0):
            inputs, labels = data
            # Clear gradients accumulated from the previous step; otherwise
            # PyTorch keeps summing them across iterations.
            optimizer.zero_grad()

            outputs = net(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            if step % 500 == 499:  # evaluate every 500 mini-batches
                # no_grad: skip gradient bookkeeping during evaluation to save
                # time and memory.
                with torch.no_grad():
                    outputs = net(val_image)                 # [batch, 10]
                    # argmax over the class dimension; [1] keeps only indices
                    predict_y = torch.max(outputs, dim=1)[1]
                    accuracy = (torch.eq(predict_y, val_label).sum().item()
                                / val_label.size(0))
                    print('[%d, %5d] train_loss: %.3f test_accuracy: %.3f' %
                          (epoch + 1, step + 1, running_loss / 500, accuracy))
                    running_loss = 0.0  # reset for the next 500-step window

    print('Finished Training')
    save_path = './Lenet.pth'
    torch.save(net.state_dict(), save_path)  # persist weights only


if __name__ == '__main__':
    main()
# NOTE: torchvision.datasets provides many other ready-made datasets under
# this namespace (e.g. MNIST, ImageNet) — worth exploring when time permits.