import time

import cv2
import numpy as np
import torch
import torchvision
from torchvision import datasets, transforms
# 1. Load the CIFAR-10 dataset with torchvision and preprocess it.
transform = transforms.Compose([transforms.ToTensor(), # convert PIL image to a float tensor in [0, 1]
transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)), # shift each RGB channel from [0, 1] to [-1, 1]
])
# Load the training and test sets (downloads CIFAR-10 to `root` if not present).
# NOTE(review): with num_workers=2, on Windows this must run under an
# `if __name__ == '__main__':` guard or worker spawning re-imports the module — confirm.
trainset = torchvision.datasets.CIFAR10(root='E:\\Machine Learning\\PyTorch\\CIFAR10', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='E:\\Machine Learning\\PyTorch\\CIFAR10', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)
# 2. 定义卷积神经网络
import torch.nn as nn
import torch.nn.functional as F
class MyModel(nn.Module):
    """LeNet-style CNN producing 10-class logits from 3x32x32 images (CIFAR-10)."""

    def __init__(self):
        super().__init__()  # modern zero-argument super (Python 3)
        self.conv1 = nn.Conv2d(3, 6, 5)    # 3 -> 6 channels, 5x5 kernel: 32x32 -> 28x28
        self.pool = nn.MaxPool2d(2, 2)     # halves spatial size; reused after both convs
        self.conv2 = nn.Conv2d(6, 16, 5)   # 6 -> 16 channels: 14x14 -> 10x10
        # 16*5*5 = 16 feature maps of spatial size 5x5 after the second pool
        # (5x5 is the feature-map size, NOT the conv kernel size)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass: (N, 3, 32, 32) -> (N, 10) raw class scores (logits)."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # Flatten all dims except batch. Unlike view(-1, 16*5*5), torch.flatten
        # raises on a wrong spatial size instead of silently corrupting the batch dim.
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

Net = MyModel()
# Why is the first fully connected layer's input 16*5*5? The 16 is the number of
# filters in the last conv layer (the output feature map has 16 channels); the 5x5
# is the spatial size of the final feature map, not the conv kernel size.
# ![在这里插入图片描述](https://img-blog.csdnimg.cn/d410f91184334a61ae2991e4e9be827f.png#pic_center)
# 3. Define the loss function and the optimizer.
import torch.optim as optim
criterion = nn.CrossEntropyLoss() # cross-entropy for multi-class classification; expects raw logits
optimizer = optim.SGD(Net.parameters(), lr=0.001, momentum=0.9) # SGD with momentum
# 4. Train the network.
epochs = 2  # two epochs; batch_size=4 is set on the DataLoader in step 1
# Use the stdlib high-resolution timer; no need to pull in OpenCV just for timing.
train_start = time.perf_counter()
for epoch in range(epochs):
    total_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # unpack one mini-batch of (inputs, labels)
        inputs, labels = data
        optimizer.zero_grad()  # clear gradients left over from the previous step
        # forward + backward + optimize
        outputs = Net(inputs)              # forward pass
        loss = criterion(outputs, labels)  # compute the loss
        loss.backward()                    # backpropagate
        optimizer.step()                   # update parameters
        # report training progress
        total_loss += loss.item()
        if (i+1) % 1000 == 0:  # every 1000 mini-batches (i.e. 4000 images)
            print('第{}个epoch:第{:5d}次:目前的训练损失loss为:{:.3f}'.format(epoch+1, i+1, total_loss/1000))
            total_loss = 0.0
train_end = time.perf_counter()
print('用CPU训练总共用时:{} s'.format(train_end - train_start))
# 5. Evaluate accuracy on the test set.
correct = 0
total = 0
# stdlib timer instead of OpenCV's tick counter
test_start = time.perf_counter()
with torch.no_grad():  # inference only: skip autograd bookkeeping
    for data in testloader:
        images, labels = data
        outputs = Net(images)
        # predicted class = index of the max logit per row;
        # use `outputs` directly — the legacy `.data` attribute is discouraged
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
test_end = time.perf_counter()
print('用CPU测试总共用时:{} s'.format(test_end - test_start))
print('在测试集上的准确率为:{:.3f}%'.format(correct*100/total))
# 从头设计一个pytorch的神经网络
# 最新推荐文章于 2023-10-26 20:09:37 发布