Implementing MNIST handwritten digit recognition with a convolutional neural network
The structure of the convolutional neural network (as printed by print(model) in the script below) is:
ConvNet(
  (layer1): Sequential(
    (0): Conv2d(1, 16, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
    (3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (layer2): Sequential(
    (0): Conv2d(16, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
    (3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (fc): Linear(in_features=1568, out_features=10, bias=True)
)
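To see where in_features=1568 in the fc layer comes from, trace the feature-map sizes: each 5*5 convolution with stride=1 and padding=2 preserves the 28*28 spatial size, and each 2*2 max pool halves it, giving 28 -> 14 -> 7 with 32 channels at the end. A minimal sketch of this arithmetic (the conv_out helper is our illustration, not part of the script below):

# Sketch: trace the feature-map size through the network.
def conv_out(size, kernel, stride=1, padding=0):
    # Standard output-size formula: floor((size + 2*padding - kernel) / stride) + 1
    return (size + 2 * padding - kernel) // stride + 1

size = 28                                   # MNIST images are 28*28
size = conv_out(size, kernel=5, padding=2)  # layer1 conv: 28
size = conv_out(size, kernel=2, stride=2)   # layer1 pool: 14
size = conv_out(size, kernel=5, padding=2)  # layer2 conv: 14
size = conv_out(size, kernel=2, stride=2)   # layer2 pool: 7
print(size * size * 32)                     # 7*7*32 = 1568 inputs to fc

The full script follows.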
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Hyperparameters
num_epochs = 5
num_classes = 10
batch_size = 100
learning_rate = 0.001
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='../../data/',
                                           train=True,
                                           transform=transforms.ToTensor(),  # converts PIL images to [0, 1] float tensors
                                           download=True)
test_dataset = torchvision.datasets.MNIST(root='../../data/',
                                          train=False,
                                          transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
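# Sketch (our addition, not part of the original tutorial): peek at one batch
# to confirm the tensor shapes the network will receive.
images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([100, 1, 28, 28]) -> (batch, channels, height, width)
print(labels.shape)  # torch.Size([100]) -> one class index per image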
# Convolutional neural network (two convolutional layers)
class ConvNet(nn.Module):
    def __init__(self, num_classes=10):
        super(ConvNet, self).__init__()
        # nn.Sequential() defines an ordered container; the modules inside run in sequence
        # First block: convolution, batch normalization, activation, and pooling
        self.layer1 = nn.Sequential(
            # MNIST images are 28*28 pixels
            # Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True)
            # in_channels is the number of input channels; out_channels is the number of
            # output channels, which is also the number of convolution kernels
            # The convolution first pads the input, then slides the kernel over the feature map and computes the weighted sums
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
            # After this convolution: 28*28*1 -> 28*28*16, since stride=1 and padding=2 preserve the spatial size
            # BatchNorm2d(16): 16 is the number of feature channels; normalizes each of the 16 channels
            nn.BatchNorm2d(16),
            nn.ReLU(),
            # Pooling stride is 2, so after max pooling the size is 28/2 = 14
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            # Pooling stride is 2, so after max pooling the size is 14/2 = 7
            nn.MaxPool2d(kernel_size=2, stride=2))
        # Fully connected layer: 7*7*32 = 1568 inputs mapped to 10 outputs (one per digit class)
        self.fc = nn.Linear(7*7*32, num_classes)

    # Define the forward pass: x (the model input) is passed through the layers one by one
    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        # Flatten (batch, 32, 7, 7) to (batch, 7*7*32) for the linear layer
        out = out.reshape(out.size(0), -1)
        # fc takes a single argument because nn.Linear's forward() only needs the input
        # tensor; the weights and bias are stored inside the module itself
        out = self.fc(out)
        # Return the model output (raw class scores)
        return out
model = ConvNet(num_classes).to(device)
# Print the model structure (this produces the summary shown at the top)
print(model)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
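# Note (our addition): nn.CrossEntropyLoss applies LogSoftmax + NLLLoss internally,
# which is why forward() returns raw logits and the model needs no softmax layer.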
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        # Forward pass: compute the model output and the loss
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward pass and optimization
        optimizer.zero_grad()  # reset accumulated gradients to zero
        loss.backward()        # backpropagate the error
        optimizer.step()       # update the parameters
        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
model.eval()  # eval mode (batchnorm uses its running mean/variance instead of mini-batch statistics)
with torch.no_grad():  # disable gradient tracking for inference
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        # torch.max returns (max values, argmax indices); the indices are the predicted classes
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint (serializes the model's state_dict to disk)
torch.save(model.state_dict(), 'model.ckpt')
Output:
Epoch [1/5], Step [100/600], Loss: 0.3434
Epoch [1/5], Step [200/600], Loss: 0.1523
Epoch [1/5], Step [300/600], Loss: 0.0984
Epoch [1/5], Step [400/600], Loss: 0.1279
Epoch [1/5], Step [500/600], Loss: 0.1632
Epoch [1/5], Step [600/600], Loss: 0.0438
Epoch [2/5], Step [100/600], Loss: 0.0173
Epoch [2/5], Step [200/600], Loss: 0.1028
Epoch [2/5], Step [300/600], Loss: 0.0338
Epoch [2/5], Step [400/600], Loss: 0.0202
Epoch [2/5], Step [500/600], Loss: 0.0782
Epoch [2/5], Step [600/600], Loss: 0.0509
Epoch [3/5], Step [100/600], Loss: 0.0721
Epoch [3/5], Step [200/600], Loss: 0.1173
Epoch [3/5], Step [300/600], Loss: 0.0181
Epoch [3/5], Step [400/600], Loss: 0.0115
Epoch [3/5], Step [500/600], Loss: 0.0083
Epoch [3/5], Step [600/600], Loss: 0.0271
Epoch [4/5], Step [100/600], Loss: 0.0207
Epoch [4/5], Step [200/600], Loss: 0.0679
Epoch [4/5], Step [300/600], Loss: 0.0222
Epoch [4/5], Step [400/600], Loss: 0.0169
Epoch [4/5], Step [500/600], Loss: 0.0666
Epoch [4/5], Step [600/600], Loss: 0.0187
Epoch [5/5], Step [100/600], Loss: 0.0146
Epoch [5/5], Step [200/600], Loss: 0.0018
Epoch [5/5], Step [300/600], Loss: 0.0211
Epoch [5/5], Step [400/600], Loss: 0.0028
Epoch [5/5], Step [500/600], Loss: 0.0250
Epoch [5/5], Step [600/600], Loss: 0.0060
Test Accuracy of the model on the 10000 test images: 99.06 %
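To reuse the trained model later without retraining, the saved state_dict can be loaded back into a fresh ConvNet instance. A minimal sketch, assuming the 'model.ckpt' file written by torch.save above:

# Sketch: restore the trained weights for inference.
model = ConvNet(num_classes).to(device)
model.load_state_dict(torch.load('model.ckpt', map_location=device))
model.eval()  # switch to eval mode before predicting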