学习Pytorch+Python之MNIST手写字体识别

学习Pytorch+python之MNIST手写字体识别

代码:

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import numpy as np
import torchvision.utils
from torchvision import datasets, transforms
from torch.autograd import Variable
import torch.utils.data

# Select the compute device: GPU when CUDA is available, otherwise CPU.
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")
# PyTorch works on tensors, so every training image is converted to a
# tensor and normalized to mean 0.5 / std 0.5.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5]),
])
# Download (and cache under ./data/) the MNIST train and test splits.
data_train = datasets.MNIST(root="./data/", transform=transform, train=True, download=True)
data_test = datasets.MNIST(root="./data/", transform=transform, train=False)
# Batch the datasets in groups of 64; shuffle draws samples in random order.
data_loader_train = torch.utils.data.DataLoader(dataset=data_train, batch_size=64, shuffle=True)
data_loader_test = torch.utils.data.DataLoader(dataset=data_test, batch_size=64, shuffle=True)
#创建模型即网络架构
class Model(nn.Module):
    """CNN for MNIST: two 3x3 conv layers + max-pool, then a 2-layer classifier.

    Input:  (N, 1, 28, 28) images.
    Output: (N, 10) raw class scores (logits).
    """
    def __init__(self):
        super(Model, self).__init__()
        # Convolutional feature extractor.
        self.conv1 = nn.Sequential(
            # 1 input channel -> 64 feature maps; 3x3 kernel, stride 1,
            # padding 1 keeps the spatial size unchanged (28x28).
            # BUGFIX: original had `c=1`, which is not a Conv2d argument;
            # the intended keyword is `padding=1`.
            nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            # Max pooling halves the spatial size: [(input-kernel_size)/stride + 1]
            nn.MaxPool2d(stride=2, kernel_size=2)
        )
        # Fully-connected classifier head: 14*14*128 flattened features -> 10 classes.
        self.dense = nn.Sequential(
            nn.Linear(14*14*128, 1024),
            nn.ReLU(),
            # Randomly drop units during training to reduce overfitting.
            nn.Dropout(p=0.5),
            nn.Linear(1024, 10)
        )

    def forward(self, x):
        """Run the forward pass; returns (N, 10) class scores."""
        x = self.conv1(x)
        # Flatten the (N, 128, 14, 14) feature maps for the linear layers.
        # BUGFIX: original called `x.c(...)`, which does not exist on a
        # tensor; the intended method is `view` (reshape).
        x = x.view(-1, 14*14*128)
        x = self.dense(x)
        return x
#类实例化
# Instantiate the network.
model = Model()
# Number of full passes over the training data.
epochs = 5
# Learning rate: the step size for gradient descent — larger converges
# faster but less stably, smaller converges more slowly.
learning_rate = 0.0001
# Adam optimizer over all model parameters.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Cross-entropy loss: measures how close the predicted distribution is
# to the true labels.
criterion = nn.CrossEntropyLoss()
# Move the model (and loss module) to the GPU when one is available.
if use_gpu:
    model = model.cuda()
    loss_f = criterion.cuda()

#用for循环的方式完成数据的批次训练    
# Train for `epochs` passes over the data, evaluating on the test set
# after every epoch.
for epoch in range(epochs):
    # Accumulators for this epoch's loss and number of correct predictions.
    running_loss = 0
    running_correct = 0
    model.train()  # enable dropout for training
    for data in data_loader_train:
        x_train, y_train = data
        # BUGFIX: the original called .cuda() unconditionally before the
        # .to(device) calls, which crashes on CPU-only machines;
        # .to(device) alone handles both cases.
        x_train = x_train.to(device)
        y_train = y_train.to(device)
        # Forward pass through the model.
        outputs = model(x_train)
        # Index of the max score per row = predicted class.
        _, pred = torch.max(outputs.data, 1)
        # Gradients must be cleared each step, otherwise they accumulate.
        optimizer.zero_grad()
        loss = criterion(outputs, y_train)
        # Backpropagate the loss and update the parameters.
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        running_correct += torch.sum(pred == y_train.data)
    # Measure accuracy on the test set after this epoch.
    testing_correct = 0
    model.eval()  # BUGFIX: disable dropout during evaluation
    with torch.no_grad():  # no gradients needed for evaluation
        for data in data_loader_test:
            x_test, y_test = data
            x_test = x_test.to(device)
            y_test = y_test.to(device)
            outputs = model(x_test)
            _, pred = torch.max(outputs.data, 1)
            testing_correct += torch.sum(pred == y_test.data)
    # BUGFIX: the original printed inside the test loop (once per batch)
    # and misspelled "Accuracy"; print one summary per epoch instead.
    print("Loss is {}, Training Accuracy is {}%, Test Accuracy is {}".format(
        running_loss/len(data_train),
        100*running_correct/len(data_train),
        100*testing_correct/len(data_test)))

# Try the trained model on a few samples.
# Draw a random batch of 4 handwritten digits.
data_loader_test = torch.utils.data.DataLoader(dataset=data_test, batch_size=4, shuffle=True)
# iter()/next() pull a single batch off the loader:
# 函数next相关 https://www.runoob.com/python/python-func-next.html
# 函数iter相关 https://www.runoob.com/python/python-func-iter.html
x_test, y_test = next(iter(data_loader_test))
inputs = Variable(x_test).to(device)
pred = model(inputs)
# torch.max returns (max value, index of max); the index is the
# predicted class label.
_, pred = torch.max(pred, 1)

print('Predict Label is :', [i for i in pred.data])
print('Real Label is:', [i for i in y_test])

# Assemble the batch into one image grid, undo the 0.5/0.5
# normalization, and display it.
img = torchvision.utils.make_grid(x_test)
img = img.numpy().transpose(1, 2, 0)
std = [0.5]
mean = [0.5]
img = img * std + mean
plt.imshow(img)
plt.show()

模型训练结果:

Predict Label is : [tensor(3, device='cuda:0'), tensor(1, device='cuda:0'), tensor(2, device='cuda:0'), tensor(9, device='cuda:0')]
Real Label is:     [tensor(3), tensor(1), tensor(2), tensor(9)]

测试数据可视化:
在这里插入图片描述

  • 1
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值