# PyTorch CNN for MNIST classification

# Imports
import numpy as np  # scientific computing
import torch  # core PyTorch
from torch import nn  # neural-network building blocks
from PIL import Image  # Python Imaging Library, third-party image processing
import matplotlib.pyplot as plt  # plotting library; pyplot is matplotlib's plotting framework
import os  # file and directory utilities
from torchvision import datasets, transforms, utils  # dataset downloads, incl. COCO, ImageNet, CIFAR, etc.

# ---------- Data preparation ---------- #
# Compose chains preprocessing steps: PIL image -> tensor, then normalize the
# single grayscale channel to roughly [-1, 1].
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5], std=[0.5]),
])

# MNIST is downloaded into `root` on first use.
train_data = datasets.MNIST(root="./data/", train=True,
                            transform=transform, download=True)
test_data = datasets.MNIST(root="./data/", train=False,
                           transform=transform)
# print(len(test_data)) # inspect dataset size

# DataLoader wraps a dataset in an iterable of mini-batches.
# num_workers: number of subprocesses used for loading (0 = main process).
# shuffle: reshuffle the samples at every epoch.
train_loader = torch.utils.data.DataLoader(train_data, batch_size=64,
                                           shuffle=True, num_workers=0)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=64,
                                          shuffle=True, num_workers=0)
# print(len(train_loader))
# NOTE: the three snippets below are deliberately disabled (kept as bare
# triple-quoted string literals, so they never execute). Remove the quotes
# to run one of them.
# --------- Render one image from its 2-D array --------- #
'''one_img, label = train_data[0]
one_img = one_img.numpy().transpose(1, 2, 0)
# 转置数组transpose(1, 2, 0)是交换轴0,1,2的位置
std = [0.5]
mean = [0.5]
one_img = one_img * std + mean
one_img.resize(28, 28)
plt.imshow(one_img)
plt.show()'''
# print(one_img, label)
# --------- Render one grayscale image via make_grid (3-D tensor) --------- #
'''one_img, label = train_data[0]
grid = utils.make_grid(one_img)
# make_grid产生的是三通道相同的伪灰度图像
grid = grid.numpy().transpose(1, 2, 0)
std = [0.5]
mean = [0.5]
grid = grid * std + mean
plt.imshow(grid)
plt.show()'''
# --------- Show one batch of images together with their labels --------- #
'''images, labels = next(iter(train_loader))
img = utils.make_grid(images)
# transpose 转置函数(x=0,y=1,z=2),新的x是原来的y轴大小,新的y是原来的z轴大小,新的z是原来的x大小
# 相当于把x=1这个一道最后面去。
img = img.numpy().transpose(1, 2, 0)
std = [0.5]
mean = [0.5]
img = img * std + mean
for i in range(64):
    print(labels[i], end=" ")
    i += 1
    if i%8 is 0:
        print(end='\n')
plt.imshow(img)
plt.show()'''
# ----------定义CNN网络----------- #
# ----------- CNN definition ----------- #
import torch.nn.functional as F


class CNN(nn.Module):
    """Two conv/pool stages followed by three fully connected layers.

    Input:  (N, 1, 28, 28) MNIST batch.
    Output: (N, 10) raw class scores (logits) — suitable for
    CrossEntropyLoss, which applies log-softmax internally.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Padding=1 keeps the 28x28 size through each conv; each 2x2
        # max-pool halves it: 28 -> 14 -> 7.
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.fc1 = nn.Linear(64 * 7 * 7, 1024)  # 7*7 (not 14*14) after two poolings
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, 10)
        # self.dp = nn.Dropout(p=0.5)

    def forward(self, x):
        # Conv -> ReLU -> 2x2 max-pool, twice.
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # Flatten feature maps to one vector per sample; -1 lets PyTorch
        # infer the batch dimension from the fixed 64*7*7 feature size.
        x = x.view(-1, 64 * 7 * 7)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # No log_softmax here: that is only needed with NLLLoss,
        # not with CrossEntropyLoss.
        return self.fc3(x)

net = CNN()

import torch.optim as optim
criterion = nn.CrossEntropyLoss()  # loss criterion (expects raw logits + integer labels)
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# Adam is a drop-in alternative optimizer:
# optimizer = torch.optim.Adam(net.parameters(),lr=1e-2)

# ---------- Model training ---------- #
train_accs = []  # per-batch training accuracy (%), for the training curve
train_loss = []  # per-batch loss values, for the training curve
test_accs = []
# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = net.to(device)
for epoch in range(3):
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        # enumerate yields the batch index i (starting at 0);
        # data is an [inputs, labels] pair from the DataLoader.
        inputs, labels = data[0].to(device), data[1].to(device)
        # Clear the gradients accumulated from the previous batch.
        optimizer.zero_grad()
        # Forward + backward + parameter update.
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Accumulate loss and print the 100-batch average every 100 batches.
        running_loss += loss.item()
        if i % 100 == 99:
            print('[%d, %5d] loss :%.3f' %
                  (epoch+1, i+1,  running_loss/100))
            running_loss = 0.0
        train_loss.append(loss.item())

        # Per-batch accuracy, recorded for the training curve.
        correct = 0
        total = 0
        _, predicted = torch.max(outputs.data, 1)
        total = labels.size(0)  # number of samples in this batch
        correct = (predicted == labels).sum().item()  # number predicted correctly
        train_accs.append(100 * correct / total)

print('Finished Training')

# ---------- Model saving ---------- #
PATH = './mnist_net.pth'
torch.save(net.state_dict(), PATH)
# state_dict is a Python dict mapping each layer's learnable weights and
# biases to tensors; saving it (rather than the whole module) is the
# recommended way to persist a trained model.

# ---------- Model evaluation ---------- #
def draw_train_process(title, iters, costs, accs, label_cost, label_acc):
    """Plot the loss and accuracy curves over training iterations.

    Args:
        title: figure title.
        iters: x-axis values (iteration indices).
        costs: loss value per iteration (red curve).
        accs: accuracy (%) per iteration (green curve).
        label_cost: legend label for the loss curve.
        label_acc: legend label for the accuracy curve.
    """
    plt.title(title, fontsize=24)
    plt.xlabel("iter", fontsize=20)
    # Fixed: "\%" is not a valid Python escape (it would keep a literal
    # backslash and trigger an invalid-escape warning); "%" needs no escaping
    # in an ordinary string.
    plt.ylabel("acc(%)", fontsize=20)
    plt.plot(iters, costs, color='red', label=label_cost)
    plt.plot(iters, accs, color='green', label=label_acc)
    plt.legend()  # add the legend
    plt.grid()
    plt.show()

train_iters = range(len(train_accs))
draw_train_process('training', train_iters, train_loss, train_accs, 'training loss', 'training acc')

# ---------- Inspect the classification of one test batch ---------- #
dataiter = iter(test_loader)
# Fixed: `dataiter.next()` was removed (Python 3 iterators and recent
# PyTorch DataLoader iterators only support the built-in next()).
images, labels = next(dataiter)

# Un-normalize and display the whole batch as one image grid.
test_img = utils.make_grid(images)  # make_grid yields a 3-channel pseudo-grayscale image
test_img = test_img.numpy().transpose(1, 2, 0)  # CHW -> HWC for matplotlib
std = [0.5, 0.5, 0.5]
mean = [0.5, 0.5, 0.5]
# Fixed: use the declared `mean` list instead of a hard-coded 0.5
# (numerically identical here, but consistent with the normalization above).
test_img = test_img * std + mean
plt.imshow(test_img)
plt.show()
# Fixed: iterate the actual batch size instead of a hard-coded 64, so a
# short final batch cannot raise IndexError.
print('GroundTruth: ', ' '.join('%d' % labels[j] for j in range(labels.size(0))))

# Reload the saved weights into a fresh network and classify the batch.
test_net = CNN()
test_net.load_state_dict(torch.load(PATH))
test_out = test_net(images)

# The network outputs one score per class; the argmax along dim=1 (rows)
# gives the predicted class for each sample.
_, predicted = torch.max(test_out, dim=1)

print('Predicted: ', ' '.join('%d' % predicted[j]
                              for j in range(labels.size(0))))

# Overall accuracy on the full test set.
correct = 0
total = 0
with torch.no_grad():  # no gradient tracking needed during evaluation
    for images, labels in test_loader:
        outputs = test_net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)  # samples seen so far
        correct += (predicted == labels).sum().item()  # correct predictions so far

print('Accuracy of the network on the  test images: %d %%' % (
    100 * correct / total))

# Per-class accuracy on the test set.
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    for data in test_loader:
        images, labels = data
        outputs = test_net(images)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels)  # boolean tensor: per-sample correctness
        # Fixed: the old loop was `for i in range(10)`, which only counted the
        # first 10 samples of each 64-sample batch; iterate the whole batch.
        for i in range(labels.size(0)):
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1


for i in range(10):
    # Guard against a class that never appeared, to avoid ZeroDivisionError.
    if class_total[i] == 0:
        print('Accuracy of %d : n/a (no samples)' % i)
    else:
        print('Accuracy of %d : %2d %%' % (
            i, 100 * class_correct[i] / class_total[i]))
# (End of tutorial. Web-page boilerplate from the original blog export removed.)