Week 1: Handwritten Digit Recognition with PyTorch

I. This Week's Learning Content

A first taste of PyTorch.
Next week: an explanation of what each function does.

II. Preface

As in week 1 of the TensorFlow series, we use the MNIST handwritten digit dataset.

III. Environment

OS: Windows 10
Language: Python 3.8.8
IDE: PyCharm 2021.1.3
Deep learning environment: torch 1.8.2+cu111, torchvision 0.9.2+cu111
GPU: RTX 3070 (8 GB VRAM)
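
To confirm the installed versions match the ones listed above, a quick check (not part of the original post):

import torch
import torchvision

print(torch.__version__)        # expected: 1.8.2+cu111
print(torchvision.__version__)  # expected: 0.9.2+cu111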

IV. Preparation

1. Import dependencies

import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import torchvision

2. Check whether the GPU is available

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
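
If CUDA is available, the detected GPU's name can also be printed as an extra check (optional, not in the original code):

if torch.cuda.is_available():
    # Should report the local card, e.g. an RTX 3070
    print(torch.cuda.get_device_name(0))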

3. Load and preview the dataset

(1) Data loading and preprocessing

# Load the datasets
train_ds = torchvision.datasets.MNIST('data',
                                      train=True,
                                      transform=torchvision.transforms.ToTensor(),
                                      download=True)

test_ds = torchvision.datasets.MNIST('data',
                                     train=False,
                                     transform=torchvision.transforms.ToTensor(),
                                     download=True)

batch_size = 32
train_dl = torch.utils.data.DataLoader(train_ds,
                                       batch_size=batch_size,
                                       shuffle=True)

test_dl = torch.utils.data.DataLoader(test_ds,
                                      batch_size=batch_size)

imgs, labels = next(iter(train_dl))
print(imgs.shape)  # torch.Size([32, 1, 28, 28]): batch, channel, height, width

(2) Data preview

Display a few of the sample images with their labels:


# Data visualization: reuse the batch pulled from train_dl above
plt.figure(figsize=(10, 8))
plt.suptitle("Data preview")
for i in range(15):
    plt.subplot(4, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    # Show the image; squeeze the channel dimension: [1, 28, 28] -> [28, 28]
    plt.imshow(imgs[i].squeeze(), cmap='gray')
    # Show the label
    plt.xlabel(labels[i].item())
plt.show()

V. Building the CNN Network

# Build the network
import torch.nn.functional as F

num_class = 10  # number of classes

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        # Feature extraction layers
        self.conv1 = nn.Conv2d(1, 32, kernel_size=(3, 3))   # 1x28x28 -> 32x26x26
        self.pool1 = nn.MaxPool2d(2)                          # -> 32x13x13
        self.conv2 = nn.Conv2d(32, 64, kernel_size=(3, 3))   # -> 64x11x11
        self.pool2 = nn.MaxPool2d(2)                          # -> 64x5x5
        # Classification layers
        self.fc1 = nn.Linear(1600, 64)  # 64 * 5 * 5 = 1600
        self.fc2 = nn.Linear(64, num_class)

    # Forward pass
    def forward(self, x):
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = torch.flatten(x, start_dim=1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

from torchinfo import summary

# Move the model to the GPU
model = Model().to(device)
print(summary(model))
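
As a quick sanity check on the fc1 input size (a sketch assuming the 28x28 MNIST input, not from the original post), a dummy batch can be pushed through the feature-extraction layers:

# Verify that the flattened feature size is 64 * 5 * 5 = 1600 for a 1x28x28 input
with torch.no_grad():
    dummy = torch.zeros(1, 1, 28, 28, device=device)
    feat = model.pool2(F.relu(model.conv2(model.pool1(F.relu(model.conv1(dummy))))))
    print(feat.shape)  # torch.Size([1, 64, 5, 5])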

VI. Training and Testing Functions

# Training function

def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)  # size of the training set: 60000 images
    num_batches = len(dataloader)   # number of batches: 1875 (60000 / 32)
    train_loss, train_acc = 0, 0    # initial loss and accuracy
    for x, y in dataloader:         # iterate over batches of images x and labels y
        x, y = x.to(device), y.to(device)  # move the data to the GPU
        # Compute the prediction and its loss
        pred = model(x)          # network output
        loss = loss_fn(pred, y)  # gap between the output and the ground truth
        # Backpropagation
        optimizer.zero_grad()  # reset the gradients to zero
        loss.backward()        # backpropagate
        optimizer.step()       # update the weights

        # Accumulate accuracy and loss
        train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()
    train_acc /= size
    train_loss /= num_batches
    return train_acc, train_loss

# Evaluation function
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)  # size of the test set: 10000 images
    num_batches = len(dataloader)
    test_loss, test_acc = 0, 0
    # No training here, so disable gradient tracking to save memory
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)
            # Compute the loss
            target_pre = model(imgs)
            loss = loss_fn(target_pre, target)
            test_loss += loss.item()
            test_acc += (target_pre.argmax(1) == target).type(torch.float).sum().item()

    test_acc /= size
    test_loss /= num_batches
    return test_acc, test_loss
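
The training loop below uses loss_fn and opt, which are never defined in the post. A minimal sketch, assuming cross-entropy loss and plain SGD (the optimizer choice and learning rate are assumptions, not from the original):

# Assumed definitions (not shown in the original post)
loss_fn = nn.CrossEntropyLoss()                     # standard multi-class classification loss
opt = torch.optim.SGD(model.parameters(), lr=1e-2)  # optimizer and lr are assumptions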

# Start training
epochs = 5  # renamed from `epoch` so the loop variable does not overwrite the epoch count
train_loss = []
train_acc = []
test_loss = []
test_acc = []
for epoch in range(epochs):
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, opt)
    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)
    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)
    template = ('Epoch:{:2d},Train_acc:{:.1f},Train_loss:{:.3f},Test_acc:{:.1f},Test_loss:{:.3f}')
    print(template.format(epoch+1, epoch_train_acc, epoch_train_loss, epoch_test_acc, epoch_test_loss))
print('Done')
=================================================================
Epoch: 1,Train_acc:0.7,Train_loss:0.856,Test_acc:0.9,Test_loss:0.240
Epoch: 2,Train_acc:0.9,Train_loss:0.191,Test_acc:1.0,Test_loss:0.138
Epoch: 3,Train_acc:1.0,Train_loss:0.123,Test_acc:1.0,Test_loss:0.094
Epoch: 4,Train_acc:1.0,Train_loss:0.095,Test_acc:1.0,Test_loss:0.078
Epoch: 5,Train_acc:1.0,Train_loss:0.078,Test_acc:1.0,Test_loss:0.078
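
A note on the log format: the accuracies are divided by the dataset size, so they are fractions printed with {:.1f} and round to 0.7, 0.9, 1.0. To log percentages instead, the two lines inside the loop could be changed as follows (an optional tweak, not in the original):

    template = ('Epoch:{:2d},Train_acc:{:.1f}%,Train_loss:{:.3f},Test_acc:{:.1f}%,Test_loss:{:.3f}')
    print(template.format(epoch+1, epoch_train_acc*100, epoch_train_loss, epoch_test_acc*100, epoch_test_loss))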

VII. Plotting Accuracy and Loss Curves

# Visualize the results
epochs_range = range(epochs)
plt.rcParams['font.sans-serif'] = ['SimHei']  # allow Chinese characters in figures (author's Windows setup)
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['figure.dpi'] = 100
plt.figure(figsize=(16, 4))

plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
# Set the tick interval: one tick per epoch on the x-axis
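# Sketch (not in the original post): the comment above describes the intent but
# no code follows; one way to get one x-axis tick per epoch on the current subplot:
from matplotlib.ticker import MultipleLocator
plt.gca().xaxis.set_major_locator(MultipleLocator(1))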
plt.show()

[Figure: training and validation accuracy/loss curves]

That's all for my learning this week.