PyTorch Convolutional Neural Network for MNIST Handwritten Digit Recognition - GPU Training

Import the required packages

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline

Define the hyperparameters

# Define hyperparameters
input_size = 28        # each image is 28*28 pixels
classes = 10           # number of label classes (digits 0-9)
epochs = 10            # total number of training epochs
batch_size = 64        # size of one batch: 64 images
learning_rate = 0.001

Load the MNIST dataset through torchvision's datasets

# Training set
train_dataset = datasets.MNIST(root='./data',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)

# Test set
test_dataset = datasets.MNIST(root='./data',
                              train=False,
                              transform=transforms.ToTensor())
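As a quick sanity check (an illustrative addition using the matplotlib import from earlier, not part of the original walkthrough), you can inspect the dataset sizes and display one sample:

# Inspect the downloaded data
print(len(train_dataset), len(test_dataset))   # 60000 training and 10000 test samples

image, label = train_dataset[0]                # a (1, 28, 28) tensor scaled to [0, 1]
plt.imshow(image.squeeze(), cmap='gray')
plt.title('label: {}'.format(label))
plt.show()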

Use a DataLoader to build batched data, which further simplifies the code (there is no need to write a loop that slices out batches yourself).

# Build batched data
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)  # no need to shuffle for evaluation
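To see exactly what each iteration of the DataLoader yields, a minimal inspection sketch (added here for illustration):

# Fetch a single batch and confirm the shapes the network will receive
images, labels = next(iter(train_loader))
print(images.shape)   # torch.Size([64, 1, 28, 28])
print(labels.shape)   # torch.Size([64])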

Build the CNN: conv layer - pooling layer - conv layer - pooling layer - fully connected layer

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()          # input size: (1, 28, 28)
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,               # grayscale input: a single feature map
                out_channels=16,             # produce 16 feature maps
                kernel_size=5,               # 5*5 convolution kernel
                stride=1,                    # stride of 1
                padding=2,                   # pad 2 pixels per side (28*28 -> 32*32 before convolving)
            ),                               # output: (16, 28, 28)
            nn.ReLU(),                       # ReLU activation
            nn.MaxPool2d(kernel_size=2),     # 2*2 max pooling
        )                                    # output: (16, 14, 14)
        self.conv2 = nn.Sequential(
            nn.Conv2d(
                in_channels=16,              # input: (16, 14, 14)
                out_channels=32,             # output: (32, 14, 14)
                kernel_size=5,
                stride=1,
                padding=2,
            ),
            nn.ReLU(),                       # ReLU activation
            nn.MaxPool2d(kernel_size=2),     # output: (32, 7, 7)
        )
        self.out = nn.Linear(32 * 7 * 7, 10) # fully connected layer produces the class logits

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)            # flatten to (batch_size, 32*7*7)
        output = self.out(x)
        return output
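To verify the shape comments above, you can run a dummy forward pass (an illustrative check, not part of the original walkthrough):

# A fake batch holding one 28*28 grayscale image
dummy = torch.randn(1, 1, 28, 28)       # (batch, channels, height, width)
print(CNN()(dummy).shape)               # torch.Size([1, 10]): one logit per class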

Define an accuracy helper

def accuracy(predictions, labels):
    pred = torch.max(predictions.data, 1)[1]           # index of the largest logit = predicted class
    rights = pred.eq(labels.data.view_as(pred)).sum()  # count of correct predictions
    return rights, len(labels)
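A tiny illustration of how this helper behaves on dummy data (the numbers here are made up for the example):

# Two samples with 2-class logits: predicted classes are 1 and 0
logits = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
labels = torch.tensor([1, 1])           # true classes
rights, total = accuracy(logits, labels)
print(rights.item(), total)             # 1 2: one of the two predictions is correct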

Instantiate the network, set up the loss function and optimizer, and enable GPU training (just move the model and the data onto the GPU)

# Instantiate the network
net = CNN()
# Loss function
criterion = nn.CrossEntropyLoss()
# Optimizer
optimizer = optim.Adam(net.parameters(), lr=learning_rate)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)
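To confirm which device is being used and that the model's parameters actually moved there, a quick check (an illustrative addition):

print(device)                          # cuda:0 if a GPU is available, otherwise cpu
print(next(net.parameters()).device)   # the parameters now live on that device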

Train the model and print progress

# Training loop
for epoch in range(epochs):
    # collect per-batch accuracy results for the current epoch
    train_rights = []

    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.to(device)      # move the batch to the same device as the model
        target = target.to(device)
        net.train()
        output = net(data)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        right = accuracy(output, target)
        train_rights.append(right)

        if batch_idx % 100 == 0:
            net.eval()
            val_rights = []

            with torch.no_grad():   # no gradients needed during evaluation
                for (data, target) in test_loader:
                    data = data.to(device)
                    target = target.to(device)
                    output = net(data)
                    right = accuracy(output, target)
                    val_rights.append(right)

            # accuracy so far
            train_r = (sum([tup[0] for tup in train_rights]), sum([tup[1] for tup in train_rights]))
            val_r = (sum([tup[0] for tup in val_rights]), sum([tup[1] for tup in val_rights]))

            print('Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tTrain accuracy: {:.2f}%\tTest accuracy: {:.2f}%'.format(
                epoch + 1, batch_idx * batch_size, len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item(),        # .item() instead of .data: returns a plain float, works on CUDA too
                100. * train_r[0].item() / train_r[1],
                100. * val_r[0].item() / val_r[1]))

 

Print the overall accuracy of the final trained model

train_rights = []
val_rights = []
net.eval()                          # switch to evaluation mode
with torch.no_grad():               # no gradients needed during evaluation
    for (data, target) in train_loader:
        data = data.to(device)
        target = target.to(device)
        train_output = net(data)
        right = accuracy(train_output, target)
        train_rights.append(right)
    for (data, target) in test_loader:
        data = data.to(device)
        target = target.to(device)
        test_output = net(data)
        right = accuracy(test_output, target)
        val_rights.append(right)

# Overall accuracy
train_r = (sum([tup[0] for tup in train_rights]), sum([tup[1] for tup in train_rights]))
val_r = (sum([tup[0] for tup in val_rights]), sum([tup[1] for tup in val_rights]))

print('Overall train accuracy: {:.2f}%\tOverall test accuracy: {:.2f}%'.format(
    100. * train_r[0].item() / train_r[1],   # .item() instead of .numpy(): .numpy() raises on CUDA tensors
    100. * val_r[0].item() / val_r[1]))
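With training finished, the model can be applied to an individual image. A minimal inference sketch, assuming the net and test_dataset defined above:

# Predict the class of a single test image
net.eval()
image, label = test_dataset[0]                   # a (1, 28, 28) tensor and its true label
with torch.no_grad():
    logits = net(image.unsqueeze(0).to(device))  # add a batch dimension: (1, 1, 28, 28)
pred = logits.argmax(dim=1).item()
print('predicted: {}, actual: {}'.format(pred, label))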

 

