MNIST Digit Recognition with PyTorch

This example shows how to implement a simple convolutional neural network (CNN) in PyTorch for the MNIST dataset. The model consists of two convolutional layers, max-pooling layers, and a fully connected layer. Training uses the Adam optimizer and cross-entropy loss, and the loss and accuracy are monitored and recorded along the way.

Using a CNN:

import torch
from torchvision.datasets import MNIST
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms
import matplotlib.pyplot as plt
import numpy as np

BATCH_SIZE = 150
LEARNING_RATE = 0.005
class DR(nn.Module):
    def __init__(self):
        super(DR, self).__init__()
        self.forwardSeq = nn.Sequential(
            nn.Conv2d(
                in_channels=1,    # input channels
                out_channels=16,  # output channels
                kernel_size=3,    # kernel size
                stride=1,         # stride
                padding=2,        # padding width
                padding_mode="zeros"  # pad with constant zeros
            ),  # input 28*28, output 30*30 (28 + 2*2 - 3 + 1)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),  # input 30*30, output 15*15
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2),  # input 15*15, output 15*15
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),  # input 15*15, output 7*7 (floor(15/2))
        )
        self.linear = nn.Linear(32*7*7, 10)
    def forward(self, input):
        out = self.forwardSeq(input)
        out = out.view(input.size(0), -1)  # flatten everything except the batch dimension, input.size(0)
        return self.linear(out)
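# A quick shape check (an added sketch, not part of the original post): run a dummy
# batch through the convolutional stack to confirm the 32*7*7 figure used above.
with torch.no_grad():
    probe = DR().forwardSeq(torch.zeros(1, 1, 28, 28))
print(probe.shape)  # expected: torch.Size([1, 32, 7, 7])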

def training(epoch, model, criterion, optimizer, loader):
    rights = []
    losses = []
    maxAcc = {"idx":0, "num":0}
    minLoss = {"idx":0, "num":np.inf}
    for i in range(epoch):
        print("epoch:", i)
        for idx, (data, target) in enumerate(loader):
            out = model(data)
            loss = criterion(out, target)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            if idx % 25 == 0:
                pre = out.max(dim=-1)[1]
                acc = pre.eq(target).float().mean()
                rights.append(acc.item())
                losses.append(loss.item())
                maxAcc = {"idx": len(rights) * 25, "num": acc.item()} if maxAcc["num"] <= acc.item() else maxAcc
                minLoss = {"idx": len(rights) * 25, "num": loss.item()} if minLoss["num"] >= loss.item() else minLoss
                print("index({}) in epoch({}), loss is {}, acc is {}".format(idx, i, loss.item(), acc.item()))
    rights = np.array(rights)
    losses = np.array(losses)
    length = len(rights) * 25
    return rights,losses,maxAcc,minLoss,length
sampleTR = MNIST(download=True, root="./data", transform=transforms.ToTensor(), train=True)   # training split
sampleTE = MNIST(download=True, root="./data", transform=transforms.ToTensor(), train=False)  # test split
loader = DataLoader(dataset=sampleTR, batch_size=BATCH_SIZE, shuffle=True)
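# A quick look at the data format (an added sketch, not in the original post): each
# sample is a 1x28x28 float tensor scaled to [0, 1], paired with an integer label 0-9.
img, lbl = sampleTR[0]
print(img.shape, lbl)  # torch.Size([1, 28, 28]) and the digit label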
model = DR()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
rights, losses, maxAcc, minLoss, length = training(3, model, criterion, optimizer, loader)
fig,ax = plt.subplots()
ax.plot(np.array([0, length]), np.array([1, 1]), c="red", label="acc = 1 reference")
ax.plot(np.linspace(0, length, rights.size), rights, label="trainAcc", c="blue")
ax.plot(np.linspace(0, length, losses.size), losses, label="trainLoss", c="green")
ax.scatter(np.array(maxAcc["idx"]), np.array(maxAcc["num"]), c="blue", s=30, label="maxRight[index:{} number:{}]".format(maxAcc["idx"], maxAcc["num"]))
ax.scatter(np.array(minLoss["idx"]), np.array(minLoss["num"]), c="green", s=30, label="minLoss[index:{} number:{}]".format(minLoss["idx"], minLoss["num"]))
ax.legend()
plt.yticks(np.linspace(0, losses.max(), 10))
plt.xticks(np.linspace(0, length, 20), rotation=50)
plt.xlabel("Unit:BATCH, BATCH_SIZE:{} Samples".format(BATCH_SIZE))
plt.show()
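
The test split sampleTE is loaded above but never used. A minimal evaluation sketch (an addition, not part of the original post, assuming the training script above has just run in the same session):

testLoader = DataLoader(dataset=sampleTE, batch_size=BATCH_SIZE, shuffle=False)
model.eval()  # no dropout/batch-norm here, but switching modes is good practice
correct = 0
with torch.no_grad():
    for data, target in testLoader:
        pred = model(data).max(dim=-1)[1]        # index of the largest logit
        correct += pred.eq(target).sum().item()
print("test accuracy: {:.4f}".format(correct / len(sampleTE)))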

Using a plain feed-forward network:

import numpy as np
import torch
from torchvision.datasets import MNIST
import matplotlib.pyplot as plt
from torchvision import transforms
from torch import nn
from torch.utils.data import DataLoader
'''Load the data'''
mnist_tr = MNIST(root='./data', train=True, download=True, transform=transforms.ToTensor())  # single channel, 28*28, 60000 samples
mnist_te = MNIST(root='./data', train=False, download=True, transform=transforms.ToTensor())  # 10000 samples
BatchSize = 256
LearningRate = 0.005
class DR(nn.Module):
    def __init__(self):
        super(DR, self).__init__()
        self.linear_1 = nn.Linear(28*28, 28)  # first fully connected layer
        self.linear_2 = nn.Linear(28, 10)     # second fully connected layer
    def forward(self, input):
        out = self.linear_1(input)
        out = nn.functional.relu(out)  # clearer than instantiating nn.ReLU(inplace=True) per call
        return self.linear_2(out)
dr = DR()
criterion = nn.CrossEntropyLoss()  # loss function
optimal = torch.optim.Adam(dr.parameters(), lr=LearningRate)  # optimizer
loader = DataLoader(dataset=mnist_tr, shuffle=True, batch_size=BatchSize, drop_last=True)  # drop_last keeps every batch exactly BatchSize, matching the view() below
def train(epoch):
    accs = []
    losses = []
    for i in range(epoch):
        print("epoch:{}-----------------------------------------\n".format(i))
        for index, (data, target) in enumerate(loader):
            out = dr(data.view(BatchSize, 28*28))
            loss = criterion(out, target)
            loss.backward()
            optimal.step()
            optimal.zero_grad()
            if index % 5 == 0:
                pre = out.max(dim=-1)[1]
                acc = pre.eq(target).float().mean()
                accs.append(acc.item())
                losses.append(loss.item())
                print("epoch: {}, index: {}, loss: {}, accuracy: {}".format(i, index, loss, acc))
    print("accs长度:", len(accs))
    plt.plot(np.linspace(0, len(losses)*25, len(losses)), np.array(losses), label="loss curve")
    plt.plot(np.linspace(0, len(accs)*25, len(accs)), np.array(accs), label="acc curve")
    plt.legend()
    plt.show()

train(3)
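
As in the CNN script, mnist_te is loaded but never evaluated. The same kind of sketch (again an addition, not part of the original post) works here; the batch is flattened with data.size(0) rather than BatchSize, since the test loader's last batch may be smaller:

testLoader = DataLoader(dataset=mnist_te, batch_size=BatchSize, shuffle=False)
dr.eval()
correct = 0
with torch.no_grad():
    for data, target in testLoader:
        pred = dr(data.view(data.size(0), -1)).max(dim=-1)[1]  # flatten to (batch, 784)
        correct += pred.eq(target).sum().item()
print("test accuracy: {:.4f}".format(correct / len(mnist_te)))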

Here is another simple, self-contained example implementing MNIST handwritten digit recognition with PyTorch:

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms

# Load the dataset
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
trainset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
testset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False)

# Define the model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(64*5*5, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = nn.functional.relu(x)
        x = nn.functional.max_pool2d(x, 2)
        x = self.conv2(x)
        x = nn.functional.relu(x)
        x = nn.functional.max_pool2d(x, 2)
        x = x.view(-1, 64*5*5)
        x = self.fc1(x)
        x = nn.functional.relu(x)
        x = self.fc2(x)
        return nn.functional.log_softmax(x, dim=1)

net = Net()

# Define the optimizer and loss function
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.5)
criterion = nn.NLLLoss()

# Train the model
for epoch in range(10):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 100 == 99:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 100))
            running_loss = 0.0

# Test the model
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))

In this example we first load the MNIST dataset with torchvision and normalize the inputs. We then define a network with two convolutional layers and two fully connected layers, using ReLU activations, negative log-likelihood as the loss, and SGD as the optimizer; finally we train the model and measure its accuracy on the test set. Note that log_softmax followed by NLLLoss is equivalent to the CrossEntropyLoss used in the scripts above.