PyTorch深度学习实践概论笔记10练习-Pytorch实现手写数字(MNIST)识别

28 篇文章 17 订阅

简单回顾一下PyTorch深度学习实践概论笔记10-卷积神经网络基础篇的练习题。如下图所示:

• Try a more complex CNN:(尝试更复杂的CNN)

        • Conv2d Layer *3

        • ReLU Layer * 3

        • MaxPooling Layer * 3

        • Linear Layer * 3

• Try different configuration of this CNN:(尝试不同的CNN配置)

        • Compare their performance.

老师给的课后练习中建议的CNN包含3个卷积层、3个ReLU激活层、3个池化层和3个线性层,我自己尝试去构造了一下,没有成功。然后我就去掉了一个池化层,构造了一个包含3个卷积层(3*3卷积层)、3个ReLU激活层、2个池化层(最大池化层)和3个线性层的CNN

输入维度输出维度计算过程
conv1(64,1,28,28)(64,16,26,26)28-3+1=26
relu(64,16,26,26)(64,16,26,26)不变
conv2(64,16,26,26)(64,32,24,24)26-3+1=24
relu(64,32,24,24)(64,32,24,24)不变
pooling(64,32,24,24)(64,32,12,12)24/2=12
conv3(64,32,12,12)(64,64,10,10)12-3+1=10
relu(64,64,10,10)(64,64,10,10)不变
pooling(64,64,10,10)(64,64,5,5)10/2=5
fc1(64,1600)(64,512)64*5*5=1600
fc2(64,512)(64,64)
fc3(64,64)(64,10)

【小建议】在自己搭建模型时,最好在forward函数中打印x.shape,有助于之后解决bug! 

具体设计模型的代码如下:

#2.设计模型
class ExNet(torch.nn.Module):
    """MNIST classifier: three 3x3 conv/ReLU stages (two max-pooled) followed
    by a 3-layer fully connected head producing 10-class logits.

    Prints the tensor shape after every stage of forward() as a debugging aid.
    """

    def __init__(self):
        super(ExNet, self).__init__()
        # Feature extractor: 1 -> 16 -> 32 -> 64 channels, 3x3 kernels, no padding.
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=3)
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=3)
        self.conv3 = torch.nn.Conv2d(32, 64, kernel_size=3)
        # One shared 2x2 max-pool module, applied twice in forward().
        self.pooling = torch.nn.MaxPool2d(2)
        # Classifier head: 64 channels * 5 * 5 spatial = 1600 features in.
        self.fc1 = torch.nn.Linear(1600, 512)
        self.fc2 = torch.nn.Linear(512, 64)
        self.fc3 = torch.nn.Linear(64, 10)

    def forward(self, x):
        n = x.size(0)  # batch size, kept for the flatten step
        x = F.relu(self.conv1(x))                # (n,1,28,28) -> (n,16,26,26)
        print(x.shape)
        x = self.pooling(F.relu(self.conv2(x)))  # -> (n,32,24,24) -> (n,32,12,12)
        print(x.shape)
        x = self.pooling(F.relu(self.conv3(x)))  # -> (n,64,10,10) -> (n,64,5,5)
        print(x.shape)
        x = x.view(n, -1)                        # flatten to (n, 1600)
        print(x.shape)
        x = F.relu(self.fc1(x))
        print(x.shape)
        x = F.relu(self.fc2(x))
        print(x.shape)
        x = self.fc3(x)  # raw logits; CrossEntropyLoss applies softmax itself
        print(x.shape)
        return x
     
# Instantiate the network and print its layer summary.
# (NOTE: "modele" keeps the author's original spelling.)
modele = ExNet()
print(modele)

输出模型结果如下:

ExNet(
  (conv1): Conv2d(1, 16, kernel_size=(3, 3), stride=(1, 1))
  (conv2): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1))
  (conv3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))
  (pooling): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (fc1): Linear(in_features=1600, out_features=512, bias=True)
  (fc2): Linear(in_features=512, out_features=64, bias=True)
  (fc3): Linear(in_features=64, out_features=10, bias=True)
)

其他部分的代码和本节内容类似,最后测试集的准确率为99%,和之前基础篇模型的约98%相比,准确率进一步提高。

完整的代码如下(可跑通):

# 0. Imports
import torch
# Data-loading utilities
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
# Functional API (F.relu, ...)
import torch.nn.functional as F
# Optimizers
import torch.optim as optim

# 1. Prepare the datasets
transform = transforms.Compose([
    transforms.ToTensor(),  # convert PIL image to a [0, 1] float tensor
    # Normalize with the MNIST global mean (0.1307) and std (0.3081),
    # precomputed over the whole training set.
    transforms.Normalize((0.1307, ), (0.3081, ))
])

# NOTE(review): download=False assumes ./data/ already contains MNIST;
# set download=True when running on a fresh machine.
train_dataset = datasets.MNIST(root='./data/',
                               train=True,
                               download=False,
                               transform=transform)

train_loader = DataLoader(train_dataset,
                          shuffle=True,  # reshuffle each epoch for training
                          batch_size=64)

test_dataset = datasets.MNIST(root='./data/',
                              train=False,
                              download=False,
                              transform=transform)

test_loader = DataLoader(test_dataset,
                         shuffle=False,  # keep evaluation order deterministic
                         batch_size=64)

# 2. Model definition
class ExNet(torch.nn.Module):
    """MNIST classifier: three 3x3 conv/ReLU stages (two max-pooled),
    then a 3-layer fully connected head producing 10-class logits."""

    def __init__(self):
        super(ExNet, self).__init__()
        # Convolution stack: 1 -> 16 -> 32 -> 64 channels, 3x3 kernels.
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=3)
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=3)
        self.conv3 = torch.nn.Conv2d(32, 64, kernel_size=3)
        self.pooling = torch.nn.MaxPool2d(2)  # shared 2x2 max-pool
        # Head: 64 channels * 5 * 5 spatial = 1600 flattened features.
        self.fc1 = torch.nn.Linear(1600, 512)
        self.fc2 = torch.nn.Linear(512, 64)
        self.fc3 = torch.nn.Linear(64, 10)

    def forward(self, x):
        # (N,1,28,28) -> (N,16,26,26)
        out = F.relu(self.conv1(x))
        # -> (N,32,24,24), pooled to (N,32,12,12)
        out = self.pooling(F.relu(self.conv2(out)))
        # -> (N,64,10,10), pooled to (N,64,5,5)
        out = self.pooling(F.relu(self.conv3(out)))
        out = out.view(out.size(0), -1)  # flatten to (N, 1600)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        # Raw logits: CrossEntropyLoss applies log-softmax internally.
        return self.fc3(out)
     
# Instantiate the model (spelling "modele" kept from the original).
modele = ExNet()
# print(modele)

# Pick GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Move all of the model's parameters and buffers to the chosen device
# (converts them to CUDA tensors when a GPU is selected).
modele.to(device)

# 3. Loss and optimizer
# CrossEntropyLoss combines log-softmax and NLL loss, so the model
# outputs raw logits.
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(modele.parameters(), lr=0.01, momentum=0.5)

#4.训练代码与测试代码
# 4. Training and testing code
def train(epoch):
    """Run one training epoch over train_loader, logging the average loss
    every 300 batches.

    Relies on module-level globals: train_loader, device, modele,
    optimizer, criterion.

    Args:
        epoch: 0-based epoch index, used only for log messages.
    """
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        # inputs: (N, 1, 28, 28) image batch; target: (N,) class labels.
        inputs, target = data
        # Move the batch to the same device as the model.
        inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()

        # forward + backward + update
        outputs = modele(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if batch_idx % 300 == 299:
            # BUGFIX: average over the 300 accumulated batches
            # (the original divided by 2000, under-reporting the loss).
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    """Evaluate modele on test_loader and print the overall accuracy.

    Relies on module-level globals: test_loader, device, modele.
    """
    correct = 0
    total = 0
    with torch.no_grad():  # inference only: no autograd bookkeeping
        for data in test_loader:
            inputs, target = data
            # Move the batch to the same device as the model.
            inputs, target = inputs.to(device), target.to(device)
            outputs = modele(inputs)
            # Predicted class = index of the max logit per row.
            # (Use the tensor directly; `.data` is a legacy autograd idiom.)
            _, predicted = torch.max(outputs, dim=1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
        print('Accuracy on test set: %d %% [%d/%d]' % (100 * correct / total, correct, total))
    
if __name__ == '__main__':
    # FIX: normalized the mixed space+tab indentation of the loop body.
    # 10 epochs: train, then evaluate after each epoch.
    for epoch in range(10):
        train(epoch)
        test()

运行结果如下:

[1,   300] loss: 0.207
[1,   600] loss: 0.044
[1,   900] loss: 0.026
Accuracy on test set: 96 % [9655/10000]
[2,   300] loss: 0.017
[2,   600] loss: 0.015
[2,   900] loss: 0.013
Accuracy on test set: 97 % [9786/10000]
[3,   300] loss: 0.010
[3,   600] loss: 0.011
[3,   900] loss: 0.009
Accuracy on test set: 98 % [9850/10000]
[4,   300] loss: 0.008
[4,   600] loss: 0.008
[4,   900] loss: 0.008
Accuracy on test set: 98 % [9840/10000]
[5,   300] loss: 0.006
[5,   600] loss: 0.006
[5,   900] loss: 0.006
Accuracy on test set: 98 % [9866/10000]
[6,   300] loss: 0.005
[6,   600] loss: 0.005
[6,   900] loss: 0.005
Accuracy on test set: 98 % [9883/10000]
[7,   300] loss: 0.005
[7,   600] loss: 0.004
[7,   900] loss: 0.004
Accuracy on test set: 98 % [9870/10000]
[8,   300] loss: 0.003
[8,   600] loss: 0.004
[8,   900] loss: 0.004
Accuracy on test set: 99 % [9905/10000]
[9,   300] loss: 0.003
[9,   600] loss: 0.003
[9,   900] loss: 0.003
Accuracy on test set: 98 % [9894/10000]
[10,   300] loss: 0.003
[10,   600] loss: 0.002
[10,   900] loss: 0.002
Accuracy on test set: 98 % [9874/10000]

 可以看到输出结果最后的一次test准确率有99%,效果不错。

说明:记录学习笔记,如果错误欢迎指正!写文章不易,转载请联系我。

  • 3
    点赞
  • 7
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
### 回答1: PyTorch是一种深度学习框架,可以用来实现MNIST手写数字识别MNIST是一个常用的数据集,包含了大量手写数字的图像和对应的标签。我们可以使用PyTorch来构建一个卷积神经网络模型,对这些图像进行分类,从而实现手写数字识别的功能。具体实现过程可以参考PyTorch官方文档或相关教程。 ### 回答2: MNIST是一个经典的手写数字识别问题,其数据集包括60,000个训练样本和10,000个测试样本。PyTorch作为深度学习领域的热门工具,也可以用来实现MNIST手写数字识别。 第一步是加载MNIST数据集,可以使用PyTorch的torchvision.datasets模块实现。需要注意的是,MNIST数据集是灰度图像,需要将其转换为标准的三通道RGB图像。 ```python import torch import torchvision import torchvision.transforms as transforms # 加载数据集 train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]), download=True) test_dataset = torchvision.datasets.MNIST(root='./data', train=False, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]), download=True) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=64, shuffle=False) ``` 第二步是构建模型。在MNIST手写数字识别问题中,可以选择使用卷积神经网络(CNN),其可以捕获图像中的局部特征,这对于手写数字识别非常有用。 ```python import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32, kernel_size=3) self.conv2 = nn.Conv2d(32, 64, kernel_size=3) self.dropout1 = nn.Dropout2d(0.25) self.dropout2 = nn.Dropout2d(0.5) self.fc1 = nn.Linear(64*12*12, 128) self.fc2 = nn.Linear(128, 10) def forward(self, x): x = self.conv1(x) x = F.relu(x) x = self.conv2(x) x = F.relu(x) x = F.max_pool2d(x, kernel_size=2) x = self.dropout1(x) x = torch.flatten(x, 1) x = self.fc1(x) x = F.relu(x) x = self.dropout2(x) x = self.fc2(x) output = F.log_softmax(x, dim=1) return output model = Net() ``` 第三步是定义优化器和损失函数,并进行训练和测试。在PyTorch中,可以选择使用交叉熵损失函数和随机梯度下降(SGD)优化器进行训练。 ```python import torch.optim as optim # 定义优化器和损失函数 criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5) # 训练模型 for epoch in range(10): running_loss = 0.0 for i, data 
in enumerate(train_loader, 0): inputs, labels = data optimizer.zero_grad() outputs = model(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.item() if i % 100 == 99: print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 100)) running_loss = 0.0 # 测试模型 correct = 0 total = 0 with torch.no_grad(): for data in test_loader: images, labels = data outputs = model(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total)) ``` 最后,可以输出测试集上的准确率。对于这个模型,可以得到大约98%的准确率,具有很好的性能。 ### 回答3: PyTorch是一个常用的深度学习框架,通过PyTorch可以方便地实现mnist手写数字识别mnist手写数字数据集是机器学习领域的一个经典数据集,用于训练和测试数字识别算法模型。以下是PyTorch实现mnist手写数字识别的步骤: 1. 获取mnist数据集:可以通过PyTorch提供的工具包torchvision来获取mnist数据集。 2. 数据预处理:将数据集中的手写数字图片转换为张量,然后进行标准化处理,使得每个像素值都在0到1之间。 3. 构建模型:可以使用PyTorch提供的nn模块构建模型,常用的模型包括卷积神经网络(CNN)和全连接神经网络(FNN)。例如,可以使用nn.Sequential()函数将多个层逐一堆叠起来,形成一个模型。 4. 训练模型:通过定义损失函数和优化器,使用训练数据集对模型进行训练。常用的损失函数包括交叉熵损失函数和均方误差损失函数,常用的优化器包括随机梯度下降(SGD)和Adam。 5. 
测试模型:通过测试数据集对模型进行测试,可以用测试准确率来评估模型的性能。 以下是一个简单的PyTorch实现mnist手写数字识别的代码: ``` python import torch import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms # 获取数据集 train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True) test_dataset = torchvision.datasets.MNIST(root='./data', train=False, transform=transforms.ToTensor()) # 数据加载器 train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=100, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=100, shuffle=False) # 构建模型 class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32, kernel_size=5) self.conv2 = nn.Conv2d(32, 64, kernel_size=5) self.fc1 = nn.Linear(1024, 256) self.fc2 = nn.Linear(256, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = x.view(-1, 1024) x = F.relu(self.fc1(x)) x = self.fc2(x) return F.log_softmax(x, dim=1) model = Net() # 定义损失函数和优化器 criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=0.001) # 训练模型 num_epochs = 10 for epoch in range(num_epochs): for i, (images, labels) in enumerate(train_loader): # 转换为模型所需格式 images = images.float() labels = labels.long() # 前向传播和计算损失 outputs = model(images) loss = criterion(outputs, labels) # 反向传播和更新参数 optimizer.zero_grad() loss.backward() optimizer.step() # 每100个批次输出一次日志 if (i+1) % 100 == 0: print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, i+1, len(train_dataset)//100, loss.item())) # 测试模型 correct = 0 total = 0 with torch.no_grad(): # 不需要计算梯度 for images, labels in test_loader: # 转换为模型所需格式 images = images.float() labels = labels.long() # 前向传播 outputs = model(images) _, predicted = torch.max(outputs.data, 1) # 统计预测正确数和总数 total += labels.size(0) correct += (predicted == labels).sum().item() print('Test Accuracy: 
{:.2f}%'.format(100 * correct / total)) ``` 以上就是一个基于PyTorchmnist手写数字识别的简单实现方法。需要注意的是,模型的设计和训练过程可能会受到多种因素的影响,例如网络结构、参数初始化、优化器等,需要根据实际情况进行调整和优化,才能达到更好的性能。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值