Using LeNet-5 on the CIFAR-10 Dataset

1. LeNet

Although LeNet-5 is a small network, it contains the basic building blocks of deep learning: convolutional layers, pooling layers, and fully connected layers, and it is the foundation of many later deep learning models. Here we analyze LeNet-5 in depth and use a hands-on example to deepen our understanding of convolutional and pooling layers.

(Figure: the LeNet-5 network architecture)
C1 layer:
Type: convolutional layer
Input image: 32×32
Kernel size: 5×5
Number of kernels: 6
Output feature map size: 28×28 (32 − 5 + 1 = 28)

S2 layer:
Type: pooling layer
Input: 28×28
Pooling window: 2×2
Output feature map size: 14×14

C3 layer:
Type: convolutional layer
Input: 14×14
Kernel size: 5×5
Number of kernels: 16
Output feature map size: 10×10 (14 − 5 + 1 = 10)

S4 layer:
Type: pooling layer
Input: 10×10
Pooling window: 2×2
Output feature map size: 5×5

C5, F6, OUTPUT layers:
Type: fully connected layers
[b,400] -> [b,120] -> [b,84] -> [b,10]
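The spatial sizes above follow the usual convolution arithmetic, output = (input − kernel)/stride + 1, while each 2×2 stride-2 pooling step halves both dimensions. A minimal sketch to verify the numbers in PyTorch (hyperparameters exactly as listed above):

import torch
from torch import nn

x = torch.randn(1, 3, 32, 32)  # one CIFAR-10-sized image
x = nn.Conv2d(3, 6, 5)(x)      # C1: (32 - 5)/1 + 1 = 28
x = nn.AvgPool2d(2)(x)         # S2: 28 / 2 = 14
x = nn.Conv2d(6, 16, 5)(x)     # C3: (14 - 5)/1 + 1 = 10
x = nn.AvgPool2d(2)(x)         # S4: 10 / 2 = 5
x = nn.Flatten()(x)            # 16 * 5 * 5 = 400
print(x.shape)                 # torch.Size([1, 400])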

2. Training and Testing on CIFAR-10

First, write the Lenet5 class. Save it as lenet5.py, since the training script below imports it from there:

import torch
from torch import nn
from torch.nn import functional as F

class Lenet5(nn.Module):
    def __init__(self):
        super(Lenet5, self).__init__()
        self.model = nn.Sequential(
            # x:[b,3,32,32]->[b,6,28,28]
            nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=0),
            # x:[b,6,28,28]->[b,6,14,14]
            nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
            # x:[b,6,14,14]->[b,16,10,10]
            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
            # x:[b,16,10,10]->[b,16,5,5]
            nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
            # x:[b,16,5,5]->[b,400]
            nn.Flatten(),
            nn.Linear(400, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            # no activation after the final layer: CrossEntropyLoss expects raw logits
            nn.Linear(84, 10),
        )
        

    def forward(self, x):
        x = self.model(x)
        return x
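A quick smoke test of the class, reusing the imports at the top of lenet5.py (a minimal sketch; the batch size of 2 is arbitrary):

if __name__ == '__main__':
    net = Lenet5()
    tmp = torch.randn(2, 3, 32, 32)  # dummy batch of two CIFAR-10-sized images
    out = net(tmp)
    print('lenet5 out:', out.shape)  # expected: torch.Size([2, 10])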

Then the main script, which performs training and testing:

import torch
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
from lenet5 import Lenet5
from torch import nn, optim


def main():
    batch_size = 50

    cifar_train = datasets.CIFAR10('cifar', train=True, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
    ]), download=True)
    cifar_train = DataLoader(cifar_train, batch_size=batch_size, shuffle=True)

    cifar_test = datasets.CIFAR10('cifar', train=False, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
    ]), download=True)
    cifar_test = DataLoader(cifar_test, batch_size=batch_size, shuffle=False)

    # sanity check: inspect one training batch
    x, label = next(iter(cifar_train))
    print(x.shape, label.shape)


    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = Lenet5().to(device)
    criteon = nn.CrossEntropyLoss().to(device)  # cross-entropy loss, computed on raw logits
    optimizer = optim.Adam(model.parameters(), lr=1e-3)  # Adam optimizer with learning rate 1e-3

    for epoch in range(1000):

        model.train()  # switch to training mode
        for x, label in cifar_train:
            x, label = x.to(device), label.to(device)
            logits = model(x)
            loss = criteon(logits, label)
            optimizer.zero_grad()  # clear accumulated gradients
            loss.backward()  # backpropagate to compute new gradients
            optimizer.step()  # update the parameters
        print(epoch, 'train loss:', loss.item())

        model.eval()  # switch to evaluation mode
        with torch.no_grad():  # no gradient tracking, so the test data cannot change the network
            total_correct = 0
            total_number = 0
            for x, label in cifar_test:
                x, label = x.to(device), label.to(device)
                logits = model(x)
                pred = logits.argmax(dim=1)
                total_correct += torch.eq(pred, label).float().sum().item()
                total_number += x.size(0)
            accuracy = total_correct / total_number  # fraction of correctly classified test images
            print(epoch, 'test accuracy:', accuracy)

if __name__ == '__main__':
    main()
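One optional tweak that is not in the script above: adding transforms.Normalize to the data pipeline usually speeds up convergence. A hedged sketch using commonly quoted CIFAR-10 channel statistics (assumed values; recompute them from the training set if exact numbers matter):

transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    # per-channel mean/std for CIFAR-10 (assumed, widely circulated values)
    transforms.Normalize(mean=(0.4914, 0.4822, 0.4465),
                         std=(0.2470, 0.2435, 0.2616)),
])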