第九章 CIFAR10与ResNet实战

一、CIFAR-10 数据集介绍

  • 一种包含10种分类的数据集
  • 总共 60000 张 32*32 图片
  • 其中 50000 张作为 train,10000 张作为 test
    (图:CIFAR-10 数据集示例,原文此处为插图)

二、LeNet5实现

# -*- coding: UTF-8 -*-
'''
@version: 1.0
@PackageName: pytorch_learning - Lenet5.py
@author: yonghao
@Description: Lenet-5网络
@since 2021/02/23 17:09
'''
from collections import OrderedDict
import torch
from torch import nn


class Flatten(nn.Module):
    """Collapse every non-batch dimension into one (2D feature maps -> 1D vectors)."""

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        # Keep the batch dimension, merge all remaining dimensions.
        batch_size = x.shape[0]
        return x.view(batch_size, -1)


class LeNet5(nn.Module):
    """LeNet-5 style CNN for the CIFAR-10 dataset (3x32x32 inputs, 10 classes)."""

    def __init__(self):
        super(LeNet5, self).__init__()
        # Layer names are kept identical to the original so saved
        # state_dicts remain loadable.
        self.model = nn.Sequential(OrderedDict([
            # x: [b, 3, 32, 32] => [b, 6, 28, 28]
            ("layer1", nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=0)),
            # x: [b, 6, 28, 28] => [b, 6, 14, 14]
            ("layer2", nn.MaxPool2d(kernel_size=2, stride=2)),
            # x: [b, 6, 14, 14] => [b, 16, 10, 10]
            ("layer3", nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0)),
            # x: [b, 16, 10, 10] => [b, 16, 5, 5]
            ("layer4", nn.MaxPool2d(kernel_size=2, stride=2)),
            # x: [b, 16, 5, 5] => [b, 400]
            # Built-in nn.Flatten replaces the hand-rolled Flatten module;
            # it is parameter-free, so checkpoints are unaffected.
            ("layer4-5-mid", nn.Flatten()),
            # [b, 400] => [b, 120]
            ("layer5-1", nn.Linear(400, 120)),
            ("layer5-2", nn.ReLU(inplace=True)),
            # x: [b, 120] => [b, 84]
            ("layer6-1", nn.Linear(120, 84)),
            ("layer6-2", nn.ReLU(inplace=True)),
            # x: [b, 84] => [b, 10]
            ("layer7(output)", nn.Linear(84, 10))
        ]))

    def forward(self, x):
        """Return raw class logits of shape [b, 10] for input of shape [b, 3, 32, 32]."""
        return self.model(x)


if __name__ == "__main__":
net = LeNet5()
tmp = torch.rand(2, 3, 32, 32)
out = net(tmp)
print("conv out:", out.shape)

三、ResNet18实现

# -*- coding: UTF-8 -*-
'''
@version: 1.0
@PackageName: pytorch_learning - ResNet.py
@author: yonghao
@Description: 残差网络
@since 2021/02/23 18:46
'''
import torch
from torch import nn
from torch.nn import functional as F


class ResBlk(nn.Module):
    """Basic residual block: two 3x3 convolutions plus a shortcut connection.

    The first convolution may downsample (stride > 1); in that case the
    shortcut is projected with a 1x1 convolution so both branches match.
    """

    def __init__(self, channel_in, channel_out, stride=1):
        """
        :param channel_in: number of input channels
        :param channel_out: number of output channels
        :param stride: stride of the first convolution (spatial downsampling)
        """
        super(ResBlk, self).__init__()
        # Both 3x3 convolutions use padding=1 ("same" padding).
        self.conv1 = nn.Conv2d(channel_in, channel_out, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(channel_out)
        self.conv2 = nn.Conv2d(channel_out, channel_out, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(channel_out)
        self.extra = nn.Sequential()
        # Bug fix: the shortcut must also be projected when stride != 1,
        # not only when the channel count changes. Otherwise extra(x) keeps
        # the input spatial size while the main branch is downsampled, and
        # the addition silently broadcasts instead of adding elementwise.
        if channel_in != channel_out or stride != 1:
            self.extra = nn.Sequential(
                nn.Conv2d(channel_in, channel_out, kernel_size=1, stride=stride),
                nn.BatchNorm2d(channel_out)
            )

    def forward(self, x):
        """
        :param x: input tensor of shape [b, channel_in, h, w]
        :return: tensor of shape [b, channel_out, h', w'] (spatial size per stride)
        """
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # shortcut: identity when shapes already match, 1x1 projection otherwise
        out = self.extra(x) + out
        return F.relu(out)


class ResNet18(nn.Module):
    """A compact ResNet-18-like network for CIFAR-10 classification."""

    def __init__(self):
        super(ResNet18, self).__init__()
        # Stem: 3 -> 64 channels; stride-3 valid conv shrinks 32x32 to 10x10.
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=3, padding=0),
            nn.BatchNorm2d(64)
        )
        # Four residual blocks: channels grow 64->128->256->512 while the
        # spatial resolution is halved by each stride-2 block.
        self.blk1 = ResBlk(64, 128, stride=2)
        self.blk2 = ResBlk(128, 256, stride=2)
        self.blk3 = ResBlk(256, 512, stride=2)
        self.blk4 = ResBlk(512, 512, stride=2)
        # Linear head over the globally pooled 512-dim feature vector.
        self.outlayer = nn.Linear(512 * 1 * 1, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        for block in (self.blk1, self.blk2, self.blk3, self.blk4):
            x = block(x)
        # adaptive_avg_pool2d performs global average pooling over h, w:
        # whatever the spatial size is here, it becomes 1x1.
        x = F.adaptive_avg_pool2d(x, [1, 1])
        flat = x.view(x.shape[0], -1)
        return self.outlayer(flat)


if __name__ == "__main__":
tmp = torch.rand(2, 64, 32, 32)
blk = ResBlk(64, 128, stride=2)
out = blk(tmp)
# print(out.shape)

x = torch.rand(2, 3, 32, 32)
net = ResNet18()
logits = net(x)
print(logits.shape)

四、通用代码,使用模型进行训练与测试

# -*- coding: UTF-8 -*-
'''
@version: 1.0
@PackageName: pytorch_learning - CIFAR10_cnn_demo.py
@author: yonghao
@Description: 对于CIFAR 10数据集的分类任务,使用CNN网络实现
@since 2021/02/23 16:21
'''
import torch
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
from realwork.work4_CNN_net_demo.Lenet5 import LeNet5
from realwork.work4_CNN_net_demo.ResNet import ResNet18

# Device selection: fall back to CPU when CUDA is unavailable, so the
# script still runs on machines without a GPU (the original hard-coded
# 'cuda' and crashed on .to(device) elsewhere).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Cross-entropy loss (expects raw logits; applies log-softmax internally).
criteon = torch.nn.CrossEntropyLoss()


def main():
    """Load CIFAR-10, build the network and launch training + evaluation."""
    # One mini-batch size shared by both loaders (the original assigned
    # batch_size = 32 twice; once is enough).
    batch_size = 32

    # Training split (downloaded to ./cifar on first run).
    cifar_train = datasets.CIFAR10('cifar', train=True, transform=transforms.Compose([
        transforms.Resize([32, 32]),
        transforms.ToTensor()
    ]), download=True)

    # DataLoader parameters (summary):
    #   dataset     - the dataset to draw samples from
    #   batch_size  - samples per batch (default 1)
    #   shuffle     - reshuffle every epoch when True (default False)
    #   sampler     - custom sampling strategy; overrides shuffle if given
    #   num_workers - loader subprocesses; 0 = load in the main process
    #   drop_last   - drop the final incomplete batch when True (default False)
    cifar_train = DataLoader(cifar_train, batch_size=batch_size, shuffle=True)

    # Test split.
    cifar_test = datasets.CIFAR10('cifar', train=False, transform=transforms.Compose([
        transforms.Resize([32, 32]),
        transforms.ToTensor()
    ]), download=True)

    # Evaluation order does not affect accuracy, so the test loader is not
    # shuffled (the original needlessly set shuffle=True here).
    cifar_test = DataLoader(cifar_test, batch_size=batch_size, shuffle=False)

    # net = LeNet5().to(device)
    net = ResNet18().to(device)
    train(net, dataset=cifar_train, test_dataset=cifar_test)


def train(net, dataset, test_dataset, lr=1e-3, epochs=10):
    """Train the network with Adam and evaluate on the test set each epoch.

    :param net: the network to train
    :param dataset: DataLoader over the training set
    :param test_dataset: DataLoader over the test set
    :param lr: learning rate for Adam
    :param epochs: number of passes over the training data
    :return: None
    """
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    for epoch in range(epochs):
        # train mode: enables dropout / batchnorm statistics updates
        net.train()
        total_loss = 0
        for batch_idx, (data, label) in enumerate(dataset):
            # data: [b, 3, 32, 32], label: [b]
            data, label = data.to(device), label.to(device)
            # logits: [b, 10]
            logits = net(data)
            loss = criteon(logits, label)
            total_loss += loss.item()
            # clear gradients accumulated by the previous step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Bug fix: CrossEntropyLoss already averages over each batch, so the
        # epoch mean divides by the number of batches (len(dataset)), not by
        # the number of samples (len(dataset.dataset)) as before.
        print('Train Iter_{},loss={}'.format(epoch, total_loss / len(dataset)))
        # evaluation needs no autograd bookkeeping
        with torch.no_grad():
            test(net, dataset=test_dataset)


def test(net, dataset):
    """Report classification accuracy of `net` over the given DataLoader.

    :param net: the network to evaluate
    :param dataset: DataLoader over the evaluation set
    :return: None (prints the accuracy)
    """
    # eval mode: freezes batchnorm statistics, disables dropout
    net.eval()
    correct = 0
    # no_grad here as well, so accuracy is cheap even when the caller
    # forgot to disable autograd
    with torch.no_grad():
        for data, label in dataset:
            # data: [b, 3, 32, 32], label: [b]
            data, label = data.to(device), label.to(device)
            # logits: [b, 10]
            logits = net(data)
            pred = logits.argmax(dim=1)
            # Bug fix: .item() converts the 0-dim tensor to a plain int so
            # the report prints "6325" instead of "tensor(6325)".
            correct += pred.eq(label).sum().item()
    print(
        'Test accuracy:{} / {} = {:.4f} %'.format(correct, len(dataset.dataset), 100. * correct / len(dataset.dataset)))


if __name__ == "__main__":
main()
'''
LeNet5
...
9 0.7916133403778076
Test accuracy:6325 / 10000 = 63.2500
'''
  • 10
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

ModelBulider

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值