010_wz_ledr PyTorch Deep Learning in Practice, Lecture 11: Convolutional Neural Networks (CNN), Advanced Topics

1. Objective

The goal this time is to implement some of GoogLeNet's modules (the Inception block), and to see how the vanishing-gradient problem can be addressed (ResNet).

2. Code

One of the Inception modules used in GoogLeNet (implemented below as InceptionA):

[Figure: the Inception-A block and its four parallel branches]

For the benefits of the Inception module, see this article: Inception 模块作用.
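
One reason the Inception design stays cheap is the 1x1 convolution: it changes the number of channels without touching the spatial size, so placing one before an expensive 5x5 convolution slashes the multiply-accumulate count. A minimal sketch of this effect (the channel counts 192, 16, and 32 here are illustrative, not from this lecture):

import torch

# A 1x1 convolution only mixes channels; height and width are unchanged.
x = torch.randn(1, 192, 28, 28)
reduce = torch.nn.Conv2d(192, 16, kernel_size=1)
print(reduce(x).shape)  # torch.Size([1, 16, 28, 28])

# Rough multiply-accumulate counts for producing 32 output channels with 5x5 kernels:
direct = 5 * 5 * 192 * 32 * 28 * 28                                # 5x5 straight on 192 channels
reduced = 1 * 1 * 192 * 16 * 28 * 28 + 5 * 5 * 16 * 32 * 28 * 28  # 1x1 down to 16, then 5x5
print(direct, reduced)  # 120422400 vs 12443648: roughly a 10x saving
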
We will build a network with this layout:

input ->
conv -> maxpool -> relu ->
Inception ->
conv -> maxpool -> relu ->
Inception ->
fully connected layer

The dataset is still MNIST.

import torch.nn.functional as F
import torch
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader

# Prepare the dataset
batch_size = 64
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])  # MNIST mean and std

train_data = datasets.MNIST(root='./dataset/mnist', train=True, transform=transform, download=True)
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True, num_workers=0)
test_data = datasets.MNIST(root='./dataset/mnist', train=False, transform=transform, download=True)
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False, num_workers=0)  # no need to shuffle the test set


class InceptionA(torch.nn.Module):
    # Four parallel branches whose outputs are concatenated along the channel
    # dimension: 24 (avg-pool) + 16 (1x1) + 24 (5x5 path) + 24 (3x3 path) = 88.
    def __init__(self, in_channels):
        super(InceptionA, self).__init__()
        self.branch_pool = torch.nn.Conv2d(in_channels=in_channels, out_channels=24, kernel_size=1)

        self.branch_conv1x1 = torch.nn.Conv2d(in_channels=in_channels, out_channels=16, kernel_size=1)

        self.branch_conv5x5_1 = torch.nn.Conv2d(in_channels=in_channels, out_channels=16, kernel_size=1)
        self.branch_conv5x5_2 = torch.nn.Conv2d(in_channels=16, out_channels=24, kernel_size=5, padding=2)

        self.branch_conv3x3_1 = torch.nn.Conv2d(in_channels=in_channels, out_channels=16, kernel_size=3, padding=1)
        self.branch_conv3x3_2 = torch.nn.Conv2d(in_channels=16, out_channels=24, kernel_size=3, padding=1)
        self.branch_conv3x3_3 = torch.nn.Conv2d(in_channels=24, out_channels=24, kernel_size=3, padding=1)

    def forward(self, x):
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        branch_conv1x1 = self.branch_conv1x1(x)

        branch_conv5x5 = self.branch_conv5x5_1(x)
        branch_conv5x5 = self.branch_conv5x5_2(branch_conv5x5)

        branch_conv3x3 = self.branch_conv3x3_1(x)
        branch_conv3x3 = self.branch_conv3x3_2(branch_conv3x3)
        branch_conv3x3 = self.branch_conv3x3_3(branch_conv3x3)

        outputs = [branch_pool, branch_conv1x1, branch_conv3x3, branch_conv5x5]
        outputs = torch.cat(outputs, dim=1)
        return outputs


class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(in_channels=88, out_channels=20, kernel_size=5)  # InceptionA outputs 88 channels

        self.incep1 = InceptionA(in_channels=10)
        self.incep2 = InceptionA(in_channels=20)

        self.mp = torch.nn.MaxPool2d(kernel_size=2)
        self.fc = torch.nn.Linear(1408, 10)  # 88 channels * 4 * 4 spatial positions = 1408

    def forward(self, x):
        x_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = self.incep1(x)
        x = F.relu(self.mp(self.conv2(x)))
        x = self.incep2(x)
        x = x.view(x_size, -1)
        x = self.fc(x)
        return x


model = Net()

# Build the loss function and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), momentum=0.5, lr=0.01)


# Training loop
def train(epoch):
    running_loss = 0.
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print("[%d, %d], loss=%.3f" % (epoch+1, batch_idx+1, running_loss/300))
            running_loss = 0.


def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, data in enumerate(test_loader, 0):
            inputs, labels = data
            total += inputs.size(0)
            outputs = model(inputs)
            _, predicted = torch.max(outputs, dim=1)  # index of the max logit = predicted class
            correct += (predicted == labels).sum().item()
        print('accuracy on test set: %d %% ' % (100*correct/total))


if __name__ == "__main__":
    for epoch in range(10):
        train(epoch)
        test()
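
As a quick sanity check (my own addition, assuming the classes defined above are in scope), you can push dummy tensors through the pieces and confirm the channel counts the code relies on:

# Shape check (not part of the lecture code)
print(InceptionA(in_channels=10)(torch.randn(1, 10, 12, 12)).shape)  # torch.Size([1, 88, 12, 12])
print(Net()(torch.randn(1, 1, 28, 28)).shape)                        # torch.Size([1, 10])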

When a neural network gets too deep, vanishing gradients become a real risk. Next we look at a module that addresses this problem: the ResidualBlock (residual structure) from ResNet:
[Figure: a residual block; the output is relu(F(x) + x)]
Readers interested in residual networks can refer to this article: resnet50网络结构图_ResNet——CNN经典网络模型详解(pytorch实现).
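
Why does the skip connection help? The block computes H(x) = F(x) + x, so the gradient flowing back through it always contains an identity term and cannot shrink to zero even when the gradient through F does. A small self-contained demonstration (my own sketch, not from the lecture):

import torch

torch.manual_seed(0)
depth, dim = 50, 16
layers = [torch.nn.Linear(dim, dim) for _ in range(depth)]
for layer in layers:
    torch.nn.init.normal_(layer.weight, std=0.05)  # small weights make plain gradients shrink

x_plain = torch.randn(1, dim, requires_grad=True)
x_skip = x_plain.detach().clone().requires_grad_(True)

h = x_plain
for layer in layers:
    h = torch.tanh(layer(h))        # plain stack: y = f(x)
h.sum().backward()

h = x_skip
for layer in layers:
    h = h + torch.tanh(layer(h))    # residual stack: y = x + f(x)
h.sum().backward()

print(x_plain.grad.abs().mean())    # vanishingly small after 50 layers
print(x_skip.grad.abs().mean())     # stays on the order of 1
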

We will build the following network:
[Figure: conv -> relu -> maxpool -> ResidualBlock -> conv -> relu -> maxpool -> ResidualBlock -> fully connected layer]

import torch
import torch.nn.functional as F
from torchvision import datasets
from torch.utils.data import DataLoader
from torchvision import transforms

# Load the dataset
batch_size = 64
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])  # MNIST mean and std

train_data = datasets.MNIST(root='./dataset/mnist', transform=transform, train=True, download=True)
train_loader = DataLoader(dataset=train_data, shuffle=True, batch_size=batch_size, num_workers=0)
test_data = datasets.MNIST(root='./dataset/mnist', transform=transform, train=False, download=True)
test_loader = DataLoader(dataset=test_data, shuffle=False, batch_size=batch_size, num_workers=0)  # no need to shuffle the test set


# Build the network
class ResidualBlock(torch.nn.Module):
    # Two 3x3 convolutions that preserve channel count and spatial size,
    # so the skip connection x + y is a legal elementwise sum.
    def __init__(self, in_channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = torch.nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=3, padding=1)
        self.conv2 = torch.nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=3, padding=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        return F.relu(x + y)  # skip connection: add the input before the final ReLU


class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5)
        self.mp = torch.nn.MaxPool2d(kernel_size=2)
        self.rblock1 = ResidualBlock(16)
        self.rblock2 = ResidualBlock(32)
        self.fc = torch.nn.Linear(512, 10)  # 32 channels * 4 * 4 spatial positions = 512

    def forward(self, x):
        x_size = x.size(0)
        x = self.mp(F.relu(self.conv1(x)))
        x = self.rblock1(x)
        x = self.mp(F.relu(self.conv2(x)))
        x = self.rblock2(x)
        x = x.view(x_size, -1)
        x = self.fc(x)
        return x


model = Net()

# Build the loss function and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), momentum=0.5, lr=0.1)


# Training loop
def train(epoch):
    running_loss = 0.
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, labels = data
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print("[%d %d],loss=%.3f" % (epoch+1, batch_idx+1, running_loss/300))
            running_loss = 0.


def test():
    total = 0
    correct = 0
    with torch.no_grad():
        for batch_idx, data in enumerate(test_loader, 0):
            inputs, labels = data
            total += inputs.size(0)
            outputs = model(inputs)
            _, predicted = torch.max(outputs, dim=1)
            correct += (predicted == labels).sum().item()
        print('accuracy on test set: %d %% ' % (100*correct/total))


if __name__ == "__main__":
    for epoch in range(10):
        train(epoch)
        test()
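
The same kind of shape check works here (again my own addition, assuming the classes above are in scope): a ResidualBlock must return exactly the shape it receives, otherwise the x + y sum inside it would fail.

# Shape check (not part of the lecture code)
print(ResidualBlock(16)(torch.randn(1, 16, 12, 12)).shape)  # torch.Size([1, 16, 12, 12]), unchanged
print(Net()(torch.randn(1, 1, 28, 28)).shape)               # torch.Size([1, 10])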

3. References

PyTorch 深度学习实践
PyTorch 深度学习实践 第11讲
