Concrete Implementations of NiN, GoogLeNet, and ResNet

NiN

[Figure: NiN architecture]

● No fully connected layers
● Alternates NiN blocks with stride-2 max-pooling layers, progressively shrinking height and width while increasing the channel count
● Ends with a global average pooling layer whose input channel count equals the number of classes, yielding the output

In short: each block is three convolutions (one k×k followed by two 1×1), then max pooling; repeat the blocks; finish with global average pooling.

from torch import nn


# A NiN block: one kxk convolution followed by two 1x1 convolutions,
# each with ReLU (the 1x1 convs act as per-pixel fully connected layers)
def NIN_block(in_channel, out_channel, kernel_size, stride, padding):
    return nn.Sequential(
        nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding), nn.ReLU(),
        nn.Conv2d(out_channel, out_channel, kernel_size=1), nn.ReLU(),
        nn.Conv2d(out_channel, out_channel, kernel_size=1), nn.ReLU()
    )


def Nin():
    # shape comments assume a 1x224x224 input
    return nn.Sequential(
        NIN_block(1, 96, 11, 4, 0),    # -> 96 x 54 x 54
        nn.MaxPool2d(3, 2),            # -> 96 x 26 x 26
        NIN_block(96, 256, 5, 1, 2),   # -> 256 x 26 x 26
        nn.MaxPool2d(3, 2),            # -> 256 x 12 x 12
        NIN_block(256, 384, 3, 1, 1),  # -> 384 x 12 x 12
        nn.MaxPool2d(3, 2),            # -> 384 x 5 x 5
        nn.Dropout(0.5),
        NIN_block(384, 10, 3, 1, 1),   # 10 output channels = number of classes
        nn.AdaptiveAvgPool2d((1, 1)),  # global average pooling -> 10 x 1 x 1
        nn.Flatten()                   # -> 10
    )
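
To verify the wiring, here is a minimal sanity check, assuming a single-channel 224×224 input (e.g. Fashion-MNIST resized to 224), that prints each layer's output shape:

import torch

X = torch.rand(size=(1, 1, 224, 224))
for layer in Nin():
    X = layer(X)
    print(layer.__class__.__name__, 'output shape:\t', X.shape)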

GoogLeNet

[Figure: Inception block structure]

An Inception block extracts different kinds of information through four parallel paths of convolutional and pooling layers with different hyperparameters, then concatenates the results along the output-channel dimension.

One of its main advantages is a small number of model parameters and low computational complexity.

import torch
from torch import nn
from torch.nn import functional as F

class Inception(nn.Module):
    # `c1`--`c4` are the number of output channels of each path
    def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):
        super(Inception, self).__init__(**kwargs)
        # Path 1: a single 1x1 convolutional layer
        self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)
        # Path 2: 1x1 convolutional layer followed by a 3x3 convolutional layer
        self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)
        self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)
        # Path 3: 1x1 convolutional layer followed by a 5x5 convolutional layer
        self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)
        self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)
        # Path 4: 3x3 max-pooling layer followed by a 1x1 convolutional layer
        self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)

    def forward(self, x):
        p1 = F.relu(self.p1_1(x))
        p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))
        p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))
        p4 = F.relu(self.p4_2(self.p4_1(x)))
        # Concatenate the outputs along the channel dimension
        return torch.cat((p1, p2, p3, p4), dim=1)
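
The block preserves the input height and width, and its output channel count is the sum c1 + c2[1] + c3[1] + c4. A quick check with the first configuration used below (64 + 128 + 32 + 32 = 256 output channels):

blk = Inception(192, 64, (96, 128), (16, 32), 32)
X = torch.rand(size=(1, 192, 28, 28))
print(blk(X).shape)  # torch.Size([1, 256, 28, 28])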

# b1: 7x7 convolution stem followed by max pooling
b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
                   nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2,
                                           padding=1))
# b2: 1x1 convolution, then 3x3 convolution, then max pooling
b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1), nn.ReLU(),
                   nn.Conv2d(64, 192, kernel_size=3, padding=1), nn.ReLU(),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
# b3: two Inception blocks (256, then 480 output channels) and max pooling
b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),
                   Inception(256, 128, (128, 192), (32, 96), 64),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
# b4: five Inception blocks (512, 512, 512, 528, 832 output channels)
b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
                   Inception(512, 160, (112, 224), (24, 64), 64),
                   Inception(512, 128, (128, 256), (24, 64), 64),
                   Inception(512, 112, (144, 288), (32, 64), 64),
                   Inception(528, 256, (160, 320), (32, 128), 128),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
# b5: two Inception blocks (832, then 1024 output channels),
# then global average pooling
b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
                   Inception(832, 384, (192, 384), (48, 128), 128),
                   nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten())

net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))
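
As a sanity check, a sketch that feeds a dummy 1×1×96×96 input (the 96×96 resolution is an assumption, commonly used for the Fashion-MNIST variant of GoogLeNet) through each top-level block:

X = torch.rand(size=(1, 1, 96, 96))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape:\t', X.shape)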

ResNet

Problem: stacking many convolutional layers means the backward pass multiplies many factors together; when those factors are greater than 1 the gradients explode, and when they are less than 1 the gradients vanish.

Mitigations

1. Weight initialization (see the sketch after this list)

2. Data normalization

3. Batch normalization
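
For mitigation 1, a minimal sketch of Kaiming (He) initialization, which suits ReLU networks; the init_weights helper is illustrative, not part of the original code:

from torch import nn

def init_weights(m):
    # illustrative helper: He initialization for conv and linear weights
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.kaiming_normal_(m.weight)

# apply recursively to every submodule, e.g. resNet().apply(init_weights)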

Residual block

[Figure: a plain block (left) vs. a residual block (right)]

In the left figure, the part inside the dashed box must fit the desired mapping f(x) directly, while in the right figure it only needs to fit the residual mapping f(x) − x. The residual mapping is often easier to optimize in practice.

This design requires the output of the two convolutional layers to have the same shape as the input so that they can be added. If the number of channels needs to change, an extra 1×1 convolutional layer is introduced to transform the input into the required shape before the addition.
[Figure: residual blocks with and without a 1×1 convolution on the shortcut]

import torch
from torch import nn


# Residual block
class Residual(nn.Module):
    def __init__(self, in_channel, out_channel, use_1x1=False, stride=1):
        super().__init__()
        # kernel_size=3, padding=1: height and width unchanged when stride=1
        self.conv1 = nn.Conv2d(in_channel, out_channel, kernel_size=3, padding=1, stride=stride)
        self.bn1 = nn.BatchNorm2d(out_channel)
        self.ReLu = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(out_channel, out_channel, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channel)

        # optional 1x1 convolution on the shortcut to match the
        # channel count (and stride) of the main path
        if use_1x1:
            self.conv3 = nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride)
        else:
            self.conv3 = None

    def forward(self, x):
        out = self.ReLu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        # transform the identity branch when the shapes differ
        if self.conv3 is not None:
            x = self.conv3(x)

        return self.ReLu(x + out)
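
A quick check of both variants, assuming a 4×3×6×6 input: without the 1×1 shortcut the shape is preserved, while use_1x1=True with stride=2 changes the channel count and halves the height and width:

blk = Residual(3, 3)
X = torch.rand(4, 3, 6, 6)
print(blk(X).shape)   # torch.Size([4, 3, 6, 6])

blk2 = Residual(3, 6, use_1x1=True, stride=2)
print(blk2(X).shape)  # torch.Size([4, 6, 3, 3])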


# One stage: a sequence of residual blocks
def resnetBlock(in_channel, out_channel, size, is_First=False):
    blk = []
    for i in range(size):
        if i == 0 and not is_First:
            # first block of a later stage: halve height/width, change channels
            blk.append(Residual(in_channel, out_channel, use_1x1=True, stride=2))
        else:
            # remaining blocks keep input and output shapes identical
            blk.append(Residual(out_channel, out_channel))
    return blk


def resNet():
    b1 = nn.Sequential(
        nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
        nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    )
    # four stages of residual blocks
    # the first stage keeps the resolution (the max pool has already halved it)
    b2 = nn.Sequential(*resnetBlock(64, 64, 2, is_First=True))
    # each later stage halves height/width and doubles the channels
    b3 = nn.Sequential(*resnetBlock(64, 128, 2))
    b4 = nn.Sequential(*resnetBlock(128, 256, 2))
    b5 = nn.Sequential(*resnetBlock(256, 512, 2))
    # global average pooling
    avg = nn.AdaptiveAvgPool2d((1, 1))
    # fully connected output layer
    linear = nn.Linear(512, 10)

    return nn.Sequential(b1, b2, b3, b4, b5, avg, nn.Flatten(), linear)


X = torch.rand(size=(1, 1, 224, 224))
for layer in resNet():
    X = layer(X)
    print(layer.__class__.__name__, 'output shape:\t', X.shape)
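
With the 224×224 input, the printed shapes should come out to (1, 64, 56, 56) after b1, (1, 64, 56, 56) after b2, (1, 128, 28, 28) after b3, (1, 256, 14, 14) after b4, and (1, 512, 7, 7) after b5, then (1, 512, 1, 1) after pooling, (1, 512) after flattening, and (1, 10) from the final linear layer.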
