Learning MobileNetV1 with PyTorch

"""mobilenet in pytorch



[1] Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam

    MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications
    https://arxiv.org/abs/1704.04861
"""


import torch
import torch.nn as nn


# Basic convolution block: Conv2d + BatchNorm2d + ReLU
class BasicConv2dBlock(nn.Module):
    # constructor
    def __init__(self, input_channels, output_channels, kernel_size, downsample=True, **kwargs):
        """Basic convolution block.

        Args:
            input_channels        (int): number of input channels
            output_channels       (int): number of output channels
            kernel_size           (int): convolution kernel size
            downsample (bool, optional): whether to downsample with stride 2 (for small
                inputs, downsampling too often leaves the later feature maps too small).
                Defaults to True. See the shape sketch after this class.
        """
        super(BasicConv2dBlock, self).__init__()
        # stride 2 when downsampling, otherwise 1
        stride = 2 if downsample else 1
        # convolution
        self.conv = nn.Conv2d(input_channels, output_channels, kernel_size, stride=stride, **kwargs)
        # batch normalization
        self.bn = nn.BatchNorm2d(output_channels)
        # activation
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x
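# A minimal shape sketch (assuming CIFAR-style 32x32 inputs): with the default
# downsample=True the stride-2 convolution halves the spatial resolution, while
# downsample=False keeps it (3x3 kernel, padding=1):
#
#   BasicConv2dBlock(3, 32, 3, padding=1, bias=False)(torch.zeros(1, 3, 32, 32)).shape
#       -> torch.Size([1, 32, 16, 16])
#   BasicConv2dBlock(3, 32, 3, downsample=False, padding=1, bias=False)(torch.zeros(1, 3, 32, 32)).shape
#       -> torch.Size([1, 32, 32, 32])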
# Depthwise separable convolution: a depthwise conv followed by a 1x1 pointwise conv
# (see the parameter-count note after this class)
class DepthSeperabelConv2dBlock(nn.Module):
    def __init__(self, input_channels, output_channels, kernel_size, **kwargs):
        super(DepthSeperabelConv2dBlock, self).__init__()
        # depthwise convolution: one filter per input channel (groups=input_channels)
        self.depth_wise = nn.Sequential(
            nn.Conv2d(input_channels, input_channels, kernel_size, groups=input_channels, **kwargs),
            nn.BatchNorm2d(input_channels),
            nn.ReLU(inplace=True)
        )
        # pointwise convolution: a 1x1 conv that mixes channels
        self.point_wise = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 1),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True)
        )
    def forward(self, x):
        x = self.depth_wise(x)
        x = self.point_wise(x)

        return x
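# Why the factorization helps (counting weights only, ignoring BatchNorm): a standard
# k x k convolution from M to N channels needs k*k*M*N weights, while a depthwise
# separable one needs k*k*M (depthwise) + M*N (pointwise), a reduction of roughly
# 1/N + 1/k^2. For example, with k=3 and M=N=512: standard = 9*512*512 = 2,359,296
# weights versus separable = 9*512 + 512*512 = 266,752 weights, about 8.8x fewer,
# in line with the 8-9x savings reported in the MobileNet paper [1].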
class MobileNet(nn.Module):
    """
    Args:
        width_multiplier: The role of the width multiplier α is to thin
                        a network uniformly at each layer. For a given
                        layer and width multiplier α, the number of
                        input channels M becomes αM and the number of
                        output channels N becomes αN.
        class_num: number of output classes.

    A usage sketch follows the mobilenet factory function below.
    """

    def __init__(self, width_multiplier=1, class_num=100):
        super(MobileNet, self).__init__()

        alpha = width_multiplier

        self.stem = nn.Sequential(
            BasicConv2dBlock(3, int(32 * alpha), kernel_size=3, padding=1, bias=False),
            DepthSeperabelConv2dBlock(int(32 * alpha), int(64 * alpha), kernel_size=3, padding=1, bias=False)
        )

        # each convN stage starts with a stride-2 depthwise separable block that
        # halves the spatial resolution and doubles the channel count
        self.conv1 = nn.Sequential(
            DepthSeperabelConv2dBlock(int(64 * alpha), int(128 * alpha), kernel_size=3, stride=2, padding=1, bias=False),
            DepthSeperabelConv2dBlock(int(128 * alpha), int(128 * alpha), kernel_size=3, padding=1, bias=False)
        )

        self.conv2 = nn.Sequential(
            DepthSeperabelConv2dBlock(int(128 * alpha), int(256 * alpha), kernel_size=3, stride=2, padding=1, bias=False),
            DepthSeperabelConv2dBlock(int(256 * alpha), int(256 * alpha), kernel_size=3, padding=1, bias=False)
        )

        self.conv3 = nn.Sequential(
            DepthSeperabelConv2dBlock(int(256 * alpha), int(512 * alpha), kernel_size=3, stride=2, padding=1, bias=False),
            DepthSeperabelConv2dBlock(int(512 * alpha), int(512 * alpha), kernel_size=3, padding=1, bias=False),
            DepthSeperabelConv2dBlock(int(512 * alpha), int(512 * alpha), kernel_size=3, padding=1, bias=False),
            DepthSeperabelConv2dBlock(int(512 * alpha), int(512 * alpha), kernel_size=3, padding=1, bias=False),
            DepthSeperabelConv2dBlock(int(512 * alpha), int(512 * alpha), kernel_size=3, padding=1, bias=False),
            DepthSeperabelConv2dBlock(int(512 * alpha), int(512 * alpha), kernel_size=3, padding=1, bias=False)
        )

        self.conv4 = nn.Sequential(
            DepthSeperabelConv2dBlock(int(512 * alpha), int(1024 * alpha), kernel_size=3, stride=2, padding=1, bias=False),
            DepthSeperabelConv2dBlock(int(1024 * alpha), int(1024 * alpha), kernel_size=3, padding=1, bias=False)
        )

        # classification head: global average pooling then a linear layer
        self.fc = nn.Linear(int(1024 * alpha), class_num)
        self.avg = nn.AdaptiveAvgPool2d(1)
    def forward(self, x):
        x = self.stem(x)

        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)

        # global average pooling, flatten, then the linear classifier
        x = self.avg(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x

def mobilenet(alpha=1, class_num=100):
    return MobileNet(alpha, class_num)
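
A minimal usage sketch, assuming CIFAR-style 32x32 inputs and the default class_num=100:

if __name__ == '__main__':
    # Forward pass on a random CIFAR-sized batch: the five stride-2 stages take
    # 32x32 feature maps down to 1x1 before the adaptive average pooling.
    net = mobilenet(alpha=1, class_num=100)
    out = net(torch.randn(2, 3, 32, 32))
    print(out.shape)  # torch.Size([2, 100])

    # The width multiplier thins every layer uniformly, which shows up directly
    # in the parameter count.
    slim = mobilenet(alpha=0.5, class_num=100)
    print(sum(p.numel() for p in net.parameters()))
    print(sum(p.numel() for p in slim.parameters()))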
