XSepConv_MobileNetV3_small | My Own Implementation

Written at home in VS Code during the pandemic. I have not debugged it myself, so there may be errors.

1. MobileNetV3_small structure and code:

(Figure: MobileNetV3_Small architecture)

class MobileNetV3_Small(nn.Module):
    def __init__(self, num_classes=1000):
        super(MobileNetV3_Small, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.hs1 = hswish()

        self.bneck = nn.Sequential(
            Block(3, 16, 16, 16, nn.ReLU(inplace=True), SeModule(16), 2),
            Block(3, 16, 72, 24, nn.ReLU(inplace=True), None, 2),
            Block(3, 24, 88, 24, nn.ReLU(inplace=True), None, 1),
            Block(5, 24, 96, 40, hswish(), SeModule(40), 2),
            Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
            Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
            Block(5, 40, 120, 48, hswish(), SeModule(48), 1),
            Block(5, 48, 144, 48, hswish(), SeModule(48), 1),
            Block(5, 48, 288, 96, hswish(), SeModule(96), 2),
            Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
            Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
        )
        self.conv2 = nn.Conv2d(96, 576, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(576)
        self.hs2 = hswish()
        self.linear3 = nn.Linear(576, 1280)
        self.bn3 = nn.BatchNorm1d(1280)
        self.hs3 = hswish()
        self.linear4 = nn.Linear(1280, num_classes)
        self.init_params()

    def init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)

    def forward(self, x):
        out = self.hs1(self.bn1(self.conv1(x)))
        out = self.bneck(out)
        out = self.hs2(self.bn2(self.conv2(out)))
        out = F.avg_pool2d(out, 7)
        out = out.view(out.size(0), -1)
        out = self.hs3(self.bn3(self.linear3(out)))
        out = self.linear4(out)
        return out
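As a quick sanity check (a minimal sketch; it assumes the hswish, SeModule, and Block helpers from section 3 are already in scope and that torch has been imported), a forward pass on a 224x224 input should give a (batch, num_classes) output:

# Hypothetical smoke test; relies on hswish, SeModule and Block from section 3.
net = MobileNetV3_Small(num_classes=1000)
x = torch.randn(2, 3, 224, 224)
print(net(x).size())  # expected: torch.Size([2, 1000])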

2. XSepConv structure and code:

(Figures: XSepConv block structure and Improved Symmetric Padding)
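As a rough, purely illustrative estimate (my own back-of-the-envelope numbers, not taken from the paper), this compares the depthwise parameters of a plain k*k DW convolution with the 2*2 + 1*k + k*1 factorization used in the block below, for k = 5 and 240 channels (BN and SE ignored):

# Illustrative parameter count for the depthwise part only (BN/SE ignored).
k, C = 5, 240
plain_dw = C * k * k            # plain k*k depthwise: 6000 params
xsep_dw = C * (2 * 2 + k + k)   # 2*2 DW + 1*k DW + k*1 DW: 3360 params
print(plain_dw, xsep_dw)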

class XSepBlock(nn.Module):
    ''' 1*1 expand + Improved Symmetric Padding + 2*2 DW + 1*k DW + k*1 DW + 1*1 out'''
    def __init__(self, kernel_size, in_size, expand_size, out_size, nolinear, semodule, stride, ith):
        super(XSepBlock, self).__init__()
        self.stride = stride
        self.ith = ith
        # 1*1 expand
        self.conv1 = nn.Conv2d(in_size, expand_size, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(expand_size)
        self.nolinear1 = nolinear
        # 2*2 DW
        self.conv2 = nn.Conv2d(expand_size, expand_size, kernel_size=2, stride=1, padding=0, groups=expand_size, bias=False)
        self.bn2 = nn.BatchNorm2d(expand_size)
        self.nolinear2 = nolinear
        # 1*k DW 
        self.conv3 = nn.Conv2d(expand_size, expand_size, kernel_size=(1, kernel_size), stride=stride, 
                    padding=(0, kernel_size//2), groups=expand_size, bias=False)
        self.bn3 = nn.BatchNorm2d(expand_size)
        self.nolinear3 = nolinear
        # k*1 DW
        self.conv4 = nn.Conv2d(expand_size, expand_size, kernel_size=(kernel_size, 1), stride=stride, 
                    padding=(kernel_size//2, 0), groups=expand_size, bias=False)
        self.bn4 = nn.BatchNorm2d(expand_size)
        # SE 
        self.se = semodule
        self.nolinear_se = nolinear
        # 1*1 out
        self.conv5 = nn.Conv2d(expand_size, out_size, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn5 = nn.BatchNorm2d(out_size)

        self.shortcut = nn.Sequential()
        if stride == 1 and in_size != out_size:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_size, out_size, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_size),
            )

    def forward(self, x):
        x1 = x
        # Improved Symmetric Padding
        if self.ith % 4 == 0:
            x1 = nn.functional.pad(x1,(0,1,1,0),mode = "constant",value = 0) # right top
        elif self.ith % 4 == 1:
            x1 = nn.functional.pad(x1,(0,1,0,1),mode = "constant",value = 0) # right bottom
        elif self.ith % 4 == 2:
            x1 = nn.functional.pad(x1,(1,0,1,0),mode = "constant",value = 0) # left top
        elif self.ith % 4 == 3:
            x1 = nn.functional.pad(x1,(1,0,0,1),mode = "constant",value = 0) # left bottom
        else:
            raise NotImplementedError('ith layer is not right')
          
        # 1*1 expand
        out = self.nolinear1(self.bn1(self.conv1(x1)))
        # 2*2 DW
        out = self.nolinear2(self.bn2(self.conv2(out)))
        # 1*k DW
        out = self.nolinear3(self.bn3(self.conv3(out)))
        # k*1 DW
        out = self.bn4(self.conv4(out))
        # SE
        if self.se is not None:
            out = self.nolinear_se(self.se(out))
        # 1*1 out
        out = self.bn5(self.conv5(out))
        out = out + self.shortcut(x) if self.stride==1 else out
        return out
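To check that the improved symmetric padding (one extra pixel on two adjacent sides) followed by the padding-free 2*2 depthwise convolution keeps the spatial size unchanged, here is a minimal standalone shape test (assumes only torch is installed):

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(1, 8, 14, 14)
x_pad = F.pad(x, (0, 1, 1, 0), mode="constant", value=0)  # pad right and top -> 15x15
dw2 = nn.Conv2d(8, 8, kernel_size=2, stride=1, padding=0, groups=8, bias=False)
print(dw2(x_pad).shape)  # torch.Size([1, 8, 14, 14]) -- H and W preserved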

3. XSepMobileNetV3_small code:

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init

class hswish(nn.Module):
    def forward(self, x):
        out = x * F.relu6(x + 3, inplace=True) / 6
        return out

class hsigmoid(nn.Module):
    def forward(self, x):
        out = F.relu6(x + 3, inplace=True) / 6
        return out

class SeModule(nn.Module):
    def __init__(self, in_size, reduction=4):
        super(SeModule, self).__init__()
        self.se = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_size, in_size // reduction, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(in_size // reduction),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_size // reduction, in_size, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(in_size),
            hsigmoid()
        )

    def forward(self, x):
        return x * self.se(x)
# DWConv
class Block(nn.Module):
    '''expand + depthwise + pointwise'''
    def __init__(self, kernel_size, in_size, expand_size, out_size, nolinear, semodule, stride):
        super(Block, self).__init__()
        self.stride = stride
        self.se = semodule

        self.conv1 = nn.Conv2d(in_size, expand_size, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(expand_size)
        self.nolinear1 = nolinear
        self.conv2 = nn.Conv2d(expand_size, expand_size, kernel_size=kernel_size, stride=stride, padding=kernel_size//2, groups=expand_size, bias=False)
        self.bn2 = nn.BatchNorm2d(expand_size)
        self.nolinear2 = nolinear
        self.conv3 = nn.Conv2d(expand_size, out_size, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_size)

        self.shortcut = nn.Sequential()
        if stride == 1 and in_size != out_size:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_size, out_size, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_size),
            )

    def forward(self, x):
        out = self.nolinear1(self.bn1(self.conv1(x)))
        out = self.nolinear2(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        if self.se is not None:
            out = self.se(out)
        out = out + self.shortcut(x) if self.stride==1 else out
        return out
# XSepConv
class XSepBlock(nn.Module):
    ''' 1*1 expand + Improved Symmetric Padding + 2*2 DW + 1*k DW + k*1 DW + 1*1 out'''
    def __init__(self, kernel_size, in_size, expand_size, out_size, nolinear, semodule, stride, ith):
        super(XSepBlock, self).__init__()
        self.stride = stride
        self.ith = ith

        # 1*1 expand
        self.conv1 = nn.Conv2d(in_size, expand_size, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(expand_size)
        self.nolinear1 = nolinear
        # 2*2 DW
        self.conv2 = nn.Conv2d(expand_size, expand_size, kernel_size=2, stride=1, padding=0, groups=expand_size, bias=False)
        self.bn2 = nn.BatchNorm2d(expand_size)
        self.nolinear2 = nolinear
        # 1*k DW 
        self.conv3 = nn.Conv2d(expand_size, expand_size, kernel_size=(1, kernel_size), stride=stride, 
                    padding=(0, kernel_size//2), groups=expand_size, bias=False)
        self.bn3 = nn.BatchNorm2d(expand_size)
        self.nolinear3 = nolinear
        # k*1 DW
        self.conv4 = nn.Conv2d(expand_size, expand_size, kernel_size=(kernel_size, 1), stride=stride, 
                    padding=(kernel_size//2, 0), groups=expand_size, bias=False)
        self.bn4 = nn.BatchNorm2d(expand_size)
        # SE 
        self.se = semodule
        self.nolinear_se = nolinear
        # 1*1 out
        self.conv5 = nn.Conv2d(expand_size, out_size, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn5 = nn.BatchNorm2d(out_size)

        self.shortcut = nn.Sequential()
        if stride == 1 and in_size != out_size:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_size, out_size, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_size),
            )

    def forward(self, x):
        x1 = x
        if self.ith % 4 == 0:
            x1 = nn.functional.pad(x1,(0,1,1,0),mode = "constant",value = 0) # right top
        elif self.ith % 4 == 1:
            x1 = nn.functional.pad(x1,(0,1,0,1),mode = "constant",value = 0) # right bottom
        elif self.ith % 4 == 2:
            x1 = nn.functional.pad(x1,(1,0,1,0),mode = "constant",value = 0) # left top
        elif self.ith % 4 == 3:
            x1 = nn.functional.pad(x1,(1,0,0,1),mode = "constant",value = 0) # left bottom
        else:
            raise NotImplementedError('ith layer is not right')
          
        # 1*1 expand
        out = self.nolinear1(self.bn1(self.conv1(x1)))
        # 2*2 DW
        out = self.nolinear2(self.bn2(self.conv2(out)))
        # 1*k DW
        out = self.nolinear3(self.bn3(self.conv3(out)))
        # k*1 DW
        out = self.bn4(self.conv4(out))
        # SE
        if self.se is not None:
            out = self.nolinear_se(self.se(out))
        # 1*1 out
        out = self.bn5(self.conv5(out))
        out = out + self.shortcut(x) if self.stride==1 else out
        return out
# Adapted to match the settings in the paper; there may be discrepancies
class XSepMobileNetV3_Small(nn.Module):
    def __init__(self, num_classes=1000):
        super(XSepMobileNetV3_Small, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.hs1 = hswish()

        self.bneck = nn.Sequential(
            Block(3, 16, 16, 16, nn.ReLU(inplace=True), SeModule(16), 2),
            Block(3, 16, 72, 24, nn.ReLU(inplace=True), None, 2),
            Block(3, 24, 88, 24, nn.ReLU(inplace=True), None, 1),
            Block(5, 24, 96, 40, hswish(), SeModule(40), 2), # downsampling layer uses a plain DW block, since kernel_size < 7
            XSepBlock(5, 40, 240, 40, hswish(), SeModule(40), 1, 1),
            XSepBlock(5, 40, 240, 40, hswish(), SeModule(40), 1, 2),
            XSepBlock(5, 40, 120, 48, hswish(), SeModule(48), 1, 3),
            XSepBlock(5, 48, 144, 48, hswish(), SeModule(48), 1, 4),
            Block(5, 48, 288, 96, hswish(), SeModule(96), 2), # downsampling layer uses a plain DW block, since kernel_size < 7
            XSepBlock(3, 96, 576, 96, hswish(), SeModule(96), 1, 1), 
            XSepBlock(3, 96, 576, 96, hswish(), SeModule(96), 1, 2),
        )


        self.conv2 = nn.Conv2d(96, 576, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(576)
        self.hs2 = hswish()
        self.linear3 = nn.Linear(576, 1280)
        self.bn3 = nn.BatchNorm1d(1280)
        self.hs3 = hswish()
        self.linear4 = nn.Linear(1280, num_classes)
        self.init_params()

    def init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)

    def forward(self, x):
        out = self.hs1(self.bn1(self.conv1(x)))
        out = self.bneck(out)
        out = self.hs2(self.bn2(self.conv2(out)))
        out = F.avg_pool2d(out, 7)
        out = out.view(out.size(0), -1)
        out = self.hs3(self.bn3(self.linear3(out)))
        out = self.linear4(out)
        return out

def test():
    net = XSepMobileNetV3_Small()
    x = torch.randn(2, 3, 224, 224)
    y = net(x)
    print(y.size())

if __name__ == '__main__':
    test()