MobileNet V3 Code

Code (a PyTorch implementation of the MobileNetV3 large and small variants):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Hard-swish: x * ReLU6(x + 3) / 6, a cheap piecewise approximation of swish.
class h_swish(nn.Module):
    def forward(self, x):
        return x * F.relu6(x + 3) / 6

# Swish: x * sigmoid(x) (unused below, kept for reference).
class swish(nn.Module):
    def forward(self, x):
        return x * torch.sigmoid(x)

# Hard-sigmoid: ReLU6(x + 3) / 6, used as the gating function in the SE blocks.
class h_sigmoid(nn.Module):
    def forward(self, x):
        return F.relu6(x + 3) / 6
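# Note: recent PyTorch releases also ship nn.Hardswish and nn.Hardsigmoid with
# the same piecewise definitions, so they can be swapped in for the two
# hand-rolled modules above.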

# Round `ch` to the nearest multiple of `divisor` (never dropping more than
# 10% below the requested value), as in the reference MobileNet implementations.
def _make_divisor(ch, divisor, min_ch=None):
    if min_ch is None:
        min_ch = divisor
    new_ch = max(min_ch, int(ch + divisor / 2) // divisor * divisor)
    if new_ch < 0.9 * ch:
        new_ch += divisor
    return new_ch
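# Sanity check (illustrative values): the result is always a multiple of the
# divisor and stays within 10% of the request, e.g.
#   _make_divisor(30, 8) == 32, _make_divisor(40, 8) == 40, _make_divisor(19, 8) == 24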

# Squeeze-and-excite: global average pool -> 4x channel reduction -> expansion
# back to `inchannel` -> hard-sigmoid gate, multiplied onto the input feature map.
class SE_module(nn.Module):
    def __init__(self, inchannel):
        super(SE_module, self).__init__()
        self.se = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(inchannel, inchannel // 4, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(inchannel // 4, inchannel, 1),
            h_sigmoid()
        )

    def forward(self, x):
        return x * self.se(x)
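# Shape check (hypothetical usage): the gate is broadcast over H and W, so the
# output shape always matches the input, e.g.
#   SE_module(16)(torch.randn(1, 16, 32, 32)).shape == (1, 16, 32, 32)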

class bneck(nn.Module):
    def __init__(self, inchannel, outchannel, hidden_channel, nonlinear, kernel_size, stride, SE=False):
        super(bneck, self).__init__()
        # Residual shortcut only when the block keeps both resolution and width.
        self.shortcut = stride == 1 and inchannel == outchannel

        layers = []
        if inchannel != hidden_channel:
            # 1x1 pointwise expansion
            layers.extend([
                nn.Conv2d(inchannel, hidden_channel, 1, bias=False),
                nn.BatchNorm2d(hidden_channel),
                nonlinear()
            ])
        # Depthwise conv; kernel size and stride come from the per-block setting.
        layers.extend([
            nn.Conv2d(hidden_channel, hidden_channel, kernel_size, stride,
                      kernel_size // 2, groups=hidden_channel, bias=False),
            nn.BatchNorm2d(hidden_channel),
            nonlinear()
        ])
        self.conv1 = nn.Sequential(*layers)
        # Squeeze-and-excite only on the blocks that request it.
        self.se = SE_module(hidden_channel) if SE else nn.Identity()
        # 1x1 linear projection (no activation after the last BN).
        self.conv2 = nn.Sequential(
            nn.Conv2d(hidden_channel, outchannel, 1, bias=False),
            nn.BatchNorm2d(outchannel)
        )

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(self.se(out))
        if self.shortcut:
            out = out + x
        return out
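# Usage sketch (numbers taken from one of the 'large' rows below): a stride-2
# 5x5 block halves the feature map while the pointwise convs change the width:
#   bneck(24, 40, 72, nn.ReLU, kernel_size=5, stride=2, SE=True)
#   maps (1, 24, 56, 56) -> (1, 40, 28, 28)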


class MobileNet_V3(nn.Module):
    def __init__(self, setting, inchannel, classes, alpha=1.0, round_nearest=8, head_channel=1024):
        super(MobileNet_V3, self).__init__()
        # alpha is the usual width multiplier; round_nearest keeps every width a multiple of 8.
        input_channel = _make_divisor(16 * alpha, round_nearest)

        self.HS = h_swish
        self.RE = nn.ReLU

        # Stem: 3x3 stride-2 conv + BN + hard-swish.
        self.conv1 = nn.Sequential(
            nn.Conv2d(inchannel, input_channel, 3, 2, 1, bias=False),
            nn.BatchNorm2d(input_channel),
            self.HS()
        )

        # One bneck per row of `setting`.
        self.blocks = nn.ModuleList()
        for _, kernel_size, hidden, out_channels, SE, nonlinear, stride in setting:
            nonlin = self.RE if nonlinear == 'RE' else self.HS
            self.hidden = _make_divisor(hidden * alpha, round_nearest)
            out_channels = _make_divisor(out_channels * alpha, round_nearest)
            self.blocks.append(bneck(input_channel, out_channels, self.hidden,
                                     nonlin, kernel_size, stride, SE))
            input_channel = out_channels

        # Last-stage 1x1 expansion (width = the final row's hidden size).
        self.conv2 = nn.Sequential(
            nn.Conv2d(input_channel, self.hidden, 1, bias=False),
            nn.BatchNorm2d(self.hidden),
            SE_module(self.hidden),
            self.HS()
        )

        self.pool = nn.AdaptiveAvgPool2d(1)
        # Classifier head as 1x1 convs; the paper uses 1280 here for the large
        # model and 1024 for the small one, so the width comes from `head_channel`.
        self.conv3 = nn.Sequential(
            nn.Conv2d(self.hidden, head_channel, 1),
            self.HS(),
            nn.Dropout(0.2),
            nn.Conv2d(head_channel, classes, 1)
        )

        # Standard initialization: Kaiming normal for convolutions,
        # unit scale / zero shift for the BatchNorm affine parameters.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        x = self.conv1(x)
        for block in self.blocks:
            x = block(x)
        x = self.conv2(x)
        x = self.pool(x)
        x = self.conv3(x)
        return x.flatten(1)  # (N, classes)



def MobileNet_V3_large(inchannel,classes):
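    # Each row follows Table 1 of the MobileNetV3 paper (Howard et al., 2019):
    # [input_ch, kernel_size, expand (hidden) ch, out_ch, SE, nonlinearity, stride]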
    setting = [
        [16, 3, 16, 16, False, 'RE', 1],
        [16, 3, 64, 24, False, 'RE', 2],
        [24, 3, 72, 24, False, 'RE', 1],
        [24, 5, 72, 40, True, 'RE', 2],
        [40, 5, 120, 40, True, 'RE', 1],
        [40, 5, 120, 40, True, 'RE', 1],
        [40, 3, 240, 80, False, 'HS', 2],
        [80, 3, 200, 80, False, 'HS', 1],
        [80, 3, 184, 80, False, 'HS', 1],
        [80, 3, 184, 80, False, 'HS', 1],
        [80, 3, 480, 112, True, 'HS', 1],
        [112, 3, 672, 112, True, 'HS', 1],
        [112, 5, 672, 160, True, 'HS', 2],
        [160, 5, 960, 160, True, 'HS', 1],
        [160, 5, 960, 160, True, 'HS', 1]
    ]
    return MobileNet_V3(setting, inchannel, classes, head_channel=1280)

def MobileNet_V3_small(inchannel, classes):
    # Same column layout as MobileNet_V3_large (Table 2 of the paper).
    setting = [
        [16, 3, 16, 16, True, 'RE', 2],
        [16, 3, 72, 24, False, 'RE', 2],
        [24, 3, 88, 24, False, 'RE', 1],
        [24, 5, 96, 40, True, 'HS', 2],
        [40, 5, 240, 40, True, 'HS', 1],
        [40, 5, 240, 40, True, 'HS', 1],
        [40, 5, 120, 48, True, 'HS', 1],
        [48, 5, 144, 48, True, 'HS', 1],
        [48, 5, 288, 96, True, 'HS', 2],
        [96, 5, 576, 96, True, 'HS', 1],
        [96, 5, 576, 96, True, 'HS', 1]
    ]
    return MobileNet_V3(setting, inchannel, classes)

if __name__ == '__main__':
    x = torch.randn(1, 3, 224, 224)  # randn, not empty(): empty returns uninitialized memory
    m = MobileNet_V3_small(3, 10)
    out = m(x)
    print(out.shape)  # torch.Size([1, 10])
```
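
As a quick cross-check of the two configurations, the sketch below compares their sizes by parameter count. It assumes the definitions above are in scope; `count_params` is a hypothetical helper, not part of the original script.

```python
import torch

def count_params(model):
    # Total number of trainable parameters.
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

large = MobileNet_V3_large(3, 1000)
small = MobileNet_V3_small(3, 1000)
print('large:', count_params(large))
print('small:', count_params(small))  # the small variant should come out noticeably lighter
```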





