Implementing GhostNet in PyTorch

Comments are given directly in the code, and every module's structure is provided, so the network can be assembled block by block.

Imports

import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from torchsummary import summary

Use the GPU

device = torch.device('cuda')

Round output channels to a multiple of the divisor

def Make_Divisible(v, divisor, min_value=None):
    '''
    Round the channel count so that it is divisible by `divisor`
    :param v: desired number of channels
    :param divisor: the returned value is a multiple of this (4 is used below)
    :param min_value: lower bound for the result (defaults to divisor)
    :return: adjusted channel count
    '''
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        # Make sure that round down does not go down by more than 10%.
        new_v += divisor
    return new_v
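
A quick sanity check (not part of the original post; values chosen for illustration) shows the rounding behaviour: channel counts are snapped to a nearby multiple of the divisor and never shrink by more than 10%.

print(Make_Divisible(16 * 1.0, divisor=4))  # 16 -> 16 (already a multiple of 4)
print(Make_Divisible(30, divisor=4))        # 30 -> 32 (rounded to the nearest multiple, ties go up)
print(Make_Divisible(17, divisor=4))        # 17 -> 16 (rounding down stays within the 10% tolerance)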

SENet

class SENet(nn.Module):
    def __init__(self, chs, reduction=4):
        super(SENet, self).__init__()
        self.average_pooling = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.fc = nn.Sequential(
            nn.Linear(chs, chs // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(chs // reduction, chs)
        )
        self.activation = nn.Sigmoid()
    def forward(self, x):
        tmp = x
        batch_size, chs, h, w = x.shape
        x = self.average_pooling(x).view(batch_size, chs)
        x = self.fc(x).view(batch_size, chs, 1, 1)
        x = torch.clamp(x, min=0, max=1)
        # if min <= elem <= max, new_elem = elem; if elem > max, new_elem = max; if elem < min, new_elem = min
        # x = self.activation(x)
        return x * tmp  # x: (batch_size, chs, 1, 1), tmp: (batch_size, chs, h, w)
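
As a quick shape check (an assumed example, relying on the SENet class above), the SE block produces per-channel weights and rescales the input without changing its shape:

se = SENet(chs=64, reduction=4)
x = torch.randn(2, 64, 8, 8)
print(se(x).shape)  # torch.Size([2, 64, 8, 8]) -- channel-wise reweighting, spatial size unchanged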

Depthwise convolution module (bottleneck neck)

def Depthwise_Convolution(in_chs, out_chs, kernel_size=(3, 3), stride=(1, 1), relu=False):
    '''
    in_chs = out_chs = num_groups
    input:(batch_size, in_chs, h, w)
    output:(batch_size, out_chs, h, w) or (batch_size, out_chs, h // 2, w // 2)
    '''
    return nn.Sequential(
        nn.Conv2d(
            in_channels=in_chs,
            out_channels=out_chs,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size[0] // 2,
            groups=in_chs,
            bias=False
        ),
        nn.BatchNorm2d(num_features=out_chs),
        nn.ReLU(inplace=True) if relu else nn.Sequential()
    )
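
For illustration (an assumed example), a stride of (2, 2) halves the spatial resolution while groups=in_chs keeps every channel filtered independently:

dw = Depthwise_Convolution(in_chs=32, out_chs=32, kernel_size=(3, 3), stride=(2, 2))
x = torch.randn(1, 32, 16, 16)
print(dw(x).shape)  # torch.Size([1, 32, 8, 8])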

GhostModule

class GhostModule(nn.Module):
    '''
    input:(batch_size, in_chs, h, w)
    output:(batch_size, out_chs, h, w)
    '''
    def __init__(self, in_chs, out_chs, kernel_size=(1, 1), ratio=2, dw_size=(3, 3), stride=(1, 1), relu=True):
        super(GhostModule, self).__init__()
        self.out_chs = out_chs
        init_chs = math.ceil(out_chs / ratio)  # n / s = m
        ghost_chs = init_chs * (ratio - 1)  # number of ghost maps:m(s - 1)

        self.primary_conv = nn.Sequential(
            # input map -> intrinsic map
            # (batch_size, in_chs, h, w)
            nn.Conv2d(
                in_channels=in_chs,
                out_channels=init_chs,  # m
                kernel_size=kernel_size,
                stride=stride,
                padding=kernel_size[0] // 2,
                bias=False
            ),
            nn.BatchNorm2d(num_features=init_chs),
            nn.ReLU(inplace=True) if relu else nn.Sequential()
        )
        self.cheap_operation = nn.Sequential(
            nn.Conv2d(
                in_channels=init_chs,  # m
                out_channels=ghost_chs,  # m(s - 1)
                kernel_size=dw_size,
                stride=(1, 1),
                padding=dw_size[0] // 2,
                groups=init_chs,
                bias=False
            ),
            nn.BatchNorm2d(num_features=ghost_chs),
            nn.ReLU(inplace=True) if relu else nn.Sequential()
        )
    def forward(self, x):  # (batch_size, in_chs, h, w)
        intrinsic_maps = self.primary_conv(x)
        ghost_maps = self.cheap_operation(intrinsic_maps)
        out = torch.cat([intrinsic_maps, ghost_maps], dim=1)  # n = ms
        return out[:, : self.out_chs, :, :]  # (batch_size, out_chs, h, w)
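
A small sketch (assumed values) of the channel bookkeeping with ratio=2: half of the output channels come from the primary convolution (intrinsic maps) and the other half from the cheap depthwise operation (ghost maps):

gm = GhostModule(in_chs=16, out_chs=32, ratio=2)  # init_chs = 16 intrinsic + 16 ghost channels
x = torch.randn(1, 16, 8, 8)
print(gm(x).shape)  # torch.Size([1, 32, 8, 8])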

GhostBottleneck

class GhostBottleneck(nn.Module):
    '''
    input:(batch_size, in_chs, h, w)
    output:(batch_size, out_chs, h, w) or (batch_size, out_chs, h // 2, w // 2)
    '''
    def __init__(self, in_chs, hid_chs, out_chs, kernel_size, stride, use_se):
        super(GhostBottleneck, self).__init__()
        assert stride in [(1, 1), (2, 2)]

        self.conv = nn.Sequential(
            GhostModule(
                in_chs=in_chs,
                out_chs=hid_chs,
                kernel_size=(1, 1),
                relu=True
            ),
            Depthwise_Convolution(
                in_chs=hid_chs,
                out_chs=hid_chs,
                kernel_size=kernel_size,
                stride=stride,
                relu=False
            ) if stride == (2, 2) else nn.Sequential(),
            SENet(hid_chs) if use_se else nn.Sequential(),
            GhostModule(
                in_chs=hid_chs,
                out_chs=out_chs,
                kernel_size=(1, 1),
                relu=False
            )
        )
        if stride == (1, 1) and in_chs == out_chs:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                Depthwise_Convolution(
                    in_chs=in_chs,
                    out_chs=in_chs,
                    kernel_size=kernel_size,
                    stride=stride,
                    relu=True
                ),  # change (h, w)
                nn.Conv2d(
                    in_channels=in_chs,
                    out_channels=out_chs,
                    kernel_size=(1, 1),
                    stride=(1, 1),
                    padding=(0, 0),
                    bias=False
                ),
                nn.BatchNorm2d(num_features=out_chs)
            )
    def forward(self, x):
        out = self.conv(x) + self.shortcut(x)
        # print(out.shape)
        return out
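
An assumed usage example: when stride is (2, 2), the shortcut branch also downsamples, so the residual addition still matches the main branch:

gb = GhostBottleneck(in_chs=24, hid_chs=72, out_chs=40, kernel_size=(5, 5), stride=(2, 2), use_se=True)
x = torch.randn(1, 24, 16, 16)
print(gb(x).shape)  # torch.Size([1, 40, 8, 8])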

GhostNet

class GhostNet(nn.Module):
    def __init__(self, configurations, num_chs=4, num_classes=8, mult=1., dropout=0.2):
        super(GhostNet, self).__init__()
        self.cfgs = configurations
        self.dropout = dropout

        # build the first layer
        out_chs = Make_Divisible(16 * mult, divisor=4)  # 16
        layers = [
            nn.Sequential(
                nn.Conv2d(in_channels=num_chs, out_channels=out_chs, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False),
                nn.BatchNorm2d(num_features=out_chs),
                nn.ReLU(inplace=True)
            )  # (batch_size, in_chs(4), h, w) -> (batch_size, out_chs(16), h // 2, w // 2)
        ]
        in_chs = out_chs

        # build inverted residual blocks
        for k_size, expansion_size, o_chs, use_se, stride in self.cfgs:
            out_chs = Make_Divisible(o_chs * mult, 4)
            hid_chs = Make_Divisible(expansion_size * mult, 4)
            layers.append(GhostBottleneck(in_chs=in_chs, hid_chs=hid_chs, out_chs=out_chs, kernel_size=k_size, stride=stride, use_se=use_se))
            in_chs = out_chs
        self.features = nn.Sequential(*layers)

        # build the last few layers
        out_chs = Make_Divisible(expansion_size * mult, 4)  # expansion_size left over from the last config row: 960
        self.squeeze = nn.Sequential(
            nn.Conv2d(
                in_channels=in_chs,  # 160
                out_channels=out_chs,
                kernel_size=(1, 1),
                stride=(1, 1),
                padding=(0, 0),
                bias=False
            ),
            nn.BatchNorm2d(num_features=out_chs),
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d(output_size=(1, 1))
        )
        in_chs = out_chs
        out_chs = 1280
        self.conv = nn.Sequential(
            nn.Conv2d(
                in_channels=in_chs,
                out_channels=out_chs,
                kernel_size=(1, 1),
                stride=(1, 1),
                padding=(0, 0),
                bias=True,
            ),
            nn.BatchNorm2d(num_features=out_chs),
            nn.ReLU(inplace=True)
        )

        self.fc1 = nn.Linear(out_chs, 20)
        self.fc2 = nn.Linear(20, num_classes)

    def forward(self, x):
        x = self.features(x)
        x = self.squeeze(x)
        x = self.conv(x)
        x = x.view((x.size(0), -1))
        x = self.fc1(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        out = self.fc2(x)
        return out

Configuration

def Ghost_Net(**kwargs):
    """
    Constructs a GhostNet model
    """
    cfgs = [
        # k (kernel size), t (expansion size), c (output channels), SE (use squeeze-and-excitation), s (stride)
        [(3, 3),  16,  16, 0, (1, 1)],
        [(3, 3),  48,  24, 0, (2, 2)],

        [(3, 3),  72,  24, 0, (1, 1)],
        [(5, 5),  72,  40, 1, (2, 2)],

        [(5, 5), 120,  40, 1, (1, 1)],
        [(3, 3), 240,  80, 0, (2, 2)],

        [(3, 3), 200,  80, 0, (1, 1)],
        [(3, 3), 184,  80, 0, (1, 1)],
        [(3, 3), 184,  80, 0, (1, 1)],
        [(3, 3), 480, 112, 1, (1, 1)],
        [(3, 3), 672, 112, 1, (1, 1)],
        [(5, 5), 672, 160, 1, (2, 2)],

        [(5, 5), 960, 160, 0, (1, 1)],
        [(5, 5), 960, 160, 1, (1, 1)],
        [(5, 5), 960, 160, 0, (1, 1)],
        [(5, 5), 960, 160, 1, (1, 1)]
    ]
    return GhostNet(configurations=cfgs, **kwargs)
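
For reference (an illustrative expansion, not from the original), with mult=1. the second row [(3, 3), 48, 24, 0, (2, 2)] becomes a bottleneck that expands the 16 input channels to 48 hidden channels, outputs 24 channels, skips SE, and halves the resolution:

# k=(3, 3), t=48, c=24, SE=0, s=(2, 2)
block = GhostBottleneck(in_chs=16, hid_chs=48, out_chs=24, kernel_size=(3, 3), stride=(2, 2), use_se=False)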

Test

ghostnet = Ghost_Net().to(device)
ghostnet.eval()
# tensor = torch.randn(size=(1, 4, 32, 32)).to(device)
# print(ghostnet(tensor))
summary(ghostnet, input_size=(4, 32, 32))  # torchsummary prints the layer table itself