CenterTrack Improvement: Replacing the Backbone with GhostNet

CenterTrack paper: Tracking Objects as Points

CenterTrack source code: CenterTrack


CenterTrack Network Structure

        CenterTrack takes the current frame, the previous frame, and the previous frame's heatmap as input. The default backbone is DLA34 and the fusion (neck) module is a DLAUp structure; the main outputs are the center-point heatmap, the center-point offset, the box width/height, and the tracking (box) offset.
        (Figure adapted from a Zhihu article.)
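
        To make the data flow concrete, below is a minimal sketch of how a CenterTrack-style forward pass fits together. This is illustrative only: the class name TrackerSketch, feat_channels, and the 256-channel head width are made up here and are not CenterTrack's actual code. The backbone and neck produce a single feature map, and separate convolutional heads regress the outputs listed above.

# Illustrative sketch, NOT the repository's code: names and widths are assumptions.
import torch.nn as nn

class TrackerSketch(nn.Module):
    def __init__(self, backbone, neck, feat_channels=64, num_classes=1):
        super().__init__()
        self.backbone = backbone        # e.g. DLA34, or the GhostNet defined below
        self.neck = neck                # e.g. a DLAUp-style fusion module

        def head(out_channels):
            return nn.Sequential(
                nn.Conv2d(feat_channels, 256, 3, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(256, out_channels, 1))

        self.hm = head(num_classes)     # center-point heatmap
        self.reg = head(2)              # sub-pixel center offset
        self.wh = head(2)               # box width / height
        self.tracking = head(2)         # offset to the object's center in the previous frame

    def forward(self, img, pre_img, pre_hm):
        # The backbone fuses the current frame with the previous frame and its
        # heatmap, returning multi-scale features; the neck reduces them to one map.
        feats = self.backbone(img, pre_img, pre_hm)
        feat = self.neck(feats)
        return {'hm': self.hm(feat), 'reg': self.reg(feat),
                'wh': self.wh(feat), 'tracking': self.tracking(feat)}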

Replacing the CenterTrack Backbone

        The backbone implementations in the CenterTrack code live under src/lib/model/networks/backbones; besides dla34, the directory already contains resnet and mobilenetv2. The goal of this post is to add a ghostnet.py there.
        The code below was adapted from an existing GhostNet implementation found online and modified to fit CenterTrack; the main change is that the backbone now returns multi-stage outputs from the different feature levels (and accepts the pre_img/pre_hm inputs).

"""
Creates a GhostNet Model as defined in:
GhostNet: More Features from Cheap Operations By Kai Han, Yunhe Wang, Qi Tian, Jianyuan Guo, Chunjing Xu, Chang Xu.
https://arxiv.org/abs/1911.11907
Modified from https://github.com/d-li14/mobilenetv3.pytorch
"""
import torch
import torch.nn as nn
import math

cfgs = [
    # k, t, c, SE, s
    [[3, 16, 16, 0, 1]],

    [[3, 48, 32, 0, 2]],
    [[3, 72, 32, 0, 1]],

    [[5, 72, 64, 1, 2]],
    [[5, 120, 64, 1, 1]],

    [[3, 240, 96, 0, 2]],
    [[3, 200, 96, 0, 1],
     [3, 184, 96, 0, 1],
     [3, 184, 96, 0, 1],
     [3, 480, 120, 1, 1],
     [3, 672, 120, 1, 1]],

    [[5, 672, 320, 1, 2]],
    [[5, 960, 320, 0, 1],
     [5, 960, 320, 1, 1],
     [5, 960, 320, 0, 1],
     [5, 960, 320, 1, 1]]
]


def _make_divisible(v, divisor, min_value=None):
    """
    This function is taken from the original tf repo.
    It ensures that all layers have a channel number that is divisible by 8
    It can be seen here:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    """
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v


class SELayer(nn.Module):
    def __init__(self, channel, reduction=4):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel), )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        y = torch.clamp(y, 0, 1)
        return x * y


def depthwise_conv(inp, oup, kernel_size=3, stride=1, relu=False):
    return nn.Sequential(
        nn.Conv2d(inp, oup, kernel_size, stride, kernel_size // 2, groups=inp, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True) if relu else nn.Sequential(),
    )


class GhostModule(nn.Module):
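    """Ghost module: a regular conv produces the primary features, cheap
    depthwise convs generate the remaining 'ghost' features, and the two
    are concatenated and trimmed to `oup` channels."""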
    def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True):
        super(GhostModule, self).__init__()
        self.oup = oup
        init_channels = math.ceil(oup / ratio)
        new_channels = init_channels * (ratio - 1)

        self.primary_conv = nn.Sequential(
            nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size // 2, bias=False),
            nn.BatchNorm2d(init_channels),
            nn.ReLU(inplace=True) if relu else nn.Sequential(),
        )

        self.cheap_operation = nn.Sequential(
            nn.Conv2d(init_channels, new_channels, dw_size, 1, dw_size // 2, groups=init_channels, bias=False),
            nn.BatchNorm2d(new_channels),
            nn.ReLU(inplace=True) if relu else nn.Sequential(),
        )

    def forward(self, x):
        x1 = self.primary_conv(x)
        x2 = self.cheap_operation(x1)
        out = torch.cat([x1, x2], dim=1)
        return out[:, :self.oup, :, :]


class GhostBottleneck(nn.Module):
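    """Ghost bottleneck: expansion GhostModule -> stride-2 depthwise conv when
    downsampling -> optional SE -> linear-projection GhostModule, combined with
    a shortcut branch."""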
    def __init__(self, inp, hidden_dim, oup, kernel_size, stride, use_se):
        super(GhostBottleneck, self).__init__()
        assert stride in [1, 2]

        self.conv = nn.Sequential(
            # pw
            GhostModule(inp, hidden_dim, kernel_size=1, relu=True),
            # dw
            depthwise_conv(hidden_dim, hidden_dim, kernel_size, stride, relu=False) if stride == 2 else nn.Sequential(),
            # Squeeze-and-Excite
            SELayer(hidden_dim) if use_se else nn.Sequential(),
            # pw-linear
            GhostModule(hidden_dim, oup, kernel_size=1, relu=False),
        )

        if stride == 1 and inp == oup:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                depthwise_conv(inp, inp, kernel_size, stride, relu=False),
                nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
            )

    def forward(self, x):
        return self.conv(x) + self.shortcut(x)


class GhostNet(nn.Module):
    def __init__(self, opt, channel_stage=None, width_mult=1.):
        super(GhostNet, self).__init__()
        # setting of inverted residual blocks
        self.cfgs = cfgs

        # building first layer
        # output_channel = _make_divisible(16 * width_mult, 4)
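        # Per-stage output channels (stem + the five block groups below),
        # exposed so the neck / fusion module can read them.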
        self.channels = [32, 16, 32, 64, 120, 320]

        output_channel = 32
        self.con_stem = nn.Sequential(
            nn.Conv2d(3, output_channel, 3, 2, 1, bias=False),
            nn.BatchNorm2d(output_channel),
            nn.ReLU(inplace=True)
        )
        input_channel = output_channel

        if opt.pre_img:
            print('adding pre_img layer...')
            self.pre_img_layer = nn.Sequential(
                nn.Conv2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False),
                nn.BatchNorm2d(input_channel))
        if opt.pre_hm:
            print('adding pre_hm layer...')
            self.pre_hm_layer = nn.Sequential(
                nn.Conv2d(1, input_channel, kernel_size=3, padding=1, stride=2, bias=False),
                nn.BatchNorm2d(input_channel))

        # building inverted residual blocks
        stages = []
        block = GhostBottleneck
        for cfg in self.cfgs:
            layers = []
            for k, exp_size, c, use_se, s in cfg:
                output_channel = _make_divisible(c * width_mult, 4)
                hidden_channel = _make_divisible(exp_size * width_mult, 4)
                layers.append(block(input_channel, hidden_channel, output_channel, k, s, use_se))
                input_channel = output_channel
            stages.append(nn.Sequential(*layers))

        self.blocks1 = nn.Sequential(
            stages[0]
        )

        self.blocks2 = nn.Sequential(
            stages[1],
            stages[2]
        )

        self.blocks3 = nn.Sequential(
            stages[3],
            stages[4],
        )

        self.blocks4 = nn.Sequential(
            stages[5],
            stages[6]
        )

        self.blocks5 = nn.Sequential(
            stages[7],
            stages[8]
        )

        self._initialize_weights()

    def forward(self, x, pre_img=None, pre_hm=None):
        x = self.con_stem(x)

        if pre_img is not None:
            x = x + self.pre_img_layer(pre_img)
        if pre_hm is not None:
            x = x + self.pre_hm_layer(pre_hm)

        y = [x]
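        # y collects one feature map per stage for the neck:
        # strides [2, 2, 4, 8, 16, 32], channels [32, 16, 32, 64, 120, 320]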

        x = self.blocks1(x)
        y.append(x)

        x = self.blocks2(x)
        y.append(x)

        x = self.blocks3(x)
        y.append(x)

        x = self.blocks4(x)
        y.append(x)

        x = self.blocks5(x)
        y.append(x)

        return y

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()


# def ghost_net(**kwargs):
#     """
#     Constructs a GhostNet model
#     """
#     cfgs = [
#         # k, t, c, SE, s
#         [[3, 16, 16, 0, 1]],
#
#         [[3, 48, 24, 0, 2]],
#         [[3, 72, 24, 0, 1]],
#
#         [[5, 72, 32, 1, 2]],
#         [[5, 120, 32, 1, 1]],
#
#         [[3, 240, 64, 0, 2]],
#         [[3, 200, 64, 0, 1],
#          [3, 184, 64, 0, 1],
#          [3, 184, 64, 0, 1],
#          [3, 480, 112, 1, 1],
#          [3, 672, 112, 1, 1]],
#
#         [[5, 672, 320, 1, 2]],
#         [[5, 960, 320, 0, 1],
#          [5, 960, 320, 1, 1],
#          [5, 960, 320, 0, 1],
#          [5, 960, 320, 1, 1]]
#     ]
#     return GhostNet(cfgs, **kwargs)


if __name__ == '__main__':
    # Minimal stand-in for CenterTrack's opt so the backbone can run standalone.
    class _Opt:
        pre_img = True
        pre_hm = True
    model = GhostNet(_Opt())
    model.eval()
    print(model)
    x = torch.randn(8, 3, 544, 960)
    pre_img = torch.randn(8, 3, 544, 960)
    pre_hm = torch.randn(8, 1, 544, 960)
    outs = model(x, pre_img, pre_hm)
    for i, out in enumerate(outs):
        print(i, out.shape)

         The src/lib/model/networks directory of the CenterTrack code contains generic_network.py, which builds the model from the available backbones. GhostNet needs to be imported and registered in this file:

# import ghostnet
from lib.model.networks.backbones.ghostnet import GhostNet   

backbone_factory = {
    'dla34': dla34,
    'resnet': Resnet,
    'mobilenetv2': MobileNetV2,
    'ghostnet': GhostNet    # register GhostNet
}
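
        For orientation, the registered entry is looked up roughly like this inside the generic network (a sketch only; the exact lines in generic_network.py may differ):

# Sketch only -- details may differ from the repository's generic_network.py.
backbone = backbone_factory[opt.backbone](opt=opt)   # 'ghostnet' -> GhostNet(opt)
channels = backbone.channels   # per-stage channel list consumed by the neck / fusion module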

        Finally, in src/lib/opts.py, change the 'arch' option to 'generic' and the 'backbone' option to 'ghostnet'.
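
        As a sketch, that change corresponds to defaults roughly like the following (the option names come from the step above; the exact argparse lines in the repository may look different). Passing --arch generic --backbone ghostnet on the command line achieves the same thing without editing the defaults.

# src/lib/opts.py -- sketch only; follow the file's existing add_argument style.
self.parser.add_argument('--arch', default='generic',
                         help='model architecture')
self.parser.add_argument('--backbone', default='ghostnet',
                         help='backbone to use with the generic architecture')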

Experimental Results

        In terms of speed, this backbone runs close to twice as fast as dla34; as expected, MOTA drops noticeably.
