YOLOv8改进策略|MobileNetv4替换主干网络
MobileNetv4简介
论文链接:https://arxiv.org/pdf/2404.10518
项目仓库:https://github.com/jiaowoguanren0615/MobileNetV4/tree/main
摘要:MobileNetV4 (MNv4),具有适用于移动设备的通用高效架构设计。其核心是,我们引入了通用倒置瓶颈 (UIB) 搜索块,这是一种统一而灵活的结构,它合并了倒置瓶颈 (IB)、ConvNext、前馈网络 (FFN) 和新颖的 Extra Depthwise (ExtraDW) 变体。除了 UIB,我们还推出了 Mobile MQA,这是一款专为移动加速器量身定制的注意力块,可显著提高 39% 的加速速度。还引入了优化的神经架构搜索 (NAS) 方法,可提高 MNv4 搜索效率。UIB、移动 MQA 和改进的 NAS 配方的集成产生了一套新的 MNv4 模型,这些模型在移动 CPU、DSP、GPU 以及 Apple 神经引擎和 Google Pixel EdgeTPU 等专用加速器上大多是帕累托最优的——这是任何其他测试模型中都没有的特征。最后,为了进一步提高精度,我们引入了一种新颖的蒸馏技术。通过这种技术的增强,我们的 MNv4-Hybrid-Large 型号可提供 87% 的 ImageNet-1K 准确率,而 Pixel 8 EdgeTPU 的运行时间仅为 3.8 毫秒。
MobileNetv4在快速检测上相比前面的几个版本得到了有效提升,在之前的工作中,许多学者已经将MobileNetv3、v2、v1等网络与yolo进行有效结合并取得了不错的效果,本文通过采用最新的MobileNetv4与yolov8进行结合,替换yolov8的主干网络backbone,对于大型的结构如yolov8l、x等型号中能够有效降低模型参数,减小模型大小,识别速度得到了有效提升。
YOLOv8-mobilenetv4
替换后的模型结构
替换前的模型结构
yolov8-mobilenetv4.yaml
复制一份 ultralytics/cfg/models/v8/yolov8.yaml 文件,并将其重命名为 yolov8-mobilenetv4.yaml,内容修改如下:
# Ultralytics YOLO 🚀, AGPL-3.0 license
# YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
# Parameters
nc: 80 # number of classes
scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
  # [depth, width, max_channels]
  n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
  s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
  m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
  l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
  x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs
# YOLOv8.0n backbone
# NOTE(review): mobilenetv4_small returns a list of multi-scale feature maps; the
# layer indices used by the head below (2, 3, 5, 8, 11, 14, 17) are only consistent
# if parse_model expands those outputs into layers 0-4 (stem .. P5/32) so that SPPF
# becomes layer 5 — confirm the customized parse_model in ultralytics/nn/tasks.py.
backbone:
  # [from, repeats, module, args]
  # Original YOLOv8 backbone, kept for reference:
  # - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
  # - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
  # - [-1, 3, C2f, [128, True]]
  # - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
  # - [-1, 6, C2f, [256, True]]
  # - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
  # - [-1, 6, C2f, [512, True]]
  # - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
  # - [-1, 3, C2f, [1024, True]]
  - [-1, 1, mobilenetv4_small, []] # 0-4: MobileNetV4 stages (layer 2 = P3/8, layer 3 = P4/16)
  - [-1, 1, SPPF, [1024, 5]] # 5 (P5/32)
# YOLOv8.0n head
head:
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]] # 6
  - [[-1, 3], 1, Concat, [1]] # 7 cat backbone P4
  - [-1, 3, C2f, [512]] # 8
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]] # 9
  - [[-1, 2], 1, Concat, [1]] # 10 cat backbone P3
  - [-1, 3, C2f, [256]] # 11 (P3/8-small)
  - [-1, 1, Conv, [256, 3, 2]] # 12
  - [[-1, 8], 1, Concat, [1]] # 13 cat head P4
  - [-1, 3, C2f, [512]] # 14 (P4/16-medium)
  - [-1, 1, Conv, [512, 3, 2]] # 15
  - [[-1, 5], 1, Concat, [1]] # 16 cat head P5 (SPPF output)
  - [-1, 3, C2f, [1024]] # 17 (P5/32-large)
  - [[11, 14, 17], 1, Detect, [nc]] # Detect(P3, P4, P5)
MobileNetv4.py
在 ultralytics/nn 目录下新建一个 backbone 文件夹,并在其中创建 MobileNetv4.py 文件:
from timm.models import register_model
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from .model_config import MODEL_SPECS
__all__ = ['mobilenetv4_small','mobilenetv4_hybrid_large']
def make_divisible(
    value: float,
    divisor: int,
    min_value: Optional[float] = None,
    round_down_protect: bool = True,
) -> int:
    """Adjust `value` to the nearest multiple of `divisor`.

    Mirrors the TensorFlow reference implementation
    (official/vision/modeling/layers/nn_layers.py); used so that every
    layer ends up with a channel count divisible by 8.

    Args:
        value: Original (possibly fractional) value.
        divisor: The divisor the result must be a multiple of.
        min_value: Lower bound for the result; defaults to `divisor`.
        round_down_protect: When True, never round down by more than 10%.

    Returns:
        The adjusted value as an `int`, divisible by `divisor`.
    """
    floor = divisor if min_value is None else min_value
    # Round to the nearest multiple of `divisor`, never below `floor`.
    adjusted = max(floor, int(value + divisor / 2) // divisor * divisor)
    # Guard against shrinking the original value by more than 10%.
    if round_down_protect and adjusted < 0.9 * value:
        adjusted += divisor
    return int(adjusted)
def conv_2d(inp, oup, kernel_size=3, stride=1, groups=1, bias=False, norm=True, act=True):
    """Build a Conv2d -> (BatchNorm2d) -> (ReLU6) sequential block.

    Args:
        inp: Input channel count.
        oup: Output channel count.
        kernel_size: Square kernel size; padding preserves spatial size at stride 1.
        stride: Convolution stride.
        groups: Convolution groups (set to `inp` for a depthwise conv).
        bias: Whether the convolution carries a bias term.
        norm: Append a BatchNorm2d layer when True.
        act: Append a ReLU6 activation when True.

    Returns:
        An `nn.Sequential` holding the requested layers.
    """
    block = nn.Sequential()
    # "same"-style padding for odd kernel sizes.
    block.add_module('conv', nn.Conv2d(inp, oup, kernel_size, stride,
                                       (kernel_size - 1) // 2, bias=bias, groups=groups))
    if norm:
        block.add_module('BatchNorm2d', nn.BatchNorm2d(oup))
    if act:
        block.add_module('Activation', nn.ReLU6())
    return block
class InvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual ("fused IB") block.

    Expands the input with a 1x1 conv (skipped when expand_ratio == 1),
    applies a depthwise 3x3 conv carrying the stride, then projects back
    with a 1x1 conv. A residual connection is added when stride == 1 and
    the input/output channel counts match.
    """

    def __init__(self, inp, oup, stride, expand_ratio, act=False):
        super().__init__()
        assert stride in [1, 2]
        self.stride = stride
        mid = int(round(inp * expand_ratio))
        self.block = nn.Sequential()
        if expand_ratio != 1:
            # Pointwise expansion.
            self.block.add_module('exp_1x1', conv_2d(inp, mid, kernel_size=1, stride=1))
        # Depthwise spatial convolution (carries the stride).
        self.block.add_module('conv_3x3',
                              conv_2d(mid, mid, kernel_size=3, stride=stride, groups=mid))
        # Linear projection back to `oup` channels (activation optional).
        self.block.add_module('red_1x1', conv_2d(mid, oup, kernel_size=1, stride=1, act=act))
        self.use_res_connect = self.stride == 1 and inp == oup

    def forward(self, x):
        out = self.block(x)
        return x + out if self.use_res_connect else out
class UniversalInvertedBottleneckBlock(nn.Module):
    """Universal Inverted Bottleneck (UIB) block from MobileNetV4.

    Pipeline: optional leading depthwise conv -> 1x1 expansion -> optional
    middle depthwise conv -> linear 1x1 projection. The spatial stride is
    carried by the middle depthwise conv when `middle_dw_downsample` is
    true, otherwise by the leading depthwise conv. No residual connection.
    """

    def __init__(self,
                 inp,
                 oup,
                 start_dw_kernel_size,
                 middle_dw_kernel_size,
                 middle_dw_downsample,
                 stride,
                 expand_ratio
                 ):
        super().__init__()
        self.start_dw_kernel_size = start_dw_kernel_size
        self.middle_dw_kernel_size = middle_dw_kernel_size
        if start_dw_kernel_size:
            # Leading depthwise conv; downsamples only when the middle dw does not.
            self._start_dw_ = conv_2d(inp, inp,
                                      kernel_size=start_dw_kernel_size,
                                      stride=1 if middle_dw_downsample else stride,
                                      groups=inp, act=False)
        # 1x1 expansion; hidden width rounded to a multiple of 8.
        expand_filters = make_divisible(inp * expand_ratio, 8)
        self._expand_conv = conv_2d(inp, expand_filters, kernel_size=1)
        if middle_dw_kernel_size:
            # Middle depthwise conv; carries the stride when downsampling here.
            self._middle_dw = conv_2d(expand_filters, expand_filters,
                                      kernel_size=middle_dw_kernel_size,
                                      stride=stride if middle_dw_downsample else 1,
                                      groups=expand_filters)
        # Linear projection (no activation) back to `oup` channels.
        self._proj_conv = conv_2d(expand_filters, oup, kernel_size=1, stride=1, act=False)

    def forward(self, x):
        if self.start_dw_kernel_size:
            x = self._start_dw_(x)
        x = self._expand_conv(x)
        if self.middle_dw_kernel_size:
            x = self._middle_dw(x)
        return self._proj_conv(x)
def build_blocks(layer_spec):
    """Instantiate one backbone stage from a layer-spec dict.

    `layer_spec` carries 'block_name' ('convbn' | 'uib' | 'fused_ib'),
    'num_blocks', and 'block_specs' (one positional-argument list per block).

    Returns:
        An `nn.Sequential` of the constructed blocks (empty when the spec
        has no 'block_name').

    Raises:
        NotImplementedError: For an unrecognized block name.
    """
    if not layer_spec.get('block_name'):
        return nn.Sequential()
    name = layer_spec['block_name']
    # Map each block family to its constructor and positional-arg schema.
    if name == "convbn":
        schema = ['inp', 'oup', 'kernel_size', 'stride']
        factory = conv_2d
    elif name == "uib":
        schema = ['inp', 'oup', 'start_dw_kernel_size', 'middle_dw_kernel_size',
                  'middle_dw_downsample', 'stride', 'expand_ratio']
        factory = UniversalInvertedBottleneckBlock
    elif name == "fused_ib":
        schema = ['inp', 'oup', 'stride', 'expand_ratio', 'act']
        factory = InvertedResidual
    else:
        raise NotImplementedError
    stage = nn.Sequential()
    for i in range(layer_spec['num_blocks']):
        cfg = dict(zip(schema, layer_spec['block_specs'][i]))
        stage.add_module(f"{name}_{i}", factory(**cfg))
    return stage
class MobileNetV4(nn.Module):
    """MobileNetV4 backbone returning multi-scale feature maps for detection."""

    def __init__(self, model):
        # Supported spec names:
        # MobileNetV4ConvSmall MobileNetV4ConvMedium MobileNetV4ConvLarge
        # MobileNetV4HybridMedium MobileNetV4HybridLarge
        """Params to initiate MobileNetV4
        Args:
            model : support 5 types of models as indicated in
            "https://github.com/tensorflow/models/blob/master/official/vision/modeling/backbones/mobilenet.py"
        """
        super().__init__()
        assert model in MODEL_SPECS.keys()
        self.model = model
        self.spec = MODEL_SPECS[self.model]
        # Six sequential stages, each built from its entry in the spec dict.
        # conv0 (stem)
        self.conv0 = build_blocks(self.spec['conv0'])
        # layer1
        self.layer1 = build_blocks(self.spec['layer1'])
        # layer2
        self.layer2 = build_blocks(self.spec['layer2'])
        # layer3
        self.layer3 = build_blocks(self.spec['layer3'])
        # layer4
        self.layer4 = build_blocks(self.spec['layer4'])
        # layer5
        self.layer5 = build_blocks(self.spec['layer5'])
        self.features = nn.ModuleList([self.conv0, self.layer1, self.layer2, self.layer3, self.layer4, self.layer5])
        # Probe per-scale output channel counts with a dummy 640x640 forward pass
        # (used by the YOLO parse_model integration to size downstream layers).
        # NOTE(review): forward() can leave None entries when a stage scale is
        # missing, in which case `.size(1)` raises — assumes every MODEL_SPECS
        # entry yields features at all of strides 4/8/16/32; confirm.
        self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]

    def forward(self, x):
        # Collect the last feature map produced at each of strides 4/8/16/32,
        # returned as a 4-element list ordered [P2, P3, P4, P5].
        input_size = x.size(2)
        scale = [4, 8, 16, 32]
        features = [None, None, None, None]
        for f in self.features:
            x = f(x)
            # Record x whenever the running downsample ratio hits a target stride;
            # a later stage at the same stride overwrites the earlier entry.
            if input_size // x.size(2) in scale:
                features[scale.index(input_size // x.size(2))] = x
        return features
@register_model
def mobilenetv4_small(pretrained=False, pretrained_cfg=None, pretrained_cfg_overlay=None, **kwargs):
    """Create a MobileNetV4-Conv-Small backbone (registered with timm)."""
    return MobileNetV4('MobileNetV4ConvSmall', **kwargs)
@register_model
def mobilenetv4_medium(pretrained=False, pretrained_cfg=None, pretrained_cfg_overlay=None, **kwargs):
    """Create a MobileNetV4-Conv-Medium backbone (registered with timm)."""
    return MobileNetV4('MobileNetV4ConvMedium', **kwargs)
@register_model
def mobilenetv4_large(pretrained=False, pretrained_cfg=None, pretrained_cfg_overlay=None, **kwargs):
    """Create a MobileNetV4-Conv-Large backbone (registered with timm)."""
    return MobileNetV4('MobileNetV4ConvLarge', **kwargs)
@register_model
def mobilenetv4_hybrid_medium(pretrained=False, pretrained_cfg=None, pretrained_cfg_overlay=None, **kwargs):
    """Create a MobileNetV4-Hybrid-Medium backbone (registered with timm)."""
    return MobileNetV4('MobileNetV4HybridMedium', **kwargs)
@register_model
def mobilenetv4_hybrid_large(pretrained=False, pretrained_cfg=None, pretrained_cfg_overlay=None, **kwargs):
    """Create a MobileNetV4-Hybrid-Large backbone (registered with timm)."""
    return MobileNetV4('MobileNetV4HybridLarge', **kwargs)
# if __name__ == '__main__':
# from torchinfo import summary
# model = mobilenetv4_hybrid_large()
# print("Check output shape ...")
# summary(model, input_size=(1, 3, 224, 224))
# x = torch.rand(1, 3, 224, 224)
# y = model(x)
# print(y.shape)
# for i in y:
# print(i.shape)
Results
采用该网络结构进行训练,模型参数为:
YOLOv8-mobilenetv4 summary: 292 layers, 5700668 parameters, 0 gradients
yolov8-mobilenetv4:准确率为0.96
完整代码已经进行开源,跪求个免费Star:
https://github.com/GuoquanPei/YOLOv8-MobileNetv4-ultralytics