This article builds on the environment set up in my two previous posts. If you are a complete beginner who has not yet got YOLOv8 running, please follow those posts first to configure and run YOLOv8; if YOLOv8 already runs for you, you can follow this article directly. I will also keep publishing more improvement tutorials and source-code reproductions for object segmentation, so stay tuned!
1. Ghost Conv
1.1 Introduction to Ghost Convolution
Ghost convolution is a lightweight model-design technique. Following an "intrinsic features + cheap linear transformations" strategy, it uses a small number of standard convolutions to generate the core features and then expands them into similar "ghost" features with simple operations (such as depthwise convolution), cutting computation and parameter count substantially (by roughly 30%-50%). Applied to YOLOv8, it shrinks the model markedly and speeds up inference, which makes it especially suitable for high-real-time, low-resource settings such as mobile and edge devices, balancing efficiency against accuracy.
For details, refer to the original GhostNet paper (Ghost paper link).
1.2 Applying Ghost Conv in YOLOv8
1.2.1 The Core Idea of Ghost Convolution
A standard convolution layer generates every output feature map at full cost, even though many of those maps are highly similar to one another. Ghost convolution exploits this redundancy in two steps:
(1) Generate intrinsic feature maps: use a small number of standard convolutions to produce a core subset of the output feature maps (the "intrinsic" feature maps).
(2) Generate ghost feature maps: apply a cheap linear operation (such as a depthwise convolution or an affine transform) to each intrinsic feature map to produce the additional "ghost" feature maps (Ghost Features).
Finally, the intrinsic and ghost feature maps are concatenated, yielding as many feature maps as a conventional convolution would, but at a significantly lower cost. For example, with a ratio of 2, producing 64 output channels takes only 32 standard-convolution channels plus 32 cheaply generated ones.
1.2.2 The Mathematical Advantage of Ghost Convolution
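The cost analysis below follows the GhostNet paper. For an input with $c$ channels, a standard convolution that produces $n$ output feature maps of size $h' \times w'$ with $k \times k$ kernels costs

$$F_{\text{conv}} = n \cdot h' \cdot w' \cdot c \cdot k^2.$$

A Ghost module with ratio $s$ instead generates only $n/s$ intrinsic maps by standard convolution and derives the remaining $(s-1) \cdot n/s$ ghost maps with cheap $d \times d$ depthwise operations:

$$F_{\text{ghost}} = \frac{n}{s} \cdot h' \cdot w' \cdot c \cdot k^2 + (s-1) \cdot \frac{n}{s} \cdot h' \cdot w' \cdot d^2.$$

Since $d \approx k$ and $s \ll c$, the speed-up ratio is

$$r = \frac{F_{\text{conv}}}{F_{\text{ghost}}} = \frac{c \cdot k^2}{\tfrac{1}{s} \cdot c \cdot k^2 + \tfrac{s-1}{s} \cdot d^2} \approx \frac{s \cdot c}{c + s - 1} \approx s,$$

and the parameter compression ratio is approximately $s$ as well. With the default ratio=2 used in the GhostModule code below, the computation and parameters of each replaced convolution are roughly halved.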
1.2.3 Suitable Scenarios for Ghost Conv
1) Mobile/embedded device deployment
2) Real-time detection tasks (e.g., drones, surveillance cameras)
3) Resource-constrained edge-computing scenarios
2. Integrating the Ghost Conv Module
2.1 Ghost Conv Core Code
import math
from functools import partial

import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import drop_path, SqueezeExcite
from timm.models.layers import CondConv2d, hard_sigmoid, DropPath

__all__ = ['C2f_GhostConv']

# NOTE: the gate_fn/divisor keyword arguments follow the older timm.models.layers
# SqueezeExcite API; newer timm releases renamed these parameters.
_SE_LAYER = partial(SqueezeExcite, gate_fn=hard_sigmoid, divisor=4)
class DynamicConv(nn.Module):
    """Dynamic convolution: a CondConv2d whose expert weights are mixed per sample by a routing layer."""
    def __init__(self, in_features, out_features, kernel_size=1, stride=1, padding='', dilation=1,
                 groups=1, bias=False, num_experts=4):
        super().__init__()
        self.routing = nn.Linear(in_features, num_experts)
        self.cond_conv = CondConv2d(in_features, out_features, kernel_size, stride, padding, dilation,
                                    groups, bias, num_experts)
        self.out_channels = out_features  # exposed so feature_info() can read it
def forward(self, x):
pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1) # CondConv routing
routing_weights = torch.sigmoid(self.routing(pooled_inputs))
x = self.cond_conv(x, routing_weights)
return x
class ConvBnAct(nn.Module):
""" Conv + Norm Layer + Activation w/ optional skip connection
"""
def __init__(
self, in_chs, out_chs, kernel_size, stride=1, dilation=1, pad_type='',
skip=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_path_rate=0., num_experts=4):
super(ConvBnAct, self).__init__()
self.has_residual = skip and stride == 1 and in_chs == out_chs
self.drop_path_rate = drop_path_rate
# self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type)
self.conv = DynamicConv(in_chs, out_chs, kernel_size, stride, dilation=dilation, padding=pad_type,
num_experts=num_experts)
self.bn1 = norm_layer(out_chs)
self.act1 = act_layer()
def feature_info(self, location):
        if location == 'expansion':  # output of conv after act, same as block output
info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels)
else: # location == 'bottleneck', block output
info = dict(module='', hook_type='', num_chs=self.conv.out_channels)
return info
def forward(self, x):
shortcut = x
x = self.conv(x)
x = self.bn1(x)
x = self.act1(x)
if self.has_residual:
if self.drop_path_rate > 0.:
x = drop_path(x, self.drop_path_rate, self.training)
x += shortcut
return x
class GhostModule(nn.Module):
    """Ghost module: a primary conv generates intrinsic features; a cheap depthwise conv expands them into ghost features."""
    def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, act_layer=nn.ReLU, num_experts=4):
        super(GhostModule, self).__init__()
self.oup = oup
init_channels = math.ceil(oup / ratio)
new_channels = init_channels * (ratio - 1)
self.primary_conv = nn.Sequential(
DynamicConv(inp, init_channels, kernel_size, stride, kernel_size // 2, bias=False, num_experts=num_experts),
nn.BatchNorm2d(init_channels),
act_layer() if act_layer is not None else nn.Sequential(),
)
self.cheap_operation = nn.Sequential(
DynamicConv(init_channels, new_channels, dw_size, 1, dw_size // 2, groups=init_channels, bias=False,
num_experts=num_experts),
nn.BatchNorm2d(new_channels),
act_layer() if act_layer is not None else nn.Sequential(),
)
def forward(self, x):
x1 = self.primary_conv(x)
x2 = self.cheap_operation(x1)
out = torch.cat([x1, x2], dim=1)
return out[:, :self.oup, :, :]
class GhostBottleneck(nn.Module):
""" Ghost bottleneck w/ optional SE"""
def __init__(self, in_chs, out_chs, dw_kernel_size=3,
stride=1, act_layer=nn.ReLU, se_ratio=0., drop_path=0., num_experts=4):
super(GhostBottleneck, self).__init__()
has_se = se_ratio is not None and se_ratio > 0.
self.stride = stride
mid_chs = in_chs * 2
# Point-wise expansion
self.ghost1 = GhostModule(in_chs, mid_chs, act_layer=act_layer, num_experts=num_experts)
# Depth-wise convolution
if self.stride > 1:
self.conv_dw = nn.Conv2d(
mid_chs, mid_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size - 1) // 2, groups=mid_chs, bias=False)
self.bn_dw = nn.BatchNorm2d(mid_chs)
else:
self.conv_dw = None
self.bn_dw = None
# Squeeze-and-excitation
self.se = _SE_LAYER(mid_chs, se_ratio=se_ratio,
act_layer=act_layer if act_layer is not nn.GELU else nn.ReLU) if has_se else None
# Point-wise linear projection
self.ghost2 = GhostModule(mid_chs, out_chs, act_layer=None, num_experts=num_experts)
# shortcut
if in_chs == out_chs and self.stride == 1:
self.shortcut = nn.Sequential()
else:
self.shortcut = nn.Sequential(
DynamicConv(
in_chs, in_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size - 1) // 2, groups=in_chs, bias=False, num_experts=num_experts),
nn.BatchNorm2d(in_chs),
DynamicConv(in_chs, out_chs, 1, stride=1, padding=0, bias=False, num_experts=num_experts),
nn.BatchNorm2d(out_chs),
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
        # 1st Ghost module (point-wise expansion)
x = self.ghost1(x)
# Depth-wise convolution
if self.conv_dw is not None:
x = self.conv_dw(x)
x = self.bn_dw(x)
# Squeeze-and-excitation
if self.se is not None:
x = self.se(x)
        # 2nd Ghost module (linear projection, no activation)
x = self.ghost2(x)
x = self.shortcut(shortcut) + self.drop_path(x)
return x
def autopad(k, p=None, d=1): # kernel, padding, dilation
"""Pad to 'same' shape outputs."""
if d > 1:
k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p
class Conv(nn.Module):
"""Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)."""
default_act = nn.SiLU() # default activation
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
"""Initialize Conv layer with given arguments including activation."""
super().__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
self.bn = nn.BatchNorm2d(c2)
self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
def forward(self, x):
"""Apply convolution, batch normalization and activation to input tensor."""
return self.act(self.bn(self.conv(x)))
def forward_fuse(self, x):
"""Perform transposed convolution of 2D data."""
return self.act(self.conv(x))
class C2f_GhostConv(nn.Module):
# CSP Bottleneck with 2 convolutions
def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super().__init__()
self.c = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, 2 * self.c, 1, 1)
self.cv2 = Conv((2 + n) * self.c, c2, 1) # optional act=FReLU(c2)
self.m = nn.ModuleList(GhostModule(self.c, self.c) for _ in range(n))
def forward(self, x):
y = list(self.cv1(x).split((self.c, self.c), 1))
y.extend(m(y[-1]) for m in self.m)
return self.cv2(torch.cat(y, 1))
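As a quick sanity check before wiring the module into YOLOv8, a minimal forward pass (with arbitrarily chosen shapes) should preserve the spatial size and return the requested number of channels:

if __name__ == '__main__':
    x = torch.randn(1, 64, 32, 32)   # dummy feature map
    m = C2f_GhostConv(64, 128, n=2)  # 64 -> 128 channels, two Ghost blocks
    print(m(x).shape)                # expected: torch.Size([1, 128, 32, 32])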
2.2 Adding the Ghost Conv Module to YOLOv8
First, create a Python file named GhostConv.py in the ultralytics\nn\modules folder.
Then copy the code above and paste it into the new GhostConv.py file.
Next, open the __init__.py file in the modules folder and add the following import line:
from .GhostConv import C2f_GhostConv
Still in __init__.py, find the __all__ definition and add the module's name to it, as shown in the sketch below.
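A minimal sketch of that edit (the placeholder entry stands in for whatever your __init__.py already exports):

__all__ = (
    'Conv',  # ...all existing entries stay as they are...
    'C2f_GhostConv',  # newly added
)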
Next, open the tasks.py file in the nn folder.
Several places in that file need modification. Use Ctrl+F to search for the surrounding context; where the relevant code already exists you only need to add the new name, and where it does not, type the lines in yourself. A combined sketch follows this list.
1. At the top of tasks.py, add C2f_GhostConv to the imports.
2. Search for def parse_model and make the corresponding change below it.
3. Search for Bottleneck and add the module at the corresponding places.
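Putting the three edits together, here is a minimal sketch assuming a recent ultralytics codebase (the module tuples in your parse_model will contain many more entries than shown; just append C2f_GhostConv to them):

# 1. at the top of ultralytics/nn/tasks.py, next to the other module imports
from ultralytics.nn.modules import C2f_GhostConv

# 2./3. inside def parse_model(...), append C2f_GhostConv to the branch that
# computes input/output channels and to the branch that inserts the repeat count
if m in (Conv, Bottleneck, SPPF, C2f, C2f_GhostConv):  # abbreviated tuple
    c1, c2 = ch[f], args[0]
    if c2 != nc:
        c2 = make_divisible(min(c2, max_channels) * width, 8)  # scale width
    args = [c1, c2, *args[1:]]
    if m in (C2f, C2f_GhostConv):  # abbreviated tuple
        args.insert(2, n)  # insert number of repeats
        n = 1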
2.3 Configuring the Corresponding yaml File
Open the folder ultralytics\cfg\models\v8, copy any existing .yaml file in it, paste the copy into the same folder, and rename it yolov8n-GhostConv.yaml. Delete the copy's contents entirely, then paste in the following:
# YOLOv8 object detection model with P3-P5 outputs. For usage examples see https://docs.ultralytics.com/tasks/detect

# Parameters
nc: 80  # number of classes
scales:  # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
  # [depth, width, max_channels]
  n: [0.33, 0.25, 1024]  # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
  s: [0.33, 0.50, 1024]  # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
  m: [0.67, 0.75, 768]   # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
  l: [1.00, 1.00, 512]   # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
  x: [1.00, 1.25, 512]   # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs

# YOLOv8.0n backbone
backbone:
  # [from, repeats, module, args]
  - [-1, 1, Conv, [64, 3, 2]]  # 0-P1/2
  - [-1, 1, Conv, [128, 3, 2]]  # 1-P2/4
  - [-1, 3, C2f_GhostConv, [128, True]]
  - [-1, 1, Conv, [256, 3, 2]]  # 3-P3/8
  - [-1, 6, C2f_GhostConv, [256, True]]
  - [-1, 1, Conv, [512, 3, 2]]  # 5-P4/16
  - [-1, 6, C2f_GhostConv, [512, True]]
  - [-1, 1, Conv, [1024, 3, 2]]  # 7-P5/32
  - [-1, 3, C2f_GhostConv, [1024, True]]
  - [-1, 1, SPPF, [1024, 5]]  # 9

# YOLOv8.0n head
head:
  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
  - [[-1, 6], 1, Concat, [1]]  # cat backbone P4
  - [-1, 3, C2f_GhostConv, [512]]  # 12
  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
  - [[-1, 4], 1, Concat, [1]]  # cat backbone P3
  - [-1, 3, C2f_GhostConv, [256]]  # 15 (P3/8-small)
  - [-1, 1, Conv, [256, 3, 2]]
  - [[-1, 12], 1, Concat, [1]]  # cat head P4
  - [-1, 3, C2f_GhostConv, [512]]  # 18 (P4/16-medium)
  - [-1, 1, Conv, [512, 3, 2]]
  - [[-1, 9], 1, Concat, [1]]  # cat head P5
  - [-1, 3, C2f_GhostConv, [1024]]  # 21 (P5/32-large)
  - [[15, 18, 21], 1, Detect, [nc]]  # Detect(P3, P4, P5)
After pasting the content above into the yaml file, copy the file's absolute path.
2.4 Running the Code
Create a Python training script named train.py: make a new empty Python file and copy the following into it (if you followed my previous post you can skip this step and simply paste in the yaml file's absolute path before running). Note: remember to replace the paths with your own.
from ultralytics import YOLO

# Load a model
model = YOLO(r'G:\add\ultralytics-main\ultralytics\cfg\models\v8\yolov8n-GhostConv.yaml')  # build a new model from YAML
# model = YOLO(r'G:\add\ultralytics-main\ultralytics\cfg\models\v8\yolov8n-SimAM.yaml')  # build from YAML and transfer weights

# Resume interrupted training:
# model = YOLO(r'G:\add\ultralytics-main\runs\detect\train24\weights\last.pt')  # load a partially trained model

if __name__ == '__main__':
    # note: resume=True resumes the most recent checkpoint; remove it when training a new model from scratch
    results = model.train(data=r'G:\add\ultralytics-main\ultralytics\cfg\datasets\VOC.yaml', batch=50, epochs=5000, imgsz=640, resume=True, workers=8)
Once everything is in place, simply run this script. If the module was added successfully, the model summary will be printed, and you can see that the parameter count has indeed dropped considerably. Feel free to experiment with different combinations!
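If you just want to verify the parameter savings without launching a full training run, the quick comparison sketched below uses ultralytics' model.info() to print the layer/parameter/GFLOPs summary for both models (paths as in this tutorial; replace them with your own):

from ultralytics import YOLO

YOLO('yolov8n.yaml').info()  # baseline YOLOv8n summary
YOLO(r'G:\add\ultralytics-main\ultralytics\cfg\models\v8\yolov8n-GhostConv.yaml').info()  # Ghost variant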
3. Summary
This post walked through the detailed steps for swapping GhostConv into YOLOv8; follow them in order and you should be able to reproduce the result. I will keep publishing further YOLOv8 improvement methods, so stay tuned, and feel free to leave requests for future topics in the comments!