Latest Innovation and Improvement Recommendations
- 💡 A unified YOLO code framework is used throughout; different modules are combined to build different YOLO object-detection models.
🔥 The improvement articles in the 《芒果书》(Mango Book) series columns each cover multiple model-improvement methods, and all of them apply to YOLOv3, YOLOv4, YOLOR, YOLOX, YOLOv5, YOLOv7 and YOLOv8 (the main focus)!
🔥 Many readers have reported that the tutorials in this column already give them accuracy gains on their own datasets, and gains are possible on the COCO dataset as well.
Every post includes the improvement source code, ready to train with a single command.
🔥 The earlier you subscribe to the column, the sooner you can use the original innovations to improve your models and stay a step ahead.
YOLOv8 + improved attention mechanisms
Details
See this post (click to view): YOLOv5 / YOLOv7 / YOLOv8 improvements | YOLO improvements covering a wide range of attention mechanisms, about 300,000 characters in total (improvement source code included), original improvements for many Attention mechanisms and Transformer self-attention
Latest improvement tutorials for the full YOLOv5 / YOLOv7 / YOLOv8 model series (source code included)
🔥 Improvement articles in the 《芒果书》(Mango Book) series columns
Column link: exclusive column, first of its kind online: 《芒果YOLOv8深度改进教程》🍊
Column link: exclusive column, first of its kind online: 《芒果YOLOv5深度改进教程》🥝
Column link: exclusive column, first of its kind online: 《芒果YOLOv7深度改进教程》🍉
Table of Contents
YOLOv5 / YOLOv7 + attention mechanisms at a glance
YOLOv5 + ShuffleAttention attention mechanism
Blog link 🔗🌟: Improving the YOLOv5 series: 12. Adding the ShuffleAttention attention mechanism
YOLOv5 + CrissCrossAttention attention mechanism
Blog link 🔗🌟: Improving the YOLOv5 series: 13. Adding the CrissCrossAttention attention mechanism
YOLOv5 + S2-MLPv2 attention mechanism
Blog link 🔗🌟: Improving the YOLOv5 series: 14. Adding the S2-MLPv2 attention mechanism
YOLOv5 + SimAM attention mechanism
Blog link 🔗🌟: Improving the YOLOv5 series: 15. Adding the SimAM attention mechanism
YOLOv5 + SKAttention attention mechanism
Blog link 🔗🌟: Improving the YOLOv5 series: 16. Adding the SKAttention attention mechanism
YOLOv5 + NAMAttention attention mechanism
Blog link 🔗🌟: Improving the YOLOv5 series: 17. Adding the NAMAttention attention mechanism
YOLOv5 + SOCA attention mechanism
Blog link 🔗🌟: Improving the YOLOv5 series: 18. Adding the SOCA attention mechanism
YOLOv5 + CBAM attention mechanism
Blog link 🔗🌟: Improving the YOLOv5 series: 18. Adding the CBAM attention mechanism
YOLOv5 + SEAttention attention mechanism
Blog link 🔗🌟: Improving the YOLOv5 series: 19. Adding the SEAttention attention mechanism
YOLOv5 + GAMAttention attention mechanism
Blog link 🔗🌟: Improving the YOLOv5 series: 20. Adding the GAMAttention attention mechanism
YOLOv5 + CA attention mechanism
Blog link 🔗🌟: github
YOLOv5 + ECA attention mechanism  Blog link 🔗🌟: github
More detailed explanations of these modules are being added over time...
Further updates to follow 🔥🔥🔥
Method 1: Using the SOCA attention mechanism in YOLOv5
SOCA attention mechanism schematic
SOCA (Second-Order Channel Attention, introduced in the SAN image super-resolution paper) re-weights feature channels using second-order statistics: it computes a channel covariance matrix over the feature map, normalizes it with a matrix square root obtained by Newton-Schulz iteration, and passes the result through a small Conv-ReLU-Conv-Sigmoid gate.
1.1 Add the following yolov5_SOCA.yaml file (for example under models/); the name matches the training command in step 1.4:
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license

# Parameters
nc: 80  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.50  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 v6.0 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],  # 9
  ]

# YOLOv5 v6.0 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [-1, 1, SOCA, [1024]],
   [[17, 20, 24], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
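If you want to confirm that the new configuration parses and runs before training, a minimal sketch such as the following can be used once steps 1.2 and 1.3 below are finished (it assumes you run it from the yolov5 repository root and saved the file as models/yolov5_SOCA.yaml):

# Minimal sanity-check sketch: build the modified model and run one dummy forward pass.
# Assumes the SOCA module is already registered in common.py and yolo.py (steps 1.2 and 1.3).
import torch
from models.yolo import Model

model = Model('models/yolov5_SOCA.yaml', ch=3, nc=80)  # the layer table printed by parse_model should list SOCA as layer 24
model.eval()
dummy = torch.zeros(1, 3, 640, 640)                    # dummy 640x640 RGB image
with torch.no_grad():
    model(dummy)                                       # the forward pass goes through the SOCA layer before Detect
print('yolov5_SOCA config builds and runs')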
1.2 common.py configuration
Add the following modules to the ./models/common.py file:
import numpy as np
import torch
from torch import nn
from torch.nn import init
from torch.autograd import Function
class Covpool(Function):
    @staticmethod
    def forward(ctx, input):
        x = input
        batchSize = x.data.shape[0]
        dim = x.data.shape[1]
        h = x.data.shape[2]
        w = x.data.shape[3]
        M = h*w
        x = x.reshape(batchSize,dim,M)
        I_hat = (-1./M/M)*torch.ones(M,M,device = x.device) + (1./M)*torch.eye(M,M,device = x.device)
        I_hat = I_hat.view(1,M,M).repeat(batchSize,1,1).type(x.dtype)
        y = x.bmm(I_hat).bmm(x.transpose(1,2))
        ctx.save_for_backward(input,I_hat)
        return y

    @staticmethod
    def backward(ctx, grad_output):
        input,I_hat = ctx.saved_tensors
        x = input
        batchSize = x.data.shape[0]
        dim = x.data.shape[1]
        h = x.data.shape[2]
        w = x.data.shape[3]
        M = h*w
        x = x.reshape(batchSize,dim,M)
        grad_input = grad_output + grad_output.transpose(1,2)
        grad_input = grad_input.bmm(x).bmm(I_hat)
        grad_input = grad_input.reshape(batchSize,dim,h,w)
        return grad_input
class Sqrtm(Function):
    @staticmethod
    def forward(ctx, input, iterN):
        x = input
        batchSize = x.data.shape[0]
        dim = x.data.shape[1]
        dtype = x.dtype
        I3 = 3.0*torch.eye(dim,dim,device = x.device).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)
        normA = (1.0/3.0)*x.mul(I3).sum(dim=1).sum(dim=1)
        A = x.div(normA.view(batchSize,1,1).expand_as(x))
        Y = torch.zeros(batchSize, iterN, dim, dim, requires_grad = False, device = x.device)
        Z = torch.eye(dim,dim,device = x.device).view(1,dim,dim).repeat(batchSize,iterN,1,1)
        if iterN < 2:
            ZY = 0.5*(I3 - A)
            Y[:,0,:,:] = A.bmm(ZY)
        else:
            ZY = 0.5*(I3 - A)
            Y[:,0,:,:] = A.bmm(ZY)
            Z[:,0,:,:] = ZY
            for i in range(1, iterN-1):
                ZY = 0.5*(I3 - Z[:,i-1,:,:].bmm(Y[:,i-1,:,:]))
                Y[:,i,:,:] = Y[:,i-1,:,:].bmm(ZY)
                Z[:,i,:,:] = ZY.bmm(Z[:,i-1,:,:])
            ZY = 0.5*Y[:,iterN-2,:,:].bmm(I3 - Z[:,iterN-2,:,:].bmm(Y[:,iterN-2,:,:]))
        y = ZY*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(x)
        ctx.save_for_backward(input, A, ZY, normA, Y, Z)
        ctx.iterN = iterN
        return y
    @staticmethod
    def backward(ctx, grad_output):
        input, A, ZY, normA, Y, Z = ctx.saved_tensors
        iterN = ctx.iterN
        x = input
        batchSize = x.data.shape[0]
        dim = x.data.shape[1]
        dtype = x.dtype
        der_postCom = grad_output*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(x)
        der_postComAux = (grad_output*ZY).sum(dim=1).sum(dim=1).div(2*torch.sqrt(normA))
        I3 = 3.0*torch.eye(dim,dim,device = x.device).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)
        if iterN < 2:
            # note: the widely circulated snippet references an undefined 'der_sacleTrace' here;
            # der_postCom is used instead so the branch is well-defined (it is not reached with iterN=5 as in this tutorial)
            der_NSiter = 0.5*(der_postCom.bmm(I3 - A) - A.bmm(der_postCom))
        else:
            dldY = 0.5*(der_postCom.bmm(I3 - Y[:,iterN-2,:,:].bmm(Z[:,iterN-2,:,:])) -
                        Z[:,iterN-2,:,:].bmm(Y[:,iterN-2,:,:]).bmm(der_postCom))
            dldZ = -0.5*Y[:,iterN-2,:,:].bmm(der_postCom).bmm(Y[:,iterN-2,:,:])
            for i in range(iterN-3, -1, -1):
                YZ = I3 - Y[:,i,:,:].bmm(Z[:,i,:,:])
                ZY = Z[:,i,:,:].bmm(Y[:,i,:,:])
                dldY_ = 0.5*(dldY.bmm(YZ) -
                             Z[:,i,:,:].bmm(dldZ).bmm(Z[:,i,:,:]) -
                             ZY.bmm(dldY))
                dldZ_ = 0.5*(YZ.bmm(dldZ) -
                             Y[:,i,:,:].bmm(dldY).bmm(Y[:,i,:,:]) -
                             dldZ.bmm(ZY))
                dldY = dldY_
                dldZ = dldZ_
            der_NSiter = 0.5*(dldY.bmm(I3 - A) - dldZ - A.bmm(dldY))
        grad_input = der_NSiter.div(normA.view(batchSize,1,1).expand_as(x))
        grad_aux = der_NSiter.mul(x).sum(dim=1).sum(dim=1)
        for i in range(batchSize):
            grad_input[i,:,:] += (der_postComAux[i] \
                                  - grad_aux[i] / (normA[i] * normA[i])) \
                                 *torch.ones(dim,device = x.device).diag()
        return grad_input, None
def CovpoolLayer(var):
    return Covpool.apply(var)

def SqrtmLayer(var, iterN):
    return Sqrtm.apply(var, iterN)
class SOCA(nn.Module):
    # second-order Channel attention
    def __init__(self, channel, reduction=8):
        super(SOCA, self).__init__()
        self.max_pool = nn.MaxPool2d(kernel_size=2)
        self.conv_du = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
            nn.Sigmoid()
        )

    def forward(self, x):
        batch_size, C, h, w = x.shape  # x: NxCxHxW
        N = int(h * w)
        min_h = min(h, w)
        h1 = 1000
        w1 = 1000
        if h < h1 and w < w1:
            x_sub = x
        elif h < h1 and w > w1:
            W = (w - w1) // 2
            x_sub = x[:, :, :, W:(W + w1)]
        elif w < w1 and h > h1:
            H = (h - h1) // 2
            x_sub = x[:, :, H:H + h1, :]
        else:
            H = (h - h1) // 2
            W = (w - w1) // 2
            x_sub = x[:, :, H:(H + h1), W:(W + w1)]
        cov_mat = CovpoolLayer(x_sub)  # Global Covariance pooling layer
        cov_mat_sqrt = SqrtmLayer(cov_mat, 5)  # Matrix square root layer (pre-norm, Newton-Schulz iteration and post-compensation, 5 iterations)
        cov_mat_sum = torch.mean(cov_mat_sqrt, 1)
        cov_mat_sum = cov_mat_sum.view(batch_size, C, 1, 1)
        y_cov = self.conv_du(cov_mat_sum)
        return y_cov * x
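As a quick standalone check of the SOCA module above (a minimal sketch; run it anywhere the Covpool/Sqrtm/SOCA code from this step is available, for example in a scratch script):

# SOCA is a channel-attention block, so the output shape equals the input shape.
import torch

x = torch.randn(2, 64, 20, 20)        # N x C x H x W feature map
soca = SOCA(channel=64, reduction=8)
y = soca(x)
print(y.shape)                         # torch.Size([2, 64, 20, 20])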
1.3 yolo.py configuration
In the models/yolo.py file:
- Locate the parse_model function, i.e. the loop
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):
- Inside it, alongside the existing elif branches for other modules, simply add the following code:
elif m is SOCA:
    c1, c2 = ch[f], args[0]
    if c2 != no:
        c2 = make_divisible(c2 * gw, 8)
    args = [c1, *args[1:]]
1.4 Train the model
python train.py --cfg yolov5_SOCA.yaml
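In practice you will usually also pass a dataset definition, pretrained weights and the other standard YOLOv5 training flags; for example (coco128.yaml and yolov5s.pt are just the stock examples that ship with the repo, substitute your own dataset and weights):

python train.py --cfg yolov5_SOCA.yaml --data data/coco128.yaml --weights yolov5s.pt --img 640 --batch-size 16 --epochs 100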
Navigation: previous YOLO improvement tutorials
11. Improving the YOLOv5 series: 11. ConvNeXt combined with YOLO | CVPR 2022, multiple configurations, plug and play | Backbone CNN model
10. Improving the YOLOv5 series: 10. First release of the latest HorNet combined with YOLO! | From ECCV 2022, multiple configurations, plug and play | Backbone with recursive gated convolution for efficient high-order spatial interaction
9. Improving the YOLOv5 series: 9. Modification with the BoTNet Transformer structure
8. Improving the YOLOv5 series: 8. Adding the ACmix structure, integrating self-attention and convolution
7. Improving the YOLOv5 series: 7. Switching to DIoU-NMS, SIoU-NMS, EIoU-NMS, CIoU-NMS, GIoU-NMS
6. Improving the YOLOv5 series: 6. Switching to Soft-NMS, Soft-CIoUNMS, Soft-SIoUNMS
5. Improving the YOLOv5 series: 5. Modification with the CotNet Transformer structure
4. Improving the YOLOv5 series: 4. Replacing the YOLOv5 backbone with the latest MobileOne structure