1,本文介绍
这篇文章的改进机制是利用新推出的渐近特征金字塔网络(AFPN)来优化YOLOv8的检测头。AFPN的核心是引入一种渐近的特征融合策略,将底层和高层的特征逐渐整合到目标检测过程中。这种方式有助于减小不同层次特征之间的语义差距,提高特征融合效果,使得检测模型能更好地适应不同层次的语义信息。
关于AFPN的详细介绍可以看论文:https://arxiv.org/pdf/2306.15988.pdf
本文将讲解如何将AFPN融合进yolov8,以提高小目标检测的性能。
话不多说,上代码!
2,将AFPN融入YOLOv8
2.1 步骤一
首先找到如下的目录'ultralytics/nn/modules',然后在这个目录下创建一个afpn.py文件(文件名可以按自己的习惯起,但若不叫afpn.py,后文步骤二中的导入语句也要做相应修改),然后将AFPN的核心代码复制进去。
# AFPN 核心代码
import math
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from ultralytics.nn.modules import DFL
from ultralytics.nn.modules.conv import Conv
from ultralytics.utils.tal import dist2bbox, make_anchors
__all__ =['Detect_AFPN']
def BasicConv(filter_in, filter_out, kernel_size, stride=1, pad=None):
    """Build a Conv2d -> BatchNorm2d -> ReLU block.

    Args:
        filter_in: number of input channels.
        filter_out: number of output channels.
        kernel_size: convolution kernel size.
        stride: convolution stride (default 1).
        pad: explicit padding; when None, "same"-style padding
            ``(kernel_size - 1) // 2`` is derived from the kernel size.

    Returns:
        nn.Sequential with named sub-modules "conv", "bn", "relu".
    """
    # Bug fix: the original used `if not pad:`, which also matched an explicit
    # pad=0 (e.g. the Downsample_x4/x8 callers in this file) and silently
    # replaced it with the derived padding. Only derive when pad is not given.
    if pad is None:
        pad = (kernel_size - 1) // 2 if kernel_size else 0
    return nn.Sequential(OrderedDict([
        ("conv", nn.Conv2d(filter_in, filter_out, kernel_size=kernel_size,
                           stride=stride, padding=pad, bias=False)),
        ("bn", nn.BatchNorm2d(filter_out)),
        ("relu", nn.ReLU(inplace=True)),
    ]))
class BasicBlock(nn.Module):
    """Residual block: two 3x3 convs with BN, identity shortcut, shared ReLU.

    NOTE: the identity shortcut requires filter_in == filter_out; all callers
    in this file use equal channel counts.
    """

    expansion = 1

    def __init__(self, filter_in, filter_out):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(filter_in, filter_out, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(filter_out, momentum=0.1)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(filter_out, filter_out, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(filter_out, momentum=0.1)

    def forward(self, x):
        # Keep the input around for the residual addition.
        shortcut = x
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
class Upsample(nn.Module):
    """1x1 channel-mapping conv followed by bilinear spatial upsampling."""

    def __init__(self, in_channels, out_channels, scale_factor=2):
        super(Upsample, self).__init__()
        # Map channels first, then enlarge spatially by `scale_factor`.
        self.upsample = nn.Sequential(
            BasicConv(in_channels, out_channels, 1),
            nn.Upsample(scale_factor=scale_factor, mode='bilinear'),
        )

    def forward(self, x):
        return self.upsample(x)
class Downsample_x2(nn.Module):
    """Halve spatial resolution with a kernel-2 stride-2 conv block."""

    def __init__(self, in_channels, out_channels):
        super(Downsample_x2, self).__init__()
        self.downsample = nn.Sequential(BasicConv(in_channels, out_channels, 2, 2, 0))

    def forward(self, x):
        return self.downsample(x)
class Downsample_x4(nn.Module):
    """Reduce spatial resolution 4x with a kernel-4 stride-4 conv block.

    NOTE(review): BasicConv's `if not pad:` check treats the explicit 0 here
    as "unset" and substitutes (4 - 1) // 2 = 1 — confirm which padding is
    intended; output size is unchanged for inputs divisible by 4.
    """

    def __init__(self, in_channels, out_channels):
        super(Downsample_x4, self).__init__()
        self.downsample = nn.Sequential(BasicConv(in_channels, out_channels, 4, 4, 0))

    def forward(self, x):
        return self.downsample(x)
class Downsample_x8(nn.Module):
    """Reduce spatial resolution 8x with a kernel-8 stride-8 conv block.

    NOTE(review): as with Downsample_x4, BasicConv replaces the explicit
    pad=0 with (8 - 1) // 2 = 3 — confirm intended padding.
    """

    def __init__(self, in_channels, out_channels):
        super(Downsample_x8, self).__init__()
        self.downsample = nn.Sequential(BasicConv(in_channels, out_channels, 8, 8, 0))

    def forward(self, x):
        return self.downsample(x)
class ASFF_2(nn.Module):
    """Adaptive fusion of two same-shape feature maps.

    Each input is embedded to a small weight map; the concatenated embeddings
    are turned into per-pixel softmax weights that blend the two inputs, and
    the blend is refined by a 3x3 conv.
    """

    def __init__(self, inter_dim=512):
        super(ASFF_2, self).__init__()
        self.inter_dim = inter_dim
        compress_c = 8  # channels of each per-input weight embedding
        self.weight_level_1 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_2 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_levels = nn.Conv2d(compress_c * 2, 2, kernel_size=1, stride=1, padding=0)
        self.conv = BasicConv(self.inter_dim, self.inter_dim, 3, 1)

    def forward(self, input1, input2):
        embeddings = torch.cat(
            (self.weight_level_1(input1), self.weight_level_2(input2)), 1)
        # One softmax channel per input -> convex per-pixel blend weights.
        weights = F.softmax(self.weight_levels(embeddings), dim=1)
        fused = input1 * weights[:, 0:1, :, :] + input2 * weights[:, 1:2, :, :]
        return self.conv(fused)
class ASFF_3(nn.Module):
    """Adaptive fusion of three same-shape feature maps (see ASFF_2)."""

    def __init__(self, inter_dim=512):
        super(ASFF_3, self).__init__()
        self.inter_dim = inter_dim
        compress_c = 8  # channels of each per-input weight embedding
        self.weight_level_1 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_2 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_3 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_levels = nn.Conv2d(compress_c * 3, 3, kernel_size=1, stride=1, padding=0)
        self.conv = BasicConv(self.inter_dim, self.inter_dim, 3, 1)

    def forward(self, input1, input2, input3):
        embeddings = torch.cat(
            (self.weight_level_1(input1),
             self.weight_level_2(input2),
             self.weight_level_3(input3)), 1)
        # Per-pixel softmax over the three inputs.
        weights = F.softmax(self.weight_levels(embeddings), dim=1)
        fused = (input1 * weights[:, 0:1, :, :]
                 + input2 * weights[:, 1:2, :, :]
                 + input3 * weights[:, 2:, :, :])
        return self.conv(fused)
class ASFF_4(nn.Module):
    """Adaptive fusion of four same-shape feature maps (see ASFF_2)."""

    def __init__(self, inter_dim=512):
        super(ASFF_4, self).__init__()
        self.inter_dim = inter_dim
        compress_c = 8  # channels of each per-input weight embedding
        self.weight_level_0 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_1 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_2 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_3 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_levels = nn.Conv2d(compress_c * 4, 4, kernel_size=1, stride=1, padding=0)
        self.conv = BasicConv(self.inter_dim, self.inter_dim, 3, 1)

    def forward(self, input0, input1, input2, input3):
        embeddings = torch.cat(
            (self.weight_level_0(input0),
             self.weight_level_1(input1),
             self.weight_level_2(input2),
             self.weight_level_3(input3)), 1)
        # Per-pixel softmax over the four inputs.
        weights = F.softmax(self.weight_levels(embeddings), dim=1)
        fused = (input0 * weights[:, 0:1, :, :]
                 + input1 * weights[:, 1:2, :, :]
                 + input2 * weights[:, 2:3, :, :]
                 + input3 * weights[:, 3:, :, :])
        return self.conv(fused)
class BlockBody(nn.Module):
    """Progressive (asymptotic) multi-scale fusion body of the AFPN.

    Takes four feature maps of decreasing resolution (scale0 finest ..
    scale3 coarsest) and fuses them in three stages: scales 0-1 first,
    then 0-2, finally all four, using ASFF_2/ASFF_3/ASFF_4 adaptive fusion
    with stacks of residual BasicBlocks after each fusion.
    """

    def __init__(self, channels=[64, 128, 256, 512]):
        # NOTE(review): mutable default list; it is only read here, so this
        # is harmless, but callers should pass an explicit list.
        super(BlockBody, self).__init__()
        # --- Stage 1: per-scale 1x1 conv stems. ---
        self.blocks_scalezero1 = nn.Sequential(
            BasicConv(channels[0], channels[0], 1),
        )
        self.blocks_scaleone1 = nn.Sequential(
            BasicConv(channels[1], channels[1], 1),
        )
        self.blocks_scaletwo1 = nn.Sequential(
            BasicConv(channels[2], channels[2], 1),
        )
        self.blocks_scalethree1 = nn.Sequential(
            BasicConv(channels[3], channels[3], 1),
        )
        # --- Stage 1 fusion: scales 0 and 1 exchange information. ---
        self.downsample_scalezero1_2 = Downsample_x2(channels[0], channels[1])
        self.upsample_scaleone1_2 = Upsample(channels[1], channels[0], scale_factor=2)
        self.asff_scalezero1 = ASFF_2(inter_dim=channels[0])
        self.asff_scaleone1 = ASFF_2(inter_dim=channels[1])
        # Residual refinement after stage-1 fusion.
        self.blocks_scalezero2 = nn.Sequential(
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
        )
        self.blocks_scaleone2 = nn.Sequential(
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
        )
        # --- Stage 2 fusion: scales 0, 1 and 2. ---
        self.downsample_scalezero2_2 = Downsample_x2(channels[0], channels[1])
        self.downsample_scalezero2_4 = Downsample_x4(channels[0], channels[2])
        self.downsample_scaleone2_2 = Downsample_x2(channels[1], channels[2])
        self.upsample_scaleone2_2 = Upsample(channels[1], channels[0], scale_factor=2)
        self.upsample_scaletwo2_2 = Upsample(channels[2], channels[1], scale_factor=2)
        self.upsample_scaletwo2_4 = Upsample(channels[2], channels[0], scale_factor=4)
        self.asff_scalezero2 = ASFF_3(inter_dim=channels[0])
        self.asff_scaleone2 = ASFF_3(inter_dim=channels[1])
        self.asff_scaletwo2 = ASFF_3(inter_dim=channels[2])
        # Residual refinement after stage-2 fusion.
        self.blocks_scalezero3 = nn.Sequential(
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
        )
        self.blocks_scaleone3 = nn.Sequential(
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
        )
        self.blocks_scaletwo3 = nn.Sequential(
            BasicBlock(channels[2], channels[2]),
            BasicBlock(channels[2], channels[2]),
            BasicBlock(channels[2], channels[2]),
            BasicBlock(channels[2], channels[2]),
        )
        # --- Stage 3 fusion: all four scales. ---
        self.downsample_scalezero3_2 = Downsample_x2(channels[0], channels[1])
        self.downsample_scalezero3_4 = Downsample_x4(channels[0], channels[2])
        self.downsample_scalezero3_8 = Downsample_x8(channels[0], channels[3])
        self.upsample_scaleone3_2 = Upsample(channels[1], channels[0], scale_factor=2)
        self.downsample_scaleone3_2 = Downsample_x2(channels[1], channels[2])
        self.downsample_scaleone3_4 = Downsample_x4(channels[1], channels[3])
        self.upsample_scaletwo3_4 = Upsample(channels[2], channels[0], scale_factor=4)
        self.upsample_scaletwo3_2 = Upsample(channels[2], channels[1], scale_factor=2)
        self.downsample_scaletwo3_2 = Downsample_x2(channels[2], channels[3])
        self.upsample_scalethree3_8 = Upsample(channels[3], channels[0], scale_factor=8)
        self.upsample_scalethree3_4 = Upsample(channels[3], channels[1], scale_factor=4)
        self.upsample_scalethree3_2 = Upsample(channels[3], channels[2], scale_factor=2)
        self.asff_scalezero3 = ASFF_4(inter_dim=channels[0])
        self.asff_scaleone3 = ASFF_4(inter_dim=channels[1])
        self.asff_scaletwo3 = ASFF_4(inter_dim=channels[2])
        self.asff_scalethree3 = ASFF_4(inter_dim=channels[3])
        # Final residual refinement on each scale.
        self.blocks_scalezero4 = nn.Sequential(
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
        )
        self.blocks_scaleone4 = nn.Sequential(
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
        )
        self.blocks_scaletwo4 = nn.Sequential(
            BasicBlock(channels[2], channels[2]),
            BasicBlock(channels[2], channels[2]),
            BasicBlock(channels[2], channels[2]),
            BasicBlock(channels[2], channels[2]),
        )
        self.blocks_scalethree4 = nn.Sequential(
            BasicBlock(channels[3], channels[3]),
            BasicBlock(channels[3], channels[3]),
            BasicBlock(channels[3], channels[3]),
            BasicBlock(channels[3], channels[3]),
        )

    def forward(self, x):
        # x: iterable of four feature maps, finest (x0) to coarsest (x3).
        x0, x1, x2, x3 = x
        # Stage 1: per-scale stems.
        x0 = self.blocks_scalezero1(x0)
        x1 = self.blocks_scaleone1(x1)
        x2 = self.blocks_scaletwo1(x2)
        x3 = self.blocks_scalethree1(x3)
        # Stage 1 fusion (scales 0-1), each output at its own resolution.
        scalezero = self.asff_scalezero1(x0, self.upsample_scaleone1_2(x1))
        scaleone = self.asff_scaleone1(self.downsample_scalezero1_2(x0), x1)
        x0 = self.blocks_scalezero2(scalezero)
        x1 = self.blocks_scaleone2(scaleone)
        # Stage 2 fusion (scales 0-2); x2 enters the pyramid here.
        scalezero = self.asff_scalezero2(x0, self.upsample_scaleone2_2(x1), self.upsample_scaletwo2_4(x2))
        scaleone = self.asff_scaleone2(self.downsample_scalezero2_2(x0), x1, self.upsample_scaletwo2_2(x2))
        scaletwo = self.asff_scaletwo2(self.downsample_scalezero2_4(x0), self.downsample_scaleone2_2(x1), x2)
        x0 = self.blocks_scalezero3(scalezero)
        x1 = self.blocks_scaleone3(scaleone)
        x2 = self.blocks_scaletwo3(scaletwo)
        # Stage 3 fusion (all four scales); x3 is still the stage-1 stem output.
        scalezero = self.asff_scalezero3(x0, self.upsample_scaleone3_2(x1), self.upsample_scaletwo3_4(x2), self.upsample_scalethree3_8(x3))
        scaleone = self.asff_scaleone3(self.downsample_scalezero3_2(x0), x1, self.upsample_scaletwo3_2(x2), self.upsample_scalethree3_4(x3))
        scaletwo = self.asff_scaletwo3(self.downsample_scalezero3_4(x0), self.downsample_scaleone3_2(x1), x2, self.upsample_scalethree3_2(x3))
        scalethree = self.asff_scalethree3(self.downsample_scalezero3_8(x0), self.downsample_scaleone3_4(x1), self.downsample_scaletwo3_2(x2), x3)
        # Final per-scale refinement.
        scalezero = self.blocks_scalezero4(scalezero)
        scaleone = self.blocks_scaleone4(scaleone)
        scaletwo = self.blocks_scaletwo4(scaletwo)
        scalethree = self.blocks_scalethree4(scalethree)
        return scalezero, scaleone, scaletwo, scalethree
class AFPN(nn.Module):
    """Asymptotic Feature Pyramid Network neck.

    Compresses each of the four input feature maps to 1/8 of its channels,
    runs the progressive BlockBody fusion, then projects every scale to a
    common `out_channels` width.
    """

    def __init__(self,
                 in_channels=[256, 512, 1024, 2048],
                 out_channels=128):
        super(AFPN, self).__init__()
        self.fp16_enabled = False
        # 1x1 compression convs: in_channels[i] -> in_channels[i] // 8.
        self.conv0 = BasicConv(in_channels[0], in_channels[0] // 8, 1)
        self.conv1 = BasicConv(in_channels[1], in_channels[1] // 8, 1)
        self.conv2 = BasicConv(in_channels[2], in_channels[2] // 8, 1)
        self.conv3 = BasicConv(in_channels[3], in_channels[3] // 8, 1)
        self.body = nn.Sequential(
            BlockBody([in_channels[0] // 8, in_channels[1] // 8, in_channels[2] // 8, in_channels[3] // 8])
        )
        # 1x1 output projections to the shared head width.
        self.conv00 = BasicConv(in_channels[0] // 8, out_channels, 1)
        self.conv11 = BasicConv(in_channels[1] // 8, out_channels, 1)
        self.conv22 = BasicConv(in_channels[2] // 8, out_channels, 1)
        self.conv33 = BasicConv(in_channels[3] // 8, out_channels, 1)
        # NOTE(review): conv44 is never used in forward(); it appears to be a
        # leftover (parameter-free, so it does not affect checkpoints).
        self.conv44 = nn.MaxPool2d(kernel_size=1, stride=2)
        # init weight: Xavier for convs, N(1, 0.02) scale / zero shift for BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight, gain=0.02)
            elif isinstance(m, nn.BatchNorm2d):
                torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
                torch.nn.init.constant_(m.bias.data, 0.0)

    def forward(self, x):
        # x: four feature maps, finest to coarsest.
        x0, x1, x2, x3 = x
        x0 = self.conv0(x0)
        x1 = self.conv1(x1)
        x2 = self.conv2(x2)
        x3 = self.conv3(x3)
        out0, out1, out2, out3 = self.body([x0, x1, x2, x3])
        out0 = self.conv00(out0)
        out1 = self.conv11(out1)
        out2 = self.conv22(out2)
        out3 = self.conv33(out3)
        return out0, out1, out2, out3
class Detect_AFPN(nn.Module):
    """YOLOv8 Detect head for detection models, with an AFPN neck in front.

    The incoming backbone features are first fused by AFPN (all scales
    projected to `channel` channels) and then fed to the standard YOLOv8
    decoupled box/class branches.
    """

    dynamic = False  # force grid reconstruction
    export = False  # export mode
    shape = None
    anchors = torch.empty(0)  # init
    strides = torch.empty(0)  # init

    def __init__(self, nc=80, channel=128, ch=()):
        """Initializes the YOLOv8 detection layer with specified number of classes and channels.

        Args:
            nc: number of classes.
            channel: common channel width produced by the AFPN for every scale.
            ch: tuple of input channel counts, one per detection layer.
        """
        super().__init__()
        self.nc = nc  # number of classes
        self.nl = len(ch)  # number of detection layers
        self.reg_max = 16  # DFL channels (ch[0] // 16 to scale 4/8/12/16/20 for n/s/m/l/x)
        self.no = nc + self.reg_max * 4  # number of outputs per anchor
        self.stride = torch.zeros(self.nl)  # strides computed during build
        c2, c3 = max((16, ch[0] // 4, self.reg_max * 4)), max(ch[0], min(self.nc, 100))  # channels
        # Box branch (cv2) and class branch (cv3); both take `channel` inputs
        # because the AFPN equalizes channel width across scales.
        self.cv2 = nn.ModuleList(
            nn.Sequential(Conv(channel, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch)
        self.cv3 = nn.ModuleList(nn.Sequential(Conv(channel, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch)
        self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity()
        self.AFPN = AFPN(ch)

    def forward(self, x):
        """Concatenates and returns predicted bounding boxes and class probabilities."""
        # Fuse the raw backbone features before running the detection branches.
        x = list(self.AFPN(x))
        shape = x[0].shape  # BCHW
        for i in range(self.nl):
            x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1)
        if self.training:
            return x
        elif self.dynamic or self.shape != shape:
            # Rebuild anchors/strides when the input shape changes.
            # (The generator variable `x` deliberately shadows the outer `x`
            # only inside this expression.)
            self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
            self.shape = shape
        x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)
        # NOTE(review): self.format is not defined in this class — assumed to
        # be set externally by the Ultralytics exporter; confirm before export.
        if self.export and self.format in ('saved_model', 'pb', 'tflite', 'edgetpu', 'tfjs'):  # avoid TF FlexSplitV ops
            box = x_cat[:, :self.reg_max * 4]
            cls = x_cat[:, self.reg_max * 4:]
        else:
            box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
        dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
        if self.export and self.format in ('tflite', 'edgetpu'):
            # Normalize xywh with image size to mitigate quantization error of TFLite integer models as done in YOLOv5:
            # https://github.com/ultralytics/yolov5/blob/0c8de3fca4a702f8ff5c435e67f378d1fce70243/models/tf.py#L307-L309
            # See this PR for details: https://github.com/ultralytics/ultralytics/pull/1695
            img_h = shape[2] * self.stride[0]
            img_w = shape[3] * self.stride[0]
            img_size = torch.tensor([img_w, img_h, img_w, img_h], device=dbox.device).reshape(1, 4, 1)
            dbox /= img_size
        y = torch.cat((dbox, cls.sigmoid()), 1)
        return y if self.export else (y, x)

    def bias_init(self):
        """Initialize Detect() biases, WARNING: requires stride availability."""
        m = self  # self.model[-1]  # Detect() module
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
        # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum())  # nominal class frequency
        for a, b, s in zip(m.cv2, m.cv3, m.stride):  # from
            a[-1].bias.data[:] = 1.0  # box
            b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (.01 objects, 80 classes, 640 img)
2.2 步骤二
找到文件'ultralytics/nn/tasks.py'。这里需要修改的地方有点多,总共有7处,但都很简单。首先我们在该文件的头部导入我们AFPN文件中的检测头。
from .modules.afpn import Detect_AFPN
2.3 步骤三
找到下图的代码将检测头添加进去,可以用ctrl+f快捷键搜索Detect然后快速查找。
2.4 步骤四
将检测头添加到如下的代码里。
2.5 修改五
此处在新版本YOLOv8.1之后已经没有了,大家如果是新版本可以跳过此处,老版本照常修改!
2.6 修改六
此处在新版本YOLOv8.1之后已经没有了,大家如果是新版本可以跳过此处,老版本照常修改!
2.7 修改七
同步骤三,四
2.8 修改八
需要在图中位置加一段代码,任意一个if判断之后均可
else:
return 'detect'
2.9 修改九
这个地方,需要添加一个括号,不要直接添加,和下图保持一致。
到此注册成功,复制后面给的yaml文件,然后直接运行即可
AFPN:yaml文件
# Ultralytics YOLO 🚀, AGPL-3.0 license
# YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect

# Parameters
nc: 80 # number of classes
scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
  # [depth, width, max_channels]
  n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
  s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
  m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
  l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
  x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs

# YOLOv8.0n backbone
backbone:
  # [from, repeats, module, args]
  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
  - [-1, 3, C2f, [128, True]]
  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
  - [-1, 6, C2f, [256, True]]
  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
  - [-1, 6, C2f, [512, True]]
  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
  - [-1, 3, C2f, [1024, True]]
  - [-1, 1, SPPF, [1024, 5]] # 9

# YOLOv8.0n head
head:
  # Layers 2, 4, 6, 9 are the C2f/SPPF outputs at strides 4/8/16/32,
  # i.e. the head actually receives four scales, P2 through P5.
  - [[2, 4, 6, 9], 1, Detect_AFPN, [nc, 128]] # Detect_AFPN(P2, P3, P4, P5)
不知不觉已经看完了哦,动动小手留个点赞吧--_--