Adding ASFF to YOLOv5

This ASFF variant only works with four detection heads, i.e. a YOLOv5 model where a small-object detection head has been added on top of the standard three heads.
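For context: ASFF (Adaptively Spatial Feature Fusion, from the paper "Learning Spatial Fusion for Single-Shot Object Detection") resizes every pyramid level to the resolution of the level being predicted and fuses them with learned, softmax-normalized per-pixel weights, roughly fused = w0*x0 + w1*x1 + w2*x2 + w3*x3 with w0 + w1 + w2 + w3 = 1 at every spatial location. This is exactly what the forward pass of the ASFF_4L module below computes.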

Reference article:

【Yolov5】Yolov5同时添加ASFF与新的检测层 (CSDN blog)

1. Add the following code to common.py (it uses torch, torch.nn as nn and torch.nn.functional as F; add any of these imports that your common.py is missing):

def add_conv(in_ch, out_ch, ksize, stride, leaky=True):
    """
    Add a conv2d / batchnorm / leaky ReLU block.
    Args:
        in_ch (int): number of input channels of the convolution layer.
        out_ch (int): number of output channels of the convolution layer.
        ksize (int): kernel size of the convolution layer.
        stride (int): stride of the convolution layer.
        leaky (bool): use LeakyReLU(0.1) if True, otherwise ReLU6.
    Returns:
        stage (Sequential) : Sequential layers composing a convolution block.
    """
    stage = nn.Sequential()
    pad = (ksize - 1) // 2
    stage.add_module('conv', nn.Conv2d(in_channels=in_ch,
                                       out_channels=out_ch, kernel_size=ksize, stride=stride,
                                       padding=pad, bias=False))
    stage.add_module('batch_norm', nn.BatchNorm2d(out_ch))
    if leaky:
        stage.add_module('leaky', nn.LeakyReLU(0.1))
    else:
        stage.add_module('relu6', nn.ReLU6(inplace=True))
    return stage
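A quick sanity check for add_conv (a minimal sketch, not from the original post; it assumes the function above has been added to models/common.py):

import torch
from models.common import add_conv  # assumes add_conv was added to common.py as above

block = add_conv(in_ch=128, out_ch=512, ksize=3, stride=2)
x = torch.randn(1, 128, 40, 40)   # e.g. a 40*40 feature map with 128 channels
print(block(x).shape)             # torch.Size([1, 512, 20, 20]) -- stride 2 halves the spatial size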


class ASFF_4L(nn.Module):
    def __init__(self, level, rfb=False, vis=False):
        super(ASFF_4L, self).__init__()
        self.level = level
        # Channel counts of the four pyramid levels, from the top (smallest feature map) down
        # Corresponding feature-map sizes for a 640*640 input: 20*20, 40*40, 80*80, 160*160
        self.dim = [512, 256, 128, 64]
        self.inter_dim = self.dim[self.level]
        if level==0: # 20*20 level, 512 channels
            self.stride_level_1 = add_conv(256, self.inter_dim, 3, 2)
            self.stride_level_2 = add_conv(128, self.inter_dim, 3, 2)
            self.stride_level_3 = add_conv(64, self.inter_dim, 3, 2)
            self.expand = add_conv(self.inter_dim, 512, 3, 1)
        elif level==1: # 40*40 level, 256 channels
            self.compress_level_0 = add_conv(512, self.inter_dim, 1, 1)
            self.stride_level_2 = add_conv(128, self.inter_dim, 3, 2)
            self.stride_level_3 = add_conv(64, self.inter_dim, 3, 2)
            self.expand = add_conv(self.inter_dim, 256, 3, 1)
        elif level==2: # 80*80 level, 128 channels
            self.compress_level_0 = add_conv(512, self.inter_dim, 1, 1)
            self.compress_level_1 = add_conv(256, self.inter_dim, 1, 1)
            self.stride_level_3 = add_conv(64, self.inter_dim, 3, 2)
            self.expand = add_conv(self.inter_dim, 128, 3, 1)
        elif level==3: # 160*160 level, 64 channels
            self.compress_level_0 = add_conv(512, self.inter_dim, 1, 1)
            self.compress_level_1 = add_conv(256, self.inter_dim, 1, 1)
            self.compress_level_2 = add_conv(128, self.inter_dim, 1, 1)
            self.expand = add_conv(self.inter_dim, 64, 3, 1)


        compress_c = 8 if rfb else 16  # when using RFB, use half the number of channels to save memory

        self.weight_level_0 = add_conv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_1 = add_conv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_2 = add_conv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_3 = add_conv(self.inter_dim, compress_c, 1, 1)

        self.weight_levels = nn.Conv2d(compress_c*4, 4, kernel_size=1, stride=1, padding=0)
        self.vis= vis


    def forward(self, x_level_0, x_level_1, x_level_2, x_level_3):
        if self.level==0: # 20*20
            level_0_resized = x_level_0  # level-0 feature map is used as-is

            level_1_resized = self.stride_level_1(x_level_1)  # strided conv halves the spatial size

            level_2_downsampled_inter = F.max_pool2d(x_level_2, 3, stride=2, padding=1)  # downsample spatially
            level_2_resized = self.stride_level_2(level_2_downsampled_inter)  # downsample again and match channels

            level_3_downsampled_inter = F.max_pool2d(x_level_3, 5, stride=4, padding=2)  # downsample by 4
            level_3_resized = self.stride_level_3(level_3_downsampled_inter)

        elif self.level==1: # 40*40
            level_0_compressed = self.compress_level_0(x_level_0)  # compress channels
            level_0_resized = F.interpolate(level_0_compressed, scale_factor=2, mode='nearest')  # upsample the level-0 map

            level_1_resized = x_level_1  # level-1 feature map is used as-is

            level_2_resized = self.stride_level_2(x_level_2)  # downsample and match channels

            level_3_downsampled_inter = F.max_pool2d(x_level_3, 3, stride=2, padding=1)  # downsample
            level_3_resized = self.stride_level_3(level_3_downsampled_inter)

        elif self.level==2: # 80*80
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(level_0_compressed, scale_factor=4, mode='nearest')  # upsample the level-0 map

            level_1_compressed = self.compress_level_1(x_level_1)
            level_1_resized = F.interpolate(level_1_compressed, scale_factor=2, mode='nearest')  # upsample the level-1 map

            level_2_resized = x_level_2

            level_3_resized = self.stride_level_3(x_level_3)

        elif self.level==3: # 160*160
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(level_0_compressed, scale_factor=8, mode='nearest')  # upsample the level-0 map

            level_1_compressed = self.compress_level_1(x_level_1)
            level_1_resized = F.interpolate(level_1_compressed, scale_factor=4, mode='nearest')  # upsample the level-1 map

            level_2_compressed = self.compress_level_2(x_level_2)
            level_2_resized = F.interpolate(level_2_compressed, scale_factor=2, mode='nearest')  # upsample the level-2 map

            level_3_resized = x_level_3

        level_0_weight_v = self.weight_level_0(level_0_resized)
        level_1_weight_v = self.weight_level_1(level_1_resized)
        level_2_weight_v = self.weight_level_2(level_2_resized)
        level_3_weight_v = self.weight_level_3(level_3_resized)
        levels_weight_v = torch.cat((level_0_weight_v, level_1_weight_v, level_2_weight_v, level_3_weight_v),1)
        levels_weight = self.weight_levels(levels_weight_v)
        levels_weight = F.softmax(levels_weight, dim=1)

        fused_out_reduced = level_0_resized * levels_weight[:,0:1,:,:]+\
                            level_1_resized * levels_weight[:,1:2,:,:]+\
                            level_2_resized * levels_weight[:,2:3,:,:]+\
                            level_3_resized * levels_weight[:,3:,:,:]

        out = self.expand(fused_out_reduced)

        if self.vis:
            return out, levels_weight, fused_out_reduced.sum(dim=1)
        else:
            return out
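To verify the channel and size bookkeeping, here is a minimal standalone sketch (not from the original post) that runs each ASFF level on dummy feature maps matching a 640*640 input:

import torch
from models.common import ASFF_4L  # assumes the class above was added to common.py

p5 = torch.randn(1, 512, 20, 20)    # level 0
p4 = torch.randn(1, 256, 40, 40)    # level 1
p3 = torch.randn(1, 128, 80, 80)    # level 2
p2 = torch.randn(1, 64, 160, 160)   # level 3

for lvl in range(4):
    out = ASFF_4L(level=lvl)(p5, p4, p3, p2)
    print(lvl, out.shape)

Each output keeps the spatial size and channel count of its own level (e.g. level 0 returns 1x512x20x20), so the module can sit in front of the detection convolutions without changing their expected input shapes.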

2. In yolo.py, add the following code below the Detect class:

class ASFF_4L_Detect(Detect):
    # Detect head that fuses the four pyramid levels with ASFF before detection
    def __init__(self, nc=80, anchors=(), ch=(), inplace=True):  # detection layer
        super().__init__(nc, anchors, ch, inplace)
        self.nl = len(anchors)
        self.asffs = nn.ModuleList(ASFF_4L(i) for i in range(self.nl))
        self.detect = Detect.forward

    def forward(self, x):  # feature maps in x run from large to small, the reverse of ASFF_4L's level order, so reverse first
        x = x[::-1]
        # collect the fused maps in a new list so every ASFF level sees the original
        # pyramid features rather than features already overwritten by a previous iteration
        out = [self.asffs[i](*x) for i in range(self.nl)]
        return self.detect(self, out[::-1])

Where to add it:


Directly below the Detect class in yolo.py, not inside the Detect class itself.


3. In yolo.py, find every place where Detect and Segment appear together and add ASFF_4L_Detect alongside them, for example:
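Depending on the YOLOv5 version, the affected spots are the isinstance checks on the last model layer and the branch in parse_model that builds the head's arguments. A rough sketch of what they look like after the edit (not verbatim; the exact surrounding code varies by version):

# wherever the last layer is type-checked (e.g. in BaseModel._apply and the model constructor):
if isinstance(m, (Detect, Segment, ASFF_4L_Detect)):
    ...

# in parse_model(), where the head gets its input-channel list:
if m in {Detect, Segment, ASFF_4L_Detect}:
    args.append([ch[x] for x in f])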


4. Create a yaml model config whose final Detect layer is replaced by ASFF_4L_Detect, for example:
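Since the before/after screenshots are not reproduced here, this is a sketch of the change in the head section of a hypothetical 4-head yaml (the layer indices in the from-list are illustrative and must match your own model definition; ASFF_4L_Detect takes the same arguments as Detect):

# before (hypothetical layer indices):
#   [[21, 24, 27, 30], 1, Detect, [nc, anchors]]  # Detect(P2, P3, P4, P5)

# after:
  [[21, 24, 27, 30], 1, ASFF_4L_Detect, [nc, anchors]]  # ASFF_4L_Detect(P2, P3, P4, P5)

Only the module name changes; nc, anchors and the from-list stay the same, and the anchors entry must define four scales because there are four detection heads.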
