Building the YOLOv3 Network Structure

YOLOv3

1. Defining a Residual Block

import math
from collections import OrderedDict

import torch
import torch.nn as nn

#---------------------------------------------------------------------#
#   Residual block
#   A 1x1 convolution first reduces the number of channels, then a 3x3
#   convolution extracts features and restores the channel count.
#   A shortcut (residual) connection is added at the end.
#---------------------------------------------------------------------#
class BasicBlock(nn.Module):
    def __init__(self, inplanes, planes):
        super(BasicBlock, self).__init__()
        self.conv1  = nn.Conv2d(inplanes, planes[0], kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1    = nn.BatchNorm2d(planes[0])
        self.relu1  = nn.LeakyReLU(0.1)
        
        self.conv2  = nn.Conv2d(planes[0], planes[1], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2    = nn.BatchNorm2d(planes[1])
        self.relu2  = nn.LeakyReLU(0.1)

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu2(out)

        out += residual
        return out
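
Because both convolutions keep stride 1 and the second one restores the channel count, the block's output shape matches its input, which is what makes the residual addition valid. A minimal sanity-check sketch (the 64-channel, 208x208 sizes are only illustrative):

# Minimal sanity check for BasicBlock (illustrative shapes).
block = BasicBlock(inplanes=64, planes=[32, 64])   # 64 -> 32 -> 64 channels
x = torch.randn(1, 64, 208, 208)                   # e.g. the 208x208 stage of Darknet-53
y = block(x)
print(y.shape)  # torch.Size([1, 64, 208, 208]) -- same shape, so the residual add is valid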

2. The Darknet-53 Backbone

# Define the DarkNet-53 model structure.
# The model is instantiated as DarkNet([1, 2, 8, 8, 4]) below, so the `layers`
# argument corresponds to the list [1, 2, 8, 8, 4].
class DarkNet(nn.Module):
    def __init__(self, layers):
        super(DarkNet, self).__init__()
        self.inplanes = 32
        # 416,416,3 -> 416,416,32
        self.conv1  = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1    = nn.BatchNorm2d(self.inplanes)
        self.relu1  = nn.LeakyReLU(0.1)

        # _make_layer(self, planes, blocks) takes two arguments: planes is a list such as [32, 64],
        # and blocks is layers[0], i.e. the number of stacked residual blocks
        # 416,416,32 -> 208,208,64
        self.layer1 = self._make_layer([32, 64], layers[0]) #layers[0]=1
        # 208,208,64 -> 104,104,128
        self.layer2 = self._make_layer([64, 128], layers[1]) #layers[1]=2
        # 104,104,128 -> 52,52,256
        self.layer3 = self._make_layer([128, 256], layers[2]) #layers[2]=8
        # 52,52,256 -> 26,26,512
        self.layer4 = self._make_layer([256, 512], layers[3]) #layers[3]=8
        # 26,26,512 -> 13,13,1024
        self.layer5 = self._make_layer([512, 1024], layers[4]) #layers[4]=4

        self.layers_out_filters = [64, 128, 256, 512, 1024]

        # Weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    #---------------------------------------------------------------------#
    #   Each layer first downsamples with a 3x3 convolution of stride 2,
    #   then stacks the residual blocks.
    #---------------------------------------------------------------------#
    def _make_layer(self, planes, blocks):
        layers = []
        # Downsampling: stride 2, kernel size 3
        layers.append(("ds_conv", nn.Conv2d(self.inplanes, planes[1], kernel_size=3, stride=2, padding=1, bias=False)))
        layers.append(("ds_bn", nn.BatchNorm2d(planes[1])))
        layers.append(("ds_relu", nn.LeakyReLU(0.1)))
        # Stack the residual blocks
        self.inplanes = planes[1]
        for i in range(0, blocks): # blocks sets how many residual blocks are stacked
            layers.append(("residual_{}".format(i), BasicBlock(self.inplanes, planes)))
        return nn.Sequential(OrderedDict(layers))

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)

        x = self.layer1(x)
        x = self.layer2(x)
        out3 = self.layer3(x)
        out4 = self.layer4(out3)
        out5 = self.layer5(out4)

        return out3, out4, out5

def darknet53():
    model = DarkNet([1, 2, 8, 8, 4])
    return model
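
Assuming the imports added above, the backbone can be checked end to end with a random 416x416 input; the three returned feature maps should match the shape comments in __init__. A minimal sketch:

# Verify the three effective feature maps of Darknet-53 (sketch).
model = darknet53()
x = torch.randn(1, 3, 416, 416)
out3, out4, out5 = model(x)
print(out3.shape)  # torch.Size([1, 256, 52, 52])
print(out4.shape)  # torch.Size([1, 512, 26, 26])
print(out5.shape)  # torch.Size([1, 1024, 13, 13])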

3. The YOLOv3 Model

#---------------------------------------------------#
#   Convolution block -> convolution + batch norm + activation
#   Conv2d + BatchNormalization + LeakyReLU
#---------------------------------------------------#
class ConvBNLeaky(nn.Module): # a basic convolution block
    def __init__(self, filter_in, filter_out, kernel_size, stride=1):
        super(ConvBNLeaky, self).__init__()

        pad = (kernel_size - 1) // 2 if kernel_size else 0
        self.conv = nn.Conv2d(filter_in, filter_out, kernel_size, stride, padding=pad, bias=False)
        self.bn = nn.BatchNorm2d(filter_out)
        self.activation = nn.LeakyReLU(0.1)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.activation(x)
        return x

#------------------------------------------------------------------------#
#   make_last_layers contains seven convolutions in total: the first five
#   (ConvBNLeaky x 5) extract features, and the last two act as the
#   prediction head that produces the YOLO outputs.
#   filters_list is a list such as [512, 1024], in_filters comes from out_filters[],
#   and out_filter equals len(anchors_mask[0]) * (num_classes + 5).
#------------------------------------------------------------------------#
def make_last_layers(filters_list, in_filters, out_filter):
    m = nn.Sequential(
        ConvBNLeaky(in_filters, filters_list[0], 1),
        ConvBNLeaky(filters_list[0], filters_list[1], 3),
        ConvBNLeaky(filters_list[1], filters_list[0], 1),
        ConvBNLeaky(filters_list[0], filters_list[1], 3),
        ConvBNLeaky(filters_list[1], filters_list[0], 1),
        ConvBNLeaky(filters_list[0], filters_list[1], 3),
        nn.Conv2d(filters_list[1], out_filter, kernel_size=1, stride=1, padding=0, bias=True)
    )
    return m
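
out_filter is len(anchors_mask[i]) * (num_classes + 5): each of the 3 anchors at a scale predicts 4 box offsets, 1 objectness score and num_classes class scores, so VOC (20 classes) gives 3 x 25 = 75 and COCO (80 classes) gives 3 x 85 = 255. Because make_last_layers returns an nn.Sequential, the forward pass below can slice it into the five feature-extraction blocks ([:5]) and the two prediction layers ([5:]). A small illustrative check:

# How the head width is derived, and how the Sequential is split (sketch).
num_classes_voc, num_anchors = 20, 3
out_filter = num_anchors * (num_classes_voc + 5)
print(out_filter)  # 75 (for COCO: 3 * (80 + 5) = 255)

head = make_last_layers([512, 1024], in_filters=1024, out_filter=out_filter)
print(len(head[:5]), len(head[5:]))  # 5 feature-extraction blocks, 2 prediction layers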

class YoloBody(nn.Module):
    def __init__(self, anchors_mask, num_classes, pretrained = False):
        super(YoloBody, self).__init__()
        #---------------------------------------------------#
        #   Build the darknet53 backbone.
        #   It returns three effective feature maps with shapes:
        #   52,52,256
        #   26,26,512
        #   13,13,1024
        #---------------------------------------------------#
        self.backbone = darknet53() # build darknet53 and store it as the backbone
        if pretrained:
            self.backbone.load_state_dict(torch.load("model_data/darknet53_backbone_weights.pth"))

        #---------------------------------------------------#
        #   out_filters : [64, 128, 256, 512, 1024]
        #---------------------------------------------------#
        out_filters = self.backbone.layers_out_filters

        #------------------------------------------------------------------------#
        #   Output channels of each yolo_head. For the VOC dataset (20 classes):
        #   final_out_filter0 = final_out_filter1 = final_out_filter2 = 75
        #------------------------------------------------------------------------#
        self.last_layer0            = make_last_layers([512, 1024], out_filters[-1], len(anchors_mask[0]) * (num_classes + 5))

        self.last_layer1_conv       = ConvBNLeaky(512, 256, 1)
        self.last_layer1_upsample   = nn.Upsample(scale_factor=2, mode='nearest')
        self.last_layer1            = make_last_layers([256, 512], out_filters[-2] + 256, len(anchors_mask[1]) * (num_classes + 5))

        self.last_layer2_conv       = ConvBNLeaky(256, 128, 1)
        self.last_layer2_upsample   = nn.Upsample(scale_factor=2, mode='nearest')
        self.last_layer2            = make_last_layers([128, 256], out_filters[-3] + 128, len(anchors_mask[2]) * (num_classes + 5))

    def forward(self, x):
        #---------------------------------------------------#
        #   Get the three effective feature maps with shapes:
        #   52,52,256; 26,26,512; 13,13,1024
        #   x2, x1, x0 correspond to the backbone outputs out3, out4, out5
        #---------------------------------------------------#
        x2, x1, x0 = self.backbone(x)

        #---------------------------------------------------#
        #   First output feature map
        #   out0 = (batch_size,255,13,13)   (255 = 3*85 for COCO; 75 for VOC)
        #---------------------------------------------------#
        # 13,13,1024 -> 13,13,512 -> 13,13,1024 -> 13,13,512 -> 13,13,1024 -> 13,13,512
        out0_branch = self.last_layer0[:5](x0) # x0 is 13,13,1024; run it through the first five conv blocks
        out0        = self.last_layer0[5:](out0_branch) # then the prediction head: the 13x13 output for large objects

        # 13,13,512 -> 13,13,256 -> 26,26,256
        x1_in = self.last_layer1_conv(out0_branch) # one ConvBNLeaky block: 512 -> 256
        x1_in = self.last_layer1_upsample(x1_in) # nearest-neighbor upsample: 13x13 -> 26x26

        # 26,26,256 + 26,26,512 -> 26,26,768
        x1_in = torch.cat([x1_in, x1], 1) # concatenate x1_in and x1 along the channel dimension
        #---------------------------------------------------#
        #   Second output feature map
        #   out1 = (batch_size,255,26,26)
        #---------------------------------------------------#
        # 26,26,768 -> 26,26,256 -> 26,26,512 -> 26,26,256 -> 26,26,512 -> 26,26,256
        # 768 = out_filters[-2] + 256
        out1_branch = self.last_layer1[:5](x1_in) # the first five conv blocks
        out1        = self.last_layer1[5:](out1_branch) # then the prediction head: the 26x26 output for medium objects

        # 26,26,256 -> 26,26,128 -> 52,52,128
        x2_in = self.last_layer2_conv(out1_branch) # one ConvBNLeaky block: 256 -> 128
        x2_in = self.last_layer2_upsample(x2_in) # nearest-neighbor upsample: 26x26 -> 52x52

        # 52,52,128 + 52,52,256 -> 52,52,384
        x2_in = torch.cat([x2_in, x2], 1) # concatenate x2_in and x2 along the channel dimension
        #---------------------------------------------------#
        #   Third output feature map
        #   out2 = (batch_size,255,52,52)
        #---------------------------------------------------#
        # 52,52,384 -> 52,52,128 -> 52,52,256 -> 52,52,128 -> 52,52,256 -> 52,52,128
        # 384 = out_filters[-3] + 128
        out2 = self.last_layer2(x2_in) # five conv blocks plus the two-layer prediction head: the 52x52 output for small objects
        return out0, out1, out2
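
Putting the pieces together: assuming the usual anchors_mask of nine anchors split into three groups (an assumption, not shown in the code above), a forward pass on a 416x416 image yields the three outputs described in the comments, with 75 channels for VOC. A minimal sketch:

# End-to-end shape check for the YOLOv3 body (sketch, VOC settings assumed).
anchors_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
model = YoloBody(anchors_mask, num_classes=20, pretrained=False)
out0, out1, out2 = model(torch.randn(1, 3, 416, 416))
print(out0.shape)  # torch.Size([1, 75, 13, 13])  large objects
print(out1.shape)  # torch.Size([1, 75, 26, 26])  medium objects
print(out2.shape)  # torch.Size([1, 75, 52, 52])  small objects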

YOLOv3 + SPP


#---------------------------------------------------#
#   Convolution block -> convolution + batch norm + activation
#   Conv2d + BatchNormalization + LeakyReLU
#---------------------------------------------------#
class ConvBNLeaky(nn.Module): # a basic convolution block
    def __init__(self, filter_in, filter_out, kernel_size, stride=1):
        super(ConvBNLeaky, self).__init__()

        pad = (kernel_size - 1) // 2 if kernel_size else 0
        self.conv = nn.Conv2d(filter_in, filter_out, kernel_size, stride, padding=pad, bias=False)
        self.bn = nn.BatchNorm2d(filter_out)
        self.activation = nn.LeakyReLU(0.1)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.activation(x)
        return x


#---------------------------------------------------#
#   SPP block: max-pooling with kernels of different sizes (5x5, 9x9, 13x13)
#   kernel_size=5,  stride=1, padding=2
#   kernel_size=9,  stride=1, padding=4
#   kernel_size=13, stride=1, padding=6
#   The pooled results are concatenated with the original input.
#---------------------------------------------------#
class SpatialPyramidPooling(nn.Module):
    def __init__(self, pool_sizes=[5, 9, 13]):
        super(SpatialPyramidPooling, self).__init__()

        self.maxpools = nn.ModuleList([nn.MaxPool2d(kernel_size=pool_size, stride=1, padding=pool_size//2) for pool_size in pool_sizes])

    def forward(self, x):
        features = [maxpool(x) for maxpool in self.maxpools[::-1]]
        features = torch.cat(features + [x], dim=1) # x is the un-pooled input feature map

        return features
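
Because every max-pool uses stride 1 with padding=pool_size//2, the spatial size is preserved and only the channel dimension grows by a factor of 4 (three pooled copies plus the original input). A quick sketch:

# SPP keeps the spatial size and quadruples the channels (sketch).
spp = SpatialPyramidPooling()
x = torch.randn(1, 512, 13, 13)  # the 13x13 feature map after the first convs of the neck
print(spp(x).shape)              # torch.Size([1, 2048, 13, 13]) = 512 * 4 channels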

#------------------------------------------------------------------------#
#   make_last_layers contains seven convolutions in total: the first five
#   (ConvBNLeaky x 5) extract features, and the last two act as the
#   prediction head that produces the YOLO outputs.
#   filters_list is a list such as [512, 1024], in_filters comes from out_filters[],
#   and out_filter equals len(anchors_mask[0]) * (num_classes + 5).
#------------------------------------------------------------------------#
def make_last_layers(filters_list, in_filters, out_filter):
    m = nn.Sequential(
        ConvBNLeaky(in_filters, filters_list[0], 1),
        ConvBNLeaky(filters_list[0], filters_list[1], 3),
        ConvBNLeaky(filters_list[1], filters_list[0], 1),
        ConvBNLeaky(filters_list[0], filters_list[1], 3),
        ConvBNLeaky(filters_list[1], filters_list[0], 1),
        ConvBNLeaky(filters_list[0], filters_list[1], 3),
        nn.Conv2d(filters_list[1], out_filter, kernel_size=1, stride=1, padding=0, bias=True)
    )
    return m


# YoloBody with the SPP block added
class YoloBody(nn.Module):
    def __init__(self, anchors_mask, num_classes, pretrained = False):
        super(YoloBody, self).__init__()
        #---------------------------------------------------#
        #   Build the darknet53 backbone.
        #   It returns three effective feature maps with shapes:
        #   52,52,256
        #   26,26,512
        #   13,13,1024
        #---------------------------------------------------#
        self.backbone = darknet53() # build darknet53 and store it as the backbone
        if pretrained:
            self.backbone.load_state_dict(torch.load("model_data/darknet53_backbone_weights.pth"))

        #---------------------------------------------------#
        #   out_filters : [64, 128, 256, 512, 1024]
        #---------------------------------------------------#
        out_filters = self.backbone.layers_out_filters

        self.last_layer0_conv = ConvBNLeaky(1024, 512, 1)
        self.last_layer01_conv = ConvBNLeaky(512, 1024, 3)
        self.last_layer02_conv = ConvBNLeaky(1024, 512, 1)
        self.SPP = SpatialPyramidPooling()
        self.last_layer03_conv = ConvBNLeaky(2048, 512, 1)
        self.last_layer04_conv = ConvBNLeaky(512, 1024, 3)

        #------------------------------------------------------------------------#
        #   Output channels of each yolo_head. For the VOC dataset (20 classes):
        #   final_out_filter0 = final_out_filter1 = final_out_filter2 = 75
        #------------------------------------------------------------------------#
        self.last_layer0            = make_last_layers([512, 1024], out_filters[-1], len(anchors_mask[0]) * (num_classes + 5))
        self.last_layer1_conv       = ConvBNLeaky(512, 256, 1)
        self.last_layer1_upsample   = nn.Upsample(scale_factor=2, mode='nearest')
        self.last_layer1            = make_last_layers([256, 512], out_filters[-2] + 256, len(anchors_mask[1]) * (num_classes + 5))

        self.last_layer2_conv       = ConvBNLeaky(256, 128, 1)
        self.last_layer2_upsample   = nn.Upsample(scale_factor=2, mode='nearest')
        self.last_layer2            = make_last_layers([128, 256], out_filters[-3] + 128, len(anchors_mask[2]) * (num_classes + 5))

    def forward(self, x):
        #---------------------------------------------------#
        #   Get the three effective feature maps with shapes:
        #   52,52,256; 26,26,512; 13,13,1024
        #   x2, x1, x0 correspond to the backbone outputs out3, out4, out5
        #---------------------------------------------------#
        x2, x1, x0 = self.backbone(x)

        x0 = self.last_layer0_conv(x0)   # 1024 -> 512
        x0 = self.last_layer01_conv(x0)  # 512  -> 1024
        x0 = self.last_layer02_conv(x0)  # 1024 -> 512
        x0 = self.SPP(x0)                # 512  -> 2048
        x0 = self.last_layer03_conv(x0)  # 2048 -> 512
        x0 = self.last_layer04_conv(x0)  # 512  -> 1024

        #---------------------------------------------------#
        #   First output feature map
        #   out0 = (batch_size,255,13,13)
        #---------------------------------------------------#
        # After the SPP neck x0 is 13,13,1024; last_layer02_conv is reused here to bring it back to 13,13,512
        out0_branch = self.last_layer02_conv(x0)
        # out0_branch = self.last_layer0[:5](x0) # (the non-SPP path: five conv blocks on the raw 13,13,1024 backbone output)
        out0        = self.last_layer0[5:](out0_branch) # then the prediction head: the 13x13 output for large objects

        # 13,13,512 -> 13,13,256 -> 26,26,256
        x1_in = self.last_layer1_conv(out0_branch) # one ConvBNLeaky block: 512 -> 256
        x1_in = self.last_layer1_upsample(x1_in) # nearest-neighbor upsample: 13x13 -> 26x26

        # 26,26,256 + 26,26,512 -> 26,26,768
        x1_in = torch.cat([x1_in, x1], 1) # concatenate x1_in and x1 along the channel dimension
        #---------------------------------------------------#
        #   Second output feature map
        #   out1 = (batch_size,255,26,26)
        #---------------------------------------------------#
        # 26,26,768 -> 26,26,256 -> 26,26,512 -> 26,26,256 -> 26,26,512 -> 26,26,256
        # 768 = out_filters[-2] + 256
        out1_branch = self.last_layer1[:5](x1_in) # the first five conv blocks
        out1        = self.last_layer1[5:](out1_branch) # then the prediction head: the 26x26 output for medium objects

        # 26,26,256 -> 26,26,128 -> 52,52,128
        x2_in = self.last_layer2_conv(out1_branch) # one ConvBNLeaky block: 256 -> 128
        x2_in = self.last_layer2_upsample(x2_in) # nearest-neighbor upsample: 26x26 -> 52x52

        # 52,52,128 + 52,52,256 -> 52,52,384
        x2_in = torch.cat([x2_in, x2], 1) # concatenate x2_in and x2 along the channel dimension
        #---------------------------------------------------#
        #   Third output feature map
        #   out2 = (batch_size,255,52,52)
        #---------------------------------------------------#
        # 52,52,384 -> 52,52,128 -> 52,52,256 -> 52,52,128 -> 52,52,256 -> 52,52,128
        # 384 = out_filters[-3] + 128
        out2 = self.last_layer2(x2_in) # five conv blocks plus the two-layer prediction head: the 52x52 output for small objects
        return out0, out1, out2
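
The channel flow of x0 through the SPP neck can be traced module by module. A sketch using only the modules defined above (the anchors_mask and tensor sizes are illustrative):

# Trace the x0 channel flow through the SPP neck (sketch).
anchors_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
model = YoloBody(anchors_mask, num_classes=20)
x0 = model.last_layer0_conv(torch.randn(1, 1024, 13, 13))  # 1024 -> 512
x0 = model.last_layer01_conv(x0)                            # 512  -> 1024
x0 = model.last_layer02_conv(x0)                            # 1024 -> 512
x0 = model.SPP(x0)                                          # 512  -> 2048
x0 = model.last_layer03_conv(x0)                            # 2048 -> 512
x0 = model.last_layer04_conv(x0)                            # 512  -> 1024
print(x0.shape)                                             # torch.Size([1, 1024, 13, 13])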

