# 【复现】DarkNet53 — DarkNet-53 reproduction in PyTorch

import torch
import torch.nn.modules as nn
import torch.nn.functional as F

class Darknetconv2D_BN_Leaky(nn.Module):
    """Conv2d -> BatchNorm2d -> LeakyReLU(0.1): DarkNet's basic "CBL" unit.

    :param in_channels: number of input channels
    :param out_channels: number of output channels
    :param kernal_size: convolution kernel size (misspelled name kept for
        backward compatibility with existing keyword callers)
    :param stride: convolution stride
    :param padding: convolution padding
    """

    def __init__(self, in_channels, out_channels, kernal_size, stride, padding):
        super(Darknetconv2D_BN_Leaky, self).__init__()
        self.conv = nn.Sequential(
            # bias=False: a conv bias directly before BatchNorm is redundant,
            # since BN's learned shift absorbs it (matches official DarkNet-53).
            nn.Conv2d(in_channels, out_channels, kernal_size, stride, padding, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(0.1)
        )

    def forward(self, x):
        """Apply conv + BN + LeakyReLU to a (N, C, H, W) tensor."""
        return self.conv(x)


class ResidualLayer(nn.Module):
    """1x1 bottleneck + 3x3 expansion with an additive identity shortcut.

    :param in_channels: channel count of both the input and the output
    """

    def __init__(self, in_channels):
        super(ResidualLayer, self).__init__()
        bottleneck = in_channels // 2
        # 1x1 conv halves the channels; the 3x3 conv restores them, so the
        # sum in forward() is shape-compatible with the input.
        self.conv1 = Darknetconv2D_BN_Leaky(bottleneck * 2 if in_channels % 2 == 0 else in_channels, bottleneck, kernal_size=1, stride=1, padding=0) if False else Darknetconv2D_BN_Leaky(in_channels, bottleneck, kernal_size=1, stride=1, padding=0)
        self.conv2 = Darknetconv2D_BN_Leaky(bottleneck, in_channels, kernal_size=3, stride=1, padding=1)
        self.residual = nn.Sequential(self.conv1, self.conv2)

    def forward(self, x):
        # Identity shortcut: output keeps the input's shape and channels.
        return self.residual(x) + x

class ResidualLayerBlock(nn.Module):
    """A stack of identical residual layers applied sequentially.

    :param lay_num: number of ResidualLayer modules in the stack
    :param in_channels: channel count shared by every layer in the stack
    """

    def __init__(self, lay_num, in_channels):
        super(ResidualLayerBlock, self).__init__()
        # Plain list kept for attribute compatibility; the Sequential wrapper
        # is what registers the sub-modules with this Module.
        self.convs = [ResidualLayer(in_channels) for _ in range(lay_num)]
        self.residualLayerBlock = nn.Sequential(*self.convs)

    def forward(self, x):
        return self.residualLayerBlock(x)

class upSampleLayer(nn.Module):
    """Parameter-free 2x spatial upsampling (F.interpolate's default mode)."""

    def __init__(self):
        super(upSampleLayer, self).__init__()

    def forward(self, feature_map):
        # Doubles H and W of a (N, C, H, W) tensor.
        return F.interpolate(feature_map, scale_factor=2)

class darkNet53(nn.Module):
    """DarkNet-53 backbone with a classification head.

    Input: (N, 3, H, W) images; H and W should be multiples of 32 so the
    five stride-2 convolutions divide them cleanly.
    Output: (N, num_class) softmax probabilities.

    :param num_class: number of output classes (default 1000)
    """

    def __init__(self, num_class=1000):
        super(darkNet53, self).__init__()
        self.cbl1 = Darknetconv2D_BN_Leaky(in_channels=3, out_channels=32, kernal_size=3, stride=1, padding=1)
        # NOTE(review): in the YOLOv3 paper the downsampling convs are also
        # Conv+BN+Leaky units; plain Conv2d is kept here to preserve this
        # reproduction's structure — confirm before reusing pretrained weights.
        self.conv1 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1)
        self.res1 = ResidualLayer(64)
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1)
        self.res2 = ResidualLayerBlock(lay_num=2, in_channels=128)
        self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1)
        self.res3 = ResidualLayerBlock(lay_num=8, in_channels=256)
        self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1)
        self.res4 = ResidualLayerBlock(lay_num=8, in_channels=512)
        self.conv5 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3, stride=2, padding=1)
        self.res5 = ResidualLayerBlock(lay_num=4, in_channels=1024)

        # Nested stages: mid_result2 contains mid_result1, and mid_result3
        # contains mid_result2, so mid_result3 alone runs the full backbone.
        # mid_result1/2 also expose the intermediate feature maps that a
        # YOLO-style detection head would tap.
        self.mid_result1 = nn.Sequential(self.cbl1, self.conv1, self.res1, self.conv2, self.res2, self.conv3, self.res3)
        self.mid_result2 = nn.Sequential(self.mid_result1, self.conv4, self.res4)
        self.mid_result3 = nn.Sequential(self.mid_result2, self.conv5, self.res5)

        # Classification head. AdaptiveAvgPool2d(1) replaces the original
        # AvgPool2d(kernel_size=8): identical on the 8x8 map produced by a
        # 256x256 input, but it also accepts any other input resolution.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.linear = nn.Linear(in_features=1024, out_features=num_class)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # Bug fix: the original ran
        #   self.mid_result3(self.mid_result2(self.mid_result1(x)))
        # but mid_result3 already nests mid_result2 and mid_result1, so the
        # early stages were applied repeatedly and crashed on a channel
        # mismatch (cbl1 expects 3 channels). One call runs the whole
        # backbone exactly once.
        x = self.mid_result3(x)
        x = self.avg_pool(x)
        x = x.view(x.size(0), -1)  # flatten (N, 1024, 1, 1) -> (N, 1024)
        out = self.softmax(self.linear(x))
        return out

if __name__ == '__main__':
    # Smoke check: instantiate the backbone and dump its layer structure.
    net = darkNet53()
    print(net)








# 各层参数,图源网络 (per-layer parameters; figure sourced from the web)