我谈谈自己的见解:
1. Darknet 本质上是为了在加深网络层数的同时,便于人为改善正向传播的网络结构而产生的。
加深网络层数很简单, DarkNet([1, 2, 8, 8, 4]) 就是当前 darknet53 拥有的Darknet块。
2. Darknet 块实现起来特别简单,总结成一句话:特征图在 input 和 output 之间 shape(即 H、W、C)保持不变,因此残差相加才是合法的。
3. 卷积层权值初始化的时候,m.weight.data.normal_(0, math.sqrt(2. / n)),0 是均值,math.sqrt(2./n) 是标准差(其中 n = kernel_h × kernel_w × out_channels)。由附加内容可知,标准差就是方差的平方根。
附:var =Σ [(xi - x̅)^2]/n ; std= sqrt(var)
4. batchnorm层的权重和偏置值如下,根据维度也可以猜出来谁是w ,谁是 b。
它的Code中,将权重设置为1,偏置值设置为0。
m.weight.data.fill_(1)
m.bias.data.zero_()
Code:
import torch
import torch.nn as nn
import math
from collections import OrderedDict
# 基本的darknet块
class BasicBlock(nn.Module):
    """Darknet residual block.

    A 1x1 convolution first squeezes the channel count (cheap bottleneck,
    reduces parameters), then a 3x3 convolution restores it; a skip
    connection adds the unmodified input back. Input and output feature
    maps have identical shape (H, W, C), which is what makes the
    residual addition valid.
    """

    def __init__(self, inplanes, planes):
        super(BasicBlock, self).__init__()
        # 1x1 conv: inplanes -> planes[0], shrinks channels only.
        self.conv1 = nn.Conv2d(inplanes, planes[0], kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(planes[0])
        self.relu1 = nn.LeakyReLU(0.1)
        # 3x3 conv: planes[0] -> planes[1]; padding=1 with stride=1
        # keeps the spatial size unchanged.
        self.conv2 = nn.Conv2d(planes[0], planes[1], kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes[1])
        self.relu2 = nn.LeakyReLU(0.1)

    def forward(self, x):
        shortcut = x
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        # Residual connection: add the untouched input back on.
        return out + shortcut
class DarkNet(nn.Module):
    """Darknet backbone: a stem convolution followed by five stages,
    each consisting of one stride-2 downsampling conv plus a stack of
    residual BasicBlocks.

    `layers` holds the number of BasicBlocks per stage;
    DarkNet([1, 2, 8, 8, 4]) reproduces darknet53. forward() returns
    the outputs of the last three stages as multi-scale features.
    """

    def __init__(self, layers):
        super(DarkNet, self).__init__()
        self.inplanes = 32
        # Stem: 3 -> 32 channels, spatial size unchanged (stride 1).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu1 = nn.LeakyReLU(0.1)
        # Each stage halves H/W and doubles the channel count.
        self.layer1 = self._make_layer([32, 64], layers[0])
        self.layer2 = self._make_layer([64, 128], layers[1])
        self.layer3 = self._make_layer([128, 256], layers[2])
        self.layer4 = self._make_layer([256, 512], layers[3])
        self.layer5 = self._make_layer([512, 1024], layers[4])
        self.layers_out_filters = [64, 128, 256, 512, 1024]

        # Weight initialization: He-style normal for conv layers
        # (std = sqrt(2 / fan_out)); batchnorm affine set to identity
        # (weight = 1, bias = 0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, planes, blocks):
        """Build one stage: a stride-2 downsampling conv (halves H/W)
        followed by `blocks` residual BasicBlocks at the new width."""
        stage = [
            ("ds_conv", nn.Conv2d(self.inplanes, planes[1], kernel_size=3,
                                  stride=2, padding=1, bias=False)),
            ("ds_bn", nn.BatchNorm2d(planes[1])),
            ("ds_relu", nn.LeakyReLU(0.1)),
        ]
        # From here on the running channel count is the stage's output width.
        self.inplanes = planes[1]
        for i in range(blocks):
            stage.append(("residual_{}".format(i),
                          BasicBlock(self.inplanes, planes)))
        return nn.Sequential(OrderedDict(stage))

    def forward(self, x):
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.layer2(self.layer1(x))
        out3 = self.layer3(x)
        out4 = self.layer4(out3)
        out5 = self.layer5(out4)
        return out3, out4, out5
# pretrained为权重文件路径
def darknet53(pretrained, **kwargs):
    """Build the darknet53 backbone (DarkNet with [1, 2, 8, 8, 4] blocks).

    Parameters
    ----------
    pretrained : str or falsy
        Path to a saved state dict to load into the model; pass a falsy
        value (None/False/"") to skip loading.

    Returns
    -------
    DarkNet
        The constructed (and optionally pretrained) model.

    Raises
    ------
    ValueError
        If `pretrained` is truthy but not a path string.
    """
    model = DarkNet([1, 2, 8, 8, 4])
    if pretrained:
        # Guard clause: a truthy non-string (e.g. True) is a caller bug,
        # not a loadable path. ValueError is a subclass of Exception, so
        # existing `except Exception` handlers still work.
        if not isinstance(pretrained, str):
            raise ValueError(
                "darknet53 requires a pretrained weight path (str), "
                "got [{}]".format(pretrained))
        model.load_state_dict(torch.load(pretrained))
    return model