DenseNet
Paper:
Reference articles:
Code implementation
Network structure
Advantages
- Alleviates the vanishing-gradient problem
- Acts as a form of deep supervision on every layer's features
- Strengthens feature propagation
- Makes more effective use of features (see the connectivity sketch after this list)
- Reduces the number of parameters to some extent
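These advantages all stem from dense connectivity: layer l receives the concatenation of all preceding feature maps, x_l = H_l([x_0, x_1, ..., x_{l-1}]), and contributes only k = growth_rate new channels. A minimal sketch of this concatenation pattern (names and shapes here are illustrative, not taken from the reference code):

import torch
import torch.nn as nn

# Toy dense connectivity: each layer sees the concat of all earlier outputs
# and adds growth_rate (k) new channels.
k0, k, num_layers = 24, 12, 4
layers = nn.ModuleList(
    nn.Conv2d(k0 + i * k, k, kernel_size=3, padding=1, bias=False)
    for i in range(num_layers)
)

x = torch.randn(1, k0, 32, 32)
features = [x]
for layer in layers:
    out = layer(torch.cat(features, dim=1))  # input width grows: k0, k0+k, k0+2k, ...
    features.append(out)

print(torch.cat(features, dim=1).shape)  # torch.Size([1, 72, 32, 32]) = k0 + num_layers*k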
Feature Reuse
- Features extracted by early layers can still be used directly by much deeper layers.
- Even the transition layers use features from all layers of the preceding dense block.
- Layers in the second and third dense blocks make little use of the preceding transition layer's outputs, which suggests the transition layer emits many redundant features. This also supports DenseNet-BC, i.e., the need for compression.
- Although the final classification layer uses information from many layers of the last dense block, it leans toward the last few feature maps, suggesting that some high-level features are only produced in the final layers of the network.
Notes
Why does the paper define conv as BN-ReLU-Conv rather than Conv-BN-ReLU?
See the difference between ResNet v1 and ResNet v2:
详解深度学习之经典网络架构(六):ResNet 两代(ResNet v1和ResNet v2)
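In short, BN-ReLU-Conv is the "pre-activation" ordering introduced by ResNet v2. A minimal sketch contrasting the two orderings (illustrative, not from the reference code):

import torch.nn as nn

channels = 64

# Post-activation, ResNet v1 style: Conv-BN-ReLU
post_act = nn.Sequential(
    nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False),
    nn.BatchNorm2d(channels),
    nn.ReLU(inplace=True),
)

# Pre-activation, ResNet v2 style (used by DenseNet): BN-ReLU-Conv
pre_act = nn.Sequential(
    nn.BatchNorm2d(channels),
    nn.ReLU(inplace=True),
    nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False),
)

One intuition: in DenseNet the same feature maps are reused by many later layers, so each layer normalizes the shared, concatenated input for itself before convolving.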
Code details
Reference code: densenet-pytorch
The network structure is shown in the figure below:
Build a DenseNet-BC with depth=100, growth_rate=12, and three dense blocks with the following call:
model = DenseNet3(depth=100, num_classes=10, growth_rate=12, reduction=0.5, bottleneck=True, dropRate=0)
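With these arguments, each dense block contains n = (100 - 4) / 3 / 2 = 16 bottleneck layers, and the channel counts evolve as follows (arithmetic read off the code below):

in_planes = 2 * 12          # 24 channels after the first 3x3 conv
in_planes += 16 * 12        # block1: 24 + 192 = 216
in_planes = int(216 * 0.5)  # trans1 compresses to 108
in_planes += 16 * 12        # block2: 108 + 192 = 300
in_planes = int(300 * 0.5)  # trans2 compresses to 150
in_planes += 16 * 12        # block3: 150 + 192 = 342 channels into the classifier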
BottleneckBlock
Following the paper, the bottleneck's 1x1 convolution outputs 4k feature maps (k = growth_rate). The feature maps produced by the 1x1 and 3x3 convolutions are then concatenated with the block's original input feature maps, which is what implements the dense connectivity.
import math

import torch
import torch.nn as nn
import torch.nn.functional as F

class BottleneckBlock(nn.Module):
    def __init__(self, in_planes, out_planes, dropRate=0.0):
        super(BottleneckBlock, self).__init__()
        inter_planes = out_planes * 4  # the 1x1 conv outputs 4k feature maps
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        # BN-ReLU-Conv ordering: 1x1 bottleneck conv
        self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(inter_planes)
        # 3x3 conv producing the k new feature maps
        self.conv2 = nn.Conv2d(inter_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate

    def forward(self, x):
        out = self.conv1(self.relu(self.bn1(x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
        out = self.conv2(self.relu(self.bn2(out)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
        # Dense connection: concatenate the input with the new feature maps
        return torch.cat([x, out], 1)
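A quick shape check (illustrative values, not from the reference repo): the output should have in_planes + growth_rate channels.

import torch

block = BottleneckBlock(in_planes=24, out_planes=12)
x = torch.randn(1, 24, 32, 32)
print(block(x).shape)  # torch.Size([1, 36, 32, 32]): 24 input + 12 new channels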
TransitionBlock
Each dense block outputs a large number of channels (in_planes + n*growth_rate), so a 1x1 convolution is used to reduce the number of feature maps, followed by a 2x2 average pooling that halves the spatial resolution. With the default reduction=0.5, the number of feature maps handed to the next dense block is cut in half.
class TransitionBlock(nn.Module):
    def __init__(self, in_planes, out_planes, dropRate=0.0):
        super(TransitionBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        # 1x1 conv compresses the channels: out_planes = floor(in_planes * reduction)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.droprate = dropRate

    def forward(self, x):
        out = self.conv1(self.relu(self.bn1(x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
        # 2x2 average pooling halves the spatial resolution
        return F.avg_pool2d(out, 2)
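Another quick shape check (illustrative values): with reduction=0.5, the 216 channels leaving the first dense block are compressed to 108, and the 32x32 maps shrink to 16x16.

import torch

trans = TransitionBlock(in_planes=216, out_planes=108)
x = torch.randn(1, 216, 32, 32)
print(trans(x).shape)  # torch.Size([1, 108, 16, 16])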
DenseBlock
Implementation of a single dense block. Note that layer i inside the block receives in_planes + i*growth_rate input channels, since inputs accumulate through concatenation.
class DenseBlock(nn.Module):
    def __init__(self, nb_layers, in_planes, growth_rate, block, dropRate=0.0):
        # e.g. DenseBlock(nb_layers=16, in_planes=24, growth_rate=12, block=BottleneckBlock, dropRate=0.0)
        super(DenseBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, growth_rate, nb_layers, dropRate)

    def _make_layer(self, block, in_planes, growth_rate, nb_layers, dropRate):
        layers = []
        for i in range(nb_layers):
            # layer i sees the block input plus the i*growth_rate channels
            # added by the layers before it
            layers.append(block(in_planes + i * growth_rate, growth_rate, dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)
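And a shape check for a whole block (illustrative values): 16 bottleneck layers at growth_rate=12 add 192 channels to the 24 input channels.

import torch

dense = DenseBlock(nb_layers=16, in_planes=24, growth_rate=12, block=BottleneckBlock)
x = torch.randn(1, 24, 32, 32)
print(dense(x).shape)  # torch.Size([1, 216, 32, 32]): 24 + 16*12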
DenseNet
In this DenseNet implementation there are three dense blocks. With depth=100 and bottleneck=True, n = (100 - 4) / 3 / 2 = 16, so each dense block consists of 16 BottleneckBlocks; each BottleneckBlock contributes two conv layers, which together with the first conv, the two transition layers, and the classifier gives a depth of 100.
class DenseNet3(nn.Module):
    def __init__(self, depth, num_classes, growth_rate=12,
                 reduction=0.5, bottleneck=True, dropRate=0.0):
        super(DenseNet3, self).__init__()
        in_planes = 2 * growth_rate  # 24 for growth_rate=12
        n = (depth - 4) / 3  # 32 conv layers per block, before the bottleneck halving
        if bottleneck:
            n = n / 2  # a BottleneckBlock holds two conv layers, so 16 blocks per dense block
            block = BottleneckBlock
        else:
            block = BasicBlock  # single 3x3 BN-ReLU-Conv block, defined in the reference repo
        n = int(n)
        # 1st conv before any dense block: 3x3, stride 1, no max pooling
        # (the CIFAR stem, unlike the ImageNet stem's 7x7, stride-2 conv plus pooling)
        self.conv1 = nn.Conv2d(3, in_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int(in_planes + n * growth_rate)
        self.trans1 = TransitionBlock(in_planes, int(math.floor(in_planes * reduction)), dropRate=dropRate)
        in_planes = int(math.floor(in_planes * reduction))
        # 2nd block
        self.block2 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int(in_planes + n * growth_rate)
        self.trans2 = TransitionBlock(in_planes, int(math.floor(in_planes * reduction)), dropRate=dropRate)
        in_planes = int(math.floor(in_planes * reduction))
        # 3rd block (no transition layer afterwards)
        self.block3 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int(in_planes + n * growth_rate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(in_planes, num_classes)
        self.in_planes = in_planes
        # He initialization for convs; BN affine params to 1/0; linear bias to 0
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.block1(out))
        out = self.trans2(self.block2(out))
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)  # global average pooling over the final 8x8 maps
        out = out.view(-1, self.in_planes)
        return self.fc(out)
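Finally, a sanity check of the full model on a CIFAR-sized input (illustrative, not from the reference repo):

import torch

model = DenseNet3(depth=100, num_classes=10, growth_rate=12,
                  reduction=0.5, bottleneck=True, dropRate=0)
x = torch.randn(2, 3, 32, 32)  # CIFAR-10 sized batch
print(model(x).shape)                              # torch.Size([2, 10])
print(sum(p.numel() for p in model.parameters()))  # ~0.8M, matching the paper's DenseNet-BC (L=100, k=12)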