● Difficulty: solidifying the fundamentals ⭐⭐
● Language: Python 3, PyTorch
🍨 This post is a learning-log entry for the 🔗365天深度学习训练营 (365-day deep learning camp)
- 🍖 Original author: K同学啊 | tutoring and custom projects available
I. Preparation
1. Set up the GPU
Use the GPU if one is available on the device; otherwise fall back to the CPU, as in the sketch below.
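A minimal sketch of the standard PyTorch device check:

```python
import torch

# Use the GPU if one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
```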
2. Import the data
The local dataset lives under the ./data/4-data/ directory.
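A loading sketch, assuming ./data/4-data/ holds one subfolder per class (the layout torchvision.datasets.ImageFolder expects); the batch size and normalization statistics are placeholder choices:

```python
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # the model below assumes 224x224 input
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],  # ImageNet statistics
                         std=[0.229, 0.224, 0.225]),
])

# Assumes a ./data/4-data/<class_name>/<image> layout.
total_data = datasets.ImageFolder("./data/4-data/", transform=train_transforms)
train_loader = DataLoader(total_data, batch_size=32, shuffle=True)
```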
3. Display image information
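One way to inspect the data, a sketch that un-normalizes and plots the first few images of a batch with their class names:

```python
import matplotlib.pyplot as plt
import numpy as np

imgs, labels = next(iter(train_loader))
print(imgs.shape)  # e.g. torch.Size([32, 3, 224, 224])

plt.figure(figsize=(10, 5))
for i in range(8):
    npimg = imgs[i].numpy().transpose(1, 2, 0)  # CHW -> HWC
    # Undo the normalization applied above so colors display correctly.
    npimg = npimg * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])
    ax = plt.subplot(2, 4, i + 1)
    ax.imshow(npimg.clip(0, 1))
    ax.set_title(total_data.classes[labels[i].item()])
    ax.axis("off")
plt.show()
```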
II. Building the ResNeXt-50 Model
1. Model
See the summary below.
2. Code
```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class BN_Conv2d(nn.Module):
    """Conv2d -> BatchNorm -> ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding,
                 dilation=1, groups=1, bias=False):
        super(BN_Conv2d, self).__init__()
        self.seq = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
                      padding=padding, dilation=dilation, groups=groups, bias=bias),
            nn.BatchNorm2d(out_channels)
        )

    def forward(self, x):
        return F.relu(self.seq(x))


class ResNeXt_Block(nn.Module):
    """ResNeXt bottleneck block built on a grouped 3x3 convolution."""

    def __init__(self, in_chnls, cardinality, group_depth, stride):
        super(ResNeXt_Block, self).__init__()
        self.group_chnls = cardinality * group_depth
        self.conv1 = BN_Conv2d(in_chnls, self.group_chnls, 1, stride=1, padding=0)  # 1x1 reduce
        self.conv2 = BN_Conv2d(self.group_chnls, self.group_chnls, 3, stride=stride,
                               padding=1, groups=cardinality)                       # 3x3 grouped conv
        self.conv3 = nn.Conv2d(self.group_chnls, self.group_chnls * 2, 1, stride=1, padding=0)  # 1x1 expand
        self.bn = nn.BatchNorm2d(self.group_chnls * 2)
        self.short_cut = nn.Sequential(  # projection shortcut, used by every block here
            nn.Conv2d(in_chnls, self.group_chnls * 2, 1, stride, 0, bias=False),
            nn.BatchNorm2d(self.group_chnls * 2)
        )

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.bn(self.conv3(out))
        out += self.short_cut(x)
        return F.relu(out)


class ResNeXt(nn.Module):
    """ResNeXt; `layers` gives the number of blocks per stage."""

    def __init__(self, layers: list, cardinality, group_depth, num_classes):
        super(ResNeXt, self).__init__()
        self.cardinality = cardinality
        self.channels = 64
        self.conv1 = BN_Conv2d(3, self.channels, 7, stride=2, padding=3)
        d1 = group_depth
        self.conv2 = self._make_layers(d1, layers[0], stride=1)
        d2 = d1 * 2
        self.conv3 = self._make_layers(d2, layers[1], stride=2)
        d3 = d2 * 2
        self.conv4 = self._make_layers(d3, layers[2], stride=2)
        d4 = d3 * 2
        self.conv5 = self._make_layers(d4, layers[3], stride=2)
        self.fc = nn.Linear(self.channels, num_classes)  # assumes 224x224 input

    def _make_layers(self, d, blocks, stride):
        strides = [stride] + [1] * (blocks - 1)  # only the first block of a stage downsamples
        layers = []
        for stride in strides:
            layers.append(ResNeXt_Block(self.channels, self.cardinality, d, stride))
            self.channels = self.cardinality * d * 2  # next block's input width
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = F.max_pool2d(out, 3, 2, 1)
        out = self.conv2(out)
        out = self.conv3(out)
        out = self.conv4(out)
        out = self.conv5(out)
        out = F.avg_pool2d(out, 7)  # 7x7 -> 1x1 for a 224x224 input
        out = out.view(out.size(0), -1)
        # Note: if training with nn.CrossEntropyLoss, return the raw logits
        # instead of applying softmax here.
        out = F.softmax(self.fc(out), dim=1)
        return out
```
Model parameter count:
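A sketch of instantiating the network and counting its parameters; [3, 4, 6, 3] blocks per stage with cardinality 32 and group depth 4 is the ResNeXt-50 (32x4d) configuration from the paper, and num_classes=2 is a placeholder to replace with your dataset's class count:

```python
# ResNeXt-50 (32x4d): stage depths [3, 4, 6, 3], cardinality 32, group depth 4.
model = ResNeXt([3, 4, 6, 3], cardinality=32, group_depth=4, num_classes=2).to(device)

total_params = sum(p.numel() for p in model.parameters())
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Total params:     {total_params:,}")
print(f"Trainable params: {trainable_params:,}")
```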
III. Training the Model
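The training code itself isn't shown here; below is a minimal sketch of a standard PyTorch loop over the model and train_loader defined above. The optimizer, learning rate, and epoch count are placeholder choices:

```python
import torch.optim as optim

# nn.CrossEntropyLoss expects raw logits; drop the softmax in forward() if you use it.
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)

def train_epoch(loader, model, loss_fn, optimizer):
    model.train()
    total_loss, correct, n = 0.0, 0, 0
    for X, y in loader:
        X, y = X.to(device), y.to(device)
        pred = model(X)
        loss = loss_fn(pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item() * y.size(0)
        correct += (pred.argmax(1) == y).sum().item()
        n += y.size(0)
    return total_loss / n, correct / n

for epoch in range(10):  # placeholder epoch count
    train_loss, train_acc = train_epoch(train_loader, model, loss_fn, optimizer)
    print(f"Epoch {epoch + 1}: loss = {train_loss:.4f}, acc = {train_acc:.4f}")
```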
Summary

In short, ResNeXt-50 keeps the overall ResNet architecture but replaces each bottleneck's single transformation with an aggregation of parallel branches of identical topology ("aggregated residual transformations"), implemented efficiently as grouped convolutions. (Augmentation and regularization tricks such as Random Erasing and Mixup are often paired with it in training recipes, but they come from separate papers, not from ResNeXt itself.)

1. Differences between ResNeXt-50 and ResNet

- ResNet: the 256-channel features are compressed 4× down to 64 channels by a 1×1 convolution, processed by a 3×3 convolution, then expanded back by another 1×1 convolution and added to the input through the residual connection.
- ResNeXt: the 256-channel features are split into 32 groups, each compressed 64× down to 4 channels and processed separately; the 32 groups are then summed and added to the input through the residual connection (see the parameter comparison after this list).
- Isn't this just Professor Wan's old "batch assimilation"? The goal is the same: cut the amount of computation by shrinking the giant matrices involved.
- ResNeXt divides the feature maps into groups and convolves each group separately; again, the same idea as batch assimilation, except one splits features and the other splits scales.
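To make the comparison concrete, a quick sketch that counts the weights along the two paths described above (256-channel input, 32×4d configuration); both land near 70k parameters, which is why the paper treats the two designs as comparable in complexity:

```python
import torch.nn as nn

def n_params(m):
    return sum(p.numel() for p in m.parameters())

# ResNet bottleneck path: 256 -> 64 -> 64 -> 256
resnet_path = nn.Sequential(
    nn.Conv2d(256, 64, 1, bias=False),
    nn.Conv2d(64, 64, 3, padding=1, bias=False),
    nn.Conv2d(64, 256, 1, bias=False),
)

# ResNeXt (32x4d) path: 256 -> 128 -> 128 (32 groups of 4) -> 256
resnext_path = nn.Sequential(
    nn.Conv2d(256, 128, 1, bias=False),
    nn.Conv2d(128, 128, 3, padding=1, groups=32, bias=False),
    nn.Conv2d(128, 256, 1, bias=False),
)

print(n_params(resnet_path))   # 69,632
print(n_params(resnext_path))  # 70,144
```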
2. Ordinary convolution vs. grouped convolution

Grouped convolution splits the feature maps into groups and convolves each group separately, so each filter only sees a fraction of the input channels and the parameter count drops by a factor of the group count, as the sketch below shows.
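A minimal illustration with nn.Conv2d: passing groups is the only change, and the weight tensor shrinks by the group count:

```python
import torch.nn as nn

plain   = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False)
grouped = nn.Conv2d(256, 256, kernel_size=3, padding=1, groups=32, bias=False)

print(plain.weight.shape)    # torch.Size([256, 256, 3, 3]) -> 589,824 weights
print(grouped.weight.shape)  # torch.Size([256, 8, 3, 3])   ->  18,432 weights
```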
3. Performance comparison between ResNeXt-50 and ResNet
References
Saining Xie, Ross Girshick, Piotr Dollár, Zhuowen Tu, Kaiming He. Aggregated Residual Transformations for Deep Neural Networks. CVPR 2017.