import torch
import torch.nn as nn
import torch.nn.init as init
from torch.hub import load_state_dict_from_url

# Download locations for the pretrained torchvision weights.
model_urls = {
    'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
    'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
}


def squeezenet1_0(pretrained=False, progress=True, **kwargs):
r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
accuracy with 50x fewer parameters and <0.5MB model size"
<https://arxiv.org/abs/1602.07360>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _squeezenet('1_0', pretrained, progress, **kwargs)


def squeezenet1_1(pretrained=False, progress=True, **kwargs):
    r"""SqueezeNet 1.1 model architecture from the `official SqueezeNet repo
    <https://github.com/forresti/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
    than SqueezeNet 1.0, without sacrificing accuracy.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _squeezenet('1_1', pretrained, progress, **kwargs)
def _squeezenet(version, pretrained, progress, **kwargs):
model = SqueezeNet(version, **kwargs)
if pretrained:
arch = 'squeezenet' + version
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
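

# A minimal usage sketch (not part of the original torchvision module): build the
# 1.0 variant via the factory above and push a dummy batch through it. The helper
# name below is our own.
def _demo_squeezenet1_0():
    model = squeezenet1_0(pretrained=False)  # set pretrained=True to download ImageNet weights
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # torch.Size([1, 1000])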
class SqueezeNet(nn.Module):
def __init__(self, version='1_0', num_classes=1000):
super(SqueezeNet, self).__init__()
self.num_classes = num_classes
if version == '1_0':
self.features = nn.Sequential(
                nn.Conv2d(3, 96, kernel_size=7, stride=2),  # 7x7 conv, stride 2
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),  # max pooling
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),  # max pooling
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),  # max pooling
Fire(512, 64, 256, 256),
)
elif version == '1_1':
self.features = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=3, stride=2),  # 3x3 conv, stride 2
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),  # max pooling
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),  # max pooling
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),  # max pooling
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256),
)
else:
# FIXME: Is this needed? SqueezeNet should only be called from the
# FIXME: squeezenet1_x() functions
# FIXME: This checking is not done for the other models
            raise ValueError("Unsupported SqueezeNet version {version}: "
                             "1_0 or 1_1 expected".format(version=version))
# Final convolution is initialized differently from the rest
        final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)  # 1x1 conv
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),  # dropout layer
            final_conv,
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d((1, 1))  # adaptive average pooling to a 1x1 output
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m is final_conv:
init.normal_(m.weight, mean=0.0, std=0.01)
else:
init.kaiming_uniform_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
x = self.features(x)
x = self.classifier(x)
return torch.flatten(x, 1)
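

# Shape-flow sketch (our addition, assuming a standard 224x224 RGB input): the
# feature extractor ends at roughly 13x13x512, the 1x1 classifier conv maps to
# num_classes channels, and adaptive average pooling plus flatten yields the logits.
def _demo_forward_shapes():
    model = SqueezeNet(version='1_1', num_classes=1000)
    x = torch.randn(2, 3, 224, 224)
    feats = model.features(x)
    out = model(x)
    print(feats.shape, out.shape)  # torch.Size([2, 512, 13, 13]) torch.Size([2, 1000])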
class Fire(nn.Module):
def __init__(self, inplanes, squeeze_planes,
expand1x1_planes, expand3x3_planes):
        # e.g. the first Fire block uses inplanes=96, squeeze_planes=16, expand1x1_planes=64, expand3x3_planes=64
super(Fire, self).__init__()
self.inplanes = inplanes
        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)  # 1x1 squeeze conv
        self.squeeze_activation = nn.ReLU(inplace=True)  # activation
        self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
                                   kernel_size=1)  # 1x1 expand conv
        self.expand1x1_activation = nn.ReLU(inplace=True)  # activation
        self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
                                   kernel_size=3, padding=1)  # 3x3 expand conv, padded to keep spatial size
        self.expand3x3_activation = nn.ReLU(inplace=True)  # activation
def forward(self, x):
        x = self.squeeze_activation(self.squeeze(x))  # squeeze with the 1x1 conv
        return torch.cat([
            self.expand1x1_activation(self.expand1x1(x)),  # 1x1 expand branch
            self.expand3x3_activation(self.expand3x3(x))   # 3x3 expand branch, computed in parallel
        ], 1)  # concatenate along the channel dimension
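

# Quick channel-bookkeeping check for the Fire block (our addition): the two expand
# branches are concatenated along dim 1, so the output has
# expand1x1_planes + expand3x3_planes channels while the spatial size is preserved.
if __name__ == '__main__':
    fire = Fire(96, 16, 64, 64)
    y = fire(torch.randn(1, 96, 54, 54))
    print(y.shape)  # torch.Size([1, 128, 54, 54])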