LeNet
5x5 Conv(6), pad 2: a 5x5 convolution kernel with 6 output channels and padding 2. Since (f - 1) / 2 == 2 for f = 5, this is "same" padding, so the output height and width match the input.
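A standalone check of that claim (a minimal sketch added here; the 28x28 input is just illustrative):

import torch
from torch import nn

conv = nn.Conv2d(1, 6, kernel_size=5, padding=2)  # padding = (5 - 1) / 2
x = torch.rand(1, 1, 28, 28)
print(conv(x).shape)  # torch.Size([1, 6, 28, 28]): height and width unchanged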
Mnemonic: two (conv + average-pool) stages, then three fully connected layers.
import torch
from torch import nn

class Reshape(nn.Module):
    def forward(self, x):
        # Reshape flat inputs to (batch, 1, 28, 28) images
        return x.reshape(-1, 1, 28, 28)

net = nn.Sequential(
    Reshape(),
    nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Flatten(),
    nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),
    nn.Linear(120, 84), nn.Sigmoid(),
    nn.Linear(84, 10)
)
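To see where the 16 * 5 * 5 flatten width comes from, here is a layer-by-layer shape trace (an added check, not part of the original notes):

X = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape:', X.shape)

The first pool takes 28x28 to 14x14, the unpadded 5x5 conv gives 10x10, and the second pool gives 5x5, hence 16 * 5 * 5 = 400 inputs to the first linear layer.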
AlexNet
Mnemonic: two (conv + max-pool) stages, then three convs + one max-pool, then three fully connected layers.
net = nn.Sequential(
    # Use a larger 11x11 window to capture objects. The stride of 4
    # reduces the output height and width, and the number of output
    # channels is far larger than in LeNet.
    nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),
    # Shrink the convolution window; padding of 2 keeps the height and
    # width unchanged while the number of output channels grows.
    nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),
    # Three consecutive convolution layers with a smaller window.
    # Except for the last one, the number of output channels keeps
    # growing. No pooling after the first two of these layers, so the
    # height and width are preserved.
    nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),
    nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),
    nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2), nn.Flatten(),
    # The fully connected layers are several times wider than LeNet's;
    # dropout mitigates overfitting.
    nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),
    nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),
    # Output layer: 10 classes for Fashion-MNIST rather than the
    # paper's 1000.
    nn.Linear(4096, 10)
)
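The same trick verifies the 6400 flatten width, assuming the usual 224x224 resized Fashion-MNIST input (this check is an addition, not from the original notes):

X = torch.rand(size=(1, 1, 224, 224), dtype=torch.float32)
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape:', X.shape)

The final max-pool leaves 256 channels of 5x5 maps, i.e. 256 * 5 * 5 = 6400 features.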
VGG
VGG builds deep convolutional networks from reusable convolution blocks; varying the number of blocks and their hyperparameters yields variants of different complexity.
Mnemonic: each block is n convs + one max-pool; stack several blocks, then three fully connected layers.
def vgg_block(num_convs, in_channel, out_channel):
    # num_convs 3x3 same-padding convolutions, then one 2x2 max-pool
    layer = []
    for _ in range(num_convs):
        layer.append(nn.Conv2d(in_channel, out_channel, kernel_size=3, padding=1))
        layer.append(nn.ReLU())
        in_channel = out_channel
    layer.append(nn.MaxPool2d(2, stride=2))
    return nn.Sequential(*layer)
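Each 3x3 convolution uses padding 1, so a block changes only the channel count until the final max-pool halves the height and width. A quick check (added here; the 56x56 input is illustrative):

blk = vgg_block(2, 64, 128)
X = torch.rand(1, 64, 56, 56)
print(blk(X).shape)  # torch.Size([1, 128, 28, 28])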
# Each pair is (num_convs, out_channels) for one block; 8 convs + 3 FC = VGG-11
conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))

def vgg(conv_arch):
    in_channel = 1
    conv_a = []
    for num, out_channel in conv_arch:
        conv_a.append(vgg_block(num, in_channel, out_channel))
        in_channel = out_channel
    # Five pooling layers halve a 224x224 input five times, leaving 7x7 maps
    return nn.Sequential(*conv_a, nn.Flatten(),
                         nn.Linear(out_channel * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5),
                         nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),
                         nn.Linear(4096, 10))
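A dummy forward pass (added as a sanity check, not part of the original notes) confirms that five halvings shrink a 224x224 input to 512 x 7 x 7 before the flatten:

net = vgg(conv_arch)
X = torch.rand(size=(1, 1, 224, 224))
for blk in net:
    X = blk(X)
    print(blk.__class__.__name__, 'output shape:', X.shape)

Since complexity is controlled entirely by conv_arch, a cheaper variant only needs a different tuple; for instance, a hypothetical quarter-width configuration: small_conv_arch = [(num, ch // 4) for num, ch in conv_arch], then vgg(small_conv_arch).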