The design idea behind VGG is to go deeper and larger than AlexNet.
A VGG block packages the "several convolutional layers followed by one pooling layer" pattern from AlexNet's later stages into a reusable unit: a configurable number of 3×3 convolutions (each followed by ReLU), then a 2×2 max-pooling layer.
VGG can therefore be viewed as a larger, deeper AlexNet built by repeating such blocks.
Networks Using Blocks (VGG)
import torch
from torch import nn
from d2l import torch as d2l
def vgg_block(num_convs, in_channels, out_channels):
    """Stack num_convs 3x3 convolutions (each with ReLU), then a 2x2 max-pooling layer."""
    layers = []
    for _ in range(num_convs):
        layers.append(nn.Conv2d(
            in_channels, out_channels, kernel_size=3, padding=1))
        layers.append(nn.ReLU())
        in_channels = out_channels
    # Halve the height and width at the end of the block
    layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    return nn.Sequential(*layers)
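As a quick sanity check, a single block only changes the channel count and halves the height and width. A minimal sketch, assuming a dummy 1×1×224×224 input:

blk = vgg_block(2, 1, 64)        # 2 convolutions, 1 input channel, 64 output channels
x = torch.randn(1, 1, 224, 224)  # dummy input used only for shape checking
print(blk(x).shape)              # expected: torch.Size([1, 64, 112, 112])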
The VGG Network
# Each pair is (number of convolutions, output channels) for one VGG block;
# this configuration is VGG-11 (8 convolutional layers + 3 fully connected layers)
conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))

def vgg(conv_arch):
    conv_blks = []
    in_channels = 1  # Fashion-MNIST images have a single channel
    for (num_convs, out_channels) in conv_arch:
        conv_blks.append(vgg_block(num_convs, in_channels, out_channels))
        in_channels = out_channels
    return nn.Sequential(
        *conv_blks, nn.Flatten(),
        # After five blocks a 224x224 input has been reduced to 7x7
        nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(),
        nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),
        nn.Dropout(0.5), nn.Linear(4096, 10))

net = vgg(conv_arch)

# Print the output shape of every top-level block for a dummy input
x = torch.randn(size=(1, 1, 224, 224))
for blk in net:
    x = blk(x)
    print(blk.__class__.__name__, 'output shape:\t', x.shape)
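Each of the five blocks halves the height and width, so a 224×224 input ends up as 512×7×7 before flattening, which is why the first fully connected layer takes out_channels * 7 * 7 inputs. To back up the "larger than AlexNet" claim, a minimal sketch that counts the trainable parameters of net (roughly 129 million for this configuration):

num_params = sum(p.numel() for p in net.parameters())  # total trainable parameters
print(f'{num_params / 1e6:.1f}M parameters')            # about 128.8M for VGG-11 as built above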
Model Training
# Fashion-MNIST images (28x28) are resized to 224x224 to match VGG's expected input
lr, num_epochs, batch_size = 0.05, 10, 128
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=224)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
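Training the full VGG-11 on Fashion-MNIST is slow; the d2l book therefore builds a channel-reduced variant before training. A sketch of that trick, assuming a reduction ratio of 4:

ratio = 4
small_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]  # shrink every block's channel count
net = vgg(small_conv_arch)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())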