The essence is the NiN block, which uses 1×1 convolutional layers in place of fully connected layers.
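To see why a 1×1 convolution can stand in for a fully connected layer, here is a minimal sketch (the 3-in/8-out channel sizes are arbitrary, chosen only for illustration): a 1×1 convolution applies the same linear map independently at every pixel.

import torch
from torch import nn

conv = nn.Conv2d(3, 8, kernel_size=1)
fc = nn.Linear(3, 8)
# Copy the 1x1 conv's parameters into the linear layer so the two match exactly
fc.weight.data = conv.weight.data.reshape(8, 3)
fc.bias.data = conv.bias.data

X = torch.rand(1, 3, 4, 4)
Y1 = conv(X)
# Move channels last, apply the linear layer at each pixel, move channels back
Y2 = fc(X.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
print(torch.allclose(Y1, Y2))  # True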
import torch
from torch import nn
from d2l import torch as d2l
# GPU environment check
print(torch.cuda.device_count())
print(torch.cuda.is_available())
print(torch.backends.cudnn.is_available())
print(torch.version.cuda)
print(torch.backends.cudnn.version())
1
True
True
11.3
8200
Network architecture
def nin_block(in_channels, out_channels, kernel_size, strides, padding):
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),
        # The user-specified kernel_size, strides, and padding apply only to this first convolution
        nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(),
        # A 1x1 convolution after the ReLU (default stride 1, padding 0): it keeps the
        # spatial size, and the channel count stays the same since the input and output
        # channels are both out_channels
        nn.Conv2d(out_channels, out_channels, kernel_size=1),
        # One more 1x1 convolution, same as the one above
        nn.ReLU()
        # Followed by a final activation
    )
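A quick shape check on a single block, using the hyperparameters of the first block in the model below (a sketch, not part of the original notebook):

blk = nin_block(1, 96, kernel_size=11, strides=4, padding=0)
X = torch.rand(size=(1, 1, 224, 224))
print(blk(X).shape)  # torch.Size([1, 96, 54, 54]); the two 1x1 convolutions keep the spatial size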
The NiN model
net = nn.Sequential(
    nin_block(1, 96, kernel_size=11, strides=4, padding=0),
    # in_channels=1 because the images are grayscale; 96 comes from AlexNet --
    # the AlexNet convolutions are carried over here, each followed by 1x1 convolutions
    nn.MaxPool2d(3, stride=2),
    # Halves the height and width
    nin_block(96, 256, kernel_size=5, strides=1, padding=2),
    # Channels increase to 256
    nn.MaxPool2d(3, stride=2),
    nin_block(256, 384, kernel_size=3, strides=1, padding=1),
    # Unlike AlexNet, no extra convolutional layers are stacked after this block
    nn.MaxPool2d(3, stride=2),
    nn.Dropout(0.5),
    nin_block(384, 10, kernel_size=3, strides=1, padding=1),
    # Reduce the channels to 10, the number of Fashion-MNIST classes;
    # change this output channel count to match your own task
    nn.AdaptiveAvgPool2d((1, 1)),
    # Global average pooling: (1, 1) means the height and width both become 1
    nn.Flatten()
    # Flatten to (batch, 10), ready to feed straight into softmax
)
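The last two layers, AdaptiveAvgPool2d((1, 1)) plus Flatten, simply average each channel over all spatial positions. A minimal sketch verifying the equivalence (shapes chosen arbitrarily for illustration):

pool_flatten = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten())
X = torch.rand(2, 10, 5, 5)
# Global average pooling + flatten equals taking the mean over the H and W dimensions
print(torch.allclose(pool_flatten(X), X.mean(dim=(2, 3))))  # True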
Check the output shape of each block
X = torch.rand(size=(1, 1, 224, 224))
for layer in net:
    X = layer(X)  # I missed this line at first, so every layer reported 224*224
    print(layer.__class__.__name__, 'output shape:\t', X.shape)
Sequential output shape:	 torch.Size([1, 96, 54, 54])   (input is 224×224; the first NiN block brings the height and width straight down to 54×54)
MaxPool2d output shape:	 torch.Size([1, 96, 26, 26])
Sequential output shape:	 torch.Size([1, 256, 26, 26])   (the second NiN block outputs 26×26)
MaxPool2d output shape:	 torch.Size([1, 256, 12, 12])   (max pooling keeps the channel count but halves the height and width)
Sequential output shape:	 torch.Size([1, 384, 12, 12])
MaxPool2d output shape:	 torch.Size([1, 384, 5, 5])
Dropout output shape:	 torch.Size([1, 384, 5, 5])
Sequential output shape:	 torch.Size([1, 10, 5, 5])   (the channels drop from 384 to 10, the number of classes I need)
AdaptiveAvgPool2d output shape:	 torch.Size([1, 10, 1, 1])
Flatten output shape:	 torch.Size([1, 10])
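The 54 and 26 above follow from the standard output-size formula ⌊(n + 2p − k)/s⌋ + 1. A quick sanity check (a minimal sketch; the helper conv_out is mine, not from d2l):

def conv_out(n, k, s, p):
    # floor((n + 2p - k) / s) + 1
    return (n + 2 * p - k) // s + 1

print(conv_out(224, k=11, s=4, p=0))  # 54: first convolution of the first NiN block
print(conv_out(54, k=3, s=2, p=0))    # 26: the 3x3, stride-2 max pooling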
Train the model
# Train the model
lr, num_epochs, batch_size = 0.1, 10, 128
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=224)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
loss 0.349, train acc 0.869, test acc 0.869
1772.7 examples/sec on cuda:0
Training on the GPU is indeed much faster.