1. How NiN came about: the improvement process and core idea
· The problem with fully connected layers: they dominate a model's memory footprint and bandwidth, and they are prone to overfitting (a rough comparison follows below).
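To put approximate numbers on that bullet: a dense classification head like AlexNet's costs tens of millions of parameters, while NiN's final block plus global average pooling costs a few tens of thousands. A back-of-the-envelope sketch (the dense-head dimensions follow d2l's AlexNet, biases are ignored, and the figures are illustrative assumptions, not measurements):

# Dense head: 6400 flattened conv features -> 4096 -> 4096 -> 10 classes
fc_params = 6400 * 4096 + 4096 * 4096 + 4096 * 10   # about 43 million
# NiN head: nin_block(384, 10, kernel_size=3) = one 3x3 conv + two 1x1 convs
nin_params = 384 * 10 * 3 * 3 + 10 * 10 + 10 * 10   # about 35 thousand
print(f'dense head: {fc_params:,}  NiN head: {nin_params:,}')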
2. Studying the NiN architecture
import torch
from torch import nn
from d2l import torch as d2l

def nin_block(in_channels, out_channels, kernel_size, strides, padding):
    # A NiN block: one ordinary convolution followed by two 1x1 convolutions,
    # each acting as a per-pixel fully connected layer
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),
        nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU())

net = nn.Sequential(
    nin_block(1, 96, kernel_size=11, strides=4, padding=0),
    nn.MaxPool2d(3, stride=2),
    nin_block(96, 256, kernel_size=5, strides=1, padding=2),
    nn.MaxPool2d(3, stride=2),
    nin_block(256, 384, kernel_size=3, strides=1, padding=1),
    nn.MaxPool2d(3, stride=2),
    nn.Dropout(0.5),
    # There are 10 label classes
    nin_block(384, 10, kernel_size=3, strides=1, padding=1),
    nn.AdaptiveAvgPool2d((1, 1)),
    # Turn the 4D output into a 2D output of shape (batch size, 10)
    nn.Flatten())

# Create a sample input to inspect the output shape of each block.
X = torch.rand(size=(1, 1, 224, 224))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape:\t', X.shape)
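The two kernel_size=1 convolutions inside nin_block are what put the "network in" Network in Network: a 1x1 convolution is exactly a fully connected layer applied independently at every pixel position, which is how NiN avoids a giant dense head (the shape check above ends with a Flatten output of torch.Size([1, 10])). A minimal sketch verifying that equivalence; the weight copying between Conv2d and Linear is set up by hand purely for illustration:

import torch
from torch import nn

# A 1x1 convolution and a Linear layer sharing the same weights give the
# same per-pixel outputs, so a 1x1 conv is a per-pixel fully connected layer.
conv = nn.Conv2d(96, 96, kernel_size=1)
fc = nn.Linear(96, 96)
fc.weight.data = conv.weight.data.reshape(96, 96)  # (out, in, 1, 1) -> (out, in)
fc.bias.data = conv.bias.data

X = torch.rand(1, 96, 54, 54)
Y_conv = conv(X)
# Move channels last, apply the Linear layer at every pixel, move channels back
Y_fc = fc(X.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
print(torch.allclose(Y_conv, Y_fc, atol=1e-6))  # True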
3. Model training
import torch
from torch import nn
from d2l import torch as d2l

def nin_block(in_channels, out_channels, kernel_size, strides, padding):
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),
        nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU())

net = nn.Sequential(
    nin_block(1, 96, kernel_size=11, strides=4, padding=0),
    nn.MaxPool2d(3, stride=2),
    nin_block(96, 256, kernel_size=5, strides=1, padding=2),
    nn.MaxPool2d(3, stride=2),
    nin_block(256, 384, kernel_size=3, strides=1, padding=1),
    nn.MaxPool2d(3, stride=2),
    nn.Dropout(0.5),
    # There are 10 label classes
    nin_block(384, 10, kernel_size=3, strides=1, padding=1),
    # Global average pooling: spatial size shrinks to 1x1
    nn.AdaptiveAvgPool2d((1, 1)),
    # Turn the 4D output into a 2D output of shape (batch size, 10)
    nn.Flatten())  # simply removes the 1x1 spatial dimensions

'''
# Create a sample input to inspect the output shape of each block.
X = torch.rand(size=(1, 1, 224, 224))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape:\t', X.shape)
'''

lr, num_epochs, batch_size = 0.1, 10, 128
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=224)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
d2l.plt.show()
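If the d2l package is unavailable, the d2l.train_ch6 call can be replaced by a plain PyTorch loop. A minimal sketch under that assumption, using torchvision for Fashion-MNIST and the same hyperparameters (SGD, lr=0.1, 10 epochs, batch size 128); test-set evaluation is omitted for brevity:

import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms

# Resize 28x28 Fashion-MNIST images to the 224x224 input the network expects
trans = transforms.Compose([transforms.Resize(224), transforms.ToTensor()])
train_ds = torchvision.datasets.FashionMNIST(
    root='./data', train=True, transform=trans, download=True)
train_loader = DataLoader(train_ds, batch_size=128, shuffle=True)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net.to(device)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)

for epoch in range(10):
    net.train()
    for X, y in train_loader:
        X, y = X.to(device), y.to(device)
        optimizer.zero_grad()
        loss = loss_fn(net(X), y)
        loss.backward()
        optimizer.step()
    print(f'epoch {epoch + 1}, last batch loss {loss.item():.3f}')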