The underlying theory and background are all covered in the book, so I won't repeat them here; this post is just a record and share of the code.
The code below is not directly runnable on its own: functions reused many times during the learning process have been collected into a utility package, described in MxNet学习——自定义工具包 (MXNet Learning: Custom Toolkit).
Combine the two and the code runs.
# -------------------------------------------------------------------------------
# Description: GoogLeNet, networks with parallel concatenations
# Reference:
# Author: Sophia
# Date: 2021/3/11
# -------------------------------------------------------------------------------
from IPython import display
from mxnet import autograd, nd, init, gluon
from mxnet.gluon import data as gdata, loss as gloss, nn
import random, sys, time, matplotlib.pyplot as plt, mxnet as mx, os
from plt_so import *
'''
Inception block
'''
class Inception(nn.Block):
    def __init__(self, c1, c2, c3, c4, **kwargs):
        super(Inception, self).__init__(**kwargs)
        # Path 1: a single 1x1 convolutional layer
        self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu')
        # Path 2: 1x1 convolutional layer followed by a 3x3 convolutional layer
        self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu')
        self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu')
        # Path 3: 1x1 convolutional layer followed by a 5x5 convolutional layer
        self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu')
        self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu')
        # Path 4: 3x3 max pooling layer followed by a 1x1 convolutional layer
        self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1)
        self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu')

    def forward(self, x):
        p1 = self.p1_1(x)
        p2 = self.p2_2(self.p2_1(x))
        p3 = self.p3_2(self.p3_1(x))
        p4 = self.p4_2(self.p4_1(x))
        return nd.concat(p1, p2, p3, p4, dim=1)  # concatenate the outputs along the channel dimension
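# A quick sanity check (a sketch of my own, kept commented out like the shape
# check further below): an Inception block with c1=64, c2=(96, 128),
# c3=(16, 32), c4=32 should output 64 + 128 + 32 + 32 = 256 channels while
# preserving the spatial size, since every path pads so height/width are unchanged.
# blk = Inception(64, (96, 128), (16, 32), 32)
# blk.initialize()
# X = nd.random.uniform(shape=(1, 192, 28, 28))
# print(blk(X).shape)  # expected: (1, 256, 28, 28)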
'''
GoogLeNet model
'''
b1 = nn.Sequential()
b1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'),
       nn.MaxPool2D(pool_size=3, strides=2, padding=1))
b2 = nn.Sequential()
b2.add(nn.Conv2D(64, kernel_size=1, activation='relu'),
       nn.Conv2D(192, kernel_size=3, padding=1, activation='relu'),
       nn.MaxPool2D(pool_size=3, strides=2, padding=1))
b3 = nn.Sequential()
b3.add(Inception(64, (96, 128), (16, 32), 32),
       Inception(128, (128, 192), (32, 96), 64),
       nn.MaxPool2D(pool_size=3, strides=2, padding=1))
b4 = nn.Sequential()
b4.add(Inception(192, (96, 208), (16, 48), 64),
       Inception(160, (112, 224), (24, 64), 64),
       Inception(128, (128, 256), (24, 64), 64),
       Inception(112, (144, 288), (32, 64), 64),
       Inception(256, (160, 320), (32, 128), 128),
       nn.MaxPool2D(pool_size=3, strides=2, padding=1))
b5 = nn.Sequential()
b5.add(Inception(256, (160, 320), (32, 128), 128),
       Inception(384, (192, 384), (48, 128), 128),
       nn.GlobalAvgPool2D())
net = nn.Sequential()
net.add(b1, b2, b3, b4, b5, nn.Dense(10))
# X = nd.random.uniform(shape=(1, 1, 224, 224))
# net.initialize()
# print(net)
# for layer in net:
#     X = layer(X)
#     print(layer.name, 'output shape:\t', X.shape)
# Output:
# Sequential(
#   (0): Sequential(
#     (0): Conv2D(None -> 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), Activation(relu))
#     (1): MaxPool2D(size=(3, 3), stride=(2, 2), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
#   )
#   (1): Sequential(
#     (0): Conv2D(None -> 64, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#     (1): Conv2D(None -> 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), Activation(relu))
#     (2): MaxPool2D(size=(3, 3), stride=(2, 2), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
#   )
#   (2): Sequential(
#     (0): Inception(
#       (p1_1): Conv2D(None -> 64, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_1): Conv2D(None -> 96, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_2): Conv2D(None -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), Activation(relu))
#       (p3_1): Conv2D(None -> 16, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p3_2): Conv2D(None -> 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), Activation(relu))
#       (p4_1): MaxPool2D(size=(3, 3), stride=(1, 1), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
#       (p4_2): Conv2D(None -> 32, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#     )
#     (1): Inception(
#       (p1_1): Conv2D(None -> 128, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_1): Conv2D(None -> 128, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_2): Conv2D(None -> 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), Activation(relu))
#       (p3_1): Conv2D(None -> 32, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p3_2): Conv2D(None -> 96, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), Activation(relu))
#       (p4_1): MaxPool2D(size=(3, 3), stride=(1, 1), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
#       (p4_2): Conv2D(None -> 64, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#     )
#     (2): MaxPool2D(size=(3, 3), stride=(2, 2), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
#   )
#   (3): Sequential(
#     (0): Inception(
#       (p1_1): Conv2D(None -> 192, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_1): Conv2D(None -> 96, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_2): Conv2D(None -> 208, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), Activation(relu))
#       (p3_1): Conv2D(None -> 16, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p3_2): Conv2D(None -> 48, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), Activation(relu))
#       (p4_1): MaxPool2D(size=(3, 3), stride=(1, 1), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
#       (p4_2): Conv2D(None -> 64, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#     )
#     (1): Inception(
#       (p1_1): Conv2D(None -> 160, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_1): Conv2D(None -> 112, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_2): Conv2D(None -> 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), Activation(relu))
#       (p3_1): Conv2D(None -> 24, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p3_2): Conv2D(None -> 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), Activation(relu))
#       (p4_1): MaxPool2D(size=(3, 3), stride=(1, 1), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
#       (p4_2): Conv2D(None -> 64, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#     )
#     (2): Inception(
#       (p1_1): Conv2D(None -> 128, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_1): Conv2D(None -> 128, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_2): Conv2D(None -> 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), Activation(relu))
#       (p3_1): Conv2D(None -> 24, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p3_2): Conv2D(None -> 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), Activation(relu))
#       (p4_1): MaxPool2D(size=(3, 3), stride=(1, 1), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
#       (p4_2): Conv2D(None -> 64, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#     )
#     (3): Inception(
#       (p1_1): Conv2D(None -> 112, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_1): Conv2D(None -> 144, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_2): Conv2D(None -> 288, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), Activation(relu))
#       (p3_1): Conv2D(None -> 32, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p3_2): Conv2D(None -> 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), Activation(relu))
#       (p4_1): MaxPool2D(size=(3, 3), stride=(1, 1), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
#       (p4_2): Conv2D(None -> 64, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#     )
#     (4): Inception(
#       (p1_1): Conv2D(None -> 256, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_1): Conv2D(None -> 160, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_2): Conv2D(None -> 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), Activation(relu))
#       (p3_1): Conv2D(None -> 32, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p3_2): Conv2D(None -> 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), Activation(relu))
#       (p4_1): MaxPool2D(size=(3, 3), stride=(1, 1), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
#       (p4_2): Conv2D(None -> 128, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#     )
#     (5): MaxPool2D(size=(3, 3), stride=(2, 2), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
#   )
#   (4): Sequential(
#     (0): Inception(
#       (p1_1): Conv2D(None -> 256, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_1): Conv2D(None -> 160, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_2): Conv2D(None -> 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), Activation(relu))
#       (p3_1): Conv2D(None -> 32, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p3_2): Conv2D(None -> 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), Activation(relu))
#       (p4_1): MaxPool2D(size=(3, 3), stride=(1, 1), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
#       (p4_2): Conv2D(None -> 128, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#     )
#     (1): Inception(
#       (p1_1): Conv2D(None -> 384, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_1): Conv2D(None -> 192, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p2_2): Conv2D(None -> 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), Activation(relu))
#       (p3_1): Conv2D(None -> 48, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#       (p3_2): Conv2D(None -> 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), Activation(relu))
#       (p4_1): MaxPool2D(size=(3, 3), stride=(1, 1), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
#       (p4_2): Conv2D(None -> 128, kernel_size=(1, 1), stride=(1, 1), Activation(relu))
#     )
#     (2): GlobalAvgPool2D(size=(1, 1), stride=(1, 1), padding=(0, 0), ceil_mode=True, global_pool=True, pool_type=avg, layout=NCHW)
#   )
#   (5): Dense(None -> 10, linear)
# )
# sequential0 output shape: (1, 64, 56, 56)
# sequential1 output shape: (1, 192, 28, 28)
# sequential2 output shape: (1, 480, 14, 14)
# sequential3 output shape: (1, 832, 7, 7)
# sequential4 output shape: (1, 1024, 1, 1)
# dense0 output shape: (1, 10)
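# Note how the shapes line up: 224 -> 112 after the stride-2 7x7 convolution,
# then each block's stride-2 max pooling halves the spatial size
# (56 -> 28 -> 14 -> 7) until global average pooling reduces it to 1x1. Each
# channel count is the sum of the last Inception block's four paths, e.g.
# 480 = 128 + 192 + 96 + 64, 832 = 256 + 320 + 128 + 128, and
# 1024 = 384 + 384 + 128 + 128.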
'''
Train the model
'''
lr, num_epochs, batch_size, ctx = 0.1, 5, 128, try_gpu()  # try_gpu comes from the plt_so toolkit
net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
train_iter, test_iter = load_data_fashion_mnist_ch5(batch_size, resize=224)  # resize Fashion-MNIST to 224x224
train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)
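# A hedged sketch of what might follow training: save the learned parameters
# and spot-check predictions on one test batch. The filename
# 'googlenet.params' is illustrative, not from the source.
# net.save_parameters('googlenet.params')
# for X, y in test_iter:
#     y_hat = net(X.as_in_context(ctx)).argmax(axis=1)
#     print('predicted:', y_hat[:10].asnumpy(), 'actual:', y[:10].asnumpy())
#     break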