05 Keras Basics: Multi-Input/Multi-Output Models (Part 2) - GoogLeNet

Model structure

Note

  1. Local Response Normalization (LRN) is rarely used anymore; BatchNormalization has taken its place.
  2. Because the input images differ (CIFAR-10 instead of ImageNet), some of the parameters change accordingly.
  3. We only implement the **GoogLeNet (v1)** version.
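For reference, a minimal sketch of the Conv + BatchNormalization pattern that stands in for LRN here (it mirrors the layer arrangement in the front() function shown later):

from keras.layers import Input, Conv2D, MaxPooling2D, BatchNormalization

inp = Input(shape=(32, 32, 3))
x = Conv2D(64, kernel_size=5, padding='same', activation='relu')(inp)
x = MaxPooling2D(pool_size=3, strides=2, padding='same')(x)
x = BatchNormalization(axis=-1)(x)  # where the original paper used LocalRespNorm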

The Inception module

def inception(x, filter_size, layer_number):
    """Inception module built from 1x1, 3x3, 5x5 convolutions and max pooling.

    Args:
        x (Tensor): input tensor
        filter_size: list of filter counts, 6 entries in total
        layer_number: index of this Inception module

    Returns:
        The tensor produced by the Inception module.
    """
    layer_number = str(layer_number)
    with K.name_scope('Inception_' + layer_number):
        # 1x1 convolution
        with K.name_scope("conv_1x1"):
            conv_1x1 = Conv2D(filters=filter_size[0], kernel_size=(1, 1),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_1x1' + layer_number)(x)
        # 3x3 bottleneck layer followed by the 3x3 convolution
        with K.name_scope('conv_3x3'):
            conv_3x3 = Conv2D(filters=filter_size[1], kernel_size=(1, 1),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_3x3_bottleneck' + layer_number)(x)
            conv_3x3 = Conv2D(filters=filter_size[2], kernel_size=(3, 3),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_3x3' + layer_number)(conv_3x3)
        with K.name_scope('conv_5x5'):
            # 5x5 bottleneck layer followed by the 5x5 convolution
            conv_5x5 = Conv2D(filters=filter_size[3], kernel_size=(1, 1),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_5x5_bottleneck' + layer_number)(x)
            conv_5x5 = Conv2D(filters=filter_size[4], kernel_size=(5, 5),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_5x5' + layer_number)(conv_5x5)
        with K.name_scope('Max_Conv'):
            # max pooling followed by a 1x1 bottleneck layer
            max_pool = MaxPooling2D(pool_size=3, strides=1, padding='same',
                                    name='maxpool' + layer_number)(x)
            max_pool = Conv2D(filters=filter_size[5], kernel_size=(1, 1),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='maxpool_conv1x1' + layer_number)(max_pool)
        with K.name_scope('concatenate'):
            # height/width are unchanged across all branches,
            # so the tensors are joined along the channel axis
            x = concatenate([conv_1x1, conv_3x3, conv_5x5, max_pool], axis=-1)
    return x
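A quick way to sanity-check the module (a sketch; it assumes the imports and the L2_RATE constant from the full listing below): height and width pass through unchanged, and the output channel count is the sum of the four branch widths, i.e. filter_size[0] + filter_size[2] + filter_size[4] + filter_size[5].

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(32, 32, 192))
out = inception(inp, filter_size=[64, 96, 128, 16, 32, 32], layer_number=0)
Model(inp, out).summary()
# output shape: (None, 32, 32, 256), since 64 + 128 + 32 + 32 = 256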

Auxiliary classifier branch

def aux_classifier(x, filter_size, layer_number):
    """Auxiliary classifier branch that outputs a softmax prediction.

    Args:
        x: input tensor
        filter_size: list of filter counts, 2 entries in total
        layer_number: index of this auxiliary branch

    Returns:
        The softmax classification tensor of the auxiliary branch.
    """
    layer_number = str(layer_number)
    with K.name_scope('aux_classifier' + layer_number):
        # average pooling layer
        x = AveragePooling2D(pool_size=3, strides=2, padding='same',
                             name='AveragePooling2D' + layer_number)(x)
        # (0) 1x1 convolution
        x = Conv2D(filters=filter_size[0], kernel_size=1, strides=1,
                   padding='valid', activation='relu',
                   kernel_regularizer=l2(L2_RATE),
                   name='aux_conv' + layer_number)(x)
        # flatten
        x = Flatten()(x)
        # (1) fully connected layer
        x = Dense(units=filter_size[1], activation='relu',
                  kernel_regularizer=l2(L2_RATE),
                  name='aux_dense1_' + layer_number)(x)
        x = Dropout(0.7)(x)
        # (2) softmax output layer
        x = Dense(units=NUM_CLASS, activation='softmax',
                  kernel_regularizer=l2(L2_RATE),
                  name='aux_output' + layer_number)(x)
    return x
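As a sketch of how the shapes evolve inside this branch, here is a run on an 8x8x512 feature map, which is exactly what the first auxiliary branch receives in the model built below (assumes the constants and imports from the full listing):

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(8, 8, 512))  # what aux1 receives in the model below
out = aux_classifier(inp, [128, 1024], layer_number=0)
Model(inp, out).summary()
# 8x8x512 -> avg pool (stride 2) -> 4x4x512 -> 1x1 conv -> 4x4x128
# -> Flatten -> 2048 -> Dense(1024) -> Dropout -> Dense(10, softmax)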

The front of the network

def front(x, filter_size):
    """Front (stem) of the network, before the first Inception module."""
    # (0) conv2d
    x = Conv2D(filters=filter_size[0], kernel_size=5, strides=1,
               padding='same', activation='relu',
               kernel_regularizer=l2(L2_RATE))(x)
    x = MaxPooling2D(pool_size=3, strides=2, padding='same')(x)
    x = BatchNormalization(axis=-1)(x)
    # (1) conv2d
    x = Conv2D(filters=filter_size[1], kernel_size=1, strides=1,
               padding='same', activation='relu',
               kernel_regularizer=l2(L2_RATE))(x)
    # (2) conv2d
    x = Conv2D(filters=filter_size[2], kernel_size=3, strides=1,
               padding='same', activation='relu',
               kernel_regularizer=l2(L2_RATE))(x)
    x = BatchNormalization(axis=-1)(x)
    x = MaxPooling2D(pool_size=3, strides=1, padding='same')(x)
    return x
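Tracing the shapes for a CIFAR-10 input (a sketch, using the filter sizes that the model-building code below passes in):

from keras.layers import Input

inp = Input(shape=(32, 32, 3))
out = front(inp, [64, 64, 192])
# 32x32x3 -> 5x5 conv -> 32x32x64 -> max pool (stride 2) -> 16x16x64
# -> 1x1 conv -> 16x16x64 -> 3x3 conv -> 16x16x192
# -> max pool (stride 1) -> 16x16x192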

Building the model

Key points

  1. Multi-input/multi-output models
  2. GoogLeNet (v1)
  3. K.name_scope()

Note

  • The model's parameters are not exactly those of the paper, because the input is the CIFAR-10 dataset.
  • Training uses neither a generator nor image augmentation; feeding a multi-output model through fit_generator takes a little extra work, so we skip it in the training code (a sketch of one way to do it follows this list).
  • TensorBoard is used to inspect the computation graph and monitor training progress.
  • We do not save the trained model or perform similar bookkeeping.
  • K.name_scope works fine with Keras: use it to organize the network structure, then simply pass a TensorBoard callback object when training to see the grouped graph.
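For completeness, one way to combine fit_generator with a multi-output model is to wrap an augmenting generator so each batch's labels are repeated once per output. This is only a sketch; the wrapper name multi_output_flow and the augmentation settings are our own, not part of the original post:

from keras.preprocessing.image import ImageDataGenerator

def multi_output_flow(x, y, batch_size):
    """Yield (images, [labels, labels, labels]) batches for a 3-output model."""
    datagen = ImageDataGenerator(horizontal_flip=True,
                                 width_shift_range=0.1,
                                 height_shift_range=0.1)
    for xb, yb in datagen.flow(x, y, batch_size=batch_size):
        yield xb, [yb, yb, yb]  # one copy of the labels per output

# hypothetical usage with the model built below:
# GoogleNet.fit_generator(multi_output_flow(x_train, y_train, BATCH_SIZE),
#                         steps_per_epoch=len(x_train) // BATCH_SIZE,
#                         epochs=EPOCH, callbacks=[tfck])

The full code: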
import keras.backend as K
from keras.datasets import cifar10
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers import concatenate, BatchNormalization, Flatten, Dropout
from keras.regularizers import l2
from keras.utils import to_categorical
from keras.models import Model
from keras.callbacks import TensorBoard
from keras.optimizers import Adam


L2_RATE = 0.002   # L2 regularization coefficient used throughout
NUM_CLASS = 10    # CIFAR-10 has 10 classes
BATCH_SIZE = 128
EPOCH = 10


def inception(x, filter_size, layer_number):
    """Inception module built from 1x1, 3x3, 5x5 convolutions and max pooling.

    Args:
        x (Tensor): input tensor
        filter_size: list of filter counts, 6 entries in total
        layer_number: index of this Inception module

    Returns:
        The tensor produced by the Inception module.
    """
    layer_number = str(layer_number)
    with K.name_scope('Inception_' + layer_number):
        # 1x1 convolution
        with K.name_scope("conv_1x1"):
            conv_1x1 = Conv2D(filters=filter_size[0], kernel_size=(1, 1),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_1x1' + layer_number)(x)
        # 3x3 bottleneck layer followed by the 3x3 convolution
        with K.name_scope('conv_3x3'):
            conv_3x3 = Conv2D(filters=filter_size[1], kernel_size=(1, 1),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_3x3_bottleneck' + layer_number)(x)
            conv_3x3 = Conv2D(filters=filter_size[2], kernel_size=(3, 3),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_3x3' + layer_number)(conv_3x3)
        with K.name_scope('conv_5x5'):
            # 5x5 bottleneck layer followed by the 5x5 convolution
            conv_5x5 = Conv2D(filters=filter_size[3], kernel_size=(1, 1),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_5x5_bottleneck' + layer_number)(x)
            conv_5x5 = Conv2D(filters=filter_size[4], kernel_size=(5, 5),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_5x5' + layer_number)(conv_5x5)
        with K.name_scope('Max_Conv'):
            # max pooling followed by a 1x1 bottleneck layer
            max_pool = MaxPooling2D(pool_size=3, strides=1, padding='same',
                                    name='maxpool' + layer_number)(x)
            max_pool = Conv2D(filters=filter_size[5], kernel_size=(1, 1),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='maxpool_conv1x1' + layer_number)(max_pool)
        with K.name_scope('concatenate'):
            # height/width are unchanged; concatenate along the channel axis
            x = concatenate([conv_1x1, conv_3x3, conv_5x5, max_pool], axis=-1)
    return x


def aux_classifier(x, filter_size, layer_number):
    """Auxiliary classifier branch that outputs a softmax prediction.

    Args:
        x: input tensor
        filter_size: list of filter counts, 2 entries in total
        layer_number: index of this auxiliary branch

    Returns:
        The softmax classification tensor of the auxiliary branch.
    """
    layer_number = str(layer_number)
    with K.name_scope('aux_classifier' + layer_number):
        # average pooling layer
        x = AveragePooling2D(pool_size=3, strides=2, padding='same',
                             name='AveragePooling2D' + layer_number)(x)
        # (0) 1x1 convolution
        x = Conv2D(filters=filter_size[0], kernel_size=1, strides=1,
                   padding='valid', activation='relu',
                   kernel_regularizer=l2(L2_RATE),
                   name='aux_conv' + layer_number)(x)
        # flatten
        x = Flatten()(x)
        # (1) fully connected layer
        x = Dense(units=filter_size[1], activation='relu',
                  kernel_regularizer=l2(L2_RATE),
                  name='aux_dense1_' + layer_number)(x)
        x = Dropout(0.7)(x)
        # (2) softmax output layer
        x = Dense(units=NUM_CLASS, activation='softmax',
                  kernel_regularizer=l2(L2_RATE),
                  name='aux_output' + layer_number)(x)
    return x


def front(x, filter_size):
    """Front (stem) of the network, before the first Inception module."""
    # (0) conv2d
    x = Conv2D(filters=filter_size[0], kernel_size=5, strides=1,
               padding='same', activation='relu',
               kernel_regularizer=l2(L2_RATE))(x)
    x = MaxPooling2D(pool_size=3, strides=2, padding='same')(x)
    x = BatchNormalization(axis=-1)(x)
    # (1) conv2d
    x = Conv2D(filters=filter_size[1], kernel_size=1, strides=1,
               padding='same', activation='relu',
               kernel_regularizer=l2(L2_RATE))(x)
    # (2) conv2d
    x = Conv2D(filters=filter_size[2], kernel_size=3, strides=1,
               padding='same', activation='relu',
               kernel_regularizer=l2(L2_RATE))(x)
    x = BatchNormalization(axis=-1)(x)
    x = MaxPooling2D(pool_size=3, strides=1, padding='same')(x)
    return x


def load():
    """Load CIFAR-10, one-hot encode the labels, and scale pixels to [0, 1]."""
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    y_train = to_categorical(y_train, NUM_CLASS)
    y_test = to_categorical(y_test, NUM_CLASS)
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    return x_train, y_train, x_test, y_test


def googlenet_model(input_shape):
    # build the model
    X_Input = Input(shape=input_shape, name='Input')
    X = front(X_Input, [64, 64, 192])
    # Inception_0
    X = inception(X, filter_size=[64, 96, 128, 16, 32, 32], layer_number=0)
    # Inception_1
    X = inception(X, [128, 128, 192, 32, 96, 64], layer_number=1)
    X = MaxPooling2D(pool_size=3, strides=2, padding='same')(X)
    # Inception_2
    X = inception(X, [192, 96, 208, 16, 48, 64], layer_number=2)
    # aux1
    aux_output_1 = aux_classifier(X, [128, 1024], layer_number=1)
    # Inception_3
    X = inception(X, [160, 112, 224, 24, 64, 64], layer_number=3)
    # Inception_4
    X = inception(X, [128, 128, 256, 24, 64, 64], layer_number=4)
    # Inception_5
    X = inception(X, [112, 144, 288, 32, 64, 64], layer_number=5)
    # aux2
    aux_output_2 = aux_classifier(X, [128, 1024], layer_number=2)
    # Inception_6
    X = inception(X, [256, 160, 320, 32, 128, 128], layer_number=6)
    X = MaxPooling2D(pool_size=3, strides=2, padding='same')(X)
    # Inception_7
    X = inception(X, [256, 160, 320, 32, 128, 128], layer_number=7)
    # Inception_8
    X = inception(X, [384, 192, 384, 48, 128, 128], layer_number=8)
    # output head
    X = AveragePooling2D(pool_size=4, strides=1, padding='valid')(X)
    X = Flatten()(X)
    X = Dropout(0.4)(X)
    main_output = Dense(NUM_CLASS, activation='softmax',
                        kernel_regularizer=l2(L2_RATE))(X)
    # define the one-input, three-output model
    model = Model(inputs=X_Input, outputs=[main_output, aux_output_1, aux_output_2])
    return model


if __name__ == '__main__':
    x_train, y_train, x_test, y_test = load()
    input_shape = x_train.shape[1:]
    # create the model
    GoogleNet = googlenet_model(input_shape)
    optimizer = Adam(epsilon=1e-08)
    GoogleNet.compile(optimizer=optimizer, loss='categorical_crossentropy',
                      metrics=['accuracy'], loss_weights=[1, 0.3, 0.3])
    GoogleNet.summary()
    tfck = TensorBoard(log_dir='logs/GoogleNet')
    # every output gets its own copy of the labels
    GoogleNet.fit(x=x_train, y=[y_train, y_train, y_train],
                  validation_data=(x_test, [y_test, y_test, y_test]),
                  epochs=EPOCH, callbacks=[tfck], batch_size=BATCH_SIZE)
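At inference time only the main branch matters; the auxiliary outputs exist purely to inject extra gradient during training, with the total loss being main + 0.3 * aux1 + 0.3 * aux2 per the loss_weights above. A minimal sketch of evaluating just the main head after training (assuming the variables above are in scope):

main_pred, aux1_pred, aux2_pred = GoogleNet.predict(x_test, batch_size=BATCH_SIZE)
main_acc = (main_pred.argmax(axis=-1) == y_test.argmax(axis=-1)).mean()
print('main-branch test accuracy:', main_acc)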