Implementing Google Inception Net in TensorFlow

Following along with the TensorFlow book, I implemented Inception Net (specifically Inception V3). In real applications the network is trained on ImageNet, which takes a very long time, so this post (like the book itself) only benchmarks the forward pass of each batch.

The most distinctive features of Inception Net:

1. A branched structure, with the branches merged back together by concat (see the sketch just after this list)

2. 1x1 convolutions, which combine features across channels at very low cost

3. Batch Normalization (BN), which standardizes each batch of activations so that layer outputs stay close to an N(0, 1) normal distribution
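
To make the branch-and-concat pattern concrete, here is a minimal sketch in the same TF 1.x slim style as the full code below. The sizes and the toy_* scope names are made up purely for illustration:

# A toy two-branch block: a plain 1x1 branch and a 1x1 -> 3x3 branch,
# merged along the channel axis (axis 3 for NHWC tensors).
import tensorflow as tf
slim = tf.contrib.slim

x = tf.random_uniform((8, 35, 35, 192))  # toy NHWC input batch
branch_a = slim.conv2d(x, 64, [1, 1], scope='toy_a')  # cheap cross-channel mixing
branch_b = slim.conv2d(x, 48, [1, 1], scope='toy_b0')
branch_b = slim.conv2d(branch_b, 64, [3, 3], scope='toy_b1')
out = tf.concat([branch_a, branch_b], 3)  # shape: (8, 35, 35, 64 + 64)

Each branch keeps the 35x35 spatial size (slim defaults to stride 1 and SAME padding), so the concat only grows the channel dimension.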

The full code follows:

from datetime import datetime
import time
import math
import tensorflow as tf

# slim is a library that makes building, training, and evaluating networks simpler; think of it as a set of templates
slim = tf.contrib.slim
# A small lambda helper that creates a truncated-normal initializer
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)


# Generates the default parameters for functions used throughout the network, such as the convolution activation function and normalizer
# moving_vars is the collection that gathers the moving-average variables
def inception_v3_arg_scope(weight_decay=0.00004,
                           stddev=0.1,
                           batch_norm_var_collection='moving_vars'):
    # Batch-norm parameters, used to keep the output distribution consistent across layers
    batch_norm_params = {
        'decay': 0.9997,
        'epsilon': 0.001,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
        'variables_collections': {
            'beta': None,
            'gamma': None,
            'moving_mean': [batch_norm_var_collection],
            'moving_variance': [batch_norm_var_collection],
        }
    }

    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay)):
        with slim.arg_scope(
                [slim.conv2d],
                weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
                activation_fn=tf.nn.relu,
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params) as sc:
            return sc

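# A quick illustration (kept as a comment, not part of the network itself):
# inside inception_v3_arg_scope, every slim.conv2d automatically picks up the
# truncated-normal initializer, L2 weight regularization, ReLU activation and
# batch normalization configured above, e.g.
#
#     with slim.arg_scope(inception_v3_arg_scope()):
#         demo = slim.conv2d(tf.random_uniform((1, 32, 32, 3)), 16, [3, 3])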

# Builds the convolutional part of the network
def inception_v3_base(inputs, scope=None):
    # Stores selected intermediate tensors for later use
    end_points = {}

    with tf.variable_scope(scope, 'InceptionV3', [inputs]):
        # Set default stride and padding values for slim.conv2d, slim.max_pool2d and slim.avg_pool2d
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1, padding='VALID'):
            # conv2d(input tensor, output channels, kernel size, stride, padding mode)
            net = slim.conv2d(inputs, 32, [3, 3], stride=2, scope='Conv2d_1a_3X3')
            net = slim.conv2d(net, 32, [3, 3], scope='Conv2d_2a_3X3')
            net = slim.conv2d(net, 64, [3, 3], padding='SAME',
                              scope='Conv2d_2b_3X3')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_3a_3X3')
            # A 1x1 kernel combines features across channels at low cost
            net = slim.conv2d(net, 80, [1, 1], scope='Conv2d_3b_1X1')
            net = slim.conv2d(net, 192, [3, 3], scope='Conv2d_4a_3X3')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_5a_3X3')
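            # For a 299x299x3 input, the stem ends here with a 35x35x192 feature map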

        # First inception module group
        # First inception module
        # The four branches are merged at the end with concat
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1, padding='SAME'):
            with tf.variable_scope('Mixed_5b'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1X1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1X1')
                    branch_1 = slim.conv2d(branch_1, 64, [5, 5],
                                           scope='Conv2d_0b_5X5')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1X1')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3],
                                           scope='Conv2d_0b_3X3')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3],
                                           scope='Conv2d_0c_3X3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3X3')
                    branch_3 = slim.conv2d(branch_3, 32, [1, 1],
                                           scope='Conv2d_0b_1X1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
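                # Output of Mixed_5b: 35x35x(64+64+96+32) = 35x35x256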

            # Second inception module
            with tf.variable_scope('Mixed_5c'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0b_1X1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0b_1X1')
                    branch_1 = slim.conv2d(branch_1, 64, [5, 5],
                                           scope='Conv2d_0c_5X5')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1X1')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3],
                                           scope='Conv2d_0b_3X3')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3],
                                           scope='Conv2d_0c_3X3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3X3')
                    branch_3 = slim.conv2d(branch_3, 64, [1, 1],
                                           scope='Conv2d_0b_1X1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
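                # Output of Mixed_5c: 35x35x(64+64+96+64) = 35x35x288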

            # Third inception module
            with tf.variable_scope('Mixed_5d'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0b_1X1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0b_1X1')
                    branch_1 = slim.conv2d(branch_1, 64, [5, 5],
                                           scope='Conv2d_0c_5X5')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1X1')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3],
                                           scope='Conv2d_0b_3X3')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3],
                                           scope='Conv2d_0c_3X3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3X3')
                    branch_3 = slim.conv2d(branch_3, 64, [1, 1],
                                           scope='Conv2d_0b_1X1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
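                # Output of Mixed_5d: 35x35x288, unchanged from Mixed_5c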

            # Second inception module group
            # First inception module
            with tf.variable_scope('Mixed_6a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 384, [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_1X1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1X1')
                    branch_1 = slim.conv2d(branch_1, 96, [3, 3],
                                           scope='Conv2d_0b_3X3')
                    branch_1 = slim.conv2d(branch_1, 96, [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_1X1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                               scope='MaxPool_1a_3X3')
                net = tf.concat([branch_0, branch_1, branch_2], 3)
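                # Mixed_6a halves the spatial size: 17x17x(384+96+288) = 17x17x768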

            # Second inception module
            with tf.variable_scope('Mixed_6b'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1X1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1X1')
                    branch_1 = slim.conv2d(branch_1, 128, [1, 7],
                                           scope='Conv2d_0b_1X7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1],
                                           scope='Conv2d_0c_7X1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1X1')
                    branch_2 = slim.conv2d(branch_2, 128, [7, 1],
                                           scope='Conv2d_0b_7X1')
                    branch_2 = slim.conv2d(branch_2, 128, [1, 7],
                                           scope='Conv2d_0c_1X7')
                    branch_2 = slim.conv2d(branch_2, 128, [7, 1],
                                           scope='Conv2d_0d_7X1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7],
                                           scope='Conv2d_0e_1X7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3X3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1],
                                           scope='Conv2d_0b_1X1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            # Third inception module
            with tf.variable_scope('Mixed_6c'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1X1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1X1')
                    branch_1 = slim.conv2d(branch_1, 128, [1, 7],
                                           scope='Conv2d_0b_1X7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1],
                                           scope='Conv2d_0c_7X1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1X1')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1],
                                           scope='Conv2d_0b_7X1')
                    branch_2 = slim.conv2d(branch_2, 160, [1, 7],
                                           scope='Conv2d_0c_1X7')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1],
                                           scope='Conv2d_0d_7X1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7],
                                           scope='Conv2d_0e_1X7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3X3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1],
                                           scope='Conv2d_0b_1X1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            # Fourth inception module
            with tf.variable_scope('Mixed_6d'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1X1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1X1')
                    branch_1 = slim.conv2d(branch_1, 128, [1, 7],
                                           scope='Conv2d_0b_1X7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1],
                                           scope='Conv2d_0c_7X1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1X1')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1],
                                           scope='Conv2d_0b_7X1')
                    branch_2 = slim.conv2d(branch_2, 160, [1, 7],
                                           scope='Conv2d_0c_1X7')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1],
                                           scope='Conv2d_0d_7X1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7],
                                           scope='Conv2d_0e_1X7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3X3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1],
                                           scope='Conv2d_0b_1X1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)

            # Fifth inception module
            with tf.variable_scope('Mixed_6e'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1X1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1X1')
                    branch_1 = slim.conv2d(branch_1, 128, [1, 7],
                                           scope='Conv2d_0b_1X7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1],
                                           scope='Conv2d_0c_7X1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1X1')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1],
                                           scope='Conv2d_0b_7X1')
                    branch_2 = slim.conv2d(branch_2, 160, [1, 7],
                                           scope='Conv2d_0c_1X7')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1],
                                           scope='Conv2d_0d_7X1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7],
                                           scope='Conv2d_0e_1X7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3X3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1],
                                           scope='Conv2d_0b_1X1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
                end_points['Mixed_6e'] = net
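                # Mixed_6e stays at 17x17x768 and is saved here to feed the auxiliary classifier later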

            # Third inception module group
            # First inception module
            with tf.variable_scope('Mixed_7a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1X1')
                    branch_0 = slim.conv2d(branch_0, 320, [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_3X3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1X1')
                    branch_1 = slim.conv2d(branch_1, 192, [1, 7],
                                           scope='Conv2d_0b_1X7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1],
                                           scope='Conv2d_0c_7X1')
                    branch_1 = slim.conv2d(branch_1, 192, [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_3X3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                               scope='MaxPool_1a_3X3')
                net = tf.concat([branch_0, branch_1, branch_2], 3)
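                # Mixed_7a halves the spatial size again: 8x8x(320+192+768) = 8x8x1280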

            # Second inception module
            with tf.variable_scope('Mixed_7b'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 320, [1, 1], scope='Conv2d_0a_1X1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1X1')
                    branch_1 = tf.concat([
                        slim.conv2d(branch_1, 384, [1, 3], scope='Conv2d_0b_1X3'),
                        slim.conv2d(branch_1, 384, [3, 1], scope='Conv2d_0b_3X1')
                    ], 3)
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 448, [1, 1], scope='Conv2d_0a_1X1')
                    branch_2 = slim.conv2d(branch_2, 384, [3, 3],
                                           scope='Conv2d_0b_3X3')
                    branch_2 = tf.concat([
                        slim.conv2d(branch_2, 384, [1, 3], scope='Conv2d_0c_1X3'),
                        slim.conv2d(branch_2, 384, [3, 1], scope='Conv2d_0d_3X1')
                    ], 3)
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3X3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1],
                                           scope='Conv2d_0b_1X1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
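                # Output of Mixed_7b: 8x8x(320+768+768+192) = 8x8x2048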

            # Third inception module
            with tf.variable_scope('Mixed_7c'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 320, [1, 1], scope='Conv2d_0a_1X1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1X1')
                    branch_1 = tf.concat([
                        slim.conv2d(branch_1, 384, [1, 3], scope='Conv2d_0b_1X3'),
                        slim.conv2d(branch_1, 384, [3, 1], scope='Conv2d_0b_3X1')
                    ], 3)
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 448, [1, 1], scope='Conv2d_0a_1X1')
                    branch_2 = slim.conv2d(branch_2, 384, [3, 3],
                                           scope='Conv2d_0b_3X3')
                    branch_2 = tf.concat([
                        slim.conv2d(branch_2, 384, [1, 3], scope='Conv2d_0c_1X3'),
                        slim.conv2d(branch_2, 384, [3, 1], scope='Conv2d_0d_3X1')
                    ], 3)
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3X3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1],
                                           scope='Conv2d_0b_1X1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
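                # Output of Mixed_7c: 8x8x2048, the final feature map of the convolutional base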
            return net, end_points


# Builds the complete network: global average pooling, softmax, and the auxiliary logits on top of the base
def inception_v3(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV3'):
    with tf.variable_scope(scope, 'InceptionV3', [inputs, num_classes],
                           reuse=reuse) as scope:
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=is_training):
            net, end_points = inception_v3_base(inputs, scope=scope)

            with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                                stride=1, padding='SAME'):
                # The auxiliary classifier is attached to the Mixed_6e feature map
                aux_logits = end_points['Mixed_6e']
                with tf.variable_scope('AuxLogits'):
                    aux_logits = slim.avg_pool2d(
                        aux_logits, [5, 5], stride=3, padding='VALID',
                        scope='AvgPool_1a_5X5'
                    )
                    aux_logits = slim.conv2d(aux_logits, 128, [1, 1],
                                             scope='Conv2d_1b_1X1')

                    aux_logits = slim.conv2d(
                        aux_logits, 768, [5, 5],
                        weights_initializer=trunc_normal(0.01),
                        padding='VALID', scope='Conv2d_2a_5X5'
                    )
                    aux_logits = slim.conv2d(
                        aux_logits, num_classes, [1, 1], activation_fn=None,
                        normalizer_fn=None, weights_initializer=trunc_normal(0.001),
                        scope='Conv2d_2b_1X1'
                    )
                    if spatial_squeeze:
                        aux_logits = tf.squeeze(aux_logits, [1, 2],
                                                name='SpatialSqueeze')
                    end_points['AuxLogits'] = aux_logits

                with tf.variable_scope('Logits'):
                    net = slim.avg_pool2d(net, [8, 8], padding='VALID',
                                          scope='AvgPool_1a_8X8')
                    net = slim.dropout(net, keep_prob=dropout_keep_prob,
                                       scope='Dropout_1b')
                    end_points['PreLogits'] = net
                    logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                                         normalizer_fn=None, scope='Conv2d_1c_1X1')

                    if spatial_squeeze:
                        logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')

                end_points['Logits'] = logits
                end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
    return logits, end_points


num_batches = 100

# Benchmarks forward-pass performance
def time_tensorflow_run(session, target, info_string):
    num_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0

    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration

    # Report the mean and standard deviation once all timed batches are done
    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
          (datetime.now(), info_string, num_batches, mn, sd))


# Benchmark setup: a batch of random 299x299 RGB images stands in for real ImageNet data
batch_size = 32
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(inception_v3_arg_scope()):
    logits, end_points = inception_v3(inputs, is_training=False)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

time_tensorflow_run(sess, logits, "Forward")

Running the script prints the duration of every tenth forward batch and, once all batches finish, the mean and standard deviation of the per-batch forward time.
