Implementing the InceptionV1 structure (TensorFlow 1.x, tf.layers API)
import tensorflow as tf  # TF 1.x style API (tf.layers / tf.variable_scope)


def Inception_block(x, num_filter_for_each_path, name):
    """Builds one InceptionV1 block.

    Args:
        x: input tensor of shape [batch, height, width, channels].
        num_filter_for_each_path: e.g. [10, 20, 30]; the number of filters for
            the 1x1, 3x3 and 5x5 convolution paths. The pooling path keeps the
            same number of channels as its input.
        name: variable scope name.
    """
    with tf.variable_scope(name):
        # The four paths all branch off the same input x and run in parallel.
        conv1_1 = tf.layers.conv2d(x,
                                   num_filter_for_each_path[0],
                                   (1, 1),
                                   strides = (1, 1),
                                   activation = tf.nn.relu,
                                   padding = 'same',
                                   name = 'conv1_1')
        conv3_3 = tf.layers.conv2d(x,
                                   num_filter_for_each_path[1],
                                   (3, 3),
                                   strides = (1, 1),
                                   activation = tf.nn.relu,
                                   padding = 'same',
                                   name = 'conv3_3')
        conv5_5 = tf.layers.conv2d(x,
                                   num_filter_for_each_path[2],
                                   (5, 5),
                                   strides = (1, 1),
                                   activation = tf.nn.relu,
                                   padding = 'same',
                                   name = 'conv5_5')
        pooling = tf.layers.max_pooling2d(x,
                                          (2, 2),
                                          (2, 2),
                                          name = 'pooling')
        # Because the conv layers use padding='same' with stride 1, their output
        # has the same spatial size as the input, but the pooling layer halves it,
        # so the pooled feature map is zero-padded back to the input size before
        # concatenation.
        pooling_shape = pooling.get_shape().as_list()[1:]
        input_shape = x.get_shape().as_list()[1:]
        width_padding = (input_shape[0] - pooling_shape[0]) // 2
        height_padding = (input_shape[1] - pooling_shape[1]) // 2
        padded_pooling = tf.pad(pooling,
                                [[0, 0],
                                 [width_padding, width_padding],
                                 [height_padding, height_padding],
                                 [0, 0]])
        # Concatenate all four paths along the channel axis.
        layers = tf.concat([conv1_1, conv3_3, conv5_5, padded_pooling], axis=3)
        return layers
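As a quick sanity check, the block can be applied to a dummy input. The 32x32x3 input size below is an illustrative assumption (e.g. CIFAR-10 sized images), not something fixed by the code above; the output should carry 10 + 20 + 30 channels from the conv paths plus the 3 input channels kept by the pooling path, i.e. 63 channels.

demo_x = tf.placeholder(tf.float32, [None, 32, 32, 3])   # hypothetical input
demo_block = Inception_block(demo_x, [10, 20, 30], 'inception_demo')
print(demo_block.get_shape().as_list())  # expected: [None, 32, 32, 63]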
def inception_net(x, num_filter_base, class_num):
    """A small network built from stacked Inception blocks."""
    # Stem: one plain convolution followed by max pooling.
    conv0 = tf.layers.conv2d(x,
                             num_filter_base,
                             (3, 3),
                             strides = (1, 1),
                             activation = tf.nn.relu,
                             padding = 'same',
                             name = 'conv0')
    pool0 = tf.layers.max_pooling2d(conv0,
                                    (2, 2),
                                    (2, 2),
                                    name = 'pool0')
    # Three stacked Inception blocks.
    inception_1 = Inception_block(pool0, [10, 20, 30], 'inception_1')
    inception_2 = Inception_block(inception_1, [10, 20, 30], 'inception_2')
    inception_3 = Inception_block(inception_2, [10, 20, 30], 'inception_3')
    pool1 = tf.layers.max_pooling2d(inception_3,
                                    (2, 2),
                                    (2, 2),
                                    name = 'pool1')
    # Flatten and project to class logits.
    flatten = tf.layers.flatten(pool1)
    y_ = tf.layers.dense(flatten, class_num)
    return y_
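A minimal sketch of wiring inception_net into a training graph. The 32x32x3 input shape, 10 classes, base filter count of 32 and the 1e-3 learning rate are all illustrative assumptions, not part of the original code.

x = tf.placeholder(tf.float32, [None, 32, 32, 3])   # hypothetical image input
y = tf.placeholder(tf.int64, [None])                 # hypothetical integer labels

logits = inception_net(x, num_filter_base=32, class_num=10)
loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=logits)

# Accuracy for monitoring: compare the predicted class index with the label.
predict = tf.argmax(logits, 1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predict, y), tf.float32))

with tf.name_scope('train_op'):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)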