InceptionV1 (Going Deeper with Convolutions): Algorithm Implementation

Implementing the InceptionV1 structure

An Inception block applies a 1x1 convolution, a 3x3 convolution, a 5x5 convolution and a max-pooling path in parallel to the same input, then concatenates the four outputs along the channel axis. The implementation below uses the TensorFlow 1.x layers API.

import tensorflow as tf


def Inception_block(x, num_filter_for_each_path, name):
    """
    Args:
    - x: input tensor of shape [batch, height, width, channels]
    - num_filter_for_each_path: output channels for the 1x1, 3x3 and 5x5 paths,
      e.g. [10, 20, 30]; the pooling path keeps the same channel count as its input
    - name: variable scope name
    """
    with tf.variable_scope(name):
        conv1_1 = tf.layers.conv2d(x,
                                   num_filter_for_each_path[0],
                                   (1, 1),
                                   strides = (1, 1),
                                   activation = tf.nn.relu,
                                   padding = 'same',
                                   name = 'conv1_1')
        # All three conv paths and the pooling path take x as input in parallel.
        conv3_3 = tf.layers.conv2d(x,
                                   num_filter_for_each_path[1],
                                   (3, 3),
                                   strides = (1, 1),
                                   activation = tf.nn.relu,
                                   padding = 'same',
                                   name = 'conv3_3')
        conv5_5 = tf.layers.conv2d(x,
                                   num_filter_for_each_path[2],
                                   (5, 5),
                                   strides = (1, 1),
                                   activation = tf.nn.relu,
                                   padding = 'same',
                                   name = 'conv5_5')
        pooling = tf.layers.max_pooling2d(x,
                                          (2, 2),
                                          (2, 2),
                                          name = 'pooling')
    # The conv paths use padding='same' with stride 1, so their spatial size matches
    # the input, while the pooling output is halved; pad it back before concatenating.
    pooling_shape = pooling.get_shape().as_list()[1:]
    input_shape = x.get_shape().as_list()[1:]
    width_padding = (input_shape[0] - pooling_shape[0]) // 2
    height_padding = (input_shape[1] - pooling_shape[1]) // 2
    padded_pooling = tf.pad(pooling,
                            [[0, 0],
                             [width_padding, width_padding],
                             [height_padding, height_padding],
                             [0, 0]])
    
    # Concatenate all four parallel paths along the channel axis.
    layers = tf.concat([conv1_1, conv3_3, conv5_5, padded_pooling], axis=3)
    
    return layers
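
As a quick sanity check, the block's output channel count should be the sum of the three convolution widths plus the input's channel count (contributed by the padded pooling path). The snippet below is a minimal sketch of such a check, assuming TensorFlow 1.x; the placeholder name test_x and the 32x32x32 input shape are illustrative only.

# Shape check for one Inception block (illustrative sketch, TF 1.x).
test_x = tf.placeholder(tf.float32, [None, 32, 32, 32], name='test_x')
test_block = Inception_block(test_x, [10, 20, 30], 'inception_test')
# Expected channels: 10 + 20 + 30 + 32 (pooling path) = 92
print(test_block.get_shape().as_list())   # -> [None, 32, 32, 92]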
    
def inception_net(x, num_filter_base, class_num):
    """A small network: an initial conv + pool, three Inception blocks, and a dense classifier."""

    conv0 = tf.layers.conv2d(x,
                             num_filter_base,
                             (3, 3),
                             strides = (1, 1),
                             activation = tf.nn.relu,
                             padding = 'same',
                             name = 'conv0')
    pool0 = tf.layers.max_pooling2d(conv0,
                                    (2, 2),
                                    (2, 2),
                                    name = 'pool0')
    inception_1 = Inception_block(pool0, [10, 20, 30], 'inception_1')
    inception_2 = Inception_block(inception_1, [10, 20, 30], 'inception_2')
    inception_3 = Inception_block(inception_2, [10, 20, 30], 'inception_3')
    
    pool1 = tf.layers.max_pooling2d(inception_3,
                                    (2, 2),
                                    (2, 2),
                                    name = 'pool1')
    
    flatten = tf.layers.flatten(pool1)
    
    y_ = tf.layers.dense(flatten, class_num)
    
    return y_
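
For completeness, here is a minimal sketch of how the network could be wired into a TF 1.x training graph. It assumes a CIFAR-10-like 32x32x3 input with 10 classes; the placeholder names x and y, the base filter count and the learning rate are illustrative assumptions, not part of the original implementation.

# Training-graph sketch (illustrative assumptions throughout).
x = tf.placeholder(tf.float32, [None, 32, 32, 3], name='x')   # assumed CIFAR-10-like images
y = tf.placeholder(tf.int64, [None], name='y')                # integer class labels

logits = inception_net(x, num_filter_base=32, class_num=10)

loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=logits)
predict = tf.argmax(logits, 1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predict, y), tf.float32))

with tf.name_scope('train_op'):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)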