TensorFlow: an MNIST handwritten digit recognition system using a convolutional neural network that combines DenseNet, Inception, and SENet structures

1. Code:

# -*- coding: utf-8 -*-
# This CNN combines SENet, Inception, and shortcut (dense) structures, so the 784-dim input is reshaped into a 28*28 matrix
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def get_data():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    return mnist

# Weight variable helper
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev = 0.1)
    return tf.Variable(initial)

# Bias variable helper
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# Convolution helper
def conv2d(x,W,stride_x,stride_y):
    return tf.nn.conv2d(x,W,strides=[1,stride_x,stride_y,1],padding = "SAME")

# Max-pooling helper
def pool(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides = [1,2,2,1],padding = "SAME")

def average_pool(x):
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding="SAME")

# Batch Normalization layer
def bn_layer(x,is_training,name='BatchNorm',moving_decay=0.9,eps=1e-5):
    shape = x.get_shape().as_list()
    assert len(shape) in [2,4]

    param_shape = shape[-1]
    with tf.variable_scope(name): # declare the only two learnable BN parameters, y = gamma*x + beta
        beta = tf.Variable(tf.constant(0.0, shape=[shape[-1]]), name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=[shape[-1]]), name='gamma', trainable=True)

        # Compute the mean and variance over the current batch
        axes = list(range(len(shape)-1))
        batch_mean, batch_var = tf.nn.moments(x,axes,name='moments')

        # Track the running mean and variance with an exponential moving average
        ema = tf.train.ExponentialMovingAverage(moving_decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean,batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        # During training, update the statistics; at test time, use the last saved moving averages
        mean, var = tf.cond(tf.equal(is_training,1),mean_var_with_update, lambda:(ema.average(batch_mean),ema.average(batch_var)))

        # Finally apply batch normalization
        return tf.nn.batch_normalization(x,mean,var,beta,gamma,eps)
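
# A minimal usage sketch for bn_layer (illustrative only; the blocks below keep their
# bn_layer calls commented out and use LRN instead). The placeholder shown here is an
# assumption, not part of the original script:
#   is_training = tf.placeholder(tf.int32, [])        # feed 1 while training, 0 at test time
#   h_bn = bn_layer(h_conv, is_training)               # h_conv: any NHWC conv output
#   h_act = tf.nn.elu(h_bn)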

def inception_dense_block(x,channel_in,channel_out1,channel_out2,channel_out3):
    # First branch
    with tf.variable_scope("inception_way_first"):
        # Layer 1: 1*1 convolution, outputs channel_out1 channels
        w_first_conv1 = weight_variable([1,1,channel_in,channel_out1])
        b_first_conv1 = bias_variable([channel_out1])
        h_first_conv1 = conv2d(x, w_first_conv1, 1, 1) + b_first_conv1
        h_first_conv1_lrn = tf.nn.lrn(h_first_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        # h_first_conv1_norm = bn_layer(h_first_conv1, 1)
        h_first_conv1_output = tf.nn.elu(h_first_conv1_lrn)

    # Second branch
    with tf.variable_scope("inception_way_second"):
        # Layer 1: 1*1 convolution, outputs channel_out2/4 channels
        w_second_conv1 = weight_variable([1,1,channel_in,channel_out2/4])
        b_second_conv1 = bias_variable([channel_out2/4])
        h_second_conv1 = conv2d(x, w_second_conv1, 1, 1) + b_second_conv1
        # h_second_conv1_norm = bn_layer(h_second_conv1, 1)
        h_second_conv1_lrn = tf.nn.lrn(h_second_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        h_second_conv1_output = tf.nn.elu(h_second_conv1_lrn)

        # Layer 2: 3*3 convolution, outputs channel_out2 channels
        w_second_conv2 = weight_variable([3,3,channel_out2/4,channel_out2])
        b_second_conv2 = bias_variable([channel_out2])
        h_second_conv2 = conv2d(h_second_conv1_output,w_second_conv2,1,1) +b_second_conv2
        h_second_conv2_lrn = tf.nn.lrn(h_second_conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        # h_second_conv2_norm = bn_layer(h_second_conv2,1)
        h_second_conv2_output = tf.nn.elu(h_second_conv2_lrn)

    with tf.variable_scope("inception_way_second"):
        # 第一层,1*1的卷积核,输出1/4
        w_third_conv1 = weight_variable([1,1,channel_in,channel_out3/4])
        b_third_conv1 = bias_variable([channel_out3/4])
        h_third_conv1 = conv2d(x,w_third_conv1,1,1) + b_third_conv1
        h_third_conv1_lrn = tf.nn.lrn(h_third_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        # h_third_conv1_norm = bn_layer(h_third_conv1,1)
        h_third_conv1_output = tf.nn.elu(h_third_conv1_lrn)
        # Layer 2: 5*5 convolution, outputs channel_out3 channels
        w_third_conv2 = weight_variable([5, 5, channel_out3/4, channel_out3])
        b_third_conv2 = bias_variable([channel_out3])
        h_third_conv2 = conv2d(h_third_conv1_output, w_third_conv2, 1, 1) + b_third_conv2
        h_third_conv2_lrn = tf.nn.lrn(h_third_conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        # h_third_conv2_norm = bn_layer(h_third_conv2, 1)
        h_third_conv2_output = tf.nn.elu(h_third_conv2_lrn)

    with tf.variable_scope("concat"):
        channel = channel_in + channel_out1 + channel_out2 + channel_out3
        h_concat = tf.concat([h_first_conv1_output, h_second_conv2_output, h_third_conv2_output, x], 3)

    return channel, h_concat
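
# Illustrative note: the concatenation above gives the block a dense-style output of
# channel_in + channel_out1 + channel_out2 + channel_out3 channels, e.g. the first call
# in __main__ below:
#   channels, h = inception_dense_block(x_image, 1, 4, 8, 8)   # channels == 1+4+8+8 == 21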

def SE_block(x,ratio):

    shape = x.get_shape().as_list()
    channel_out = shape[3]
    # print(shape)
    with tf.variable_scope("squeeze_and_excitation"):
        # Layer 1: global average pooling
        squeeze = tf.nn.avg_pool(x,[1,shape[1],shape[2],1],[1,shape[1],shape[2],1],padding = "SAME")
        # Layer 2: fully connected layer (implemented as a 1*1 convolution)
        w_excitation1 = weight_variable([1,1,channel_out,channel_out/ratio])
        b_excitation1 = bias_variable([channel_out/ratio])
        excitation1 = conv2d(squeeze,w_excitation1,1,1) + b_excitation1
        excitation1_output = tf.nn.relu(excitation1)
        # Layer 3: fully connected layer (implemented as a 1*1 convolution)
        w_excitation2 = weight_variable([1, 1, channel_out / ratio, channel_out])
        b_excitation2 = bias_variable([channel_out])
        excitation2 = conv2d(excitation1_output, w_excitation2, 1, 1) + b_excitation2
        excitation2_output = tf.nn.sigmoid(excitation2)
        # Layer 4: channel-wise scaling of the input
        excitation_output = tf.reshape(excitation2_output,[-1,1,1,channel_out])
        h_output = excitation_output * x

    return h_output
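
# Usage sketch for SE_block (illustrative): `ratio` should divide the channel count; with
# Python 2 integer division, the 21-channel input used below with ratio=4 gives a 5-unit
# excitation bottleneck.
#   h_se = SE_block(h_dense, 4)   # output has the same shape (and channels) as the input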


def reduction_block(x,channel_in,channel_out):
    # Branch 1: a single 1*1 convolution
    with tf.variable_scope("reduction_first"):
        # Layer 1
        w_first_conv1 = weight_variable([1,1,channel_in,channel_out/2])
        b_first_conv1 = bias_variable([channel_out/2])
        h_first_conv1 = tf.nn.relu(conv2d(x,w_first_conv1,1,1)+b_first_conv1)
        h_first_conv1 =  tf.nn.lrn(h_first_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # Branch 2: a 1*1 convolution followed by a 3*3 convolution factorized into 3*1 and 1*3
    with tf.variable_scope("reduction_second"):
        # Layer 1
        w_second_conv1 = weight_variable([ 1, 1,channel_in, channel_out / 2])
        b_second_conv1 = bias_variable([channel_out /2])
        h_second_conv1 = tf.nn.relu(conv2d(x, w_second_conv1,1,1) + b_second_conv1)
        h_second_conv1 = tf.nn.lrn(h_second_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # Layer 2
        w_second_conv2 = weight_variable([ 3, 1,channel_out / 2, channel_out / 2])
        b_second_conv2 = bias_variable([channel_out / 2])
        h_second_conv2 = tf.nn.relu(conv2d(h_second_conv1, w_second_conv2,1,1) + b_second_conv2)

        # Layer 3
        w_second_conv3 = weight_variable([ 1, 3,channel_out / 2, channel_out / 2])
        b_second_conv3 = bias_variable([channel_out / 2])
        h_second_conv3 = tf.nn.relu(conv2d(h_second_conv2, w_second_conv3, 1, 1) + b_second_conv3)
        h_second_conv3 = tf.nn.lrn(h_second_conv3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # Branch 3: two stacked 3*3 convolutions, each factorized into 3*1 and 1*3
    with tf.variable_scope("reduction_third"):
        # Layer 1
        w_third_conv1 = weight_variable([ 1, 1,channel_in, channel_out / 2])
        b_third_conv1 = bias_variable([channel_out / 2])
        h_third_conv1 = tf.nn.relu(conv2d(x, w_third_conv1, 1, 1) + b_third_conv1)
        h_third_conv1 = tf.nn.lrn(h_third_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # Layer 2: 3*1 kernel, stride 1,1
        w_third_conv2 = weight_variable([ 3, 1,channel_out / 2, channel_out / 2])
        b_third_conv2 = bias_variable([channel_out / 2])
        h_third_conv2 = tf.nn.relu(conv2d(h_third_conv1, w_third_conv2, 1, 1) + b_third_conv2)

        # Layer 3: 1*3 kernel, stride 1,1
        w_third_conv3 = weight_variable([ 1, 3,channel_out / 2, channel_out / 2])
        b_third_conv3 = bias_variable([channel_out / 2])
        h_third_conv3 = tf.nn.relu(conv2d(h_third_conv2, w_third_conv3, 1, 1) + b_third_conv3)
        h_third_conv3 = tf.nn.lrn(h_third_conv3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # Layer 4: 3*1 kernel, stride 1,1
        w_third_conv4 = weight_variable([ 3, 1,channel_out / 2, channel_out / 2])
        b_third_conv4 = bias_variable([channel_out / 2])
        h_third_conv4 = tf.nn.relu(conv2d(h_third_conv3, w_third_conv4, 1, 1) + b_third_conv4)

        # Layer 5: 1*3 kernel, stride 1,1
        w_third_conv5 = weight_variable([ 1, 3,channel_out / 2, channel_out / 2])
        b_third_conv5 = bias_variable([channel_out / 2])
        h_third_conv5 = tf.nn.relu(conv2d(h_third_conv4, w_third_conv5, 1, 1) + b_third_conv5)
        h_third_conv5 = tf.nn.lrn(h_third_conv5, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # Branch 4: an average-pooling layer
    with tf.variable_scope("reduction_fourth"):
        h_forth_pool = average_pool(x)
    with tf.variable_scope("concat"):
        h_concat = tf.concat([h_first_conv1, h_second_conv3, h_third_conv5, h_forth_pool], 3)

    channel = channel_out*3/2 + channel_in

    return channel, h_concat
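
# Note: reduction_block is defined but never wired into the network below. A hypothetical
# way to insert it between the two dense blocks (variable names assumed from __main__) is:
#   channel_r, h_r = reduction_block(h_SE1, channel1, 16)   # channel_r == 16*3/2 + channel1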

if __name__ == "__main__":
    # Define the input placeholder
    x = tf.placeholder(tf.float32,[None, 784])
    # Reshape into 28*28 images; -1 lets the batch (first) dimension be inferred
    x_image = tf.reshape(x,[-1,28,28,1])

    channel1,h_dense1 = inception_dense_block(x_image,1,4,8,8)
    h_dense1 = pool(h_dense1)
    h_SE1 = SE_block(h_dense1,4)
    channel2,h_dense2 = inception_dense_block(h_SE1, 21,8,8,16)
    h_dense2 = pool(h_dense2)
    h_SE2 = SE_block(h_dense2, 4)

    # Fully connected layer 1
    W_fc1 = weight_variable([7 * 7 * channel2, 1024])
    b_fc1 = bias_variable([1024])
    h2_pool_flat = tf.reshape(h_SE2, shape=[-1, 7 * 7 * channel2])
    h_fc1 = tf.nn.relu(tf.matmul(h2_pool_flat, W_fc1) + b_fc1)

    # Dropout layer
    keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Fully connected layer 2 (output layer)
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_predict = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    # Placeholder for the ground-truth labels
    y_label = tf.placeholder(tf.float32,[None,10])
    mnist = get_data()
    # Cross-entropy loss
    cross_entropy = -tf.reduce_sum(y_label * tf.log(y_predict))
    # Adam optimizer (a plain gradient-descent alternative is left commented out below)
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
    # train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)
    # Accuracy metric
    correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # mean accuracy over the batch
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    for i in range(20000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:  # evaluate on the current batch once every 100 training steps
            train_acc = accuracy.eval(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 1.0})
            print 'step %d, training accuracy %g' % (i, train_acc)
        train_step.run(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 0.5})

    point = 0
    for i in xrange(10):
        testSet = mnist.test.next_batch(50)
        point += accuracy.eval(feed_dict={ x: testSet[0], y_label: testSet[1], keep_prob: 1.0})
    print "test accuracy:"+str(point/10)

2. Accuracy:

step 19500, training accuracy 0.96
step 19600, training accuracy 0.9
step 19700, training accuracy 0.98
step 19800, training accuracy 0.88
step 19900, training accuracy 0.94
test accuracy:0.9299999952316285