TensorFlow: an MNIST handwritten-digit recognition system using the Inception structure from GoogLeNet

1. The Inception block used here consists of four branches: the first branch is a single 1*1 convolution; the second branch is a 1*1 convolution followed by a 1*3 convolution and a 3*1 convolution; the third branch is a 1*1 convolution followed by two 1*3 and two 3*1 convolutions, which is equivalent to a 5*5 kernel; the last branch is an average-pooling layer.
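
To see why the factorized kernels are attractive, here is a minimal sketch of the parameter counts; the channel width below is a made-up illustrative value, not one of the sizes used in the network that follows:

# Parameters of a conv kernel = kernel_height * kernel_width * in_channels * out_channels
def conv_params(kh, kw, c_in, c_out):
    return kh * kw * c_in * c_out

c = 16                                                           # illustrative channel width
full = conv_params(3, 3, c, c)                                   # one 3*3 conv: 2304 weights
factored = conv_params(1, 3, c, c) + conv_params(3, 1, c, c)     # 1*3 then 3*1: 1536 weights
print("3*3: %d, factorized: %d" % (full, factored))

The 1*3/3*1 pair covers the same 3*3 receptive field with about one third fewer weights, which is the usual motivation for the factorized branches used below.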

For the MNIST implementation, two such Inception blocks are used, and each block is followed by a max-pooling layer to reduce the spatial size. The code is as follows:

# -*- coding: utf-8 -*-
# Convolutional network built from Inception-style blocks (a residual shortcut block is also defined); the 784-dim input vector is reshaped into a 28*28 image
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def get_data():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    return mnist

# Weight initialization helper
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev = 0.1)
    return tf.Variable(initial)

# Bias initialization helper
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# Convolution layer with configurable strides and SAME padding
def conv2d(x,W,stride_x,stride_y):
    return tf.nn.conv2d(x,W,strides=[1,stride_x,stride_y,1],padding = "SAME")

# 2*2 max-pooling layer with stride 2 (halves the spatial size)
def pool(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides = [1,2,2,1],padding = "SAME")

# 2*2 average-pooling layer with stride 1 (keeps the spatial size)
def average_pool(x):
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding="SAME")

# Residual block with a 1*1-projected shortcut (defined here but not used in the main program)
def res_block(x,kernal_size_x,kernal_size_y,channel_in,channel_out):

    X_shortcut = x

    with tf.variable_scope("res_sub1"):
        res_w_conv1 = weight_variable([kernal_size_x,kernal_size_y,channel_in,channel_out/2])
        res_b_conv1 = bias_variable([channel_out/2])
        res_h_conv1 = tf.nn.relu(conv2d(x,res_w_conv1,1,1) + res_b_conv1)

    with tf.variable_scope("res_sub2"):
        res_w_conv2 = weight_variable([kernal_size_x,kernal_size_y,channel_out/2,channel_out])
        res_b_conv2 = bias_variable([channel_out])
        res_h_conv2 = tf.nn.relu(conv2d(res_h_conv1,res_w_conv2,1,1) + res_b_conv2)

    with tf.variable_scope("shortcut"):
        # project the input with a 1*1 conv so it matches channel_out, then add it to the branch output
        res_w_shortcut = weight_variable([1, 1, channel_in, channel_out])
        res_b_shortcut = bias_variable([channel_out])
        X_shortcut = conv2d(X_shortcut,res_w_shortcut,1,1) + res_b_shortcut
        res_add = tf.add(res_h_conv2, X_shortcut)
        res_b_shortcut = bias_variable([channel_out])
        res_add_result = tf.nn.relu(res_add + res_b_shortcut)

    return res_add_result

def reduction_block(x,channel_in,channel_out):
    # Branch 1: a single 1*1 convolution
    with tf.variable_scope("reduction_first"):
        # layer 1: 1*1
        w_first_conv1 = weight_variable([1,1,channel_in,channel_out/2])
        b_first_conv1 = bias_variable([channel_out/2])
        h_first_conv1 = tf.nn.relu(conv2d(x,w_first_conv1,1,1)+b_first_conv1)
        h_first_conv1 = tf.nn.lrn(h_first_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # Branch 2: equivalent to one 3*3 kernel (a 1*1 conv followed by 3*1 and 1*3 convs)
    with tf.variable_scope("reduction_second"):
        # layer 1: 1*1
        w_second_conv1 = weight_variable([ 1, 1,channel_in, channel_out / 2])
        b_second_conv1 = bias_variable([channel_out /2])
        h_second_conv1 = tf.nn.relu(conv2d(x, w_second_conv1,1,1) + b_second_conv1)
        h_second_conv1 = tf.nn.lrn(h_second_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # layer 2: 3*1
        w_second_conv2 = weight_variable([ 3, 1,channel_out / 2, channel_out / 2])
        b_second_conv2 = bias_variable([channel_out / 2])
        h_second_conv2 = tf.nn.relu(conv2d(h_second_conv1, w_second_conv2,1,1) + b_second_conv2)

        # layer 3: 1*3
        w_second_conv3 = weight_variable([ 1, 3,channel_out / 2, channel_out / 2])
        b_second_conv3 = bias_variable([channel_out / 2])
        h_second_conv3 = tf.nn.relu(conv2d(h_second_conv2, w_second_conv3, 1, 1) + b_second_conv3)
        h_second_conv3 = tf.nn.lrn(h_second_conv3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # Branch 3: equivalent to two stacked 3*3 kernels, i.e. a 5*5 receptive field
    with tf.variable_scope("reduction_third"):
        # layer 1: 1*1
        w_third_conv1 = weight_variable([ 1, 1,channel_in, channel_out / 2])
        b_third_conv1 = bias_variable([channel_out / 2])
        h_third_conv1 = tf.nn.relu(conv2d(x, w_third_conv1, 1, 1) + b_third_conv1)
        h_third_conv1 = tf.nn.lrn(h_third_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # layer 2: 3*1, stride 1,1
        w_third_conv2 = weight_variable([ 3, 1,channel_out / 2, channel_out / 2])
        b_third_conv2 = bias_variable([channel_out / 2])
        h_third_conv2 = tf.nn.relu(conv2d(h_third_conv1, w_third_conv2, 1, 1) + b_third_conv2)

        # layer 3: 1*3, stride 1,1
        w_third_conv3 = weight_variable([ 1, 3,channel_out / 2, channel_out / 2])
        b_third_conv3 = bias_variable([channel_out / 2])
        h_third_conv3 = tf.nn.relu(conv2d(h_third_conv2, w_third_conv3, 1, 1) + b_third_conv3)
        h_third_conv3 = tf.nn.lrn(h_third_conv3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # layer 4: 3*1, stride 1,1
        w_third_conv4 = weight_variable([ 3, 1,channel_out / 2, channel_out / 2])
        b_third_conv4 = bias_variable([channel_out / 2])
        h_third_conv4 = tf.nn.relu(conv2d(h_third_conv3, w_third_conv4, 1, 1) + b_third_conv4)

        # layer 5: 1*3, stride 1,1
        w_third_conv5 = weight_variable([ 1, 3,channel_out / 2, channel_out / 2])
        b_third_conv5 = bias_variable([channel_out / 2])
        h_third_conv5 = tf.nn.relu(conv2d(h_third_conv4, w_third_conv5, 1, 1) + b_third_conv5)
        h_third_conv5 = tf.nn.lrn(h_third_conv5, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # Branch 4: a stride-1 average-pooling layer
    with tf.variable_scope("reduction_fourth"):
        h_forth_pool = average_pool(x)
    with tf.variable_scope("concat"):
        h_concat = tf.concat(3,[h_first_conv1,h_second_conv3,h_third_conv5,h_forth_pool])

    channel = channel_out*3/2 + channel_in

    return channel, h_concat

if __name__ == "__main__":
    # Input placeholder: flattened 28*28 images
    x = tf.placeholder(tf.float32,[None, 784])
    # Reshape into a 2-D image; -1 lets TensorFlow infer the batch dimension
    x_image = tf.reshape(x,[-1,28,28,1])

    channel1,h_reduc1 = reduction_block(x_image,1,32)
    h_reduc1 = pool(h_reduc1)
    channel2,h_reduc2 = reduction_block(h_reduc1, channel1, 64)
    h_reduc2 = pool(h_reduc2)

    # Fully connected layer 1
    W_fc1 = weight_variable([7 * 7 * channel2, 1024])
    b_fc1 = bias_variable([1024])
    h2_pool_flat = tf.reshape(h_reduc2, shape=[-1, 7 * 7 * channel2])
    h_fc1 = tf.nn.relu(tf.matmul(h2_pool_flat, W_fc1) + b_fc1)

    # Dropout layer
    keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Fully connected layer 2 (softmax output)
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_predict = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    # Placeholder for the one-hot labels
    y_label = tf.placeholder(tf.float32,[None,10])
    mnist = get_data()
    # Cross-entropy loss
    cross_entropy = -tf.reduce_sum(y_label * tf.log(y_predict))
    # Optimizer (Adam; a plain gradient-descent version is left commented out)
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
    # train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)
    # Accuracy
    correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # mean accuracy over the batch
    sess = tf.InteractiveSession()
    sess.run(tf.initialize_all_variables())
    for i in range(20000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:  # evaluate on the current batch every 100 steps
            train_acc = accuracy.eval(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 1.0})
            print 'step %d, training accuracy %g' % (i, train_acc)
        train_step.run(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 0.5})

    point = 0
    for i in xrange(10):
        testSet = mnist.test.next_batch(50)
        point += accuracy.eval(feed_dict={ x: testSet[0], y_label: testSet[1], keep_prob: 1.0})
    print "test accuracy:"+str(point/10)
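
For reference, the tensor bookkeeping implied by the code above (a quick check, not anything printed by the program): the reduction blocks keep the spatial size and each max-pool halves it, so the feature map goes 28*28 -> 14*14 -> 7*7, and each block returns channel_out*3/2 + channel_in channels:

channel1 = 32*3/2 + 1  = 49     # channels after the first block
channel2 = 64*3/2 + 49 = 145    # channels after the second block

which is why the first fully connected layer is sized 7 * 7 * channel2.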

Accuracy:

step 19600, training accuracy 0.96
step 19700, training accuracy 0.88
step 19800, training accuracy 0.9
step 19900, training accuracy 0.94
test accuracy:0.9079999983310699

2. What happens if we use 3*3 kernels instead of the factorized 1*3/3*1 kernels?

# -*- coding: utf-8 -*-
# Convolutional network built from Inception-style blocks (a residual shortcut block is also defined); the 784-dim input vector is reshaped into a 28*28 image
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def get_data():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    return mnist

# Weight initialization helper
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev = 0.1)
    return tf.Variable(initial)

# Bias initialization helper
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# Convolution layer with configurable strides and SAME padding
def conv2d(x,W,stride_x,stride_y):
    return tf.nn.conv2d(x,W,strides=[1,stride_x,stride_y,1],padding = "SAME")

# 2*2 max-pooling layer with stride 2 (halves the spatial size)
def pool(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides = [1,2,2,1],padding = "SAME")

# 2*2 average-pooling layer with stride 1 (keeps the spatial size)
def average_pool(x):
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding="SAME")

# Residual block with a 1*1-projected shortcut (defined here but not used in the main program)
def res_block(x,kernal_size_x,kernal_size_y,channel_in,channel_out):

    X_shortcut = x

    with tf.variable_scope("res_sub1"):
        res_w_conv1 = weight_variable([kernal_size_x,kernal_size_y,channel_in,channel_out/2])
        res_b_conv1 = bias_variable([channel_out/2])
        res_h_conv1 = tf.nn.relu(conv2d(x,res_w_conv1,1,1) + res_b_conv1)

    with tf.variable_scope("res_sub2"):
        res_w_conv2 = weight_variable([kernal_size_x,kernal_size_y,channel_out/2,channel_out])
        res_b_conv2 = bias_variable([channel_out])
        res_h_conv2 = tf.nn.relu(conv2d(res_h_conv1,res_w_conv2,1,1) + res_b_conv2)

    with tf.variable_scope("shortcut"):
        # project the input with a 1*1 conv so it matches channel_out, then add it to the branch output
        res_w_shortcut = weight_variable([1, 1, channel_in, channel_out])
        res_b_shortcut = bias_variable([channel_out])
        X_shortcut = conv2d(X_shortcut,res_w_shortcut,1,1) + res_b_shortcut
        res_add = tf.add(res_h_conv2, X_shortcut)
        res_b_shortcut = bias_variable([channel_out])
        res_add_result = tf.nn.relu(res_add + res_b_shortcut)

    return res_add_result

def reduction_block(x,channel_in,channel_out):
    # Branch 1: a single 1*1 convolution
    with tf.variable_scope("reduction_first"):
        # layer 1: 1*1
        w_first_conv1 = weight_variable([1,1,channel_in,channel_out/2])
        b_first_conv1 = bias_variable([channel_out/2])
        h_first_conv1 = tf.nn.relu(conv2d(x,w_first_conv1,1,1)+b_first_conv1)
        h_first_conv1 = tf.nn.lrn(h_first_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # Branch 2: a 1*1 conv followed by one 3*3 conv
    with tf.variable_scope("reduction_second"):
        # layer 1: 1*1
        w_second_conv1 = weight_variable([ 1, 1,channel_in, channel_out / 2])
        b_second_conv1 = bias_variable([channel_out /2])
        h_second_conv1 = tf.nn.relu(conv2d(x, w_second_conv1,1,1) + b_second_conv1)
        h_second_conv1 = tf.nn.lrn(h_second_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # layer 2: 3*3
        w_second_conv2 = weight_variable([ 3, 3,channel_out / 2, channel_out / 2])
        b_second_conv2 = bias_variable([channel_out / 2])
        h_second_conv2 = tf.nn.relu(conv2d(h_second_conv1, w_second_conv2,1,1) + b_second_conv2)
        h_second_conv2 = tf.nn.lrn(h_second_conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # Branch 3: a 1*1 conv followed by two 3*3 convs (a 5*5 receptive field)
    with tf.variable_scope("reduction_third"):
        # layer 1: 1*1
        w_third_conv1 = weight_variable([ 1, 1,channel_in, channel_out / 2])
        b_third_conv1 = bias_variable([channel_out / 2])
        h_third_conv1 = tf.nn.relu(conv2d(x, w_third_conv1, 1, 1) + b_third_conv1)
        h_third_conv1 = tf.nn.lrn(h_third_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # layer 2: 3*3, stride 1,1
        w_third_conv2 = weight_variable([ 3, 3,channel_out / 2, channel_out / 2])
        b_third_conv2 = bias_variable([channel_out / 2])
        h_third_conv2 = tf.nn.relu(conv2d(h_third_conv1, w_third_conv2, 1, 1) + b_third_conv2)
        h_third_conv2 = tf.nn.lrn(h_third_conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # layer 3: 3*3, stride 1,1
        w_third_conv3 = weight_variable([ 3, 3,channel_out / 2, channel_out / 2])
        b_third_conv3 = bias_variable([channel_out / 2])
        h_third_conv3 = tf.nn.relu(conv2d(h_third_conv2, w_third_conv3, 1, 1) + b_third_conv3)
        h_third_conv3 = tf.nn.lrn(h_third_conv3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # Branch 4: a stride-1 average-pooling layer
    with tf.variable_scope("reduction_fourth"):
        h_forth_pool = average_pool(x)
    with tf.variable_scope("concat"):
        h_concat = tf.concat(3,[h_first_conv1,h_second_conv2,h_third_conv3,h_forth_pool])

    channel = channel_out*3/2 + channel_in

    return channel, h_concat

if __name__ == "__main__":
    # Input placeholder: flattened 28*28 images
    x = tf.placeholder(tf.float32,[None, 784])
    # Reshape into a 2-D image; -1 lets TensorFlow infer the batch dimension
    x_image = tf.reshape(x,[-1,28,28,1])

    channel1,h_reduc1 = reduction_block(x_image,1,32)
    h_reduc1 = pool(h_reduc1)
    channel2,h_reduc2 = reduction_block(h_reduc1, channel1, 64)
    h_reduc2 = pool(h_reduc2)

    # Fully connected layer 1
    W_fc1 = weight_variable([7 * 7 * channel2, 1024])
    b_fc1 = bias_variable([1024])
    h2_pool_flat = tf.reshape(h_reduc2, shape=[-1, 7 * 7 * channel2])
    h_fc1 = tf.nn.relu(tf.matmul(h2_pool_flat, W_fc1) + b_fc1)

    # Dropout layer
    keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Fully connected layer 2 (softmax output)
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_predict = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    # Placeholder for the one-hot labels
    y_label = tf.placeholder(tf.float32,[None,10])
    mnist = get_data()
    # Cross-entropy loss
    cross_entropy = -tf.reduce_sum(y_label * tf.log(y_predict))
    # Optimizer (Adam; a plain gradient-descent version is left commented out)
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
    # train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)
    # Accuracy
    correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # mean accuracy over the batch
    sess = tf.InteractiveSession()
    sess.run(tf.initialize_all_variables())
    for i in range(20000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:  # evaluate on the current batch every 100 steps
            train_acc = accuracy.eval(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 1.0})
            print 'step %d, training accuracy %g' % (i, train_acc)
        train_step.run(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 0.5})

    point = 0
    for i in xrange(10):
        testSet = mnist.test.next_batch(50)
        point += accuracy.eval(feed_dict={ x: testSet[0], y_label: testSet[1], keep_prob: 1.0})
    print "test accuracy:"+str(point/10)

Accuracy:

step 19600, training accuracy 0.84
step 19700, training accuracy 0.96
step 19800, training accuracy 0.94
step 19900, training accuracy 0.94
test accuracy:0.9259999990463257

3. Instead of a separate pooling layer, use a stride-2 convolution as the last layer of each branch inside the Inception block; first with 3*1 and 1*3 kernels:
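
With SAME padding, a stride-2 convolution already halves the spatial size (rounding up), so the blocks below need no separate pooling stage to go from 28*28 to 14*14 to 7*7. A minimal standalone check of that arithmetic, not part of the network code:

import math

size = 28
for _ in range(2):
    size = int(math.ceil(size / 2.0))    # SAME padding with stride 2 gives ceil(size / 2)
print("spatial size after two stride-2 reduction blocks: %d" % size)    # prints 7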

# -*- coding: utf-8 -*-
# Convolutional network built from Inception-style blocks (a residual shortcut block is also defined); the 784-dim input vector is reshaped into a 28*28 image
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def get_data():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    return mnist

# Weight initialization helper
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev = 0.1)
    return tf.Variable(initial)

# Bias initialization helper
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# Convolution layer with configurable strides and SAME padding
def conv2d(x,W,stride_x,stride_y):
    return tf.nn.conv2d(x,W,strides=[1,stride_x,stride_y,1],padding = "SAME")

# 2*2 max-pooling layer with stride 2 (halves the spatial size)
def pool(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides = [1,2,2,1],padding = "SAME")

# 2*2 average-pooling layer with stride 1 (keeps the spatial size)
def average_pool(x):
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding="SAME")

# Residual block with a 1*1-projected shortcut (defined here but not used in the main program)
def res_block(x,kernal_size_x,kernal_size_y,channel_in,channel_out):

    X_shortcut = x

    with tf.variable_scope("res_sub1"):
        res_w_conv1 = weight_variable([kernal_size_x,kernal_size_y,channel_in,channel_out/2])
        res_b_conv1 = bias_variable([channel_out/2])
        res_h_conv1 = tf.nn.relu(conv2d(x,res_w_conv1,1,1) + res_b_conv1)

    with tf.variable_scope("res_sub2"):
        res_w_conv2 = weight_variable([kernal_size_x,kernal_size_y,channel_out/2,channel_out])
        res_b_conv2 = bias_variable([channel_out])
        res_h_conv2 = tf.nn.relu(conv2d(res_h_conv1,res_w_conv2,1,1) + res_b_conv2)

    with tf.variable_scope("shortcut"):
        # project the input with a 1*1 conv so it matches channel_out, then add it to the branch output
        res_w_shortcut = weight_variable([1, 1, channel_in, channel_out])
        res_b_shortcut = bias_variable([channel_out])
        X_shortcut = conv2d(X_shortcut,res_w_shortcut,1,1) + res_b_shortcut
        res_add = tf.add(res_h_conv2, X_shortcut)
        res_b_shortcut = bias_variable([channel_out])
        res_add_result = tf.nn.relu(res_add + res_b_shortcut)

    return res_add_result

def reduction_block(x,channel_in,channel_out):

    # Branch equivalent to one 3*3 kernel; the 3*1/1*3 pair uses stride 2 to halve the spatial size
    with tf.variable_scope("reduction_second"):
        # layer 1: 1*1
        w_second_conv1 = weight_variable([ 1, 1,channel_in, channel_out / 2])
        b_second_conv1 = bias_variable([channel_out /2])
        h_second_conv1 = tf.nn.relu(conv2d(x, w_second_conv1,1,1) + b_second_conv1)
        h_second_conv1 = tf.nn.lrn(h_second_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # layer 2: 3*1, stride 2,1
        w_second_conv2 = weight_variable([ 3, 1,channel_out / 2, channel_out / 2])
        b_second_conv2 = bias_variable([channel_out / 2])
        h_second_conv2 = tf.nn.relu(conv2d(h_second_conv1, w_second_conv2,2,1) + b_second_conv2)

        # layer 3: 1*3, stride 1,2
        w_second_conv3 = weight_variable([ 1, 3,channel_out / 2, channel_out / 2])
        b_second_conv3 = bias_variable([channel_out / 2])
        h_second_conv3 = tf.nn.relu(conv2d(h_second_conv2, w_second_conv3, 1, 2) + b_second_conv3)
        h_second_conv3 = tf.nn.lrn(h_second_conv3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # Branch equivalent to two stacked 3*3 kernels; the last 3*1/1*3 pair uses stride 2
    with tf.variable_scope("reduction_third"):
        # layer 1: 1*1
        w_third_conv1 = weight_variable([ 1, 1,channel_in, channel_out / 2])
        b_third_conv1 = bias_variable([channel_out / 2])
        h_third_conv1 = tf.nn.relu(conv2d(x, w_third_conv1, 1, 1) + b_third_conv1)
        h_third_conv1 = tf.nn.lrn(h_third_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # layer 2: 3*1, stride 1,1
        w_third_conv2 = weight_variable([ 3, 1,channel_out / 2, channel_out / 2])
        b_third_conv2 = bias_variable([channel_out / 2])
        h_third_conv2 = tf.nn.relu(conv2d(h_third_conv1, w_third_conv2, 1, 1) + b_third_conv2)

        # layer 3: 1*3, stride 1,1
        w_third_conv3 = weight_variable([ 1, 3,channel_out / 2, channel_out / 2])
        b_third_conv3 = bias_variable([channel_out / 2])
        h_third_conv3 = tf.nn.relu(conv2d(h_third_conv2, w_third_conv3, 1, 1) + b_third_conv3)
        h_third_conv3 = tf.nn.lrn(h_third_conv3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # layer 4: 3*1, stride 2,1
        w_third_conv4 = weight_variable([ 3, 1,channel_out / 2, channel_out / 2])
        b_third_conv4 = bias_variable([channel_out / 2])
        h_third_conv4 = tf.nn.relu(conv2d(h_third_conv3, w_third_conv4, 2, 1) + b_third_conv4)

        # layer 5: 1*3, stride 1,2
        w_third_conv5 = weight_variable([ 1, 3,channel_out / 2, channel_out / 2])
        b_third_conv5 = bias_variable([channel_out / 2])
        h_third_conv5 = tf.nn.relu(conv2d(h_third_conv4, w_third_conv5, 1, 2) + b_third_conv5)
        h_third_conv5 = tf.nn.lrn(h_third_conv5, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # Pooling branch: a stride-2 max-pooling layer
    with tf.variable_scope("reduction_fourth"):
        h_forth_pool = pool(x)
    with tf.variable_scope("concat"):
        h_concat = tf.concat(3,[h_second_conv3,h_third_conv5,h_forth_pool])

    channel = channel_out + channel_in

    return channel, h_concat

if __name__ == "__main__":
    # Input placeholder: flattened 28*28 images
    x = tf.placeholder(tf.float32,[None, 784])
    # Reshape into a 2-D image; -1 lets TensorFlow infer the batch dimension
    x_image = tf.reshape(x,[-1,28,28,1])

    channel1,h_reduc1 = reduction_block(x_image,1,32)
    channel2,h_reduc2 = reduction_block(h_reduc1, channel1, 64)

    # Fully connected layer 1
    W_fc1 = weight_variable([7 * 7 * channel2, 1024])
    b_fc1 = bias_variable([1024])
    h2_pool_flat = tf.reshape(h_reduc2, shape=[-1, 7 * 7 * channel2])
    h_fc1 = tf.nn.relu(tf.matmul(h2_pool_flat, W_fc1) + b_fc1)

    # Dropout layer
    keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Fully connected layer 2 (softmax output)
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_predict = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    # Placeholder for the one-hot labels
    y_label = tf.placeholder(tf.float32,[None,10])
    mnist = get_data()
    # Cross-entropy loss
    cross_entropy = -tf.reduce_sum(y_label * tf.log(y_predict))
    # Optimizer (Adam; a plain gradient-descent version is left commented out)
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
    # train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)
    # Accuracy
    correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # mean accuracy over the batch
    sess = tf.InteractiveSession()
    sess.run(tf.initialize_all_variables())
    for i in range(20000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:  # evaluate on the current batch every 100 steps
            train_acc = accuracy.eval(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 1.0})
            print 'step %d, training accuracy %g' % (i, train_acc)
        train_step.run(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 0.5})

    point = 0
    for i in xrange(10):
        testSet = mnist.test.next_batch(50)
        point += accuracy.eval(feed_dict={ x: testSet[0], y_label: testSet[1], keep_prob: 1.0})
    print "test accuracy:"+str(point/10)
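
Channel bookkeeping for this variant (again a quick check, using the constants above): each block concatenates two convolutional branches of channel_out/2 channels plus the pooled input, so it returns channel_out + channel_in channels, and the stride-2 layers take 28*28 to 14*14 and then 7*7:

channel1 = 32 + 1  = 33     # channels after the first block
channel2 = 64 + 33 = 97     # channels after the second block, giving a 7*7*97 map to flatten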

Result:

step 19600, training accuracy 0.96
step 19700, training accuracy 0.88
step 19800, training accuracy 0.9
step 19900, training accuracy 0.92
test accuracy:0.907999986410141

4. The same stride-2 reduction block, but with 3*3 kernels:

# -*- coding: utf-8 -*-
# Convolutional network built from Inception-style blocks (a residual shortcut block is also defined); the 784-dim input vector is reshaped into a 28*28 image
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def get_data():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    return mnist

# Weight initialization helper
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev = 0.1)
    return tf.Variable(initial)

# Bias initialization helper
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# Convolution layer with configurable strides and SAME padding
def conv2d(x,W,stride_x,stride_y):
    return tf.nn.conv2d(x,W,strides=[1,stride_x,stride_y,1],padding = "SAME")

# 2*2 max-pooling layer with stride 2 (halves the spatial size)
def pool(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides = [1,2,2,1],padding = "SAME")

# 2*2 average-pooling layer with stride 1 (keeps the spatial size)
def average_pool(x):
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding="SAME")

# Residual block with a 1*1-projected shortcut (defined here but not used in the main program)
def res_block(x,kernal_size_x,kernal_size_y,channel_in,channel_out):

    X_shortcut = x

    with tf.variable_scope("res_sub1"):
        res_w_conv1 = weight_variable([kernal_size_x,kernal_size_y,channel_in,channel_out/2])
        res_b_conv1 = bias_variable([channel_out/2])
        res_h_conv1 = tf.nn.relu(conv2d(x,res_w_conv1,1,1) + res_b_conv1)

    with tf.variable_scope("res_sub2"):
        res_w_conv2 = weight_variable([kernal_size_x,kernal_size_y,channel_out/2,channel_out])
        res_b_conv2 = bias_variable([channel_out])
        res_h_conv2 = tf.nn.relu(conv2d(res_h_conv1,res_w_conv2,1,1) + res_b_conv2)

    with tf.variable_scope("shortcut"):
        # project the input with a 1*1 conv so it matches channel_out, then add it to the branch output
        res_w_shortcut = weight_variable([1, 1, channel_in, channel_out])
        res_b_shortcut = bias_variable([channel_out])
        X_shortcut = conv2d(X_shortcut,res_w_shortcut,1,1) + res_b_shortcut
        res_add = tf.add(res_h_conv2, X_shortcut)
        res_b_shortcut = bias_variable([channel_out])
        res_add_result = tf.nn.relu(res_add + res_b_shortcut)

    return res_add_result

def reduction_block(x,channel_in,channel_out):

    # Branch with one 3*3 kernel; the 3*3 conv uses stride 2 to halve the spatial size
    with tf.variable_scope("reduction_second"):
        # layer 1: 1*1
        w_second_conv1 = weight_variable([ 1, 1,channel_in, channel_out / 2])
        b_second_conv1 = bias_variable([channel_out /2])
        h_second_conv1 = tf.nn.relu(conv2d(x, w_second_conv1,1,1) + b_second_conv1)
        h_second_conv1 = tf.nn.lrn(h_second_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # layer 2: 3*3, stride 2,2
        w_second_conv2 = weight_variable([ 3, 3,channel_out / 2, channel_out / 2])
        b_second_conv2 = bias_variable([channel_out / 2])
        h_second_conv2 = tf.nn.relu(conv2d(h_second_conv1, w_second_conv2,2,2) + b_second_conv2)
        h_second_conv2 = tf.nn.lrn(h_second_conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # Branch with two 3*3 kernels; the second 3*3 conv uses stride 2
    with tf.variable_scope("reduction_third"):
        # layer 1: 1*1
        w_third_conv1 = weight_variable([ 1, 1,channel_in, channel_out / 2])
        b_third_conv1 = bias_variable([channel_out / 2])
        h_third_conv1 = tf.nn.relu(conv2d(x, w_third_conv1, 1, 1) + b_third_conv1)
        h_third_conv1 = tf.nn.lrn(h_third_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # layer 2: 3*3, stride 1,1
        w_third_conv2 = weight_variable([ 3, 3,channel_out / 2, channel_out / 2])
        b_third_conv2 = bias_variable([channel_out / 2])
        h_third_conv2 = tf.nn.relu(conv2d(h_third_conv1, w_third_conv2, 1, 1) + b_third_conv2)
        h_third_conv2 = tf.nn.lrn(h_third_conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

        # layer 3: 3*3, stride 2,2
        w_third_conv3 = weight_variable([ 3, 3,channel_out / 2, channel_out / 2])
        b_third_conv3 = bias_variable([channel_out / 2])
        h_third_conv3 = tf.nn.relu(conv2d(h_third_conv2, w_third_conv3, 2, 2) + b_third_conv3)
        h_third_conv3 = tf.nn.lrn(h_third_conv3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # Pooling branch: a stride-2 max-pooling layer
    with tf.variable_scope("reduction_fourth"):
        h_forth_pool = pool(x)
    with tf.variable_scope("concat"):
        h_concat = tf.concat(3,[h_second_conv2,h_third_conv3,h_forth_pool])

    channel = channel_out + channel_in

    return channel, h_concat

if __name__ == "__main__":
    # Input placeholder: flattened 28*28 images
    x = tf.placeholder(tf.float32,[None, 784])
    # Reshape into a 2-D image; -1 lets TensorFlow infer the batch dimension
    x_image = tf.reshape(x,[-1,28,28,1])

    channel1,h_reduc1 = reduction_block(x_image,1,32)
    channel2,h_reduc2 = reduction_block(h_reduc1, channel1, 64)


    # Fully connected layer 1
    W_fc1 = weight_variable([7 * 7 * channel2, 1024])
    b_fc1 = bias_variable([1024])
    h2_pool_flat = tf.reshape(h_reduc2, shape=[-1, 7 * 7 * channel2])
    h_fc1 = tf.nn.relu(tf.matmul(h2_pool_flat, W_fc1) + b_fc1)

    # Dropout layer
    keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Fully connected layer 2 (softmax output)
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_predict = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    # Placeholder for the one-hot labels
    y_label = tf.placeholder(tf.float32,[None,10])
    mnist = get_data()
    # Cross-entropy loss
    cross_entropy = -tf.reduce_sum(y_label * tf.log(y_predict))
    # Optimizer (Adam; a plain gradient-descent version is left commented out)
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
    # train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)
    # Accuracy
    correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # mean accuracy over the batch
    sess = tf.InteractiveSession()
    sess.run(tf.initialize_all_variables())
    for i in range(20000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:  # evaluate on the current batch every 100 steps
            train_acc = accuracy.eval(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 1.0})
            print 'step %d, training accuracy %g' % (i, train_acc)
        train_step.run(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 0.5})

    point = 0
    for i in xrange(10):
        testSet = mnist.test.next_batch(50)
        point += accuracy.eval(feed_dict={ x: testSet[0], y_label: testSet[1], keep_prob: 1.0})
    print "test accuracy:"+str(point/10)

Accuracy:

step 19500, training accuracy 0.92
step 19600, training accuracy 0.94
step 19700, training accuracy 0.92
step 19800, training accuracy 0.9
step 19900, training accuracy 0.9
test accuracy:0.9380000054836273

5. Summary:
To sum up, on this task the 3*3 kernels give somewhat better results than the factorized 3*1 and 1*3 kernels.
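
For reference, the four test accuracies reported above, side by side:

Variant                                                     Test accuracy
1. Factorized 3*1/1*3 kernels, max-pooling between blocks   0.908
2. 3*3 kernels, max-pooling between blocks                  0.926
3. Factorized 3*1/1*3 kernels, stride-2 convolutions        0.908
4. 3*3 kernels, stride-2 convolutions                       0.938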
