# 一.卷积神经网络

https://www.cnblogs.com/charlotte77/p/7759802.html

# 二.代码实现卷积神经网络提升mnist的识别率

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Graph inputs: a batch of flattened 28x28 grayscale MNIST images and their
# one-hot class labels (10 digit classes). Batch size is left dynamic (None).
x=tf.placeholder(tf.float32,[None,784])
y_=tf.placeholder(tf.float32,[None,10])

# Reshape the flat 784-vector back to NHWC image form (batch, 28, 28, 1 channel)
# so it can be fed to the convolutional layers below.
x_image=tf.reshape(x,[-1,28,28,1])

def weight_variable(shape):
    """Create a weight Variable of the given shape.

    Initialized from a truncated normal (stddev=0.1) to break symmetry and
    keep initial activations small, which helps ReLU layers train.
    """
    # NOTE: original body had lost its indentation (SyntaxError); restored here.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    """Create a bias Variable of the given shape.

    A small positive constant (0.1) avoids dead ReLU units at the start of
    training.
    """
    # NOTE: original body had lost its indentation (SyntaxError); restored here.
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    """2-D convolution with stride 1 and SAME padding.

    SAME padding keeps the spatial size unchanged, which is what the
    7*7*64 flatten later in the file assumes (28 -> 28 -> pool -> 14 ...).
    """
    # BUG FIX: the function body was missing entirely in the original.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    """2x2 max-pooling with stride 2 (halves each spatial dimension).

    Two applications take 28x28 -> 14x14 -> 7x7, matching the 7*7*64
    fully-connected input size used later in the file.
    """
    # BUG FIX: the function body was missing entirely in the original.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


# Conv layer 1: 5x5 kernels, 1 input channel -> 32 feature maps, ReLU,
# then 2x2 max-pool (28x28 -> 14x14, assuming SAME padding in the helpers —
# confirm against conv2d/max_pool_2x2 definitions).
W_conv1=weight_variable([5,5,1,32])
b_conv1=bias_variable([32])
h_conv1=tf.nn.relu(conv2d(x_image,W_conv1)+b_conv1)
h_pool1=max_pool_2x2(h_conv1)

# Conv layer 2: 5x5 kernels, 32 input channels -> 64 feature maps, ReLU,
# then 2x2 max-pool (14x14 -> 7x7, assuming SAME padding — confirm helpers).
W_conv2=weight_variable([5,5,32,64])
b_conv2=bias_variable([64])
h_conv2=tf.nn.relu(conv2d(h_pool1,W_conv2)+b_conv2)
h_pool2=max_pool_2x2(h_conv2)

# Fully connected layer: flatten the pooled feature maps (7*7*64 values per
# image after the two pools) and project to 1024 ReLU units.
W_fc1=weight_variable([7*7*64,1024])
b_fc1=bias_variable([1024])
h_pool2_flat=tf.reshape(h_pool2,[-1,7*7*64])
h_fc1=tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1)+b_fc1)

# Dropout on the FC features to reduce overfitting; keep_prob is fed at run
# time (0.5 during training, 1.0 during evaluation in the loop below).
keep_prob=tf.placeholder(tf.float32)
h_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)

dropout是指在深度学习网络的训练过程中，对于神经网络单元，按照一定的概率将其暂时从网络中丢弃。注意是暂时！dropout是CNN中防止过拟合、提高效果的一个大杀器。

# Output layer: map the 1024-d (dropped-out) features to 10 class logits.
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

# Loss: softmax cross-entropy averaged over the batch. Note the op takes raw
# logits — no softmax is applied to y_conv beforehand.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))

# BUG FIX: `train_step` was used in the training loop below but never defined.
# Adam with lr=1e-4 is the standard choice for this MNIST tutorial network.
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Accuracy: fraction of examples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# BUG FIX: `mnist` was referenced below but the dataset was never loaded.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(2001):
        # Train on a mini-batch of 50 images with 50% dropout.
        batch_xs, batch_ys = mnist.train.next_batch(50)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})

        if i % 100 == 0:
            # Evaluate on the full test set with dropout disabled (keep_prob=1.0).
            test_acc = sess.run(accuracy,
                                feed_dict={x: mnist.test.images,
                                           y_: mnist.test.labels,
                                           keep_prob: 1.0})
            # Typo fixed: 'Accuary' -> 'Accuracy'.
            print('Iter ' + str(i) + ', Testing Accuracy= ' + str(test_acc))

Iter 0, Testing Accuracy= 0.1715
Iter 100, Testing Accuracy= 0.8452
Iter 200, Testing Accuracy= 0.9048
Iter 300, Testing Accuracy= 0.9318
Iter 400, Testing Accuracy= 0.9411
Iter 500, Testing Accuracy= 0.9484
Iter 600, Testing Accuracy= 0.95
Iter 700, Testing Accuracy= 0.9555
Iter 800, Testing Accuracy= 0.9623
Iter 900, Testing Accuracy= 0.9599
Iter 1000, Testing Accuracy= 0.9639
Iter 1100, Testing Accuracy= 0.9669
Iter 1200, Testing Accuracy= 0.9692
Iter 1300, Testing Accuracy= 0.9695
Iter 1400, Testing Accuracy= 0.971
Iter 1500, Testing Accuracy= 0.9729
Iter 1600, Testing Accuracy= 0.9723
Iter 1700, Testing Accuracy= 0.9751
Iter 1800, Testing Accuracy= 0.9766
Iter 1900, Testing Accuracy= 0.9779
Iter 2000, Testing Accuracy= 0.9773