A convolutional neural network (CNN) prepends several convolution and pooling layers to a traditional neural network.
These feed into fully connected layers, and from the fully connected layers onward it behaves exactly like a traditional network.
The convolution and pooling layers mitigate the drawbacks of traditional multi-layer networks: too many neurons and trainable parameters, and a correspondingly large demand for training samples.
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Size of each batch
batch_size = 100
# Total number of batches per epoch
n_batch = mnist.train.num_examples // batch_size
# Initialize weights
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

# Initialize biases
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
# Convolution layer
def conv2d(x, W):
    # x: input tensor of shape [batch, in_height, in_width, in_channels]
    # W: filter/kernel tensor of shape [filter_height, filter_width, in_channels, out_channels]
    # strides[0] = strides[3] = 1; strides[1] is the stride along x, strides[2] the stride along y
    # padding: a string, either 'SAME' or 'VALID'
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
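# Illustrative example: with SAME padding and stride 1, convolving a
# [100, 28, 28, 1] input with a [5, 5, 1, 32] kernel yields a [100, 28, 28, 32] output.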
# Pooling layer
def max_pool_2x2(x):
    # ksize = [1, x, y, 1]: pool over 2x2 windows, moving 2 pixels at a time
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Define two placeholders
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
# Reshape x into a 4-D tensor: [batch, in_height, in_width, in_channels]
x_image = tf.reshape(x, [-1, 28, 28, 1])
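# e.g. a feed of 100 flattened images with shape [100, 784] becomes [100, 28, 28, 1];
# the -1 lets TensorFlow infer the batch dimension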
# Initialize the weights and biases of the first convolution layer:
# a 5*5 convolution window with 32 kernels extracting features from one input plane
w_conv1 = weight_variable([5, 5, 1, 32])
# One bias per kernel
b_conv1 = bias_variable([32])
# Convolve x_image with the weight tensor, add the bias, then apply the ReLU activation
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
# Apply max pooling
h_pool1 = max_pool_2x2(h_conv1)
# Initialize the second convolution layer
w_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
# Convolve, then pool
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# A 28*28 image stays 28*28 after the first convolution (SAME padding); the first pooling halves it to 14*14.
# It stays 14*14 after the second convolution; the second pooling halves it to 7*7.
# The operations above therefore leave 64 feature planes of size 7*7.
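# Sanity check: with SAME padding, out = ceil(in / stride), so
# conv (stride 1): ceil(28/1) = 28, pool (stride 2): ceil(28/2) = 14,
# then ceil(14/1) = 14 and ceil(14/2) = 7.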
# Initialize the weights of the first fully connected layer:
# the previous layer has 7*7*64 neurons, the fully connected layer has 1024
w_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
# Flatten the pooling layer's output to one dimension
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
# Compute the output of the first fully connected layer
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
# keep_prob is the probability that each neuron's output is kept (dropout)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
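# During training keep_prob < 1 randomly zeroes activations to reduce overfitting
# (tf.nn.dropout rescales the survivors by 1/keep_prob); at test time feed 1.0.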
# Initialize the second fully connected layer
w_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
# Compute the logits of the second fully connected layer; softmax gives the prediction
logits = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
prediction = tf.nn.softmax(logits)
# Cross-entropy cost function. Note: softmax_cross_entropy_with_logits expects the raw
# logits and applies softmax internally, so passing the already-softmaxed prediction
# here would apply softmax twice and slow convergence.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
# Optimize with AdamOptimizer
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Store the per-example results in a list of booleans
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
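# tf.argmax(..., 1) returns the index of the largest entry per row, i.e. the predicted
# (or true) digit; averaging the casted booleans gives the fraction of correct predictions.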
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.7})
        # Evaluate on the test set once per epoch, with dropout disabled
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
        print('Epoch: ' + str(epoch) + ' acc: ' + str(acc))
Running results:
Accuracy is high right from the first epoch, convergence is fast, and the final test accuracy reaches about 99%.
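One practical caveat: feeding all 10,000 test images in a single sess.run, as the loop above does, can exhaust memory on small GPUs. Below is a minimal sketch of batched evaluation, reusing the mnist, accuracy, x, y, and keep_prob names defined above (batched_accuracy is a hypothetical helper, not part of the original listing):

# Hypothetical helper: average accuracy over the test set in fixed-size chunks
def batched_accuracy(sess, eval_batch_size=100):
    n = mnist.test.num_examples // eval_batch_size
    total = 0.0
    for _ in range(n):
        xs, ys = mnist.test.next_batch(eval_batch_size)
        total += sess.run(accuracy, feed_dict={x: xs, y: ys, keep_prob: 1.0})
    # All chunks are the same size, so the plain mean equals the full-set accuracy
    return total / n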