import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
# MNIST handwritten digit data (labels 0-9), loaded with one-hot labels
mnist=input_data.read_data_sets('MNIST_data',one_hot=True)
def compute_accuracy(v_xs,v_ys):
    global prediction  # use the prediction tensor defined below
    y_pre=sess.run(prediction,feed_dict={xs:v_xs,keep_prob:1.})  # keep_prob=1: dropout is disabled at evaluation time
    correct_prediction=tf.equal(tf.argmax(y_pre,1),tf.argmax(v_ys,1))
    accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))  # fraction of samples predicted correctly
    result=sess.run(accuracy,feed_dict={xs:v_xs,ys:v_ys})
    return result
def weight_varible(shape):
    initial=tf.truncated_normal(shape,stddev=0.1)  # truncated normal initialization, commonly used for CNN weights
    return tf.Variable(initial)
def bias_varible(shape):
    initial=tf.constant(0.1,shape=shape)
    return tf.Variable(initial)
def conv2d(x,W):
    # strides=[1,x_movement,y_movement,1]; strides[0] and strides[3] must be 1
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')
def max_pool_2x2(x):
    # ksize is the pooling window, usually [1,height,width,1]: we do not pool over the
    # batch or channel dimensions, so the first and last entries are 1
    # strides work as in the conv layer; a stride of 2 on the x and y axes halves the image
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
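# Quick sanity check of the two helpers above; the demo_* names are just
# illustrative. With stride 1 and SAME padding the convolution keeps the
# 28x28 spatial size, and the 2x2 max pool with stride 2 halves it.
demo_x=tf.zeros([1,28,28,1])
demo_W=weight_varible([5,5,1,8])
print(conv2d(demo_x,demo_W).shape)                # (1, 28, 28, 8)
print(max_pool_2x2(conv2d(demo_x,demo_W)).shape)  # (1, 14, 14, 8)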
# define placeholders for the inputs to the network
xs=tf.placeholder(tf.float32,[None,784])  # each sample has 784 values: one per pixel of a 28x28 image
ys=tf.placeholder(tf.float32,[None,10])   # one-hot label for digits 0-9
keep_prob=tf.placeholder(tf.float32)      # dropout keep probability (fed at run time)
x_image=tf.reshape(xs,[-1,28,28,1])
# -1 lets TensorFlow infer the batch size, so the result is [n,28,28,1]
# (n images of 28x28 pixels with a single channel)
#print(x_image.shape)  # [n,28,28,1]
##conv1 layer##
W_conv1=weight_varible([5,5,1,32])
# patch=5x5, in size=1 (the depth / number of channels of the input image), out size=32 (number of filters)
b_conv1=bias_varible([32])
h_conv1=tf.nn.relu(conv2d(x_image,W_conv1)+b_conv1)
#hidden layer 1,output size=28x28x32
h_pool1=max_pool_2x2(h_conv1)#output size=14x14x32
# note: SAME padding keeps the image size unchanged only when strides=1
##conv2 layer##
W_conv2=weight_varible([5,5,32,64])
b_conv2=bias_varible([64])
h_conv2=tf.nn.relu(conv2d(h_pool1,W_conv2)+b_conv2)
#output size=14x14x64
h_pool2=max_pool_2x2(h_conv2)#output size=7x7x64
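# Trace of the static shapes, to confirm the sizes noted above
# (these prints only inspect the graph; nothing is executed yet):
print(h_conv1.shape)  # (?, 28, 28, 32)
print(h_pool1.shape)  # (?, 14, 14, 32)
print(h_conv2.shape)  # (?, 14, 14, 64)
print(h_pool2.shape)  # (?, 7, 7, 64)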
##func1 layer##
W_fc1=weight_varible([7*7*64,1024])  # 1024 is an arbitrary choice for the hidden layer width
b_fc1=bias_varible([1024])
h_pool2_flat=tf.reshape(h_pool2,[-1,7*7*64])
# flatten to [n,7*7*64]
h_fc1=tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1)+b_fc1)
h_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)  # use the keep_prob placeholder so dropout is active only during training
##func2 layer##
W_fc2=weight_varible([1024,10])  # 10 outputs, one for each digit 0-9
b_fc2=bias_varible([10])
prediction=tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2)+b_fc2)
#the error between prediction and the real data
cross_entropy=tf.reduce_mean(-tf.reduce_sum(ys*tf.log(prediction),reduction_indices=[1]))
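# Note: taking log(softmax) explicitly can underflow; TensorFlow's combined op
# is a more numerically stable way to write the same loss. Shown here only as
# an optional alternative (commented out, so the loss above is what is trained):
#logits=tf.matmul(h_fc1_drop,W_fc2)+b_fc2
#cross_entropy=tf.reduce_mean(
#    tf.nn.softmax_cross_entropy_with_logits(labels=ys,logits=logits))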
train_step=tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
sess=tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(1000):
    batch_xs,batch_ys=mnist.train.next_batch(100)  # train on mini-batches of 100 samples
    sess.run(train_step,feed_dict={xs:batch_xs,ys:batch_ys,keep_prob:0.5})  # keep_prob=0.5: dropout active during training
    if i%50==0:
        print(compute_accuracy(mnist.test.images,mnist.test.labels))
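# Example use of the trained network: query its prediction for the first test
# image (argmax over the 10 softmax outputs gives the predicted digit).
first_pred=sess.run(prediction,feed_dict={xs:mnist.test.images[:1],keep_prob:1.})
print('predicted digit:',np.argmax(first_pred),'true digit:',np.argmax(mnist.test.labels[0]))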
# CNN with 2 convolutional layers and 2 pooling layers