import tensorflow as tfw
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
# Load (downloading if needed) the MNIST dataset with one-hot labels.
# BUG FIX: the file imports `read_data_sets` directly (L3); there is no
# `input_data` module in scope, so the original call raised NameError.
mnist = read_data_sets('MNIST_data', one_hot=True)
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Build one fully connected layer and return its output tensor.

    Args:
        inputs: 2-D float tensor of shape [batch, in_size].
        in_size: number of input features.
        out_size: number of output units.
        activation_function: optional elementwise activation applied to the
            affine output; ``None`` yields a purely linear layer.

    Returns:
        Tensor of shape [batch, out_size].
    """
    # Weights drawn from a standard normal; biases start at a small positive
    # value (0.1) so ReLU-like activations are not dead at initialization.
    Weights = tfw.Variable(tfw.random_normal([in_size, out_size]))
    biases = tfw.Variable(tfw.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tfw.matmul(inputs, Weights) + biases
    if activation_function is None:
        return Wx_plus_b
    return activation_function(Wx_plus_b)
def compute_accuracy(v_xs, v_ys):
    """Return classification accuracy of the global `prediction` op.

    Args:
        v_xs: image batch fed to the `xs` placeholder (cast to float32).
        v_ys: one-hot label batch fed to the `ys` placeholder.

    Returns:
        Scalar accuracy in [0, 1] as produced by ``sess.run``.
    """
    # Relies on module-level `prediction`, `sess`, `xs`, `ys`.
    global prediction
    v_xs = v_xs.astype(np.float32)
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    # NOTE(review): these ops are added to the graph on every call, so the
    # graph grows with each evaluation; building them once outside the
    # function would avoid that (left as-is to preserve behavior).
    correct_prediction = tfw.equal(tfw.argmax(y_pre, 1), tfw.argmax(v_ys, 1))
    accuracy = tfw.reduce_mean(tfw.cast(correct_prediction, tfw.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
    return result
# Placeholders for flattened 28x28 images and their one-hot digit labels.
xs = tfw.placeholder(tfw.float32, [None, 784])
ys = tfw.placeholder(tfw.float32, [None, 10])
# Single softmax layer: a logistic-regression-style MNIST classifier.
prediction = add_layer(xs, 784, 10, activation_function=tfw.nn.softmax)
# Mean per-example cross-entropy; `reduction_indices` is the TF 1.x
# spelling of `axis`. NOTE(review): tfw.log(prediction) yields NaN if the
# softmax output ever reaches exactly 0 — clipping would change behavior,
# so it is only flagged here.
cross_entropy = tfw.reduce_mean(
    -tfw.reduce_sum(ys * tfw.log(prediction), reduction_indices=[1]))
train_step = tfw.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tfw.Session()
# FIX: initialize_all_variables() was deprecated (and later removed) in
# TF 1.x; global_variables_initializer() is the supported equivalent.
sess.run(tfw.global_variables_initializer())
# Train with SGD on mini-batches of 100 for 1000 steps, reporting test-set
# accuracy every 50 steps.
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    batch_xs = batch_xs.astype(np.float32)
    batch_ys = batch_ys.astype(np.float32)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
    if i % 50 == 0:
        print(compute_accuracy(mnist.test.images, mnist.test.labels))
# --- Sample run output (notebook output pasted into the file; commented
# --- out so the module remains valid Python) ---
# Extracting MNIST_data\train-images-idx3-ubyte.gz
# Extracting MNIST_data\train-labels-idx1-ubyte.gz
# Extracting MNIST_data\t10k-images-idx3-ubyte.gz
# Extracting MNIST_data\t10k-labels-idx1-ubyte.gz
# 0.0964
# 0.6393
# 0.747
# 0.7878
# 0.8094
# 0.82
# 0.8315
# 0.8421
# 0.8449
# 0.8527
# 0.8542
# 0.8595
# 0.8581
# 0.8631
# 0.8677
# 0.8723
# 0.872
# 0.872
# 0.8774
# 0.8742