import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import cv2
import numpy as np
# import os
# Load MNIST (one-hot labels) and rebuild the softmax-regression graph so the
# previously trained checkpoint can be restored for inference.
mnist = input_data.read_data_sets('./1/', one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 input images
W = tf.Variable(tf.zeros([784, 10]))          # weights  (values come from the checkpoint)
b = tf.Variable(tf.zeros([10]))               # biases   (values come from the checkpoint)
y = tf.nn.softmax(tf.matmul(x, W) + b)        # predicted class probabilities
y_ = tf.placeholder(tf.float32, [None, 10])   # ground-truth one-hot labels

# Cross-entropy loss. tf.log(y) is -inf when a softmax output underflows to 0,
# so clip to keep the value finite; `axis` replaces the deprecated
# `reduction_indices` keyword. (The loss is not used below — it is kept so the
# graph mirrors the training script this checkpoint came from.)
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)), axis=[1]))

# tf.argmax replaces the deprecated tf.arg_max alias.
x1 = tf.argmax(y, 1)    # predicted label index per sample
y1 = tf.argmax(y_, 1)   # true label index per sample

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
saver = tf.train.Saver()
saver.restore(sess, "./SoftmaxSaver/model.ckpt")  # restore the softmax model
# Show 10 individual test digits and compare the restored model's prediction
# against the true label for each one.
for i in range(10):
    bat_x, bat_y = mnist.test.next_batch(1)   # one (image, label) sample
    print(len(bat_x[0]))                      # 784 = flattened 28x28 pixels
    img = np.reshape(bat_x, (28, 28))
    cv2.imshow("s", img)
    cv2.waitKey()                             # blocks until a key is pressed
    # Both argmax ops yield integer labels, so print both with %d.
    print("forecast %d" % sess.run(x1, feed_dict={x: bat_x}))   # predicted label
    print("accurate %d" % sess.run(y1, feed_dict={y_: bat_y}))  # true label
# Whole-test-set evaluation: a prediction is correct when the argmax of the
# softmax output matches the argmax of the one-hot label.
# tf.argmax replaces the deprecated tf.arg_max alias.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print(sess.run(x1, feed_dict={x: mnist.test.images}))  # predicted labels for every test image
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))