# MNIST数据可视化

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
# Bug fix: `mnist` was used below without ever being defined.
# read_data_sets downloads (if needed) and loads the MNIST dataset.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print (mnist.train.images.shape)
print (mnist.test.images.shape)
'''
(55000, 784)
(10000, 784)
'''


784是28×28，所以可以知道训练集是55000张28×28大小的图片，测试集是10000张28×28大小的图片。

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
# Bug fix: `mnist` was used below without ever being defined.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Show the first training image; each row of `images` is a flattened
# 784-pixel vector, so reshape it back to 28x28 before plotting.
plt.imshow(mnist.train.images[0,:].reshape(28,28))
plt.show()


# TensorFlow implementation

learning_rate = 0.01 # learning rate for gradient descent
num_steps = 30000  # one batch per step; this is the number of training steps
batch_size = 256  # number of samples per batch
display_step = 1000  # print the loss every 1000 steps so we can watch it decrease

num_hidden_1=  256  # units in the first hidden layer; by symmetry, also the fourth hidden layer
num_hidden_2 = 128  # units in the second hidden layer; by symmetry, also the third hidden layer
num_input = 784 # input feature dimension: the input matrix is [55000, 784], where 55000 is the
                # number of images and 784 the pixels per image (our features)


# Placeholder for a batch of flattened 28x28 images (rows of 784 floats).
X = tf.placeholder(tf.float32,[None,num_input])
# Weight matrices for the symmetric encoder/decoder:
# encoder: 784 -> 256 -> 128, decoder: 128 -> 256 -> 784.
weights ={
"encoder_h1":tf.Variable(tf.random_normal([num_input,num_hidden_1])),
"encoder_h2":tf.Variable(tf.random_normal([num_hidden_1,num_hidden_2])),
"decoder_h1":tf.Variable(tf.random_normal([num_hidden_2,num_hidden_1])),
"decoder_h2":tf.Variable(tf.random_normal([num_hidden_1,num_input]))
}
# Bias vectors matching the output size of each layer above.
biases = {
"encoder_b1":tf.Variable(tf.random_normal([num_hidden_1])),
"encoder_b2":tf.Variable(tf.random_normal([num_hidden_2])),
"decoder_b1":tf.Variable(tf.random_normal([num_hidden_1])),
"decoder_b2":tf.Variable(tf.random_normal([num_input]))
}


def encoder(X):
    """Map a batch of inputs to the latent code.

    Two fully-connected sigmoid layers: 784 -> 256 -> 128, using the
    module-level `weights`/`biases` dictionaries.
    """
    # Bug fix: the original function had no body and returned an
    # undefined `layer_2`.
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(X, weights["encoder_h1"]),
                                   biases["encoder_b1"]))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights["encoder_h2"]),
                                   biases["encoder_b2"]))
    return layer_2

def decoder(X):
    """Reconstruct the input from the latent code.

    Mirror of the encoder: 128 -> 256 -> 784 with sigmoid activations,
    so the output is in [0, 1] like the normalized pixel values.
    """
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(X, weights["decoder_h1"]),
                                   biases["decoder_b1"]))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights["decoder_h2"]),
                                   biases["decoder_b2"]))
    return layer_2

# Wire the graph: the autoencoder output is decode(encode(X)).
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)


# The reconstruction is the prediction; the original input is the target.
y_train = decoder_op
y_true = X
# Mean squared error between input and reconstruction.
loss = tf.reduce_mean(tf.pow(y_true-y_train,2))
# RMSProp minimizes the reconstruction error.
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)


init = tf.global_variables_initializer()
with tf.Session() as sess:
    # Bug fix: the original block had lost all indentation and was not
    # syntactically valid Python; structure restored here.
    sess.run(init)

    # --- Training loop: one mini-batch per step. ---
    for step in range(1, num_steps + 1):
        X_batch, _ = mnist.train.next_batch(batch_size)
        _, l = sess.run([optimizer, loss], feed_dict={X: X_batch})
        if step % display_step == 0 or step == 1:
            print ("MiniBatch loss after %i is %f" % (step, l))

    # --- Visualization: an n x n grid of originals vs reconstructions. ---
    n = 4
    canvas_orig = np.empty((28 * n, 28 * n))
    canvas_recon = np.empty((28 * n, 28 * n))
    for i in range(n):
        # One row of the canvas per test batch of n images.
        X_batch, _ = mnist.test.next_batch(n)
        g = sess.run(decoder_op, feed_dict={X: X_batch})
        for j in range(n):
            canvas_orig[i*28:(i+1)*28, j*28:(j+1)*28] = X_batch[j].reshape(28, 28)
        for j in range(n):
            canvas_recon[i*28:(i+1)*28, j*28:(j+1)*28] = g[j].reshape(28, 28)

    print ("Original Images")
    plt.figure(figsize=(n, n))
    plt.imshow(canvas_orig, origin="upper", cmap="gray")
    plt.show()
    print ("Reconstructed Images")
    plt.figure(figsize=(n, n))
    plt.imshow(canvas_recon, origin="upper", cmap="gray")
    plt.show()


MiniBatch loss after 1 is 0.439250
MiniBatch loss after 1000 is 0.118592
MiniBatch loss after 2000 is 0.101509
MiniBatch loss after 3000 is 0.095219
MiniBatch loss after 4000 is 0.089146
MiniBatch loss after 5000 is 0.086151
MiniBatch loss after 6000 is 0.083348
MiniBatch loss after 7000 is 0.078410
MiniBatch loss after 8000 is 0.077592
MiniBatch loss after 9000 is 0.071425
MiniBatch loss after 10000 is 0.069894
MiniBatch loss after 11000 is 0.066917
MiniBatch loss after 12000 is 0.065995
MiniBatch loss after 13000 is 0.064769
MiniBatch loss after 14000 is 0.062543
MiniBatch loss after 15000 is 0.062631
MiniBatch loss after 16000 is 0.059229
MiniBatch loss after 17000 is 0.058978
MiniBatch loss after 18000 is 0.055369
MiniBatch loss after 19000 is 0.056187
MiniBatch loss after 20000 is 0.050991
MiniBatch loss after 21000 is 0.051912
MiniBatch loss after 22000 is 0.051403
MiniBatch loss after 23000 is 0.048605
MiniBatch loss after 24000 is 0.048961
MiniBatch loss after 25000 is 0.048916
MiniBatch loss after 26000 is 0.048469
MiniBatch loss after 27000 is 0.048156
MiniBatch loss after 28000 is 0.046060
MiniBatch loss after 29000 is 0.045244
MiniBatch loss after 30000 is 0.045483