AutoEncoder: MNIST Data Compression

import tensorflow.compat.v1 as tf
import tensorflow as tf2
tf.disable_v2_behavior()
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
# Hyperparameters
learn_rate = 1e-3   # Adam learning rate
batch_size = 128
size1 = 256         # width of the first encoder / last decoder layer
size2 = 128         # width of the bottleneck layer (the compressed code)
pic_size = 28 * 28  # each MNIST image flattened to a 784-dim vector
# TensorFlow placeholder for a batch of flattened images
xs = tf.placeholder(tf.float32, [None, pic_size])
# Activation used in every layer; sigmoid also works here
# func = tf2.nn.sigmoid
func = tf2.nn.relu
# Encoder: 784 -> 256 -> 128, then decoder: 128 -> 256 -> 784
output = tf.layers.dense(inputs=xs, units=size1, activation=func)
output = tf.layers.dense(inputs=output, units=size2, activation=func)  # bottleneck
output = tf.layers.dense(inputs=output, units=size1, activation=func)
output = tf.layers.dense(inputs=output, units=pic_size, activation=func)  # sigmoid is the more conventional choice for [0, 1] pixels
# Reconstruction loss: the network is trained to reproduce its own input
loss = tf.losses.mean_squared_error(labels=xs, predictions=output)
train_op = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Load MNIST, scale pixels to [0, 1], and flatten to (N, 784); the labels are not used
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = (x_train / 255.0).astype(np.float32).reshape(x_train.shape[0], -1)
sess = tf.Session()
sess.run(tf.global_variables_initializer())  # initialize variables in the graph
for i in range(2000):
    # Sample a random mini-batch; the input doubles as the training target
    random_index = np.random.choice(x_train.shape[0], batch_size, replace=False)
    batch_xs = x_train[random_index]
    out_, los_, op_ = sess.run([output, loss, train_op], feed_dict={xs: batch_xs})
    if i % 50 == 0:
        print(i, los_)
# Compare reconstructions (top row) against the original inputs (bottom row)
f, a = plt.subplots(2, 10, figsize=(10, 2))
for j in range(10):
    a[0][j].imshow(out_[j].reshape(28, 28))
    a[1][j].imshow(batch_xs[j].reshape(28, 28))
plt.show()
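
The script above only reconstructs images; the actual compression lives in the 128-unit bottleneck. A minimal sketch of how to read it out, assuming the same graph is rebuilt with the encoder output kept in its own variable (the names encoded and decoded are mine, not from the original post):

# Same architecture, but keep a handle on the bottleneck tensor
encoded = tf.layers.dense(inputs=xs, units=size1, activation=func)
encoded = tf.layers.dense(inputs=encoded, units=size2, activation=func)  # the 128-dim code
decoded = tf.layers.dense(inputs=encoded, units=size1, activation=func)
decoded = tf.layers.dense(inputs=decoded, units=pic_size, activation=func)

# After training, each 784-pixel image compresses to 128 floats
codes = sess.run(encoded, feed_dict={xs: x_train[:10]})
print(codes.shape)  # (10, 128)

Feeding codes back through the decoder half would then reconstruct, i.e. decompress, the images.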