AutoEncoder: Compressing MNIST Data

# TF 1.x-style graph code, run on TensorFlow 2.x through the compat shim
import tensorflow.compat.v1 as tf
import tensorflow as tf2  # native TF2 namespace, used only for activation functions
tf.disable_v2_behavior()
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist

# Hyperparameters
learn_rate = 1e-3
batch_size = 128
size1 = 256         # width of the first hidden layer
size2 = 128         # width of the bottleneck (the compressed code)
pic_size = 28 * 28  # flattened MNIST image: 784 pixels

# Placeholder for a batch of flattened images
xs = tf.placeholder(tf.float32, [None, pic_size])

# func = tf2.nn.sigmoid  # alternative activation, compared at the end of the post
func = tf2.nn.relu

# Encoder: 784 -> 256 -> 128
output = tf.layers.dense(inputs=xs, units=size1, activation=func)
output = tf.layers.dense(inputs=output, units=size2, activation=func)

# Decoder: 128 -> 256 -> 784
output = tf.layers.dense(inputs=output, units=size1, activation=func)
output = tf.layers.dense(inputs=output, units=pic_size, activation=func)

# Reconstruction loss: pixel-wise MSE between the input and its reconstruction
loss = tf.losses.mean_squared_error(labels=xs, predictions=output)
train_op = tf.train.AdamOptimizer(learn_rate).minimize(loss)
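# Editor's note (not in the original post): the only path from xs to output
# runs through the 128-unit bottleneck, so minimizing pixel-wise MSE forces
# those 128 activations to retain enough information to redraw all 784 pixels.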


# Load MNIST, scale pixels to [0, 1], and flatten each image to a 784-dim vector
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = (x_train / 255).reshape(x_train.shape[0], -1)

sess = tf.Session()
sess.run(tf.global_variables_initializer())  # initialize all variables in the graph

for i in range(2000):
    # Sample a random mini-batch without replacement
    random_index = np.random.choice(x_train.shape[0], batch_size, replace=False)
    batch_xs = x_train[random_index]
    out_, los_, op_ = sess.run([output, loss, train_op], feed_dict={xs: batch_xs})
    if i % 50 == 0:
        print(i, los_)

# Top row: reconstructions from the last training batch; bottom row: the originals
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(10):
    a[0][i].imshow(out_[i].reshape(28, 28))
    a[1][i].imshow(batch_xs[i].reshape(28, 28))
plt.show()
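
The title promises compression, yet the script never reads the 128-dim bottleneck: `output` is overwritten layer by layer. A minimal sketch of how the network could be defined instead, keeping a handle on the code (the `encoded`/`decoded` names are my assumption, and `loss` would then compare `xs` against `decoded`):

# Replace the four output = ... layer definitions above with:
encoded = tf.layers.dense(inputs=xs, units=size1, activation=func)
encoded = tf.layers.dense(inputs=encoded, units=size2, activation=func)  # the 128-dim code
decoded = tf.layers.dense(inputs=encoded, units=size1, activation=func)
decoded = tf.layers.dense(inputs=decoded, units=pic_size, activation=func)

# After training, each 784-pixel image compresses to 128 floats:
codes = sess.run(encoded, feed_dict={xs: x_train[:10]})
print(codes.shape)  # (10, 128)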

relu:

[Figure: top row reconstructions vs. bottom row originals, ReLU activation]

sigmoid:

[Figure: top row reconstructions vs. bottom row originals, sigmoid activation]
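One plausible reason the two activations reconstruct differently (my reading, not argued in the original post): the input pixels live in [0, 1], sigmoid outputs are confined to exactly that range, while ReLU is unbounded above and clamps negatives to zero. A common variant keeps ReLU in the hidden layers and applies sigmoid only at the output:

# Hybrid sketch (assumption, not the post's setup): ReLU inside, sigmoid on the output
output = tf.layers.dense(inputs=xs, units=size1, activation=tf2.nn.relu)
output = tf.layers.dense(inputs=output, units=size2, activation=tf2.nn.relu)
output = tf.layers.dense(inputs=output, units=size1, activation=tf2.nn.relu)
output = tf.layers.dense(inputs=output, units=pic_size, activation=tf2.nn.sigmoid)  # keeps pixels in [0, 1]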
