Complete autoencoder

A stacked autoencoder on MNIST: the encoder compresses each 784-pixel image through layers of 256, 64, and 16 units down to a 2-dimensional code, and the decoder mirrors those layers to reconstruct the 784 pixels. After training, the script shows reconstructed test digits and scatter-plots the 2-D codes colored by digit label.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt

mnist = input_data.read_data_sets("mnist", one_hot=True)

learning_rate = 0.01
# Layer sizes: 784 -> 256 -> 64 -> 16 -> 2, mirrored on the way back up
n_hidden_1 = 256
n_hidden_2 = 64
n_hidden_3 = 16
n_hidden_4 = 2
n_input = 784  # 28 * 28 pixels

x = tf.placeholder(tf.float32, [None, n_input])
y = x  # an autoencoder reconstructs its own input, so the target is x itself

# Encoder and decoder parameters; the decoder mirrors the encoder's shapes
weights = {
    "encoder_h1": tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    "encoder_h2": tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    "encoder_h3": tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
    "encoder_h4": tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4])),
    "decoder_h1": tf.Variable(tf.random_normal([n_hidden_4, n_hidden_3])),
    "decoder_h2": tf.Variable(tf.random_normal([n_hidden_3, n_hidden_2])),
    "decoder_h3": tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    "decoder_h4": tf.Variable(tf.random_normal([n_hidden_1, n_input]))
}
biases = {
    "encoder_b1": tf.Variable(tf.random_normal([n_hidden_1])),
    "encoder_b2": tf.Variable(tf.random_normal([n_hidden_2])),
    "encoder_b3": tf.Variable(tf.random_normal([n_hidden_3])),
    "encoder_b4": tf.Variable(tf.random_normal([n_hidden_4])),
    "decoder_b1": tf.Variable(tf.random_normal([n_hidden_3])),
    "decoder_b2": tf.Variable(tf.random_normal([n_hidden_2])),
    "decoder_b3": tf.Variable(tf.random_normal([n_hidden_1])),
    "decoder_b4": tf.Variable(tf.random_normal([n_input]))
}
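Drawing initial weights from a plain standard normal can saturate the sigmoid layers early in training. A common alternative, sketched below under the assumption of TensorFlow 1.x (where tf.get_variable and tf.glorot_uniform_initializer are available), is Xavier/Glorot initialization; the snippet is illustrative only and is not part of the original script:

# Optional variant (not in the original): Xavier/Glorot initialization
# for one of the weight matrices; the other entries would follow the same pattern.
encoder_h1 = tf.get_variable("encoder_h1", shape=[n_input, n_hidden_1],
                             initializer=tf.glorot_uniform_initializer())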

def encoder(x):
    # Four sigmoid layers compress the 784 inputs down to a 2-D code
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights["encoder_h1"]), biases["encoder_b1"]))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights["encoder_h2"]), biases["encoder_b2"]))
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights["encoder_h3"]), biases["encoder_b3"]))
    layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights["encoder_h4"]), biases["encoder_b4"]))
    return layer_4


def decoder(x):
    # Mirror image of the encoder: expand the 2-D code back to 784 outputs
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights["decoder_h1"]), biases["decoder_b1"]))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights["decoder_h2"]), biases["decoder_b2"]))
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights["decoder_h3"]), biases["decoder_b3"]))
    layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights["decoder_h4"]), biases["decoder_b4"]))
    return layer_4
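Because the final encoder layer is also a sigmoid, every 2-D code is squashed into the unit square, which cramps the scatter plot at the end of the script. A common variant, shown here as a sketch rather than as part of the original code, replaces the encoder's last line with a linear layer:

# Variant (not in the original): linear bottleneck, so the 2-D codes
# are not confined to (0, 1); drop the sigmoid on the last encoder layer.
layer_4 = tf.add(tf.matmul(layer_3, weights["encoder_h4"]), biases["encoder_b4"])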

encoder_op = encoder(x)
y_pred = decoder(encoder_op)

# Mean squared reconstruction error between the input and its reconstruction
cost = tf.reduce_mean(tf.pow(y - y_pred, 2))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
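Since MNIST pixels lie in [0, 1] and the decoder ends in a sigmoid, a pixel-wise binary cross-entropy is a common alternative to the MSE loss above. A minimal sketch, not from the original post (the eps constant is only a numerical guard I am adding for illustration):

# Alternative loss (optional): binary cross-entropy on the sigmoid outputs;
# to use it, minimize cost_ce instead of cost above.
eps = 1e-10  # guards against log(0)
cost_ce = -tf.reduce_mean(y * tf.log(y_pred + eps)
                          + (1 - y) * tf.log(1 - y_pred + eps))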

training_epochs = 20
batch_size = 256
display_step = 1

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    total_batch = mnist.train.num_examples // batch_size
    for epoch in range(training_epochs):
        for i in range(total_batch):
            # Labels are discarded: the autoencoder trains unsupervised
            batch_xs, _ = mnist.train.next_batch(batch_size)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs})
        if epoch % display_step == 0:
            print("Epoch", "%04d" % (epoch + 1), "cost=", "{:.9f}".format(c))
    print("Finished")
    
    # Visualization: original test digits (top row) vs. reconstructions (bottom row)
    show_num = 10
    encoder_decoder = sess.run(y_pred, feed_dict={x: mnist.test.images[:show_num]})
    f, a = plt.subplots(2, show_num, figsize=(10, 2))
    for i in range(show_num):
        a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
        a[1][i].imshow(np.reshape(encoder_decoder[i], (28, 28)))
    plt.show()
    
    # Scatter-plot the 2-D codes of all test images, colored by digit label
    aa = [np.argmax(l) for l in mnist.test.labels]
    encoder_result = sess.run(encoder_op, feed_dict={x: mnist.test.images})
    plt.scatter(encoder_result[:, 0], encoder_result[:, 1], c=aa)
    print(encoder_result[:, 0].shape)
    print(encoder_result[:, 1].shape)
    plt.colorbar()
    plt.show()
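The tf.placeholder / tf.Session / input_data APIs above belong to TensorFlow 1.x and were removed in 2.x. For readers on a current install, a minimal tf.keras sketch of the same 784-256-64-16-2 architecture and MSE objective might look like this; it is an assumption-laden equivalent (TF 2.x, Keras functional API), not the original code:

import tensorflow as tf  # assumes TensorFlow 2.x

# Load MNIST and flatten to 784-dim vectors in [0, 1]; labels are unused
(x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0

inputs = tf.keras.Input(shape=(784,))
h = inputs
for units in (256, 64, 16):
    h = tf.keras.layers.Dense(units, activation="sigmoid")(h)
code = tf.keras.layers.Dense(2, activation="sigmoid")(h)  # 2-D bottleneck
h = code
for units in (16, 64, 256):
    h = tf.keras.layers.Dense(units, activation="sigmoid")(h)
outputs = tf.keras.layers.Dense(784, activation="sigmoid")(h)

autoencoder = tf.keras.Model(inputs, outputs)
autoencoder.compile(optimizer=tf.keras.optimizers.Adam(0.01), loss="mse")
# Input and target are the same tensor, as in the TF 1.x script
autoencoder.fit(x_train, x_train, epochs=20, batch_size=256)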