Implementing an Autoencoder in TensorFlow

The script below builds an additive-Gaussian-noise (denoising) autoencoder: the input is corrupted with Gaussian noise, passed through a single softplus hidden layer, and reconstructed by a linear output layer. Training minimizes the squared reconstruction error against the clean input with Adam, using the MNIST images.
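In outline, the forward pass and cost look as follows. This is only an illustrative NumPy sketch of what the TensorFlow graph below computes (the function name and argument order are made up for this note; the real script uses TensorFlow variables and a placeholder-fed noise scale):

import numpy as np

def forward_cost(x, w1, b1, w2, b2, scale=0.1):
    # x: (batch, n_input); w1: (n_input, n_hidden); w2: (n_hidden, n_input)
    noisy = x + scale * np.random.normal(size=x.shape[1])   # one noise vector, broadcast over the batch
    hidden = np.log1p(np.exp(noisy @ w1 + b1))              # softplus activation
    reconstruction = hidden @ w2 + b2                       # linear decoder
    return 0.5 * np.sum((reconstruction - x) ** 2)          # squared-error cost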

# -*- coding:utf-8 -*-
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf  # TensorFlow 1.x API (placeholders / sessions)
from tensorflow.examples.tutorials.mnist import input_data
def xavier_init(fan_in, fan_out, constant=1):
    """Xavier/Glorot uniform initialization: U(-sqrt(6/(fan_in+fan_out)), +sqrt(6/(fan_in+fan_out)))."""
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32)

class AdditiveGaussianNoiseAutoencoder(object):
    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer(), scale=0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)  # noise scale, fed in at run time
        self.training_scale = scale
        network_weights = self._initialize_weights()
        self.weights = network_weights

        # Encoder: corrupt the input with additive Gaussian noise, then apply one softplus layer.
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(
            self.x + self.scale * tf.random_normal((n_input,)),
            self.weights["w1"]), self.weights["b1"]
        ))
        # Decoder: linear reconstruction of the clean input.
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights["w2"]), self.weights["b2"])

        # Squared-error reconstruction cost, minimized with the supplied optimizer.
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        all_weights = dict()
        # Encoder weights use Xavier initialization; decoder weights and all biases start at zero.
        all_weights["w1"] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
        all_weights["b1"] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights["w2"] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights["b2"] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        # Run one training step on a mini-batch and return that batch's cost.
        cost, opt = self.sess.run((self.cost, self.optimizer),
                                  feed_dict={self.x: X, self.scale: self.training_scale})
        return cost

    def calc_total_cost(self, X):
        # Evaluate the cost on X without taking an optimization step.
        return self.sess.run(self.cost, feed_dict={self.x: X, self.scale: self.training_scale})

    def transform(self, X):
        # Return the hidden-layer features for X.
        return self.sess.run(self.hidden, feed_dict={self.x: X, self.scale: self.training_scale})

    def generate(self, hidden=None):
        # Decode a hidden representation back to input space; defaults to one random code.
        if hidden is None:
            hidden = np.random.normal(size=(1, self.n_hidden))
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})

    def reconstruct(self, X):
        return self.sess.run(self.reconstruction, feed_dict={self.x:X, self.scale:self.training_scale})

    def getWeights(self):
        return self.sess.run(self.weights["w1"])

    def getBiases(self):
        return self.sess.run(self.weights["b1"])

mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
def standard_scale(X_train, X_test):
    # Fit the scaler on the training set only, then apply the same transform to both sets.
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test

def get_random_block_from_data(data, batch_size):
    # Take a random contiguous block of batch_size samples (sampling with replacement across calls).
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]

X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1

autoencoder = AdditiveGaussianNoiseAutoencoder(n_input=784, n_hidden=200, transfer_function=tf.nn.softplus,
                                               optimizer=tf.train.AdamOptimizer(learning_rate=0.001),scale=0.01)

for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(n_samples / batch_size)
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)
        cost = autoencoder.partial_fit(batch_xs)
        avg_cost += cost / n_samples * batch_size

    if epoch % display_step == 0:
        print("Epoch:", '%04d' %(epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print("Total cost:" + str(autoencoder.calc_total_cost(X_test)))

Output:

Epoch: 0001 cost= 18638.615887500
Epoch: 0002 cost= 12971.389887500
Epoch: 0003 cost= 10659.026995455
Epoch: 0004 cost= 9836.657871023
Epoch: 0005 cost= 9484.297226705
Epoch: 0006 cost= 9271.624574432
Epoch: 0007 cost= 8899.998528409
Epoch: 0008 cost= 9324.328033523
Epoch: 0009 cost= 8969.404742045
Epoch: 0010 cost= 8170.966078409
Epoch: 0011 cost= 8705.253703977
Epoch: 0012 cost= 8673.962509659
Epoch: 0013 cost= 8482.184548864
Epoch: 0014 cost= 8119.183536932
Epoch: 0015 cost= 8492.594153409
Epoch: 0016 cost= 7844.617388636
Epoch: 0017 cost= 7948.973987500
Epoch: 0018 cost= 8431.035121591
Epoch: 0019 cost= 7783.220775568
Epoch: 0020 cost= 7945.380931250
Total cost:685076.2
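
As a quick check after training, the model's transform and reconstruct methods can be used to compare a few test digits with their reconstructions. A minimal sketch, assuming matplotlib is installed (the 28x28 reshape and the plotting layout are illustrative choices, not part of the script above):

import matplotlib.pyplot as plt

n_show = 5
samples = X_test[:n_show]                     # standardized test digits
recons = autoencoder.reconstruct(samples)     # encode + decode
codes = autoencoder.transform(samples)        # 200-dimensional hidden features
print("hidden feature shape:", codes.shape)   # (5, 200)

fig, axes = plt.subplots(2, n_show, figsize=(10, 4))
for i in range(n_show):
    axes[0, i].imshow(samples[i].reshape(28, 28), cmap="gray")  # original (standardized)
    axes[1, i].imshow(recons[i].reshape(28, 28), cmap="gray")   # reconstruction
    axes[0, i].axis("off")
    axes[1, i].axis("off")
plt.show()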
