Implementing an Autoencoder with TensorFlow

DBNs (Deep Belief Networks) are built from multiple layers of Restricted Boltzmann Machines (RBMs).
A common recipe is to first pretrain the network without labels, using an autoencoder-style objective to extract features and initialize the weights, and then train it in a supervised fashion with the labeled data. Autoencoders are not limited to pretraining for supervised models, though: they can also be used directly for feature extraction and analysis.
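
Before the TensorFlow code, here is a minimal NumPy sketch of what the additive-Gaussian-noise (denoising) autoencoder below computes; the toy shapes, the 0.1 noise scale, and the softplus helper are illustrative choices, not part of the original implementation:

import numpy as np

def softplus(z):
    return np.log1p(np.exp(z))  # same nonlinearity as tf.nn.softplus

x = np.random.rand(1, 784)                 # one 784-dimensional input (e.g. an MNIST image)
w1 = np.random.randn(784, 200) * 0.01      # encoder weights
b1 = np.zeros(200)
w2 = np.zeros((200, 784))                  # decoder weights
b2 = np.zeros(784)

noisy_x = x + 0.1 * np.random.randn(784)          # corrupt the input with additive Gaussian noise
hidden = softplus(noisy_x.dot(w1) + b1)           # encoder: noisy input -> hidden code
reconstruction = hidden.dot(w2) + b2              # linear decoder: hidden code -> reconstruction
cost = 0.5 * np.sum((reconstruction - x) ** 2)    # squared error against the clean input

The TensorFlow version below builds exactly this computation as a graph and lets Adam minimize the cost.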

import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def xavier_init(fan_in, fan_out, constant=1):
    # Xavier/Glorot uniform initialization: draw weights uniformly from
    # [-sqrt(6/(fan_in+fan_out)), +sqrt(6/(fan_in+fan_out))] so that the
    # variance of activations stays roughly constant across layers.
    low = -constant*np.sqrt(6.0/(fan_in+fan_out))
    high = constant*np.sqrt(6.0/(fan_in+fan_out))
    return tf.random_uniform((fan_in,fan_out),
                             minval=low, maxval=high,
                             dtype=tf.float32)
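
A quick way to sanity-check xavier_init is to evaluate it once and confirm the values stay inside the Glorot bound; the throwaway session below is only for illustration:

with tf.Session() as sess:
    w = sess.run(xavier_init(784, 200))
    print(w.shape)            # (784, 200)
    print(w.min(), w.max())   # both within +/- sqrt(6/(784+200)) ~= +/- 0.078
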
class AdditiveGaussianNoiseAutoencoder():
    def __init__(self,n_input,n_hidden,transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer(),scale=0.1):
        self.n_input=n_input
        self.n_hidden=n_hidden
        self.transfer=transfer_function
        self.scale=tf.placeholder(tf.float32)
        self.training_scale=scale
        network_weights = self._initialize_weights()
        self.weights=network_weights

        self.x=tf.placeholder(tf.float32,[None,self.n_input])
        # Corrupt the input with additive Gaussian noise before encoding;
        # self.scale is the placeholder fed at run time, so the noise level
        # actually follows the value passed in feed_dict.
        self.hidden=self.transfer(tf.add(
            tf.matmul(self.x+self.scale*tf.random_normal((n_input,)),
                      self.weights['w1']),self.weights['b1']))
        self.reconstruction=tf.add(tf.matmul(self.hidden,self.weights['w2']),
                                   self.weights['b2'])
        # Squared-error reconstruction loss against the clean (un-noised) input.
        self.cost=0.5*tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction,
                                                       self.x), 2.0))
        self.optimizer=optimizer.minimize(self.cost)
        init=tf.global_variables_initializer()
        self.sess=tf.Session()
        self.sess.run(init)
    def _initialize_weights(self):
        all_weights=dict()
        all_weights['w1']=tf.Variable(xavier_init(self.n_input,self.n_hidden))
        all_weights['b1']=tf.Variable(tf.zeros([self.n_hidden],
                                               dtype=tf.float32))
        # The decoder output layer is linear here, so zero-initializing w2/b2
        # works; only the encoder weights use Xavier initialization.
        all_weights['w2']=tf.Variable(tf.zeros([self.n_hidden,self.n_input],
                                               dtype=tf.float32))
        all_weights['b2']=tf.Variable(tf.zeros([self.n_input],
                                               dtype=tf.float32))
        return all_weights
    def partial_fit(self,X):
        cost, opt=self.sess.run((self.cost,self.optimizer),
                                feed_dict={self.x: X,
                                           self.scale: self.training_scale})
        return cost
    def calc_total_cost(self,X):
        return self.sess.run(self.cost,
                             feed_dict={self.x :X,
                                        self.scale: self.training_scale})
    def transform(self,X):
        return self.sess.run(self.hidden,
                             feed_dict={self.x :X,
                                        self.scale: self.training_scale})
    def generate(self,hidden=None):
        # Decode a hidden code back into input space; sample a random code if none is given.
        # (Passing self.weights['b1'] as a size, as the original did, raises a TypeError.)
        if hidden is None:
            hidden=np.random.normal(size=(1,self.n_hidden))
        return self.sess.run(self.reconstruction,
                             feed_dict={self.hidden: hidden})
    def reconstruct(self, X):
        return self.sess.run(self.reconstruction,feed_dict={
            self.x: X, self.scale:self.training_scale
        })
    def getWeights(self):
        return self.sess.run(self.weights['w1'])
    def getBiases(self):
        return self.sess.run(self.weights['b1'])

mnist=input_data.read_data_sets('MNIST_data', one_hot=True)
def standard_scale(X_train, X_test):
    # Fit the scaler on the training set only, then apply the same transform
    # to the test set so no test-set statistics leak into preprocessing.
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train=preprocessor.transform(X_train)
    X_test=preprocessor.transform(X_test)
    return X_train, X_test

def get_random_block_from_data(data, batch_size):
    # Take a random contiguous block: this samples with replacement across
    # iterations rather than making a strict pass over the whole dataset.
    start_index = np.random.randint(0, len(data)-batch_size)
    return data[start_index:(start_index+batch_size)]
X_train, X_test=standard_scale(mnist.train.images, mnist.test.images)
n_samples=int(mnist.train.num_examples)
training_epochs=20
batch_size=128
display_step=1
autoencoder=AdditiveGaussianNoiseAutoencoder(n_input=784,
              n_hidden=200,
              transfer_function=tf.nn.softplus,
              optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
              scale=0.1)
for epoch in range(training_epochs):
    avg_cost=0
    total_batch=int(n_samples/batch_size)
    for i in range(total_batch):
        batch_xs=get_random_block_from_data(X_train,batch_size)
        cost=autoencoder.partial_fit(batch_xs)
        avg_cost += cost/n_samples*batch_size
    if epoch % display_step==0:
        print('Epoch:',epoch+1,'Cost:',avg_cost)
print('Total_cost',autoencoder.calc_total_cost(X_test))
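
As mentioned in the introduction, the trained autoencoder can also serve directly as a feature extractor for a downstream supervised model. A minimal sketch using scikit-learn's LogisticRegression (the classifier choice and the argmax over the one-hot labels are assumptions, not part of the original post):

from sklearn.linear_model import LogisticRegression

train_features = autoencoder.transform(X_train)   # 200-dimensional hidden codes
test_features = autoencoder.transform(X_test)
clf = LogisticRegression(max_iter=1000)
clf.fit(train_features, np.argmax(mnist.train.labels, axis=1))
print('Downstream test accuracy:',
      clf.score(test_features, np.argmax(mnist.test.labels, axis=1)))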
