AGN

An Additive Gaussian Noise Autoencoder on the MNIST Dataset

Xavier initialization:

The parameters of a deep network should be initialized neither too small nor too large, so that the signal neither shrinks step by step and vanishes nor grows step by step and explodes during the forward and backward passes. Xavier initialization draws the weights from a zero-mean uniform or Gaussian distribution with variance 2/(n_in + n_out). For the uniform case, U(-a, a) has variance a^2/3, so setting a = sqrt(6/(n_in + n_out)) yields exactly this variance; that bound is what the xavier() function below uses.
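As a quick numeric sanity check (this snippet is an addition, not part of the original post), uniform samples drawn with the bound sqrt(6/(n_in+n_out)) should show a variance close to 2/(n_in+n_out):

import numpy as np
fan_in,fan_out=784,200
a=np.sqrt(6.0/(fan_in+fan_out))        # Var(U(-a,a)) = a**2/3 = 2/(fan_in+fan_out)
samples=np.random.uniform(-a,a,size=100000)
print(samples.var())                   # ~0.00203
print(2.0/(fan_in+fan_out))            # 0.00203...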

Code (TensorFlow 1.x):

import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
def xavier(fan_in,fan_out,constant=1):
    low=-constant*np.sqrt(6.0/(fan_in+fan_out))
    high=constant*np.sqrt(6.0/(fan_in+fan_out))
    return tf.random_uniform((fan_in,fan_out),
                             minval=low,maxval=high,
                             dtype=tf.float32)
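
# The Gaussian variant mentioned above, as a sketch (not in the original
# code): draw weights from N(0, 2/(fan_in+fan_out)) instead of U(-a, a).
def xavier_gaussian(fan_in,fan_out):
    stddev=np.sqrt(2.0/(fan_in+fan_out))
    return tf.random_normal((fan_in,fan_out),mean=0.0,stddev=stddev,dtype=tf.float32)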

class AdditiveGaussianNoiseAutoencoder(object):
    def __init__(self,n_input,n_hidden,  # numbers of input and hidden units
                 transfer_function=tf.nn.softplus,  # hidden-layer activation
                 optimizer=tf.train.AdamOptimizer(),  # optimizer instance, Adam by default
                 scale=0.1):  # noise level used during training
        self.n_input=n_input
        self.n_hidden=n_hidden
        self.transfer=transfer_function
        self.scale=tf.placeholder(tf.float32)
        self.training_scale=scale
        network_weight=self._initialize_weights()
        self.weights=network_weight

        # Define the network structure. Gaussian noise scaled by the
        # self.scale placeholder is added to the input before encoding,
        # so the noise level can be controlled through feed_dict.
        self.x=tf.placeholder(tf.float32,[None,self.n_input])
        self.hidden=self.transfer(tf.add(tf.matmul(self.x+self.scale*tf.random_normal((self.n_input,)),
                                                   self.weights['w1']),self.weights['b1']))
        self.reconstruction=tf.add(tf.matmul(self.hidden,self.weights['w2']),
                                   self.weights['b2'])

        # Define the loss: 0.5 * sum of squared reconstruction errors
        self.cost=0.5*tf.reduce_sum(tf.pow(tf.subtract(
            self.reconstruction,self.x),2.0))
        self.optimizer=optimizer.minimize(self.cost)

        init=tf.global_variables_initializer()
        self.sess=tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        all_weights=dict()
        all_weights['w1']=tf.Variable(xavier(self.n_input,self.n_hidden))
        all_weights['b1']=tf.Variable(tf.zeros([self.n_hidden],dtype=tf.float32))
        all_weights['w2']=tf.Variable(tf.zeros([self.n_hidden,self.n_input],dtype=tf.float32))
        all_weights['b2']=tf.Variable(tf.zeros([self.n_input],dtype=tf.float32))
        return all_weights
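
    # Design note: only w1 uses Xavier initialization; w2 and both biases
    # start at zero. That is harmless here: the output layer is linear, and
    # w2's gradient depends on the hidden activations, which softplus keeps
    # strictly positive, so w2 still receives nonzero updates.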

    # Next, wrap the actual computations into methods, i.e. define the
    # subgraphs of the computation graph.
    #  partial_fit: run one training step and return the cost for the batch
    def partial_fit(self,X):
        cost,opt=self.sess.run((self.cost,self.optimizer),
                               feed_dict={self.x:X,self.scale:self.training_scale})
        return cost
    #  calc_total_cost: compute the loss only, for evaluating the model on the test set
    def calc_total_cost(self,X):
        return self.sess.run(self.cost,feed_dict={self.x:X,self.scale:self.training_scale})
    #  transform: return the hidden-layer output, i.e. the higher-level features the encoder extracts
    def transform(self,X):
        return self.sess.run(self.hidden,feed_dict={self.x:X,self.scale:self.training_scale})
    #  generate: recombine higher-level features from the hidden layer to
    #  reconstruct the original input (two fixes: np.random.normal needs a
    #  shape, not a tf.Variable, and the return must sit outside the if)
    def generate(self,hidden=None):
        if hidden is None:
            hidden=np.random.normal(size=(1,self.n_hidden))
        return self.sess.run(self.reconstruction,feed_dict={self.hidden:hidden})
    #  reconstruct: run the whole restoration pipeline, i.e. transform followed by generate
    def reconstruct(self,X):
        return self.sess.run(self.reconstruction,feed_dict={self.x:X,self.scale:self.training_scale})
    #  getWeights: fetch the hidden-layer weights w1
    def getWeights(self):
        return self.sess.run(self.weights['w1'])
    #  getBiases: fetch the hidden-layer biases b1
    def getBiases(self):
        return self.sess.run(self.weights['b1'])

# Standardize the training and test data to zero mean and unit variance.
# To keep preprocessing consistent, both sets share one Scaler, which is
# fit on the training set only and then applied to both.
def standard_scale(X_train,X_test):
    preprocessor=prep.StandardScaler().fit(X_train)
    X_train=preprocessor.transform(X_train)
    X_test=preprocessor.transform(X_test)
    return X_train,X_test
# Fetch a random mini-batch: draw a random start index and take batch_size
# consecutive rows (so batches may overlap across calls).
def get_random_block_from_data(data,batch_size):
    start_index=np.random.randint(0,len(data)-batch_size)
    return data[start_index:(start_index+batch_size)]

mnist=input_data.read_data_sets('MNIST_data',one_hot=True)
X_train,X_test=standard_scale(mnist.train.images,mnist.test.images)
# Number of training samples, number of epochs, mini-batch size, and how
# often (in epochs) to print the cost
n_samples=int(mnist.train.num_examples)
training_epochs=20
batch_size=128
display_step=1
# Instantiate the AGN autoencoder
autoencoder=AdditiveGaussianNoiseAutoencoder(n_input=784,
                                             n_hidden=200,
                                             transfer_function=tf.nn.softplus,
                                             optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
                                             scale=0.01)
for epoch in range(training_epochs):
    avg_cost=0.
    total_batch=int(n_samples/batch_size)
    for i in range(total_batch):
        batch_xs=get_random_block_from_data(X_train,batch_size)
        cost=autoencoder.partial_fit(batch_xs)
        avg_cost+=cost/n_samples*batch_size
    if epoch % display_step ==0:
        print("Epoch:", '%04d' % (epoch+1),"cost=","{:.9f}".format(avg_cost))
print("Total cost: "+str(autoencoder.calc_total_cost(X_test)))
# Select 10 test samples and compare the autoencoder's output with the
# originals. Note: the model was trained on standardized inputs, so the
# standardized X_test (not the raw images) is fed to reconstruct().
encode_decode=autoencoder.reconstruct(X_test[:10])
f,a=plt.subplots(2,10,figsize=(10,2))
for i in range(10):
    a[0][i].imshow(np.reshape(mnist.test.images[i],(28,28)))
    a[1][i].imshow(np.reshape(encode_decode[i],(28,28)))
plt.show()

Comparison

[Figure: top row, the original MNIST test digits; bottom row, the autoencoder's reconstructions.]
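
As an optional extension (a sketch of my own, not part of the original post), the learned encoder weights can also be visualized: each column of w1, fetched with the getWeights method defined above, is a 784-dimensional vector that reshapes into a 28x28 "filter" image.

w1=autoencoder.getWeights()              # shape (784, 200)
f2,a2=plt.subplots(1,10,figsize=(10,1))
for i in range(10):
    a2[i].imshow(np.reshape(w1[:,i],(28,28)))
plt.show()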
