import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def xavier_initialization(fan_in, fan_out, constant=1):
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=low, maxval=high,
                             dtype=tf.float32)
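As a quick sanity check (illustrative only, not part of the original code): a uniform distribution on (-a, a) has variance a^2 / 3, so the bound above gives Var(W) = 2 / (fan_in + fan_out), the Xavier/Glorot condition that keeps activation variance roughly constant across layers.
with tf.Session() as s:
    w = s.run(xavier_initialization(784, 200))
print(w.var(), 2.0 / (784 + 200))  # both approximately 0.00203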
class AdditiveGaussianNoiseAutoEncoder(object):
    '''
    n_input: number of input variables
    n_hidden: number of hidden-layer nodes
    transfer_function: activation function for the hidden layer, default softplus
    optimizer: the optimizer, default Adam
    scale: Gaussian noise coefficient, default 0.1 (kept as self.training_scale)
    weights: initialized via _initialize_weights()
    Note: only a single hidden layer is used here.
    '''
    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer(), scale=0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
        network_weights = self._initialize_weights()
        self.weights = network_weights
        # Encoder: corrupt the input with additive Gaussian noise, then map it
        # through the hidden layer. Note self.scale (the placeholder) is used
        # here so the noise level fed at run time actually takes effect.
        self.x = tf.placeholder(tf.float32, [None, n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(
            self.x + self.scale * tf.random_normal((n_input,)),
            self.weights['w1']), self.weights['b1']))
        # Decoder: linear reconstruction from the hidden code.
        self.reconstruction = tf.add(tf.matmul(self.hidden,
                                               self.weights['w2']),
                                     self.weights['b2'])
        # Loss: squared error between the reconstruction and the clean input.
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(
            self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)
        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)
    def _initialize_weights(self):
        '''
        Initialize the weight parameters. Only the encoder weights w1 get
        Xavier initialization; the rest can simply start at zero.
        '''
        all_weights = dict()
        all_weights['w1'] = tf.Variable(xavier_initialization(self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights
    def partial_fit(self, X):
        '''
        Run one training step on a batch X and return its cost.
        feed_dict supplies the input data x and the noise coefficient scale.
        '''
        cost, opt = self.sess.run((self.cost, self.optimizer),
                                  feed_dict={self.x: X, self.scale: self.training_scale})
        return cost
    def calc_total_cost(self, X):
        '''
        Compute the cost on X without triggering a training step;
        used when evaluating the model on held-out data.
        '''
        return self.sess.run(self.cost,
                             feed_dict={self.x: X, self.scale: self.training_scale})
    def transform(self, X):
        '''
        Return the hidden-layer output for X, i.e. the higher-level
        features the autoencoder has learned from the data.
        '''
        return self.sess.run(self.hidden,
                             feed_dict={self.x: X, self.scale: self.training_scale})
    def generate(self, hidden=None):
        '''
        Take a hidden-layer output as input and run the decoder,
        restoring the high-level features to the original data space.
        '''
        if hidden is None:
            # Sample a random code; the shape must match the hidden layer,
            # i.e. (batch, n_hidden), for the feed to be accepted.
            hidden = np.random.normal(size=(1, self.n_hidden))
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})
    def reconstruct(self, X):
        '''
        Run the whole autoencoder on X and return the reconstruction;
        equivalent to calling transform followed by generate.
        '''
        return self.sess.run(self.reconstruction,
                             feed_dict={self.x: X, self.scale: self.training_scale})
    def getWeights(self):
        '''
        Return the hidden-layer weights w1.
        '''
        return self.sess.run(self.weights['w1'])
    def getBiases(self):
        '''
        Return the hidden-layer biases b1.
        '''
        return self.sess.run(self.weights['b1'])
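The encoder/decoder split above can be smoke-tested before any training. A minimal sketch (the `demo` instance and random input `x_demo` are hypothetical, not part of the original walkthrough): feed one fake 784-pixel image through an untrained instance and check that transform yields the 200-dimensional code while reconstruct returns to input space.
demo = AdditiveGaussianNoiseAutoEncoder(n_input=784, n_hidden=200)
x_demo = np.random.normal(size=(1, 784)).astype(np.float32)
print(demo.transform(x_demo).shape)    # (1, 200): the hidden code
print(demo.reconstruct(x_demo).shape)  # (1, 784): back to input space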
mnist = input_data.read_data_sets('MNIST_data',one_hot = True)
Extracting MNIST_data\train-images-idx3-ubyte.gz
Extracting MNIST_data\train-labels-idx1-ubyte.gz
Extracting MNIST_data\t10k-images-idx3-ubyte.gz
Extracting MNIST_data\t10k-labels-idx1-ubyte.gz
def standard_scale(X_train, X_test):
    '''
    Standardize the data to zero mean and unit variance. The scaler is fit
    on the training set only, then applied to both splits, so no test-set
    statistics leak into training.
    '''
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test
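A small check on hypothetical toy data (not MNIST): the training split comes out with mean 0 and standard deviation 1 exactly, while the test split is transformed with the training statistics and so is only approximately standardized.
toy_train = np.random.rand(100, 3) * 5.0
toy_test = np.random.rand(20, 3) * 5.0
s_train, s_test = standard_scale(toy_train, toy_test)
print(s_train.mean(axis=0).round(2), s_train.std(axis=0).round(2))  # ~0, ~1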
def get_random_block_from_data(data, batch_size):
    '''
    Take a random contiguous block of batch_size samples from the data.
    Each call picks an independent random start index, so this is in effect
    sampling with replacement: blocks from successive calls may overlap.
    '''
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]
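A quick illustration on a toy array (hypothetical, for demonstration only): each call slices a contiguous run of batch_size items starting at a random offset.
toy = np.arange(10)
print(get_random_block_from_data(toy, 4))  # e.g. [3 4 5 6]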
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
AGN = AdditiveGaussianNoiseAutoEncoder(n_input=784,
                                       n_hidden=200,
                                       transfer_function=tf.nn.softplus,
                                       optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
                                       scale=0.01)
for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)
        cost = AGN.partial_fit(batch_xs)
        # cost is a sum over the batch; dividing by n_samples and multiplying
        # by batch_size accumulates the average cost per batch for the epoch.
        avg_cost += cost / n_samples * batch_size
    if epoch % display_step == 0:
        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
Epoch: 0001 cost= 18546.710154545
Epoch: 0002 cost= 13127.617153409
Epoch: 0003 cost= 10881.075117045
Epoch: 0004 cost= 9427.339469318
Epoch: 0005 cost= 9333.850439773
Epoch: 0006 cost= 9067.035839773
Epoch: 0007 cost= 9929.138722727
Epoch: 0008 cost= 9507.725019886
Epoch: 0009 cost= 8539.850939205
Epoch: 0010 cost= 9277.037648295
Epoch: 0011 cost= 8203.704468182
Epoch: 0012 cost= 7974.056809091
Epoch: 0013 cost= 7523.191403409
Epoch: 0014 cost= 8728.067648864
Epoch: 0015 cost= 8446.473536364
Epoch: 0016 cost= 8368.747946591
Epoch: 0017 cost= 8495.513869886
Epoch: 0018 cost= 8680.677721591
Epoch: 0019 cost= 7734.869307955
Epoch: 0020 cost= 8421.617193750
print("Total cost:" + str(AGN.calc_total_cost(X_test)))
Total cost:618007.6
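To see what the trained model actually does, here is a hedged visualization sketch (matplotlib is assumed available; it is not used elsewhere in this walkthrough). It compares a few test digits with their reconstructions. Note X_test is standardized, so pixel values are no longer in [0, 1] and imshow simply rescales them.
import matplotlib.pyplot as plt
samples = X_test[:5]
recon = AGN.reconstruct(samples)
fig, axes = plt.subplots(2, 5, figsize=(10, 4))
for i in range(5):
    axes[0][i].imshow(samples[i].reshape(28, 28), cmap='gray')  # original digit
    axes[1][i].imshow(recon[i].reshape(28, 28), cmap='gray')    # reconstruction
    axes[0][i].axis('off')
    axes[1][i].axis('off')
plt.show()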