TensorFlow in Practice: Autoencoders

Copyright notice: this is an original article by the blogger, licensed under CC 4.0 BY-SA. Please include the original source link and this notice when reposting.
Original link: https://blog.csdn.net/warrles/article/details/82753882

Introduction to autoencoders:
Deep learning extracts features that appear frequently. Features must be abstracted level by level: the network starts from the microscopic features it sees and progressively builds the feature hierarchy toward complex, macroscopic features.
Sparse representation of features: higher-level abstract features are obtained by combining and assembling a small number of basic features.
Hinton's idea was to first perform unsupervised pretraining with autoencoders, extracting features and initializing the weights, and then carry out supervised learning using the labeled data.

The more layers a neural network has, the fewer hidden units it needs per layer.

Drawbacks of deep neural networks: they overfit easily, their parameters are hard to tune, and gradients vanish (gradient diffusion).
Ways to prevent overfitting:
① Dropout: roughly speaking, during training some of the output values of a given layer are randomly dropped (set to zero); in effect this is equivalent to creating many new random samples. A minimal sketch follows.
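
A minimal illustration in the TF1-style API used by the rest of this post (the layer sizes here are my own choice, not from the original): tf.nn.dropout keeps each element with probability keep_prob and scales the survivors by 1/keep_prob, so the expected activation is unchanged.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 256])
keep_prob = tf.placeholder(tf.float32)   # e.g. 0.5 while training, 1.0 at test time
w = tf.Variable(tf.truncated_normal([256, 128], stddev = 0.1))
b = tf.Variable(tf.zeros([128]))
h = tf.nn.relu(tf.matmul(x, w) + b)
h_drop = tf.nn.dropout(h, keep_prob)     # randomly zero units; survivors scaled by 1/keep_prob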

Gradient vanishing (diffusion): when the network has many layers, the gradient of the Sigmoid function shrinks step by step during backpropagation, so updating the network parameters from the feedback of the training data becomes extremely slow.
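
A quick numerical sketch of why (plain numpy, my own illustration): the sigmoid derivative sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)) never exceeds 0.25, so a chain of n such factors in backpropagation shrinks at least as fast as 0.25^n.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

g = sigmoid(0.0) * (1 - sigmoid(0.0))   # 0.25, the maximum of sigmoid's derivative
for n in [1, 5, 10, 20]:
    print(n, g ** n)                    # 0.25, ~9.8e-04, ~9.5e-07, ~9.1e-13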

The main changes of ReLU compared with Sigmoid (a small numeric comparison follows this list):
① unilateral inhibition (negative inputs are clamped to zero)
② a relatively wide excitation boundary (no saturation for positive inputs)
③ sparse activation
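
A small comparison of the two activations (plain numpy, my own illustration):

import numpy as np

x = np.array([-3.0, -0.5, 0.0, 0.5, 3.0, 30.0])
print(np.maximum(0.0, x))          # ReLU: [ 0.   0.   0.   0.5  3.  30.] -> one-sided, sparse, unbounded above
print(1.0 / (1.0 + np.exp(-x)))    # Sigmoid: squashes into (0, 1) and saturates at both ends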

Applications of convolutional neural networks:
① images and video
② time-series signals
③ audio signals
④ text data

The basic operations in a convolutional layer (a code sketch follows this list):
① the linear map Wx + b (convolving the input with the kernels and adding a bias)
② a nonlinear activation function (typically ReLU)
③ pooling, i.e. downsampling, e.g. reducing each 2x2 patch to a single value; max pooling is the usual choice today, since it keeps the most salient feature and improves the model's tolerance to distortion
④ most commonly, a normalization layer such as LRN (Local Response Normalization) may also follow
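
A minimal TF1-style sketch of steps ① to ③ (the shapes and kernel sizes here are my own illustration):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 28, 28, 1])              # e.g. MNIST-sized input
W = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev = 0.1))   # 5x5 kernels, 32 output channels
b = tf.Variable(tf.zeros([32]))

conv = tf.nn.conv2d(images, W, strides = [1, 1, 1, 1], padding = 'SAME') + b   # step 1: Wx + b
act = tf.nn.relu(conv)                                                         # step 2: nonlinearity
pool = tf.nn.max_pool(act, ksize = [1, 2, 2, 1],
                      strides = [1, 2, 2, 1], padding = 'SAME')                # step 3: 2x2 max pooling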
 

#%%
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# A standard uniform-distribution Xavier (Glorot) initializer: sampling from
# U(-sqrt(6/(fan_in+fan_out)), +sqrt(6/(fan_in+fan_out))) keeps the variance of
# the activations roughly constant across layers
def xavier_init(fan_in, fan_out, constant = 1):
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval = low, maxval = high,
                             dtype = tf.float32)
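# For example, with fan_in = 784 and fan_out = 200 (the sizes used below), the bound
# is sqrt(6/984) ~= 0.078, so the weights start out roughly in U(-0.078, 0.078).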

class AdditiveGaussianNoiseAutoencoder(object):
    # n_input: number of input variables
    # n_hidden: number of hidden-layer units
    # transfer_function: activation function for the hidden layer
    # optimizer: the optimizer, Adam by default
    # scale: Gaussian noise coefficient
    def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(),
                 scale = 0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
        network_weights = self._initialize_weights()
        # keep the initialized network parameters
        self.weights = network_weights

        # model: define the network structure
        # a placeholder for inputs with dimension n_input
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        # the hidden layer that extracts features: first corrupt the input with additive
        # Gaussian noise, self.x + self.scale * tf.random_normal((n_input,)), then apply
        # w1 and b1 followed by the activation function. Note the noise level must be the
        # placeholder self.scale (fed at run time), not the Python float scale, otherwise
        # the value passed through feed_dict would have no effect.
        self.hidden = self.transfer(tf.add(tf.matmul(self.x + self.scale * tf.random_normal((n_input,)),
                self.weights['w1']),
                self.weights['b1']))
        # the output layer reconstructs (restores) the original data from the hidden code
        # tf.add: element-wise addition
        # tf.matmul: matrix multiplication
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost: the squared-error reconstruction loss, summed over all dimensions
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        # the training op: the optimizer minimizes self.cost
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        # initialize all model parameters of the autoencoder
        self.sess.run(init)
    # initialize the network weights
    def _initialize_weights(self):
        all_weights = dict()
        # w1 is initialized with the xavier_init function defined above; the others start at zero
        all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype = tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype = tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype = tf.float32))
        return all_weights
    # partial_fit: compute the cost and execute one training step
    def partial_fit(self, X):
        # train on one batch of data and return the current cost
        cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict = {self.x: X,
                                                                            self.scale: self.training_scale
                                                                            })
        return cost

    def calc_total_cost(self, X):
        # have the session evaluate only the cost node (no training step), e.g. on test data
        return self.sess.run(self.cost, feed_dict = {self.x: X,
                                                     self.scale: self.training_scale
                                                     })
    # transform: return the output of the hidden layer, i.e. the learned high-level features
    def transform(self, X):
        return self.sess.run(self.hidden, feed_dict = {self.x: X,
                                                       self.scale: self.training_scale
                                                       })
    # generate: take a hidden-layer output as input and restore the extracted high-level
    # features to the original data through the reconstruction layer
    def generate(self, hidden = None):
        if hidden is None:
            # default to a random hidden code of the right width (a 1 x n_hidden batch)
            hidden = np.random.normal(size = (1, self.n_hidden))
        return self.sess.run(self.reconstruction, feed_dict = {self.hidden: hidden})
    # reconstruct: run the whole pipeline, extracting the high-level features and then
    # restoring the data from them
    def reconstruct(self, X):
        return self.sess.run(self.reconstruction, feed_dict = {self.x: X,
                                                               self.scale: self.training_scale
                                                               })
    # fetch the hidden layer's weights w1
    def getWeights(self):
        return self.sess.run(self.weights['w1'])
    # fetch the hidden layer's biases b1
    def getBiases(self):
        return self.sess.run(self.weights['b1'])
        
        
        
        
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
# standardize the data to zero mean and unit standard deviation
def standard_scale(X_train, X_test):
    # fit the Scaler on the training data only, so train and test use exactly the same transformation
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test
# fetch a random block of data: pick a random start index and take a contiguous batch
# (so batches are sampled with replacement across calls)
def get_random_block_from_data(data, batch_size):
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]
# apply the standardization to the training and test sets
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)

# total number of training samples
n_samples = int(mnist.train.num_examples)
# maximum number of training epochs
training_epochs = 20
batch_size = 128
display_step = 1

autoencoder = AdditiveGaussianNoiseAutoencoder(n_input = 784,
                                               n_hidden = 200,
                                               transfer_function = tf.nn.softplus,
                                               optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
                                               scale = 0.01)
# the training loop over epochs
for epoch in range(training_epochs):
    avg_cost = 0.
    # number of batches per epoch
    total_batch = int(n_samples / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        # fetch one batch of data
        batch_xs = get_random_block_from_data(X_train, batch_size)

        # Fit training using batch data
        cost = autoencoder.partial_fit(batch_xs)
        # Compute average loss
        avg_cost += cost / n_samples * batch_size

    # Display logs per epoch step
    if epoch % display_step == 0:
        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))

print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))

 
