TensorFlow Study Notes 01

Excerpted from 《TensorFlow: 实战Google深度学习框架》 (TensorFlow: Practical Google Deep Learning Framework)

Forward propagation and the network parameters: mnist_inference.py

import tensorflow as tf

INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500


def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    # Add the regularization term for these weights to the custom 'losses'
    # collection; the training script later sums the collection into the loss
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights


def inference(input_tensor, regularizer):
    # Each layer lives in its own variable scope, so the names "weights"
    # and "biases" do not collide between layers
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        # No activation on the output layer: softmax is folded into the loss
        # (tf.nn.sparse_softmax_cross_entropy_with_logits) during training
        layer2 = tf.matmul(layer1, weights) + biases

    return layer2
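
To see what the 'losses' collection accumulates, here is a minimal standalone sketch (the collection name demo_losses is just for illustration). tf.contrib.layers.l2_regularizer(lam) returns a function that computes lam * sum(w**2) / 2 (it wraps tf.nn.l2_loss), and tf.add_n sums every entry collected so far:

import numpy as np
import tensorflow as tf

lam = 0.5
w = tf.constant([[1.0, -2.0], [3.0, 4.0]])
regularizer = tf.contrib.layers.l2_regularizer(lam)
tf.add_to_collection('demo_losses', regularizer(w))

with tf.Session() as sess:
    total = sess.run(tf.add_n(tf.get_collection('demo_losses')))
    # sum(w**2) = 1 + 4 + 9 + 16 = 30, so total = 0.5 * 30 / 2 = 7.5
    print(total)  # 7.5
    assert np.isclose(total, 7.5)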

Neural network training process: mnist_train.py

import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference


BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "./path/to/model"
MODEL_NAME = "model.ckpt"


def train(mnist):
    # placeholders reserve slots in the graph; the actual input batches are
    # fed in through them at run time via feed_dict
    x = tf.placeholder(tf.float32, shape=[None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, shape=[None, mnist_inference.OUTPUT_NODE], name='y-input')
    # L2 regularization combats overfitting; REGULARIZATION_RATE is the
    # lambda coefficient on the regularization term
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    # global_step counts training steps; passing global_step=global_step to
    # minimize() below makes the optimizer increment it automatically
    global_step = tf.Variable(0, trainable=False)
    # Maintain an exponential moving average of every trainable variable.
    # Each shadow value is updated as shadow = decay * shadow + (1 - decay) * variable;
    # passing global_step keeps the effective decay smaller early in training
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # Compute cross entropy with the fused softmax + cross-entropy op.
    # The sparse_* variant expects integer class labels, so tf.argmax(y_, 1)
    # converts each one-hot row to the index of its 1
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Total loss = cross entropy + the L2 regularization terms collected in 'losses'
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # Exponentially decay the learning rate:
    # learning_rate = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ^ (global_step / decay_steps),
    # where decay_steps is the number of batches per epoch
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY
    )
    # One step of gradient descent; minimize() also increments global_step
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # Bundle the gradient step and the moving-average update into one train_op
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')
    # tf.train.Saver persists the model as checkpoint files
    saver = tf.train.Saver()

    with tf.Session() as sess:
        # Initialize all global variables
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                # Save a checkpoint every 1000 steps; the global_step argument
                # appends the current step count to the checkpoint filename
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def main(argv=None):
    mnist = input_data.read_data_sets("./mnist_data", one_hot=True)
    train(mnist)


if __name__ == '__main__':
    tf.app.run()
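
Restoring these checkpoints is the mirror image of saving them. Below is a minimal evaluation sketch in the spirit of the book's companion mnist_eval.py (not copied from it), assuming the first listing is saved as mnist_inference.py and the one above as mnist_train.py. The key step is variable_averages.variables_to_restore(), which maps each variable name to its moving-average shadow so that evaluation runs on the averaged weights:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference
import mnist_train


def evaluate(mnist):
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        # No regularizer at evaluation time: only the forward pass is needed
        y = mnist_inference.inference(x, None)
        correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        # Restore the moving-average (shadow) values in place of the raw weights
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        saver = tf.train.Saver(variable_averages.variables_to_restore())
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                acc = sess.run(accuracy, feed_dict={x: mnist.validation.images,
                                                    y_: mnist.validation.labels})
                print("validation accuracy = %g" % acc)


if __name__ == '__main__':
    evaluate(input_data.read_data_sets("./mnist_data", one_hot=True))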
