TensorFlow Code Walkthrough 2: Complete Deep Fully-Connected Neural Network Examples

# A tensor has three attributes: a name, a shape and a dtype
# TensorFlow's execution model is the Session()
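A minimal standalone sketch of these two notes (the constants and names here are hypothetical, not part of the examples below): printing a tensor only shows its name, shape and dtype, while the actual values come from running the graph in a Session.
import tensorflow as tf
a=tf.constant([1.0,2.0],name='a')
b=tf.constant([2.0,3.0],name='b')
result=tf.add(a,b,name='add')
print(result)                 # Tensor("add:0", shape=(2,), dtype=float32): name, shape and dtype only
with tf.Session() as sess:
    print(sess.run(result))   # [3. 5.]: the values are produced by executing the graph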
# The first complete neural network example
import tensorflow as tf
from numpy.random import RandomState
# Define the training batch size
batch_size=8 
# Define the network parameters: 2 input nodes, one hidden layer with 3 neurons, 1 output node
w1=tf.Variable(tf.random_normal([2,3],stddev=1,seed=1))
w2=tf.Variable(tf.random_normal([3,1],stddev=1,seed=1))
# Placeholders for the training inputs x and their correct labels y_
x=tf.placeholder(tf.float32,shape=(None,2),name='x-input')
y_=tf.placeholder(tf.float32,shape=(None,1),name='y-input')
# Forward propagation of the network
a=tf.matmul(x,w1)
y=tf.matmul(a,w2)
# Define the loss function and the back-propagation (optimization) algorithm
cross_entropy=-tf.reduce_mean(y_*tf.log(tf.clip_by_value(y,1e-10,1.0)))
train_step=tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
# Generate a simulated dataset from random numbers
rdm=RandomState(1)
dataset_size=128
X=rdm.rand(dataset_size,2)
Y=[[int(x1+x2<1)] for (x1,x2) in X]
# Create a Session to run the TensorFlow program
with tf.Session() as sess:
    init_op=tf.global_variables_initializer()
    sess.run(init_op)
    print(sess.run(w1))
    print(sess.run(w2))
    Steps=5000
    for i in range(Steps):
        start=(i*batch_size)% dataset_size
        end=min(start+batch_size,dataset_size)
        sess.run(train_step,feed_dict={x:X[start:end],y_:Y[start:end]})
        if i % 1000==0:
            total_cross_entropy=sess.run(cross_entropy,feed_dict={x:X,y_:Y})
            print('After %d training steps,cross entropy on all data is %g' % (i,total_cross_entropy))
    print(sess.run(w1))
    print(sess.run(w2))
[[-0.81131822  1.48459876  0.06532937]
 [-2.4427042   0.0992484   0.59122431]]
[[-0.81131822]
 [ 1.48459876]
 [ 0.06532937]]
After 0 training steps,cross entropy on all data is 0.0674925
After 1000 training steps,cross entropy on all data is 0.0163385
After 2000 training steps,cross entropy on all data is 0.00907547
After 3000 training steps,cross entropy on all data is 0.00714436
After 4000 training steps,cross entropy on all data is 0.00578471
[[-1.96182752  2.58235407  1.68203771]
 [-3.46817183  1.06982315  2.11788988]]
[[-1.82471502]
 [ 2.68546653]
 [ 1.41819501]]
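The loss above clips the network output with tf.clip_by_value before taking the log, so that log(0) can never occur. A standalone sketch of the clipping (the constant values here are hypothetical):
import tensorflow as tf
v=tf.constant([[1.0,2.0,3.0],[4.0,5.0,6.0]])
with tf.Session() as sess:
    print(sess.run(tf.clip_by_value(v,2.5,4.5)))
    # [[2.5 2.5 3. ]
    #  [4.  4.5 4.5]]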
# Complete deep network example
# Activation functions, a decaying learning rate, regularization against overfitting and a moving-average model; the model's accuracy is reported on the test set (a standalone learning-rate sketch follows below)
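Before the full example, a minimal standalone sketch of the learning-rate decay (the toy loss, decay_steps=100 and decay_rate=0.96 are hypothetical; the MNIST example below uses num_examples/BATCH_SIZE and 0.99). tf.train.exponential_decay computes lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps):
import tensorflow as tf
w=tf.Variable(1.0)
loss=tf.square(w)                               # a toy loss just to drive the optimizer
global_step=tf.Variable(0,trainable=False)
learning_rate=tf.train.exponential_decay(0.8,global_step,100,0.96)
train_step=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        sess.run(train_step)                    # minimize() increments global_step
    print(sess.run([global_step,learning_rate]))  # [3, ~0.799] since 0.8*0.96**(3/100) is about 0.799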
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Constants describing the MNIST dataset
INPUT_NODE=784
OUTPUT_NODE=10
# Network hyperparameters
LAYER1_NODE=500
BATCH_SIZE=100
LEARNING_RATE_BASE=0.8
LEARNING_RATE_DECAY=0.99    
REGULARIZATION_RATE=0.0001  # weight of the L2 regularization term
TRAINING_STEPS=4000
MOVING_AVERAGE_DECAY=0.99
def inference(input_tensor,avg_class,weights1,biases1,weights2,biases2):  # forward propagation
    if avg_class is None:
        layer1=tf.nn.relu(tf.matmul(input_tensor,weights1)+biases1)
        return tf.matmul(layer1,weights2)+biases2
    else:
        layer1=tf.nn.relu(tf.matmul(input_tensor,avg_class.average(weights1))+avg_class.average(biases1))
        return tf.matmul(layer1,avg_class.average(weights2))+avg_class.average(biases2)
def train(mnist):  # the training procedure
    x=tf.placeholder(tf.float32,[None,INPUT_NODE],name='x-input')
    y_=tf.placeholder(tf.float32,[None,OUTPUT_NODE],name='y-input')
    weights1=tf.Variable(tf.truncated_normal([INPUT_NODE,LAYER1_NODE],stddev=0.1))
    biases1=tf.Variable(tf.constant(0.1,shape=[LAYER1_NODE]))
    weights2=tf.Variable(tf.truncated_normal([LAYER1_NODE,OUTPUT_NODE],stddev=0.1))
    biases2=tf.Variable(tf.constant(0.1,shape=[OUTPUT_NODE]))
    y=inference(x,None,weights1,biases1,weights2,biases2)
    global_step=tf.Variable(0,trainable=False)
    variable_averages=tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)
    variable_averages_op=variable_averages.apply(tf.trainable_variables())
    average_y=inference(x,variable_averages,weights1,biases1,weights2,biases2)
    cross_entropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=tf.argmax(y_,1))
    cross_entropy_mean=tf.reduce_mean(cross_entropy)
    regularizer=tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    regularization=regularizer(weights1)+regularizer(weights2)
    loss=cross_entropy_mean+regularization
    learning_rate=tf.train.exponential_decay(LEARNING_RATE_BASE,global_step,mnist.train.num_examples/BATCH_SIZE,LEARNING_RATE_DECAY)
    train_step=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
    # Every training step must update both the network parameters and the moving average of each parameter
    train_op=tf.group(train_step,variable_averages_op)
    correct_prediction=tf.equal(tf.argmax(average_y,1),tf.argmax(y_,1))
    # tf.cast converts the boolean prediction results to float32 so their mean is the accuracy
    accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        # Prepare the validation and test feeds
        validate_feed={x:mnist.validation.images,y_:mnist.validation.labels}
        test_feed={x:mnist.test.images,y_:mnist.test.labels}
        for i in range(TRAINING_STEPS):
            if i % 1000 ==0:
                validate_acc=sess.run(accuracy,feed_dict=validate_feed)
                print('After %d training steps,validation accuracy using average model is %g' % (i,validate_acc))
            xs,ys=mnist.train.next_batch(BATCH_SIZE)
            sess.run(train_op,feed_dict={x:xs,y_:ys})
        test_acc=sess.run(accuracy,feed_dict=test_feed)
        print('After %d training steps,test accuracy using average model is %g' % (TRAINING_STEPS,test_acc))
def main(argv=None):
    mnist=input_data.read_data_sets('/tmp/data',one_hot=True)
    train(mnist)
if __name__=='__main__':
    tf.app.run()
Extracting /tmp/data\train-images-idx3-ubyte.gz
Extracting /tmp/data\train-labels-idx1-ubyte.gz
Extracting /tmp/data\t10k-images-idx3-ubyte.gz
Extracting /tmp/data\t10k-labels-idx1-ubyte.gz
After 0 training steps,validation accuracy using average model is 0.0808
After 1000 training steps,validation accuracy using average model is 0.9758
After 2000 training steps,validation accuracy using average model is 0.9796
After 3000 training steps,validation accuracy using average model is 0.9834
After 4000 training steps,test accuracy using average model is 0.983
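The example above maintains shadow copies of all trainable variables with tf.train.ExponentialMovingAverage. A minimal standalone sketch of how a shadow value is updated (the variable and numbers are hypothetical): with num_updates=0 the effective decay is min(0.99,(1+0)/(10+0))=0.1, so the shadow value becomes 0.1*0+0.9*5=4.5 and follows the variable quickly early in training.
import tensorflow as tf
v=tf.Variable(0,dtype=tf.float32)
step=tf.Variable(0,trainable=False)             # plays the role of global_step
ema=tf.train.ExponentialMovingAverage(0.99,step)
maintain_averages_op=ema.apply([v])             # creates a shadow variable for v
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.assign(v,5))
    sess.run(maintain_averages_op)
    print(sess.run([v,ema.average(v)]))         # [5.0, 4.5]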
# A deep network project comes in three parts: inference.py, train.py, evaluate.py
# The focus here: how the trained weights are saved, how the computation graph is loaded, and how the trained weight values are restored
#mnist_inference.py
import tensorflow as tf
INPUT_NODE=784
OUTPUT_NODE=10
LAYER1_NODE=500
def get_weight_variable(shape,regularizer):
    weights=tf.get_variable('weights',shape,initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses',regularizer(weights))
    return weights  

def inference(input_tensor,regularizer):
    with tf.variable_scope('layer1'):
        weights=get_weight_variable([INPUT_NODE,LAYER1_NODE],regularizer)
        biases=tf.get_variable('biases',[LAYER1_NODE],initializer=tf.constant_initializer(0.0))
        layer1=tf.nn.relu(tf.matmul(input_tensor,weights)+biases)
    with tf.variable_scope('layer2'):
        weights=get_weight_variable([LAYER1_NODE,OUTPUT_NODE],regularizer)
        biases=tf.get_variable('biases',[OUTPUT_NODE],initializer=tf.constant_initializer(0.0))
        layer2=tf.matmul(layer1,weights)+biases
    return layer2
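mnist_inference.py creates its weights with tf.get_variable inside tf.variable_scope, so other scripts (training and evaluation) can obtain the very same variables by name instead of passing them around. A minimal sketch of this mechanism (the scope name 'foo' and variable 'v' are hypothetical):
import tensorflow as tf
with tf.variable_scope('foo'):
    v=tf.get_variable('v',[1],initializer=tf.constant_initializer(1.0))
with tf.variable_scope('foo',reuse=True):
    v1=tf.get_variable('v',[1])                 # returns the existing variable foo/v
print(v is v1)                                  # True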
#mnist_train.py
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
#import mnist_inference
# Network hyperparameters
LAYER1_NODE=500
BATCH_SIZE=100
LEARNING_RATE_BASE=0.8
LEARNING_RATE_DECAY=0.99
REGULARIZATION_RATE=0.0001
TRAINING_STEPS=3000
MOVING_AVERAGE_DECAY=0.99
# Path and file name for saving the model
MODEL_SAVE_PATH='/path/to/model/'
MODEL_NAME='model.ckpt'
def train(mnist):
    x=tf.placeholder(tf.float32,[None,INPUT_NODE],name='x-input')
    y_=tf.placeholder(tf.float32,[None,OUTPUT_NODE],name='y-input')
    regularizer=tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    #y=mnist_inference.inference(x,regularizer)
    y=inference(x,regularizer)
    global_step=tf.Variable(0,trainable=False)
    variable_averages=tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)
    variable_averages_op=variable_averages.apply(tf.trainable_variables())
    cross_entropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=tf.argmax(y_,1))
    cross_entropy_mean=tf.reduce_mean(cross_entropy)
    loss=cross_entropy_mean+tf.add_n(tf.get_collection('losses'))
    learning_rate=tf.train.exponential_decay(LEARNING_RATE_BASE,global_step,mnist.train.num_examples/BATCH_SIZE,LEARNING_RATE_DECAY)
    train_step=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
    train_op=tf.group(train_step,variable_averages_op)
    # Initialize the TensorFlow persistence class (tf.train.Saver)
    saver=tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs,ys=mnist.train.next_batch(BATCH_SIZE)
            # Run the training op and also fetch the current loss and global step
            _,loss_value,step=sess.run([train_op,loss,global_step],feed_dict={x:xs,y_:ys})
            if i % 1000==0:
                print('After %d training step(s), loss on training batch is %g.' % (step, loss_value))
                saver.save(sess,os.path.join(MODEL_SAVE_PATH,MODEL_NAME),global_step=global_step) 
def main(argv=None):
    mnist=input_data.read_data_sets('/tmp/data',one_hot=True)
    train(mnist)
if __name__=='__main__':
    tf.app.run()
Extracting /tmp/data\train-images-idx3-ubyte.gz
Extracting /tmp/data\train-labels-idx1-ubyte.gz
Extracting /tmp/data\t10k-images-idx3-ubyte.gz
Extracting /tmp/data\t10k-labels-idx1-ubyte.gz
After 1 training step(s), loss on training batch is 3.07135.
After 1001 training step(s), loss on training batch is 0.221773.
After 2001 training step(s), loss on training batch is 0.177926.
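mnist_train.py persists the model with tf.train.Saver. A minimal standalone sketch of what save and restore do (the path /tmp/saver_demo.ckpt is hypothetical):
import tensorflow as tf
v=tf.Variable(1.0,name='v')
saver=tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.assign(v,3.0))
    saver.save(sess,'/tmp/saver_demo.ckpt')     # also writes a 'checkpoint' bookkeeping file next to it
with tf.Session() as sess:
    saver.restore(sess,'/tmp/saver_demo.ckpt')  # values are loaded, no initializer needed
    print(sess.run(v))                          # 3.0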
# Load the latest saved model every 10 seconds and evaluate its accuracy on the validation data
import time
EVAL_INTERVAL_SECS = 10
def evaluate(mnist):  
    with tf.Graph().as_default() as g:  
        # Define the input and output placeholders
        x = tf.placeholder(tf.float32, [None,INPUT_NODE], name='x-input')  
        y_ = tf.placeholder(tf.float32, [None,OUTPUT_NODE],name='y-input')  
        validate_feed = {x:mnist.validation.images,  
                         y_:mnist.validation.labels}  
        # Call the shared inference function directly to compute the forward pass
        y = inference(x, None)
        # Compute accuracy from the forward pass; tf.argmax(y, 1) gives the predicted class of each example
        correct_prediction = tf.equal(tf.argmax(y, 1),tf.argmax(y_, 1))  
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  
        # Load the model through variable renaming, so the forward pass does not need to call the
        # moving-average function explicitly; the forward propagation defined in mnist_inference.py
        # can be reused as-is
        variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)  
        variable_to_restore = variable_averages.variables_to_restore()  
        saver = tf.train.Saver(variable_to_restore)  
        # Every EVAL_INTERVAL_SECS seconds, recompute the accuracy to monitor how it changes during training
        while True:  
            with tf.Session() as sess:  
                # tf.train.get_checkpoint_state finds the latest model's file name in the directory via the checkpoint file
                ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)  
                if ckpt and ckpt.model_checkpoint_path:  
                    # Load the model
                    saver.restore(sess, ckpt.model_checkpoint_path)  
                    # Recover from the file name the global step at which the model was saved
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]  
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)  
                    print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))  
                else:  
                    print('No checkpoint file found')  
                    return  
                time.sleep(EVAL_INTERVAL_SECS)  
def main(argv=None):  
    mnist = input_data.read_data_sets("/tmp/data",one_hot=True)  
    evaluate(mnist)  
if __name__ == '__main__':  
    main()
Extracting /tmp/data\train-images-idx3-ubyte.gz
Extracting /tmp/data\train-labels-idx1-ubyte.gz
Extracting /tmp/data\t10k-images-idx3-ubyte.gz
Extracting /tmp/data\t10k-labels-idx1-ubyte.gz
INFO:tensorflow:Restoring parameters from /path/to/model/model.ckpt-2001
After 2001 training step(s), validation accuracy = 0.9832
INFO:tensorflow:Restoring parameters from /path/to/model/model.ckpt-2001
After 2001 training step(s), validation accuracy = 0.9832
(the same two lines repeat every 10 seconds against checkpoint model.ckpt-2001 until the process is stopped)

variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
variable_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variable_to_restore)
Inside the session:
ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)   # reads the checkpoint file under MODEL_SAVE_PATH and returns the latest model's path
if ckpt and ckpt.model_checkpoint_path:                  # if a checkpoint exists, restore the saved values it points to
    saver.restore(sess, ckpt.model_checkpoint_path)
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
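A minimal sketch of the renaming dictionary that variables_to_restore() builds (a single hypothetical variable v, no training): the shadow variable's name is mapped to the ordinary variable, so restoring through this saver writes the moving-averaged values into the regular variables.
import tensorflow as tf
v=tf.Variable(0,dtype=tf.float32,name='v')
ema=tf.train.ExponentialMovingAverage(0.99)
print(ema.variables_to_restore())
# {'v/ExponentialMovingAverage': <tf.Variable 'v:0' shape=() dtype=float32_ref>}
saver=tf.train.Saver(ema.variables_to_restore())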
