TensorFlow Class Notes (7): MNIST Recognition

mnist_forward.py

import tensorflow as tf

INPUT_NODE = 784      # 28*28 input nodes, one per pixel
OUTPUT_NODE = 10      # probabilities for the digits 0-9 (10-way classification)
LAYER1_NODE = 500     # 500 nodes in the single hidden layer

def get_weight(shape, regularizer):
    # tf.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)
    # draws random values from a truncated normal distribution
    # shape  dimensions of the output tensor
    # mean   mean of the normal distribution
    # stddev standard deviation of the normal distribution
    # dtype  output type
    # seed   random seed
    # name   name of the operation
    # tf.random_normal can return values far from the mean when many values are drawn;
    # tf.truncated_normal re-draws anything more than two standard deviations away
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    if regularizer != None:            # if a regularization coefficient was supplied
        # add the L2 regularization loss of this w to the 'losses' collection;
        # mnist_backward.py sums the collection into the total loss
        # (the sanity check after this file inspects the collection)
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
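        # tf.contrib.layers.l2_regularizer(scale)(w) evaluates to scale * tf.nn.l2_loss(w),
        # i.e. scale * sum(w**2) / 2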
    return w

def get_bias(shape):
    b = tf.Variable(tf.zeros(shape))   # initialize the bias vector with zeros
    return b

# param x: input batch, one example per row with INPUT_NODE columns
def forward(x, regularizer):
    w1 = get_weight([INPUT_NODE,LAYER1_NODE], regularizer)
    b1 = get_bias([LAYER1_NODE])       # 500 biases, one per hidden node
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)   # hidden-layer activation is tf.nn.relu

    w2 = get_weight([LAYER1_NODE, OUTPUT_NODE], regularizer)
    b2 = get_bias([OUTPUT_NODE])       # every layer except the input layer gets a bias
    y = tf.matmul(y1, w2) + b2         # no activation here; softmax is applied inside the loss
    return y
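
As a quick sanity check (not part of the course code; a minimal sketch assuming TensorFlow 1.x and that mnist_forward.py is importable), the forward graph can be driven with a dummy all-zero batch. The output should have OUTPUT_NODE columns and the 'losses' collection should hold one L2 term per weight matrix:

import numpy as np
import tensorflow as tf
import mnist_forward

x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
y = mnist_forward.forward(x, regularizer=0.0001)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    dummy = np.zeros((2, mnist_forward.INPUT_NODE), dtype=np.float32)  # hypothetical batch of 2 examples
    print(sess.run(y, feed_dict={x: dummy}).shape)   # (2, 10)
    print(len(tf.get_collection('losses')))          # 2, one L2 term each for w1 and w2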

mnist_backward.py

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import os

BATCH_SIZE = 200              # number of examples per batch
LEARNING_RATE_BASE = 0.1      # base learning rate
LEARNING_RATE_DECAY = 0.99    # learning-rate decay factor
REGULARIZER = 0.0001          # regularization coefficient
STEPS = 50000                 # number of training steps
MOVING_AVERAGE_DECAY = 0.99   # decay for the exponential moving average
MODEL_SAVE_PATH = "./model/"  # directory where the model is saved
MODEL_NAME = "mnist_model"    # model file name

def backward(mnist):
    # the input is a batch of rows with INPUT_NODE columns, the label a batch with OUTPUT_NODE columns
    x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
    y = mnist_forward.forward(x, REGULARIZER)     # forward pass produces the logits y
    global_step = tf.Variable(0, trainable=False) # starts at 0 and is not trainable

    # logits is the model output, labels holds the ground-truth classes;
    # the labels are one-hot, so tf.argmax converts them to class indices for the sparse op.
    # ce is the per-example cross-entropy loss; tf.reduce_mean averages it over the batch
    # (a small numeric check of this loss follows this file)
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    # tf.get_collection() returns every entry of the 'losses' collection as a list;
    # tf.add_n sums those L2 terms onto the cross-entropy loss
    loss = cem + tf.add_n(tf.get_collection("losses"))

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True
    )
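    # with staircase=True the decayed rate is
    #     LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step // decay_steps),
    # where decay_steps = mnist.train.num_examples / BATCH_SIZE, so the rate drops once per epoch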
    # the training op is plain gradient descent on the total loss
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # define the exponential moving average of the trainable variables
    # background on moving averages: https://www.cnblogs.com/cloud-ken/p/7521609.html
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')     # one op that runs both the gradient step and the EMA update
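    # ema.apply() keeps a shadow copy of every trainable variable, updated each step as
    #     shadow = decay * shadow + (1 - decay) * variable,
    # where decay = min(MOVING_AVERAGE_DECAY, (1 + global_step) / (10 + global_step))
    # because global_step was passed to ExponentialMovingAverage as num_updates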

    saver = tf.train.Saver()

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        for i in range(STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)   # fetch the next batch of examples
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x:xs, y_:ys})
            if i%1000 == 0:
                print("After %d training step(s), loss on training batch is %g"%(step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main():
    mnist = input_data.read_data_sets('./data/', one_hot = True)
    backward(mnist)

if __name__=='__main__':
    main()
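
As a minimal numeric check of the loss used above (illustrative only, assuming TensorFlow 1.x), the sparse cross entropy of one example equals minus the log of the softmax probability assigned to the true class:

import numpy as np
import tensorflow as tf

logits = np.array([[2.0, 1.0, 0.1]], dtype=np.float32)  # made-up logits for a 3-class example
labels = np.array([0], dtype=np.int64)                   # index of the true class

ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
with tf.Session() as sess:
    print(sess.run(ce))                        # approx. [0.417]
p = np.exp(logits) / np.exp(logits).sum()      # softmax computed by hand
print(-np.log(p[0, 0]))                        # approx. 0.417, the same value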

mnist_test.py

#coding:utf-8
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import mnist_backward
TEST_INTERVAL_SECS = 5

def test(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
        y = mnist_forward.forward(x, mnist_backward.REGULARIZER)

        ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)
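        # variables_to_restore() maps the shadow-variable names written during training to the
        # variables of this graph, so restoring the checkpoint loads the moving-average weights
        # (a tiny sketch of this mapping follows this file)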

        # boolean vector: True where the predicted class matches the label
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        # accuracy = fraction of correct predictions (booleans cast to 0./1., then averaged)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
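                    # the step count is the suffix of the checkpoint file name,
                    # e.g. ./model/mnist_model-49001 -> '49001'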
                    accuracy_score = sess.run(accuracy, feed_dict={x:mnist.test.images, y_:mnist.test.labels})
                    print("After %s training step(s), test accuracy = %g"%(global_step, accuracy_score))
                else:
                    print("No checkpoint file found")
                    return
            time.sleep(TEST_INTERVAL_SECS)

def main():
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    test(mnist)

if __name__=='__main__':
    main()
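
A minimal look (illustrative, assuming TensorFlow 1.x; the variable name w1 below is hypothetical) at the mapping variables_to_restore() hands to the Saver: shadow-variable names point at the variables of the current graph, so restore() fills them with the averaged values saved by mnist_backward.py:

import tensorflow as tf

v = tf.Variable(0.0, name='w1')                # hypothetical trainable variable
ema = tf.train.ExponentialMovingAverage(0.99)
print(ema.variables_to_restore())
# roughly: {'w1/ExponentialMovingAverage': <tf.Variable 'w1:0' shape=() dtype=float32_ref>}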

Training results

Extracting ./data/train-images-idx3-ubyte.gz
Extracting ./data/train-labels-idx1-ubyte.gz
Extracting ./data/t10k-images-idx3-ubyte.gz
Extracting ./data/t10k-labels-idx1-ubyte.gz
2018-11-13 16:48:19.447836: I C:\tf_jenkins\home\workspace\rel-win\M\windows\PY\36\tensorflow\core\platform\cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX AVX2
After 1 training step(s), loss on training batch is 3.17369
After 1001 training step(s), loss on training batch is 0.297716
After 2001 training step(s), loss on training batch is 0.291129
After 3001 training step(s), loss on training batch is 0.270674
After 4001 training step(s), loss on training batch is 0.215635
After 5001 training step(s), loss on training batch is 0.22806
After 6001 training step(s), loss on training batch is 0.214063
After 7001 training step(s), loss on training batch is 0.201608
After 8001 training step(s), loss on training batch is 0.180816
After 9001 training step(s), loss on training batch is 0.175806
After 10001 training step(s), loss on training batch is 0.179737
After 11001 training step(s), loss on training batch is 0.182252
After 12001 training step(s), loss on training batch is 0.165699
After 13001 training step(s), loss on training batch is 0.166102
After 14001 training step(s), loss on training batch is 0.181359
After 15001 training step(s), loss on training batch is 0.164315
After 16001 training step(s), loss on training batch is 0.168706
After 17001 training step(s), loss on training batch is 0.172612
After 18001 training step(s), loss on training batch is 0.153352
After 19001 training step(s), loss on training batch is 0.149103
After 20001 training step(s), loss on training batch is 0.143983
After 21001 training step(s), loss on training batch is 0.141471
After 22001 training step(s), loss on training batch is 0.14262
After 23001 training step(s), loss on training batch is 0.144945
After 24001 training step(s), loss on training batch is 0.140799
After 25001 training step(s), loss on training batch is 0.142459
After 26001 training step(s), loss on training batch is 0.142817
After 27001 training step(s), loss on training batch is 0.155631
After 28001 training step(s), loss on training batch is 0.137138
After 29001 training step(s), loss on training batch is 0.134814
After 30001 training step(s), loss on training batch is 0.143814
After 31001 training step(s), loss on training batch is 0.141215
After 32001 training step(s), loss on training batch is 0.139977
After 33001 training step(s), loss on training batch is 0.135799
After 34001 training step(s), loss on training batch is 0.138739
After 35001 training step(s), loss on training batch is 0.144513
After 36001 training step(s), loss on training batch is 0.127639
After 37001 training step(s), loss on training batch is 0.136286
After 38001 training step(s), loss on training batch is 0.131197
After 39001 training step(s), loss on training batch is 0.129318
After 40001 training step(s), loss on training batch is 0.124964
After 41001 training step(s), loss on training batch is 0.129228
After 42001 training step(s), loss on training batch is 0.154971
After 43001 training step(s), loss on training batch is 0.12766
After 44001 training step(s), loss on training batch is 0.123435
After 45001 training step(s), loss on training batch is 0.125604
After 46001 training step(s), loss on training batch is 0.122904
After 47001 training step(s), loss on training batch is 0.122641
After 48001 training step(s), loss on training batch is 0.129812
After 49001 training step(s), loss on training batch is 0.126799
