(Tensorflow之八)MNIST数字识别源码--实战Google深度学习框架5.2小节

针对原书本的一点小bug进行了修改,直接贴上源码,供大家参考

训练集与测试集的数据集需手动下载,下载地址:http://yann.lecun.com/exdb/mnist/

import tensorflow as tf
#导入TensorFlow库,并使用别名tf
from tensorflow.examples.tutorials.mnist import input_data

# Model and training hyper-parameters.
INPUT_NODE = 784 #number of input features: one 28x28 MNIST image flattened
OUTPUT_NODE = 10 #number of output classes: the digits 0-9
LAYER1_NODE = 500 #number of units in the single hidden layer
BATCH_SIZE = 100 #examples per training batch
LEARNING_RATE_BASE = 0.8  # initial learning rate before exponential decay
LEARNING_RATE_DACAY = 0.99  # decay rate per period (NOTE: name is a typo for "DECAY"; kept as-is because other code references it)
REGULARIZATION_RATE = 0.0001  # L2 regularization coefficient for the weights
TRAINING_STEPS = 30000  # total number of training iterations
MOVING_AVERAGE_DECAY = 0.99  # decay for the exponential moving average of parameters
global_step = 100  # NOTE(review): a plain int that is never incremented, so decay/EMA see a fixed step; should likely be a tf.Variable updated by the optimizer -- confirm

#前向传播
def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    """Forward pass of a 2-layer (one hidden layer, ReLU) fully connected net.

    Args:
        input_tensor: batch of inputs, shape [None, INPUT_NODE].
        avg_class: an ExponentialMovingAverage object or None. When given,
            the shadow (moving-average) values of the parameters are used.
        weights1, biases1: hidden-layer parameters.
        weights2, biases2: output-layer parameters.

    Returns:
        The output-layer logits. No softmax is applied here; the loss op
        applies softmax internally.
    """
    # Idiom fix: compare against None with `is`, not `==`.
    if avg_class is None:
        # Plain forward pass with the current variable values.
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
        return tf.matmul(layer1, weights2) + biases2
    else:
        # Forward pass using the moving-average (shadow) variable values.
        layer1 = tf.nn.relu(
            tf.matmul(input_tensor, avg_class.average(weights1)) + avg_class.average(biases1))
        return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)


def train(mnist):
    """Build the graph and train the 2-layer network on MNIST.

    Prints validation accuracy (with the moving-average model) every 1000
    steps and the final test accuracy at the end.

    Args:
        mnist: a DataSets object from input_data.read_data_sets, providing
            the .train, .validation and .test splits.
    """
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')

    # Hidden-layer parameters.
    weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=1.0))
    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))

    # Output-layer parameters.
    weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=1.0))
    biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))

    # Logits computed with the current (non-averaged) parameters.
    y = inference(x, None, weights1, biases1, weights2, biases2)

    # BUG FIX: the original used the module-level int `global_step = 100`,
    # which never changes, so learning-rate decay and the moving average
    # always saw a fixed step. Use a non-trainable Variable that the
    # optimizer increments on every training step.
    global_step = tf.Variable(0, trainable=False)

    # Moving averages of all trainable variables, used for evaluation.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    # Logits computed with the moving-average (shadow) parameters.
    average_y = inference(x, variable_averages, weights1, biases1, weights2, biases2)

    # Cross-entropy loss; the op applies softmax to the logits internally.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # L2 regularization on the weight matrices (biases excluded, as usual).
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    regularization = regularizer(weights1) + regularizer(weights2)
    loss = cross_entropy_mean + regularization
    # Exponentially decayed learning rate, one decay period per epoch.
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DACAY)
    # Passing global_step makes the optimizer increment it each step.
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # A single op that both applies the gradients and updates the averages.
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    # Accuracy of the moving-average model.
    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        # BUG FIX: tf.initialize_all_variables() is deprecated; use the
        # replacement API.
        tf.global_variables_initializer().run()
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        test_feed = {x: mnist.test.images, y_: mnist.test.labels}
        for i in range(TRAINING_STEPS):
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training step(s),validation accuracy using average models is %g" % (i, validate_acc))
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            sess.run(train_op, feed_dict={x: xs, y_: ys})
        test_acc = sess.run(accuracy, feed_dict=test_feed)
        # BUG FIX: the original printed validate_acc here and labelled it
        # "validation"; report the test-set accuracy that was just computed.
        print("After %d training step(s),test accuracy using average models is %g" % (TRAINING_STEPS, test_acc))

# Program entry point, invoked by tf.app.run().
def main(argv=None):
    # The MNIST files must be downloaded manually from
    # http://yann.lecun.com/exdb/mnist/ and placed (still gzipped)
    # in the directory below.
    dataset = input_data.read_data_sets("/home/project1/mnist_train/", one_hot=True)
    train(dataset)

# Run training via tf.app.run() only when executed as a script,
# not when this module is imported.
if __name__ == '__main__':
    tf.app.run()



#运行结果
After 0 training step(s),validation accuracy using average models is 0.11
After 1000 training step(s),validation accuracy using average models is 0.9174
After 2000 training step(s),validation accuracy using average models is 0.9352
After 3000 training step(s),validation accuracy using average models is 0.945
After 4000 training step(s),validation accuracy using average models is 0.9512
After 5000 training step(s),validation accuracy using average models is 0.9564
After 6000 training step(s),validation accuracy using average models is 0.9608
After 7000 training step(s),validation accuracy using average models is 0.9624
After 8000 training step(s),validation accuracy using average models is 0.9648
After 9000 training step(s),validation accuracy using average models is 0.968
After 10000 training step(s),validation accuracy using average models is 0.9684
After 11000 training step(s),validation accuracy using average models is 0.9692
After 12000 training step(s),validation accuracy using average models is 0.9726
After 13000 training step(s),validation accuracy using average models is 0.9738
After 14000 training step(s),validation accuracy using average models is 0.9744
After 15000 training step(s),validation accuracy using average models is 0.9758
After 16000 training step(s),validation accuracy using average models is 0.978
After 17000 training step(s),validation accuracy using average models is 0.9774
After 18000 training step(s),validation accuracy using average models is 0.9782
After 19000 training step(s),validation accuracy using average models is 0.9786
After 20000 training step(s),validation accuracy using average models is 0.9798
After 21000 training step(s),validation accuracy using average models is 0.9804
After 22000 training step(s),validation accuracy using average models is 0.9806
After 23000 training step(s),validation accuracy using average models is 0.9798
After 24000 training step(s),validation accuracy using average models is 0.9814
After 25000 training step(s),validation accuracy using average models is 0.9792
After 26000 training step(s),validation accuracy using average models is 0.9798








  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值