TensorFlow MNIST Digit Recognition Practice Code

Scheme 1: Training Code + Validation Code

# -*- coding: utf-8 -*-

import tensorflow as tf  
from tensorflow.examples.tutorials.mnist import input_data  

# Layer sizes
INPUT_NODE = 784
LAYER1_NODE = 500
OUTPUT_NODE = 10

# Mini-batch size
BATCH_SIZE = 100

# Training hyperparameters
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99

# Forward propagation. When avg_class is None the raw variables are used;
# otherwise the moving-averaged (shadow) values of the weights and biases are used.
def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    if avg_class is None:
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
        return tf.matmul(layer1, weights2) + biases2
    else:
        layer1 = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(weights1)) + avg_class.average(biases1))
        return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)
  
  
def train(mnist):
    # Input placeholder and label placeholder
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')

    # Hidden-layer parameters
    weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))

    # Output-layer parameters
    weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
    biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))

    # Forward pass without moving averages
    y = inference(x, None, weights1, biases1, weights2, biases2)

    # Counts training steps; trainable=False keeps it out of the moving averages
    global_step = tf.Variable(0, trainable=False)

    # Moving-average model; average_y below is the forward pass using the averaged variables
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
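    # variables_averages_op updates, for every trainable variable v, a shadow copy:
    #   shadow_v = decay * shadow_v + (1 - decay) * v,
    # with decay = min(MOVING_AVERAGE_DECAY, (1 + global_step) / (10 + global_step)).
    # avg_class.average(v) inside inference() then reads shadow_v instead of v.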
    average_y = inference(x, variable_averages, weights1, biases1, weights2, biases2)  

    # Cross-entropy plus L2 regularization --> loss
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    regularization = regularizer(weights1) + regularizer(weights2)
    loss = cross_entropy_mean + regularization
    # Exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                               mnist.train.num_examples / BATCH_SIZE,
                                               LEARNING_RATE_DECAY)
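    # tf.train.exponential_decay computes
    #   learning_rate = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps),
    # i.e. the rate shrinks smoothly by a factor of 0.99 per epoch of
    # mnist.train.num_examples / BATCH_SIZE steps.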
    # Gradient descent step; passing global_step increments it on every update
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # Group the training step and the moving-average update into a single train_op
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')
    # Accuracy, evaluated with the moving-average model (matches the messages printed below)
    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
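    # tf.argmax(..., 1) gives the predicted / true class index per example;
    # averaging the boolean matches (cast to float32) yields the accuracy.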

    # Saver for checkpointing
    saver = tf.train.Saver()
  
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        test_feed = {x: mnist.test.images, y_: mnist.test.labels}
  
        for i in range(TRAINING_STEPS):
            # Evaluate on the validation set every 1000 steps
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training step(s), validation accuracy using average model is %g" % (i, validate_acc))
  
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            sess.run(train_op, feed_dict={x: xs, y_: ys})
        saver.save(sess, "./model/model.ckpt")
        test_acc = sess.run(accuracy, feed_dict=test_feed)
        print("After %d training step(s), test accuracy using average model is %g" % (TRAINING_STEPS, test_acc))
  
  
def main(argv=None):  
    mnist = input_data.read_data_sets("mnist_data/", one_hot=True)  
    train(mnist)  
  
if __name__ == '__main__':
    tf.app.run()
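
The checkpoint written by saver.save() can be reloaded later without retraining. A minimal restore sketch, assuming the same graph as in train() has been rebuilt in the current process:

saver = tf.train.Saver()
with tf.Session() as sess:
    # load all saved variable values into the freshly built graph
    saver.restore(sess, "./model/model.ckpt")
    # accuracy or inference ops can now be evaluated directly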


Scheme 2: Training + Evaluation Code (split into modules)

File: mnist_inference.py

# -*- coding: utf-8 -*-

import tensorflow as tf  

# Layer sizes
INPUT_NODE = 784
LAYER1_NODE = 500
OUTPUT_NODE = 10

# Create the weights variable for the current scope; if a regularizer is given,
# add its L2 term to the 'losses' collection for later use in the loss function
def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))

    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))

    return weights

# Forward propagation
def inference(input_tensor, regularizer):
    # layer1
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
    # layer2
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    return layer2
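
Because every variable is created through tf.get_variable inside a named variable_scope, it can be fetched again by name anywhere in the same graph. An illustrative sketch of this mechanism (assuming inference() has already been called once in the current graph):

with tf.variable_scope('layer1', reuse=True):
    # returns the very same variable object created inside inference()
    w1 = tf.get_variable("weights", [INPUT_NODE, LAYER1_NODE])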

File: mnist_train.py

# -*- coding: utf-8 -*-

import os

import tensorflow as tf  
from tensorflow.examples.tutorials.mnist import input_data  

import mnist_inference  

# Mini-batch size
BATCH_SIZE = 100

# Training hyperparameters
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99

# Model save path and file name
MODEL_SAVE_PATH = "./model2/"
MODEL_NAME = "model.ckpt"

  
def train(mnist):
    # Input placeholder and label placeholder
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    # Forward pass with L2 regularization
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Moving-average model
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # Cross-entropy plus the regularization terms collected in 'losses' --> loss
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
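    # tf.get_collection('losses') returns the L2 terms that get_weight_variable
    # added (one per layer); tf.add_n sums them into a single scalar.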
    # Exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                               mnist.train.num_examples / BATCH_SIZE,
                                               LEARNING_RATE_DECAY)
    # Gradient descent step; passing global_step increments it on every update
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # Group the training step and the moving-average update into a single train_op
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # Saver for checkpointing
    saver = tf.train.Saver()

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
  
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            # Report the loss and save a checkpoint every 1000 steps
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g" % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME))
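                # saver.save writes model.ckpt.data-*, model.ckpt.index and
                # model.ckpt.meta under MODEL_SAVE_PATH, plus a 'checkpoint' index file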
  
  
def main(argv=None):  
    mnist = input_data.read_data_sets("mnist_data/", one_hot=True)  
    train(mnist)  
  
if __name__ == '__main__':
    tf.app.run()

File: mnist_eval.py

# -*- coding: utf-8 -*-

import os
import time

import tensorflow as tf  
from tensorflow.examples.tutorials.mnist import input_data  

import mnist_inference  
import mnist_train

# Seconds between evaluations (see the polling-loop sketch below)
EVAL_INTERVAL_SECS = 10

# Model save path and file name (must match mnist_train.py)
MODEL_SAVE_PATH = "./model2/"
MODEL_NAME = "model.ckpt"

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
     
        # No regularizer is needed at evaluation time
        y = mnist_inference.inference(x, None)
     
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
     
        # variables_to_restore() maps each variable to its shadow (moving-average)
        # name, so restoring loads the averaged values into the plain variables
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        with tf.Session() as sess:
            saver.restore(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME))

            accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
            print("********** accuracy = %g" % accuracy_score)
               
        

def main(argv=None):
    mnist = input_data.read_data_sets("mnist_data/", one_hot=True)
    evaluate(mnist)
    
if __name__ == '__main__':
    tf.app.run()
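
As written, evaluate() restores the checkpoint once and exits, so EVAL_INTERVAL_SECS and the time import go unused. A minimal sketch of the polling variant they suggest, reusing the names defined inside evaluate() above; it replaces the single "with tf.Session()" block:

while True:
    with tf.Session() as sess:
        # get_checkpoint_state reads the 'checkpoint' file maintained by saver.save
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
            print("********** accuracy = %g" % accuracy_score)
        else:
            print("No checkpoint file found")
    time.sleep(EVAL_INTERVAL_SECS)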



