Tensorflow中MNIST手写数字数据集

刚开始跑的代码还有错误,但是不知道为什么再次运行就正确了,可能是新添加了input_data.py文件,暂存记录一下,等待深入研究

# -*- coding: utf-8 -*-

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# 层节点
INPUT_NODE = 784
LAYER1_NODE = 500
OUTPUT_NODE = 10

# 数据batch大小
BATCH_SIZE = 100

# 训练参数
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99


# 前向传播函数
def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    """Forward pass of the 2-layer fully-connected network.

    Args:
        input_tensor: batch of flattened images, shape [None, INPUT_NODE].
        avg_class: None to read the raw variables, or an
            ExponentialMovingAverage whose shadow values should be used.
        weights1, biases1: hidden-layer parameters.
        weights2, biases2: output-layer parameters.

    Returns:
        Output-layer logits (no softmax is applied here; the loss op does it).
    """
    # PEP 8: compare against None with `is`, not `==`.
    if avg_class is None:
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
        return tf.matmul(layer1, weights2) + biases2
    # Otherwise evaluate with the moving-average (shadow) copies of every parameter.
    layer1 = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(weights1)) + avg_class.average(biases1))
    return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)


def train(mnist):
    """Build the two-layer MLP graph, train it on MNIST, and report the
    validation/test accuracy of the moving-average (shadow) model.

    Args:
        mnist: DataSets object from input_data.read_data_sets(), providing
            train/validation/test splits with one-hot labels.
    """
    # Placeholders for flattened 28x28 image batches and one-hot labels.
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')

    # Hidden-layer parameters.
    weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))

    # Output-layer parameters.
    weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
    biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))

    # Forward pass with the raw (non-averaged) parameters; drives the loss.
    y = inference(x, None, weights1, biases1, weights2, biases2)

    # Step counter; trainable=False keeps it out of the EMA and the optimizer.
    global_step = tf.Variable(0, trainable=False)

    # Exponential moving average over all trainable variables, plus the
    # forward pass that reads the shadow (averaged) parameters.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    average_y = inference(x, variable_averages, weights1, biases1, weights2, biases2)

    # Loss = mean softmax cross-entropy + L2 regularization on both weight matrices.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    regularization = regularizer(weights1) + regularizer(weights2)
    loss = cross_entropy_mean + regularization

    # Exponentially decayed learning rate: one decay interval per epoch.
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE,
                                               LEARNING_RATE_DECAY)

    # Gradient descent; minimize() also increments global_step each call.
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # Bundle the gradient update and the EMA update into a single training op.
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # BUG FIX: the printed messages claim accuracy of the "average model",
    # but the original compared tf.argmax(y, 1) — the raw model. Evaluate
    # average_y so the reported accuracy matches the moving-average model.
    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Saver for checkpointing the trained parameters.
    saver = tf.train.Saver()

    with tf.Session() as sess:
        # initialize_all_variables() is deprecated; use the TF 1.x API
        # (the file already relies on TF 1.x labels=/logits= kwargs).
        tf.global_variables_initializer().run()
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        test_feed = {x: mnist.test.images, y_: mnist.test.labels}

        for i in range(TRAINING_STEPS):
            # Evaluate on the validation set every 1000 steps.
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training step(s), validation accuracy using average model is %g " % (i, validate_acc))

            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            sess.run(train_op, feed_dict={x: xs, y_: ys})
        saver.save(sess, "./model/model.ckpt")
        test_acc = sess.run(accuracy, feed_dict=test_feed)
        print("After %d training step(s), test accuracy using average model is %g" % (TRAINING_STEPS, test_acc))


def main(argv=None):
    """Entry point: download/load the MNIST data set and run training."""
    dataset = input_data.read_data_sets("mnist_data/", one_hot=True)
    train(dataset)


if __name__ == '__main__':
    # tf.app.run() parses command-line flags and then invokes main(argv).
    tf.app.run()


input_data.py文件代码

# __author__ = 'youngkl'
# -*- coding: utf-8 -*-

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import os
import tempfile

import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets


之后就运行正确了,仅剩若干 warning 提示:

After 0 training step(s), validation accuracy using average model is 0.0798 

After 1000 training step(s), validation accuracy using average model is 0.9726 

After 2000 training step(s), validation accuracy using average model is 0.9746 

After 3000 training step(s), validation accuracy using average model is 0.982 

After 4000 training step(s), validation accuracy using average model is 0.9796 

After 5000 training step(s), validation accuracy using average model is 0.9822 

After 6000 training step(s), validation accuracy using average model is 0.9832 

After 7000 training step(s), validation accuracy using average model is 0.9854 

After 8000 training step(s), validation accuracy using average model is 0.9824 

After 9000 training step(s), validation accuracy using average model is 0.9846 

After 10000 training step(s), validation accuracy using average model is 0.984 

After 11000 training step(s), validation accuracy using average model is 0.984 

After 12000 training step(s), validation accuracy using average model is 0.9842 

After 13000 training step(s), validation accuracy using average model is 0.9858 

After 14000 training step(s), validation accuracy using average model is 0.984 

After 15000 training step(s), validation accuracy using average model is 0.983 

After 16000 training step(s), validation accuracy using average model is 0.9858 

After 17000 training step(s), validation accuracy using average model is 0.9848 

After 18000 training step(s), validation accuracy using average model is 0.9842 

After 19000 training step(s), validation accuracy using average model is 0.9844 

After 20000 training step(s), validation accuracy using average model is 0.9844 

After 21000 training step(s), validation accuracy using average model is 0.984 

After 22000 training step(s), validation accuracy using average model is 0.9844 

After 23000 training step(s), validation accuracy using average model is 0.9844 

After 24000 training step(s), validation accuracy using average model is 0.984 

After 25000 training step(s), validation accuracy using average model is 0.985 

After 26000 training step(s), validation accuracy using average model is 0.9844 

After 27000 training step(s), validation accuracy using average model is 0.9844 

After 28000 training step(s), validation accuracy using average model is 0.985 

After 29000 training step(s), validation accuracy using average model is 0.9846 

After 30000 training step(s), test accuracy using average model is 0.9843


Process finished with exit code 0



评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值