Training a Handwritten-Digit (MNIST) Neural Network with TensorFlow

input_data.py :

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import os
import tempfile

import numpy
from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
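# As listed here, input_data.py just pulls in read_data_sets from tensorflow.contrib.learn,
# which is why Lesson 1 below can call input_data.read_data_sets(...) directly.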

Lesson 1:

import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import tensorflow as tf
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x,W) + b)
y_ = tf.placeholder("float", [None,10])
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(1000):
  batch_xs, batch_ys = mnist.train.next_batch(100)
  sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print (sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
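
The hand-written loss above, -tf.reduce_sum(y_*tf.log(y)), turns into NaN as soon as the softmax output reaches exactly 0. A minimal optional variant (not part of the original lesson) that reuses x, W, b and y_ from above and the fused op that Lesson 2 also uses:

# Numerically safer loss: let TensorFlow combine softmax and cross-entropy.
logits = tf.matmul(x, W) + b
stable_cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(stable_cross_entropy)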

Lesson 2:
mnist.py

from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
mnist = read_data_sets('MNIST_data', one_hot=True)
import tensorflow as tf
#sess = tf.InteractiveSession()
x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])
#tf.truncated_normal(shape, mean, stddev): shape is the shape of the output tensor,
#mean is the mean and stddev is the standard deviation, both chosen by the caller.
#The values are drawn from a truncated normal distribution: any sample that falls
#more than two standard deviations from the mean is discarded and re-drawn.
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME',use_cudnn_on_gpu=True)

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')

W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1,28,28,1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)



keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])

y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
with tf.Session() as sess:
    with tf.device("/gpu:0"):
        sess.run(tf.global_variables_initializer())
        for i in range(1000):
            batch = mnist.train.next_batch(500)
            if i%100 == 0:
                _,train_accuracy = sess.run([train_step,accuracy],feed_dict={x:batch[0], y_: batch[1], keep_prob: 1.0})
                print( "step %d, training accuracy %g"%(i, train_accuracy))
            sess.run(train_step,feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

    print ("test accuracy %g"%sess.run(accuracy,feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))

Lesson 2, improved mnist.py (visualize with TensorBoard; train and save the weights to a protobuf graph file)

from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
mnist = read_data_sets('MNIST_data', one_hot=True)
import tensorflow as tf
#sess = tf.InteractiveSession()

#tf.truncated_normal(shape, mean, stddev): shape is the shape of the output tensor,
#mean is the mean and stddev is the standard deviation, both chosen by the caller.
#The values are drawn from a truncated normal distribution: any sample that falls
#more than two standard deviations from the mean is discarded and re-drawn.
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')
g = tf.Graph()
with g.as_default():    
    x = tf.placeholder("float", shape=[None, 784],name='inputdata')
    y_ = tf.placeholder("float", shape=[None, 10])
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    x_image = tf.reshape(x, [-1,28,28,1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

    h_pool1 = max_pool_2x2(h_conv1)
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])

    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)

    h_pool2 = max_pool_2x2(h_conv2)
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])

    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    if tf.gfile.Exists('log'):
        tf.gfile.DeleteRecursively('log')
    tf.gfile.MakeDirs('log')
    if tf.gfile.Exists('backup'):
        tf.gfile.DeleteRecursively('backup')
    tf.gfile.MakeDirs('backup')
    # use another method
    saver=tf.train.Saver(max_to_keep=7)
    path='log/model.ckpt'
    y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2,name='outputdata')
    cross_entropy1 = -tf.reduce_sum(y_*tf.log(y_conv))
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, 
            logits=tf.matmul(h_fc1_drop, W_fc2) + b_fc2, name='xentropy'))
    train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    tf.summary.scalar('accuracy', accuracy)
    tf.summary.histogram('weights',W_conv1)
    tf.summary.image('input',h_pool2[:,:,:,0:1],10)
    summary = tf.summary.merge_all()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        summary_writer = tf.summary.FileWriter('log/', sess.graph)
        for i in range(1000):
            batch = mnist.train.next_batch(100)
            if i%100 == 0:
                _,train_accuracy,y_predict = sess.run([train_step,accuracy,y_conv],feed_dict={x:batch[0], y_: batch[1], keep_prob: 1.0})
                print( "step %d, training accuracy %g"%(i, train_accuracy))
                summary_str=sess.run(summary,feed_dict={x:batch[0], y_: batch[1], keep_prob: 1.0})
                summary_writer.add_summary(summary_str, i)
                summary_writer.flush()
                saver.save(sess,path,global_step=i)
            sess.run(train_step,feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
        _W_conv1,_b_conv1,_W_conv2,_b_conv2,_W_fc1,_b_fc1,_W_fc2,_b_fc2=sess.run([W_conv1,b_conv1,
                                W_conv2,b_conv2,W_fc1,b_fc1,W_fc2,b_fc2])
        print ("test accuracy %g"%sess.run(accuracy,feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))


g_2 = tf.Graph()
with g_2.as_default():
    x_2 = tf.placeholder("float", shape=[None, 784], name="inputdata")
    W_conv1_2 = tf.constant(_W_conv1, name="constant_W_conv1")
    b_conv1_2 = tf.constant(_b_conv1, name="constant_b_conv1")
    x_image_2 = tf.reshape(x_2, [-1, 28, 28, 1])
    h_conv1_2 = tf.nn.relu(conv2d(x_image_2, W_conv1_2) + b_conv1_2)
    h_pool1_2 = max_pool_2x2(h_conv1_2)

    W_conv2_2 = tf.constant(_W_conv2, name="constant_W_conv2")
    b_conv2_2 = tf.constant(_b_conv2, name="constant_b_conv2")
    h_conv2_2 = tf.nn.relu(conv2d(h_pool1_2, W_conv2_2) + b_conv2_2)
    h_pool2_2 = max_pool_2x2(h_conv2_2)

    W_fc1_2 = tf.constant(_W_fc1, name="constant_W_fc1")
    b_fc1_2 = tf.constant(_b_fc1, name="constant_b_fc1")
    h_pool2_flat_2 = tf.reshape(h_pool2_2, [-1, 7 * 7 * 64])
    h_fc1_2 = tf.nn.relu(tf.matmul(h_pool2_flat_2, W_fc1_2) + b_fc1_2)

    W_fc2_2 = tf.constant(_W_fc2, name="constant_W_fc2")
    b_fc2_2 = tf.constant(_b_fc2, name="constant_b_fc2")

    # DropOut is skipped for exported graph.

    y_conv_2 = tf.nn.softmax(tf.matmul(h_fc1_2, W_fc2_2) + b_fc2_2, name="outputdata")

    sess_2 = tf.Session()
    init_2 = tf.global_variables_initializer();
    sess_2.run(init_2)

    graph_def = g_2.as_graph_def()
    tf.train.write_graph(graph_def, 'backup/', 'expert-graph.weights', as_text=False)

    # Test trained model
    y__2 = tf.placeholder("float", [None, 10])
    correct_prediction_2 = tf.equal(tf.argmax(y_conv_2, 1), tf.argmax(y__2, 1))
    accuracy_2 = tf.reduce_mean(tf.cast(correct_prediction_2, "float"))

    print( "check accuracy %g" % accuracy_2.eval(
        {x_2: mnist.test.images, y__2: mnist.test.labels}, sess_2))
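
Once the script has run, the scalar, histogram and image summaries written to log/ can be viewed by pointing TensorBoard at that directory:

tensorboard --logdir=log
# then open http://localhost:6006 in a browser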

Lesson 2, improved test.py:

from __future__ import absolute_import, unicode_literals
from datasets_mnist import read_data_sets
import tensorflow as tf

train,validation,test = read_data_sets("MNIST_data/", one_hot=True)

with tf.Graph().as_default():
    output_graph_def = tf.GraphDef()
    output_graph_path = 'backup/expert-graph.weights'


    with open(output_graph_path, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        tf.import_graph_def(output_graph_def, name="")

    with tf.Session() as sess:
        init=tf.global_variables_initializer()
        sess.run(init)
#        sess.graph.add_to_collection("input", test.images)
        input_x = sess.graph.get_tensor_by_name("inputdata:0")        
        print( input_x)
        output = sess.graph.get_tensor_by_name("outputdata:0")
        print( output)
        y_conv_2 = sess.run(output,{input_x:test.images})
        print( "y_conv_2", y_conv_2)

        # Test trained model
        y__2 = test.labels
        correct_prediction_2 = tf.equal(tf.argmax(y_conv_2, 1), tf.argmax(y__2, 1))
        print ("correct_prediction_2", correct_prediction_2 )
        accuracy_2 = tf.reduce_mean(tf.cast(correct_prediction_2, "float"))
        print ("accuracy_2", accuracy_2)

        print ("check accuracy %g" % accuracy_2.eval())

Lesson 3: (using TensorBoard; saving and restoring weights with tf.train.Saver)
fully_connected.py

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=missing-docstring
import argparse
import os
import time

import tensorflow as tf

import datasets_mnist
# Basic model parameters as external flags.
FLAGS = None
NUM_CLASSES = 10

# The MNIST images are always 28x28 pixels.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
def placeholder_inputs(batch_size):
  images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,IMAGE_PIXELS))
  labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
  return images_placeholder, labels_placeholder


def fill_feed_dict(data_set, images_pl, labels_pl):

  images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,FLAGS.fake_data)
  feed_dict_value = {
      images_pl: images_feed,
      labels_pl: labels_feed,
  }
  return feed_dict_value
def conv2d(x, W):
  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
  return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')

def inference(images):
  """Build the MNIST model up to where it may be used for inference.

  Args:
    images: Images placeholder, from placeholder_inputs().

  Returns:
    logits: Output tensor with the computed logits.
  """

  W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32],stddev=0.1))
  b_conv1 = tf.Variable(tf.zeros([32]))
  x_image = tf.reshape(images, [-1,28,28,1])
  h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
  h_pool1 = max_pool_2x2(h_conv1)
  W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64],stddev=0.1))
  b_conv2 = tf.Variable(tf.zeros([64]))

  h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
  h_pool2 = max_pool_2x2(h_conv2)
  W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024],stddev=0.1))
  b_fc1 = tf.Variable(tf.zeros([1024]))

  h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
  h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

#  keep_prob = tf.placeholder("float")
  h_fc1_drop = tf.nn.dropout(h_fc1, 0.5)
  W_fc2 = tf.Variable(tf.truncated_normal([1024, 10],stddev=0.1))
  b_fc2 = tf.Variable(tf.zeros([10]))
  logits=tf.matmul(h_fc1_drop, W_fc2) + b_fc2
  return logits


def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].

  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
#  labels = tf.to_float(labels)
#  labels= tf.one_hot(labels, 10)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=labels, name='xentropy')
#  y_conv = tf.nn.softmax(logits)
#  cross_entropy = -tf.reduce_sum(labels*tf.log(y_conv))
  return tf.reduce_mean(cross_entropy, name='xentropy_mean')


def training(loss):
  """Sets up the training Ops.

  Creates a summarizer to track the loss over time in TensorBoard.

  Creates an optimizer and applies the gradients to all trainable variables.

  The Op returned by this function is what must be passed to the
  `sess.run()` call to cause the model to train.

  Args:
    loss: Loss tensor, from loss().

  Returns:
    train_op: The Op for training.
  """
  # Add a scalar summary for the snapshot loss.

  # Create the Adam optimizer with its default learning rate.
  optimizer = tf.train.AdamOptimizer()
  # Create a variable to track the global step.
#  global_step = tf.Variable(0, name='global_step', trainable=False)
  # Use the optimizer to apply the gradients that minimize the loss
  # as a single training step.
  train_op = optimizer.minimize(loss)
  return train_op


def evaluation(logits, labels):
  """Evaluate the quality of the logits at predicting the label.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size], with values in the
      range [0, NUM_CLASSES).

  Returns:
    Two scalar float32 tensors with the fraction of examples in the batch
    that were predicted correctly (via in_top_k and via argmax, respectively).
  """
  # For a classifier model, we can use the in_top_k Op.
  # It returns a bool tensor with shape [batch_size] that is true for
  # the examples where the label is in the top k (here k=1)
  # of all logits for that example.
  labels1=tf.one_hot(labels,10)
  correct = tf.nn.in_top_k(logits, labels, 1)

  correct1 = tf.equal(tf.argmax(logits,1), tf.argmax(labels1,1))
  # Return the fraction of true entries under each definition.
  return tf.reduce_mean(tf.cast(correct, tf.float32)) ,tf.reduce_mean(tf.cast(correct1, tf.float32))





def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
  """Runs one evaluation against the full epoch of data.

  Args:
    sess: The session in which the model has been trained.
    eval_correct: The Tensor that returns the batch-mean accuracy, from evaluation().
    images_placeholder: The images placeholder.
    labels_placeholder: The labels placeholder.
    data_set: The set of images and labels to evaluate, from
      input_data.read_data_sets().
  """
  # And run one epoch of eval.
  true_count = 0  # Counts the number of correct predictions.
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in range(steps_per_epoch):
    feed_dict_value = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
    # eval_correct is the batch-mean accuracy, so scale by the batch size to
    # accumulate an (approximate) count of correct predictions.
    true_count += sess.run(eval_correct, feed_dict=feed_dict_value) * FLAGS.batch_size
  precision = float(true_count) / num_examples

  print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))


def run_training(logits,labels_placeholder):
  """Train MNIST for a number of steps."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.

  if tf.gfile.Exists(FLAGS.log_dir):
    tf.gfile.DeleteRecursively(FLAGS.log_dir)
  tf.gfile.MakeDirs(FLAGS.log_dir)
  # Add to the Graph the Ops for loss calculation.
  loss_value= loss(logits, labels_placeholder)
  tf.summary.scalar('loss', loss_value)
  # Add to the Graph the Ops that calculate and apply gradients.
  train_op = training(loss_value)

  #   Add the Op to compare the logits to the labels during evaluation.
  eval_correct,eval_correct1 = evaluation(logits, labels_placeholder)

  tf.summary.scalar('precision', eval_correct)
  # Build the summary Tensor based on the TF collection of Summaries.
  summary = tf.summary.merge_all()

  # Add the variable initializer Op.
  init = tf.global_variables_initializer()

  # Create a saver for writing training checkpoints.


  # Create a session for running Ops on the Graph.
  sess = tf.Session()

  # Instantiate a SummaryWriter to output summaries and the Graph.
  summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

  # And then after everything is built:

  # Run the Op to initialize the variables.
  sess.run(init)

  # Start the training loop.
  for step in range(FLAGS.max_steps):
    start_time = time.time()

  # Fill a feed dictionary with the actual set of images and labels
  # for this particular training step.
    feed_dict_value = fill_feed_dict(train,
                             images_placeholder,
                             labels_placeholder)

  # Run one step of the model.  The return values are the activations
  # from the `train_op` (which is discarded) and the `loss` Op.  To
  # inspect the values of your Ops or variables, you may include them
  # in the list passed to sess.run() and the value tensors will be
  # returned in the tuple from the call.
    _, loss_value1,eval_correct_value,eval_correct_value1 = sess.run([train_op, loss_value,eval_correct,eval_correct1],feed_dict=feed_dict_value)

    duration = time.time() - start_time

  # Write the summaries and print an overview fairly often.
    if step % 100 == 0:
    # Print status to stdout.
      print('Step %d: loss = %.2f (%.3f sec),precision=%.3f,%.3f' % (step, loss_value1, duration,eval_correct_value,eval_correct_value1))
    # Update the events file.

      summary_str = sess.run(summary, feed_dict=feed_dict_value)
      summary_writer.add_summary(summary_str, step)
      summary_writer.flush()

  # Save a checkpoint and evaluate the model periodically.
    if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:

      saver.save(sess, checkpoint_file, global_step=step)
    # Evaluate against the training set.
      print('Training Data Eval:')
      do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            train)
    # Evaluate against the validation set.
      print('Validation Data Eval:')
      do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            validation)
    # Evaluate against the test set.
      print('Test Data Eval:')
      do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            test)

def run_testing():
  sess=tf.Session()
  saver.restore(sess, tf.train.latest_checkpoint('ckpt'))
  feed_dict_value=fill_feed_dict(test,images_placeholder,labels_placeholder)
  a,accuracy=evaluation(logits,labels_placeholder)
  accuracy_=sess.run(a,feed_dict=feed_dict_value)
  print('accuracy is %f'%accuracy_)


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--max_steps',
      type=int,
      default=2000,
      help='Number of steps to run trainer.'
  )
  parser.add_argument(
      '--batch_size',
      type=int,
      default=100,
      help='Batch size.  Must divide evenly into the dataset sizes.'
  )
  parser.add_argument(
      '--input_data_dir',
      type=str,
      default=os.path.join('datasets'),
      help='Directory to put the input data.'
  )
  parser.add_argument(
      '--log_dir',
      type=str,
      default=os.path.join('log'),
      help='Directory to put the log data.'
  )
  parser.add_argument(
      '--fake_data',
      default=False,
      help='If true, uses fake data for unit testing.',
      action='store_true'
  )
  parser.add_argument(
      '--train',
      type=bool,
      default=False
  )
  parser.add_argument(
      '--test',
      type=bool,
      default=True
  )
  FLAGS, unparsed = parser.parse_known_args()
  if not tf.gfile.Exists('ckpt'):
    tf.gfile.MakeDirs('ckpt')
  checkpoint_file = os.path.join('ckpt', 'model.ckpt')

  train ,validation,test = datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
  # Generate placeholders for the images and labels.
  images_placeholder, labels_placeholder = placeholder_inputs(
    FLAGS.batch_size)

  # Build a Graph that computes predictions from the inference model.
  logits = inference(images_placeholder)
  saver = tf.train.Saver()
  if FLAGS.train:
    run_training(logits,labels_placeholder)
    exit('Training finished')
  run_testing()
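
A typical way to drive this script, using the flags defined above (note that argparse with type=bool treats any non-empty value as True, so omit --train entirely to keep its False default):

# train for 2000 steps, writing checkpoints to ckpt/ and summaries to log/
python fully_connected.py --train True --max_steps 2000

# evaluate the latest checkpoint in ckpt/ (--test defaults to True)
python fully_connected.py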

datasets_mnist.py

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import os
import numpy
from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'


def _read32(bytestream):
  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
  return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


def extract_images(f):
  """Extract the images into a 4D uint8 numpy array [index, y, x, depth].

  Args:
    f: A file object that can be passed into a gzip reader.

  Returns:
    data: A 4D uint8 numpy array [index, y, x, depth].

  Raises:
    ValueError: If the bytestream does not start with 2051.

  """
  print('Extracting', f.name)
  with gzip.GzipFile(fileobj=f) as bytestream:
    magic = _read32(bytestream)
    if magic != 2051:
      raise ValueError('Invalid magic number %d in MNIST image file: %s' %
                       (magic, f.name))
    num_images = _read32(bytestream)
    rows = _read32(bytestream)
    cols = _read32(bytestream)
    buf = bytestream.read(rows * cols * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8)
    data = data.reshape(num_images, rows, cols, 1)
    return data


def dense_to_one_hot(labels_dense, num_classes):
  """Convert class labels from scalars to one-hot vectors."""
  num_labels = labels_dense.shape[0]
  index_offset = numpy.arange(num_labels) * num_classes
  labels_one_hot = numpy.zeros((num_labels, num_classes))
  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
  return labels_one_hot

#
def extract_labels(f, one_hot=False, num_classes=10):
  """Extract the labels into a 1D uint8 numpy array [index].

  Args:
    f: A file object that can be passed into a gzip reader.
    one_hot: Does one hot encoding for the result.
    num_classes: Number of classes for the one hot encoding.

  Returns:
    labels: a 1D uint8 numpy array.

  Raises:
    ValueError: If the bytestream doesn't start with 2049.
  """
  print('Extracting', f.name)
  with gzip.GzipFile(fileobj=f) as bytestream:
    magic = _read32(bytestream)
    if magic != 2049:
      raise ValueError('Invalid magic number %d in MNIST label file: %s' %
                       (magic, f.name))
    num_items = _read32(bytestream)
    buf = bytestream.read(num_items)
    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
    if one_hot:
      return dense_to_one_hot(labels, num_classes)
    return labels

#
class DataSet(object):

  def __init__(self,
               images,
               labels,
               fake_data=False,
               one_hot=False,
               dtype=dtypes.float32,
               reshape=True,
               seed=None):
    """Construct a DataSet.
    one_hot arg is used only if fake_data is true.  `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.  Seed arg provides for convenient deterministic testing.
    """
    seed1, seed2 = random_seed.get_seed(seed)
    # If op level seed is not set, use whatever graph level seed is returned
    numpy.random.seed(seed1 if seed is None else seed2)
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
      raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                      dtype)
    if fake_data:
      self._num_examples = 10000
      self.one_hot = one_hot
    else:
      assert images.shape[0] == labels.shape[0], (
          'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
      self._num_examples = images.shape[0]

      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      if reshape:
        assert images.shape[3] == 1
        images = images.reshape(images.shape[0],
                                images.shape[1] * images.shape[2])
      if dtype == dtypes.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(numpy.float32)
        images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0

  @property
  def images(self):
    return self._images

  @property
  def labels(self):
    return self._labels

  @property
  def num_examples(self):
    return self._num_examples

  @property
  def epochs_completed(self):
    return self._epochs_completed

  def next_batch(self, batch_size, fake_data=False, shuffle=True):
    """Return the next `batch_size` examples from this data set."""
    if fake_data:
      fake_image = [1] * 784
      if self.one_hot:
        fake_label = [1] + [0] * 9
      else:
        fake_label = 0
      return [fake_image for _ in xrange(batch_size)], [
          fake_label for _ in xrange(batch_size)
      ]
    start = self._index_in_epoch
    # Shuffle for the first epoch
    if self._epochs_completed == 0 and start == 0 and shuffle:
      perm0 = numpy.arange(self._num_examples)
      numpy.random.shuffle(perm0)
      self._images = self.images[perm0]
      self._labels = self.labels[perm0]
    # Go to the next epoch
    if start + batch_size > self._num_examples:
      # Finished epoch
      self._epochs_completed += 1
      # Get the rest examples in this epoch
      rest_num_examples = self._num_examples - start
      images_rest_part = self._images[start:self._num_examples]
      labels_rest_part = self._labels[start:self._num_examples]
      # Shuffle the data
      if shuffle:
        perm = numpy.arange(self._num_examples)
        numpy.random.shuffle(perm)
        self._images = self.images[perm]
        self._labels = self.labels[perm]
      # Start next epoch
      start = 0
      self._index_in_epoch = batch_size - rest_num_examples
      end = self._index_in_epoch
      images_new_part = self._images[start:end]
      labels_new_part = self._labels[start:end]
      return numpy.concatenate((images_rest_part, images_new_part), axis=0) , numpy.concatenate((labels_rest_part, labels_new_part), axis=0)
    else:
      self._index_in_epoch += batch_size
      end = self._index_in_epoch
      return self._images[start:end], self._labels[start:end]


def read_data_sets(train_dir,
                   fake_data=False,
                   one_hot=False,
                   dtype=dtypes.float32,
                   reshape=True,
                   validation_size=5000,
                   seed=None):
  if fake_data:

    def fake():
      return DataSet(
          [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

    train = fake()
    validation = fake()
    test = fake()
    return base.Datasets(train=train, validation=validation, test=test)

#  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
#  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
#  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
#  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'

  local_file = os.path.join(train_dir,'train-images-idx3-ubyte.gz')
  with open(local_file, 'rb') as f:
    train_images = extract_images(f)

  local_file = os.path.join(train_dir,'train-labels-idx1-ubyte.gz')
  with open(local_file, 'rb') as f:
    train_labels = extract_labels(f, one_hot=one_hot)

  local_file = os.path.join(train_dir,'t10k-images-idx3-ubyte.gz')
  with open(local_file, 'rb') as f:
    test_images = extract_images(f)

  local_file =os.path.join(train_dir,'t10k-labels-idx1-ubyte.gz')
  with open(local_file, 'rb') as f:
    test_labels = extract_labels(f, one_hot=one_hot)

  if not 0 <= validation_size <= len(train_images):
    raise ValueError(
        'Validation size should be between 0 and {}. Received: {}.'
        .format(len(train_images), validation_size))

  validation_images = train_images[:validation_size]
  validation_labels = train_labels[:validation_size]
  train_images = train_images[validation_size:]
  train_labels = train_labels[validation_size:]

  train = DataSet(
      train_images, train_labels, dtype=dtype, reshape=reshape, seed=seed)
  validation = DataSet(
      validation_images,
      validation_labels,
      dtype=dtype,
      reshape=reshape,
      seed=seed)
  test = DataSet(
      test_images, test_labels, dtype=dtype, reshape=reshape, seed=seed)
  return train,validation,test

#  return base.Datasets(train=train, validation=validation, test=test)


def load_mnist(train_dir='MNIST-data'):
  return read_data_sets(train_dir)
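
A short usage sketch for this module (assuming the four MNIST .gz files have already been placed in a local datasets/ directory, as fully_connected.py expects):

import datasets_mnist

train, validation, test = datasets_mnist.read_data_sets('datasets', one_hot=True)
print(train.num_examples, validation.num_examples, test.num_examples)   # 55000 5000 10000
images, labels = train.next_batch(100)
print(images.shape, labels.shape)                                       # (100, 784) (100, 10)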