# TensorFlow CIFAR-10 classification, part 4: cifar10_eval.py (evaluation script).

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from datetime import datetime
import math
import time

import numpy as np
import tensorflow as tf

import cifar10

# Command-line flags for the evaluation run. The parser is shared with
# cifar10.py, so model/training flags (e.g. --batch_size) are also available.
# NOTE(review): the add_argument calls below were truncated in this copy of the
# file; flags and defaults restored from the upstream TensorFlow CIFAR-10
# tutorial — confirm against your training configuration.
parser = cifar10.parser

parser.add_argument('--eval_dir', type=str, default='/tmp/cifar10_eval',
                    help='Directory where to write event logs.')

parser.add_argument('--eval_data', type=str, default='test',
                    help='Either test or train_eval.')

parser.add_argument('--checkpoint_dir', type=str, default='/tmp/cifar10_train',
                    help='Directory where to read model checkpoints.')

# How long to wait between successive evaluations of the latest checkpoint.
parser.add_argument('--eval_interval_secs', type=int, default=60 * 5,
                    help='How often to run the eval.')

parser.add_argument('--num_examples', type=int, default=10000,
                    help='Number of examples to run.')

parser.add_argument('--run_once', type=bool, default=False,
                    help='Whether to run eval only once.')

def eval_once(saver, summary_writer, top_k_op, summary_op):
  """Run one evaluation pass against the latest checkpoint.

  Restores model variables from the newest checkpoint in
  FLAGS.checkpoint_dir, runs `top_k_op` over FLAGS.num_examples inputs,
  prints precision@1, and writes it to the event file.

  Args:
    saver: Saver used to restore checkpoint variables.
    summary_writer: Summary writer for the eval event file.
    top_k_op: Top K op (bool tensor: prediction in top-k per example).
    summary_op: Summary op producing the merged summaries.
  """
  with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
      # Restore from checkpoint.
      saver.restore(sess, ckpt.model_checkpoint_path)
      # Assuming model_checkpoint_path looks something like:
      #   /my-favorite-path/cifar10_train/model.ckpt-0,
      # extract global_step from it.
      global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    else:
      print('No checkpoint file found')
      return

    # Start the queue runners, handing the coordinator to every thread so
    # they can all be stopped together.
    coord = tf.train.Coordinator()
    try:
      threads = []
      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        # NOTE(review): this call was truncated in the original file;
        # reconstructed from the upstream tutorial (daemon threads so they
        # don't block interpreter exit).
        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                         start=True))

      num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
      true_count = 0  # Counts the number of correct predictions.
      total_sample_count = num_iter * FLAGS.batch_size
      step = 0
      while step < num_iter and not coord.should_stop():
        predictions = sess.run([top_k_op])
        true_count += np.sum(predictions)
        step += 1

      # Compute precision @ 1.
      precision = true_count / total_sample_count
      print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))

      summary = tf.Summary()
      summary.ParseFromString(sess.run(summary_op))
      summary.value.add(tag='Precision @ 1', simple_value=precision)
      # Bug fix: the summary was built but never written to the event file.
      summary_writer.add_summary(summary, global_step)
    except Exception as e:  # pylint: disable=broad-except
      coord.request_stop(e)

    coord.request_stop()
    # Bug fix: join the queue-runner threads so they are reaped before the
    # session closes (the original never joined them).
    coord.join(threads, stop_grace_period_secs=10)

def evaluate():
  """Evaluate CIFAR-10 periodically against the latest checkpoint.

  Builds the eval graph (inputs -> inference -> top-1 correctness), restores
  the moving-average shadow variables, then loops: evaluate once, sleep
  FLAGS.eval_interval_secs, repeat — or run a single pass if FLAGS.run_once.
  """
  with tf.Graph().as_default() as g:
    # Get images and labels for CIFAR-10.
    eval_data = FLAGS.eval_data == 'test'
    images, labels = cifar10.inputs(eval_data=eval_data)

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = cifar10.inference(images)

    # Calculate predictions: bool per example, True when the label is in
    # the top-1 prediction.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Restore the moving average version of the learned variables for eval
    # (shadow variables generalize better than the raw weights).
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()
    # Event file under FLAGS.eval_dir receives the eval summaries.
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

    # Periodically evaluate the most recently trained model.
    while True:
      eval_once(saver, summary_writer, top_k_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)

def main(argv=None):  # pylint: disable=unused-argument
  """Entry point: wipe any stale eval directory, then run evaluation."""
  if tf.gfile.Exists(FLAGS.eval_dir):
    tf.gfile.DeleteRecursively(FLAGS.eval_dir)
  tf.gfile.MakeDirs(FLAGS.eval_dir)
  evaluate()

if __name__ == '__main__':
  # Parse flags before tf.app.run() dispatches to main().
  FLAGS = parser.parse_args()
  tf.app.run()

# Usage notes (run these in a shell, not in Python):
#   source activate tensorflow
#   tensorboard --logdir /your_path/cifar10_train/