Continuing...
After getting the TensorFlow demo running on Windows, I moved on to debugging the TensorBoard visualization tool.
Reference article: http://wiki.jikexueyuan.com/project/tensorflow-zh/how_tos/summaries_and_tensorboard.html
Python code used: https://github.com/tensorflow/tensorflow/blob/r0.10/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py
To inspect the network's layers in TensorBoard, you have to add summary ops. See the reference article for how TensorBoard works as a whole; below is my debugging log on Windows 10.
Let's get started.
Step 1: Import the data. I still use the input_data file I wrote myself, which reads local MNIST data; the code is at http://blog.csdn.net/kinsent/article/details/78180955
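For reference, a minimal sketch of how the local module is used. It assumes input_data.py sits next to the training script and keeps the same read_data_sets interface as the official tutorial module (the full code at the end of this post relies on exactly that), with the MNIST archives already present in the data directory:

import input_data  # the local file, not tensorflow.examples.tutorials.mnist

# read_data_sets reads the MNIST files found in the given directory instead
# of downloading them; one_hot=True yields one-hot label vectors.
mnist = input_data.read_data_sets('/tmp/data', one_hot=True)
print(mnist.train.num_examples)  # 55000
print(mnist.test.num_examples)   # 10000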
Step 2: Create a new file, TestBoardDemo.py, paste in the full contents of mnist_with_summaries.py, and start modifying:
1. Change from tensorflow.examples.tutorials.mnist import input_data to import input_data (so the local MNIST data is used).
2. Since the mnist_with_summaries.py code was written for TensorFlow 0.7, a few functions have to be renamed (a short sketch of the new API follows this list):
1) tf.image_summary becomes tf.summary.image
2) tf.scalar_summary becomes tf.summary.scalar
3) tf.histogram_summary becomes tf.summary.histogram
4) tf.train.SummaryWriter becomes tf.summary.FileWriter
5) tf.merge_all_summaries() becomes tf.summary.merge_all()
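A minimal before-and-after sketch of the renamed summary API (TensorFlow 1.x names from the list above; the variable and tag names here are illustrative only, not part of the original script):

import tensorflow as tf

# A throwaway variable just to have something to summarize.
w = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))

tf.summary.histogram('weights', w)                    # was tf.histogram_summary(...)
tf.summary.scalar('weights_mean', tf.reduce_mean(w))  # was tf.scalar_summary(...)
merged = tf.summary.merge_all()                       # was tf.merge_all_summaries()

with tf.Session() as sess:
    # was tf.train.SummaryWriter(...)
    writer = tf.summary.FileWriter('/tmp/mnist_logs', sess.graph)
    sess.run(tf.global_variables_initializer())
    writer.add_summary(sess.run(merged), global_step=0)
    writer.close()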
Step 3: A small pitfall.
After renaming the functions above I assumed everything was fine, but a test run threw an AssertionError. Stepping through in the debugger, the error came from calls to
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
specifically the line that applies the activation function, which passed a second, name-string argument to act. After checking the Python 3.6 docs, changing it to
activations = act(preactivate)
fixed it. The assertion fires because only one positional argument should be passed here, while the original code passed two. In my experience, most AssertionErrors come down to calling a function with the wrong arguments. A minimal repro follows.
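This repro assumes the failing call was the layer2 case where act=tf.nn.softmax: in newer TensorFlow the second positional parameter of tf.nn.softmax is no longer a name string, so the old two-argument call puts the string in the wrong slot and fails with the error described above:

import tensorflow as tf

preactivate = tf.constant([[1.0, 2.0, 3.0]])
activations = tf.nn.softmax(preactivate)  # fixed call: one positional argument
# activations = tf.nn.softmax(preactivate, 'activation')  # old-style call: errors out

with tf.Session() as sess:
    print(sess.run(activations))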
Step 4: Run it with python TestBoardDemo.py. Every 10 steps the script prints the test-set accuracy and writes summary event files under summaries_dir.
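Before launching TensorBoard, it is worth checking that event files were actually written. A small sketch, assuming the default summaries_dir of /tmp/mnist_logs:

import os

logdir = '/tmp/mnist_logs'
for root, _, files in os.walk(logdir):
    for name in files:
        # Expect events.out.tfevents.* files under the train/ and test/ subdirectories.
        print(os.path.join(root, name))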
Step 5: Run TensorBoard. The key detail is the --logdir argument: it must point at the same directory as the summaries_dir flag set in the Python script, e.g. tensorboard --logdir=/tmp/mnist_logs

Step 6: Open TensorBoard in the browser.

The complete code:

# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple MNIST classifier which displays summaries in TensorBoard.

This is an unimpressive MNIST model, but it is a good example of using
tf.name_scope to make a graph legible in the TensorBoard graph explorer, and of
naming summary tags so that they are grouped meaningfully in TensorBoard.

It demonstrates the functionality of every TensorBoard dashboard.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

# from tensorflow.examples.tutorials.mnist import input_data
import input_data

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
                     'for unit testing.')
flags.DEFINE_integer('max_steps', 1000, 'Number of steps to run trainer.')
flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
flags.DEFINE_float('dropout', 0.9, 'Keep probability for training dropout.')
flags.DEFINE_string('data_dir', '/tmp/data', 'Directory for storing data')
flags.DEFINE_string('summaries_dir', '/tmp/mnist_logs', 'Summaries directory')


def train():
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True,
                                    fake_data=FLAGS.fake_data)

  sess = tf.InteractiveSession()

  # Create a multilayer model.

  # Input placeholders
  with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
    tf.summary.image('input', image_shaped_input, 10)
    y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
    keep_prob = tf.placeholder(tf.float32)
    tf.summary.scalar('dropout_keep_probability', keep_prob)

  # We can't initialize these variables to 0 - the network will get stuck.
  def weight_variable(shape):
    """Create a weight variable with appropriate initialization."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

  def bias_variable(shape):
    """Create a bias variable with appropriate initialization."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

  def variable_summaries(var, name):
    """Attach a lot of summaries to a Tensor."""
    with tf.name_scope('summaries'):
      mean = tf.reduce_mean(var)
      tf.summary.scalar('mean/' + name, mean)
      with tf.name_scope('stddev'):
        stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
      tf.summary.scalar('stddev/' + name, stddev)
      tf.summary.scalar('max/' + name, tf.reduce_max(var))
      tf.summary.scalar('min/' + name, tf.reduce_min(var))
      tf.summary.histogram(name, var)

  def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    """Reusable code for making a simple neural net layer.

    It does a matrix multiply, bias add, and then uses relu to nonlinearize.
    It also sets up name scoping so that the resultant graph is easy to read,
    and adds a number of summary ops.
    """
    # Adding a name scope ensures logical grouping of the layers in the graph.
    with tf.name_scope(layer_name):
      # This Variable will hold the state of the weights for the layer
      with tf.name_scope('weights'):
        weights = weight_variable([input_dim, output_dim])
        variable_summaries(weights, layer_name + '/weights')
      with tf.name_scope('biases'):
        biases = bias_variable([output_dim])
        variable_summaries(biases, layer_name + '/biases')
      with tf.name_scope('Wx_plus_b'):
        preactivate = tf.matmul(input_tensor, weights) + biases
        tf.summary.histogram(layer_name + '/pre_activations', preactivate)
      activations = act(preactivate)
      tf.summary.histogram(layer_name + '/activations', activations)
      return activations

  hidden1 = nn_layer(x, 784, 500, 'layer1')
  dropped = tf.nn.dropout(hidden1, keep_prob)
  y = nn_layer(dropped, 500, 10, 'layer2', act=tf.nn.softmax)

  with tf.name_scope('cross_entropy'):
    diff = y_ * tf.log(y)
    with tf.name_scope('total'):
      cross_entropy = -tf.reduce_mean(diff)
    tf.summary.scalar('cross entropy', cross_entropy)

  with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(
        FLAGS.learning_rate).minimize(cross_entropy)

  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    with tf.name_scope('accuracy'):
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', accuracy)

  # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
  merged = tf.summary.merge_all()
  train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
                                       sess.graph)
  test_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/test')
  tf.global_variables_initializer().run()

  # Train the model, and also write summaries.
  # Every 10th step, measure test-set accuracy, and write test summaries
  # All other steps, run train_step on training data, & add training summaries

  def feed_dict(train):
    """Make a TensorFlow feed_dict: maps data onto Tensor placeholders."""
    if train or FLAGS.fake_data:
      xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)
      k = FLAGS.dropout
    else:
      xs, ys = mnist.test.images, mnist.test.labels
      k = 1.0
    return {x: xs, y_: ys, keep_prob: k}

  for i in range(FLAGS.max_steps):
    if i % 10 == 0:  # Record summaries and test-set accuracy
      summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))
      test_writer.add_summary(summary, i)
      print('Accuracy at step %s: %s' % (i, acc))
    else:  # Record train set summaries, and train
      summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True))
      train_writer.add_summary(summary, i)


def main(_):
  if tf.gfile.Exists(FLAGS.summaries_dir):
    tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
  tf.gfile.MakeDirs(FLAGS.summaries_dir)
  train()


if __name__ == '__main__':
  tf.app.run()