References: https://blog.csdn.net/u010099080/article/details/77426577
https://blog.csdn.net/qq_31761357/article/details/80225737
When parameter information is written out as summaries at every step, the log file grows very quickly: in today's run it went from a few tens of MB to over 1 GB in no time.
To see what is actually being written, I worked through the chapter 7 example from the TensorFlow实战Google book.
1. tf.summary.histogram: records the distribution of a tensor (this is what feeds the Histograms and Distributions panels).
2. tf.summary.scalar: mainly used to record the trend of a single value, such as accuracy, loss, or learning rate.
3. tf.summary.image: writes image-shaped tensors (for example the input pictures) so they can be viewed in the Images panel.
4. tf.summary.FileWriter: generates the log files; you give it a directory to tell the program where to put them, and while running you call add_summary() to record a given step's summary data into the file.
5. tf.summary.merge_all: bundles all the summary ops, so a single sess.run covers them instead of running each of the above separately.
6. add_summary: writes the serialized summary returned by sess.run (together with the step number) into the event file. A minimal sketch of how these pieces fit together is shown right below, followed by the full chapter 7 example.
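The workflow is: define summary ops next to the tensors you care about, merge them, point a FileWriter at a log directory, and write the merged result every step. Here is a minimal sketch of just that skeleton; the dummy variable loss and the directory ./log are placeholders of mine, not part of the book example:

import tensorflow as tf

# Stand-in for a real loss tensor, just to have something to summarize.
loss = tf.Variable(1.0, name='loss')
tf.summary.scalar('loss', loss)          # single-value trend (Scalars panel)
tf.summary.histogram('loss_hist', loss)  # distribution (Histograms / Distributions panels)

merged = tf.summary.merge_all()          # one op that evaluates every summary above

with tf.Session() as sess:
    # The FileWriter decides where the event file goes; it also records the graph.
    writer = tf.summary.FileWriter('./log', sess.graph)
    sess.run(tf.global_variables_initializer())
    for step in range(10):
        summary = sess.run(merged)
        writer.add_summary(summary, step)  # append this step's summaries to the event file
    writer.close()

The full example from chapter 7, with these ops attached to every layer of a small MNIST network, follows.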
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

SUMMARY_DIR = "./log/supervisor.log"
BATCH_SIZE = 100
TRAIN_STEPS = 300000


def variable_summaries(name, var):
    # with tf.name_scope('summaries'):  # the extra 'summaries' scope was removed
    # Record the full distribution of the variable plus its mean and standard deviation.
    tf.summary.histogram(name, var)
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)  # the name prefix on the tag was dropped
    # Standard deviation
    stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)


def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            weights = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=0.1))
            variable_summaries('weights', weights)
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.constant(0.0, shape=[output_dim]))
            variable_summaries('biases', biases)
        with tf.name_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
            tf.summary.histogram('pre_activations', preactivate)
        activations = act(preactivate, name='activation')
        # Record the distribution of the layer's output after the activation function.
        tf.summary.histogram('activations', activations)
        return activations


def main():
    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, 784], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')

    with tf.name_scope('input_reshape'):
        image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
        # The 10 means that at most 10 input images are displayed.
        tf.summary.image('input', image_shaped_input, 10)

    hidden1 = nn_layer(x, 784, 500, 'layer1')
    y = nn_layer(hidden1, 500, 10, 'layer2', act=tf.identity)

    with tf.name_scope('cross_entropy'):
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
        tf.summary.scalar('cross_entropy', cross_entropy)

    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)

    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

    merged = tf.summary.merge_all()

    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph)
        tf.global_variables_initializer().run()

        for i in range(TRAIN_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # Run the training step together with all summary ops and get this step's summaries.
            summary, _ = sess.run([merged, train_step], feed_dict={x: xs, y_: ys})
            # Write the summaries into the event file so TensorBoard can read this run's data.
            summary_writer.add_summary(summary, i)

        summary_writer.close()


if __name__ == '__main__':
    main()
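One way to keep the event file from blowing up to the 1 GB+ size mentioned at the top is to run the merged summary op only every so often instead of on every training step; the training itself does not change, only how often summaries are written. A sketch of the modified loop, with an arbitrary interval of 100 steps:

        for i in range(TRAIN_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            if i % 100 == 0:
                # Evaluate and write the summaries only every 100 steps to keep the log small.
                summary, _ = sess.run([merged, train_step], feed_dict={x: xs, y_: ys})
                summary_writer.add_summary(summary, i)
            else:
                # Plain training step with no summary evaluation.
                sess.run(train_step, feed_dict={x: xs, y_: ys})

To view the results, point TensorBoard at the log directory (tensorboard --logdir=./log/supervisor.log) and open the address it prints in a browser; the panels below come from that page.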
1. Scalar results
Why is there an overlapping section in the part of this figure marked by the red box?
2. Image input display
3. Model graph
4. Distributions
5. Histograms