【TensorFlow】Monitoring Network Training with TensorBoard

import tensorflow as tf
# Load the MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('/tmp/data',one_hot=True)
Extracting /tmp/data/train-images-idx3-ubyte.gz
Extracting /tmp/data/train-labels-idx1-ubyte.gz
Extracting /tmp/data/t10k-images-idx3-ubyte.gz
Extracting /tmp/data/t10k-labels-idx1-ubyte.gz

一、Defining and Training the Model

1. Parameters
batch_size = 100 # size of each batch
n_batch = mnist.train.num_examples//batch_size # number of batches in the training set
2. Defining the computation graph

Once name_scope and the name attribute are defined, the corresponding names are displayed in TensorBoard; in addition, choosing appropriate name_scopes makes the TensorBoard graph much cleaner and easier to read.

Variables recorded with tf.summary are ultimately displayed in TensorBoard.
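As a quick standalone sketch (the scope name demo below is made up for illustration and is not part of the model that follows): an op created inside tf.name_scope gets the scope as a prefix on its name, and TensorBoard uses these prefixes to group nodes into collapsible boxes in the graph view.

import tensorflow as tf

with tf.name_scope('demo'):
    # The placeholder's full name becomes 'demo/x-input:0',
    # so the graph view collapses it under a single 'demo' node.
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')

print(x.name)  # demo/x-input:0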

# Summary statistics for a variable (mean, stddev, max, min, histogram)
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean',mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var-mean)))
        tf.summary.scalar('stddev',stddev)
        tf.summary.scalar('max',tf.reduce_max(var))
        tf.summary.scalar('min',tf.reduce_min(var))
        tf.summary.histogram('histogram',var)
# Define name scopes
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32,[None,784],name='x-input')
    y = tf.placeholder(tf.float32,[None,10],name='y-input')

with tf.name_scope('layer1'):
    with tf.name_scope('weights'):
        W1 = tf.Variable(tf.truncated_normal([784,2000],stddev=0.1),name='W1')
        variable_summaries(W1) # summarize W1
    with tf.name_scope('biases'):
        b1 = tf.Variable(tf.zeros([2000])+0.1,name='b1')
        variable_summaries(b1) # summarize b1
    L1 = tf.nn.tanh(tf.matmul(x,W1)+b1)

with tf.name_scope('layer2'):
    W2 = tf.Variable(tf.truncated_normal([2000,1000],stddev=0.1),name='W2')
    b2 = tf.Variable(tf.zeros([1000])+0.1,name='b2')
    L2 = tf.nn.tanh(tf.matmul(L1,W2)+b2)
    
with tf.name_scope('layer3'):
    W3 = tf.Variable(tf.truncated_normal([1000,10],stddev=0.1),name='W3')
    b3 = tf.Variable(tf.zeros([10])+0.1,name='b3')

# Predicted probabilities
prediction = tf.nn.softmax(tf.matmul(L2,W3)+b3)
# Loss
# loss = tf.reduce_mean(tf.square(y-prediction)) # MSE (alternative)
# Cross entropy
with tf.name_scope('loss'):
    # Note: softmax_cross_entropy_with_logits expects raw logits, while prediction
    # here has already been passed through softmax.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=prediction))
    tf.summary.scalar('loss',loss) # record loss
# SGD
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
# Initialization op
init = tf.global_variables_initializer()

with tf.name_scope('accuracy'):
    # Classification result
    # tf.argmax(y,1): the index of the largest value of y along axis=1
    correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
    # Accuracy
    # tf.cast: casts the boolean correct_prediction to tf.float32
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    tf.summary.scalar('accuracy',accuracy) # record accuracy
    
# Merge all summaries
merged = tf.summary.merge_all()
3. Training
with tf.Session() as sess:
    sess.run(init)
    # Write sess.graph to the logs/ directory
    writer = tf.summary.FileWriter('logs/',sess.graph)
    for epoch in range(5):
        for batch in range(n_batch):
            batch_xs,batch_ys = mnist.train.next_batch(batch_size)
            summary,_ = sess.run([merged,train_step],feed_dict={x:batch_xs,y:batch_ys})
            
        writer.add_summary(summary,epoch) # write the last summary of each epoch to the log
        test_acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
        train_acc = sess.run(accuracy,feed_dict={x:mnist.train.images,y:mnist.train.labels})
        print("Iter "+str(epoch)+",Testing accuracy "+str(test_acc)+",Training accuracy "+str(train_acc))
Iter 0,Testing accuracy 0.8546,Training accuracy 0.86127275
Iter 1,Testing accuracy 0.8643,Training accuracy 0.87305456
Iter 2,Testing accuracy 0.8715,Training accuracy 0.8810727
Iter 3,Testing accuracy 0.8731,Training accuracy 0.88452727
Iter 4,Testing accuracy 0.8763,Training accuracy 0.8867818

二、Using TensorBoard

During the training above, the file containing the graph information has already been saved to the directory 'logs/'. All that is left is to start the TensorBoard service and have it read this file; the network structure can then be viewed in a browser.

1. Open a command line and enter the command below, where logs can be replaced with the relative or absolute path of the directory where the graph file was saved.

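The original screenshot is not reproduced here; assuming TensorBoard was installed together with TensorFlow and the logs/ directory created by the training script above, the command is along the lines of:

tensorboard --logdir=logs

TensorBoard serves on port 6006 by default; pass --port to change it.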

2. Open a browser and go to "<IP address of the machine running TensorBoard>:6006" (for example http://localhost:6006 when running locally); you will see something like the figure below.

(Screenshot: the TensorBoard page showing the network structure defined above.)
