TensorFlow Learning Notes (7): TensorBoard for Tensor and Graph Visualization

Preface

TensorBoard is a visualization tool that ships with TensorFlow. This post makes a few small changes to the code from Learning Notes (4) in order to explore how TensorBoard is used.

Code

```python
# -*- coding=utf-8 -*-
# @author: 陈水平
# @date: 2017-02-09
# @description: implement a softmax regression model upon MNIST handwritten digits
# @ref: http://yann.lecun.com/exdb/mnist/

import gzip
import struct
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
import tensorflow as tf

# MNIST data is stored in binary format,
# and we transform them into numpy ndarray objects by the following two utility functions
def read_image(file_name):
    with gzip.open(file_name, 'rb') as f:
        buf = f.read()
        index = 0
        magic, images, rows, columns = struct.unpack_from('>IIII', buf, index)
        index += struct.calcsize('>IIII')
        image_size = '>' + str(images * rows * columns) + 'B'
        ims = struct.unpack_from(image_size, buf, index)
        im_array = np.array(ims).reshape(images, rows, columns)
        return im_array

def read_label(file_name):
    with gzip.open(file_name, 'rb') as f:
        buf = f.read()
        index = 0
        magic, labels = struct.unpack_from('>II', buf, index)
        index += struct.calcsize('>II')
        label_size = '>' + str(labels) + 'B'
        labels = struct.unpack_from(label_size, buf, index)
        label_array = np.array(labels)
        return label_array

print("Start processing MNIST handwritten digits data...")
train_x_data = read_image("MNIST_data/train-images-idx3-ubyte.gz")
train_x_data = train_x_data.reshape(train_x_data.shape[0], -1).astype(np.float32)
train_y_data = read_label("MNIST_data/train-labels-idx1-ubyte.gz")
test_x_data = read_image("MNIST_data/t10k-images-idx3-ubyte.gz")
test_x_data = test_x_data.reshape(test_x_data.shape[0], -1).astype(np.float32)
test_y_data = read_label("MNIST_data/t10k-labels-idx1-ubyte.gz")

# Scale pixel values into [0, 1]
train_x_minmax = train_x_data / 255.0
test_x_minmax = test_x_data / 255.0

# Of course you can also use the utility function to read in MNIST provided by tensorflow
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
# train_x_minmax = mnist.train.images
# train_y_data = mnist.train.labels
# test_x_minmax = mnist.test.images
# test_y_data = mnist.test.labels

# We evaluate the softmax regression model by sklearn first
eval_sklearn = False
if eval_sklearn:
    print("Start evaluating softmax regression model by sklearn...")
    reg = LogisticRegression(solver="lbfgs", multi_class="multinomial")
    reg.fit(train_x_minmax, train_y_data)
    np.savetxt('coef_softmax_sklearn.txt', reg.coef_, fmt='%.6f')  # Save coefficients to a text file
    test_y_predict = reg.predict(test_x_minmax)
    print("Accuracy of test set: %f" % accuracy_score(test_y_data, test_y_predict))

eval_tensorflow = True
batch_gradient = False

# Attach mean/stddev/max/min scalar summaries and a histogram summary to a tensor
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)

if eval_tensorflow:
    print("Start evaluating softmax regression model by tensorflow...")
    # reformat y into one-hot encoding style
    lb = preprocessing.LabelBinarizer()
    lb.fit(train_y_data)
    train_y_data_trans = lb.transform(train_y_data)
    test_y_data_trans = lb.transform(test_y_data)

    x = tf.placeholder(tf.float32, [None, 784])
    with tf.name_scope('weights'):
        W = tf.Variable(tf.zeros([784, 10]))
        variable_summaries(W)
    with tf.name_scope('biases'):
        b = tf.Variable(tf.zeros([10]))
        variable_summaries(b)
    with tf.name_scope('Wx_plus_b'):
        V = tf.matmul(x, W) + b
        tf.summary.histogram('pre_activations', V)
    with tf.name_scope('softmax'):
        y = tf.nn.softmax(V)
        tf.summary.histogram('activations', y)

    y_ = tf.placeholder(tf.float32, [None, 10])
    with tf.name_scope('cross_entropy'):
        loss = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
        tf.summary.scalar('cross_entropy', loss)
    with tf.name_scope('train'):
        optimizer = tf.train.GradientDescentOptimizer(0.5)
        train = optimizer.minimize(loss)
    with tf.name_scope('evaluate'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.summary.scalar('accuracy', accuracy)

    init = tf.global_variables_initializer()

    sess = tf.Session()
    sess.run(init)

    merged = tf.summary.merge_all()
    # Passing sess.graph to the train writer is what enables the Graphs tab
    train_writer = tf.summary.FileWriter('log/train', sess.graph)
    test_writer = tf.summary.FileWriter('log/test')

    if batch_gradient:
        for step in range(300):
            sess.run(train, feed_dict={x: train_x_minmax, y_: train_y_data_trans})
            if step % 10 == 0:
                print("Batch Gradient Descent processing step %d" % step)
        print("Finally we got the estimated results, it took such a long time...")
    else:
        for step in range(1000):
            if step % 10 == 0:
                # Every 10 steps, evaluate on the test set and log to the test run
                summary, acc = sess.run([merged, accuracy], feed_dict={x: test_x_minmax, y_: test_y_data_trans})
                test_writer.add_summary(summary, step)
                print("Stochastic Gradient Descent processing step %d accuracy=%.2f" % (step, acc))
            else:
                # Otherwise, train on a random mini-batch and log to the train run
                sample_index = np.random.choice(train_x_minmax.shape[0], 100)
                batch_xs = train_x_minmax[sample_index, :]
                batch_ys = train_y_data_trans[sample_index, :]
                summary, _ = sess.run([merged, train], feed_dict={x: batch_xs, y_: batch_ys})
                train_writer.add_summary(summary, step)

    np.savetxt('coef_softmax_tf.txt', np.transpose(sess.run(W)), fmt='%.6f')  # Save coefficients to a text file
    print("Accuracy of test set: %f" % sess.run(accuracy, feed_dict={x: test_x_minmax, y_: test_y_data_trans}))
```

Reflections

The main changes are:

- Summary: all the statistics you want TensorBoard to display.

- tf.name_scope(): adds a level of hierarchy to the tensors in the Graph. TensorBoard lays the graph out according to the levels specified in code; initially only the top level is drawn, and clicking a node expands it to show the next level of detail.

- tf.summary.scalar(): records a scalar statistic.

- tf.summary.histogram(): records a Tensor of any shape and tracks the distribution of its values.

- tf.summary.merge_all(): adds an op that runs every summary op, so you don't have to trigger each one by hand.

- tf.summary.FileWriter: writes summaries to disk; you must specify the storage path logdir. If a Graph object is also passed in, the Graph Visualization will display Tensor Shape Information. After running the merged summary op, pass its result to the add_summary() method, as in the minimal sketch below.
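To see this workflow apart from the MNIST specifics, here is a minimal sketch that logs one scalar and one histogram. It assumes TensorFlow 1.x, the same API generation as above; the toy quadratic loss and the demo_logs directory are made up purely for illustration:

```python
import tensorflow as tf

# Toy model: a single variable and a quadratic loss, just to have values to log
with tf.name_scope('toy'):
    w = tf.Variable(3.0)
    loss = tf.square(w - 1.0)        # minimized at w == 1
    tf.summary.scalar('loss', loss)  # appears in the Scalars tab
    tf.summary.histogram('w', w)     # appears in the Distributions/Histograms tabs

train = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
merged = tf.summary.merge_all()      # a single op that evaluates every summary above

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Passing sess.graph here is what populates the Graphs tab
    writer = tf.summary.FileWriter('demo_logs', sess.graph)
    for step in range(100):
        summary, _ = sess.run([merged, train])
        writer.add_summary(summary, step)  # tag each record with its step number
    writer.close()
```

The pattern is always the same: attach summary ops while building the graph, merge them, run the merged op alongside training, and hand each result to the writer with the current step.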

Results

Visualizing Learning

Scalars

Histograms

First, the Distributions view shows the range of values each tensor takes over training.

More detailed information about the probability distribution of the values is in the Histograms view.

Graph Visualization

Double-clicking the train node expands it to show the next level of detail.
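To bring up these dashboards, point TensorBoard at the directory the script writes to, e.g. `tensorboard --logdir=log`, then open http://localhost:6006 in a browser. Because the script writes to the two subdirectories log/train and log/test, TensorBoard treats them as two runs and overlays their curves, which makes it easy to compare training and test accuracy.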
