前言
在tensorflow中,我们常常使用tf.summary保存数据,但在报告中又想要定制一些标题、图例等,那用matplotlib比较方便一些,本文使用matplotlib实现tensorboard的可视化功能。
首先准备summary文件,然后读取此文件,接着做与tensorboard一样的smooth平滑滤波操作,最后输出报告图像。
准备tf summary文件
准备一个tf summary文件。用tensorflow跑了一个mnist,输出summary文件,里面记录了tf.summary.scalar的loss和lr两个量。如果不想跑的话,我直接把summary文件提供在百度网盘:
events.out.tfevents.1578108826.hri-three
链接:https://pan.baidu.com/s/1ipzf9FZ3L1CotB1dFzS9Zw
提取码:mmb7
# -*- coding: utf-8 -*-
import os
import time
import shutil
import numpy as np
import json
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def conv2d(x, out_channels, kernel_size=4, strides=2, padding='same', use_bias=False, name='Conv'):
    """2-D convolution layer whose kernel is initialized from N(0, 0.02).

    With `padding='same'` the input is zero-padded so the spatial size is
    only reduced by the stride. Wraps `tf.layers.conv2d` inside its own
    variable scope so layer variables are grouped under `name`.
    """
    with tf.variable_scope(name):
        init = tf.random_normal_initializer(0, 0.02)
        return tf.layers.conv2d(
            x, out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            kernel_initializer=init,
            use_bias=use_bias)
class SomeNet(object):
    """Minimal MNIST conv net used to produce a tf.summary event file.

    Builds a 3-conv-layer graph, an MSE loss over the one-hot labels, and
    scalar summaries for `loss` and `lr`; `train()` runs the optimization
    and writes the summaries for TensorBoard (or matplotlib) plotting.
    """

    def __init__(self, args):
        # ============= hyper-parameters =============
        # Output dir layout: "<MMDD>_OUTPUT/SomeNet_<MMDD_HHMM_><args.test_name>"
        self.test_name = self.__class__.__name__ + '_' + time.strftime("%m%d_%H%M_", time.localtime()) + args.test_name
        self.test_name = os.path.join(time.strftime("%m%d_", time.localtime()) + 'OUTPUT', self.test_name)
        print(self.test_name)
        if not os.path.exists(self.test_name):
            os.makedirs(self.test_name)
        # Snapshot the run configuration into the output dir.
        # NOTE: this mutates the caller's `args.test_name` to the full path.
        with open(os.path.join(self.test_name, 'args.json'), 'w') as f:  # ae_args = json.load(f)
            args.test_name = self.test_name
            json.dump(vars(args), f, indent=4)
            f.close()  # redundant inside `with`; kept as-is
        # Archive a copy of this script alongside the results, for reproducibility.
        shutil.copy(__file__, os.path.join(self.test_name, os.path.basename(__file__)))
        self.start_lr = args.lr
        self.batch_size = args.batch_size
        # ============= placeholder =============
        self.xs = tf.placeholder("float", shape=[None, 28, 28, 1], name='xs')  # input images
        self.ys = tf.placeholder("float", shape=[None, 10], name="ys")         # one-hot labels
        self.lr = tf.placeholder(tf.float32, name='lr')                        # learning rate, fed each step
        self.phase_train = tf.placeholder(dtype="bool", name="isTrain")        # fed but unused (BN is commented out)
        # ============= Net build =============
        with tf.variable_scope("MinTest", reuse=False) as scope:
            x = conv2d(self.xs, 64, name="conv1")   # stride-2 'same': 28x28 -> 14x14
            x = conv2d(x, 128, name="conv2")        # 14x14 -> 7x7
            # x = batch_norm2(x, phase_train=self.phase_train)
            # 7x7 'valid' conv collapses spatial dims to 1x1 with 10 channels
            x = conv2d(x, 10, kernel_size=7, padding='valid', strides=1, name="conv3")
            print(x)
            self.y_p = tf.squeeze(x)  # predicted labels (logits), squeezed to (batch, 10)
        self.compute_loss()
        self.summary()

    def compute_loss(self):
        # ============= losses =============
        # NOTE(review): MSE over one-hot labels rather than cross-entropy; the
        # TF signature is (labels, predictions) but y_p is passed first —
        # harmless here since squared error is symmetric in its arguments.
        self.loss = tf.losses.mean_squared_error(self.y_p, self.ys)

    def summary(self):
        # ============= summary =============
        # `family` prefixes the tag, so the written tags come out as
        # 'loss_family/loss_family/loss' and 'loss_family/loss_family/lr'
        # (the reader script below matches exactly these tags).
        _sum_loss = tf.summary.scalar('loss', self.loss, family='loss_family')
        _sum_lr = tf.summary.scalar('lr', self.lr, family='loss_family')
        self.sum = tf.summary.merge([_sum_loss, _sum_lr])

    # ============= train =============
    def train(self):
        # configs
        BATCH_SIZE = self.batch_size
        EPOCHS = 3
        mnist = input_data.read_data_sets(".", one_hot=True)
        saver = tf.train.Saver()  # unused while the save call below stays commented out
        # Collect this net's trainable weights and build the optimizer.
        train_vars = tf.trainable_variables()
        vars = [var for var in train_vars if var.name.startswith('MinTest')]
        t_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=0.5, beta2=0.999).minimize(self.loss,
                                                                                              var_list=vars)
        gpu_options = tf.GPUOptions(allow_growth=True)  # grow GPU memory on demand
        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            sess.run(tf.global_variables_initializer())
            # The event file is written into the run's output dir, graph included.
            writer = tf.summary.FileWriter("%s/" % self.test_name, sess.graph)
            print('----------- start training -----------')
            lr_now = self.start_lr
            counter = 0
            e = 0
            while e < EPOCHS:
                e = mnist.train.epochs_completed
                start_time = time.time()
                batch_xs, batch_ys = mnist.train.next_batch(BATCH_SIZE)
                batch_xs = np.reshape(batch_xs, (BATCH_SIZE, 28, 28, 1))
                loss, summary_str, _ = sess.run([self.loss, self.sum, t_op],
                                                feed_dict={
                                                    self.xs: batch_xs,
                                                    self.ys: batch_ys,
                                                    self.lr: lr_now,
                                                    self.phase_train: True})
                counter += 1
                writer.add_summary(summary_str, counter)  # one scalar point per training step
                if counter % 100 == 0:
                    print('===== [Epoch %02d/%02d](lr: %.5f) =====' % (e + 1, EPOCHS, lr_now))
                    print('(spend time: %.2fmin) loss: %.4f \n' %
                          ((time.time() - start_time) / 60, loss))
                # saver.save(sess, '%s/weights/epoch%2d.ckpt' % (self.test_name, e))
# ===========================test=======================
#
# def test(args):
# pass
if __name__ == '__main__':
    import argparse

    # Command-line configuration for the training run.
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--test_name', dest='test_name', default='only_test',
                        help='the name of test and mkdir test folder')
    parser.add_argument('--gpu', dest='gpu', default='0', help='gpu use')
    parser.add_argument('--phase', dest='phase', default='train', help='all, train, test')
    parser.add_argument('--lr', dest='lr', type=float, default=0.0001, help='')
    parser.add_argument('--batch_size', dest='batch_size', type=int, default=32, help='')
    args = parser.parse_args()
    # Select the visible GPU(s); PCI_BUS_ID makes device numbering match nvidia-smi.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    print(vars(args))
    if args.phase == 'train':
        tf.reset_default_graph()
        mNet = SomeNet(args)
        mNet.train()
打开tensorboard面板之后,我们可以看到图 1。
图1 tensorflow 可视化
matplotlib可视化
读取tf summary文件,同时做一个类似 tensorboard smooth 功能的平滑滤波,结果如图2。
# -*- coding: utf-8 -*-
"""Description : 写一个 tensorflow summmary reader
可视化 tensorflow 的结果
"""
# talen@uestc 2020/1/4
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import numpy as np
import matplotlib
#matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
def get_data(log_path):
    """Read the `loss` and `lr` scalars from a TensorFlow event file.

    Parameters
    ----------
    log_path : str
        Path to an ``events.out.tfevents.*`` file produced by the
        training script above.

    Returns
    -------
    np.ndarray
        Shape (num_steps, 3), columns ``[step, loss, lr]``. A column is
        NaN for steps recorded before that tag first appeared.
    """
    # refer: https://stackoverflow.com/questions/37304461/tensorflow-importing-data-from-a-tensorboard-tfevent-file
    res = []
    # Fix: previously `loss`/`lr` were unbound until their tag appeared,
    # raising NameError if the first value-bearing event lacked either one.
    loss = lr = float('nan')
    for summary in tf.train.summary_iterator(log_path):
        # Skip the initial graph/meta events, which carry no scalar values.
        if not summary.summary.value:
            continue
        step = summary.step
        for v in summary.summary.value:
            # `family='loss_family'` in tf.summary.scalar doubles the prefix.
            if v.tag == 'loss_family/loss_family/loss':
                loss = v.simple_value
            elif v.tag == 'loss_family/loss_family/lr':
                lr = v.simple_value
        res.append([step, loss, lr])
    return np.array(res)
def smooth(data_array, weight=0.95):
    """Exponential moving average mimicking TensorBoard's smoothing slider.

    Implements ``smoothed[i] = weight * smoothed[i-1] + (1-weight) * data[i]``,
    seeded with the first data point (so ``smoothed[0] == data[0]``).
    https://dingguanglei.com/tensorboard-xia-smoothgong-neng-tan-jiu/

    Parameters
    ----------
    data_array : sequence of float
        Raw values to smooth (e.g. a loss curve).
    weight : float, optional
        Smoothing factor in [0, 1); higher means smoother. Default 0.95,
        matching TensorBoard's default slider position.

    Returns
    -------
    list of float
        Smoothed values, same length as the input; [] for empty input
        (the previous version raised IndexError on an empty sequence).
    """
    smoothed = []
    last = None
    for point in data_array:
        if last is None:
            last = point  # seed the average with the first observation
        last = last * weight + (1 - weight) * point
        smoothed.append(last)
    return smoothed
def draw(data):
    """Plot the smoothed loss curve and save it to 'test.jpg'.

    `data` is the array returned by get_data(): column 0 holds the
    global step and column 1 the raw loss values.
    """
    steps, raw_loss = data[:, 0], data[:, 1]
    plt.plot(steps, smooth(raw_loss), label='loss')
    plt.xlabel('steps')
    plt.ylabel('values')
    plt.title("Tensorflow Summary Plot")
    plt.legend()
    plt.savefig('test.jpg')
PATH = "events.out.tfevents.1578108826.hri-three"
data = get_data(PATH)
draw(data)
图2 matplotlib可视化
接下来,请小可爱们,自由发挥,尽情享用吧。
再见!