将训练/验证 log 可视化为 tensorboard 图表
文件tree如下:
- train 和val 分开显示
from tensorboardX import SummaryWriter
import re
import numpy as np
import os, sys
'''
安装依赖:tensorboard,tensorboardX
在yyq根目录输入:tensorboard --logdir=./runs/exp
'''
# Event files go to ./runs/exp; `comment` is appended to the run name by tensorboardX.
writer = SummaryWriter('./runs/exp',comment='butd')
# Metric names exactly as they appear in the train / val log lines (see appendix).
keywordsT = ['train loss', 'top-5 acc']
keywordsV = ['val loss', 'top-5 acc']
# tag -> list of scalar values parsed from the log file currently being processed
scalarDict = {}
def paresTxt(keyword, text):
    """Extract every numeric value logged as ``<keyword> = <number>`` from *text*.

    Parameters
    ----------
    keyword : str
        Metric name to search for (e.g. ``'train loss'``). It is regex-escaped,
        so names containing metacharacters (``'top-5 acc'``) are matched literally.
    text : str
        Full contents of a log file.

    Returns
    -------
    list[str]
        The matched numbers as strings, in file order ([] if none found).
    """
    # raw string + re.escape for safety; (\d+(?:\.\d+)?) matches an integer or
    # a decimal, unlike the looser \d+\.*\d* which also accepted malformed runs
    # of dots (e.g. '1..2') that would crash the later float() conversion.
    pattern = re.escape(keyword) + r' = (\d+(?:\.\d+)?)'
    return re.findall(pattern, text)
root_path = os.path.join(sys.path[0], 'log')
# Walk the log directory; every *.txt file becomes a set of tensorboard scalars.
for root, dirs, files in os.walk(root_path):
    # BUGFIX: join with the directory currently being walked (`root`), not the
    # fixed top-level `root_path`, so logs inside subdirectories resolve correctly.
    log_list = [os.path.join(root, f) for f in files if f.endswith('.txt')]
    print("\033[1;36m共搜索到%d 个日志文件" % (len(log_list)))
    print("\033[1;33mfile_list: {}".format(files))
    for log in log_list:
        # Train and val logs label their loss differently ('train loss' / 'val loss');
        # pick the keyword set from the file name.
        keywords = keywordsT if 'train' in log else keywordsV
        with open(log, 'r', encoding='utf-8') as f:
            text = f.read()
        # Build the scalar dict: key = tag, value = list of scalar values.
        for keyword in keywords:
            scalarList = paresTxt(keyword, text)
            scalarDict[keyword] = list(map(float, scalarList))
        # Emit one loss and one accuracy point per epoch; zip truncates to the
        # shorter list if the log ended mid-epoch.
        for epoch, loss, acc in zip(np.arange(len(scalarDict[keywords[0]])),
                                    scalarDict[keywords[0]],
                                    scalarDict[keywords[1]]):
            writer.add_scalar(keywords[0], loss, global_step=epoch)
            # Prefix the shared 'top-5 acc' tag so the train and val curves get
            # distinct tags (i.e. are displayed separately).
            tag1 = 'val ' + keywords[1] if 'val' in log else 'train ' + keywords[1]
            writer.add_scalar(tag1, acc, global_step=epoch)
效果如下:
- train 和val 显示在一起
'''
安装依赖:tensorboard,tensorboardX
在yyq根目录输入:tensorboard --logdir=./runs/
'''
# Show train and val together: one SummaryWriter run directory per log file, so
# tensorboard overlays the two runs on the same charts.
logdirList = ['./runs/exp', './runs/exp1']
# Tags shared by both runs ('loss' matches both 'train loss = ' and 'val loss = ').
keywords = ['loss', 'top-5 acc']
# tag -> list of scalar values parsed from the log file currently being processed
scalarDict = {}
def paresTxt(keyword, text):
    """Extract every numeric value logged as ``<keyword> = <number>`` from *text*.

    Parameters
    ----------
    keyword : str
        Metric name to search for; regex-escaped so metacharacters (e.g. the
        ``-`` in ``'top-5 acc'``) are matched literally.
    text : str
        Full contents of a log file.

    Returns
    -------
    list[str]
        The matched numbers as strings, in file order ([] if none found).
    """
    # raw string + re.escape for safety; (\d+(?:\.\d+)?) matches an integer or
    # a decimal, unlike the looser \d+\.*\d* which also accepted malformed runs
    # of dots (e.g. '1..2') that would crash the later float() conversion.
    pattern = re.escape(keyword) + r' = (\d+(?:\.\d+)?)'
    return re.findall(pattern, text)
root_path = os.path.join(sys.path[0], 'log')
for root, dirs, files in os.walk(root_path):
    # BUGFIX: join with the directory currently being walked (`root`), not the
    # fixed top-level `root_path`, so logs inside subdirectories resolve correctly.
    log_list = [os.path.join(root, f) for f in files if f.endswith('.txt')]
    print("\033[1;36m共搜索到%d 个日志文件" % (len(log_list)))
    print("\033[1;33mfile_list: {}".format(files))
    # Pair the i-th log file with the i-th run directory (train -> exp, val -> exp1).
    # NOTE(review): this relies on os.walk listing the files in the same order as
    # logdirList — fragile; confirm the file naming keeps that ordering.
    # min() guards against an IndexError when there are fewer logs than run dirs.
    for i in range(min(len(logdirList), len(log_list))):
        writer = SummaryWriter(logdirList[i])
        with open(log_list[i], 'r', encoding='utf-8') as f:
            text = f.read()
        # Build the scalar dict: key = tag, value = list of scalar values.
        for keyword in keywords:
            scalarList = paresTxt(keyword, text)
            scalarDict[keyword] = list(map(float, scalarList))
        # 将train和val的events分别保存在exp和exp1中 (train/val events are saved
        # to exp and exp1 respectively); identical tags make tensorboard overlay them.
        for epoch, loss, acc in zip(np.arange(len(scalarDict[keywords[0]])),
                                    scalarDict[keywords[0]],
                                    scalarDict[keywords[1]]):
            writer.add_scalar(keywords[0], loss, global_step=epoch)
            writer.add_scalar(keywords[1], acc, global_step=epoch)
        # Close the per-run writer so its event file is flushed to disk.
        writer.close()
效果如下:
附录:
- log_run_train.txt
nowTime: 2020-12-12 20:26:47
epoch = 0 train loss = 24.52560582809527 top-5 acc = 50.78516206757651
nowTime: 2020-12-12 22:15:04
epoch = 1 train loss = 9.110681539293964 top-5 acc = 66.10805613760192
nowTime: 2020-12-13 00:31:30
epoch = 2 train loss = 7.187706227126428 top-5 acc = 69.8835968863907
nowTime: 2020-12-13 02:01:32
epoch = 3 train loss = 6.619107402551463 top-5 acc = 71.7538596757047
nowTime: 2020-12-13 03:17:38
epoch = 4 train loss = 6.394670065123066 top-5 acc = 72.96222180339521
nowTime: 2020-12-13 04:19:15
epoch = 5 train loss = 6.2623699555956485 top-5 acc = 73.8262064989634
- log_run_val.txt
nowTime: 2020-12-12 22:12:02
val loss = 12.450690227832455 top-5 acc = 64.08342894207331 bleu4 = 0.1501
nowTime: 2020-12-13 00:29:26
val loss = 7.527754828228004 top-5 acc = 69.64280109778595 bleu4 = 0.1831
nowTime: 2020-12-13 01:59:21
val loss = 6.737729009651888 top-5 acc = 71.989719590318 bleu4 = 0.1987
nowTime: 2020-12-13 03:15:28
val loss = 6.467285215502328 top-5 acc = 73.32324355992314 bleu4 = 0.2111
nowTime: 2020-12-13 04:17:07
val loss = 6.3310750124022235 top-5 acc = 74.16890141198706 bleu4 = 0.2145