因为论文需要,学习了 PyTorch。其中的 tensorboardX 可视化非常有意思,按照他人的教程写了几个 Demo。
运行以下代码后,在 cmd 或 Terminal 中切换到当前 Python 文件所在目录,输入下面的命令即可查看可视化结果:
tensorboard --logdir=./logs
1. graph:LeNet 网络结构展示(代码中实际定义的是 LeNet,而非 ResNet)
from tensorboardX import SummaryWriter
import torch
from torch import nn as nn
class LeNet(nn.Module):
    """Classic LeNet-5-style CNN for 1x28x28 inputs (e.g. MNIST), 10 output classes."""

    def __init__(self):
        super(LeNet, self).__init__()
        # conv stage 1: 1x28x28 -> 6x28x28 (padded 5x5 conv) -> 6x14x14 (2x2 max-pool)
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 6, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # conv stage 2: 6x14x14 -> 16x10x10 (unpadded 5x5 conv) -> 16x5x5 (2x2 max-pool)
        self.conv2 = nn.Sequential(
            nn.Conv2d(6, 16, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
        )
        # fully connected head: 16*5*5 -> 120 -> 84 -> 10
        self.fc1 = nn.Sequential(nn.Linear(16 * 5 * 5, 120), nn.ReLU())
        self.fc2 = nn.Sequential(nn.Linear(120, 84), nn.ReLU())
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass: x is (N, 1, 28, 28); returns (N, 10) logits."""
        out = self.conv1(x)
        out = self.conv2(out)
        # nn.Linear consumes flat vectors, so collapse the feature maps
        # into one vector per sample before the fully connected layers.
        out = out.view(out.size(0), -1)
        out = self.fc1(out)
        out = self.fc2(out)
        return self.fc3(out)
# Trace the model graph into the TensorBoard event files under ./logs.
dummy_input = torch.rand(13, 1, 28, 28)  # a fake batch of 13 single-channel 28x28 images
model = LeNet()
# NOTE(review): tensorboardX ignores `comment` when `log_dir` is given explicitly — confirm.
with SummaryWriter(comment='LeNet', log_dir='logs') as w:
    w.add_graph(model, (dummy_input,))
2.scalar、histogram、text
import torch as t
from tensorboardX import SummaryWriter

# Demo 2: log scalars, histograms and text while x and y decay geometrically.
with SummaryWriter(log_dir='logs') as writer:
    x = t.FloatTensor([100])
    y = t.FloatTensor([500])
    for epoch in range(100):
        # shrink both series by the same factor each "epoch"
        x = x / 1.5
        y = y / 1.5
        loss = y - x
        print(loss)
        writer.add_histogram('zz/x', x, epoch)
        writer.add_histogram('zz/y', y, epoch)
        writer.add_scalar('data/x', x, epoch)
        writer.add_scalar('data/y', y, epoch)
        writer.add_scalar('data/loss', loss, epoch)
        writer.add_scalars('data/scalar_group',
                           {'x': x, 'y': y, 'loss': loss},
                           epoch)
        writer.add_text('zz/text', 'zz:this is epoch' + str(epoch), epoch)
    # after the loop, dump everything logged via add_scalar(s) to JSON
    writer.export_scalars_to_json("./test.json")
代码来源以及重要参考:
https://blog.csdn.net/JNingWei/article/details/79740825
https://blog.csdn.net/xiaoxifei/article/details/82735355
可以闲着没事玩一玩的各种Demo:
https://www.cnblogs.com/kk17/p/10077335.html