tensorboard:
1)通过writer记录可视化数据
2)通过SummaryWriter 在硬盘中生成event file(生成在runs文件夹下)
3)在终端打开web网页端(在terminal中输入:tensorboard --logdir=./)
SummaryWriter
# Scalar-logging demo: write three scalar curves into one event file.
# (Reconstructed: the snippet's loop indentation was lost, making it invalid Python.)
max_epoch = 100

writer = SummaryWriter(comment='test_comment', filename_suffix="test_suffix")

for x in range(max_epoch):
    # one tag per call -> one independent curve in TensorBoard
    writer.add_scalar('y=2x', x * 2, x)
    writer.add_scalar('y=pow_2_x', 2 ** x, x)
    # one main tag with several named sub-curves drawn in the same chart
    writer.add_scalars('data/scalar_group', {"xsinx": x * np.sin(x),
                                             "xcosx": x * np.cos(x)}, x)

writer.close()
我的理解是:这段代码的思路是先创建 runs 文件夹及其中的 event file,然后再向 event file 中添加记录内容。
values可以是:weight,bias,gradient
# Histogram demo: log the same two distributions at two global steps.
# (Reconstructed: the snippet's loop indentation was lost, making it invalid Python.)
writer = SummaryWriter(comment='test_comment', filename_suffix="test_suffix")

for x in range(2):
    # reseed per step so the normal draw is reproducible for each global step
    np.random.seed(x)

    data_union = np.arange(100)                 # ramp 0..99 (same at every step)
    data_normal = np.random.normal(size=1000)   # 1000 standard-normal samples

    writer.add_histogram('distribution union', data_union, x)
    writer.add_histogram('distribution normal', data_normal, x)
注意:
dataformats默认的是CHW形式
img_tensor中,若所有像素的值都小于等于1,整张图会被乘以255;若存在像素值大于1,则默认数值已在0-255区间内,像素值保持不变
# Demo of add_image with three inputs that exercise its value-range handling.
writer = SummaryWriter(comment='test_your_comment', filename_suffix="_test_your_filename_suffix")
# img 1: random N(0, 1) values (CHW tensor, values mostly <= 1)
fake_img = torch.randn(3, 512, 512)
writer.add_image("fake_img", fake_img, 1)
time.sleep(1)
# img 2: every pixel exactly 1.0
fake_img = torch.ones(3, 512, 512)
time.sleep(1)
writer.add_image("fake_img", fake_img, 2)
# img 3: every pixel 1.1 — values above 1; presumably treated as already being
# in the 0-255 range and left unscaled (contrast with img 2) — confirm against
# the add_image docs
fake_img = torch.ones(3, 512, 512) * 1.1
time.sleep(1)
writer.add_image("fake_img", fake_img, 3)
同时显示多张图像,注意padding(图像间距)默认为2,
# Log one training batch as a single grid image.
writer = SummaryWriter(comment='test_your_comment', filename_suffix="_test_your_filename_suffix")
# image folder path
split_dir = os.path.join("..", "..", "data", "rmb_split")
train_dir = os.path.join(split_dir, "train")
# preprocessing: resize to 32x64, then convert PIL image to tensor
transform_compose = transforms.Compose([transforms.Resize((32, 64)), transforms.ToTensor()])
# dataset and dataloader
train_data = RMBDataset(data_dir=train_dir, transform=transform_compose)
train_loader = DataLoader(dataset=train_data, batch_size=16, shuffle=True)
# take one batch: shape (16, 3, 32, 64) given batch_size=16 and Resize((32, 64))
data_batch, label_batch = next(iter(train_loader))
# build a 4-images-per-row grid, normalizing each image independently, and log it
img_grid = vutils.make_grid(data_batch, nrow=4, normalize=True, scale_each=True)
writer.add_image("input img", img_grid, 0)
writer.close()
# Log the computation graph of LeNet, traced with a dummy input.
writer = SummaryWriter(comment='test_your_comment', filename_suffix="_test_your_filename_suffix")
# dummy input: a batch of one 3x32x32 image, used only to trace the graph
fake_img = torch.randn(1, 3, 32, 32)
# model
lenet = LeNet(classes=2)
writer.add_graph(lenet, fake_img)
writer.close()
# torchsummary prints a layer-by-layer summary of the model for the given
# input size; device="cpu" keeps the dry run off the GPU.
from torchsummary import summary
print(summary(lenet, (3, 32, 32), device="cpu"))
结果如下:
可见,torchsummary可以非常方便地查看模型的结构与参数信息
kernel的可视化:
# Visualize the convolution kernels of the first two conv layers of AlexNet.
# (Reconstructed: the snippet's with/for/if indentation was lost, making it invalid Python.)
writer = SummaryWriter(comment='test_your_comment', filename_suffix="_test_your_filename_suffix")

alexnet = models.alexnet(pretrained=True)

# kernel_num tracks which conv layer we are on (becomes 0 at the first Conv2d)
kernel_num = -1
# visualize conv layers 0 and 1 only
vis_max = 1

# torch.no_grad() added to work around a small bug under pytorch 1.7
with torch.no_grad():
    for sub_module in alexnet.modules():
        if isinstance(sub_module, nn.Conv2d):
            kernel_num += 1
            if kernel_num > vis_max:
                break
            # the conv kernels are the layer's weight tensor
            kernels = sub_module.weight
            # NOTE(review): weight shape is (out_ch, in_ch, kH, kW); the names
            # k_w and k_h are swapped relative to that order — harmless here
            # only because AlexNet kernels are square. Confirm before reuse.
            c_out, c_int, k_w, k_h = tuple(kernels.shape)

            # Method 1: one grid per output channel; each input channel becomes
            # a separate single-channel image, normalized per image.
            for o_idx in range(c_out):
                kernel_idx = kernels[o_idx, :, :, :].unsqueeze(1)  # make_grid wants BCHW; add the C dim
                kernel_grid = vutils.make_grid(kernel_idx, normalize=True, scale_each=True, nrow=c_int)
                writer.add_image('{}_Convlayer_split_in_channel'.format(kernel_num), kernel_grid, global_step=o_idx)

            # Method 2: reinterpret all kernels as 3-channel images and
            # normalize the batch in one go.
            # NOTE(review): view(-1, 3, ...) groups channels meaningfully only
            # when in_ch == 3 (the first conv layer); for layer 1 it mixes
            # channels from different filters — confirm this is intended.
            kernel_all = kernels.view(-1, 3, k_h, k_w)  # (n, 3, h, w)
            kernel_grid = vutils.make_grid(kernel_all, normalize=True, scale_each=True, nrow=8)  # c, h, w
            writer.add_image('{}_all'.format(kernel_num), kernel_grid, global_step=322)

            print("{}_convlayer shape:{}".format(kernel_num, tuple(kernels.shape)))

writer.close()
feature map的可视化:
# Visualize the feature maps produced by AlexNet's first conv layer on one image.
# (Reconstructed: the snippet's block indentation was lost, making it invalid Python.)
with torch.no_grad():
    writer = SummaryWriter(comment='test_your_comment', filename_suffix="_test_your_filename_suffix")

    # ---- data ----
    path_img = "./lena.png"  # your path to image

    # ---- preprocessing: resize, to-tensor, then channel-wise normalization ----
    normMean = [0.49139968, 0.48215827, 0.44653124]
    normStd = [0.24703233, 0.24348505, 0.26158768]
    norm_transform = transforms.Normalize(normMean, normStd)
    img_transforms = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        norm_transform
    ])

    img_pil = Image.open(path_img).convert('RGB')
    if img_transforms is not None:
        img_tensor = img_transforms(img_pil)
    # add the batch dimension in place: chw --> bchw
    img_tensor.unsqueeze_(0)

    # ---- model ----
    alexnet = models.alexnet(pretrained=True)

    # ---- forward through the first conv layer only ----
    convlayer1 = alexnet.features[0]
    fmap_1 = convlayer1(img_tensor)

    # treat each output channel as a separate single-channel image for the grid
    fmap_1.transpose_(0, 1)  # bchw=(1, 64, 55, 55) --> (64, 1, 55, 55)
    fmap_1_grid = vutils.make_grid(fmap_1, normalize=True, scale_each=True, nrow=8)

    writer.add_image('feature map in conv1', fmap_1_grid, global_step=322)
    writer.close()