from tensorboardX import SummaryWriter
import torchvision.utils as vutils

# Module-level TensorBoard writer; every helper below logs through it.
writer = SummaryWriter('./Result')  # event files are written to this folder
def show(model, loss):
    """Log per-layer weight histograms and loss values to TensorBoard.

    Prints the model structure, then iterates the model's named parameters,
    skipping any whose name contains 'bn' (batch-norm parameters).

    Args:
        model: a torch.nn.Module whose named parameters are inspected.
        loss: indexable sequence of loss values; loss[i] is logged against
            the i-th parameter's enumeration index.
            NOTE(review): pairing a loss value with a *parameter index*
            looks suspicious -- confirm the intended alignment with callers.
    """
    # Show the weights of each layer
    print(model)
    for i, (name, param) in enumerate(model.named_parameters()):
        if 'bn' not in name:
            # global_step is fixed at 0, so repeated calls overwrite the
            # same step rather than building a history.
            writer.add_histogram(name, param, 0)
            writer.add_scalar('loss', loss[i], i)
# Feature-map (convolution output) visualization
def show_featuremap(model, image):
    """Visualize intermediate feature maps of ``model`` on ``image``.

    Writes a grid of the raw input image(s) to TensorBoard, then feeds the
    image through the model's top-level child modules in registration
    order, logging a feature-map grid after every module whose name
    contains 'c' (the conv layers of LeNet-style models).  Iteration stops
    at the first module whose name does not contain 'c', because later
    (fully-connected) layers would require the tensor to be flattened
    first.

    Args:
        model: a torch.nn.Module; traversed via ``model._modules``, so only
            direct children are visited.
        image: input batch tensor -- assumed shape (B, C, H, W), TODO confirm.
    """
    # Grid of the raw input image(s)
    img_grid = vutils.make_grid(image, normalize=True, scale_each=True, nrow=2)
    writer.add_image('raw img', img_grid)
    model.eval()
    for name, layer in model._modules.items():
        print(name, layer)
        # LeNet-specific stopping rule: a layer without 'c' in its name is
        # past the conv stack, so there is nothing more to visualize.
        if 'c' not in name:
            return
        # Forward the running tensor through this layer.
        image = layer(image)
        # (B, C, H, W) -> (C, B, H, W): each channel becomes one grid cell.
        x1 = image.transpose(0, 1)
        img_grid = vutils.make_grid(x1, normalize=True, scale_each=True, nrow=4)
        writer.add_image(f'{name}_feature_maps', img_grid, global_step=0)
# Convolution-kernel visualization
def show_kernal(model):
    """Visualize every conv layer's kernels as an image grid in TensorBoard.

    Args:
        model: a torch.nn.Module whose conv weight tensors are assumed to
            have shape (out_channels, in_channels, kernel_h, kernel_w) --
            the standard torch.nn.Conv2d layout.
    """
    for name, param in model.named_parameters():
        if 'conv' in name and 'weight' in name:
            in_channels = param.size(1)
            # Kernel spatial size: dim 2 is height, dim 3 is width.
            k_h, k_w = param.size(2), param.size(3)
            # Flatten (out, in) into single-channel tiles, one per kernel.
            # BUG FIX: view dims must be (height, width); the original
            # passed (k_w, k_h), silently garbling non-square kernels.
            kernel_all = param.view(-1, 1, k_h, k_w)
            # One grid row per output channel.
            kernel_grid = vutils.make_grid(kernel_all, normalize=True,
                                           scale_each=True, nrow=in_channels)
            writer.add_image(f'{name}_all', kernel_grid, global_step=0)
if __name__ == '__main__':
    import torch
    import LeNet5

    model = LeNet5.LeNet5(10)
    # NOTE(review): the checkpoint name suggests a ResNet trained on
    # CIFAR-10, but the model is a LeNet-5 -- confirm this state dict
    # actually matches the architecture.
    model.load_state_dict(torch.load('ResnetCifar10.pt'))
    # ---------------------- feature-map visualization ---------------#
    import torchvision
    import torchvision.transforms as transforms
    mnist_train = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST',
        train=True, download=True, transform=transforms.ToTensor())
    # Batch size 1: visualize a single image at a time.
    mnist_iter = torch.utils.data.DataLoader(mnist_train, 1, shuffle=True)
    for i, (images, labels) in enumerate(mnist_iter):
        print(images)
        print(images.shape)
        show_featuremap(model, images)
        break  # only the first batch is visualized
    # ---------------------- kernel visualization -------------------#
    # show_kernal(model)
# Blog note: this supplies the "tensorboard util" file referenced in an
# earlier TensorBoard-visualization post (latest recommended revision
# published 2022-12-18 15:18:02).