Hook Functions and the CAM Algorithm

I. Hook Function Concepts

The hook mechanism attaches extra functionality without modifying the main computation, much like hanging an accessory on a hook.
1. torch.Tensor.register_hook(hook)
Purpose: registers a backward hook on a tensor. The hook takes a single argument (the tensor's gradient) and may return a new tensor to replace that gradient, or return nothing to leave it unchanged.

import torch

flag=1
if flag:
    w = torch.tensor([1.0], requires_grad=True)
    x = torch.tensor([2.], requires_grad=True)
    a = torch.add(x, w)
    b = torch.add(w, 1)
    y = torch.mul(a, b)

    a_grad = list()
    def grad_hook(grad):
        a_grad.append(grad)

    a.register_hook(grad_hook)
    y.backward()
    # Inspect the gradients.
    # a, b, y are non-leaf nodes, so their gradients are freed after backward.
    print(w.grad, x.grad, a.grad, b.grad, y.grad)
    # => tensor([5.]) tensor([2.]) None None None
    print(a_grad[0])
    # => tensor([2.])
flag = 1  # second demo: modify the gradient inside the hook
if flag:
    w = torch.tensor([1.0], requires_grad=True)
    x = torch.tensor([2.], requires_grad=True)
    a = torch.add(x, w)
    b = torch.add(w, 1)
    y = torch.mul(a, b)

    a_grad = list()

    # Modify the gradient in place.
    def grad_hook(grad):
        grad *= 2
    """
    Variant: modify in place and also return a new tensor;
    the returned tensor replaces w's gradient.
    def grad_hook(grad):
        grad *= 2
        return 3 * grad
    """
    w.register_hook(grad_hook)
    y.backward()
    print(w.grad)
    # => tensor([10.])

The hook may also return the modified gradient, in which case the returned tensor replaces the original one. With the variant shown in the triple-quoted string above (in-place *= 2 followed by return 3 * grad), w's gradient becomes 6 times the original value, i.e. tensor([30.]), so returning a value is not the same as only modifying the gradient in place.
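To isolate the return-value behaviour, here is a minimal sketch (the hook name scale_hook is just illustrative, the graph is the same w, x, y as above) in which the hook leaves grad untouched and only returns a scaled copy; the returned tensor becomes w's gradient:

import torch

w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
y = torch.mul(torch.add(x, w), torch.add(w, 1))

def scale_hook(grad):
    # grad is not modified; the returned tensor replaces w's gradient
    return 3 * grad

w.register_hook(scale_hook)
y.backward()
print(w.grad)   # => tensor([15.]), i.e. 3 x the original tensor([5.])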

2. torch.nn.Module.register_forward_hook(hook)
Registers a forward hook on a module. It is called every time the module's forward() has produced an output, with the signature hook(module, input, output), where input is the tuple of positional arguments passed to forward and output is the module's output.
For example, suppose we want to capture conv1's input and output feature map in the small network below, where a 3x3 convolution (1 input channel, 2 output channels) is followed by 2x2 max pooling and applied to a 4x4 input of ones:

import torch
import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        self.conv1=nn.Conv2d(1,2,3)
        self.pool = nn.MaxPool2d(2,2)

    def forward(self,input):
        x=self.conv1(input)
        y=self.pool(x)
        return y

def forward_hook(module,input,output):
    # record conv1's output feature map and the input it received
    fmap_block.append(output)
    input_block.append(input)

net=Net()
# fill output channel 0's kernel with 1s, channel 1's with 2s, and zero the bias
net.conv1.weight[0].detach().fill_(1)
net.conv1.weight[1].detach().fill_(2)
net.conv1.bias.detach().zero_()

fmap_block,input_block=list(),list()
net.conv1.register_forward_hook(forward_hook)

fake_img=torch.ones((1,1,4,4))
output=net(fake_img)

print("output shape:{}\n output value : {}\n".format(output.shape,output))
print("feature map shape:{}\n output value : {}\n".format(fmap_block[0].shape,fmap_block[0]))
print("input shape:{}\n iinput value:{}".format(input_block[0][0].shape,input_block[0]))

The printout should show a network output of shape torch.Size([1, 2, 1, 1]) with values 9 and 18, and a captured conv1 feature map of shape torch.Size([1, 2, 2, 2]) filled with 9 in channel 0 and 18 in channel 1 (each all-ones 3x3 kernel summed over a 3x3 patch of ones gives 9; the second channel's weights are 2).

3. torch.nn.Module.register_forward_pre_hook(hook)
Registers a pre-forward hook; it runs before the module's forward() and has the signature hook(module, input), where input is the tuple of arguments about to be passed to forward.
4. torch.nn.Module.register_backward_hook(hook)
Registers a backward hook; during backpropagation it is called with the signature hook(module, grad_input, grad_output), the gradients with respect to the module's inputs and outputs. (Recent PyTorch versions deprecate this API in favor of register_full_backward_hook.)

import torch
import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        self.conv1=nn.Conv2d(1,2,3)
        self.pool = nn.MaxPool2d(2,2)

    def forward(self,input):
        x=self.conv1(input)
        y=self.pool(x)
        return y

def forward_pre_hook(module,input):
    print("forward_pre_hook_input:{}".format(input))

def backward_hook(module,grad_input,grad_output):
    print("backward_hook_input:{}".format(grad_input))
    print("backward_hook_output:{}".format(grad_output))

net=Net()
net.conv1.weight[0].detach().fill_(1)
net.conv1.weight[1].detach().fill_(2)
net.conv1.bias.detach().zero_()

fmap_block,input_block=list(),list()
net.conv1.register_forward_pre_hook(forward_pre_hook)
net.conv1.register_backward_hook(backward_hook)

fake_img=torch.ones((1,1,4,4))
output=net(fake_img)

loss_fnc=nn.L1Loss()
target=torch.rand_like(output)
loss=loss_fnc(output, target)   # L1Loss expects (input, target)
loss.backward()


Visualizing convolutional feature maps with hook functions

import torch
import torch.nn as nn
import numpy as np
from PIL import Image
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.utils.tensorboard import SummaryWriter
from tools.common_tools import set_seed   # helper from the course's tools package
import torchvision.models as models

set_seed(1)  # set the random seed

flag = 1
if flag:
    writer = SummaryWriter(comment='test_your_comment', filename_suffix="_test_your_filename_suffix")

    # data
    path_img = "./lena.png"     # your path to image
    normMean = [0.49139968, 0.48215827, 0.44653124]
    normStd = [0.24703233, 0.24348505, 0.26158768]

    norm_transform = transforms.Normalize(normMean, normStd)
    img_transforms = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        norm_transform
    ])

    img_pil = Image.open(path_img).convert('RGB')
    if img_transforms is not None:
        img_tensor = img_transforms(img_pil)
    img_tensor.unsqueeze_(0)    # chw --> bchw

    # model
    alexnet = models.alexnet(pretrained=True)

    # register hooks
    fmap_dict = dict()
    for name, sub_module in alexnet.named_modules():
        if isinstance(sub_module, nn.Conv2d):
            key_name = str(sub_module.weight.shape)
            fmap_dict.setdefault(key_name, list())

            def hook_func(m, i, o):
                # key the captured feature map by the conv layer's weight shape
                key_name = str(m.weight.shape)
                fmap_dict[key_name].append(o)

            sub_module.register_forward_hook(hook_func)

    # forward
    output = alexnet(img_tensor)

    # add images
    for layer_name, fmap_list in fmap_dict.items():
        fmap = fmap_list[0]        # (1, C, H, W)
        fmap.transpose_(0, 1)      # -> (C, 1, H, W): one grid cell per channel

        nrow = int(np.sqrt(fmap.shape[0]))
        fmap_grid = vutils.make_grid(fmap, normalize=True, scale_each=True, nrow=nrow)
        writer.add_image('feature map in {}'.format(layer_name), fmap_grid, global_step=322)

    writer.close()

II. CAM and Grad-CAM
A limitation of CAM: it requires a global average pooling layer over the feature-map channels right before the classification layer, so an existing network usually has to be modified and retrained. Grad-CAM removes this constraint by using gradients of the class score to weight the feature maps.
Analysis and code: https://zhuanlan.zhihu.com/p/75894080
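For reference, the two localization maps can be written as follows (standard formulations from the CAM and Grad-CAM papers, where A^k is the k-th channel of the chosen conv layer's feature map, w_k^c the classification-layer weight for class c, y^c the class score, and Z the number of spatial positions):

M^{c}_{\mathrm{CAM}}(i, j) = \sum_{k} w^{c}_{k}\, A^{k}(i, j)

\alpha^{c}_{k} = \frac{1}{Z} \sum_{i} \sum_{j} \frac{\partial y^{c}}{\partial A^{k}_{ij}},
\qquad
L^{c}_{\mathrm{Grad\text{-}CAM}} = \mathrm{ReLU}\Big( \sum_{k} \alpha^{c}_{k}\, A^{k} \Big)

The gen_cam function in the code below follows the Grad-CAM formula: the channel weights are the spatial mean of the gradients (np.mean(grads, axis=(1, 2))), the weighted sum is passed through a ReLU (np.maximum(cam, 0)), and the result is min-max normalized.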

import torch.nn as nn
import numpy as np
import torch
from PIL import Image
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
from model.Main_Net import MyEfficientNet   # user-defined EfficientNet wrapper
import cv2

def convert_square(img):
    """Pad a PIL image to a square with a black background."""
    w, h = img.size
    background = Image.new('RGB', size=(max(w, h), max(w, h)), color=(0, 0, 0))  # black square canvas
    length = int(abs(w - h) // 2)  # padding length on one side
    box = (length, 0) if w < h else (0, length)  # paste position
    background.paste(img, box)
    return background

def image_handle(image_path):
    transfm = transforms.Compose([
        transforms.Grayscale(3),
        transforms.Lambda(lambda img: convert_square(img)),
        transforms.Resize([456, 456]),
        transforms.ToTensor(),
    ])
    image = Image.open(image_path)
    image = transfm(image)
    return image

def load_model(model, model_params, device):
    """
    :param model: network to load the weights into
    :param model_params: path to the checkpoint file
    :param device: device to move the model to (CUDA or CPU)
    :return: the loaded model
    """
    model_dict = model.state_dict()
    pretrained_dict = torch.load(model_params)
    # strip the "module." prefix left by nn.DataParallel checkpoints
    pretrained_dict = {k[7:]: v for k, v in pretrained_dict.items() if k[7:] in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    model = model.to(device)
    return model


def comp_class_vec(output_vec, index=None):
    """
    Build the scalar class score that Grad-CAM backpropagates from.
    :param output_vec: tensor, network output of shape (1, num_classes)
    :param index: int, class index; defaults to the predicted class
    :return: tensor, the score of the chosen class
    """
    if index is None:
        index = np.argmax(output_vec.cpu().data.numpy())
    else:
        index = np.array(index)
    index = index[np.newaxis, np.newaxis]
    index = torch.from_numpy(index).to(output_vec.device)
    one_hot = torch.zeros(1, 20, device=output_vec.device).scatter_(1, index, 1)  # 20 classes
    one_hot.requires_grad = True
    class_vec = torch.sum(one_hot * output_vec)  # select the chosen class's score
    return class_vec


def gen_cam(feature_map, grads, img_size):
    """
    Generate the CAM from a feature map and its gradients.
    :param feature_map: np.array, [C, H, W]
    :param grads: np.array, [C, H, W]
    :return: np.array, [H, W]
    """
    cam = np.zeros(feature_map.shape[1:], dtype=np.float32)  # cam shape (H, W)

    weights = np.mean(grads, axis=(1, 2))  # channel weights: spatial mean of the gradients

    for i, w in enumerate(weights):
        cam += w * feature_map[i, :, :]
    cam = np.maximum(cam, 0)  # ReLU: keep only positive contributions
    cam_min = np.min(cam)
    cam_max = np.max(cam)
    cam = (cam - cam_min) / (cam_max - cam_min)  # min-max normalize to [0, 1]
    cam = cv2.resize(cam, (img_size, img_size))

    return cam

def show_cam_on_image(img, mask, fmap_layer_name):
    heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
    heatmap = np.float32(heatmap) / 255
    cam = 0.3 * heatmap + np.float32(img)
    cam = cam / np.max(cam)  # rescale to [0, 1] so the uint8 conversion below does not overflow
    writer.add_image('feature map in {}'.format(fmap_layer_name), np.uint8(cam * 255), dataformats='HWC')

writer = SummaryWriter(log_dir="./logs", flush_secs=60)
model_params = r"./net_params/net.pth"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = MyEfficientNet(20, mode="test")
graph_inputs = torch.rand(1, 3, 456, 456)
writer.add_graph(net, (graph_inputs,))
model = load_model(net, model_params, device)
img = cv2.imread(r"./img.jpg", 1)
image = image_handle(r"./img.jpg")
image = image.unsqueeze(0)
image = image.to(device)


fmap_dict = dict()
grad_dict = dict()
for name, sub_module in model.named_modules():
    if isinstance(sub_module, nn.Conv2d):
        # convs that share a weight shape share a key; only the first captured map per key is shown below
        key_name = str(sub_module.weight.shape)
        fmap_dict.setdefault(key_name, list())
        grad_dict.setdefault(key_name, list())

        # hook that captures the gradient flowing out of this conv layer
        def backward_hook(module, grad_in, grad_out):
            key_name = str(module.weight.shape)
            grad_dict[key_name].append(grad_out[0].detach())

        # hook that captures the feature map produced by this conv layer
        def farward_hook(module, input, output):
            key_name = str(module.weight.shape)
            fmap_dict[key_name].append(output)

        sub_module.register_forward_hook(farward_hook)
        sub_module.register_backward_hook(backward_hook)

# If only a single layer is needed, plain lists are enough to hold the hook results:
# grad_block = list()
# fmap_block = list()

# forward
output = model(image)

# backward
model.zero_grad()
class_loss = comp_class_vec(output)
class_loss.backward()

for (fmap_layer_name, fmap_list), (grad_layer_name, grad_list) in zip(fmap_dict.items(), grad_dict.items()):
    try:
        grads_val = grad_list[0].cpu().data.numpy().squeeze()
        fmap = fmap_list[0].cpu().data.numpy().squeeze()
        cam = gen_cam(fmap, grads_val, 456)
        img_show = np.float32(cv2.resize(img, (456, 456))) / 255
        show_cam_on_image(img_show, cam, fmap_layer_name)
    except Exception:
        # skip layers whose hooks never fired (empty lists) or whose maps cannot be rendered
        continue
writer.close()
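After the script finishes, the Grad-CAM overlays (and the model graph added earlier) can be inspected by pointing TensorBoard at the log directory used above, e.g. tensorboard --logdir=./logs, and opening the Images tab.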