2021-11-14

expand_dims: adds a dimension. With axis=0 a new leading axis of length 1 is inserted, so a single (224, 224, 3) image array becomes a batch of one with shape (1, 224, 224, 3).

img_to_array converts a PIL image into a NumPy array. The content and shape are unchanged; the only difference is that the pixel values go from integer to floating-point type.

preprocess_input: applies the input normalization the pretrained VGG16 weights expect (RGB-to-BGR conversion plus subtraction of the ImageNet channel means), which improves the prediction results.
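As a quick illustration of these three steps, here is a minimal sketch using the same sample image path as the script below; the printed shapes assume the 224x224 RGB target size:

import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input

img = image.load_img('./samples/vegetables.jpg', target_size=(224, 224))
x = image.img_to_array(img)      # float32 array, shape (224, 224, 3)
print(x.dtype, x.shape)
x = np.expand_dims(x, axis=0)    # add a batch axis -> shape (1, 224, 224, 3)
print(x.shape)
x = preprocess_input(x)          # RGB->BGR, subtract ImageNet channel means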

K.gradients(y, x) computes the derivative of y with respect to x. With y = (y1, y2) and x = (x1, x2, x3), the result is

(∂y1/∂x1 + ∂y2/∂x1, ∂y1/∂x2 + ∂y2/∂x2, ∂y1/∂x3 + ∂y2/∂x3)

i.e. the outputs are summed before differentiating with respect to each input component.

K.function builds a callable that accepts input data and returns NumPy arrays. It is a convenient way to inspect intermediate results, especially for Input layers with variable-length inputs.
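A minimal sketch of both ideas, assuming the same TF1-compatibility setup the script below uses (tf.disable_v2_behavior()):

import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow.keras.backend as K
tf.disable_v2_behavior()

x = K.placeholder(shape=(3,))                  # x = (x1, x2, x3)
y = K.stack([K.sum(x ** 2), K.sum(3.0 * x)])   # y = (y1, y2)
grad = K.gradients(K.sum(y), x)[0]             # d(y1 + y2)/dx = 2x + 3
f = K.function([x], [grad])                    # symbolic graph -> NumPy
print(f([np.array([1.0, 2.0, 3.0], dtype='float32')])[0])  # [5. 7. 9.]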

Grad-CAM: producing the heatmap

import warnings
import heapq

import numpy as np
import cv2

import tensorflow.compat.v1 as tf
import tensorflow.keras as Kl
import tensorflow.keras.backend as K
from tensorflow.keras.applications.vgg16 import (
    VGG16, preprocess_input, decode_predictions)
from tensorflow.keras.preprocessing import image
from tensorflow.python.framework import ops

tf.disable_v2_behavior()


def load_image(path):
    img_path = path
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x


def register_gradient():
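    # Register a "GuidedBackProp" gradient for ReLU: during backprop, the gradient
    # passes only where both the incoming gradient and the forward input are positive.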
    if "GuidedBackProp" not in ops._gradient_registry._registry:
        @ops.RegisterGradient("GuidedBackProp")
        def _GuidedBackProp(op, grad):
            dtype = op.inputs[0].dtype
            return grad * tf.cast(grad > 0., dtype) * tf.cast(op.inputs[0] > 0., dtype)


def compile_saliency_function(model, activation_layer='block5_conv3'):
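    # Build a K.function mapping an input image to the gradient of the channel-wise
    # max of `activation_layer` (summed over space) with respect to that image.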
    input_img = model.input
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
    layer_output = layer_dict[activation_layer].output
    max_output = K.max(layer_output, axis=3)
    saliency = K.gradients(K.sum(max_output), input_img)[0]
    return K.function([input_img, K.learning_phase()], [saliency])


def modify_backprop(model, name):
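    # Rebuild VGG16 inside a gradient_override_map so that every ReLU op uses the
    # registered GuidedBackProp gradient instead of the standard ReLU gradient.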
    warnings.filterwarnings('ignore')
    g = tf.get_default_graph()
    with g.gradient_override_map({'Relu': name}):

        # get layers that have an activation
        layer_dict = [layer for layer in model.layers[1:]
                      if hasattr(layer, 'activation')]

        # replace relu activation
        for layer in layer_dict:
            if layer.activation == Kl.activations.relu:
                layer.activation = tf.nn.relu

        # re-instantiate a new model
        new_model = VGG16(weights='imagenet')
    return new_model


def deprocess_image(x):
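    # Convert a saliency/gradient tensor into a displayable image: center it, scale
    # to std 0.1, shift and clip to [0, 1], then map to uint8 in [0, 255].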
    if np.ndim(x) > 3:
        x = np.squeeze(x)
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_data_format() == 'channels_first':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x

'''
def _compute_gradients(tensor, var_list):
    with tf.GradientTape() as gtape:
        grads = gtape.gradient(tensor, var_list)
        return [grad if grad is not None else tf.zeros_like(var) for var, grad in zip(var_list, grads)]
'''
image_path = r'./samples/vegetables.jpg'
preprocessed_input = load_image(image_path)
model = VGG16()

register_gradient()
'''
guided_model = modify_backprop(model, 'GuidedBackProp')
saliency_fn = compile_saliency_function(guided_model)
saliency = saliency_fn([preprocessed_input, 0])
gradcam = saliency[0].transpose(1, 2, 3, 0)
a = np.squeeze(gradcam)
cv2.imshow(r'Guided_BP', deprocess_image(a))
cv2.waitKey(0)
cv2.imwrite(r'./samples/Guided_BP.png', deprocess_image(a))
'''
guided_model = modify_backprop(model, 'GuidedBackProp')
saliency_fn = compile_saliency_function(guided_model)
saliency = saliency_fn([preprocessed_input, 0])
pred = model.predict(preprocessed_input)
top1_idx, top2_idx, top3_idx= heapq.nlargest(3, range(len(pred[0])), pred[0].take)
top_1 = decode_predictions(pred)[0][0]
top_2 = decode_predictions(pred)[0][1]
top_3 = decode_predictions(pred)[0][2]
print('Predicted class:')
print('%s (%s , %d) with probability %.2f' % (top_1[1], top_1[0], top1_idx, top_1[2]))
print('%s (%s , %d) with probability %.2f' % (top_2[1], top_2[0], top2_idx, top_2[2]))
print('%s (%s , %d) with probability %.2f' % (top_3[1], top_3[0], top3_idx, top_3[2]))
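# Grad-CAM: differentiate the top-1 class score w.r.t. the "block5_pool" feature maps,
# average the gradients spatially to get one weight per channel, weight the feature
# maps accordingly, average over channels, then apply ReLU and normalize to [0, 1].
# Note: block5_pool is the final pooling layer (7x7x512); the last convolutional
# layer block5_conv3 is another common choice for Grad-CAM on VGG16.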
class_output = model.output[:, top1_idx]

last_conv_layer = model.get_layer("block5_pool")
grads = K.gradients(class_output, last_conv_layer.output)[0]
pooled_grads = K.mean(grads, axis=(0, 1, 2))
iterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]])
pooled_grads_value, conv_layer_output_value = iterate([preprocessed_input])

for i in range(512):
    conv_layer_output_value[:, :, i] *= pooled_grads_value[i]

heatmap = np.mean(conv_layer_output_value, axis=-1)
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)

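# Overlay: resize the heatmap to the image size, convert it to uint8, apply a JET
# colormap, and blend it with the original image (0.6 image + 0.4 heatmap).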
img = cv2.imread(image_path)
img = cv2.resize(img, dsize=(224, 224), interpolation=cv2.INTER_NEAREST)
# img = img_to_array(image)
heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
heatmap = np.uint8(255 * heatmap)
#cv2.imwrite(r'./samples/Heatmap1.png', heatmap)
#cv2.imshow('heatmap1', heatmap)
#cv2.waitKey(0)

heatmap2color = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
grd_CAM = cv2.addWeighted(img, 0.6, heatmap2color, 0.4, 0)
cv2.imwrite(r'./samples/Grd-CAM1.png', grd_CAM)
cv2.imshow('Grd-CAM1', grd_CAM)
cv2.waitKey(0)

# Guided Grad-CAM: weight the guided-backprop saliency by the Grad-CAM heatmap
# computed above (Heatmap.png is never written by this script, so don't re-read it).
heatmap_3c = cv2.cvtColor(heatmap, cv2.COLOR_GRAY2BGR)
guided_CAM = saliency[0].transpose(1, 2, 3, 0) * heatmap_3c[..., np.newaxis]
guided_CAM = deprocess_image(guided_CAM)
cv2.imwrite(r'./samples/Guided-CAM1.png', guided_CAM)
cv2.imshow('Guided-CAM1', guided_CAM)
cv2.waitKey(0)