(1) Visualizing VGG19 features in PyTorch and using VGG for a perceptual loss

This post shows how to load a VGG19 model in PyTorch and extract the features before the fully connected layers, both for visualization and for loss computation. First, a VGG19 model consisting only of the convolutional part is defined and pretrained weights are loaded into it. A LossNetwork class then computes MSE losses on the outputs of the relu1_2 through relu5_2 layers, and a visualization utility displays the feature maps of a chosen layer.

1. VGG19 was originally designed for classification, so both feature visualization and the VGG loss naturally use only the part of the network before the fully connected layers. First, let's look at the structure of VGG19 up to that point.

from torchvision.models import vgg19, vgg16
import torch
import torch.nn.functional as F
import cv2
import numpy as np
import matplotlib.pyplot as plt  # needed by the visualization code below
from torchvision import models
from torchsummary import summary
import os

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# torchvision.models already includes vgg19.
# pretrained=False here because the weights are loaded manually below;
# with pretrained=True they are downloaded automatically when the machine is online.
vgg_model = vgg19(pretrained=False).features.to(device)
print(vgg_model)
"""
Sequential(
  (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (1): ReLU(inplace)
  (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (3): ReLU(inplace)
  (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (6): ReLU(inplace)
  (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (8): ReLU(inplace)
  (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (11): ReLU(inplace)
  (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (13): ReLU(inplace)
  (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (15): ReLU(inplace)
  (16): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (17): ReLU(inplace)
  (18): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (19): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (20): ReLU(inplace)
  (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (22): ReLU(inplace)
  (23): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (24): ReLU(inplace)
  (25): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (26): ReLU(inplace)
  (27): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (29): ReLU(inplace)
  (30): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (31): ReLU(inplace)
  (32): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (33): ReLU(inplace)
  (34): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (35): ReLU(inplace)
  (36): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
"""
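
As a quick sanity check (a sketch, not part of the original post), the layer indices used later for the loss ('3', '8', '13', '22', '31') can be confirmed against this printed structure:

# Each index should print a ReLU module: the activation right after
# the second conv of its block (relu1_2, relu2_2, ..., relu5_2).
for idx in [3, 8, 13, 22, 31]:
    print(idx, vgg_model[idx])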

2. Load the VGG model weights

# The full-model checkpoint stores keys like "features.0.weight", while vgg_model
# above is only the features Sequential (keys "0.weight", "2.weight", ...).
# Loading it directly with strict=False would silently match nothing, so strip
# the "features." prefix and keep only the convolutional weights.
state_dict = torch.load('./vgg19-dcbb9e9d.pth')
features_state = {k[len('features.'):]: v for k, v in state_dict.items() if k.startswith('features.')}
vgg_model.load_state_dict(features_state)
vgg_model.eval()

for param in vgg_model.parameters():
    param.requires_grad = False  # freeze VGG: gradients still flow through it to the input, but its weights are never updated
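
One detail worth keeping in mind: the pretrained VGG weights were trained on ImageNet-normalized RGB inputs, so images fed to the network should be preprocessed the same way. A minimal sketch (this transform pipeline is an assumption, not from the original post):

from torchvision import transforms

# Standard ImageNet statistics expected by the pretrained VGG weights.
preprocess = transforms.Compose([
    transforms.ToTensor(),  # HWC uint8 in [0, 255] -> CHW float in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])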

3. Extract VGG features and implement the VGG loss --- the outputs after the relu1_2, relu2_2, relu3_2, relu4_2, and relu5_2 layers

class LossNetwork(torch.nn.Module):
    def __init__(self, vgg_model):
        super(LossNetwork, self).__init__()
        self.vgg_layers = vgg_model
        # Indices into the features Sequential printed above: each entry is
        # the ReLU directly after the second conv of its block.
        self.layer_name_mapping = {
            '3': "relu1_2",
            '8': "relu2_2",
            '13': "relu3_2",
            '22': "relu4_2",
            '31': "relu5_2"
        }
        #self.weight = [1/2.6, 1/4.8, 1/3.7, 1/5.6, 10/1.5]
        self.weight = [1.0, 1.0, 1.0, 1.0, 1.0]

    def output_features(self, x):
        # Run the input layer by layer and record the activations
        # at the layers named in layer_name_mapping.
        output = {}
        for name, module in self.vgg_layers._modules.items():
            x = module(x)
            if name in self.layer_name_mapping:
                output[self.layer_name_mapping[name]] = x
        return list(output.values())

    def forward(self, output, gt):
        loss = []
        output_features = self.output_features(output)
        gt_features = self.output_features(gt)
        for dehaze_feature, gt_feature, loss_weight in zip(output_features, gt_features, self.weight):
            loss.append(F.mse_loss(dehaze_feature, gt_feature) * loss_weight)
        return sum(loss), output_features  # divide by len(loss) to average instead of summing
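
A minimal sketch of how the loss network might be used; the dummy tensors below stand in for a generator's output and the ground-truth image (the shapes and variable names are assumptions for illustration):

loss_network = LossNetwork(vgg_model).to(device)
loss_network.eval()

# One RGB 224x224 image on each side of the comparison.
fake_output  = torch.rand(1, 3, 224, 224, device=device)
ground_truth = torch.rand(1, 3, 224, 224, device=device)

perceptual_loss, features = loss_network(fake_output, ground_truth)
print(perceptual_loss.item(), len(features))  # scalar loss, 5 feature maps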

Visualization code:

# get_row_col is not defined in the original post; a minimal implementation
# that picks a near-square grid large enough to hold all feature maps:
def get_row_col(num_maps):
    rows = int(np.ceil(np.sqrt(num_maps)))
    cols = int(np.ceil(num_maps / rows))
    return rows, cols

# The input should be feature_maps with shape (H, W, channels).
# The figure below visualizes relu1_2, whose 64 channels tile into an 8x8 grid.
def visualize_feature_map(feature_maps):
    # Plot every channel of one layer's feature maps as a subplot grid.
    print("visualize_feature_map shape:{}, dtype:{}".format(feature_maps.shape, feature_maps.dtype))
    num_maps = feature_maps.shape[2]
    plt.figure(figsize=(8, 7))
    row, col = get_row_col(num_maps)
    for i in range(num_maps):
        plt.subplot(row, col, i + 1)
        plt.imshow(feature_maps[:, :, i])
        plt.axis('off')

    plt.savefig('./rain_pair/relu1_2_feature_map.png')  # save the figure to disk
    plt.show()
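
A sketch of how the function could be driven end to end; the image path is hypothetical, the slice vgg_model[:4] follows from the structure printed above (layers 0-3 end at relu1_2), and preprocess is the transform sketched earlier:

from PIL import Image

img = Image.open('./rain_pair/input.png').convert('RGB')  # hypothetical path
x = preprocess(img).unsqueeze(0).to(device)               # (1, 3, H, W)

with torch.no_grad():
    relu1_2 = vgg_model[:4](x)  # (1, 64, H, W)

# Rearrange to (H, W, 64), as visualize_feature_map expects.
feature_maps = relu1_2.squeeze(0).permute(1, 2, 0).cpu().numpy()
visualize_feature_map(feature_maps)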
