YOLOv7特征层可视化

 以下代码为YOLOv7每层卷积层输出的可视化,同样可以应用在其他网络,需自行修改。(参考:GitHub - Jochiwon/yolov7-PyTorch-feature-map-visualization: my own yolov7 feature-map visualization code)

import torch
import models.common
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
import cv2 as cv
from torchvision import transforms
import math
import argparse
import os
import yaml

if __name__ == "__main__":
    # Visualize the output feature maps of every layer of a YOLOv7 model.
    # Usage:
    #   python viz-filter-feature-map.py --model <weights.pt> --cfg <model.yaml>
    #       --image <img> --name <output-folder-name>
    # Feature maps are written to ./visualize-filter/feature-map/<name>/conv_layer_<i>/.

    ap = argparse.ArgumentParser()
    ap.add_argument('--model', required=True, help='path to model')
    ap.add_argument('--cfg', required=True, help='path to cfg(.yaml) file of model')
    ap.add_argument('--image', required=True, help='path to image')
    ap.add_argument('--name', required=True, help='name of output folder')
    args = vars(ap.parse_args())
    print('\n')
    print(args)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"using device: {device}")

    # Model load.
    # NOTE(review): torch.load unpickles arbitrary Python objects — only load
    # checkpoints you trust. YOLOv7 checkpoints store the whole pickled module
    # under the 'model' key, so weights_only=True cannot be used here.
    model = torch.load(args['model'], map_location=device)

    # Dump the full model architecture to a text file for reference.
    with open('./model-log.txt', 'w') as f:
        print(model, file=f)

    layers = model['model'].model

    # Visualize Feature Map =========================================================

    # Load image and pre-process ====================
    img = cv.imread(args['image'])
    if img is None:
        # cv.imread returns None instead of raising; fail early with a clear message.
        raise FileNotFoundError(f"could not read image: {args['image']}")
    img = cv.cvtColor(img, cv.COLOR_BGR2RGB)

    transform = transforms.Compose([transforms.ToPILImage(),
                                transforms.Resize((224, 224)), # Can change
                                transforms.ToTensor()])

    im = transform(np.array(img)).unsqueeze(0).to(device)  # (1, 3, 224, 224)

    # Match the checkpoint's parameter dtype instead of forcing .half():
    # YOLOv7 checkpoints are usually saved in fp16, but half-precision conv
    # is not supported on CPU, so blindly calling .half() crashes there.
    param_dtype = next(model['model'].parameters()).dtype
    im = im.to(param_dtype)
    # =================================================
    # Getting Each Layer's Result =====================
    conv_out = [layers[0](im)]
    conv_layers_index = []

    # Getting Input Layer Information =====
    # Each cfg row is [from, number, module, args]; 'from' is either a single
    # previous-layer index (int, -1 = previous layer) or a list of indices for
    # multi-input layers such as Concat. Normalize everything to a list.
    with open(args['cfg']) as f:
        # safe_load is sufficient for YOLO cfg files and avoids executing
        # arbitrary YAML tags.
        data = yaml.safe_load(f)

    for row in data['backbone']:
        src = row[0]
        conv_layers_index.append([src] if isinstance(src, int) else src)

    # The last 'head' row is the Detect layer, which is skipped below.
    for row in data['head'][:-1]:
        src = row[0]
        conv_layers_index.append([src] if isinstance(src, int) else src)
    # ====================================
    print(conv_layers_index)

    # Apply image to model and save each intermediate result =====
    # Layer 0 was already run above; the final (Detect) layer is skipped.
    for i in range(1, len(layers) - 1):

        index = conv_layers_index[i]

        if len(index) == 1:
            src = conv_out[index[0]]
        else:
            # Multi-input layer (e.g. Concat): gather all referenced outputs.
            src = [conv_out[j] for j in index]

        conv_out.append(layers[i](src))
        # NOTE(review): on some torch/checkpoint version combinations, Upsample
        # raises "AttributeError: 'Upsample' object has no attribute
        # 'recompute_scale_factor'" — a known serialization mismatch; upgrading
        # the checkpoint or torch version is preferable to patching torch.
    # =========================================

    results = conv_out
    # =================================================

    # Make output folder for feature maps.
    feature_map_dir = os.path.join(os.getcwd(), 'visualize-filter', 'feature-map', args['name'])
    os.makedirs(feature_map_dir, exist_ok=True)

    # Save each feature map as an individual grayscale image.
    for i, result in enumerate(results):
        print("layer (" + str(i) + "/" + str(len(results) - 1) + ") Feature Map Processing... ")

        # Create a subfolder for each layer.
        layer_folder = os.path.join(feature_map_dir, f'conv_layer_{i}')
        os.makedirs(layer_folder, exist_ok=True)

        fmap = result[0].detach()  # drop the batch dimension -> (C, H, W)

        # Limit the number of channels saved per layer to keep output manageable.
        n_feature_maps_to_save = min(fmap.size(0), 32)

        for j in range(n_feature_maps_to_save):
            plt.figure()
            # .float() so matplotlib handles fp16 checkpoints as well.
            plt.imshow(fmap[j].float().cpu(), cmap='gray')
            plt.axis('off')

            plt.savefig(os.path.join(layer_folder, f'feature_map_{j}.jpg'), dpi=200)
            plt.close()  # close the figure to free memory

    print('Program is terminated.\n')

运行所需参数:python viz-filter-feature-map.py --model {YOUR MODEL'S WEIGHT(.pt) FILE} --cfg {CFG FILE(.yaml) OF YOUR MODEL} --image {YOUR IMG} --name {OUTPUT FOLDER NAME, CREATED AT ./visualize-filter/feature-map}

运行命令示例:python viz-filter-feature-map.py --model ./Please_Remember_Me/train/yolov7_Argo_origin4/weights/epoch_029.pt --cfg ./cfg/training/yolov7-tiny.yaml --image ./visualize-filter/src/cute-cat.jpg --name tiny-original

  • 8
    点赞
  • 13
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值