YOLOv5: Visualizing Feature Maps and Detection Results

This post shows how to run object detection with YOLOv5 and how to visualize both the detections and the feature maps for a 640*480*3 dog image. It discusses how inference through torch.hub.load differs from running detect.py, and walks through the preprocessing the model performs internally, including image format conversion, resizing, and normalization. It also shares convolutional feature-map visualizations produced with the official Ultralytics YOLOv5 tooling.

(1) Detection and feature visualization on a 640*480*3 dog image

The model used here is yolov5s.pt from the v6.1 release. I have put the dog image on Baidu Netdisk:

Link: https://pan.baidu.com/s/1uPNK40bYCxHqIkd5LfMcuA
Extraction code: lf0h
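
Before the full script, a quick note on the difference mentioned above: the model returned by torch.hub.load is wrapped in AutoShape, so when it receives a file path, PIL image or numpy array it letterboxes the image, converts HWC to CHW and scales pixels to [0, 1] internally, then runs NMS and returns a Detections object. detect.py instead does this preprocessing explicitly and calls the raw model. A rough sketch of the two input paths (dog.jpg is just a placeholder file name):

import torch
import numpy as np
from PIL import Image

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # AutoShape-wrapped hub model

# Raw image in: AutoShape letterboxes, converts HWC->CHW, divides by 255,
# runs NMS and returns a Detections object.
results = model(Image.open('dog.jpg'))
print(results.pandas().xyxy[0])

# Tensor in: AutoShape skips its own preprocessing and feeds the tensor straight
# to the underlying model (this is the path detect.py prepares by hand), so the
# return value is the raw prediction without NMS.
img = np.float32(Image.open('dog.jpg').resize((640, 640))) / 255
tensor = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0)
raw_pred = model(tensor)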

import warnings

warnings.filterwarnings('ignore')
warnings.simplefilter('ignore')
import torch
import torch.nn as nn
import cv2
import numpy as np
import requests
import torchvision.transforms as transforms
from pytorch_grad_cam import EigenCAM
from pytorch_grad_cam.utils.image import show_cam_on_image, scale_cam_image
from PIL import Image

COLORS = np.random.uniform(0, 255, size=(80, 3))


def parse_detections(results):
    detections = results.pandas().xyxy[0]
    detections = detections.to_dict()
    boxes, colors, names = [], [], []

    for i in range(len(detections["xmin"])):
        confidence = detections["confidence"][i]
        if confidence < 0.2:
            continue
        xmin = int(detections["xmin"][i])
        ymin = int(detections["ymin"][i])
        xmax = int(detections["xmax"][i])
        ymax = int(detections["ymax"][i])
        name = detections["name"][i]
        category = int(detections["class"][i])
        color = COLORS[category]

        boxes.append((xmin, ymin, xmax, ymax))
        colors.append(color)
        names.append(name)
    return boxes, colors, names


def draw_detections(boxes, colors, names, img):
    for box, color, name in zip(boxes, colors, names):
        xmin, ymin, xmax, ymax = box
        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 2)

        cv2.putText(img,
                    name, (xmin, ymin - 5),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.8,
                    color,
                    2,
                    lineType=cv2.LINE_AA)
    return img


def ResziePadding(img, fixed_side=128):

    h, w = img.shape[0], img.shape[1]
    scale = max(w, h) / float(fixed_side)  # scale factor relative to the longer side
    new_w, new_h = int(w / scale), int(h / scale)
    resize_img = cv2.resize(img, (new_w, new_h))  # resize while keeping the aspect ratio

    # compute the padding on each side (odd/even cases keep the image centered)
    if new_w % 2 != 0 and new_h % 2 == 0:
        top, bottom, left, right = (fixed_side - new_h) // 2, (
            fixed_side - new_h) // 2, (fixed_side - new_w) // 2 + 1, (
                fixed_side - new_w) // 2
    elif new_w % 2 == 0 and new_h % 2 != 0:
        top, bottom, left, right = (fixed_side - new_h) // 2 + 1, (
            fixed_side - new_h) // 2, (fixed_side - new_w) // 2, (fixed_side -
                                                                  new_w) // 2
    elif new_w % 2 == 0 and new_h % 2 == 0:
        top, bottom, left, right = (fixed_side - new_h) // 2, (
            fixed_side - new_h) // 2, (fixed_side - new_w) // 2, (fixed_side -
                                                                  new_w) // 2
    else:
        top, bottom, left, right = (fixed_side - new_h) // 2 + 1, (
            fixed_side - new_h) // 2, (fixed_side - new_w) // 2 + 1, (
                fixed_side - new_w) // 2

    # pad to a fixed_side x fixed_side square with black borders
    pad_img = cv2.copyMakeBorder(resize_img,
                                 top,
                                 bottom,
                                 left,
                                 right,
                                 cv2.BORDER_CONSTANT,
                                 value=[0, 0, 0])

    return pad_img


def letterbox(im,
              new_shape=(640, 640),
              color=(114, 114, 114),
              auto=True,
              scaleFill=False,
              scaleup=True,
              stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[
        1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[
            0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(im,
                            top,
                            bottom,
                            left,
                            right,
                            cv2.BORDER_CONSTANT,
                            value=color)  # add border
    return im, ratio, (dw, dh)
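
# For the 640*480*3 dog image and the defaults above, for example,
#   im, ratio, (dw, dh) = letterbox(im, new_shape=(640, 640), auto=True)
# returns ratio = (1.0, 1.0) and dw = dh = 0: the longer side is already 640,
# and with auto=True the remaining padding is taken modulo the stride (32),
# so the 160-pixel gap on the shorter side needs no border at all.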


class YoloPrediction(torch.nn.Module):
    # Thin wrapper that keeps only the first element of the YOLOv5 output
    # (the prediction tensor), which is handy when handing the model to
    # CAM-style visualization tools that expect a single tensor.
    def __init__(self, model):
        super(YoloPrediction, self).__init__()

    def forward(self, x):
        return self.model(x)[0]


if __name__ == "__main__":

    #image_path = "./runs/detect/1/11.png"
    image_path = "Puppies.jpg"
    img = np.array(Image.open(image_path))
    #img = cv2.resize(img, (640, 640))
    #img = letterbox(img, new_shape=(2016, 1216), auto=True, scaleFill=False)[0]
    img = letterbox(img, new_shape=(640, 640), auto=True, scaleFill=False)[0]
    #img = ResziePadding(img, fixed_side=1216)
    rgb_img = img.copy()

    img = np.float32(img) / 255
    transform = transforms.ToTensor()
    tensor = transform(img).unsqueeze(0)

    #model = torch.hub.load('./', 'custom', path='custom.pt', source='local')
    model = torch.hub.load('./', 'yolov5s', source='local')

    model.eval()
    model.cpu()

    # rgb_img[:, :, ::-1]

    #rgb_img = rgb_img.transpose(2, 0, 1)  #hwc->chw

    #results = model([rgb_img], size=(2016))
    results = model([rgb_img], size=640)
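
A minimal sketch of how the script can continue from here, using the helper functions defined above, the YoloPrediction wrapper, and EigenCAM from pytorch_grad_cam; the target-layer choice follows the pytorch-grad-cam YOLOv5 tutorial, and the output file names are just placeholders:

    # Parse and draw the detections returned by the AutoShape model.
    boxes, colors, names = parse_detections(results)
    detection_img = draw_detections(boxes, colors, names, rgb_img.copy())
    Image.fromarray(detection_img).save("detections.jpg")

    # EigenCAM on a late backbone/neck layer; the wrapper hands the CAM tool a
    # single prediction tensor instead of YOLOv5's (prediction, features) tuple.
    target_layers = [model.model.model.model[-2]]
    cam = EigenCAM(YoloPrediction(model), target_layers)
    grayscale_cam = cam(tensor)[0, :, :]
    cam_image = show_cam_on_image(img, grayscale_cam, use_rgb=True)
    Image.fromarray(cam_image).save("eigencam.jpg")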