(1) Detection and feature visualization on a 640*480*3 dog image
This example uses yolov5s.pt from YOLOv5 release 6.1. The dog image is available on Baidu Netdisk:
Link: https://pan.baidu.com/s/1uPNK40bYCxHqIkd5LfMcuA
Extraction code: lf0h
import warnings
warnings.filterwarnings('ignore')
warnings.simplefilter('ignore')
import torch
import torch.nn as nn
import cv2
import numpy as np
import requests
import torchvision.transforms as transforms
from pytorch_grad_cam import EigenCAM
from pytorch_grad_cam.utils.image import show_cam_on_image, scale_cam_image
from PIL import Image
COLORS = np.random.uniform(0, 255, size=(80, 3))
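# One random color per COCO class index (80 classes), used to draw boxes and labels.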
def parse_detections(results):
    detections = results.pandas().xyxy[0]
    detections = detections.to_dict()
    boxes, colors, names = [], [], []
    for i in range(len(detections["xmin"])):
        confidence = detections["confidence"][i]
        if confidence < 0.2:
            continue
        xmin = int(detections["xmin"][i])
        ymin = int(detections["ymin"][i])
        xmax = int(detections["xmax"][i])
        ymax = int(detections["ymax"][i])
        name = detections["name"][i]
        category = int(detections["class"][i])
        color = COLORS[category]
        boxes.append((xmin, ymin, xmax, ymax))
        colors.append(color)
        names.append(name)
    return boxes, colors, names
def draw_detections(boxes, colors, names, img):
    for box, color, name in zip(boxes, colors, names):
        xmin, ymin, xmax, ymax = box
        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 2)
        cv2.putText(img,
                    name, (xmin, ymin - 5),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.8,
                    color,
                    2,
                    lineType=cv2.LINE_AA)
    return img
def ResziePadding(img, fixed_side=128):
    h, w = img.shape[0], img.shape[1]
    scale = max(w, h) / float(fixed_side)  # scale factor
    new_w, new_h = int(w / scale), int(h / scale)
    resize_img = cv2.resize(img, (new_w, new_h))  # resize while keeping the aspect ratio

    # Compute the padding needed on each side
    if new_w % 2 != 0 and new_h % 2 == 0:
        top, bottom = (fixed_side - new_h) // 2, (fixed_side - new_h) // 2
        left, right = (fixed_side - new_w) // 2 + 1, (fixed_side - new_w) // 2
    elif new_w % 2 == 0 and new_h % 2 != 0:
        top, bottom = (fixed_side - new_h) // 2 + 1, (fixed_side - new_h) // 2
        left, right = (fixed_side - new_w) // 2, (fixed_side - new_w) // 2
    elif new_w % 2 == 0 and new_h % 2 == 0:
        top, bottom = (fixed_side - new_h) // 2, (fixed_side - new_h) // 2
        left, right = (fixed_side - new_w) // 2, (fixed_side - new_w) // 2
    else:
        top, bottom = (fixed_side - new_h) // 2 + 1, (fixed_side - new_h) // 2
        left, right = (fixed_side - new_w) // 2 + 1, (fixed_side - new_w) // 2

    # Pad the resized image to a fixed_side x fixed_side square
    pad_img = cv2.copyMakeBorder(resize_img,
                                 top,
                                 bottom,
                                 left,
                                 right,
                                 cv2.BORDER_CONSTANT,
                                 value=[0, 0, 0])
    return pad_img
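# Quick sanity check for ResziePadding (not from the original post; a hedged
# example assuming a 480x640x3 array like the dog image in the title):
#   dummy = np.zeros((480, 640, 3), dtype=np.uint8)
#   square = ResziePadding(dummy, fixed_side=640)
#   print(square.shape)  # (640, 640, 3): 80 black rows padded above and below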
def letterbox(im,
              new_shape=(640, 640),
              color=(114, 114, 114),
              auto=True,
              scaleFill=False,
              scaleup=True,
              stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(im,
                            top,
                            bottom,
                            left,
                            right,
                            cv2.BORDER_CONSTANT,
                            value=color)  # add border
    return im, ratio, (dw, dh)
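# Quick sanity check for letterbox (not from the original post; a hedged example
# assuming a 480x640x3 array like the dog image in the title). With auto=True both
# 480 and 640 are already multiples of stride=32, so no padding is added:
#   dummy = np.zeros((480, 640, 3), dtype=np.uint8)
#   padded, ratio, (dw, dh) = letterbox(dummy, new_shape=(640, 640), auto=True)
#   print(padded.shape, ratio, (dw, dh))  # (480, 640, 3) (1.0, 1.0) (0.0, 0.0)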
class YoloPrediction(torch.nn.Module):
    # Thin wrapper that keeps only the first element of the YOLOv5 output,
    # so downstream code sees a single prediction tensor instead of a tuple/list.
    def __init__(self, model):
        super(YoloPrediction, self).__init__()
        self.model = model

    def forward(self, x):
        return self.model(x)[0]
if __name__ == "__main__":
    #image_path = "./runs/detect/1/11.png"
    image_path = "Puppies.jpg"
    img = np.array(Image.open(image_path))
    #img = cv2.resize(img, (640, 640))
    #img = letterbox(img, new_shape=(2016, 1216), auto=True, scaleFill=False)[0]
    img = letterbox(img, new_shape=(640, 640), auto=True, scaleFill=False)[0]
    #img = ResziePadding(img, fixed_side=1216)
    rgb_img = img.copy()
    img = np.float32(img) / 255
    transform = transforms.ToTensor()
    tensor = transform(img).unsqueeze(0)

    #model = torch.hub.load('./', 'custom', path='custom.pt', source='local')
    model = torch.hub.load('./', 'yolov5s', source='local')
    model.eval()
    model.cpu()

    # rgb_img[:, :, ::-1]
    #rgb_img = rgb_img.transpose(2, 0, 1)  # hwc->chw
    #results = model([rgb_img], size=(2016))
    results = model([rgb_img])
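The listing is cut off at this point. For reference, here is a minimal sketch of how the EigenCAM step typically continues, following the pytorch_grad_cam "EigenCAM for YOLOv5" tutorial rather than the original post; the target layer index, the optional use of the YoloPrediction wrapper, and the output filename are assumptions:
    # --- hedged sketch, not from the original post ---
    boxes, colors, names = parse_detections(results)
    detections = draw_detections(boxes, colors, names, rgb_img.copy())

    # Target the last C3 block before the Detect head (index is an assumption).
    target_layers = [model.model.model.model[-2]]
    cam = EigenCAM(model, target_layers)
    # cam = EigenCAM(YoloPrediction(model), target_layers)  # the wrapper may be needed with newer releases
    grayscale_cam = cam(tensor)[0, :, :]
    cam_image = show_cam_on_image(img, grayscale_cam, use_rgb=True)
    Image.fromarray(np.hstack((rgb_img, cam_image, detections))).save("eigencam_dog.jpg")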