[Object Detection] Video Inference with YOLOv5

1. Preparation

https://blog.csdn.net/qq_44747572?spm=1000.2115.3001.5343
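
Before wiring the model into a video loop, it is worth sanity-checking the exported ONNX file. A minimal sketch, assuming the model was exported as yolov5l.onnx with a 640x640 input:

import onnxruntime

# print the input/output nodes of the exported model (names and shapes)
session = onnxruntime.InferenceSession("yolov5l.onnx")
for node in session.get_inputs():
    print("input:", node.name, node.shape)    # e.g. images [1, 3, 640, 640]
for node in session.get_outputs():
    print("output:", node.name, node.shape)   # e.g. output [1, 25200, num_classes + 5]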

2. Related Files

(figure: screenshot of the project files used below)

3. Video Inference

3.1 infer
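
The script below loads the ONNX model with onnxruntime, letterboxes each frame to 640x640, runs the network, applies non_max_suppression, maps the surviving boxes back to the original frame, and writes the annotated frames out as a new video.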

# coding: utf-8
import cv2
import numpy as np
import onnxruntime
import torch
import torchvision
import time
import random
from general import non_max_suppression  # non_max_suppression comes from the YOLOv5 repo's utils/general.py


class YOLOV5_ONNX(object):
	def __init__(self,onnx_path):
		'''Initialize the ONNX Runtime session'''
		self.onnx_session=onnxruntime.InferenceSession(onnx_path)
		print(onnxruntime.get_device())
		self.input_name=self.get_input_name()
		self.output_name=self.get_output_name()
		self.classes=['silica gel is abnormal', 'silica gel is normal', 'open door', 'close door', 'breakage', 'dirt', 'rust', 'foreign object', 'oil leakage', 'animal', 'hat', 'person']

	def get_input_name(self):
		input_name=[]
		for node in self.onnx_session.get_inputs():
			input_name.append(node.name)

		return input_name

	def get_output_name(self):
		output_name=[]
		for node in self.onnx_session.get_outputs():
			output_name.append(node.name)

		return output_name

	def get_input_feed(self,image_tensor):
		input_feed={}
		for name in self.input_name:
			input_feed[name]=image_tensor

		return input_feed

	def letterbox(self,img, new_shape=(640, 640), color=(114, 114, 114), auto=False, scaleFill=False, scaleup=True, stride=32):
		shape = img.shape[:2]
		if isinstance(new_shape, int):
			new_shape = (new_shape, new_shape)

		r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
		if not scaleup:
			r = min(r, 1.0)

		ratio = r, r

		new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
		dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]

		if auto:  # minimum rectangle
			dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
		elif scaleFill:  # stretch
			dw, dh = 0.0, 0.0
			new_unpad = (new_shape[1], new_shape[0])
			ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]

		dw /= 2
		dh /= 2

		if shape[::-1] != new_unpad:  # resize
			img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)

		top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
		left, right = int(round(dw - 0.1)), int(round(dw + 0.1))

		img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
		return img, ratio, (dw, dh)

	def xywh2xyxy(self,x):
		y = np.copy(x)

		y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
		y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
		y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
		y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y

		return y

	def nms(self,prediction, conf_thres=0.1, iou_thres=0.6, agnostic=False):
		if prediction.dtype is torch.float16:
			prediction = prediction.float()  # to FP32
		xc = prediction[..., 4] > conf_thres  # candidates
		min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
		max_det = 300  # maximum number of detections per image
		output = [None] * prediction.shape[0]
		for xi, x in enumerate(prediction):  # image index, image inference
			x = x[xc[xi]]  # confidence
			if not x.shape[0]:
				continue

			x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
			box = self.xywh2xyxy(x[:, :4])

			conf, j = x[:, 5:].max(1, keepdim=True)
			x = torch.cat((torch.tensor(box), conf, j.float()), 1)[conf.view(-1) > conf_thres]
			n = x.shape[0]  # number of boxes
			if not n:
				continue
			c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
			boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
			i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
			if i.shape[0] > max_det:  # limit detections
				i = i[:max_det]
			output[xi] = x[i]

		return output

	def clip_coords(self,boxes, img_shape):
		boxes[:, 0].clamp_(0, img_shape[1])
		boxes[:, 1].clamp_(0, img_shape[0])
		boxes[:, 2].clamp_(0, img_shape[1])
		boxes[:, 3].clamp_(0, img_shape[0])

	def scale_coords(self,img1_shape, coords, img0_shape, ratio_pad=None):
		if ratio_pad is None:
			gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])
			pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (
						img1_shape[0] - img0_shape[0] * gain) / 2
		else:
			gain = ratio_pad[0][0]
			pad = ratio_pad[1]

		coords[:, [0, 2]] -= pad[0]
		coords[:, [1, 3]] -= pad[1]
		coords[:, :4] /= gain
		self.clip_coords(coords, img0_shape)
		return coords

	def sigmoid(self,x):
		return 1 / (1 + np.exp(-x))

	def infer(self, src_img, output):
		img_size=(640,640)
		# src_img=cv2.imread(img_path)
		src_size=src_img.shape[:2]
		img=self.letterbox(src_img,img_size,stride=32)[0]
		img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
		img = np.ascontiguousarray(img)
		img=img.astype(dtype=np.float32)
		img/=255.0
		img=np.expand_dims(img,axis=0)
		input_feed=self.get_input_feed(img)
		pred = torch.tensor(self.onnx_session.run(None, input_feed)[0])
		results = non_max_suppression(pred, 0.5, 0.5)  # conf_thres=0.5, iou_thres=0.5
		img_shape=img.shape[2:]
		det = results[0] if len(results) else None  # batch size is 1: take the first image's detections
		if det is not None and len(det):
			det[:, :4] = self.scale_coords(img_shape, det[:, :4], src_size).round()
			self.draw(src_img, det, output)
			return det.numpy().tolist(), src_img
		return [], src_img

	def plot_one_box(self,x, img, color=None, label=None, line_thickness=None):
		# Plots one bounding box on image img
		tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
		color = color or [random.randint(0, 255) for _ in range(3)]
		c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
		cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
		if label:
			tf = max(tl - 1, 1)  # font thickness
			t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
			c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
			cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
			cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)

	def draw(self,img, boxinfo, output):
		# colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(self.classes))]
		for *xyxy, conf, cls in boxinfo:
			label = '%s %.2f' % (self.classes[int(cls)], conf)
			if int(cls) == 0:  # only class 0 is drawn; drop this check to draw every class
				self.plot_one_box(xyxy, img, label=label, color=[0, 0, 255], line_thickness=1)
		# cv2.imwrite(output, img)
		return 0


if __name__=="__main__":
	# initialize the model once, outside the frame loop
	import os
	model = YOLOV5_ONNX(onnx_path="yolov5l.onnx")

	videopath = r'D:\AI\PyMyWork\AI\TargetDetect\Yolov5\yolo-video\test.mp4'
	path_filename = './yolov5-video'
	videodata = cv2.VideoCapture(videopath)
	# fourcc = cv2.VideoWriter_fourcc(*'XVID')
	fourcc = cv2.VideoWriter_fourcc(*'mp4v')
	fps = videodata.get(cv2.CAP_PROP_FPS)
	success, frame = videodata.read()
	if not success:
		raise RuntimeError('failed to read the first frame of ' + videopath)
	size = (frame.shape[1], frame.shape[0])  # (width, height)
	videodata.set(cv2.CAP_PROP_POS_FRAMES, 0)  # rewind so the first frame is processed too
	os.makedirs(path_filename, exist_ok=True)  # make sure the output directory exists
	out = cv2.VideoWriter(path_filename + '/' + 'yolov5l-result' + '.mp4', fourcc, fps, size)
	count = 0
	timef = 1  # process one frame every timef frames
	for i in range(int(videodata.get(cv2.CAP_PROP_FRAME_COUNT))):
		count += 1
		success, frame = videodata.read()
		if success and (count % timef == 0):
			det, img = model.infer(frame, output="res1.jpg")
			out.write(img)
	out.release()
	videodata.release()

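To see why scale_coords first subtracts the padding and then divides by the gain, it helps to run the letterbox arithmetic by hand. A small sketch, assuming a 1080x1920 source frame and the 640x640 network input used above:

# letterbox math on paper: fit a 1080x1920 frame into 640x640, then pad
h, w = 1080, 1920
r = min(640 / h, 640 / w)                   # gain = 1/3
new_unpad = (round(w * r), round(h * r))    # (640, 360), the resized size
dw = (640 - new_unpad[0]) / 2               # 0.0, horizontal padding per side
dh = (640 - new_unpad[1]) / 2               # 140.0, vertical padding per side
# a box corner at y=460 in the letterboxed image maps back to (460 - dh) / r = 960 in the source
print(r, new_unpad, (dw, dh))

One more note on the driver loop: with timef > 1 only every timef-th frame is written, so the output video plays faster than the source unless the VideoWriter is created with fps / timef instead of fps.
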
3.2 infer_video
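
This variant keeps the same preprocessing and post-processing, but moves the video loop inside infer(), prints per-stage timings, and writes the annotated frames to test.avi.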

# coding: utf-8
import cv2
import numpy as np
import onnxruntime
import torch
import torchvision
import time
import random
from utils.general import non_max_suppression  # from the YOLOv5 repo


class YOLOV5_ONNX(object):
	def __init__(self,onnx_path):
		'''Initialize the ONNX Runtime session'''
		self.onnx_session=onnxruntime.InferenceSession(onnx_path)
		print(onnxruntime.get_device())
		self.input_name=self.get_input_name()
		self.output_name=self.get_output_name()
		self.classes=['person', 'car', 'special_person', 'truck']

	def get_input_name(self):
		'''Get input node names'''
		input_name=[]
		for node in self.onnx_session.get_inputs():
			input_name.append(node.name)

		return input_name


	def get_output_name(self):
		'''Get output node names'''
		output_name=[]
		for node in self.onnx_session.get_outputs():
			output_name.append(node.name)

		return output_name

	def get_input_feed(self,image_tensor):
		'''Build the input feed dict'''
		input_feed={}
		for name in self.input_name:
			input_feed[name]=image_tensor

		return input_feed

	def letterbox(self,img, new_shape=(640, 640), color=(114, 114, 114), auto=False, scaleFill=False, scaleup=True,
				  stride=32):
		'''Resize and pad the image to the target shape (letterbox)'''
		# Resize and pad image while meeting stride-multiple constraints
		shape = img.shape[:2]  # current shape [height, width]
		if isinstance(new_shape, int):
			new_shape = (new_shape, new_shape)

		# Scale ratio (new / old)
		r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
		if not scaleup:  # only scale down, do not scale up (for better test mAP)
			r = min(r, 1.0)

		# Compute padding
		ratio = r, r  # width, height ratios

		new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
		dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding

		if auto:  # minimum rectangle
			dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
		elif scaleFill:  # stretch
			dw, dh = 0.0, 0.0
			new_unpad = (new_shape[1], new_shape[0])
			ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

		dw /= 2  # divide padding into 2 sides
		dh /= 2

		if shape[::-1] != new_unpad:  # resize
			img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)

		top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
		left, right = int(round(dw - 0.1)), int(round(dw + 0.1))

		img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
		return img, ratio, (dw, dh)

	def xywh2xyxy(self,x):
		# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
		y = np.copy(x)

		y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
		y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
		y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
		y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y

		return y

	def nms(self,prediction, conf_thres=0.1, iou_thres=0.6, agnostic=False):
		if prediction.dtype is torch.float16:
			prediction = prediction.float()  # to FP32
		xc = prediction[..., 4] > conf_thres  # candidates
		min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
		max_det = 300  # maximum number of detections per image
		output = [None] * prediction.shape[0]
		for xi, x in enumerate(prediction):  # image index, image inference
			x = x[xc[xi]]  # confidence
			if not x.shape[0]:
				continue

			x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
			box = self.xywh2xyxy(x[:, :4])

			conf, j = x[:, 5:].max(1, keepdim=True)
			x = torch.cat((torch.tensor(box), conf, j.float()), 1)[conf.view(-1) > conf_thres]
			n = x.shape[0]  # number of boxes
			if not n:
				continue
			c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
			boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
			i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
			if i.shape[0] > max_det:  # limit detections
				i = i[:max_det]
			output[xi] = x[i]

		return output

	def clip_coords(self,boxes, img_shape):
		'''Clip boxes so they stay inside the image'''
		# Clip bounding xyxy bounding boxes to image shape (height, width)
		boxes[:, 0].clamp_(0, img_shape[1])  # x1
		boxes[:, 1].clamp_(0, img_shape[0])  # y1
		boxes[:, 2].clamp_(0, img_shape[1])  # x2
		boxes[:, 3].clamp_(0, img_shape[0])  # y2

	def scale_coords(self,img1_shape, coords, img0_shape, ratio_pad=None):
		'''
		Map coordinates back to the original image (inverse of letterbox): subtract the padding, then divide by the scale ratio.
		:param img1_shape: network input shape
		:param coords: box coordinates in the network input
		:param img0_shape: original image shape
		:param ratio_pad: optional precomputed (ratio, pad)
		:return: coords mapped onto img0_shape
		'''
		# Rescale coords (xyxy) from img1_shape to img0_shape
		if ratio_pad is None:  # calculate from img0_shape
			gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new (scale ratio)
			pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (
						img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
		else:
			gain = ratio_pad[0][0]
			pad = ratio_pad[1]

		coords[:, [0, 2]] -= pad[0]  # subtract x padding
		coords[:, [1, 3]] -= pad[1]  # subtract y padding
		coords[:, :4] /= gain  # map boxes back to the original image scale
		self.clip_coords(coords, img0_shape)  # boundary check
		return coords

	def sigmoid(self,x):
		return 1 / (1 + np.exp(-x))

	def infer(self, video_path):
		'''Run forward inference over every frame of a video'''
		# hyper-parameters
		img_size=(640,640)  # network input size

		vc = cv2.VideoCapture(video_path)
		fourcc = cv2.VideoWriter_fourcc(*'mp4v')
		fps = vc.get(cv2.CAP_PROP_FPS)
		size = (int(vc.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT)))
		videoWriter = cv2.VideoWriter('test.avi', fourcc, fps, size)
		while True:
			rval, src_img = vc.read()
			if not rval:
				break
			start=time.time()
			src_size=src_img.shape[:2]

			# letterbox: resize and pad
			img=self.letterbox(src_img,img_size,stride=32)[0]

			# Convert
			img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
			img = np.ascontiguousarray(img)

			# normalize to [0, 1]
			img=img.astype(dtype=np.float32)
			img/=255.0

			# add batch dimension
			img=np.expand_dims(img,axis=0)
			print('preprocess: ', time.time() - start)
			# forward inference
			input_feed=self.get_input_feed(img)
			pred = torch.tensor(self.onnx_session.run(None, input_feed)[0])
			results = non_max_suppression(pred, 0.5, 0.5)  # conf_thres=0.5, iou_thres=0.5
			print('inference + nms: ', time.time() - start)

			# map the boxes back to the original image
			img_shape = img.shape[2:]
			det = results[0] if len(results) else None  # batch size is 1
			if det is not None and len(det):
				det[:, :4] = self.scale_coords(img_shape, det[:, :4], src_size).round()
			print('total: ', time.time() - start)
			if det is not None and len(det):
				for i in range(len(det)):
					if int(det[i][5]) == 0:  # class 0 is 'person'
						c1, c2 = (int(det[i][0]), int(det[i][1])), (int(det[i][2]), int(det[i][3]))
						cv2.rectangle(src_img, c1, c2, (0, 0, 255), 2)
						cv2.putText(src_img, "person", (c1[0], c1[1] - 2), 0, 0.5, [225, 255, 255], 1)
			videoWriter.write(src_img)
		vc.release()
		videoWriter.release()

if __name__=="__main__":
	model=YOLOV5_ONNX(onnx_path="yolov5s.onnx")
	model.infer(video_path="mmexport1650357560408.mp4")
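
To spot-check the result without a separate player, here is a minimal viewer loop over the finished test.avi (a sketch, assuming the file was written successfully):

import cv2

cap = cv2.VideoCapture('test.avi')
while True:
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow('result', frame)
    if cv2.waitKey(25) & 0xFF == ord('q'):  # ~40 fps playback; press q to quit
        break
cap.release()
cv2.destroyAllWindows()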

3.3 test-video
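
Finally, a small standalone helper that samples every Nth frame of a video and saves it as a JPEG, which is handy for building a labeling set from raw footage.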

# -*- coding: utf-8 -*-
#@Function: 
#@Time : 2022/4/19 15:04
#@Author : yx
#@File : test-video.py
#@Software : PyCharm


import os
import cv2
import logging

# log information settings
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s: %(message)s')


def save_image(num, image):
    """Save the images.

    Args:
        num: serial number
        image: image resource

    Returns:
        None
    """
    image_path = './raw_pictures/{}.jpg'.format(str(num))
    cv2.imwrite(image_path, image)


file_path = './test.mp4'
os.makedirs('./raw_pictures', exist_ok=True)  # make sure the output directory exists

vc = cv2.VideoCapture(file_path)  # open the video file

# determine whether to open normally
if vc.isOpened():
    ret, frame = vc.read()
else:
    ret = False

count = 0  # number of pictures saved so far
frame_interval = 5  # save one frame out of every frame_interval frames
frame_interval_count = 0

# loop over the video frames
while ret:
    ret, frame = vc.read()
    if not ret:  # read failed: end of video
        break
    # save one frame every frame_interval frames
    if frame_interval_count % frame_interval == 0:
        save_image(count, frame)
        logging.info("num:" + str(count) + ", frame: " +
                     str(frame_interval_count))
        count += 1
    frame_interval_count += 1

vc.release()
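
The number of images this produces is roughly the frame count divided by frame_interval, which you can estimate up front without decoding any frames. A sketch, assuming the same test.mp4:

import cv2

vc = cv2.VideoCapture('./test.mp4')
total = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))
fps = vc.get(cv2.CAP_PROP_FPS)
vc.release()
frame_interval = 5
# e.g. a 10 s clip at 30 fps has ~300 frames; frame_interval = 5 keeps ~60 of them
print(total, fps, total // frame_interval)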
