OpenCV Object Tracking in Practice

Notes from a hands-on object-tracking project.

OpenCV ships eight tracking algorithms; this walkthrough uses KCF.
KCF (Kernelized Correlation Filter) roughly works as follows:
(1) Draw a window around the target to track, i.e., its position in the first frame.
(2) Pad the window edges to obtain one positive sample and many negative samples.
(3) The label y is 0 for negatives and 1 for positives. A response is computed for each sample; the response falls off with the sample's distance from the target, so the farther away, the smaller the response. The tracker reports the position with the maximum response.
To enlarge the training set cheaply, KCF generates cyclic shifts of the base sample: a window abcd (1234) becomes shifted copies such as dabc (4123) and cdab (3412), which makes the sample matrix circulant and lets training be done efficiently in the Fourier domain.
A rough understanding is enough here; see the original papers for the details.
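
A minimal NumPy sketch of the cyclic-shift idea (illustrative only; the real KCF works with 2-D patches, kernels, and the Fourier domain):

import numpy as np

# Base sample: a 1-D "window" for simplicity; KCF shifts 2-D image patches.
base = np.array([1, 2, 3, 4])

# All cyclic shifts of the base sample form a circulant matrix;
# each row acts as one synthetic training sample.
X = np.stack([np.roll(base, k) for k in range(len(base))])

# Gaussian-shaped regression targets: the unshifted sample gets a label near 1,
# and labels decay as the shift (distance from the true position) grows.
shifts = np.arange(len(base))
dist = np.minimum(shifts, len(base) - shifts)  # circular distance
y = np.exp(-0.5 * dist ** 2)

print(X)            # the circulant sample matrix
print(y.round(3))   # the soft labels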

# Tracking algorithms already implemented in OpenCV
OPENCV_OBJECT_TRACKERS = {
	"csrt": cv2.TrackerCSRT_create,
	# the one used most often (and in this article)
	"kcf": cv2.TrackerKCF_create,
	"boosting": cv2.TrackerBoosting_create,
	"mil": cv2.TrackerMIL_create,
	"tld": cv2.TrackerTLD_create,
	"medianflow": cv2.TrackerMedianFlow_create,
	# correlation-filter tracker, proposed in 2010
	"mosse": cv2.TrackerMOSSE_create
}
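
Note: in OpenCV 4.5+ (opencv-contrib-python) most of these constructors were moved into the cv2.legacy namespace, and cv2.MultiTracker_create became cv2.legacy.MultiTracker_create, so the dictionary above fails on newer builds. A small version-tolerant fallback sketch:

# Try the cv2.legacy namespace first (OpenCV 4.5+), then the old top-level name
def make_tracker(name):
	legacy = getattr(cv2, "legacy", None)
	ctor = getattr(legacy, "Tracker%s_create" % name, None) if legacy else None
	if ctor is None:
		ctor = getattr(cv2, "Tracker%s_create" % name)
	return ctor()

# e.g. tracker = make_tracker("KCF")  or  make_tracker("MOSSE")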

Calling the algorithm:

# Instantiate OpenCV's multi-object tracker
# (a container that can hold several single-object trackers)
trackers = cv2.MultiTracker_create()
vs = cv2.VideoCapture(args["video"])

Read the video stream frame by frame, update the trackers on each frame, and display the result.

import argparse
import time
import cv2
import numpy as np

# Command-line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type=str,
	help="path to input video file")
ap.add_argument("-t", "--tracker", type=str, default="kcf",
	help="OpenCV object tracker type")
args = vars(ap.parse_args())

# Tracking algorithms already implemented in OpenCV
OPENCV_OBJECT_TRACKERS = {
	"csrt": cv2.TrackerCSRT_create,
	"kcf": cv2.TrackerKCF_create,
	"boosting": cv2.TrackerBoosting_create,
	"mil": cv2.TrackerMIL_create,
	"tld": cv2.TrackerTLD_create,
	"medianflow": cv2.TrackerMedianFlow_create,
	"mosse": cv2.TrackerMOSSE_create
}

# Instantiate OpenCV's multi-object tracker
# (a container that can hold several single-object trackers)
trackers = cv2.MultiTracker_create()
vs = cv2.VideoCapture(args["video"])

# Loop over the video stream
while True:
	# Grab the current frame; read() returns a (grabbed, frame) tuple
	frame = vs.read()
	frame = frame[1]
	# If we have reached the end of the video, stop
	if frame is None:
		break

	# Resize each frame (the original video is a bit large)
	(h, w) = frame.shape[:2]
	width = 600
	# keep the scale ratio r so the height shrinks proportionally
	r = width / float(w)
	dim = (width, int(h * r))
	frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)

	# Update the trackers; on the first frame none have been added yet
	(success, boxes) = trackers.update(frame)
	# Draw each tracked region
	for box in boxes:
		(x, y, w, h) = [int(v) for v in box]
		cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
	# Display the frame
	cv2.imshow("Frame", frame)
	key = cv2.waitKey(100) & 0xFF

	# ROI selection (with no trackers yet on the first frame, you come straight here)
	if key == ord("s"):
		# Built-in OpenCV helper: press 's' to pause, then drag to select a region
		box = cv2.selectROI("Frame", frame, fromCenter=False,
			showCrosshair=True)

		# Create a new tracker for the selected box
		tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
		trackers.add(tracker, frame, box)

	# Exit on Esc
	elif key == 27:
		break
vs.release()
cv2.destroyAllWindows()

When the target gets occluded, tracking quality drops noticeably and the tracker may fail entirely.
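
One cheap mitigation is to watch the success flag that trackers.update returns and warn the user (or re-initialize) when a track is lost; a minimal sketch that drops into the loop above:

	# Inside the frame loop: warn when the update step reports failure,
	# so the user can press 's' again and re-select the target
	(success, boxes) = trackers.update(frame)
	if not success and len(boxes) > 0:
		cv2.putText(frame, "Tracking failure - press 's' to re-select",
			(10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)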

Object tracking upgraded: dropping the manual box selection

Compared with the previous version, the manual box-selection step is gone; instead, targets are first detected automatically, and the detected targets are then tracked.

Suggested order for studying deep-learning detectors: Faster R-CNN, then SSD, then YOLO v3, then Mask R-CNN (the most general).

This experiment uses an SSD model: detection runs on the first frame with the SSD detection framework.
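
For reference, the output of this Caffe MobileNet-SSD through cv2.dnn has shape (1, 1, N, 7): each of the N rows is [image_id, class_id, confidence, x_min, y_min, x_max, y_max], with the four coordinates normalized to [0, 1]. A short decoding sketch (w and h are the frame's width and height):

# detections has shape (1, 1, N, 7); decode each candidate row
for i in range(detections.shape[2]):
	confidence = detections[0, 0, i, 2]
	class_id = int(detections[0, 0, i, 1])
	# normalized corners scaled back to pixel coordinates
	box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
	(x1, y1, x2, y2) = box.astype("int")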

dlib handles the per-target tracking (the detection itself is done by SSD); its correlation_tracker is documented on the dlib site.
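
A minimal stand-alone sketch of the dlib correlation-tracker API used below (first_rgb and next_rgb stand in for real RGB frames):

import dlib

t = dlib.correlation_tracker()
# Initialize on the first frame with a (left, top, right, bottom) rectangle
t.start_track(first_rgb, dlib.rectangle(10, 20, 110, 220))

# On every later frame: update, then read back the new position
t.update(next_rgb)
pos = t.get_position()
print(pos.left(), pos.top(), pos.right(), pos.bottom())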
The complete code:

# Import packages
from utils import FPS
import numpy as np
import argparse
# dlib provides a range of machine-learning algorithms, including the correlation tracker used here
import dlib
import cv2
"""
--prototxt mobilenet_ssd/MobileNetSSD_deploy.prototxt 
--model mobilenet_ssd/MobileNetSSD_deploy.caffemodel 
--video race.mp4
"""
# Command-line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
	help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
	help="path to Caffe pre-trained model")
ap.add_argument("-v", "--video", required=True,
	help="path to input video file")
ap.add_argument("-o", "--output", type=str,
	help="path to optional output video file")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
	help="minimum probability to filter weak detections")
args = vars(ap.parse_args())

# SSD class labels
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
	"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
	"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
	"sofa", "train", "tvmonitor"]

# Load the serialized model
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# Initialize the video stream
print("[INFO] starting video stream...")
vs = cv2.VideoCapture(args["video"])
writer = None

# We will track several targets, so keep lists of trackers and their labels
trackers = []
labels = []

# Start the FPS counter
fps = FPS().start()

while True:
	# Read one frame
	(grabbed, frame) = vs.read()

	# End of the video?
	if frame is None:
		break

	# Preprocessing: resize the frame and convert BGR to RGB (dlib expects RGB)
	(h, w) = frame.shape[:2]
	width=600
	r = width / float(w)
	dim = (width, int(h * r))
	frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
	rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

	# If we are saving the result, set up the video writer
	if args["output"] is not None and writer is None:
		fourcc = cv2.VideoWriter_fourcc(*"MJPG")
		writer = cv2.VideoWriter(args["output"], fourcc, 30,
			(frame.shape[1], frame.shape[0]), True)

	# The following if/else is the key part of this version:
	# detect first, then track
	if len(trackers) == 0:
		# Build the input blob
		(h, w) = frame.shape[:2]
		# 0.007843 = 1/127.5, the scale factor applied after subtracting the mean of 127.5
		blob = cv2.dnn.blobFromImage(frame, 0.007843, (w, h), 127.5)

		# Run a forward pass to get the detection results
		net.setInput(blob)
		# detections holds every candidate object the network found
		detections = net.forward()

		# Loop over the detections
		for i in np.arange(0, detections.shape[2]):
			# Several objects may be detected (this is the i-th); read its confidence so weak ones can be dropped
			confidence = detections[0, 0, i, 2]

			# Filter out weak detections
			if confidence > args["confidence"]:
				# Extract the index of the class label from the detections list
				idx = int(detections[0, 0, i, 1])
				label = CLASSES[idx]

				# Keep only people (our target class is person)
				if CLASSES[idx] != "person":
					continue

				# Get the bounding box (coordinates are relative to the whole frame)
				#print(detections[0, 0, i, 3:7])
				box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
				(startX, startY, endX, endY) = box.astype("int")

				# Use dlib for the actual tracking
				#http://dlib.net/python/index.html#dlib.correlation_tracker
				t = dlib.correlation_tracker()
				rect = dlib.rectangle(int(startX), int(startY), int(endX), int(endY))
				# start tracking
				t.start_track(rgb, rect)

				# Remember this tracker and its label
				labels.append(label)
				trackers.append(t)

				# Draw and show the detection
				cv2.rectangle(frame, (startX, startY), (endX, endY),
					(0, 255, 0), 2)
				cv2.putText(frame, label, (startX, startY - 15),
					cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)

	# All people have been detected; once boxes exist, we can track directly
	else:
		# Update every tracker
		for (t, l) in zip(trackers, labels):
			t.update(rgb)
			# latest tracked position
			pos = t.get_position()

			# Unpack the position
			startX = int(pos.left())
			startY = int(pos.top())
			endX = int(pos.right())
			endY = int(pos.bottom())

			# Draw it
			cv2.rectangle(frame, (startX, startY), (endX, endY),
				(0, 255, 0), 2)
			cv2.putText(frame, l, (startX, startY - 15),
				cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)

	# Optionally write the frame to the output video
	if writer is not None:
		writer.write(frame)

	# Display
	cv2.imshow("Frame", frame)
	key = cv2.waitKey(1) & 0xFF

	# Exit on Esc
	if key == 27:
		break

	# Update the FPS counter
	fps.update()


fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

if writer is not None:
	writer.release()

cv2.destroyAllWindows()
vs.release()
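
The FPS helper imported from utils at the top is just a small timer. If you don't have that file, a minimal sketch with the same interface, reconstructed from how it is called above, could look like this:

import datetime

class FPS:
	# Minimal frames-per-second timer with the start/update/stop/elapsed/fps interface used above
	def __init__(self):
		self._start = None
		self._end = None
		self._numFrames = 0

	def start(self):
		self._start = datetime.datetime.now()
		return self

	def update(self):
		# call once per processed frame
		self._numFrames += 1

	def stop(self):
		self._end = datetime.datetime.now()

	def elapsed(self):
		# seconds between start() and stop()
		return (self._end - self._start).total_seconds()

	def fps(self):
		return self._numFrames / self.elapsed()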

Error encountered:
cv2.error: OpenCV(4.8.0) D:\a\opencv-python\opencv-python\opencv\modules\dnn\src\caffe\caffe_io.cpp:1176: error: (-2:Unspecified error) FAILED: ReadProtoFromBinaryFile…xxxxxxx…in function ‘cv::dnn::ReadNetParamsFromBinaryFileOrDie’
This is almost always a path problem. In my case I didn't use the command-line args and hard-coded local file paths instead, and the failing line ended up with the same file path passed twice; replacing them with the correct paths fixed it. If the problem is in the config files themselves, other fixes can be found online.
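
A quick sanity check before loading the model catches this early (a small defensive addition, not part of the original code):

import os

# Fail fast with a readable message if either model file path is wrong
for path in (args["prototxt"], args["model"]):
	if not os.path.isfile(path):
		raise FileNotFoundError("Model file not found: " + path)
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])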

Improvement: use multiple processes

The multiprocessing module provides the tooling:
import multiprocessing
Every worker process runs the same routine:

def start_tracker(box, label, rgb, inputQueue, outputQueue):
	t = dlib.correlation_tracker()
	rect = dlib.rectangle(int(box[0]), int(box[1]), int(box[2]), int(box[3]))
	t.start_track(rgb, rect)

	while True:
		# Get the next frame from the input queue
		rgb = inputQueue.get()

		# Process it as long as it is not empty
		if rgb is not None:
			# Update the tracker with the new frame
			t.update(rgb)
			pos = t.get_position()

			startX = int(pos.left())
			startY = int(pos.top())
			endX = int(pos.right())
			endY = int(pos.bottom())
			
			# A worker process cannot simply return values,
			# so push the result onto the output queue; each tracked person passes through here once per frame
			outputQueue.put((label, (startX, startY, endX, endY)))
if __name__ == '__main__':
	# Setup (args, CLASSES, net, vs, writer, fps) is the same as in the previous script;
	# in addition, keep one input queue and one output queue per tracked object
	inputQueues = []
	outputQueues = []

	while True:
		(grabbed, frame) = vs.read()
	
		if frame is None:
			break
	
		(h, w) = frame.shape[:2]
		width=600
		r = width / float(w)
		dim = (width, int(h * r))
		frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
		rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
	
		if args["output"] is not None and writer is None:
			fourcc = cv2.VideoWriter_fourcc(*"MJPG")
			writer = cv2.VideoWriter(args["output"], fourcc, 30,
				(frame.shape[1], frame.shape[0]), True)
	
		# First, detect the targets' positions
		if len(inputQueues) == 0:
			(h, w) = frame.shape[:2]
			blob = cv2.dnn.blobFromImage(frame, 0.007843, (w, h), 127.5)
			net.setInput(blob)
			detections = net.forward()
			for i in np.arange(0, detections.shape[2]):
				confidence = detections[0, 0, i, 2]
				if confidence > args["confidence"]:
					idx = int(detections[0, 0, i, 1])
					label = CLASSES[idx]
					if CLASSES[idx] != "person":
						continue
					box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
					(startX, startY, endX, endY) = box.astype("int")
					bb = (startX, startY, endX, endY)
	
					# Create an input queue and an output queue: one process per detected person
					# (reaching this point means one more person was detected)
					iq = multiprocessing.Queue()
					oq = multiprocessing.Queue()
					inputQueues.append(iq)
					outputQueues.append(oq)
					
					# First-frame detection only. target names the worker function; each person
					# gets their own process and tracker, so afterwards there is one output queue per person
					p = multiprocessing.Process(
						target=start_tracker,
						args=(bb, label, rgb, iq, oq))
					p.daemon = True
					p.start()
					
					cv2.rectangle(frame, (startX, startY), (endX, endY),
						(0, 255, 0), 2)
					cv2.putText(frame, label, (startX, startY - 15),
						cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
	
		else:
			# Every tracker consumes the same input: push the current frame into each input queue
			for iq in inputQueues:
				iq.put(rgb)
	
			for oq in outputQueues:
				# Collect the updated position from each output queue
				(label, (startX, startY, endX, endY)) = oq.get()
	
				# Draw
				cv2.rectangle(frame, (startX, startY), (endX, endY),
					(0, 255, 0), 2)
				cv2.putText(frame, label, (startX, startY - 15),
					cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)
	
		if writer is not None:
			writer.write(frame)
	
		cv2.imshow("Frame", frame)
		key = cv2.waitKey(1) & 0xFF
	
		if key == 27:
			break

		fps.update()
	fps.stop()
	print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
	print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
	
	if writer is not None:
		writer.release()

	cv2.destroyAllWindows()
	vs.release()
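
One caveat with this multiprocessing sketch: the workers are never told to stop and only die because daemon=True when the main process exits. A cleaner shutdown (an extension, not in the original code) pushes a sentinel into each input queue and has the worker break on it:

# After the main loop: send each worker a None sentinel...
for iq in inputQueues:
	iq.put(None)
# ...and in start_tracker, exit when it arrives:
#     rgb = inputQueue.get()
#     if rgb is None:
#         break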