A Pedestrian-Tracking Example
Object tracking is the process of locating a moving target in camera video. It is used in surveillance, perceptual user interfaces, augmented reality, object-based video compression, driver assistance, and more.
Application workflow
- Examine the first frame.
- Examine each subsequent frame, using a background subtractor to identify the pedestrians that are in the scene from the start (a minimal, detection-only sketch follows this list).
- Establish an ROI (region of interest) for each pedestrian, and use a Kalman filter together with CAMShift (or meanShift) to track each pedestrian's ID.
- Check each new frame for pedestrians entering the scene.
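Before the full listing, here is a minimal sketch of only the detection stage of that workflow: a KNN background subtractor produces a foreground mask, and contours above a minimum area become candidate pedestrians. This is a sketch under two assumptions, OpenCV 4 and a demo.avi in the working directory; the complete program below adds the Kalman/CAMShift tracking on top of this loop.
import cv2

cap = cv2.VideoCapture("demo.avi")
bs = cv2.createBackgroundSubtractorKNN()
while True:
    grabbed, frame = cap.read()
    if not grabbed:
        break
    fgmask = bs.apply(frame)  # foreground mask; shadows are marked as 127
    th = cv2.threshold(fgmask, 127, 255, cv2.THRESH_BINARY)[1]
    contours, _ = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv2.contourArea(c) > 500:  # drop small noise blobs
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
    cv2.imshow("detections", frame)
    if cv2.waitKey(30) & 0xff == 27:  # Esc quits
        break
cap.release()
cv2.destroyAllWindows()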
Code
The complete pedestrian-tracking program is listed below; its input video is demo.avi.
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 9 14:48:10 2021
@author: gkm0120
"""
import cv2
import numpy as np
import os.path as path
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--algorithm",
                    help="m (or nothing) for meanShift and c for camshift")
args = vars(parser.parse_args())
def center(points):
    """Compute the centroid of four corner points."""
    x = (points[0][0] + points[1][0] + points[2][0] + points[3][0]) / 4
    y = (points[0][1] + points[1][1] + points[2][1] + points[3][1]) / 4
    return np.array([np.float32(x), np.float32(y)], np.float32)
font = cv2.FONT_HERSHEY_SIMPLEX
class Pedestrian():
    """A tracked pedestrian.

    Each pedestrian consists of an ROI, an ID, and a Kalman filter,
    so this class holds that object state.
    """
    def __init__(self, id, frame, track_window):
        """Initialize the pedestrian object from its tracking-window coordinates."""
        # set up the ROI
        self.id = int(id)
        x, y, w, h = track_window
        self.track_window = track_window
        self.roi = cv2.cvtColor(frame[y:y+h, x:x+w], cv2.COLOR_BGR2HSV)
        roi_hist = cv2.calcHist([self.roi], [0], None, [16], [0, 180])
        self.roi_hist = cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
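        # This 16-bin hue histogram is the appearance model that meanShift/
        # CamShift back-project onto every new frame to locate the pedestrian.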
        # set up the Kalman filter
        self.kalman = cv2.KalmanFilter(4, 2)
        self.kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
        self.kalman.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
        self.kalman.processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.03
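        # The state is [x, y, vx, vy] and the measurement is [x, y]; the
        # transition matrix encodes a constant-velocity model (position is
        # advanced by velocity each frame), and the small processNoiseCov
        # scaling makes the filter trust that motion model fairly strongly.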
        self.measurement = np.zeros((2, 1), np.float32)  # was np.array((2,1), ...), which builds the array [2., 1.] rather than a 2x1 buffer
        self.prediction = np.zeros((2, 1), np.float32)
        self.term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)  # stop after 10 iterations or a sub-pixel shift
        self.center = None
        self.update(frame)
    def __del__(self):
        print("Pedestrian %d destroyed" % self.id)
    def update(self, frame):
        # print("updating %d" % self.id)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        back_project = cv2.calcBackProject([hsv], [0], self.roi_hist, [0, 180], 1)
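        # calcBackProject replaces every pixel with the probability (looked
        # up in the ROI's hue histogram) that it belongs to the tracked
        # pedestrian; meanShift/CamShift then climb this probability map.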
        if args.get("algorithm") == "c":
            ret, self.track_window = cv2.CamShift(back_project, self.track_window, self.term_crit)
            pts = cv2.boxPoints(ret)
            pts = np.int32(pts)  # np.int0 was removed in NumPy 2.0
            self.center = center(pts)
            cv2.polylines(frame, [pts], True, 255, 1)
        if not args.get("algorithm") or args.get("algorithm") == "m":
            ret, self.track_window = cv2.meanShift(back_project, self.track_window, self.term_crit)
            x, y, w, h = self.track_window
            self.center = center([[x, y], [x+w, y], [x, y+h], [x+w, y+h]])
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 0), 2)
        self.kalman.correct(self.center)
        prediction = self.kalman.predict()
        cv2.circle(frame, (int(prediction[0]), int(prediction[1])), 4, (255, 0, 0), -1)
        # black text shadow, offset by one pixel, to keep the label readable
        cv2.putText(frame, "ID: %d -> %s" % (self.id, self.center), (11, (self.id + 1) * 25 + 1),
                    font, 0.6,
                    (0, 0, 0),
                    1,
                    cv2.LINE_AA)
        # the actual ID and centroid, in green
        cv2.putText(frame, "ID: %d -> %s" % (self.id, self.center), (10, (self.id + 1) * 25),
                    font, 0.6,
                    (0, 255, 0),
                    1,
                    cv2.LINE_AA)
def main():
    camera = cv2.VideoCapture(path.join(path.dirname(__file__), "demo.avi"))  # load the video
    # camera = cv2.VideoCapture(0)  # or use a webcam
    history = 20  # number of frames used to build the background model
    # KNN background subtractor
    bs = cv2.createBackgroundSubtractorKNN()
    # MOG background subtractor
    # bs = cv2.bgsegm.createBackgroundSubtractorMOG(history=history)
    # bs.setHistory(history)
    # GMG background subtractor
    # bs = cv2.bgsegm.createBackgroundSubtractorGMG(initializationFrames=history)
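    # Note: the MOG and GMG subtractors above live in cv2.bgsegm, which is
    # only available when the opencv-contrib-python package is installed.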
    # Create the main display window, the pedestrians dictionary, and the
    # firstFrame flag, which lets the background subtractor build up its
    # history before tracking starts.
    cv2.namedWindow("surveillance")
    pedestrians = {}
    firstFrame = True
    frames = 0
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
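    # Caveat: VideoWriter silently drops frames whose size differs from the
    # declared (640, 480); if demo.avi has another resolution, pass its real
    # size here or cv2.resize each frame before out.write().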
    while True:
        print(" -------------------- FRAME %d --------------------" % frames)
        grabbed, frame = camera.read()
        if not grabbed:
            print("failed to grab frame.")
            break
        fgmask = bs.apply(frame)
        # let the background subtractor build up some history first
        if frames < history:
            frames += 1
            continue
        # clean up the foreground mask with erosion and dilation to obtain
        # blobs and their surrounding bounding boxes
        th = cv2.threshold(fgmask.copy(), 127, 255, cv2.THRESH_BINARY)[1]
        th = cv2.erode(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=2)
        dilated = cv2.dilate(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 3)), iterations=2)
        # OpenCV 3 returned (image, contours, hierarchy); OpenCV 4 returns two values
        contours, hier = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # require a minimum contour area to filter out detection noise
        counter = 0
        for c in contours:
            if cv2.contourArea(c) > 500:
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 1)
                # instantiate one Pedestrian per contour, in the first frame only
                if firstFrame is True:
                    pedestrians[counter] = Pedestrian(counter, frame, (x, y, w, h))
                counter += 1
        # pass the current frame to every tracked pedestrian's update()
        for i, p in pedestrians.items():
            p.update(frame)
        firstFrame = False  # from now on, only existing pedestrians are tracked; no new ones are added
        frames += 1
        cv2.imshow("surveillance", frame)  # show the result
        out.write(frame)
        if cv2.waitKey(110) & 0xff == 27:
            break
    out.release()
    camera.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
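To run the complete example, save the listing next to demo.avi; the script name tracker.py used here is only for illustration. meanShift is the default tracker, and CAMShift is selected with the -a flag defined by the argparse setup above:
python tracker.py        # track with meanShift (the default)
python tracker.py -a c   # track with CAMShift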
Figure
In the screenshot, the blue outline is the CAMShift tracking result, the green rectangles are the bounding boxes of the detected contours, and the blue filled circle marks the position predicted by the Kalman filter.