视频分析与对象跟踪

所用环境

本文使用的环境需要包含OpenCV的扩展模块,故使用的是已经编译好扩展模块的OpenCV版本。在后续的某些目标跟踪算法,如KCF算法是在扩展模块中的。
这里所用的OpenCV版本是3.2的包含扩展模块,资源如下
https://download.csdn.net/download/qq_44870829/13023392
当然也可以自己进行最新OpenCV版本的编译,将扩展模块编译到其中,可参考网上教程。

videoDemo类的定义(头文件)

videoDemo.h

#pragma once
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

// Collection of video-analysis / object-tracking demos built on OpenCV 3.2
// with the contrib (tracking) modules. Each method is a standalone demo.
class videoDemo
{
public:
	void video_Demo();				// 01 - open and save video (VideoCapture, VideoWriter)
	void BSM_demo();				// 02 - track moving objects via background subtraction modeling (createBackgroundSubtractorMOG2, apply)
	void colorTracking();			// 03 - color-based object tracking, green objects only (inRange, findContours)
	void sparse_flowTracking();		// 04 - sparse optical-flow tracking, very fast (calcOpticalFlowPyrLK)
	void dense_flowTracking();		// 05 - dense optical flow (calcOpticalFlowFarneback)
	void count_tracking();			// 06 - count moving objects via background subtraction (createBackgroundSubtractorMOG2)
	void CAMShift_tracking();		// 07 - continuously adaptive meanshift tracking (CamShift)
	void KCF_tracking();			// 08 - single-object tracking (Tracker, selectROI)
	void multiple_tracking();		// 09 - multi-object tracking (MultiTracker, selectROI)
};

videoDemo类的函数实现(源文件)

videoDemo.cpp

#include "videoDemo.h"
#include <cstdio>						// snprintf
#include <opencv2/tracking.hpp>			//此处的include不要放在头文件中,否则会报错

01-视频打开与保存(VideoCapture,VideoWriter)

//01-视频的打开与关闭
// 01 - Open a video source, show each frame (color-inverted and mirrored)
// and save it to disk.
// Note: OpenCV's VideoWriter writes silent video only and has file-size
// limits; it is not a full-featured codec suite.
void videoDemo::video_Demo() {
	//VideoCapture capture;
	//capture.open("D:/vcprojects/images/video_006.mp4");		// open a video file instead
	VideoCapture capture(0);									// default camera
	if (!capture.isOpened()) {
		printf("could not load video data...\n");
		return;
	}
	// Query source properties the writer needs.
	double fps = capture.get(CAP_PROP_FPS);
	Size size = Size(capture.get(CAP_PROP_FRAME_WIDTH), capture.get(CAP_PROP_FRAME_HEIGHT));
	printf("FPS : %f\n", fps);

	VideoWriter writer;
	writer.open("D:/vcprojects/images/wv_demo.mp4", writer.fourcc('D', 'I', 'V', 'X'), 24.0, size, true);
	// NOTE(review): with fourcc -1 a codec dialog appears and only .avi output
	// works; .mp4 cannot be saved that way.
	if (!writer.isOpened()) {
		printf("could not open the video writer...\n");
	}

	// Show each processed frame and append it to the output file.
	// (The original computed an inRange mask, a grayscale image and an Otsu
	// binarization here, but none of those results were ever used — removed.)
	Mat frame;
	while (capture.read(frame)) {
		bitwise_not(frame, frame);		// invert colors
		flip(frame, frame, 1);			// mirror horizontally
		imshow("video-demo", frame);

		writer.write(frame);			// persist the processed frame
		char c = waitKey(100);
		if (c == 27) {					// ESC quits
			break;
		}
	}

	writer.release();					// flush and close the output file (was missing)
	capture.release();
}

02-BSM背景消去建模法跟踪(createBackgroundSubtractorMOG2,apply)

//背景消融建模进行运动物体的跟踪
void videoDemo::BSM_demo() {
	VideoCapture capture;
	//capture.open("G:/OpenCV/贾志刚课件及PPT/视频分析与对象跟踪/课程配套课件/video_004.avi");
	capture.open("G:/OpenCV/贾志刚课件及PPT/视频分析与对象跟踪/课程配套课件/video_003.avi");
	if (!capture.isOpened()) {
		printf("could not find the video file...\n");
		return;
	}
	// create windows
	Mat frame;
	Mat bsmaskMOG2, bsmaskKNN;
	namedWindow("input video", WINDOW_AUTOSIZE);
	namedWindow("MOG2", WINDOW_AUTOSIZE);
	//namedWindow("KNN Model", CV_WINDOW_AUTOSIZE);

	Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(3, 3));

	// intialization BS
	//实例化背景提取的对象
	Ptr<BackgroundSubtractor> pMOG2 = createBackgroundSubtractorMOG2();	//创建一个背景MOG2分割器
	//Ptr<BackgroundSubtractor> pKNN = createBackgroundSubtractorKNN();
	vector<vector<Point>>  contours;
	vector<Vec4i> hireachy;
	Rect rect;
	while (capture.read(frame)) {
		imshow("input video", frame);

		// MOG BS
		pMOG2->apply(frame, bsmaskMOG2);		//对视频的帧进行背景分割,输入图像,输出前景的mask掩码
		morphologyEx(bsmaskMOG2, bsmaskMOG2, MORPH_OPEN, kernel);		//开运算可用于消除细小部分
		imshow("MOG2", bsmaskMOG2);

		// KNN BS mask
		//pKNN->apply(frame, bsmaskKNN);
		//imshow("KNN Model", bsmaskKNN);
		findContours(bsmaskMOG2, contours, hireachy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));
		for (int i = 0; i < contours.size(); i++) {
			if (arcLength(contours[i],true)>100) {
				//找一个直矩形,不会旋转
				rect = boundingRect(contours[i]);
				rectangle(frame, rect, Scalar(0, 0, 255), 2, 8, 0);
			}
		}

		imshow("output video", frame);
		char c = waitKey(150);
		if (c == 27) {
			break;
		}
	}

	capture.release();
}

03-基于颜色(绿色)的对象跟踪(inRange、findContours)

//基于颜色的目标跟踪
// Shared state for colorTracking(): bounding box of the tracked green object.
Rect roi;
// Contour search helper: writes the bounding rect of the largest external
// contour in `binary` into `rect` (defined below).
void processFrame(Mat &binary, Rect &rect);
void videoDemo::colorTracking() {
	// load video
	VideoCapture capture;
	capture.open("G:/OpenCV/贾志刚课件及PPT/视频分析与对象跟踪/课程配套课件/video_006.mp4");
	if (!capture.isOpened()) {
		printf("could not find video file");
		return;
	}

	Mat frame, mask;
	Mat kernel1 = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	Mat kernel2 = getStructuringElement(MORPH_RECT, Size(5, 5), Point(-1, -1));

	namedWindow("input video", WINDOW_AUTOSIZE);
	namedWindow("track mask", WINDOW_AUTOSIZE);
	while (capture.read(frame)) {
		inRange(frame, Scalar(0, 127, 0), Scalar(120, 255, 120), mask); // 过滤,得到一个颜色范围在(0, 127, 0)~(120, 255, 120)的mask,此处是绿色
		imshow("inRange mask", mask);
		morphologyEx(mask, mask, MORPH_OPEN, kernel1, Point(-1, -1), 1); // 开操作过滤
		dilate(mask, mask, kernel2, Point(-1, -1), 4);// 膨胀后的形状感觉比较大了,可以画外接矩形了
		imshow("track mask", mask);

		processFrame(mask, roi); // 轮廓发现与位置标定
		rectangle(frame, roi, Scalar(0, 0, 255), 2, 8, 0);
		imshow("input video", frame);

		// trigger exit
		char c = waitKey(100);
		if (c == 27) {
			break;
		}
	}

	capture.release();
}

//轮廓查找,外接矩形标定
//输入mask,返回rect的矩形信息
//下面的函数要注意,在OpenCV3.0版本有bug会报错,而在后面的4版本无错
// Find the largest-area external contour in `binary` and return its upright
// bounding rectangle through `rect`; zero the rect when no contour is found.
// NOTE(review): per the original author, findContours here crashed on
// OpenCV 3.0 but works on 4.x — confirm against the build in use.
void processFrame(Mat &binary, Rect &rect) {
	vector<vector<Point>> outlines;
	vector<Vec4i> topology;
	findContours(binary, outlines, topology, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));	// outermost contours only
	if (outlines.empty()) {
		rect.x = rect.y = rect.width = rect.height = 0;
		return;
	}
	double bestArea = 0.0;
	for (size_t t = 0; t < outlines.size(); t++) {
		double area = contourArea(outlines[t]);
		if (area > bestArea) {
			bestArea = area;						// running maximum area
			rect = boundingRect(outlines[t]);		// box of the current best contour
		}
	}
}

04-光流法跟踪(稀疏光流)(calcOpticalFlowPyrLK)

//光流法跟踪
// Shared state for the sparse optical-flow (KLT) tracking demo.
Mat frame, gray;			// current frame and its grayscale image
Mat prev_frame, prev_gray;	// previous frame and its grayscale image
vector<Point2f> features; // Shi-Tomasi corner detection output

vector<Point2f> iniPoints; // initial positions of the tracked feature points
vector<Point2f> fpts[2]; // feature positions: [0] previous frame, [1] current frame

vector<uchar> status; // per-point success flag from calcOpticalFlowPyrLK
vector<float> errors; // per-point tracking error from calcOpticalFlowPyrLK

// Helper declarations (definitions below).
void drawFeature(Mat &inFrame);
void detectFeatures(Mat &inFrame, Mat &ingray);
void klTrackFeature();
void drawTrackLines();
// 04 - Sparse optical-flow tracking: detect Shi-Tomasi corners, then follow
// them frame-to-frame with pyramidal Lucas-Kanade (see klTrackFeature).
// Uses the file-scope globals above (frame, gray, fpts, iniPoints, ...).
void videoDemo::sparse_flowTracking() {
	//VideoCapture capture(0);
	VideoCapture capture;
	capture.open("G:/OpenCV/贾志刚课件及PPT/视频分析与对象跟踪/课程配套课件/bike.avi");
	if (!capture.isOpened()) {
		printf("could not load video file...\n");
		return;
	}

	namedWindow("camera input", WINDOW_AUTOSIZE);
	while (capture.read(frame)) {
		//flip(frame, frame, 1);
		cvtColor(frame, gray, COLOR_BGR2GRAY);

		// Re-seed the feature set whenever too few points remain: tracked
		// points get lost over time, so when fewer than 40 survive we detect
		// fresh corners and append them to the previous-frame point list.
		if (fpts[0].size() < 40) {
			detectFeatures(frame, gray);
			fpts[0].insert(fpts[0].end(), features.begin(), features.end());
			iniPoints.insert(iniPoints.end(), features.begin(), features.end());
		}
		else {
			printf("tttttttttttttttttttttttttttttttttttttttt...\n");
		}

		// First frame only: there is no previous image yet, so clone the
		// current one to make calcOpticalFlowPyrLK's inputs valid.
		if (prev_gray.empty()) {
			gray.copyTo(prev_gray);
		}

		klTrackFeature();		// track the corners into the current frame
		drawFeature(frame);		// draw the surviving feature points

		// Advance: current frame becomes "previous" for the next iteration.
		gray.copyTo(prev_gray);
		frame.copyTo(prev_frame);
		imshow("camera input", frame);

		char c = waitKey(50);
		if (c == 27) {	// ESC quits
			break;
		}
	}
	capture.release();
}

//光流法跟踪-检测特征
// Detect up to 5000 Shi-Tomasi corners in `ingray`; results are written to
// the global `features`. `inFrame` is unused but kept for interface
// compatibility with existing callers.
void detectFeatures(Mat &inFrame, Mat &ingray) {
	int maxCorners = 5000;			// was double: goodFeaturesToTrack takes an int
	double qualitylevel = 0.01;		// relative corner-quality threshold
	double minDistance = 10;		// minimum spacing between returned corners
	int blockSize = 3;				// was double: neighborhood size is an int
	double k = 0.04;				// Harris free parameter (inactive: useHarrisDetector=false)
	goodFeaturesToTrack(ingray, features, maxCorners, qualitylevel, minDistance, Mat(), blockSize, false, k);
	cout << "detect features : " << features.size() << endl;
}

//光流法-绘制特征
// Draw every currently tracked feature point (fpts[0]) as a small red dot.
void drawFeature(Mat &inFrame) {
	for (const Point2f &pt : fpts[0]) {
		circle(inFrame, pt, 2, Scalar(0, 0, 255), 2, 8, 0);
	}
}

//光流法-KLT进行特征跟踪
// KLT tracking step: propagate fpts[0] (previous frame) to fpts[1] (current
// frame) with pyramidal Lucas-Kanade, compact fpts[1] and iniPoints in place
// so only points that both moved (> 2 px Manhattan distance) and were tracked
// successfully survive, draw the trails, then swap the point buffers.
void klTrackFeature() {
	// Args: previous gray image, current gray image, previous points,
	// current points (output), per-point status, per-point error.
	calcOpticalFlowPyrLK(prev_gray, gray, fpts[0], fpts[1], status, errors);
	int k = 0;
	// In-place filter: keep point i only if it visibly moved between frames
	// and status[i] == 1 (1 = tracked successfully, 0 = lost).
	for (int i = 0; i < fpts[1].size(); i++) {
		double dist = abs(fpts[0][i].x - fpts[1][i].x) + abs(fpts[0][i].y - fpts[1][i].y);
		if (dist > 2 && status[i]) {
			iniPoints[k] = iniPoints[i];
			fpts[1][k++] = fpts[1][i];
		}
	}
	// Shrink both arrays to the surviving count and draw each point's trail.
	iniPoints.resize(k);
	fpts[1].resize(k);
	drawTrackLines();		// draw trail lines for the tracked points

	std::swap(fpts[1], fpts[0]);		// current points become "previous" for the next frame
}

//光流法-绘制线
void drawTrackLines() {
	for (size_t t = 0; t < fpts[1].size(); t++) {
		line(frame, iniPoints[t], fpts[1][t], Scalar(0, 255, 0), 1, 8, 0);
		circle(frame, fpts[1][t], 2, Scalar(0, 0, 255), 2, 8, 0);
	}
}

05-稠密光流(calcOpticalFlowFarneback)

//稠密光流-绘制区域
// Visualize a dense flow field: sample the field every `step` pixels and,
// where the displacement exceeds one pixel, draw a green motion vector and
// a red dot at the sample point.
// Bug fixed: `step` was accepted but ignored, so every single pixel was
// drawn — extremely slow and an unreadable result for a 10-px caller.
void drawOpticalFlowHF(const Mat &flowdata, Mat& image, int step) {
	if (step <= 0) step = 1;	// guard against a degenerate stride
	for (int row = 0; row < image.rows; row += step) {
		for (int col = 0; col < image.cols; col += step) {
			const Point2f fxy = flowdata.at<Point2f>(row, col);
			if (fxy.x > 1 || fxy.y > 1) {
				line(image, Point(col, row), Point(cvRound(col + fxy.x), cvRound(row + fxy.y)), Scalar(0, 255, 0), 2, 8, 0);
				circle(image, Point(col, row), 2, Scalar(0, 0, 255), -1);
			}
		}
	}
}

//稠密光流
void videoDemo::dense_flowTracking() {
	VideoCapture capture;
	//capture.open("D:/vcprojects/images/video_006.mp4");
	capture.open("G:/OpenCV/贾志刚课件及PPT/视频分析与对象跟踪/课程配套课件/video_006.mp4");
	if (!capture.isOpened()) {
		printf("could not load image...\n");
		return;
	}

	Mat frame, gray;
	Mat prev_frame, prev_gray;
	Mat flowResult, flowdata;
	capture.read(frame);
	cvtColor(frame, prev_gray, COLOR_BGR2GRAY);
	namedWindow("flow", CV_WINDOW_AUTOSIZE);
	namedWindow("input", CV_WINDOW_AUTOSIZE);

	// 从第二帧数据开始
	while (capture.read(frame))
	{
		cvtColor(frame, gray, COLOR_BGR2GRAY);
		if (!prev_gray.empty()) {
			calcOpticalFlowFarneback(prev_gray, gray, flowdata, 0.5, 3, 15, 3, 5, 1.2, 0);
			cvtColor(prev_gray, flowResult, COLOR_GRAY2BGR);
			drawOpticalFlowHF(flowdata, flowResult, 10);
			imshow("flow", flowResult);
			imshow("input", frame);
		}
		char c = waitKey(1);
		if (c == 27) {
			break;
		}
	}
	capture.release();
}

06-使用BSM法进行运动物体的计数(createBackgroundSubtractorMOG2)

//使用BSM法进行当前帧运动物体的计数
// 06 - Count moving objects per frame with MOG2 background subtraction:
// binarize the foreground mask, clean it with an opening, then number every
// contour that passes minimum area and bounding-box size filters.
void videoDemo::count_tracking() {
	VideoCapture capture;
	//capture.open("D:/vcprojects/images/768x576.avi");
	capture.open("G:/OpenCV/贾志刚课件及PPT/视频分析与对象跟踪/课程配套课件/video_003.avi");
	if (!capture.isOpened()) {
		printf("could not load video data...\n");
		return;
	}

	namedWindow("input video", WINDOW_AUTOSIZE);
	namedWindow("motion objects", WINDOW_AUTOSIZE);

	// MOG2 segmenter and a 3x3 kernel for the noise-removing opening.
	Ptr<BackgroundSubtractor> pMOG2 = createBackgroundSubtractorMOG2();
	Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));

	vector<vector<Point>> contours;
	vector<Vec4i> hireachy;
	int count = 0;

	Mat frame, mogMask;
	while (capture.read(frame)) {
		imshow("input video", frame);
		pMOG2->apply(frame, mogMask);			// foreground mask for this frame
		threshold(mogMask, mogMask, 100, 255, THRESH_BINARY);				// binarize (drops MOG2's gray shadow pixels, value 127)
		morphologyEx(mogMask, mogMask, MORPH_OPEN, kernel, Point(-1, -1));	// opening removes small noise

		findContours(mogMask, contours, hireachy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));	// outer contours
		count = 0;
		char numText[8];
		for (size_t t = 0; t < contours.size(); t++) {
			double area = contourArea(contours[t]);				// per-contour area
			if (area < 50) continue;							// too small to be an object
			Rect selection = boundingRect(contours[t]);
			if (selection.width < 10 || selection.height < 10) continue;
			count++;
			rectangle(frame, selection, Scalar(0, 0, 255), 2, 8);		// box the object

			// snprintf replaces the MSVC-only sprintf_s for portability.
			snprintf(numText, sizeof(numText), "%d", count);
			putText(frame, numText, Point(selection.x, selection.y), FONT_HERSHEY_PLAIN, 2, Scalar(255, 0, 0), 1, 8);
		}

		imshow("motion objects", frame);
		char c = waitKey(50);
		if (c == 27) {// ESC 
			break;
		}
	}

	capture.release();
}

07-连续自适应的meanshift跟踪算法(CamShift)

//连续自适应的meanshift跟踪算法
// CamShift tuning parameters: saturation/value bounds for the HSV mask
// (inRange below) and the number of hue-histogram bins.
int smin = 15;
int vmin = 40;
int vmax = 256;
int bins = 16;
// 07 - Continuously adaptive meanshift (CamShift) tracking: the user selects
// a ROI on the first frame; its hue histogram becomes the target model, and
// each subsequent frame is tracked via histogram back-projection + CamShift.
void videoDemo::CAMShift_tracking() {
	VideoCapture capture;
	//capture.open("D:/vcprojects/images/balltest.mp4");
	//capture.open("G:/OpenCV/贾志刚课件及PPT/视频分析与对象跟踪/课程配套课件/video_006.mp4");
	capture.open("G:/OpenCV/OpenCV项目实战/python与OpenCV项目实战/目标追踪/multi-object-tracking/videos/soccer_01.mp4");

	if (!capture.isOpened()) {
		printf("could not find video data file...\n");
		return;
	}
	namedWindow("CAMShift Tracking", CV_WINDOW_AUTOSIZE);
	namedWindow("ROI Histogram", CV_WINDOW_AUTOSIZE);

	bool firstRead = true;				// true only until the first frame is processed
	float hrange[] = { 0, 180 };		// hue range for the histogram
	const float* hranges = hrange;
	Rect selection;						// current tracking window (updated by CamShift)
	Mat frame, hsv, hue, mask, hist, backprojection;
	Mat drawImg = Mat::zeros(300, 300, CV_8UC3);	// canvas for the histogram display
	while (capture.read(frame)) {
		if (firstRead) {
			// Let the user draw the initial ROI on the first frame.
			Rect2d first = selectROI("CAMShift Tracking", frame);
			selection.x = first.x;
			selection.y = first.y;
			selection.width = first.width;
			selection.height = first.height;
			printf("ROI.x= %d, ROI.y= %d, width = %d, height= %d", selection.x, selection.y, selection.width, selection.height);
		}
		// Convert to HSV and mask out low-saturation / extreme-value pixels.
		cvtColor(frame, hsv, COLOR_BGR2HSV);
		inRange(hsv, Scalar(0, smin, vmin), Scalar(180, vmax, vmax), mask);
		hue = Mat(hsv.size(), hsv.depth());				// single-channel hue image
		int channels[] = { 0, 0 };						// copy channel 0 (H) to channel 0 of `hue`
		mixChannels(&hsv, 1, &hue, 1, channels, 1);		// copies selected input channels to output channels

		if (firstRead) {
			// Build the target model: hue histogram of the selected ROI.
			Mat roi(hue, selection);
			Mat maskroi(mask, selection);
			calcHist(&roi, 1, 0, maskroi, hist, 1, &bins, &hranges);	// histogram of the ROI
			normalize(hist, hist, 0, 255, NORM_MINMAX);					// scale to 0..255

			// Render the histogram as colored bars for display.
			int binw = drawImg.cols / bins;				
			// Color lookup: each bin is painted with its representative hue.
			Mat colorIndex = Mat(1, bins, CV_8UC3);
			for (int i = 0; i < bins; i++) {
				colorIndex.at<Vec3b>(0, i) = Vec3b(saturate_cast<uchar>(i * 180 / bins), 255, 255);
			}
			cvtColor(colorIndex, colorIndex, COLOR_HSV2BGR);
			for (int i = 0; i < bins; i++) {
				int  val = saturate_cast<int>(hist.at<float>(i)*drawImg.rows / 255);
				rectangle(drawImg, Point(i*binw, drawImg.rows), Point((i + 1)*binw, drawImg.rows - val), Scalar(colorIndex.at<Vec3b>(0, i)), -1, 8, 0);
			}
		}

		// Back-project the model histogram onto the current hue image to get
		// a per-pixel likelihood map of the target.
		calcBackProject(&hue, 1, 0, hist, backprojection, &hranges);
		// Restrict to valid pixels, then let CamShift move/resize the window.
		backprojection &= mask;
		RotatedRect trackBox = CamShift(backprojection, selection, TermCriteria((TermCriteria::COUNT | TermCriteria::EPS), 10, 1));

		// Draw the tracked location as a rotated ellipse.
		ellipse(frame, trackBox, Scalar(0, 0, 255), 3, 8);

		if (firstRead) {
			firstRead = false;
		}
		imshow("CAMShift Tracking", frame);
		imshow("ROI Histogram", drawImg);
		char c = waitKey(50);// ESC
		if (c == 27) {
			break;
		}
	}

	capture.release();
}

08-基于KCF的单对象跟踪(Tracker、selectROI)

//基于KCF的单对象跟踪
// 08 - Single-object tracking with the OpenCV 3.x contrib Tracker API:
// the user selects one ROI on the first frame, then the tracker follows it.
// NOTE(review): despite the function's name, this creates a "MEDIANFLOW"
// tracker, not "KCF" — confirm which algorithm is intended.
void videoDemo::KCF_tracking() {
	VideoCapture capture;
	//capture.open("D:/vcprojects/images/mulballs.mp4");
	capture.open("G:/OpenCV/贾志刚课件及PPT/视频分析与对象跟踪/课程配套课件/video_003.avi");
	if (!capture.isOpened()) {
		printf("could not load video data...\n");
		return;
	}

	Mat frame;
	namedWindow("tracker demo", CV_WINDOW_AUTOSIZE);
	Ptr<Tracker> tracker = Tracker::create("MEDIANFLOW");
	capture.read(frame);							// read one frame first so a ROI can be selected on it
	Rect2d  roi = selectROI("tracker demo", frame);		// user picks exactly one ROI to track
	if (roi.width == 0 || roi.height == 0) {
		return;										// selection cancelled / empty
	}
	tracker->init(frame, roi);
	while (capture.read(frame)) {
		tracker->update(frame, roi);				// roi is moved to the object's new location
		rectangle(frame, roi, Scalar(255, 0, 0), 2, 8, 0);
		imshow("tracker demo", frame);

		char c = waitKey(20);
		if (c == 27) {								// ESC quits
			break;
		}
	}

	capture.release();
}

09-基于多对象的跟踪(MultiTracker、selectROI)

//基于多对象的跟踪
// 09 - Multi-object tracking with MultiTracker (KCF per object). Press 's'
// at any time to (re)select one or more ROIs to track; ESC quits.
void videoDemo::multiple_tracking() {
	VideoCapture capture;
	capture.open("G:/OpenCV/OpenCV项目实战/python与OpenCV项目实战/目标追踪/multi-object-tracking/videos/soccer_01.mp4");

	if (!capture.isOpened()) {
		printf("could not load video data...\n");
		return;
	}

	namedWindow("Multiple Objects Tracking", CV_WINDOW_AUTOSIZE);

	// MultiTracker instance using the KCF algorithm for every object.
	MultiTracker trackers("KCF");
	vector<Rect2d> objects;

	Mat frame, gray;

	// Frame loop.
	while (capture.read(frame)) {
		trackers.update(frame);			// advance every tracker to this frame
		// Draw the current box of each tracked object.
		for (size_t t = 0; t < trackers.objects.size(); t++) {
			rectangle(frame, trackers.objects[t], Scalar(0, 0, 255), 2, 8, 0);
		}

		imshow("Multiple Objects Tracking", frame);

		char c = waitKey(100);
		if (c == 's') {					// 's' = (re)select regions to track
			selectROI("Multiple Objects Tracking", frame, objects, false);		// multiple ROIs may be drawn
			if (objects.size() < 1) {
				return;
			}

			// Replace the tracker set with a fresh one: previously tracked
			// objects are discarded. Commenting this line out would keep the
			// old selections alongside the new ones.
			trackers = MultiTracker("KCF");
			trackers.add(frame, objects);		// register the new objects
		}else if (c == 27) {
			break;
		}
	}

	capture.release();
}

主程序调用类

#include "videoDemo.h"

// Entry point: run exactly one demo by uncommenting the desired call.
int main(int argc, char** argv) {
	videoDemo video;

	//video.video_Demo();				// open & save video
	//video.BSM_demo();					// track moving objects (background subtraction)
	//video.colorTracking();			// color-based tracking (green only)
	//video.sparse_flowTracking();		// sparse optical flow (fast)
	//video.dense_flowTracking();		// dense optical flow
	//video.count_tracking();			// count moving objects (background subtraction)
	//video.CAMShift_tracking();		// continuously adaptive meanshift
	//video.KCF_tracking();				// single-object tracking
	video.multiple_tracking();		// multi-object tracking

	waitKey(0);		// keep windows open until a key is pressed
	return 0;
}
  • 1
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

吾名招财

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值