OpenCV之视频分析与对象跟踪(四) 光流的对象跟踪 稀疏光流&稠密光流

版权声明:本文为博主原创文章,遵循 CC 4.0 by-sa 版权协议,转载请附上原文出处链接和本声明。
本文链接:https://blog.csdn.net/CJ_035/article/details/81982022

稀疏光流

实现流程:

代码&注释:

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

Mat frame, gray;            // current frame (BGR) and its grayscale version
Mat prev_frame, prev_gray;  // previous frame and its grayscale version

vector<Point2f> features; // Shi-Tomasi corner detection output - fresh feature points

vector<Point2f> iniPoints; // initial positions of the currently tracked points
vector<Point2f> fpts[2]; // point positions: fpts[0] = previous frame, fpts[1] = current frame

vector<uchar> status; // per-point flag from calcOpticalFlowPyrLK: non-zero = tracked OK
vector<float> errors; // per-point tracking error reported by calcOpticalFlowPyrLK

// NOTE(review): drawFeature is declared but no definition appears in this file.
void drawFeature(Mat &inFrame);
void detectFeatures(Mat &inFrame, Mat &ingray);
void klTrackFeature();
void drawTrackLines();
int main(int argc, char** argv) {
	// Open the demo clip; switch to VideoCapture capture(0) to use a webcam.
	VideoCapture capture;
	capture.open("bike.avi");
	if (!capture.isOpened()) {
		printf("could not load video file...\n");
		return -1;
	}

	namedWindow("camera input", 0);

	// Process the video frame by frame until it ends or ESC is pressed.
	while (capture.read(frame)) {
		// flip(frame, frame, 1); // mirror the image when using a front camera
		cvtColor(frame, gray, COLOR_BGR2GRAY);

		// Top up the tracked point set whenever it shrinks below 40 points;
		// otherwise just keep tracking the existing ones.
		if (fpts[0].size() >= 40) {
			printf("没有检测,持续追踪...\n");
		}
		else {
			detectFeatures(frame, gray);
			fpts[0].insert(fpts[0].end(), features.begin(), features.end());
			iniPoints.insert(iniPoints.end(), features.begin(), features.end());
		}

		// First iteration only: seed the "previous" image so LK has two frames.
		if (prev_gray.empty()) {
			gray.copyTo(prev_gray);
		}

		klTrackFeature();

		// Current frame becomes the reference for the next iteration.
		gray.copyTo(prev_gray);
		frame.copyTo(prev_frame);
		imshow("camera input", frame);

		char key = waitKey(50);
		if (key == 27) { // ESC quits
			break;
		}
	}

	waitKey(0);
	return 0;
}

/**
 * Detect Shi-Tomasi corners in the grayscale image and store them in the
 * global `features` vector.
 *
 * @param inFrame colour frame (currently unused; kept for interface stability)
 * @param ingray  grayscale image the corners are detected in
 */
void detectFeatures(Mat &inFrame, Mat &ingray) {
	// goodFeaturesToTrack takes int for maxCorners and blockSize; the
	// original declared them as double and relied on implicit narrowing.
	const int maxCorners = 5000;      // upper bound on the number of corners returned
	const double qualitylevel = 0.01; // minimal accepted quality relative to the best corner
	const double minDistance = 10;    // minimum Euclidean distance between corners
	const int blockSize = 3;          // neighbourhood size for the gradient covariance matrix
	const double k = 0.04;            // Harris free parameter (unused: useHarrisDetector = false)
	goodFeaturesToTrack(ingray, features, maxCorners, qualitylevel, minDistance, Mat(), blockSize, false, k);
	cout << "detect features : " << features.size() << endl;
}


/**
 * Track the feature points from the previous frame into the current one with
 * pyramidal Lucas-Kanade optical flow, drop points that were lost or barely
 * moved, draw the trajectories, and roll the point sets over so the current
 * frame becomes the reference for the next call.
 *
 * Reads/writes the globals: prev_gray, gray, fpts, iniPoints, status, errors.
 */
void klTrackFeature() {
	// KLT: estimate, for each point in fpts[0] (previous frame), its new
	// position in `gray`; results land in fpts[1], with per-point success
	// flags in `status` and tracking errors in `errors`.
	calcOpticalFlowPyrLK(prev_gray, gray, fpts[0], fpts[1], status, errors);
	int k = 0; // write index for the in-place compaction below

	// Filter the tracked points.
	for (int i = 0; i < fpts[1].size(); i++) {
		// Manhattan distance between the point's previous and current position.
		double dist = abs(fpts[0][i].x - fpts[1][i].x) + abs(fpts[0][i].y - fpts[1][i].y);

		/*
		1. calcOpticalFlowPyrLK tracks the input points fpts[0] into the next
		   frame; after it returns, status[i] says whether point i was tracked
		   successfully, which is what the test below checks.
		2. iniPoints holds the initial positions of the points; it is compacted
		   in lockstep with fpts[1] after every call so the two vectors stay
		   parallel, using status to decide which old points to keep.
		3. fpts[1] holds the point positions in the current frame.

		*/

		if (dist > 2 && status[i]) {
			iniPoints[k] = iniPoints[i];
			fpts[1][k++] = fpts[1][i];

			// 1. Compact the KLT result fpts[1] in place, keeping only useful
			//    points: a point is dropped when its status flag is 0 or when
			//    it moved too little (dist <= 2).
			// NOTE(review): the distance test also discards points that were
			// tracked correctly but simply are not moving.
		}
	}
	// Keep only the surviving points and draw their trajectories.
	iniPoints.resize(k);	// shrink both vectors to the number of kept points
	fpts[1].resize(k);

	drawTrackLines();

	std::swap(fpts[1], fpts[0]); // current points become "previous" for the next frame
}

void drawTrackLines() {
	for (size_t t = 0; t<fpts[1].size(); t++) {
		line(frame, iniPoints[t], fpts[1][t], Scalar(0, 255, 0), 1, 8, 0);
		circle(frame, fpts[1][t], 2, Scalar(0, 0, 255), 2, 8, 0);
	}
}

效果图:

稠密光流

代码:

#include "opencv2/opencv.hpp"
#include <iostream>
#include <math.h>

using namespace cv;
using namespace std;

void drawOpticalFlowHF(const Mat &flowdata, Mat& image, int step);
int main(int argc, char** argv)
{
	VideoCapture capture;
	capture.open("video_003.avi");
	if (!capture.isOpened()) {
		printf("could not load image...\n");
		return -1;
	}

	Mat frame, gray;
	Mat prev_frame, prev_gray;
	Mat flowResult, flowdata;
	capture.read(frame);
	cvtColor(frame, prev_gray, COLOR_BGR2GRAY);
	namedWindow("flow", CV_WINDOW_AUTOSIZE);
	namedWindow("input", CV_WINDOW_AUTOSIZE);

	// 从第二帧数据开始
	while (capture.read(frame))
	{

		cvtColor(frame, gray, COLOR_BGR2GRAY);
		if (!prev_gray.empty()) {
			calcOpticalFlowFarneback(prev_gray, gray, flowdata, 0.5, 3, 15, 3, 5, 1.2, 0);
			cvtColor(prev_gray, flowResult, COLOR_GRAY2BGR);
			drawOpticalFlowHF(flowdata, flowResult, 10);
			gray.copyTo(prev_gray);
			imshow("flow", flowResult);
			imshow("input", frame);
		}
		char c = waitKey(1);
		if (c == 27) {
			break;
		}
	}
	return 0;
}

/**
 * Visualise a dense flow field: at every `step`-th pixel whose displacement
 * is significant, draw a green line from the pixel to where the flow moves it.
 *
 * @param flowdata 2-channel float flow field from calcOpticalFlowFarneback
 * @param image    BGR image to draw on (same size as flowdata)
 * @param step     sampling stride in pixels
 *
 * Fix: the original ignored `step` and visited every single pixel, so the
 * caller's step=10 sampling had no effect and the output was cluttered.
 */
void drawOpticalFlowHF(const Mat &flowdata, Mat& image, int step) {
	if (step < 1) step = 1; // guard against a non-positive stride
	for (int row = 0; row < image.rows; row += step) {
		for (int col = 0; col < image.cols; col += step) {
			const Point2f fxy = flowdata.at<Point2f>(row, col);
			// NOTE(review): only positive displacements pass this test, so
			// leftward/upward motion is never drawn; use fabs() on both
			// components if all motion should be visualised.
			if (fxy.x > 1 || fxy.y > 1) {
				line(image, Point(col, row), Point(cvRound(col + fxy.x), cvRound(row + fxy.y)), Scalar(0, 255, 0), 2, 8, 0);
	//			circle(image, Point(col, row), 2, Scalar(0, 0, 255), -1);
			}
		}
	}
}

效果图:

展开阅读全文

没有更多推荐了,返回首页