opencv贾老师系列11——视频分析和对象跟踪1

读写视频文件

#include "pch.h"
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char** argv) {
	//VideoCapture capture;
	//capture.open("D:/vcprojects/images/video_006.mp4");
	VideoCapture capture(0);
	if (!capture.isOpened()) {
		printf("could not load video data...\n");
		return -1;
	}
	// 获取帧的属性
	double fps = capture.get(CV_CAP_PROP_FPS);
	//获得帧的大小
	Size size = Size(capture.get(CV_CAP_PROP_FRAME_WIDTH), capture.get(CV_CAP_PROP_FRAME_HEIGHT));
	printf("FPS : %f", fps);
	//保存视频
	VideoWriter writer("D:/wv_demo.mp4", CV_FOURCC('D', 'I', 'V', 'X'), 15.0, size, true);

	// create window
	Mat frame, gray, binary;
	namedWindow("video-demo", CV_WINDOW_AUTOSIZE);

	// 显示每一帧,并保存
	vector<Mat> bgr;
	while (capture.read(frame)) {
		inRange(frame, Scalar(0, 127, 0), Scalar(127, 255, 127), gray);
		cvtColor(frame, gray, COLOR_BGR2GRAY);
		threshold(gray, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);
		bitwise_not(frame, frame);
		flip(frame, frame, 1);
		imshow("video-demo", frame);
		writer.write(frame);
		char c = waitKey(100);
		if (c == 27) {
			break;
		}
	}

	waitKey(0);
	return 0;
}

背景消除建模(BSM)

BSM用于背景比较固定不变的场景中
先用摄像头将背景学习几分钟,再调用算法,此时动态的物体会被识别,如果动态物体长期处于静态,也会被当做为背景处理。
在这里插入图片描述
在这里插入图片描述
效果展示
在这里插入图片描述

#include "pch.h"
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char** argv) {
	VideoCapture capture;
	capture.open("E:/opencv/opencv视频/1.ts");
	if (!capture.isOpened())
	{
		cout << "could not load the video..." << endl;
		return -1;
	}

	Mat frame;
	Mat bsmaskMOG2, bsmaskKNN;
	namedWindow("input video", CV_WINDOW_NORMAL);
	namedWindow("MOG2", CV_WINDOW_NORMAL);

	Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	Ptr<BackgroundSubtractor>pMOG2 = createBackgroundSubtractorMOG2();
	Ptr<BackgroundSubtractor>pKNN = createBackgroundSubtractorKNN();
	while (capture.read(frame))
	{
		imshow("input video", frame);
		//MOG  BS
		pMOG2->apply(frame, bsmaskMOG2);
		morphologyEx(bsmaskMOG2, bsmaskMOG2, MORPH_OPEN, kernel, Point(-1, -1));
		imshow("MOG2", bsmaskMOG2);

		//KNN BS mask
		//pKNN->apply(frame, bsmaskKNN);
		//imshow("KNN", bsmaskKNN);
		char c = waitKey(100);
		if (c == 27)
		{
			break;
		}
	}
	capture.release();

	waitKey(0);
	return 0;
}

对象检测与跟踪(基于颜色)

在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述

#include "pch.h"
#include <iostream>
#include<opencv2/opencv.hpp>

using namespace std;
using namespace cv;

Rect roi; // bounding box of the tracked object, updated every frame by processFrame
void processFrame(Mat &mask, Rect &rect); // forward declaration (defined below main)
int main(int argc,char** argv)
{
	VideoCapture capture;
	capture.open("D:/006.mp4");
	if (!capture.isOpened())
	{
		printf("could not load video data...\n");
		return -1;
	}
	Mat frame, mask;
	Mat kernel1 = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	Mat kernel2 = getStructuringElement(MORPH_RECT, Size(5, 5), Point(-1, -1));

	namedWindow("input video", CV_WINDOW_NORMAL);
	namedWindow("track mask", CV_WINDOW_NORMAL);

	while (capture.read(frame))
	{

		inRange(frame, Scalar(0, 127, 0), Scalar(120, 255, 120), mask);//过滤
		morphologyEx(mask, mask, MORPH_OPEN, kernel1, Point(-1, -1), 1);//开运算
		dilate(mask, mask, kernel2, Point(-1, -1), 4);//膨胀
		imshow("track mask", mask);
		processFrame(mask, roi);
		//画出矩形
		rectangle(frame, roi, Scalar(0, 0, 255), 3, 8, 0);
		char c = waitKey(100);
		if (c == 27)
		{
			break;
		}
	}
	waitKey(0);
	return 0;
}

void processFrame(Mat &mask, Rect &rect)
{
	// Locate the largest external contour in `mask` and store its bounding
	// rectangle in `rect`; if no contour is found, `rect` is zeroed out.
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(mask, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));
	if (contours.empty())
	{
		// Nothing detected this frame: collapse the tracked rectangle.
		rect.x = rect.y = rect.width = rect.height = 0;
		return;
	}
	double bestArea = 0.0;
	for (const auto &contour : contours)
	{
		// Keep the bounding box of the contour with the largest area seen so far.
		const double area = contourArea(contour);
		if (area > bestArea)
		{
			bestArea = area;
			rect = boundingRect(contour);
		}
	}
}

void flip(InputArray src, OutputArray dst, int flipCode)
参数:

src,输入矩阵

dst,翻转后矩阵,类型与src一致

flipCode,翻转模式,flipCode==0垂直翻转(沿X轴翻转),flipCode>0水平翻转(沿Y轴翻转),flipCode<0水平垂直翻转(先沿X轴翻转,再沿Y轴翻转,等价于旋转180°)

光流的对象跟踪

在这里插入图片描述
稀疏光流(KLT):
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

Mat frame, gray;
Mat prev_frame, prev_gray;
vector<Point2f> features; // Shi-Tomasi corner detection output (candidate feature points)

vector<Point2f> iniPoints; // initial positions of tracked points (origins of the track lines)
vector<Point2f> fpts[2]; // feature point positions: [0] = current frame, [1] = tracked into next frame

vector<uchar> status; // per-point flag: nonzero if LK tracking succeeded for that point
vector<float> errors; // per-point tracking error reported by calcOpticalFlowPyrLK

void drawFeature(Mat &inFrame);
void detectFeatures(Mat &inFrame, Mat &ingray);
void klTrackFeature();
void drawTrackLines();
int main(int argc, char** argv) {
	//VideoCapture capture(0);
	VideoCapture capture;
	capture.open("D:/vcprojects/images/video_006.mp4");
	if (!capture.isOpened()) {
		printf("could not load video file...\n");
		return -1;
	}

	namedWindow("camera input", CV_WINDOW_AUTOSIZE);
	while (capture.read(frame)) {
		//flip(frame, frame, 1);
		cvtColor(frame, gray, COLOR_BGR2GRAY);
		if (fpts[0].size() < 40) {
			detectFeatures(frame, gray);
			fpts[0].insert(fpts[0].end(), features.begin(), features.end());
			iniPoints.insert(iniPoints.end(), features.begin(), features.end());
		}
		else {
			printf("tttttttttttttttttttttttttttttttttttttttt...\n");
		}

		if (prev_gray.empty()) {
			gray.copyTo(prev_gray);
		}

		klTrackFeature();
		drawFeature(frame);

		// 更新前一帧数据
		gray.copyTo(prev_gray);
		frame.copyTo(prev_frame);
		imshow("camera input", frame);

		char c = waitKey(50);
		if (c == 27) {
			break;
		}
	}

	waitKey(0);
	return 0;
}

void detectFeatures(Mat &inFrame, Mat &ingray) {
	// Shi-Tomasi corner detection on `ingray`; results replace the global `features`.
	// Fixed: maxCorners and blockSize were declared `double` although
	// goodFeaturesToTrack takes them as `int` (silent narrowing conversion).
	int maxCorners = 5000;      // upper bound on corners returned
	double qualityLevel = 0.01; // minimal accepted quality relative to the best corner
	double minDistance = 10;    // minimum euclidean distance between returned corners
	int blockSize = 3;          // neighborhood size for the corner covariance matrix
	double k = 0.04;            // Harris free parameter (unused: useHarrisDetector=false)
	goodFeaturesToTrack(ingray, features, maxCorners, qualityLevel, minDistance, Mat(), blockSize, false, k);
	cout << "detect features : " << features.size() << endl;
}

void drawFeature(Mat &inFrame) {
	// Mark every currently tracked feature point with a small red dot.
	for (const Point2f &pt : fpts[0]) {
		circle(inFrame, pt, 2, Scalar(0, 0, 255), 2, 8, 0);
	}
}

void klTrackFeature() {
	// KLT
	calcOpticalFlowPyrLK(prev_gray, gray, fpts[0], fpts[1], status, errors);// fpts[1]新特征点保存的位置
	int k = 0;
	// 特征点过滤
	for (int i = 0; i < fpts[1].size(); i++) {
		double dist = abs(fpts[0][i].x - fpts[1][i].x) + abs(fpts[0][i].y - fpts[1][i].y);  //计算前一个点和后一个点的距离
		if (dist > 2 && status[i]) {     //如果距离大于2,将i赋值给k
			iniPoints[k] = iniPoints[i];
			fpts[1][k++] = fpts[1][i];
		}
	}
	// 保存特征点并绘制跟踪轨迹
	iniPoints.resize(k);
	fpts[1].resize(k);
	drawTrackLines();

	std::swap(fpts[1], fpts[0]);  //交换
}

void drawTrackLines() {
	for (size_t t=0; t<fpts[1].size(); t++) {
		line(frame, iniPoints[t], fpts[1][t], Scalar(0, 255, 0), 1, 8, 0);
		circle(frame, fpts[1][t], 2, Scalar(0, 0, 255), 2, 8, 0);
	}
}

稠密光流(FB,Farneback 法):
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值