基于opencv的两帧法匹配

由于第一帧与第二帧很难做到实时匹配,故采用两帧法:第一帧与第二帧匹配,第二帧与第三帧匹配,计算出两个匹配结果的H矩阵;在默认连续帧的情况下图像不会发生较大变化,故第三帧的特征点为 P3=P1*H1*H2

// A highlighted block
//var foo = 'bar';
#include <stdio.h>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace std;
using namespace cv;
// Forward declaration: mouse callback attached to the "a0" preview window in main().
void on_MouseHandle(int event, int x, int y, int flags, void*param);
// Global stop flag: set to 1 by on_MouseHandle on a left click, which
// ends the frame-preview loop in main() and starts the matching pipeline.
int a = 0;
int main()
{
	VideoCapture capture("2.avi");
	Mat a0;
	namedWindow("a0");
	while (true)
	{
		if (!capture.read(a0))
		{
			cout << "读取视频结束" << endl;
			return 0;
		}
		if (a == 1)
			break;
		imshow("a0", a0);
		setMouseCallback("a0", on_MouseHandle, (void*)&a0);
		waitKey(1);
	}
	Mat img_1, img_2,img_3,img_4,img_5;
	Mat des1, des2,des3,des4;
	Mat result,result1;
	int minHessian = 2000;
	capture >> img_1;
	capture >> img_2;
	Ptr<Feature2D>surf = xfeatures2d::SURF::create(minHessian);
	vector<KeyPoint> keypoint1, keypoint2,keypoint3,keypoint4,keypoint5;
	double start1 = static_cast<double>(getTickCount());
	surf->detect(img_1, keypoint1);
	surf->detect(img_2, keypoint2);
	surf->compute(img_1, keypoint1, des1);
	surf->compute(img_2, keypoint2, des2);
	BFMatcher matcher,matcher1;
	vector<DMatch>match,match1;
	matcher.match(des1, des2, match);
	drawMatches(img_1, keypoint1, img_2, keypoint2, match, result, Scalar(255,0,255), Scalar(255,255,0),vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
	double time1 = ((double)getTickCount() - start1) / getTickFrequency();
	cout << "所用时间为:" << time1 << "秒" << endl;
	std::vector<Point2f> obj;
	std::vector<Point2f> scene;
	for (size_t i = 0; i < match.size(); i++)
	{
		//-- Get the keypoints from the good matches
		obj.push_back(keypoint1[match[i].queryIdx].pt);
		scene.push_back(keypoint2[match[i].trainIdx].pt);
	}
	Mat H = findHomography(obj, scene, RANSAC);
	for (int i = 0; i < H.cols; i++){
		for (int j = 0; j<H.rows; j++)
			std::cout << H.at<double>(i, j) << "  ";
		//std::cout<<((double *)H.data)[i*H.cols+j]<< "  ";
		//std::cout<<H.type()<< "  ";
		std::cout << std::endl;
	}
	imshow("result", result);
	img_3=img_2.clone();
	//imshow("3", img_3);
	capture >> img_4;
	img_5 = img_4.clone();
	Ptr<ORB>orb = ORB::create();
	double start = static_cast<double>(getTickCount());
	orb->detect(img_3, keypoint3);
	orb->detect(img_4, keypoint4);
	orb->compute(img_3, keypoint3, des3);
	orb->compute(img_4, keypoint4, des4);
	matcher1.match(des3, des4, match1);
	drawMatches(img_3, keypoint3, img_4, keypoint4, match1, result1, Scalar(255, 0, 255), Scalar(255, 255, 0), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
	double time = ((double)getTickCount() - start) / getTickFrequency();
	cout << "所用时间为:" << time << "秒" << endl;
	imshow("result1", result1);
	std::vector<Point2f> obj1;
	std::vector<Point2f> scene1;
	for (size_t i = 0; i < match1.size(); i++)
	{
		//-- Get the keypoints from the good matches
		obj1.push_back(keypoint3[match1[i].queryIdx].pt);
		scene1.push_back(keypoint4[match1[i].trainIdx].pt);
	}
	Mat H1 = findHomography(obj1, scene1, RANSAC);
	for (int i = 0; i < H1.cols; i++){
		for (int j = 0; j<H1.rows; j++)
			std::cout << H1.at<double>(i, j) << "  ";
		//std::cout<<((double *)H.data)[i*H.cols+j]<< "  ";
		//std::cout<<H.type()<< "  ";
		std::cout << std::endl;
	}
	Mat H2 = H*H1;
	for (int i = 0; i < H2.cols; i++){
		for (int j = 0; j<H2.rows; j++)
			std::cout << H2.at<double>(i, j) << "  ";
		//std::cout<<((double *)H.data)[i*H.cols+j]<< "  ";
		//std::cout<<H.type()<< "  ";
		std::cout << std::endl;
	}
	for (int i = 0; i < keypoint1.size(); i++)
	{
		keypoint5.push_back(keypoint1.at(i));
		keypoint5.at(i).pt.x = keypoint1.at(i).pt.x*H2.at<double>(0, 0) + keypoint1.at(i).pt.y*H2.at<double>(0, 1) + H2.at<double>(0, 2);
		keypoint5.at(i).pt.y = keypoint1.at(i).pt.x*H2.at<double>(1, 0) + keypoint1.at(i).pt.y*H2.at<double>(1, 1) + H2.at<double>(1, 2);
	}
	drawKeypoints(img_5, keypoint5, img_5,Scalar(0,255,255));
	drawKeypoints(img_1, keypoint1, img_1, Scalar(0, 255, 255));
	imshow("img_1", img_1);
	imshow("img_5", img_5);
	waitKey(0);
	return 0;
}
// Mouse callback for the "a0" preview window in main().
// A left-button click sets the global flag `a`, which stops the preview
// loop and starts the matching pipeline. The other parameters (position,
// flags, user data) are unused.
// Fix: the parameter was misspelled `falgs`; renamed to `flags` to match
// the forward declaration at the top of the file.
void on_MouseHandle(int event, int x, int y, int flags, void* param)
{
	if (event == EVENT_LBUTTONDOWN)
	{
		a = 1;
	}
}
  • 0
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
OpenCV是一个广泛使用的计算机视觉库,它提供了许多强大的工具,包括图像处理和视频分析功能。在视频处理中,"抽帧"(frame extraction)通常指的是从连续的视频流中选择并提取特定帧的过程,而"补帧"(frame interpolation或frame reconstruction)则是指在原始帧之间插入额外的帧,以提高视频的流畅度或进行特殊效果处理。 **抽帧**: - 通过VideoCapture接口读取视频文件或实时摄像头数据 - 使用`read()`或`grab()`函数获取单帧,例如`cv2.VideoCapture.read()`会返回一帧(图像矩阵)和是否为结束标志 - 如果需要抽取特定帧,可以设置帧率或索引,如`cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index)` **补帧**: - **插值方**:OpenCV提供了几种插值技术,如cv2.INTER_LINEAR(线性插值)、cv2.INTER_CUBIC(立方插值)等,用于生成新帧 - **双线性插值**(doubly interpolated frame):对于视频帧率较低的情况,可以通过对相邻帧进行两次线性插值得到中间帧 - **运动估计**:利用光流(如calcOpticalFlowPyrLK)分析帧间运动,然后基于运动信息生成新的帧 - **帧率转换**:有时候为了匹配特定输出格式或设备要求,需要改变视频帧率,这时可能需要插帧或去帧操作 **相关问题**: 1. OpenCV如何使用插值技术实现补帧? 2. 如何利用OpenCV进行光流的运动估计? 3. OpenCV是否有现成函数可以直接进行帧率转换并处理插帧或去帧?

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值