I have recently been studying how to use SIFT feature extraction together with RANSAC filtering in OpenCV for feature matching, and found that much of the code floating around no longer works with newer OpenCV releases. This post is compiled from the experience shared by other bloggers; I hope it is useful.
#include "cv.h"
#include "highgui.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace cv;
// Read the two images
Mat img_test = imread("1.bmp");
Mat img_template = imread("2.bmp");
// SIFT keypoint detection
SiftFeatureDetector detector;              // the SIFT keypoint detector
vector<KeyPoint> keypoint1, keypoint2;     // containers for the keypoints of the two images
detector.detect(img_test, keypoint1);
detector.detect(img_template, keypoint2);
Or, using the factory interface:
Ptr<FeatureDetector> sift_detector = FeatureDetector::create("SIFT");
sift_detector->detect(img_test, keypoint1);
sift_detector->detect(img_template, keypoint2);
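One pitfall with the factory path on OpenCV 2.4.x: the "SIFT" name is only registered after the nonfree module has been initialised, otherwise create() returns an empty pointer. A small guard (my addition, not from the original post) is to call initModule_nonfree() once before create("SIFT") and then verify the detector really exists:

initModule_nonfree();              // registers SIFT/SURF with the Algorithm factory (OpenCV 2.4.x)
CV_Assert(!sift_detector.empty()); // create("SIFT") returns an empty Ptr if nonfree was not initialised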
// Draw the detected keypoints
Mat out_img1;
Mat out_img2;
drawKeypoints(img_test, keypoint1, out_img1);
drawKeypoints(img_template, keypoint2, out_img2);
imshow("Keypoints 1", out_img1);
imshow("Keypoints 2", out_img2);
// Extract a descriptor (feature vector, 128-dimensional) for each keypoint
SiftDescriptorExtractor extractor;
Mat descriptor1, descriptor2;
extractor.compute(img_test, keypoint1, descriptor1);
extractor.compute(img_template, keypoint2, descriptor2);
Or, using the factory interface:
Ptr<DescriptorExtractor> sift_desc_extract = DescriptorExtractor::create("SIFT");
sift_desc_extract->compute(img_test, keypoint1, descriptor1);
sift_desc_extract->compute(img_template, keypoint2, descriptor2);
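As a quick sanity check (my addition): each SIFT descriptor is a 128-dimensional float vector, and compute() keeps the keypoint vector in sync with the descriptor matrix, so there is one row per keypoint:

CV_Assert(descriptor1.rows == (int)keypoint1.size());               // one row per keypoint
CV_Assert(descriptor1.cols == 128 && descriptor1.type() == CV_32F); // 128-D float descriptors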
// Matching: compute the Euclidean distance between the descriptor vectors of two keypoints;
// a pair whose distance is below a threshold is treated as a match
// (a simple thresholding sketch follows the drawMatches call below)
BFMatcher matcher(NORM_L2);    // BFMatcher replaces the old BruteForceMatcher<L2<float>> class in OpenCV 2.4+
vector<DMatch> matches;
Mat img_matches;
matcher.match(descriptor1, descriptor2, matches);
Or, using the factory interface:
Ptr<DescriptorMatcher> bruteforce_matcher = DescriptorMatcher::create("BruteForce");
bruteforce_matcher->match(descriptor1, descriptor2, matches);
drawMatches(img_test, keypoint1, img_template, keypoint2, matches, img_matches);
imshow("Raw matches", img_matches);
// Next, use RANSAC to remove mismatched point pairs.
// The RANSAC outlier-removal step has three parts:
// 1) Align the keypoints according to matches and convert their coordinates to float.
// 2) Estimate the fundamental matrix with findFundamentalMat, which also returns RansacStatus.
// 3) Use RansacStatus to drop the mismatched pairs, i.e. those with RansacStatus[i] == 0.
// Align the keypoints according to matches and convert their coordinates to float
vector<KeyPoint> R_keypoint1, R_keypoint2;
for (size_t i = 0; i < matches.size(); i++)
{
    R_keypoint1.push_back(keypoint1[matches[i].queryIdx]);
    R_keypoint2.push_back(keypoint2[matches[i].trainIdx]);
    // R_keypoint1 stores the keypoints of img_test that have a match in img_template;
    // matches holds, for each matched pair, its index into keypoint1 and keypoint2
}
// Convert the coordinates
vector<Point2f> p1, p2;
for (size_t i = 0; i < matches.size(); i++)
{
    p1.push_back(R_keypoint1[i].pt);
    p2.push_back(R_keypoint2[i].pt);
}
// Use the fundamental matrix to reject mismatched points
vector<uchar> RansacStatus;
Mat Fundamental = findFundamentalMat(p1, p2, RansacStatus, FM_RANSAC);
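findFundamentalMat fills RansacStatus with one entry per point pair: non-zero means the pair is a RANSAC inlier, zero means it was rejected as a mismatch. A one-liner (my addition) to see how many matches survive:

int num_inliers = countNonZero(Mat(RansacStatus));   // non-zero entries mark inlier pairs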
vector<KeyPoint> RR_keypoint1, RR_keypoint2;
vector<DMatch> RR_matches;   // RR_keypoint and RR_matches hold the filtered keypoints and matches
int index = 0;
for (size_t i = 0; i < matches.size(); i++)
{
    if (RansacStatus[i] != 0)
    {
        RR_keypoint1.push_back(R_keypoint1[i]);
        RR_keypoint2.push_back(R_keypoint2[i]);
        matches[i].queryIdx = index;   // remap the indices so they point into the filtered keypoint vectors
        matches[i].trainIdx = index;
        RR_matches.push_back(matches[i]);
        index++;
    }
}
Mat img_RR_matches;
drawMatches(img_test, RR_keypoint1, img_template, RR_keypoint2, RR_matches, img_RR_matches);
imshow("After RANSAC Image",img_RR_matches);