一、使用surf算法实现
1、绘制关键点函数
void drawKeyPoints(const Mat &image,const vector<KeyPoint>&keypoints,Mat &outImage,const Scalar &color=Scalar::all(-1),int flags = DrawMatchesFlags::DEFAULT)
参数一:输入图像
参数二:根据图像得到的特征点
参数三:输出图像,其内容取决于参数五
参数四:关键点的颜色
参数五:绘制关键点的特征标识符
DEFAULT=0;对每一个关键点只绘制中间点
DRAW_OVER_OUTIMG=1;不创建输出图像阵列,而是在输出图像上绘制匹配对
NOT_DRAW_SINGLE_POINTS=2;单点特征点不被绘制
DRAW_RICH_KEYPOINTS=4;对每一个关键点,绘制带大小和方向的关键点圆圈
2、绘制相匹配的两图像的特征点
(1)、void drawMatches(const Mat &img1,
const vector<KeyPoint>& keypoints1,
const Mat &img2,
const vector<KeyPoint>&keypoints2,
const vector<DMatch>&matches1to2,
Mat &outImg,
const Scalar &matchColor=Scalar::all(-1),
const Scalar &singlePointColor=Scalar::all(-1),
const vector<char>&matchesMask=vector<char>(),
int flags=DrawMatchesFlags::DEFAULT)
(2)、
void drawMatches(const Mat &img1,
const vector<KeyPoint>& keypoints1,
const Mat &img2,
const vector<KeyPoint>&keypoints2,
const vector<vector<DMatch> >&matches1to2,
Mat &outImg,
const Scalar &matchColor=Scalar::all(-1),
const Scalar &singlePointColor=Scalar::all(-1),
const vector<vector<char> >&matchesMask=vector<vector<char> >(),
int flags=DrawMatchesFlags::DEFAULT)
参数一:第一幅图像
参数二:第一幅图像的特征点
参数三:第二幅图像
参数四:第二幅图像的特征点
参数五:第一幅图像到第二幅图像的匹配(matches1to2)
参数六:输出图像,其内容取决于参数十flags
参数七:匹配的颜色,即线和关键点的颜色
参数八:单一特征点的颜色
参数九:确定哪些匹配要进行掩膜
参数十:标识符
DEFAULT=0;对每一个关键点只绘制中间点
DRAW_OVER_OUTIMG=1;不创建输出图像阵列,而是在输出图像上绘制匹配对
NOT_DRAW_SINGLE_POINTS=2;单点特征点不被绘制
DRAW_RICH_KEYPOINTS=4;对每一个关键点,绘制带大小和方向的关键点圆圈
示例1:绘制特征点
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;

// Example 1: detect SURF keypoints in two images and display them.
// Usage: program <image1> <image2>
int main(int argc, char *argv[])
{
    // Guard against missing command-line arguments before dereferencing argv.
    if (argc < 3)
    {
        cout << "usage: " << argv[0] << " <image1> <image2>" << endl;
        return -1;
    }
    Mat srcImage1 = imread(argv[1]);
    Mat srcImage2 = imread(argv[2]);
    if (!srcImage1.data || !srcImage2.data)
    {
        cout << "读取图片出错" << endl;
        return -1;
    }
    imshow("srcImage1", srcImage1);
    imshow("srcImage2", srcImage2);

    // Hessian threshold for SURF: higher -> fewer, more stable keypoints.
    int minHessian = 400;
    SurfFeatureDetector detector(minHessian);
    vector<KeyPoint> keyPoint1, keyPoint2;
    detector.detect(srcImage1, keyPoint1);
    detector.detect(srcImage2, keyPoint2);

    Mat img1, img2;
    drawKeypoints(srcImage1, keyPoint1, img1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
    // BUG FIX: the original passed keyPoint1 here, drawing image 1's
    // keypoints at the wrong positions on image 2.
    drawKeypoints(srcImage2, keyPoint2, img2, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
    imshow("img1", img1);
    imshow("img2", img2);
    waitKey(0);
    return 0;
}
示例2:特征提取
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/highgui/highgui.hpp>
// BUG FIX: BruteForceMatcher lives in the legacy module in OpenCV 2.4;
// without this header the program does not compile (example 3 already
// includes it for the same reason).
#include <opencv2/legacy/legacy.hpp>
#include <iostream>
using namespace std;
using namespace cv;

// Example 2: extract SURF descriptors from two images and brute-force
// match them with an L2 distance.
// Usage: program <image1> <image2>
int main(int argc, char *argv[])
{
    // Guard against missing command-line arguments before dereferencing argv.
    if (argc < 3)
    {
        cout << "usage: " << argv[0] << " <image1> <image2>" << endl;
        return -1;
    }
    Mat srcImage1 = imread(argv[1]);
    Mat srcImage2 = imread(argv[2]);
    if (!srcImage1.data || !srcImage2.data)
    {
        cout << "读取图片出错" << endl;
        return -1;
    }
    imshow("srcImage1", srcImage1);
    imshow("srcImage2", srcImage2);

    // Hessian threshold for SURF: higher -> fewer, more stable keypoints.
    int minHessian = 700;
    SurfFeatureDetector detector(minHessian);
    vector<KeyPoint> keyPoint1, keyPoint2;
    detector.detect(srcImage1, keyPoint1);
    detector.detect(srcImage2, keyPoint2);

    // Compute SURF descriptors for the detected keypoints.
    SurfDescriptorExtractor extractor;
    Mat descriptor1, descriptor2;
    extractor.compute(srcImage1, keyPoint1, descriptor1);
    extractor.compute(srcImage2, keyPoint2, descriptor2);

    // Brute-force matcher with Euclidean (L2) distance on float descriptors.
    BruteForceMatcher<L2<float> > matcher;
    vector<DMatch> matches;
    matcher.match(descriptor1, descriptor2, matches);

    Mat imageMatches;
    drawMatches(srcImage1, keyPoint1, srcImage2, keyPoint2, matches, imageMatches);
    imshow("匹配图", imageMatches);
    waitKey(0);
    return 0;
}
二、使用flann算法实现
void DescriptorMatcher::match(
const Mat &queryDescriptors,//查询描述符集
const Mat &trainDescriptors,//训练描述符集
vector<DMatch>&matches,//匹配结果集
const Mat& mask=Mat())//指定输入查询和训练描述符允许匹配的掩膜
void DescriptorMatcher::match(
const Mat &queryDescriptors,//查询描述符集
vector<DMatch>&matches,//匹配结果集
const vector<Mat>&masks = vector<Mat>())//一组掩膜,指定输入查询和训练描述符允许匹配的掩膜
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/legacy/legacy.hpp>
#include <iostream>
using namespace std;
using namespace cv;

// Example 3: match SURF descriptors of two images with a FLANN-based
// matcher, then keep only matches close to the minimum distance.
// Usage: program <image1> <image2>
int main(int argc, char *argv[])
{
    // Guard against missing command-line arguments before dereferencing argv.
    if (argc < 3)
    {
        cout << "usage: " << argv[0] << " <image1> <image2>" << endl;
        return -1;
    }
    Mat srcImage1 = imread(argv[1]);
    Mat srcImage2 = imread(argv[2]);
    if (!srcImage1.data || !srcImage2.data)
    {
        cout << "读取图片出错" << endl;
        return -1;
    }
    imshow("srcImage1", srcImage1);
    imshow("srcImage2", srcImage2);

    // Hessian threshold for SURF: higher -> fewer, more stable keypoints.
    int minHessian = 300;
    SurfFeatureDetector detector(minHessian);
    vector<KeyPoint> keyPoint1, keyPoint2;
    detector.detect(srcImage1, keyPoint1);
    detector.detect(srcImage2, keyPoint2);

    // Compute SURF descriptors for the detected keypoints.
    SurfDescriptorExtractor extractor;
    Mat descriptor1, descriptor2;
    extractor.compute(srcImage1, keyPoint1, descriptor1);
    extractor.compute(srcImage2, keyPoint2, descriptor2);

    // Approximate nearest-neighbour matcher (FLANN).
    FlannBasedMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptor1, descriptor2, matches);

    // Find the smallest and largest match distances.
    // matcher.match() yields exactly one DMatch per query row, so
    // matches.size() == descriptor1.rows.
    double max_dist = 0, min_dist = 100;
    // BUG FIX: the original indexed "descriptors1", an undeclared name
    // (the descriptor matrix is called descriptor1).
    for (int i = 0; i < descriptor1.rows; i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    printf("max_dist:%f\n", max_dist);
    printf("min_dist:%f\n", min_dist);

    // Keep only matches whose distance is below twice the minimum.
    // NOTE(review): if min_dist is 0 this filter rejects everything;
    // the official tutorial uses max(2*min_dist, 0.02) — confirm intent.
    vector<DMatch> good_matches;
    for (int i = 0; i < descriptor1.rows; i++)
    {
        if (matches[i].distance < 2 * min_dist)
        {
            good_matches.push_back(matches[i]);
        }
    }

    Mat imageMatches;
    drawMatches(srcImage1, keyPoint1, srcImage2, keyPoint2,
                good_matches, imageMatches, Scalar::all(-1), Scalar::all(-1),
                vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    // Use an unsigned index to match vector::size_type (avoids the
    // signed/unsigned comparison of the original).
    for (size_t i = 0; i < good_matches.size(); i++)
    {
        printf("符合条件的匹配点[%d]特征点1:%d--特征点2:%d\n",
               (int)i, good_matches[i].queryIdx, good_matches[i].trainIdx);
    }
    imshow("匹配图", imageMatches);
    waitKey(0);
    return 0;
}
三、使用surf检测特征点,flann匹配
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;

// Example 4: train a FLANN matcher on one reference image, then match
// SURF keypoints of live camera frames against it until 'q' is pressed.
// Usage: program <train_image>
int main(int argc, char *argv[])
{
    // Guard against a missing argument / unreadable image before use.
    if (argc < 2)
    {
        cout << "usage: " << argv[0] << " <train_image>" << endl;
        return -1;
    }
    Mat trainImage = imread(argv[1]);
    if (!trainImage.data)
    {
        cout << "读取图片出错" << endl;
        return -1;
    }
    Mat trainImage_gray;
    imshow("原始图", trainImage);
    cvtColor(trainImage, trainImage_gray, COLOR_BGR2GRAY);

    // Detect SURF keypoints and extract descriptors from the train image.
    vector<KeyPoint> train_keyPoint;
    Mat trainDescriptor;
    // BUG FIX: class name was misspelled "SurfFeaturDetector".
    SurfFeatureDetector featureDetector(80);
    featureDetector.detect(trainImage_gray, train_keyPoint);
    SurfDescriptorExtractor featureExtractor;
    featureExtractor.compute(trainImage_gray, train_keyPoint, trainDescriptor);

    // Create a FLANN-based descriptor matcher and pre-train it.
    // BUG FIX: was "FlannBaseMatcher" (no such class) and "matcher,add"
    // (comma operator — add() was never called).
    FlannBasedMatcher matcher;
    vector<Mat> train_desc_collection(1, trainDescriptor);
    matcher.add(train_desc_collection);
    matcher.train();

    // Open the default camera.
    VideoCapture cap(0);
    if (!cap.isOpened())
    {
        cout << "无法打开摄像头" << endl;
        return -1;
    }

    // Process frames until the user presses 'q'.
    while (char(waitKey(1)) != 'q')
    {
        int64 time0 = getTickCount();
        Mat testImage, testImage_gray;
        cap >> testImage;
        if (testImage.empty())
            continue;
        cvtColor(testImage, testImage_gray, COLOR_BGR2GRAY);

        vector<KeyPoint> test_keyPoint;
        Mat testDescriptor;
        featureDetector.detect(testImage_gray, test_keyPoint);
        featureExtractor.compute(testImage_gray, test_keyPoint, testDescriptor);

        // k-nearest-neighbour match (k=2) for Lowe's ratio test.
        vector<vector<DMatch> > matches;
        matcher.knnMatch(testDescriptor, matches, 2);

        // Lowe's ratio test: keep a match only when the best candidate is
        // clearly better than the runner-up.
        vector<DMatch> goodMatches;
        // BUG FIX: was "matches,size()" (comma operator) and the
        // push_back line was missing its ';'.
        for (unsigned int i = 0; i < matches.size(); i++)
        {
            // knnMatch can return fewer than 2 neighbours per query row;
            // guard before reading matches[i][1].
            if (matches[i].size() >= 2 &&
                matches[i][0].distance < 0.6 * matches[i][1].distance)
                goodMatches.push_back(matches[i][0]);
        }

        // Draw the surviving matches and show the processing rate (fps).
        Mat dstImage;
        drawMatches(testImage, test_keyPoint, trainImage,
                    train_keyPoint, goodMatches, dstImage);
        imshow("匹配窗口", dstImage);
        // BUG FIX: getTickCount is a function — the original passed the
        // function pointer instead of calling it.
        cout << "当前频率为:" << getTickFrequency() / (getTickCount() - time0) << endl;
    }
    return 0;
}
四、SIFT算法暴力匹配(比SURF算法慢3倍)
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;

// Example 5: same pipeline as example 4, but with SIFT features and a
// brute-force matcher (roughly 3x slower than the SURF variant).
// Usage: program <train_image>
int main(int argc, char *argv[])
{
    // Guard against a missing argument / unreadable image before use.
    if (argc < 2)
    {
        cout << "usage: " << argv[0] << " <train_image>" << endl;
        return -1;
    }
    Mat trainImage = imread(argv[1]);
    if (!trainImage.data)
    {
        cout << "读取图片出错" << endl;
        return -1;
    }
    Mat trainImage_gray;
    imshow("原始图", trainImage);
    cvtColor(trainImage, trainImage_gray, COLOR_BGR2GRAY);

    // Detect SIFT keypoints and extract descriptors from the train image.
    vector<KeyPoint> train_keyPoint;
    Mat trainDescriptor;
    // BUG FIX: class name was misspelled "SiftFeaturDetector".
    SiftFeatureDetector featureDetector(80);
    featureDetector.detect(trainImage_gray, train_keyPoint);
    SiftDescriptorExtractor featureExtractor;
    featureExtractor.compute(trainImage_gray, train_keyPoint, trainDescriptor);

    // Create a brute-force descriptor matcher and pre-train it.
    // (The original comment claimed FLANN; BFMatcher is brute force.)
    // BUG FIX: "matcher,add" used the comma operator — add() was never called.
    BFMatcher matcher;
    vector<Mat> train_desc_collection(1, trainDescriptor);
    matcher.add(train_desc_collection);
    matcher.train();

    // Open the default camera.
    VideoCapture cap(0);
    if (!cap.isOpened())
    {
        cout << "无法打开摄像头" << endl;
        return -1;
    }

    // Process frames until the user presses 'q'.
    while (char(waitKey(1)) != 'q')
    {
        int64 time0 = getTickCount();
        Mat testImage, testImage_gray;
        cap >> testImage;
        if (testImage.empty())
            continue;
        cvtColor(testImage, testImage_gray, COLOR_BGR2GRAY);

        vector<KeyPoint> test_keyPoint;
        Mat testDescriptor;
        featureDetector.detect(testImage_gray, test_keyPoint);
        featureExtractor.compute(testImage_gray, test_keyPoint, testDescriptor);

        // k-nearest-neighbour match (k=2) for Lowe's ratio test.
        vector<vector<DMatch> > matches;
        matcher.knnMatch(testDescriptor, matches, 2);

        // Lowe's ratio test: keep a match only when the best candidate is
        // clearly better than the runner-up.
        vector<DMatch> goodMatches;
        // BUG FIX: was "matches,size()" (comma operator) and the
        // push_back line was missing its ';'.
        for (unsigned int i = 0; i < matches.size(); i++)
        {
            // knnMatch can return fewer than 2 neighbours per query row;
            // guard before reading matches[i][1].
            if (matches[i].size() >= 2 &&
                matches[i][0].distance < 0.6 * matches[i][1].distance)
                goodMatches.push_back(matches[i][0]);
        }

        // Draw the surviving matches and show the processing rate (fps).
        Mat dstImage;
        drawMatches(testImage, test_keyPoint, trainImage,
                    train_keyPoint, goodMatches, dstImage);
        imshow("匹配窗口", dstImage);
        // BUG FIX: getTickCount is a function — the original passed the
        // function pointer instead of calling it.
        cout << "当前频率为:" << getTickFrequency() / (getTickCount() - time0) << endl;
    }
    return 0;
}
五、寻找已知物体
步骤:
(1)、使用函数findHomography寻找匹配上的关键点的变换
(2)、使用函数perspectiveTransform来映射点
1、寻找透视变换矩阵
Mat findHomography(
InputArray srcPoints,
InputArray dstPoints,
int method = 0,
double ransacReprojThreshold = 3,
OutputArray mask = noArray()
)
参数一:源平面上对应的点
参数二:目标平面对应的点
参数三:可选标识符
0 使用所有点的常规方法
CV_RANSAC 基于RANSAC鲁棒性的方法
CV_LMEDS 最小中值鲁棒性方法
参数四:取值范围1-10,处理点对为内层时,允许重投影误差的最大值。
参数五:可选掩膜,输入掩膜值会忽略鲁棒性
2、进行透视矩阵变换
void perspectiveTransform(InputArray src,OutputArray dst,InputArray m)
参数一:输入图像
参数二:输出结果
参数三:变换矩阵3*3或4*4