OpenCV之特征点模板匹配
版权声明:博文为博主原创文章,转载请注明出处 :https://blog.csdn.net/ganbelieve/article/details/89959505
介绍一下OpenCV中对于模板匹配特征点的一些应用实例
一、使用xfeatures2d模块进行特征点的匹配
OpenCV提供有几个特征点算子匹配,其中最初了解到SIFT和SURF算子的特征匹配
而SIFT算子匹配的性能没有SURF性能好,故下面介绍的程序使用SURF进行实现,其他的具体关于特征点匹配的问题可以自行网上google
输入的图像源
需要通过特征点进行搜索匹配图像源的图片
然后进行代码的demo测试
//特征点匹配以及图片匹配的查找
#include <algorithm>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <opencv2/features2d.hpp>
using namespace std;
using namespace cv;
using namespace cv::xfeatures2d;
// Match SURF keypoints between srcImage1 (object) and srcImage2 (scene) with a
// FLANN-based matcher, estimate a homography from the good matches, draw both
// the match lines and the detected region, and return the visualization image.
// Returns an empty Mat if no descriptors could be extracted.
Mat Flannfeaturecompare(Mat srcImage1, Mat srcImage2)
{
    const int hessPara = 400;  // Hessian threshold for the SURF detector
    Ptr<SURF> detector = SURF::create(hessPara);

    vector<KeyPoint> keypoints1, keypoints2;
    Mat descriptors1, descriptors2;
    detector->detectAndCompute(srcImage1, Mat(), keypoints1, descriptors1);
    detector->detectAndCompute(srcImage2, Mat(), keypoints2, descriptors2);

    // Guard: matcher.match() asserts on empty descriptor sets.
    if (descriptors1.empty() || descriptors2.empty())
    {
        cout << "no SURF descriptors found in one of the images" << endl;
        return Mat();
    }

    FlannBasedMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);

    // Scan the match distances to find the best (smallest) one.
    double max_dist = 0;
    double min_dist = 1000;
    for (size_t i = 0; i < matches.size(); i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist)
            min_dist = dist;
        if (dist > max_dist)
            max_dist = dist;
    }
    cout << "max_dist=" << max_dist << endl << "min_dist=" << min_dist << endl;

    // Keep only "good" matches. The floor of 0.02 avoids rejecting everything
    // when a perfect match makes min_dist == 0.
    const double goodThresh = max(5 * min_dist, 0.02);
    vector<DMatch> matchVec;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if (matches[i].distance < goodThresh)
            matchVec.push_back(matches[i]);
    }

    Mat matchMat;
    drawMatches(srcImage1, keypoints1, srcImage2, keypoints2, matchVec, matchMat,
                Scalar::all(-1), Scalar::all(-1), vector<char>(),
                DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    // Collect the matched point pairs for the homography estimation.
    vector<Point2f> obj, scene;
    for (size_t i = 0; i < matchVec.size(); i++)
    {
        obj.push_back(keypoints1[matchVec[i].queryIdx].pt);    // object-image points
        scene.push_back(keypoints2[matchVec[i].trainIdx].pt);  // scene-image points
    }

    // findHomography needs at least 4 point pairs; bail out gracefully otherwise.
    if (obj.size() < 4)
    {
        cout << "not enough good matches to estimate a homography" << endl;
        imshow("ObjectMat", matchMat);
        return matchMat;
    }
    Mat H = findHomography(obj, scene, CV_RANSAC);  // RANSAC rejects outlier pairs
    if (H.empty())
    {
        cout << "homography estimation failed" << endl;
        imshow("ObjectMat", matchMat);
        return matchMat;
    }

    // Project the corners of the object image into the scene image.
    vector<Point2f> objCorner(4), sceneCors(4);
    objCorner[0] = Point2f(0.f, 0.f);
    objCorner[1] = Point2f((float)srcImage1.cols, 0.f);
    objCorner[2] = Point2f((float)srcImage1.cols, (float)srcImage1.rows);
    objCorner[3] = Point2f(0.f, (float)srcImage1.rows);
    perspectiveTransform(objCorner, sceneCors, H);

    // Axis-aligned bounding box of the projected corners. Initializing the
    // extremes from the first corner (instead of a magic 1000) works for
    // images of any size.
    float min_x = sceneCors[0].x, box_max_x = sceneCors[0].x;
    float min_y = sceneCors[0].y, box_max_y = sceneCors[0].y;
    for (int i = 1; i < 4; i++)
    {
        min_x = min(min_x, sceneCors[i].x);
        box_max_x = max(box_max_x, sceneCors[i].x);
        min_y = min(min_y, sceneCors[i].y);
        box_max_y = max(box_max_y, sceneCors[i].y);
    }
    float max_x = box_max_x - min_x;  // bounding-box width
    float max_y = box_max_y - min_y;  // bounding-box height

    // Inside matchMat the scene image sits to the right of the object image,
    // so the box is shifted horizontally by srcImage1.cols.
    rectangle(matchMat, Rect((int)(min_x + srcImage1.cols), (int)min_y, (int)max_x, (int)max_y),
              Scalar(0, 0, 255), 2, 8, 0);

    // Also draw an (axis-aligned approximation of the) detected region on a
    // copy of the scene image itself.
    Mat dst = srcImage2.clone();
    rectangle(dst,
              Rect((int)sceneCors[0].x, (int)sceneCors[0].y,
                   (int)(sceneCors[1].x - sceneCors[0].x),
                   (int)(sceneCors[3].y - sceneCors[0].y)),
              Scalar(0, 0, 255), 2, 8, 0);
    imshow("ObjectMat", matchMat);
    imshow("dst", dst);
    return matchMat;
}
int main()
{
Mat Image1 = imread("E:/Images/feature1.jpg");
Mat Image2 = imread("E:/Images/feature.jpg");
//Mat Image1 = imread("E:/Images/cap.jpg"); //两张图片进行对比 不怎么精确
//Mat Image1 = imread("E:/Images/module1.jpg"); //截图后进行对比查找
//Mat Image2 = imread("E:/Images/module.jpg");
//resize(Image1, Image1, Size(), 0.05, 0.05);
//resize(Image2, Image2, Size(), 0.25, 0.25);
Mat feature = Flannfeaturecompare(Image1, Image2);
//imshow("feature", feature);
waitKey(0);
return 0;
}
最后实现的结果为
表示通过特征点进行匹配到了,最后可以通过这些特征点实现原图的框选
好了,程序已经测试成功了!!!
二、使用OpenCV提供的matchTemplate()API进行模板匹配
原始图为
模板图为
代码实现如下
//模板匹配进行图像查找 使用灰度图进行操作时间更短
#include<iostream>
#include<opencv2/opencv.hpp>
using namespace std;
using namespace cv;
// Locate templateImage inside Image using matchTemplate() with the given
// comparison method, then draw the best-match rectangle on Image and show it.
void match(Mat &Image, Mat &templateImage, int method)
{
    // One score per possible top-left placement of the template.
    int result_cols = Image.cols - templateImage.cols + 1;
    int result_rows = Image.rows - templateImage.rows + 1;
    // Fix: the Mat constructor takes (rows, cols, type) — the original passed
    // them swapped (harmless only because matchTemplate reallocates).
    Mat result(result_rows, result_cols, CV_32FC1);
    matchTemplate(Image, templateImage, result, method);
    normalize(result, result, 0, 1, NORM_MINMAX);

    double minVal, maxVal;
    Point minLoc, maxLoc, matchLoc;
    minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc, Mat());
    // For squared-difference methods the best match is the minimum score;
    // for correlation/coefficient methods it is the maximum.
    switch (method)
    {
    case CV_TM_SQDIFF:
    case CV_TM_SQDIFF_NORMED:
        matchLoc = minLoc;
        break;
    default:
        matchLoc = maxLoc;
        break;
    }
    rectangle(Image, Rect(matchLoc, Size(templateImage.cols, templateImage.rows)),
              Scalar(0, 0, 255), 2, 8, 0);
    imshow("Image", Image);
}
int main()
{
    // Scene image and the template to find in it. Grayscale input would make
    // the matching faster; color is used here for the visualization.
    Mat Image = imread("E:/Images/feature.jpg");
    Mat templateImage = imread("E:/Images/feature1.jpg");
    // imread returns an empty Mat on failure — matchTemplate would assert.
    if (Image.empty() || templateImage.empty())
    {
        cout << "failed to load input images" << endl;
        return -1;
    }
    imshow("Image", Image);
    imshow("temp", templateImage);

    // Time one matching method. clock() returns ticks, not milliseconds, so
    // scale by CLOCKS_PER_SEC (the original printed raw ticks as "ms", which
    // is only accidentally correct on platforms where CLOCKS_PER_SEC == 1000).
    // The other CV_TM_* methods can be timed the same way for comparison.
    clock_t cstart = clock();
    match(Image, templateImage, CV_TM_SQDIFF);
    clock_t cend = clock();
    double elapsedMs = 1000.0 * (cend - cstart) / CLOCKS_PER_SEC;
    cout << "CV_TM_SQDIFF=" << elapsedMs << "ms" << endl;

    waitKey(0);
    return 0;
}
最后运行结果得到的图像为
结果如己所料!!!
注:模板匹配的代码有部分没用上的,主要是用于测试模板匹配的函数中的参数实现时间上的长短与精确度的问题
两种方法优缺点:
这两种方法各有所长,简单来说SURF稍微复杂一点,而模板匹配通常简单点,但处理复杂的图像集一般会考虑特征点匹配,因为模板匹配在复杂度高的图片集上运行时间会更长一点,它是通过逐点扫描的,而特征点是通过收集图像集中的各个能代表图像的所有特征点集
欢迎指正批评!!!
欢迎关注微信公众号–木木夕算法笔记,与博主交流!
公众号会时不时更新有趣的图像算法哦!!!