直接上代码吧
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

#include <opencv2/features2d/features2d.hpp>
#include "opencv2/core/core.hpp"
#include "opencv2/opencv.hpp"
bool siftPointsDetect(std::string imgName1, std::string imgName2, int pointNum)
{
cv::Mat img1, img2;
img1 = cv::imread(imgName1);
img2 = cv::imread(imgName2);
if (img1.empty() || img2.empty()) {
std::cout << "error: cannot reading image: " << std::endl;
return false;
}
// sift特征提取
std::vector<cv::KeyPoint> keyPoint1, keyPoint2;
cv::Ptr<cv::Feature2D> feature = cv::SIFT::create(pointNum); // 提取pointNum个特征点
feature->detect(img1, keyPoint1);
feature->detect(img2, keyPoint2);
std::cout << "detect feature ok\n";
cv::Mat descor1, descor2;
feature->compute(img1, keyPoint1, descor1);
feature->compute(img2, keyPoint2, descor2);
std::cout << "generate descor ok\n";
//绘制特征点(关键点)
cv::Mat feature_pic1, feature_pic2;
cv::drawKeypoints(img1, keyPoint1, feature_pic1, cv::Scalar(0, 255, 0), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
cv::drawKeypoints(img2, keyPoint1, feature_pic2, cv::Scalar(0, 255, 0), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
//显示结果
cv::imwrite("feature1.jpg", feature_pic1);
cv::imwrite("feature2.jpg", feature_pic2);
cv::FlannBasedMatcher matcher; //实例化FLANN匹配器
std::vector<cv::DMatch>matches; //定义匹配结果变量
matcher.match(descor1, descor2, matches); //实现描述符之间的匹配
std::cout << "original match numbers: " << matches.size() << std::endl;
cv::Mat oriMatchRes;
cv::drawMatches(img1, keyPoint1, img2, keyPoint2, matches, oriMatchRes,
cv::Scalar(0, 255, 0), cv::Scalar::all(-1));
cv::imwrite("oriMatchResult.jpg", oriMatchRes);
double sum = 0;
double max_dist = 0;
double min_dist = 100;
for (int i = 0; i<matches.size(); i++)
{
double dist = matches[i].distance;
if (dist < min_dist)
min_dist = dist;
if (dist > max_dist)
max_dist = dist;
}
std::cout << "max distance: " << max_dist << std::endl;
std::cout << "min distance: " << min_dist << std::endl;
//筛选出较好的匹配点
std::vector<cv::DMatch> goodMatches;
double dThreshold = 0.5; //匹配的阈值,越大匹配的点数越多
for (int i = 0; i<matches.size(); i++) {
if (matches[i].distance < dThreshold * max_dist) {
goodMatches.push_back(matches[i]);
}
}
//RANSAC 消除误匹配特征点 主要分为三个部分:
//1)根据matches将特征点对齐,将坐标转换为float类型
//2)使用求基础矩阵方法findFundamentalMat,得到RansacStatus
//3)根据RansacStatus来将误匹配的点也即RansacStatus[i]=0的点删除
//根据matches将特征点对齐,将坐标转换为float类型
std::vector<cv::KeyPoint> R_keypoint01, R_keypoint02;
for (int i = 0; i<goodMatches.size(); i++) {
R_keypoint01.push_back(keyPoint1[goodMatches[i].queryIdx]);
R_keypoint02.push_back(keyPoint2[goodMatches[i].trainIdx]);
// 这两句话的理解:R_keypoint1是要存储img01中能与img02匹配的特征点,
// matches中存储了这些匹配点对的img01和img02的索引值
}
//坐标转换
std::vector<cv::Point2f> p01, p02;
for (int i = 0; i<goodMatches.size(); i++) {
p01.push_back(R_keypoint01[i].pt);
p02.push_back(R_keypoint02[i].pt);
}
//计算基础矩阵并剔除误匹配点
std::vector<uchar> RansacStatus;
cv::Mat Fundamental = findHomography(p01, p02, RansacStatus, CV_RANSAC);
cv::Mat dst;
warpPerspective(img1, dst, Fundamental, cv::Size(img1.cols, img1.rows));
cv::imwrite("epipolarImage.jpg", dst); // 核线影像
// 剔除误匹配的点对
std::vector<cv::KeyPoint> RR_keypoint01, RR_keypoint02;
// 重新定义RR_keypoint 和RR_matches来存储新的关键点和匹配矩阵
std::vector<cv::DMatch> RR_matches;
int index = 0;
for (int i = 0; i<goodMatches.size(); i++) {
if (RansacStatus[i] != 0) {
RR_keypoint01.push_back(R_keypoint01[i]);
RR_keypoint02.push_back(R_keypoint02[i]);
goodMatches[i].queryIdx = index;
goodMatches[i].trainIdx = index;
RR_matches.push_back(goodMatches[i]);
index++;
}
}
std::cout << "refine match pairs: " << RR_matches.size() << std::endl;
// 画出消除误匹配后的图
cv::Mat img_RR_matches;
cv::drawMatches(img1, RR_keypoint01, img2, RR_keypoint02, RR_matches, img_RR_matches,
cv::Scalar(0, 255, 0), cv::Scalar::all(-1));
cv::imwrite("refineMatchResult.jpg", img_RR_matches);
return true;
}
main 函数如下:
/// @brief Entry point.
/// Usage: prog <imageDir> <imageName1> <imageName2> <pointNum>
/// The image paths passed to siftPointsDetect are imageDir + imageNameN,
/// so imageDir should end with a path separator.
/// @return 0 on success, -1 on bad arguments or processing failure.
int main(int argc, char *argv[])
{
    // BUG FIX: the code reads argv[1]..argv[4], so 5 arguments are required;
    // the original `argc < 3` check allowed an out-of-bounds argv read.
    if (argc < 5) {
        std::cout << "usage: " << argv[0]
                  << " <imageDir> <imageName1> <imageName2> <pointNum>" << std::endl;
        return -1;
    }
    std::string path = argv[1];
    std::string imgName1 = path + argv[2];
    std::string imgName2 = path + argv[3];
    int detectPointNum = std::atoi(argv[4]);
    if (detectPointNum < 5)
        detectPointNum = 5000;  // default: extract 5000 feature points
    std::cout << "per image detect feature point number: " << detectPointNum << std::endl;
    // Propagate the result to the exit code instead of discarding it.
    bool bOk = siftPointsDetect(imgName1, imgName2, detectPointNum);
    return bOk ? 0 : -1;
}
希望能帮到你啊