The OPENCV_ENABLE_NONFREE option in CMake
Remember to check the Python paths when configuring: don't let CMake pick up the Anaconda ones, as that causes problems and behaves a bit differently from before.
Without rebuilding OpenCV with the nonfree option enabled, creating SIFT/SURF fails with:
error: (-213:The function/feature is not implemented) This algorithm is patented and is excluded in this configuration; Set OPENCV_ENABLE_NONFREE CMake option and rebuild the library in function 'create'
# OpenCV 3.4.8
# https://zhuanlan.zhihu.com/p/48854990
# Rebuild OpenCV with the contrib modules and the nonfree option enabled, e.g.:
#   cmake -D OPENCV_EXTRA_MODULES_PATH=<opencv_contrib>/modules -D OPENCV_ENABLE_NONFREE=ON <opencv_main>
cmake_minimum_required(VERSION 3.12)
project(ASIFT)
set(CMAKE_CXX_STANDARD 14)
set(OpenCV_DIR "/media/spple/新加卷/Dataset/opencv-3.4.8/install/share/OpenCV/")
find_package(OpenCV REQUIRED)
INCLUDE_DIRECTORIES(
${OpenCV_INCLUDE_DIRS}
)
add_executable(ASIFT main.cpp ASiftDetector.h ASiftDetector.cpp)
target_link_libraries(ASIFT ${OpenCV_LIBS})
#include <iostream>
#include <vector>
#include <cmath>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <opencv2/flann/flann.hpp>
using namespace std;
using namespace cv;
int main() {
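// Note: the 3x3 intrinsics matrix below is laid out as [fx 0 cx; 0 fy cy; 0 0 1].
// The intrinsics and the two depth maps are only loaded in this file; they are
// presumably intended for a later ICP / 3D alignment step and are not used by
// the 2D feature matching that follows.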
cv::Mat intrinsics = (cv::Mat_<float>(3, 3) << 2269.16, 0, 1065.54, 0, 2268.4, 799.032, 0, 0, 1);
cv::Mat OriginalGrayImage = cv::imread("/home/spple/CLionProjects/ASift_ICP/11.14/P1000965.JPG", -1);
cv::Mat OriginalDepthImage = cv::imread("/home/spple/CLionProjects/ASift_ICP/11.14/11_IMG_DepthMap.tif", -1);
cv::Mat targetGrayImage = cv::imread("/home/spple/CLionProjects/ASift_ICP/11.14/P1000966.JPG", -1);
cv::Mat targetDepthImage = cv::imread("/home/spple/CLionProjects/ASift_ICP/11.14/11_IMG_DepthMap.tif", -1);
// typedef SIFT cv::xfeatures2d::SiftDescriptorExtractor
// typedef SIFT cv::xfeatures2d::SiftFeatureDetector
// typedef SURF cv::xfeatures2d::SurfDescriptorExtractor
// typedef SURF cv::xfeatures2d::SurfFeatureDetector
//Ptr<Feature2D> sift = xfeatures2d::SIFT::create();
//Create the detector
cv::Ptr<cv::xfeatures2d::SiftFeatureDetector> detector = cv::xfeatures2d::SiftFeatureDetector::create();
// cv::Ptr<SurfFeatureDetector> detector = SurfFeatureDetector::create(100);//surf
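// If needed, SIFT::create() can also be tuned; the OpenCV 3.4.x defaults are roughly:
//   cv::xfeatures2d::SIFT::create(/*nfeatures=*/0, /*nOctaveLayers=*/3,
//                                 /*contrastThreshold=*/0.04, /*edgeThreshold=*/10, /*sigma=*/1.6);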
//Detect keypoints and compute descriptors
vector<cv::KeyPoint> key_points_1, key_points_2;
cv::Mat dstImage1, dstImage2;
detector->detectAndCompute(OriginalGrayImage, Mat(), key_points_1, dstImage1);
detector->detectAndCompute(targetGrayImage, Mat(), key_points_2, dstImage2);
//Draw the keypoints
Mat img_key_points_1, img_key_points_2;
drawKeypoints(OriginalGrayImage, key_points_1, img_key_points_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
drawKeypoints(targetGrayImage, key_points_2, img_key_points_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
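// DrawMatchesFlags::DEFAULT draws only small keypoint circles; to visualise the SIFT
// scale and orientation one could pass DrawMatchesFlags::DRAW_RICH_KEYPOINTS instead.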
//Matching. The available matcher names for DescriptorMatcher::create are:
// - `BruteForce` (it uses L2 )
// - `BruteForce-L1`
// - `BruteForce-Hamming`
// - `BruteForce-Hamming(2)`
// - `FlannBased`
cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("FlannBased");
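// FlannBased (KD-tree) matching expects float descriptors such as SIFT/SURF.
// For binary descriptors (ORB, BRIEF, BRISK) one would instead create e.g.
//   cv::DescriptorMatcher::create("BruteForce-Hamming");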
vector<cv::DMatch> matches;
matcher->match(dstImage1, dstImage2, matches);
// //Compute min/max match distances for a filtering threshold
// double Max_dist = 0;
// double Min_dist = 100;
//
// for (int i = 0; i < dstImage1.rows; i++)
// {
// double dist = matches[i].distance;
// if (dist < Min_dist) Min_dist = dist;
// if (dist > Max_dist) Max_dist = dist;
// }
// std::cout << "shortest distance: " << Min_dist << std::endl;
// std::cout << "longest distance: " << Max_dist << std::endl;
//
// //Filter the matches: keep those closer than 2 * Min_dist
// vector<cv::DMatch> goodMatches;
// for (int i = 0; i < dstImage1.rows; i++)
// {
// if (matches[i].distance < 2 * Min_dist)
// {
// goodMatches.push_back(matches[i]);
// }
// }
//
// //Draw the filtered matches
// Mat imageMatches;
// drawMatches(OriginalGrayImage, key_points_1, targetGrayImage, key_points_2, goodMatches,
// imageMatches, Scalar(0, 0, 255));
//
// imshow("", imageMatches);
// waitKey();
// std::cout << "Hello, World!" << std::endl;
// return 0;
//Draw the matched keypoints
Mat img_matches;
drawMatches(OriginalGrayImage, key_points_1, targetGrayImage, key_points_2, matches, img_matches);
//imshow("Match image",img_matches);
//Find the minimum and maximum distances among the matches
double min_dist = matches[0].distance, max_dist = matches[0].distance;
for (int m = 0; m < matches.size(); m++)
{
if (matches[m].distance<min_dist)
{
min_dist = matches[m].distance;
}
if (matches[m].distance>max_dist)
{
max_dist = matches[m].distance;
}
}
cout << "min dist=" << min_dist << endl;
cout << "max dist=" << max_dist << endl;
//Keep only the better matches
vector<DMatch> goodMatches;
for (int m = 0; m < matches.size(); m++)
{
if (matches[m].distance < 0.6*max_dist)
{
goodMatches.push_back(matches[m]);
}
}
cout << "The number of good matches:" <<goodMatches.size()<< endl;
//Draw the match result
Mat img_out;
//matchColor - Color of matches (lines and connected keypoints). If matchColor==Scalar::all(-1), the color is generated randomly.
//singlePointColor - Color of single keypoints (circles), i.e. keypoints that have no match. If singlePointColor==Scalar::all(-1), the color is generated randomly.
//Here matchColor is Scalar::all(-1) (random colors) and singlePointColor is CV_RGB(0, 0, 255), i.e. blue (CV_RGB takes its arguments in R-G-B order).
//The final argument 2 is DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS, so unmatched keypoints are not drawn at all.
drawMatches(OriginalGrayImage, key_points_1, targetGrayImage, key_points_2, goodMatches, img_out, Scalar::all(-1), CV_RGB(0, 0, 255), Mat(), 2);
imshow("good Matches",img_out);
//RANSAC-based outlier rejection
vector<DMatch> m_Matches;
m_Matches = goodMatches;
int ptCount = goodMatches.size();
if (ptCount < 100)
{
cout << "Don't find enough match points" << endl;
return 0;
}
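// The 100-point cut-off above is just a conservative sanity check;
// findFundamentalMat with FM_RANSAC itself needs at least 8 correspondences.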
//Convert the coordinates to float
vector <KeyPoint> RAN_KP1, RAN_KP2;
//size_t is defined by the standard C library as an unsigned integer type (unsigned int, or long unsigned int on 64-bit systems); it is used in C++ for portability across platforms.
for (size_t i = 0; i < m_Matches.size(); i++)
{
RAN_KP1.push_back(key_points_1[goodMatches[i].queryIdx]);
RAN_KP2.push_back(key_points_2[goodMatches[i].trainIdx]);
//RAN_KP1 stores the keypoints of img01 that have a match in img02
//goodMatches stores, for each match, the keypoint indices into img01 (queryIdx) and img02 (trainIdx)
}
//Extract the point coordinates
vector <Point2f> p01, p02;
for (size_t i = 0; i < m_Matches.size(); i++)
{
p01.push_back(RAN_KP1[i].pt);
p02.push_back(RAN_KP2[i].pt);
}
/*vector <Point2f> img1_corners(4);
img1_corners[0] = Point(0,0);
img1_corners[1] = Point(OriginalGrayImage.cols,0);
img1_corners[2] = Point(OriginalGrayImage.cols, OriginalGrayImage.rows);
img1_corners[3] = Point(0, OriginalGrayImage.rows);
vector <Point2f> img2_corners(4);*/
//Compute the transformation matrix
//Mat m_homography;
//vector<uchar> m;
//m_homography = findHomography(p01, p02, RANSAC); // estimate the homography between the two images
//Estimate the 3x3 fundamental matrix
vector<uchar> RansacStatus;
Mat Fundamental = findFundamentalMat(p01, p02, RansacStatus, FM_RANSAC);
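// Sketch (not part of the original code): since the camera intrinsics are available,
// the relative pose could also be recovered from the correspondences via an essential
// matrix instead of the fundamental matrix, e.g.:
//   cv::Mat K;
//   intrinsics.convertTo(K, CV_64F);
//   cv::Mat poseMask, R, t;
//   cv::Mat E = cv::findEssentialMat(p01, p02, K, cv::RANSAC, 0.999, 1.0, poseMask);
//   cv::recoverPose(E, p01, p02, K, R, t, poseMask);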
//RR_KP1/RR_KP2 and RR_matches store the keypoints and matches kept after RANSAC; RansacStatus marks the mismatched (outlier) points to be discarded
vector <KeyPoint> RR_KP1, RR_KP2;
vector <DMatch> RR_matches;
int index = 0;
for (size_t i = 0; i < m_Matches.size(); i++)
{
if (RansacStatus[i] != 0)
{
RR_KP1.push_back(RAN_KP1[i]);
RR_KP2.push_back(RAN_KP2[i]);
m_Matches[i].queryIdx = index;
m_Matches[i].trainIdx = index;
RR_matches.push_back(m_Matches[i]);
index++;
}
}
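// queryIdx/trainIdx are rewritten above so that each surviving DMatch indexes into
// the compacted RR_KP1 / RR_KP2 vectors, which is what drawMatches expects.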
cout << "RANSAC后匹配点数" <<RR_matches.size()<< endl;
Mat img_RR_matches;
drawMatches(OriginalGrayImage, RR_KP1, targetGrayImage, RR_KP2, RR_matches, img_RR_matches);
imshow("After RANSAC",img_RR_matches);
//Wait for any key press
waitKey(0);
return 0;
}