// This code uses ORB feature matches to solve the PnP problem with OpenCV 3.2.0.
#include<iostream>
using namespace std;
// OpenCV 特征检测模块
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
typedef pcl::PointXYZRGBA PointT;
typedef pcl::PointCloud<PointT> PointCloud;
// Pinhole camera intrinsic parameters.
struct CAMERA_INTRINSIC_PARAMETERS
{
// cx, cy: principal point (pixels); fx, fy: focal lengths (pixels);
// scale: factor dividing raw depth values to get metric depth (e.g. 1000 for mm -> m).
double cx, cy, fx, fy, scale;
};
// Convert a registered RGB + depth image pair into a colored PCL point cloud.
// Pixels with zero depth (no measurement) are skipped; the result is an
// unorganized (height == 1) cloud in the camera frame.
PointCloud::Ptr image2PointCloud( cv::Mat& rgb, cv::Mat& depth, CAMERA_INTRINSIC_PARAMETERS& camera )
{
    PointCloud::Ptr cloud ( new PointCloud );
    for (int row = 0; row < depth.rows; row++)
    {
        // Hoist the per-row pointers out of the inner loop.
        const ushort* depthRow = depth.ptr<ushort>(row);
        const uchar*  colorRow = rgb.ptr<uchar>(row);
        for (int col = 0; col < depth.cols; col++)
        {
            ushort d = depthRow[col];
            if (d == 0)
                continue;   // no depth measurement at this pixel
            PointT p;
            // Pinhole back-projection: (col,row,d) -> camera-frame (x,y,z).
            p.z = double(d) / camera.scale;
            p.x = (col - camera.cx) * p.z / camera.fx;
            p.y = (row - camera.cy) * p.z / camera.fy;
            // rgb is a 3-channel image in BGR channel order.
            p.b = colorRow[col*3];
            p.g = colorRow[col*3+1];
            p.r = colorRow[col*3+2];
            cloud->points.push_back( p );
        }
    }
    // Unorganized cloud: one row whose width is the point count.
    cloud->height = 1;
    cloud->width = cloud->points.size();
    cloud->is_dense = false;
    return cloud;
}
// Back-project an image measurement (u, v, rawDepth) stored in `point`
// (x = u pixel column, y = v pixel row, z = raw depth value) into a 3D point
// in the camera frame, using the pinhole model in `camera`.
// Pure function: takes const references (the original took mutable refs
// although nothing is modified); callers passing lvalues are unaffected.
cv::Point3f point2dTo3d( const cv::Point3f& point, const CAMERA_INTRINSIC_PARAMETERS& camera )
{
    cv::Point3f p; // resulting 3D point
    p.z = double( point.z ) / camera.scale;            // raw depth -> metric depth
    p.x = ( point.x - camera.cx) * p.z / camera.fx;    // u -> x
    p.y = ( point.y - camera.cy) * p.z / camera.fy;    // v -> y
    return p;
}
int main( int argc, char** argv )
{
// 声明并从data文件夹里读取两个rgb与深度图
cv::Mat rgb1 = cv::imread( "../data2/1.png");
cv::Mat rgb2 = cv::imread( "../data2/2.png");
cv::Mat depth1 = cv::imread( "../data2/1.pgm", -1);
cv::Mat depth2 = cv::imread( "../data2/2.pgm", -1);
//orb
imshow("a",rgb1);
waitKey();
//-- 初始化
std::vector<KeyPoint> kp1, kp2;
Mat desp1, desp2;
//Ptr<FeatureDetector> detector = ORB::create();
//Ptr<DescriptorExtractor> descriptor = ORB::create();
// Ptr<FeatureDetector> detector = FeatureDetector::create(detector_name);
// Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create(descriptor_name);
//Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create ( "BruteForce-Hamming" );
Ptr<ORB> orb = ORB::create(500,1.2f,8,31,0,2,ORB::HARRIS_SCORE,31,20);
//-- 第一步:检测 Oriented FAST 角点位置
orb->detect ( rgb1,kp1 );
orb->detect ( rgb2,kp2 );
cout<<"1"<<endl;
//-- 第二步:根据角点位置计算 BRIEF 描述子
orb->compute ( rgb1, kp1, desp1 );
orb->compute ( rgb2, kp2, desp2 );
Mat imgShow;
drawKeypoints( rgb1, kp1, imgShow, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
imshow("ORB特征点",imgShow);
//-- 第三步:对两幅图像中的BRIEF描述子进行匹配,使用 Hamming 距离
vector<DMatch> matches;
BFMatcher matcher ( NORM_HAMMING );
matcher.match ( desp1, desp2, matches );
//-- 第四步:匹配点对筛选
double min_dist=10000, max_dist=0;
//找出所有匹配之间的最小距离和最大距离, 即是最相似的和最不相似的两组点之间的距离
for ( int i = 0; i < desp1.rows; i++ )
{
double dist = matches[i].distance;
if ( dist < min_dist ) min_dist = dist;
if ( dist > max_dist ) max_dist = dist;
}
// 仅供娱乐的写法
//min_dist = min_element( matches.begin(), matches.end(), [](const DMatch& m1, const DMatch& m2) {return m1.distance<m2.distance;} )->distance;
//max_dist = max_element( matches.begin(), matches.end(), [](const DMatch& m1, const DMatch& m2) {return m1.distance<m2.distance;} )->distance;
printf ( "-- Max dist : %f \n", max_dist );
printf ( "-- Min dist : %f \n", min_dist );
//当描述子之间的距离大于两倍的最小距离时,即认为匹配有误.但有时候最小距离会非常小,设置一个经验值30作为下限.
std::vector< DMatch > goodMatches;
for ( int i = 0; i < desp1.rows; i++ )
{
// if ( matches[i].distance <= max ( 2*min_dist, 30.0 ) )
if ( matches[i].distance <= 40 )
{
goodMatches.push_back ( matches[i] );
}
}
//-- 第五步:绘制匹配结果
Mat img_match;
Mat img_goodmatch;
drawMatches ( rgb1, kp1, rgb2, kp2, matches, img_match );
drawMatches ( rgb1, kp1, rgb2, kp2, goodMatches, img_goodmatch );
imshow ( "所有匹配点对", img_match );
imshow ( "优化后匹配点对", img_goodmatch );
waitKey(0);
cout<<goodMatches.size()<<endl;
// 计算图像间的运动关系
// 关键函数:cv::solvePnPRansac()
// 为调用此函数准备必要的参数
// 第一个帧的三维点
vector<cv::Point3f> pts_obj;
// 第二个帧的图像点
vector< cv::Point2f > pts_img;
// 相机内参
CAMERA_INTRINSIC_PARAMETERS C;
C.cx = 325.5;
C.cy = 253.5;
C.fx = 518.0;
C.fy = 519.0;
C.scale = 1000.0;
for (size_t i=0; i<goodMatches.size(); i++)
{
// query 是第一个, train 是第二个
cv::Point2f p = kp1[goodMatches[i].queryIdx].pt;
// 获取d是要小心!x是向右的,y是向下的,所以y才是行,x是列!
ushort d = depth1.ptr<ushort>( int(p.y) )[ int(p.x) ];
if (d == 0)
continue;
pts_img.push_back( cv::Point2f( kp2[goodMatches[i].trainIdx].pt ) );
// 将(u,v,d)转成(x,y,z)
cv::Point3f pt ( p.x, p.y, d );
cv::Point3f pd = point2dTo3d( pt, C );
pts_obj.push_back( pd );
}
double camera_matrix_data[3][3] = {
{C.fx, 0, C.cx},
{0, C.fy, C.cy},
{0, 0, 1}
};
// 构建相机矩阵
cv::Mat cameraMatrix( 3, 3, CV_64F, camera_matrix_data );
cv::Mat rvec, tvec, inliers;
// 求解pnp
// cv::solvePnPRansac( pts_obj, pts_img, cameraMatrix, cv::Mat(), rvec, tvec, false, 100, 4.0, 0.99, inliers );
solvePnP(pts_obj,pts_img,cameraMatrix,Mat(),rvec,tvec,false,cv::SOLVEPNP_EPNP);
cout<<"2"<<endl;
cout<<"inliers: "<<inliers.rows<<endl;
cout<<"R="<<rvec<<endl;
cout<<"t="<<tvec<<endl;
// 画出inliers匹配
vector< cv::DMatch > matchesShow;
for (size_t i=0; i<inliers.rows; i++)
{
matchesShow.push_back( goodMatches[inliers.ptr<int>(i)[0]] );
}
Mat imgMatches;
cv::drawMatches( rgb1, kp1, rgb2, kp2, matchesShow,imgMatches );
cv::imshow( "inlier matches", imgMatches );
cv::imwrite( "./data/inliers.png", imgMatches );
cv::waitKey( 0 );
return 0;
}