《视觉slam十四讲》初学小白笔记(8)

 对极约束

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include "extra.h"

using namespace std;
using namespace cv;

void find_feature_matches(const Mat& img_1,const Mat& img_2,
  std::vector<KeyPoint>& keypoints_1,
  std::vector<KeyPoint>& keypoints_2,
  std::vector<DMatch>& matches)
{
  //初始化
  Mat descriptors_1,descriptors_2;				//创建描述子
  Ptr<FeatureDetector> detector = ORB::create("ORB");		//创建ORB特征点检测
  Ptr<DescriptorExtractor> descriptor = ORB::create("ORB");	//使用ORB特征来描述特征点
  Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
  
  //第一步:检测oriented FAST角点位置
  detector->detect(img_1,keypoints_1);
  detector->detect(img_2,keypoints_2);
  
  //第二步:根据角点位置计算BRIEF描述子
  descriptor->compute(img_1,keypoints_1,descriptors_1);
  descriptor->compute(img_2,keypoints_2,descriptors_2);
  
  //第三步:对两幅图像中BRIEF描述子进行匹配,使用hamming距离
  vector<DMatch>match;
  matcher->match(descriptors_1,descriptors_2,match);
  
  //第四步:匹配点对筛选
  double min_dist=10000,max_dist=0;
  for(int i=0;i<descriptors_1.rows;i++){
    double dist=match[i].distance;
    if(dist<min_dist) min_dist=dist;
    if(dist>max_dist) max_dist=dist;
  }
  
  //显示最大和最小距离
  printf("Max dist =%f\n",max_dist);
  printf("Min dist =%f\n",min_dist);
  
  // 当描述子之间的距离大于两倍的最小距离时,即认为匹配有误
  // 但有时候最小距离会非常小,设置一个经验值30作为下限
  for(int i=0;i<descriptors_1.rows;i++){
    if(match[i].distance<=max(2*min_dist,30.0)){
      matches.push_back(match[i]);
    }
  }  
}

void pose_estimation_2d2d(std::vector<KeyPoint> keypoints_1,
  std::vector<KeyPoint> keypoints_2,
  std::vector<DMatch> matches,
  Mat& R,Mat& t)
{
  //相机内参
  double cx = 325.1;  // 像素坐标系与成像平面之间的原点平移
  double cy = 249.7;
  double fx = 520.9;  // 焦距
  double fy = 521.0;
  Mat K=(Mat_<double>(3,3)<<fx,0,cx,0,fy,cy,0,0,1);
  
  //把匹配点转换为vector<Point2f>形式
  vector<Point2f> points1;
  vector<Point2f> points2;
  
  for(int i=0;i<(int)matches.size();i++){
    points1.push_back(keypoints_1[matches[i].queryIdx].pt);//queryIdx此匹配对应的查询图像的特征描述子索引 
    points2.push_back(keypoints_2[matches[i].trainIdx].pt);//trainIdx此匹配对应的训练(模板)图像的特征描述子索引 
  }
  
  //计算基础矩阵 F=K^(-T)EK^(-1)
  Mat fundamental_matrix;
  fundamental_matrix=findFundamentalMat(points1,points2,CV_FM_8POINT);
  cout<<"基础矩阵 = "<<endl<<fundamental_matrix<<endl;
  
  //计算本质矩阵 E=R^t
  Point2d principal_point(cx,cy);	//相机光心,TUM dataset标定值
  double focal_length = fy;		//相机焦距,TUM dataset标定值
  Mat essential_matrix;
  essential_matrix=findEssentialMat(points1,points2,focal_length,principal_point);
  cout<<"本质矩阵 = "<<endl<<essential_matrix<<endl;
  
  //计算单应矩阵
  Mat homography_matrix;
  homography_matrix=findHomography(points1,points2,RANSAC,3);
  //当可能存在误匹配的情况时,会更倾向于使用 随机采样一致性(Random Sample Concensus RANSAC) 来求,而不是最小二乘
  //RANSAC 是一种通用的做法,适用于很多带错误数据的情况,可以处理带有错误匹配的数据
  cout<<"单应矩阵 = "<<endl<<homography_matrix<<endl;
  
  //从本质矩阵中恢复R,t
  recoverPose(essential_matrix,points1,points2,R,t,focal_length,principal_point);
  cout<<"R is"<<endl<<R<<endl;
  cout<<"t is"<<endl<<t<<endl; 
}

// Convert a pixel coordinate into a normalized camera-plane coordinate:
//   x = (u - cx) / fx,  y = (v - cy) / fy
// where K = [fx 0 cx; 0 fy cy; 0 0 1] is the intrinsic matrix.
Point2d pixel2cam(const Point2d& p,const Mat& K){
  const double fx = K.at<double>(0,0);
  const double fy = K.at<double>(1,1);
  const double cx = K.at<double>(0,2);
  const double cy = K.at<double>(1,2);
  return Point2d((p.x-cx)/fx,(p.y-cy)/fy);
}

int main(int argc,char** argv){
  if(argc!=3){
    cout<<"usage:pose_estimation_2d2d img_1 img_2"<<endl;
    return 1;
  }
  //读取图像
  Mat img_1=imread(argv[1],CV_LOAD_IMAGE_COLOR);
  Mat img_2=imread(argv[2],CV_LOAD_IMAGE_COLOR);
  
  //特征提取
  vector<KeyPoint> keypoints_1,keypoints_2;
  vector<DMatch> matches;
  find_feature_matches(img_1,img_2,keypoints_1,keypoints_2,matches);
  cout<<"一共找到了"<<matches.size()<<"个匹配点"<<endl;
  
  //估计两张图像间的运动
  Mat R,t;
  pose_estimation_2d2d(keypoints_1,keypoints_2,matches,R,t);
  
  //验证E=t^R*scale
  Mat t_x=(Mat_<double>(3,3)<<			//t的反对称矩阵
    0,-t.at<double>(2,0),t.at<double>(1,0),
    t.at<double>(2,0),0,-t.at<double>(0,0),
    -t.at<double>(1,0),t.at<double>(0,0),0
  );
  cout<<"t^R="<<endl<<t_x*R<<endl;
  
  //验证对极约束
  double cx = 325.1;  // 像素坐标系与成像平面之间的原点平移
  double cy = 249.7;
  double fx = 520.9;  // 焦距
  double fy = 521.0;
  Mat K = (Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 1);

  //计算对极约束
  for(DMatch m:matches){
    Point2d pt1=pixel2cam(keypoints_1[m.queryIdx].pt,K);
    Mat y1=(Mat_<double>(3,1)<<pt1.x,pt1.y,1);
    
    Point2d pt2=pixel2cam(keypoints_2[m.trainIdx].pt,K);
    Mat y2=(Mat_<double>(3,1)<<pt2.x,pt2.y,1);
    
    Mat d=y2.t()*t_x*R*y1;		//近似为0
    cout<<"对极约束方程="<<d<<endl;
  }
  return 0;

}

因为使用的是opencv2,所以要另外加上extra.cc 和extra.h文件

尤其是在CMakeLists.txt中要加入这两个文件,否则编译时会报错

add_executable( pose_estimation_2d2d pose_estimation_2d2d.cc extra.cc )
target_link_libraries( pose_estimation_2d2d ${OpenCV_LIBS} )

运行结果

对极约束方程结果近似为0

  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值