《视觉SLAM十四讲》 (14 Lectures on Visual SLAM): Beginner's Notes (9)

In the previous lecture, R and t were recovered by solving the epipolar constraint. This lecture uses triangulation to recover the spatial (3D) coordinates of the matched points in the two images, together with their projected coordinates and depth values.

T1 = [I | 0], 3×4; T2 = [R | t], 3×4; K is the camera intrinsic matrix.
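For reference, in the book's notation, with x1 and x2 the normalized camera coordinates of a matched pair and s1, s2 their unknown depths, triangulation amounts to solving the projection relation

$$ s_2 \mathbf{x}_2 = s_1 R \mathbf{x}_1 + \mathbf{t}. $$

Left-multiplying both sides by $\mathbf{x}_2^{\wedge}$ (the skew-symmetric matrix of $\mathbf{x}_2$) zeroes out the left side,

$$ s_1 \mathbf{x}_2^{\wedge} R \mathbf{x}_1 + \mathbf{x}_2^{\wedge} \mathbf{t} = \mathbf{0}, $$

which can be solved for $s_1$ in the least-squares sense; cv::triangulatePoints performs the equivalent linear solve for both views at once.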

void triangulation(const vector<KeyPoint>& keypoint_1,
  const vector<KeyPoint>& keypoint_2,
  const std::vector<DMatch>& matches,
  const Mat& R, const Mat& t,
  vector<Point3d>& points)
{
    Mat T1=(Mat_<float>(3,4)<<
    1,0,0,0,
    0,1,0,0,
    0,0,1,0);
    Mat T2=(Mat_<float>(3,4)<<
      R.at<double>(0,0),R.at<double>(0,1),R.at<double>(0,2),t.at<double>(0,0),
      R.at<double>(1,0),R.at<double>(1,1),R.at<double>(1,2),t.at<double>(1,0),
      R.at<double>(2,0),R.at<double>(2,1),R.at<double>(2,2),t.at<double>(2,0));
    cout<<"旋转矩阵="<<T2<<endl;
    
    //camera intrinsics
    double cx = 325.1;  // principal point: offset between the pixel origin and the image-plane origin
    double cy = 249.7;
    double fx = 520.9;  // focal lengths, in pixels
    double fy = 521.0;
    Mat K = (Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 1);

    vector<Point2f> pts_1, pts_2;
    for(const DMatch& m : matches){
      //convert pixel coordinates to normalized camera coordinates
      pts_1.push_back(pixel2cam(keypoint_1[m.queryIdx].pt,K));
      pts_2.push_back(pixel2cam(keypoint_2[m.trainIdx].pt,K));
    }
    
    //triangulate
    Mat pts_4d;
    cv::triangulatePoints(T1,T2,pts_1,pts_2,pts_4d);
    
    //convert to non-homogeneous coordinates
    for(int i=0;i<pts_4d.cols;i++){
      Mat x=pts_4d.col(i);
      x/=x.at<float>(3,0);  //normalize by the last (homogeneous) component
      Point3d p(
	x.at<float>(0,0),
	x.at<float>(1,0),
	x.at<float>(2,0)
      );
      points.push_back(p);
    }   
}
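pixel2cam is called above but not listed in this note. A definition consistent with how it is used here (back-projecting a pixel onto the normalized plane z = 1 using the intrinsics in K) would be:

Point2d pixel2cam(const Point2d& p, const Mat& K)
{
    // undo the intrinsic mapping u = fx*x + cx, v = fy*y + cy
    return Point2d(
      (p.x - K.at<double>(0,2)) / K.at<double>(0,0),
      (p.y - K.at<double>(1,2)) / K.at<double>(1,1));
}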

 

int main(int argc,char** argv){
  if(argc!=3){
    cout<<"usage:triangluation img1,img2"<<endl;
    return 1;
  }
  
  //read the images (IMREAD_COLOR replaces the old CV_LOAD_IMAGE_COLOR, which was removed in OpenCV 4)
  Mat img_1=imread(argv[1],IMREAD_COLOR);
  Mat img_2=imread(argv[2],IMREAD_COLOR);
  
  //feature extraction and matching
  vector<KeyPoint> keypoint_1,keypoint_2;
  vector<DMatch> matches;
  find_feature_matches(img_1,img_2,keypoint_1,keypoint_2,matches);
  cout<<"一共找到了"<<matches.size()<<"个匹配点" <<endl;
  
  //estimate the motion between the two images
  Mat R,t;
  pose_estimation_2d2d(keypoint_1,keypoint_2,matches,R,t);
  
  //triangulate
  vector<Point3d> points;
  triangulation(keypoint_1,keypoint_2,matches,R,t,points);
  
  //-- verify the reprojection relation between the triangulated points and the feature points
  // camera intrinsics, TUM Freiburg2
  double cx = 325.1;  // principal point: offset between the pixel origin and the image-plane origin
  double cy = 249.7;
  double fx = 520.9;  // focal lengths, in pixels
  double fy = 521.0;
  Mat K = (Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 1);

  for(size_t i=0;i<matches.size();i++){
    //first image
    Point2d pt1_cam=pixel2cam(keypoint_1[matches[i].queryIdx].pt,K);		//normalized camera coordinates
    cout<<"Feature point in the first image: "<<pt1_cam<<endl;
    
    //triangulated point, reprojected onto the first image's normalized plane
    Point2d pt1_cam_3d(points[i].x/points[i].z,points[i].y/points[i].z);
    cout<<"Triangulated point reprojected in the first image: "<<pt1_cam_3d<<", depth="<<points[i].z<<endl;
    
    //second image
    Point2d pt2_cam=pixel2cam(keypoint_2[matches[i].trainIdx].pt,K);
    cout<<"Feature point in the second image: "<<pt2_cam<<endl;
    
    //reprojection: transform the 3D point into the second camera frame, then normalize
    Mat pt2_trans=R*(Mat_<double>(3,1)<<points[i].x,points[i].y,points[i].z)+t;
    pt2_trans/=pt2_trans.at<double>(2,0);
    cout<<"Triangulated point reprojected in the second image: "<<pt2_trans.t()<<endl;
    
    cout<<endl;
  }
  return 0;
}

Note: the cv::triangulatePoints() function

void triangulatePoints(InputArray projMatr1, InputArray projMatr2, InputArray projPoints1, InputArray projPoints2, OutputArray points4D);

Parameters:

projMatr1 – 3x4 projection matrix of the first camera.
projMatr2 – 3x4 projection matrix of the second camera.
projPoints1 – 2xN array of feature points in the first image. In the C++ version it can also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.
projPoints2 – 2xN array of corresponding points in the second image. In the C++ version it can also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.
points4D – 4xN array of reconstructed points in homogeneous coordinates.
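A minimal self-contained sketch of the call (the camera poses and the point here are made up for illustration): a 3D point at (2, 0, 4) observed by a camera at the origin and a second camera shifted one unit along x, with the observations already in normalized coordinates:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    cv::Mat P1 = cv::Mat::eye(3, 4, CV_32F);     // first camera: [I | 0]
    cv::Mat P2 = cv::Mat::eye(3, 4, CV_32F);     // second camera: [I | -C], camera center C = (1,0,0)
    P2.at<float>(0, 3) = -1.0f;
    std::vector<cv::Point2f> x1{{0.5f, 0.0f}};   // (2/4, 0/4): projection in camera 1
    std::vector<cv::Point2f> x2{{0.25f, 0.0f}};  // ((2-1)/4, 0/4): projection in camera 2
    cv::Mat X;                                   // 4x1 homogeneous output (CV_32F)
    cv::triangulatePoints(P1, P2, x1, x2, X);
    X /= X.at<float>(3, 0);                      // de-homogenize
    std::cout << X.t() << std::endl;             // approximately [2, 0, 4, 1]
    return 0;
}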
 

Results

Because of the scale ambiguity of monocular reconstruction (the translation t recovered from the essential matrix is only known up to scale), the recovered depths have no absolute unit.
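Concretely, if $\pi([x, y, z]^T) = [x/z, y/z]^T$ denotes perspective division, then for any $\lambda > 0$

$$ \pi\big(R(\lambda X) + \lambda \mathbf{t}\big) = \pi(R X + \mathbf{t}), $$

so $(\lambda \mathbf{t}, \lambda X)$ reproduces exactly the same image observations as $(\mathbf{t}, X)$. Monocular triangulation therefore fixes only ratios of depths, and the usual convention when decomposing the essential matrix is to normalize t to unit length.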
