视觉SLAM十四讲 (Visual SLAM: Fourteen Lectures), Chapter 7 Exercises 6, 7, and 8

6. In the PnP optimization, how should the program be written so that the first camera's observations are also taken into account? And how does the final result change?

Code link: HW-of-SLAMBOOK2/p6.cpp at main · Philipcjh/HW-of-SLAMBOOK2 · GitHub

 pose_estimation_3d2d1.cpp

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <Eigen/Core>
#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/sparse_optimizer.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/solvers/dense/linear_solver_dense.h>
#include <sophus/se3.hpp>
#include <chrono>
#include <cmath>    // isnan, used in the Gauss-Newton loop
#include <iomanip>  // std::setprecision, used when printing the cost

using namespace std;
using namespace cv;

void find_feature_matches(
  const Mat &img_1, const Mat &img_2,
  std::vector<KeyPoint> &keypoints_1,
  std::vector<KeyPoint> &keypoints_2,
  std::vector<DMatch> &matches);

// convert pixel coordinates to normalized camera coordinates
Point2d pixel2cam(const Point2d &p, const Mat &K);

// BA by g2o
typedef vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d>> VecVector2d;
typedef vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d>> VecVector3d;

// takes camera 1's observations into account as well
void bundleAdjustmentG2O(
  const VecVector3d &points_3d,
  const VecVector2d &points1_2d,
  const VecVector2d &points2_2d,
  const Mat &K,
  Sophus::SE3d &pose
);

// BA by gauss-newton
void bundleAdjustmentGaussNewton(
  const VecVector3d &points_3d,
  const VecVector2d &points_2d,
  const Mat &K,
  Sophus::SE3d &pose
);

int main(int argc, char **argv) {
  if (argc != 5) {
    cout << "usage: pose_estimation_3d2d img1 img2 depth1 depth2" << endl;
    return 1;
  }
  //-- load the images
  Mat img_1 = imread(argv[1], CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread(argv[2], CV_LOAD_IMAGE_COLOR);
  assert(img_1.data && img_2.data && "Can not load images!");

  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
  cout << "一共找到了" << matches.size() << "组匹配点" << endl;

  // build 3D points
  Mat d1 = imread(argv[3], CV_LOAD_IMAGE_UNCHANGED);       // the depth map is a 16-bit unsigned, single-channel image
  Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  vector<Point3f> pts_3d;
  vector<Point2f> pts1_2d,pts2_2d;
  for (DMatch m:matches) {
    ushort d = d1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
    if (d == 0)   // bad depth
      continue;
    float dd = d / 5000.0;
    Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
    pts_3d.push_back(Point3f(p1.x * dd, p1.y * dd, dd)); // 3D point observed by the first camera
    pts1_2d.push_back(keypoints_1[m.queryIdx].pt); // projection of the feature in the first camera
    pts2_2d.push_back(keypoints_2[m.trainIdx].pt); // projection of the feature in the second camera
  }

  cout << "3d-2d pairs: " << pts_3d.size() << endl;

  chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
  Mat r, t;
  solvePnP(pts_3d, pts2_2d, K, Mat(), r, t, false, SOLVEPNP_SQPNP); // call OpenCV's PnP solver; EPnP, DLS and other methods can also be selected
  Mat R;
  cv::Rodrigues(r, R); // r is in rotation-vector form; convert it to a rotation matrix with the Rodrigues formula
  chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
  chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "solve pnp in opencv cost time: " << time_used.count() << " seconds." << endl;

  cout << "R=" << endl << R << endl;
  cout << "t=" << endl << t << endl;

  VecVector3d pts_3d_eigen;
  VecVector2d pts1_2d_eigen,pts2_2d_eigen;
  for (size_t i = 0; i < pts_3d.size(); ++i) {
    pts_3d_eigen.push_back(Eigen::Vector3d(pts_3d[i].x, pts_3d[i].y, pts_3d[i].z));
    pts1_2d_eigen.push_back(Eigen::Vector2d(pts1_2d[i].x, pts1_2d[i].y)); // new: pixel coordinates of the features in the first camera's image
    pts2_2d_eigen.push_back(Eigen::Vector2d(pts2_2d[i].x, pts2_2d[i].y));
  }

  cout << "calling bundle adjustment by gauss newton" << endl;
  Sophus::SE3d pose_gn;
  t1 = chrono::steady_clock::now();
  bundleAdjustmentGaussNewton(pts_3d_eigen, pts2_2d_eigen, K, pose_gn);
  t2 = chrono::steady_clock::now();
  time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "solve pnp by gauss newton cost time: " << time_used.count() << " seconds." << endl;

  cout << "calling bundle adjustment by g2o" << endl;
  Sophus::SE3d pose_g2o;
  t1 = chrono::steady_clock::now();
  bundleAdjustmentG2O(pts_3d_eigen, pts1_2d_eigen,pts2_2d_eigen, K, pose_g2o);
  t2 = chrono::steady_clock::now();
  time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "solve pnp by g2o cost time: " << time_used.count() << " seconds." << endl;
  return 0;
}

void find_feature_matches(const Mat &img_1, const Mat &img_2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches) {
  //-- initialization
  Mat descriptors_1, descriptors_2;
  // used in OpenCV3
  Ptr<FeatureDetector> detector = ORB::create();
  Ptr<DescriptorExtractor> descriptor = ORB::create();
  // use this if you are in OpenCV2
  // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
  // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
  Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
  //-- step 1: detect Oriented FAST corner locations
  detector->detect(img_1, keypoints_1);
  detector->detect(img_2, keypoints_2);

  //-- step 2: compute BRIEF descriptors from the corner locations
  descriptor->compute(img_1, keypoints_1, descriptors_1);
  descriptor->compute(img_2, keypoints_2, descriptors_2);

  //-- step 3: match the BRIEF descriptors of the two images using the Hamming distance
  vector<DMatch> match;
  // BFMatcher matcher ( NORM_HAMMING );
  matcher->match(descriptors_1, descriptors_2, match);

  //-- step 4: filter the matched point pairs
  double min_dist = 10000, max_dist = 0;

  // find the minimum and maximum distances over all matches, i.e. the distances between the most similar and the least similar pairs
  for (int i = 0; i < descriptors_1.rows; i++) {
    double dist = match[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist);
  printf("-- Min dist : %f \n", min_dist);

  // a match is considered wrong when the descriptor distance exceeds twice the minimum distance; since the minimum distance can be very small, an empirical value of 30 serves as a lower bound
  for (int i = 0; i < descriptors_1.rows; i++) {
    if (match[i].distance <= max(2 * min_dist, 30.0)) {
      matches.push_back(match[i]);
    }
  }
}

Point2d pixel2cam(const Point2d &p, const Mat &K) {
  return Point2d
    (
      (p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
      (p.y - K.at<double>(1, 2)) / K.at<double>(1, 1)
    );
}
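
// Note on the 3D points built in main(): each one is the back-projection
// P = dd * (x, y, 1) with (x, y) = pixel2cam(u, v) and dd = d / 5000.0;
// the factor 5000 is the depth scale of the TUM RGB-D dataset used by the book.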

void bundleAdjustmentGaussNewton(
  const VecVector3d &points_3d,
  const VecVector2d &points_2d,
  const Mat &K,
  Sophus::SE3d &pose) {
  typedef Eigen::Matrix<double, 6, 1> Vector6d;
  const int iterations = 10;
  double cost = 0, lastCost = 0;
  double fx = K.at<double>(0, 0);
  double fy = K.at<double>(1, 1);
  double cx = K.at<double>(0, 2);
  double cy = K.at<double>(1, 2);

  for (int iter = 0; iter < iterations; iter++) {
    Eigen::Matrix<double, 6, 6> H = Eigen::Matrix<double, 6, 6>::Zero();
    Vector6d b = Vector6d::Zero();

    cost = 0;
    // compute cost
    for (int i = 0; i < points_3d.size(); i++) {
      Eigen::Vector3d pc = pose * points_3d[i];
      double inv_z = 1.0 / pc[2];
      double inv_z2 = inv_z * inv_z;
      Eigen::Vector2d proj(fx * pc[0] / pc[2] + cx, fy * pc[1] / pc[2] + cy);

      Eigen::Vector2d e = points_2d[i] - proj;

      cost += e.squaredNorm();
      Eigen::Matrix<double, 2, 6> J;
      J << -fx * inv_z,
        0,
        fx * pc[0] * inv_z2,
        fx * pc[0] * pc[1] * inv_z2,
        -fx - fx * pc[0] * pc[0] * inv_z2,
        fx * pc[1] * inv_z,
        0,
        -fy * inv_z,
        fy * pc[1] * inv_z2,
        fy + fy * pc[1] * pc[1] * inv_z2,
        -fy * pc[0] * pc[1] * inv_z2,
        -fy * pc[0] * inv_z;

      H += J.transpose() * J;
      b += -J.transpose() * e;
    }

    Vector6d dx;
    dx = H.ldlt().solve(b);

    if (isnan(dx[0])) {
      cout << "result is nan!" << endl;
      break;
    }

    if (iter > 0 && cost >= lastCost) {
      // cost increase, update is not good
      cout << "cost: " << cost << ", last cost: " << lastCost << endl;
      break;
    }

    // update your estimation
    pose = Sophus::SE3d::exp(dx) * pose;
    lastCost = cost;

    cout << "iteration " << iter << " cost=" << std::setprecision(12) << cost << endl;
    if (dx.norm() < 1e-6) {
      // converge
      break;
    }
  }

  cout << "pose by g-n: \n" << pose.matrix() << endl;
}

/// vertex and edges used in g2o ba
class VertexPose : public g2o::BaseVertex<6, Sophus::SE3d> {
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

  virtual void setToOriginImpl() override {
    _estimate = Sophus::SE3d();
  }

  /// left multiplication on SE3
  virtual void oplusImpl(const double *update) override {
    Eigen::Matrix<double, 6, 1> update_eigen;
    update_eigen << update[0], update[1], update[2], update[3], update[4], update[5];
    _estimate = Sophus::SE3d::exp(update_eigen) * _estimate;
  }

  virtual bool read(istream &in) override {}

  virtual bool write(ostream &out) const override {}
};

// unary edge: the measurement is 2-dimensional of type Eigen::Vector2d, and the associated vertex type is VertexPose
class EdgeProjection : public g2o::BaseUnaryEdge<2, Eigen::Vector2d, VertexPose> {
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

  EdgeProjection(const Eigen::Vector3d &pos, const Eigen::Matrix3d &K) : _pos3d(pos), _K(K) {}

  virtual void computeError() override {
    const VertexPose *v = static_cast<VertexPose *> (_vertices[0]);
    Sophus::SE3d T = v->estimate();
    Eigen::Vector3d pos_pixel = _K * (T * _pos3d);
    pos_pixel /= pos_pixel[2];
    _error = _measurement - pos_pixel.head<2>();
  }

  virtual void linearizeOplus() override {
    const VertexPose *v = static_cast<VertexPose *> (_vertices[0]);
    Sophus::SE3d T = v->estimate();
    Eigen::Vector3d pos_cam = T * _pos3d;
    double fx = _K(0, 0);
    double fy = _K(1, 1);
    double cx = _K(0, 2);
    double cy = _K(1, 2);
    double X = pos_cam[0];
    double Y = pos_cam[1];
    double Z = pos_cam[2];
    double Z2 = Z * Z;
    _jacobianOplusXi
      << -fx / Z, 0, fx * X / Z2, fx * X * Y / Z2, -fx - fx * X * X / Z2, fx * Y / Z,
      0, -fy / Z, fy * Y / (Z * Z), fy + fy * Y * Y / Z2, -fy * X * Y / Z2, -fy * X / Z;
  }

  virtual bool read(istream &in) override {}

  virtual bool write(ostream &out) const override {}

private:
  Eigen::Vector3d _pos3d;
  Eigen::Matrix3d _K;
};

// new: points1_2d passes the pixel coordinates of the features in the first camera's image into the BA function as measurements
void bundleAdjustmentG2O(
  const VecVector3d &points_3d,
  const VecVector2d &points1_2d,
  const VecVector2d &points2_2d,
  const Mat &K,
  Sophus::SE3d &pose) {

  // set up the graph optimization; configure g2o first
  typedef g2o::BlockSolver<g2o::BlockSolverTraits<6, 3>> BlockSolverType;  // pose is 6, landmark is 3
  typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType; // linear solver type
  // descent method: choose among GN, LM and DogLeg
  auto solver = new g2o::OptimizationAlgorithmGaussNewton(
    g2o::make_unique<BlockSolverType>(g2o::make_unique<LinearSolverType>()));
  g2o::SparseOptimizer optimizer;     // graph model
  optimizer.setAlgorithm(solver);   // set the solver
  optimizer.setVerbose(true);       // enable verbose output

  // new: vertex for the first camera's pose, initialized at the identity (R = I, t = (0,0,0)); it is treated as unknown and optimized as well
  VertexPose *vertex_pose0 = new VertexPose(); // camera vertex_pose
  vertex_pose0->setId(0);
  Eigen::Matrix3d R=Eigen::Matrix3d::Identity();
  Eigen::Vector3d t(0,0,0);
  Sophus::SE3d SE3_Rt(R,t);
  vertex_pose0->setEstimate(SE3_Rt);
  optimizer.addVertex(vertex_pose0);
  
  // vertex for the second camera's pose, to be optimized
  VertexPose *vertex_pose = new VertexPose(); // camera vertex_pose
  vertex_pose->setId(1);
  vertex_pose->setEstimate(Sophus::SE3d());
  optimizer.addVertex(vertex_pose);

  // K
  Eigen::Matrix3d K_eigen;
  K_eigen <<
  K.at<double>(0, 0), K.at<double>(0, 1), K.at<double>(0, 2),
  K.at<double>(1, 0), K.at<double>(1, 1), K.at<double>(1, 2),
  K.at<double>(2, 0), K.at<double>(2, 1), K.at<double>(2, 2);

  // edges
  int index = 1;
  
  // new: edges connected to the first camera's vertex
  for (size_t i = 0; i < points1_2d.size(); ++i) {
    auto p2d = points1_2d[i];
    auto p3d = points_3d[i];
    EdgeProjection *edge = new EdgeProjection(p3d, K_eigen);
    edge->setId(index);
    edge->setVertex(0, vertex_pose0);
    edge->setMeasurement(p2d);
    edge->setInformation(Eigen::Matrix2d::Identity());
    optimizer.addEdge(edge);
    index++;
  }
  
  // edges connected to the second camera's vertex
  for (size_t i = 0; i < points2_2d.size(); ++i) {
    auto p2d = points2_2d[i];
    auto p3d = points_3d[i];
    EdgeProjection *edge = new EdgeProjection(p3d, K_eigen);
    edge->setId(index);
    edge->setVertex(0, vertex_pose);
    edge->setMeasurement(p2d);
    edge->setInformation(Eigen::Matrix2d::Identity());
    optimizer.addEdge(edge);
    index++;
  }

  chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
  optimizer.setVerbose(true);
  optimizer.initializeOptimization();
  optimizer.optimize(10);
  chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
  chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "optimization costs time: " << time_used.count() << " seconds." << endl;
  cout << "pose estimated of camera1 by g2o =\n" << vertex_pose0->estimate().matrix() << endl;
  cout<<"********************************************************************************************"<<endl;
  cout << "pose estimated of camera2 by g2o =\n" << vertex_pose->estimate().matrix() << endl;
  cout<<"********************************************************************************************"<<endl;
  pose = vertex_pose0->estimate().inverse() * vertex_pose->estimate(); // the relative pose between the two cameras that we actually want
  cout << "pose estimated by g2o =\n" << pose.matrix() << endl;
  cout<<"********************************************************************************************"<<endl;
}
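
For reference, with camera 1's observations included, the optimization is over both camera poses and minimizes the joint reprojection error (the notation below summarizes the code above):

\min_{T_1, T_2} \sum_i \left( \left\| u_i^{(1)} - \pi\left(K T_1 P_i\right) \right\|^2 + \left\| u_i^{(2)} - \pi\left(K T_2 P_i\right) \right\|^2 \right)

where the P_i are the 3D points back-projected from the first depth map, u_i^{(1)} and u_i^{(2)} are the pixel observations in the two images, and \pi(\cdot) denotes dehomogenization. Writing P' = (X', Y', Z')^T = T P, the 2x6 Jacobian of each edge with respect to the left perturbation \delta\xi of its own pose vertex is the one filled in by linearizeOplus (and by J in the Gauss-Newton version):

\frac{\partial e}{\partial \delta\xi} =
\begin{bmatrix}
-\frac{f_x}{Z'} & 0 & \frac{f_x X'}{Z'^2} & \frac{f_x X' Y'}{Z'^2} & -f_x - \frac{f_x X'^2}{Z'^2} & \frac{f_x Y'}{Z'} \\
0 & -\frac{f_y}{Z'} & \frac{f_y Y'}{Z'^2} & f_y + \frac{f_y Y'^2}{Z'^2} & -\frac{f_y X' Y'}{Z'^2} & -\frac{f_y X'}{Z'}
\end{bmatrix}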


CMakeLists.txt

cmake_minimum_required(VERSION 2.8)
project(ch7_1)

set(CMAKE_BUILD_TYPE "Release")
add_definitions("-DENABLE_SSE")
#set(CMAKE_CXX_FLAGS "-std=c++11 -O2 ${SSE_FLAGS} -msse4")
set(CMAKE_CXX_FLAGS "-std=c++14 -O2 ${SSE_FLAGS} -msse4") 

list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)

find_package(OpenCV 3 REQUIRED)
find_package(G2O REQUIRED)
find_package(Sophus REQUIRED)

include_directories(
        ${OpenCV_INCLUDE_DIRS}
        ${G2O_INCLUDE_DIRS}
        ${Sophus_INCLUDE_DIRS}
        "/usr/local/include/eigen3/"
       
)


add_executable(pose_estimation_3d2d1 pose_estimation_3d2d1.cpp)
target_link_libraries(pose_estimation_3d2d1
        g2o_core g2o_stuff
        ${OpenCV_LIBS} fmt)
     
      
        

Execution results:
 

./pose_estimation_3d2d1 1.png 2.png 1_depth.png 2_depth.png 
-- Max dist : 94.000000 
-- Min dist : 4.000000 
found 79 matched point pairs in total
3d-2d pairs: 75
solve pnp in opencv cost time: 0.000222851 seconds.
R=
[0.9978421994904717, -0.05082794169150364, 0.04156278695447151;
 0.04960871746500106, 0.99832231927417, 0.02985836550315016;
 -0.04301069712868395, -0.02773206055177137, 0.9986896478636682]
t=
[-0.1293079832092879;
 -0.0112975606796175;
 0.05863792991603023]
calling bundle adjustment by gauss newton
iteration 0 cost=40517.7576706
iteration 1 cost=410.547029116
iteration 2 cost=299.76468142
iteration 3 cost=299.763574327
pose by g-n: 
   0.997905909549  -0.0509194008562   0.0398874705187   -0.126782139096
   0.049818662505    0.998362315745   0.0281209417649 -0.00843949683874
 -0.0412540489424  -0.0260749135374    0.998808391199   0.0603493575229
                0                 0                 0                 1
solve pnp by gauss newton cost time: 0.000229104 seconds.
calling bundle adjustment by g2o
iteration= 0	 chi2= 410.547029	 time= 5.9878e-05	 cumTime= 5.9878e-05	 edges= 150	 schur= 0
iteration= 1	 chi2= 299.764681	 time= 3.1583e-05	 cumTime= 9.1461e-05	 edges= 150	 schur= 0
iteration= 2	 chi2= 299.763574	 time= 2.9588e-05	 cumTime= 0.000121049	 edges= 150	 schur= 0
iteration= 3	 chi2= 299.763574	 time= 2.8997e-05	 cumTime= 0.000150046	 edges= 150	 schur= 0
iteration= 4	 chi2= 299.763574	 time= 2.8867e-05	 cumTime= 0.000178913	 edges= 150	 schur= 0
iteration= 5	 chi2= 299.763574	 time= 2.8827e-05	 cumTime= 0.00020774	 edges= 150	 schur= 0
iteration= 6	 chi2= 299.763574	 time= 2.8887e-05	 cumTime= 0.000236627	 edges= 150	 schur= 0
iteration= 7	 chi2= 299.763574	 time= 2.8927e-05	 cumTime= 0.000265554	 edges= 150	 schur= 0
iteration= 8	 chi2= 299.763574	 time= 2.8928e-05	 cumTime= 0.000294482	 edges= 150	 schur= 0
iteration= 9	 chi2= 299.763574	 time= 2.9168e-05	 cumTime= 0.00032365	 edges= 150	 schur= 0
optimization costs time: 0.001348107 seconds.
pose estimated of camera1 by g2o =
                 1 -8.30471484567e-10   1.3677928812e-09  -3.5822879406e-09
 8.30471481332e-10                  1  2.36527096186e-09 -4.11344716813e-09
-1.36779288316e-09 -2.36527096072e-09                  1 -6.89559171071e-10
                 0                  0                  0                  1
********************************************************************************************
pose estimated of camera2 by g2o =
    0.99790590955  -0.0509194008911   0.0398874704367   -0.126782138956
  0.0498186625425    0.998362315744   0.0281209417542 -0.00843949681823
 -0.0412540488609  -0.0260749135293    0.998808391203   0.0603493574888
                0                 0                 0                 1
********************************************************************************************
pose estimated by g2o =
   0.997905909648  -0.0509194000263   0.0398874690938   -0.126782135463
  0.0498186618114    0.998362315848   0.0281209393586 -0.00843949274224
 -0.0412540473782  -0.0260749112375    0.998808391324    0.060349357985
                0                 0                 0                 1
********************************************************************************************
solve pnp by g2o cost time: 0.001882344 seconds.
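
As the output shows, camera 1's estimated pose stays at the identity (up to ~1e-9) and camera 2's pose agrees with the pose-only optimization to many digits, so the relative pose is essentially unchanged. This is expected: the 3D points were back-projected from camera 1's own pixels, so camera 1's reprojection error is exactly zero at the identity and the extra edges add no conflicting information. They would start to matter if the 3D points were also optimized or came from an independent source.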

 

7. In the ICP program, how should the program be written so that the spatial points are also treated as optimization variables? And how does the final result change?

Code link: HW-of-SLAMBOOK2/p7.cpp at main · Philipcjh/HW-of-SLAMBOOK2 · GitHub

pose_estimation_3d3d1.cpp

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <Eigen/Core>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <Eigen/SVD>
#include <g2o/core/base_vertex.h>
#include <g2o/core/base_binary_edge.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/core/optimization_algorithm_levenberg.h>
#include <g2o/solvers/dense/linear_solver_dense.h>
#include <chrono>
#include <sophus/se3.hpp>

using namespace std;
using namespace cv;

void find_feature_matches(
  const Mat &img_1, const Mat &img_2,
  std::vector<KeyPoint> &keypoints_1,
  std::vector<KeyPoint> &keypoints_2,
  std::vector<DMatch> &matches);

// convert pixel coordinates to normalized camera coordinates
Point2d pixel2cam(const Point2d &p, const Mat &K);

void pose_estimation_3d3d(
  const vector<Point3f> &pts1,
  const vector<Point3f> &pts2,
  Mat &R, Mat &t
);

void bundleAdjustment(
  const vector<Point3f> &pts1,
  vector<Point3f> &pts2,
  Mat &R, Mat &t
);

/// vertex and edges used in g2o ba
class VertexPose : public g2o::BaseVertex<6, Sophus::SE3d> {
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

  virtual void setToOriginImpl() override {
    _estimate = Sophus::SE3d();
  }

  /// left multiplication on SE3
  virtual void oplusImpl(const double *update) override {
    Eigen::Matrix<double, 6, 1> update_eigen;
    update_eigen << update[0], update[1], update[2], update[3], update[4], update[5];
    _estimate = Sophus::SE3d::exp(update_eigen) * _estimate;
  }

  virtual bool read(istream &in) override {}

  virtual bool write(ostream &out) const override {}
};

/// new vertex type for the spatial points
class VertexPoint : public g2o::BaseVertex<3, Eigen::Vector3d> {
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

  virtual void setToOriginImpl() override {
    _estimate = Eigen::Vector3d(0,0,0);
  }

  /// additive update of the spatial point
  virtual void oplusImpl(const double *update) override {
    _estimate += Eigen::Vector3d(update[0], update[1], update[2]) ;
  }

  virtual bool read(istream &in) override {}

  virtual bool write(ostream &out) const override {}
};
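
// A possible refinement (not done here): mark the point vertices as marginalized
// (point->setMarginalized(true)) and use g2o::BlockSolver_6_3, so that the solver
// exploits the Schur complement; with only ~72 points the dense BlockSolverX used
// below is fine, which is why the log reports schur= 0.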

/// g2o edge: changed to a binary edge; the two vertices are the camera pose (VertexPose) and the spatial point (VertexPoint)
class EdgeProjectXYZRGBDPoseAndPoint : public g2o::BaseBinaryEdge<3, Eigen::Vector3d, VertexPose, VertexPoint> {
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

  //EdgeProjectXYZRGBDPoseAndPoint(const Eigen::Vector3d &point) : _point(point) {}
  
  virtual void computeError() override {
    const VertexPose *pose = static_cast<const VertexPose *> ( _vertices[0] );
    const VertexPoint *point = static_cast<const VertexPoint *> ( _vertices[1] );
    _error = _measurement - pose->estimate() * point->estimate();
    // _measurement is the 3D point measured by the first camera, expressed in the world frame (i.e. the first camera's frame)
    // pose->estimate() * point->estimate() maps the point measured by the second camera from the second camera's frame into the world frame
  }

//   since we do not know the analytic Jacobian, this block can be left out; g2o will then differentiate numerically, at some cost in speed
//   virtual void linearizeOplus() override {
//     VertexPose *pose = static_cast<VertexPose *>(_vertices[0]);
//     Sophus::SE3d T = pose->estimate();
//     Eigen::Vector3d xyz_trans = T * _point;
//     _jacobianOplusXi.block<3, 3>(0, 0) = -Eigen::Matrix3d::Identity();
//     _jacobianOplusXi.block<3, 3>(0, 3) = Sophus::SO3d::hat(xyz_trans);
//   }

  bool read(istream &in) {}

  bool write(ostream &out) const {}

protected:
  Eigen::Vector3d _point;
};
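
// If analytic Jacobians were wanted for this binary edge: with e = p - T p',
//   de/d(delta xi) = [ -I, (T p')^ ]  (3x6, left perturbation, as in the commented block)
//   de/dp'         = -R               (3x3, the rotation part of T)
// i.e. _jacobianOplusXi as above and _jacobianOplusXj = -T.rotationMatrix().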

int main(int argc, char **argv) {
  if (argc != 5) {
    cout << "usage: pose_estimation_3d3d img1 img2 depth1 depth2" << endl;
    return 1;
  }
  //-- load the images
  Mat img_1 = imread(argv[1], CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread(argv[2], CV_LOAD_IMAGE_COLOR);

  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
  cout << "一共找到了" << matches.size() << "组匹配点" << endl;

  // build 3D points
  Mat depth1 = imread(argv[3], CV_LOAD_IMAGE_UNCHANGED);       // the depth map is a 16-bit unsigned, single-channel image
  Mat depth2 = imread(argv[4], CV_LOAD_IMAGE_UNCHANGED);       // the depth map is a 16-bit unsigned, single-channel image
  Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  vector<Point3f> pts1, pts2;

  for (DMatch m:matches) {
    ushort d1 = depth1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
    ushort d2 = depth2.ptr<unsigned short>(int(keypoints_2[m.trainIdx].pt.y))[int(keypoints_2[m.trainIdx].pt.x)];
    if (d1 == 0 || d2 == 0)   // bad depth
      continue;
    Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
    Point2d p2 = pixel2cam(keypoints_2[m.trainIdx].pt, K);
    float dd1 = float(d1) / 5000.0;
    float dd2 = float(d2) / 5000.0;
    pts1.push_back(Point3f(p1.x * dd1, p1.y * dd1, dd1));
    pts2.push_back(Point3f(p2.x * dd2, p2.y * dd2, dd2));
  }

  cout << "3d-3d pairs: " << pts1.size() << endl;
  Mat R, t;
  pose_estimation_3d3d(pts1, pts2, R, t);
  cout << "ICP via SVD results: " << endl;
  cout << "R = " << R << endl;
  cout << "t = " << t << endl;
  cout << "R_inv = " << R.t() << endl;
  cout << "t_inv = " << -R.t() * t << endl;

  cout << "calling bundle adjustment" << endl;

  bundleAdjustment(pts1, pts2, R, t);

  // verify p1 = R * p2 + t
//   for (int i = 0; i < 5; i++) {
//     cout << "p1 = " << pts1[i] << endl;
//     cout << "p2 = " << pts2[i] << endl;
//     cout << "(R*p2+t) = " <<
//          R * (Mat_<double>(3, 1) << pts2[i].x, pts2[i].y, pts2[i].z) + t
//          << endl;
//     cout << endl;
//   }
  
  double error_total = 0;
  for(size_t i = 0; i < pts1.size(); i++)
  {
      cv::Mat error = (Mat_<double>(3, 1) << pts1[i].x, pts1[i].y, pts1[i].z) - (R * (Mat_<double>(3, 1) << pts2[i].x, pts2[i].y, pts2[i].z) + t);
      error_total += norm(error);  
  }
  cout << "total error is " << error_total << endl;
  
}

void find_feature_matches(const Mat &img_1, const Mat &img_2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches) {
  //-- initialization
  Mat descriptors_1, descriptors_2;
  // used in OpenCV3
  Ptr<FeatureDetector> detector = ORB::create();
  Ptr<DescriptorExtractor> descriptor = ORB::create();
  // use this if you are in OpenCV2
  // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
  // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
  Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
  //-- step 1: detect Oriented FAST corner locations
  detector->detect(img_1, keypoints_1);
  detector->detect(img_2, keypoints_2);

  //-- step 2: compute BRIEF descriptors from the corner locations
  descriptor->compute(img_1, keypoints_1, descriptors_1);
  descriptor->compute(img_2, keypoints_2, descriptors_2);

  //-- step 3: match the BRIEF descriptors of the two images using the Hamming distance
  vector<DMatch> match;
  // BFMatcher matcher ( NORM_HAMMING );
  matcher->match(descriptors_1, descriptors_2, match);

  //-- step 4: filter the matched point pairs
  double min_dist = 10000, max_dist = 0;

  // find the minimum and maximum distances over all matches, i.e. the distances between the most similar and the least similar pairs
  for (int i = 0; i < descriptors_1.rows; i++) {
    double dist = match[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist);
  printf("-- Min dist : %f \n", min_dist);

  // a match is considered wrong when the descriptor distance exceeds twice the minimum distance; since the minimum distance can be very small, an empirical value of 30 serves as a lower bound
  for (int i = 0; i < descriptors_1.rows; i++) {
    if (match[i].distance <= max(2 * min_dist, 30.0)) {
      matches.push_back(match[i]);
    }
  }
}

Point2d pixel2cam(const Point2d &p, const Mat &K) {
  return Point2d(
    (p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
    (p.y - K.at<double>(1, 2)) / K.at<double>(1, 1)
  );
}

void pose_estimation_3d3d(const vector<Point3f> &pts1,
                          const vector<Point3f> &pts2,
                          Mat &R, Mat &t) {
  Point3f p1, p2;     // center of mass
  int N = pts1.size();
  for (int i = 0; i < N; i++) {
    p1 += pts1[i];
    p2 += pts2[i];
  }
  p1 = Point3f(Vec3f(p1) / N);
  p2 = Point3f(Vec3f(p2) / N);
  vector<Point3f> q1(N), q2(N); // remove the center
  for (int i = 0; i < N; i++) {
    q1[i] = pts1[i] - p1;
    q2[i] = pts2[i] - p2;
  }

  // compute q1*q2^T
  Eigen::Matrix3d W = Eigen::Matrix3d::Zero();
  for (int i = 0; i < N; i++) {
    W += Eigen::Vector3d(q1[i].x, q1[i].y, q1[i].z) * Eigen::Vector3d(q2[i].x, q2[i].y, q2[i].z).transpose();
  }
  cout << "W=" << W << endl;

  // SVD on W
  Eigen::JacobiSVD<Eigen::Matrix3d> svd(W, Eigen::ComputeFullU | Eigen::ComputeFullV);
  Eigen::Matrix3d U = svd.matrixU();
  Eigen::Matrix3d V = svd.matrixV();

  cout << "U=" << U << endl;
  cout << "V=" << V << endl;

  Eigen::Matrix3d R_ = U * (V.transpose());
  if (R_.determinant() < 0) {
    R_ = -R_;
  }
  Eigen::Vector3d t_ = Eigen::Vector3d(p1.x, p1.y, p1.z) - R_ * Eigen::Vector3d(p2.x, p2.y, p2.z);

  // convert to cv::Mat
  R = (Mat_<double>(3, 3) <<
    R_(0, 0), R_(0, 1), R_(0, 2),
    R_(1, 0), R_(1, 1), R_(1, 2),
    R_(2, 0), R_(2, 1), R_(2, 2)
  );
  t = (Mat_<double>(3, 1) << t_(0, 0), t_(1, 0), t_(2, 0));
}
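
// The function above is the closed-form SVD alignment (Arun et al.'s method used
// in the book): de-mean both point sets, accumulate W = sum_i q1_i * q2_i^T,
// decompose W = U S V^T, then R = U V^T (sign-flipped if det(R) < 0) and
// t = centroid(pts1) - R * centroid(pts2).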

void bundleAdjustment(
  const vector<Point3f> &pts1,
  vector<Point3f> &pts2,
  Mat &R, Mat &t) {
  // set up the graph optimization; configure g2o first
  typedef g2o::BlockSolverX BlockSolverType;
  typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType; // linear solver type
  // descent method: choose among GN, LM and DogLeg
  auto solver = new g2o::OptimizationAlgorithmLevenberg(
    g2o::make_unique<BlockSolverType>(g2o::make_unique<LinearSolverType>()));
  g2o::SparseOptimizer optimizer;     // graph model
  optimizer.setAlgorithm(solver);   // set the solver
  optimizer.setVerbose(true);       // enable verbose output

  // vertex for the camera pose (SE3, updated via its Lie algebra)
  VertexPose *pose = new VertexPose(); // camera pose
  pose->setId(0);
  pose->setEstimate(Sophus::SE3d());
  optimizer.addVertex(pose);
  
  // vertices for the spatial points
  for (size_t i = 0; i < pts2.size(); i++) {
    VertexPoint *point = new VertexPoint(); // spatial point
    point->setId(i + 1);
    point->setEstimate(Eigen::Vector3d(pts2[i].x, pts2[i].y, pts2[i].z));
    optimizer.addVertex(point);
  }
  
  // edges (pts1.size() == pts2.size())
  for (size_t i = 0; i < pts1.size(); i++) {
    EdgeProjectXYZRGBDPoseAndPoint *edge = new EdgeProjectXYZRGBDPoseAndPoint();
    edge->setVertex(0, pose);
    edge->setVertex(1, dynamic_cast<VertexPoint *> ( optimizer.vertex ( i+1 ) ));
    edge->setMeasurement(Eigen::Vector3d(pts1[i].x, pts1[i].y, pts1[i].z));
    edge->setInformation(Eigen::Matrix3d::Identity());
    optimizer.addEdge(edge);
  }

  chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
  optimizer.initializeOptimization();
  optimizer.optimize(10);
  chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
  chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "optimization costs time: " << time_used.count() << " seconds." << endl;

  cout << endl << "after optimization:" << endl;
  cout << "T=\n" << pose->estimate().matrix() << endl;

  // convert to cv::Mat
  Eigen::Matrix3d R_ = pose->estimate().rotationMatrix();
  Eigen::Vector3d t_ = pose->estimate().translation();
  R = (Mat_<double>(3, 3) <<
    R_(0, 0), R_(0, 1), R_(0, 2),
    R_(1, 0), R_(1, 1), R_(1, 2),
    R_(2, 0), R_(2, 1), R_(2, 2)
  );
  t = (Mat_<double>(3, 1) << t_(0, 0), t_(1, 0), t_(2, 0));
  
  for (size_t i = 0; i < pts2.size(); i++) {
    Eigen::Vector3d vertex_point = dynamic_cast<VertexPoint *>(optimizer.vertex(i + 1))->estimate();
    pts2[i] = Point3f(vertex_point(0), vertex_point(1), vertex_point(2));
  }
}
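
With the points added as vertices, the problem solved above becomes a joint optimization over the pose and all spatial points (the formula summarizes the code; the p_i are the first frame's points used as measurements, and the p_i' are initialized from the second frame's depth and then optimized together with T):

\min_{T, \{p_i'\}} \frac{1}{2} \sum_i \left\| p_i - T p_i' \right\|^2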


CMakeLists.txt

cmake_minimum_required(VERSION 2.8)
project(ch7_1)

set(CMAKE_BUILD_TYPE "Release")
add_definitions("-DENABLE_SSE")
#set(CMAKE_CXX_FLAGS "-std=c++11 -O2 ${SSE_FLAGS} -msse4")
set(CMAKE_CXX_FLAGS "-std=c++14 -O2 ${SSE_FLAGS} -msse4") 

list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)

find_package(OpenCV 3 REQUIRED)
find_package(G2O REQUIRED)
find_package(Sophus REQUIRED)

include_directories(
        ${OpenCV_INCLUDE_DIRS}
        ${G2O_INCLUDE_DIRS}
        ${Sophus_INCLUDE_DIRS}
        "/usr/local/include/eigen3/"
       
)

        
add_executable(pose_estimation_3d3d1 pose_estimation_3d3d1.cpp)
target_link_libraries(pose_estimation_3d3d1
        g2o_core g2o_stuff
        ${OpenCV_LIBS} fmt)
      

Execution results:

 ./pose_estimation_3d3d1 1.png 2.png 1_depth.png 2_depth.png 
-- Max dist : 94.000000 
-- Min dist : 4.000000 
found 79 matched point pairs in total
3d-3d pairs: 72
W=  10.871 -1.01948  2.54771
-2.16033  3.85307 -5.77742
 3.94738 -5.79979  9.62203
U=  0.558087  -0.829399 -0.0252034
 -0.428009  -0.313755   0.847565
  0.710878   0.462228   0.530093
V=  0.617887  -0.784771 -0.0484806
 -0.399894  -0.366747   0.839989
  0.676979   0.499631   0.540434
ICP via SVD results: 
R = [0.9969452349468715, 0.05983347698056557, -0.05020113095482046;
 -0.05932607657705309, 0.9981719679735133, 0.01153858699565957;
 0.05079975545906246, -0.008525103184062521, 0.9986724725659557]
t = [0.144159841091821;
 -0.06667849443812729;
 -0.03009747273569774]
R_inv = [0.9969452349468715, -0.05932607657705309, 0.05079975545906246;
 0.05983347698056557, 0.9981719679735133, -0.008525103184062521;
 -0.05020113095482046, 0.01153858699565957, 0.9986724725659557]
t_inv = [-0.1461462958593589;
 0.05767443542067568;
 0.03806388018483625]
calling bundle adjustment
iteration= 0	 chi2= 0.006041	 time= 0.00104646	 cumTime= 0.00104646	 edges= 72	 schur= 0	 lambda= 0.000758	 levenbergIter= 1
iteration= 1	 chi2= 0.000000	 time= 0.000937414	 cumTime= 0.00198387	 edges= 72	 schur= 0	 lambda= 0.000480	 levenbergIter= 1
iteration= 2	 chi2= 0.000000	 time= 0.000928525	 cumTime= 0.0029124	 edges= 72	 schur= 0	 lambda= 0.000320	 levenbergIter= 1
iteration= 3	 chi2= 0.000000	 time= 0.00100162	 cumTime= 0.00391402	 edges= 72	 schur= 0	 lambda= 0.000213	 levenbergIter= 1
iteration= 4	 chi2= 0.000000	 time= 0.000932574	 cumTime= 0.00484659	 edges= 72	 schur= 0	 lambda= 0.000142	 levenbergIter= 1
iteration= 5	 chi2= 0.000000	 time= 0.000882985	 cumTime= 0.00572958	 edges= 72	 schur= 0	 lambda= 0.000095	 levenbergIter= 1
iteration= 6	 chi2= 0.000000	 time= 0.000873577	 cumTime= 0.00660315	 edges= 72	 schur= 0	 lambda= 0.000063	 levenbergIter= 1
iteration= 7	 chi2= 0.000000	 time= 0.00418757	 cumTime= 0.0107907	 edges= 72	 schur= 0	 lambda= 1.379977	 levenbergIter= 6
iteration= 8	 chi2= 0.000000	 time= 0.000877505	 cumTime= 0.0116682	 edges= 72	 schur= 0	 lambda= 0.919985	 levenbergIter= 1
iteration= 9	 chi2= 0.000000	 time= 0.00153128	 cumTime= 0.0131995	 edges= 72	 schur= 0	 lambda= 1.226646	 levenbergIter= 2
optimization costs time: 0.0136616 seconds.

after optimization:
T=
   0.997706   0.0572456   -0.036134    0.118703
 -0.0571248    0.998357  0.00436848    -0.05348
  0.0363247 -0.00229431    0.999337  -0.0322687
          0           0           0           1
total error is 2.60209e-06
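
Compared with optimizing the pose alone, the residual now all but vanishes: since every spatial point is a free variable, the optimizer can move the points until p_i = T p_i' holds exactly, so chi2 drops to 0 after the first iteration and the total error over the updated points is ~2.6e-6. The price is that T drifts slightly from the SVD estimate, because the points absorb part of the error; without additional constraints on the points the problem is under-constrained in this sense.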

8. * In feature matching, mismatches are unavoidable. What happens if wrong matches are fed into PnP or ICP? And what methods can you think of to avoid mismatches?

Code link: HW-of-SLAMBOOK2/p8.cpp at main · Philipcjh/HW-of-SLAMBOOK2 · GitHub

8.cpp

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <Eigen/Core>
#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/sparse_optimizer.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/solvers/dense/linear_solver_dense.h>
#include <sophus/se3.hpp>
#include <chrono>

using namespace std;
using namespace cv;

void find_feature_matches(
  const Mat &img_1, const Mat &img_2,
  std::vector<KeyPoint> &keypoints_1,
  std::vector<KeyPoint> &keypoints_2,
  std::vector<DMatch> &matches,
  double threshold
);

// convert pixel coordinates to normalized camera coordinates
Point2d pixel2cam(const Point2d &p, const Mat &K);

int main(int argc, char **argv) {
  if (argc != 5) {
    cout << "usage: pose_estimation_3d2d img1 img2 depth1 depth2" << endl;
    return 1;
  }
  //-- load the images
  Mat img_1 = imread(argv[1], CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread(argv[2], CV_LOAD_IMAGE_COLOR);
  assert(img_1.data && img_2.data && "Can not load images!");
  
  // correct matches
  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  double threshold=30;
  find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches,threshold);
  cout << "当距离为"<<threshold <<",一共找到了" << matches.size() << "组匹配点" << endl;

  // build 3D points
  Mat d1 = imread(argv[3], CV_LOAD_IMAGE_UNCHANGED);       // the depth map is a 16-bit unsigned, single-channel image
  Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  vector<Point3f> pts_3d;
  vector<Point2f> pts_2d;
  for (DMatch m:matches) {
    ushort d = d1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
    if (d == 0)   // bad depth
      continue;
    float dd = d / 5000.0;
    Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
    pts_3d.push_back(Point3f(p1.x * dd, p1.y * dd, dd));
    pts_2d.push_back(keypoints_2[m.trainIdx].pt);
  }

  cout << "3d-2d pairs: " << pts_3d.size() << endl;
  
  Mat img_match;
  drawMatches(img_1, keypoints_1, img_2, keypoints_2, matches, img_match);
  imshow("all matches", img_match);
  
  
  chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
  Mat r, t;
  solvePnP(pts_3d, pts_2d, K, Mat(), r, t, false); // call OpenCV's PnP solver; EPnP, DLS and other methods can also be selected
  Mat R;
  cv::Rodrigues(r, R); // r is in rotation-vector form; convert it to a rotation matrix with the Rodrigues formula
  chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
  chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "solve pnp in opencv cost time: " << time_used.count() << " seconds." << endl;

  cout << "R=" << endl << R << endl;
  cout << "t=" << endl << t << endl;
  
  //******************************************//
  //*************** with mismatches **********//
  vector<KeyPoint> bad_keypoints_1, bad_keypoints_2;
  vector<DMatch> bad_matches;
  threshold=60;
  find_feature_matches(img_1, img_2, bad_keypoints_1, bad_keypoints_2, bad_matches,threshold);
  cout << "当距离为"<<threshold <<",一共找到了" << bad_matches.size() << "组匹配点" << endl;

  // build 3D points
  vector<Point3f> bad_pts_3d;
  vector<Point2f> bad_pts_2d;
  for (DMatch m:bad_matches) {
    ushort d = d1.ptr<unsigned short>(int(bad_keypoints_1[m.queryIdx].pt.y))[int(bad_keypoints_1[m.queryIdx].pt.x)];
    if (d == 0)   // bad depth
      continue;
    float dd = d / 5000.0;
    Point2d p1 = pixel2cam(bad_keypoints_1[m.queryIdx].pt, K);
    bad_pts_3d.push_back(Point3f(p1.x * dd, p1.y * dd, dd));
    bad_pts_2d.push_back(bad_keypoints_2[m.trainIdx].pt);
  }

  cout << "3d-2d pairs: " << bad_pts_3d.size() << endl;
  
  Mat bad_img_match;
  drawMatches(img_1, bad_keypoints_1, img_2, bad_keypoints_2, bad_matches, bad_img_match);
  imshow("bad matches", bad_img_match);
  
  t1 = chrono::steady_clock::now();
  solvePnP(bad_pts_3d, bad_pts_2d, K, Mat(), r, t, false); // call OpenCV's PnP solver; EPnP, DLS and other methods can also be selected
  cv::Rodrigues(r, R); // r is in rotation-vector form; convert it to a rotation matrix with the Rodrigues formula
  t2 = chrono::steady_clock::now();
  time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "solve pnp in opencv cost time: " << time_used.count() << " seconds." << endl;

  cout << "R=" << endl << R << endl;
  cout << "t=" << endl << t << endl;
  
  
  waitKey(0);
  
  return 0;

}

void find_feature_matches(const Mat &img_1, const Mat &img_2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches,
                          double threshold) {
  //-- initialization
  Mat descriptors_1, descriptors_2;
  // used in OpenCV3
  Ptr<FeatureDetector> detector = ORB::create();
  Ptr<DescriptorExtractor> descriptor = ORB::create();
  // use this if you are in OpenCV2
  // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
  // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
  Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
  //-- step 1: detect Oriented FAST corner locations
  detector->detect(img_1, keypoints_1);
  detector->detect(img_2, keypoints_2);

  //-- step 2: compute BRIEF descriptors from the corner locations
  descriptor->compute(img_1, keypoints_1, descriptors_1);
  descriptor->compute(img_2, keypoints_2, descriptors_2);

  //-- step 3: match the BRIEF descriptors of the two images using the Hamming distance
  vector<DMatch> match;
  // BFMatcher matcher ( NORM_HAMMING );
  matcher->match(descriptors_1, descriptors_2, match);

  //-- step 4: filter the matched point pairs
  double min_dist = 10000, max_dist = 0;

  // find the minimum and maximum distances over all matches, i.e. the distances between the most similar and the least similar pairs
  for (int i = 0; i < descriptors_1.rows; i++) {
    double dist = match[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist);
  printf("-- Min dist : %f \n", min_dist);

  // a match is considered wrong when the descriptor distance exceeds twice the minimum distance; since the minimum distance can be very small, the threshold parameter serves as a lower bound
  for (int i = 0; i < descriptors_1.rows; i++) {
    if (match[i].distance <= max(2 * min_dist, threshold)) {
      matches.push_back(match[i]);
    }
  }
}

Point2d pixel2cam(const Point2d &p, const Mat &K) {
  return Point2d
    (
      (p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
      (p.y - K.at<double>(1, 2)) / K.at<double>(1, 1)
    );
}





CMakeLists.txt

cmake_minimum_required(VERSION 2.8)
project(ch7_1)

set(CMAKE_BUILD_TYPE "Release")
add_definitions("-DENABLE_SSE")
#set(CMAKE_CXX_FLAGS "-std=c++11 -O2 ${SSE_FLAGS} -msse4")
set(CMAKE_CXX_FLAGS "-std=c++14 -O2 ${SSE_FLAGS} -msse4") 

list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)

find_package(OpenCV 3 REQUIRED)
find_package(G2O REQUIRED)
find_package(Sophus REQUIRED)

include_directories(
        ${OpenCV_INCLUDE_DIRS}
        ${G2O_INCLUDE_DIRS}
        ${Sophus_INCLUDE_DIRS}
        "/usr/local/include/eigen3/"
       
)
      
add_executable(8 8.cpp)
target_link_libraries(8
        g2o_core g2o_stuff
        ${OpenCV_LIBS} fmt)
        
        

Execution results:

./8 1.png 2.png 1_depth.png 2_depth.png 
-- Max dist : 94.000000 
-- Min dist : 4.000000 
with distance threshold 30, found 79 matched point pairs
3d-2d pairs: 75
solve pnp in opencv cost time: 0.000658356 seconds.
R=
[0.9979059095501266, -0.05091940089110201, 0.03988747043653948;
 0.04981866254253315, 0.9983623157438158, 0.02812094175376485;
 -0.04125404886078184, -0.0260749135288436, 0.998808391202765]
t=
[-0.1267821389557959;
 -0.008439496817520764;
 0.06034935748888207]
-- Max dist : 94.000000 
-- Min dist : 4.000000 
with distance threshold 60, found 298 matched point pairs
3d-2d pairs: 260
solve pnp in opencv cost time: 0.00238102 seconds.
R=
[-0.9269295424726343, 0.0009920501983393115, -0.3752341124256654;
 -0.2025261979285458, -0.8431572649278044, 0.4980652223876625;
 -0.3158872622377807, 0.5376661068319893, 0.7817482939668314]
t=
[0.9217997091475182;
 -0.9197605796315222;
 -5.920853064068873]
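
As the output shows, once mismatches enter the data association, PnP breaks down: raising the distance threshold from 30 to 60 lets 298 matches through, and the resulting R and t bear no resemblance to the correct pose above. ICP suffers in the same way, since a single gross outlier can dominate the least-squares cost. Ways to avoid or survive mismatches include a stricter descriptor-distance threshold, the ratio test on knnMatch results, cross-checking, geometric verification, and above all robust estimation: RANSAC, or robust kernels (e.g. Huber) on the graph edges. Below is a minimal sketch of the RANSAC route using OpenCV's solvePnPRansac on the 3d-2d pairs built above (the helper function name and parameter values are illustrative, not from the original post):

#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
#include <iostream>
#include <vector>

// Estimate the pose on RANSAC inliers only; 'inliers' receives the indices of
// the matches that survived, i.e. the mismatches are rejected automatically.
bool pnpWithRansac(const std::vector<cv::Point3f> &pts_3d,
                   const std::vector<cv::Point2f> &pts_2d,
                   const cv::Mat &K, cv::Mat &r, cv::Mat &t,
                   std::vector<int> &inliers) {
  // 100 iterations, 8-pixel reprojection threshold, 99% confidence --
  // illustrative defaults, to be tuned for the data at hand
  bool ok = cv::solvePnPRansac(pts_3d, pts_2d, K, cv::Mat(), r, t,
                               false /*useExtrinsicGuess*/, 100, 8.0, 0.99, inliers);
  std::cout << "RANSAC kept " << inliers.size() << " of " << pts_3d.size()
            << " 3d-2d pairs" << std::endl;
  return ok;
}

Re-running solvePnP (or the g2o bundle adjustment) on the inlier subset should then recover a pose close to the threshold-30 result.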

 

Reposted from: 视觉SLAM十四讲(第二版)第7讲习题解答 - 知乎, https://zhuanlan.zhihu.com/p/389420864
