[SLAM] Triangulation and PnP

1. Triangulation

In depth estimation, stereo vision can recover the corresponding 3D point through the epipolar constraint, and LiDAR can obtain depth through time-of-flight (ToF). How, then, can a monocular camera obtain depth?

The answer is triangulation.

> In trigonometry and geometry, triangulation is a method of determining the distance to a target point by measuring angles to it from the known endpoints of a fixed baseline, rather than measuring the distance directly (trilateration). Given one known side and two measured angles, the target point can be fixed as the third vertex of a triangle.
> Triangulation can also refer to the accurate surveying of systems of very large triangles, called triangulation networks. This followed from the work of Willebrord Snell in 1615-17, who showed how a point could be located from the angles subtended from three known points, measured at the new unknown point rather than at the previously fixed points, a problem called resectioning. Surveying error is minimized when a large number of triangles are built at the largest appropriate scale; every point inside the network can then be located accurately. Until the rise of global navigation satellite systems in the 1980s, this triangulation method was used for accurate large-scale land surveying.

As in epipolar geometry, the two viewing rays used in triangulation lie in an epipolar plane and must satisfy the epipolar constraint.
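
For reference, with $x_1, x_2$ the normalized camera coordinates of a matched pair and $(R, t)$ the relative motion between the two views, the epipolar constraint reads

$$x_2^{\top}\, t^{\wedge} R\, x_1 = 0, \qquad E = t^{\wedge} R,$$

where $t^{\wedge}$ denotes the skew-symmetric matrix of $t$ and $E$ is the essential matrix.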

Because of noise, the two viewing rays generally do not intersect exactly at a single point P; there is always some error. How do we handle the case where the rays fail to intersect?

This is where least-squares estimation comes in.

Once we have the matched feature points and the rotation and translation between the views, computing depth is straightforward: we simply triangulate.
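
Concretely (the standard two-view derivation): write $x_1, x_2$ for the normalized coordinates of a matched pair and $s_1, s_2$ for their unknown depths. The two views are related by

$$s_2 x_2 = s_1 R x_1 + t.$$

Left-multiplying both sides by $x_2^{\wedge}$ eliminates the left-hand side:

$$s_2\, x_2^{\wedge} x_2 = 0 = s_1\, x_2^{\wedge} R x_1 + x_2^{\wedge} t,$$

which can be solved for $s_1$ (and then $s_2$). Because of the noise mentioned above, this equation generally has no exact solution, so in practice the depths are obtained in the least-squares sense.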

2. Triangulation Code

```cpp
#include <iostream>
#include <opencv2/opencv.hpp>
// #include "extra.h" // used in opencv2
using namespace std;
using namespace cv;

void find_feature_matches(
  const Mat &img_1, const Mat &img_2,
  std::vector<KeyPoint> &keypoints_1,
  std::vector<KeyPoint> &keypoints_2,
  std::vector<DMatch> &matches);

void pose_estimation_2d2d(
  const std::vector<KeyPoint> &keypoints_1,
  const std::vector<KeyPoint> &keypoints_2,
  const std::vector<DMatch> &matches,
  Mat &R, Mat &t);

void triangulation(
  const vector<KeyPoint> &keypoint_1,
  const vector<KeyPoint> &keypoint_2,
  const std::vector<DMatch> &matches,
  const Mat &R, const Mat &t,
  vector<Point3d> &points
);

/// for plotting: map depth to a color
inline cv::Scalar get_color(float depth) {
  float up_th = 50, low_th = 10, th_range = up_th - low_th;
  if (depth > up_th) depth = up_th;
  if (depth < low_th) depth = low_th;
  return cv::Scalar(255 * depth / th_range, 0, 255 * (1 - depth / th_range));
}

// convert pixel coordinates to normalized camera coordinates
Point2f pixel2cam(const Point2d &p, const Mat &K);

int main(int argc, char **argv) {
  if (argc != 3) {
    cout << "usage: triangulation img1 img2" << endl;
    return 1;
  }
  //-- load the images
  Mat img_1 = imread(argv[1], CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread(argv[2], CV_LOAD_IMAGE_COLOR);

  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
  cout << "found " << matches.size() << " matched point pairs in total" << endl;

  //-- estimate the motion between the two images
  Mat R, t;
  pose_estimation_2d2d(keypoints_1, keypoints_2, matches, R, t);

  //-- triangulate
  vector<Point3d> points;
  triangulation(keypoints_1, keypoints_2, matches, R, t, points);

  //-- verify the reprojection relation between the triangulated points and the feature points
  Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  Mat img1_plot = img_1.clone();
  Mat img2_plot = img_2.clone();
  for (int i = 0; i < matches.size(); i++) {
    // first image
    float depth1 = points[i].z;
    cout << "depth: " << depth1 << endl;
    Point2d pt1_cam = pixel2cam(keypoints_1[matches[i].queryIdx].pt, K);
    cv::circle(img1_plot, keypoints_1[matches[i].queryIdx].pt, 2, get_color(depth1), 2);

    // second image
    Mat pt2_trans = R * (Mat_<double>(3, 1) << points[i].x, points[i].y, points[i].z) + t;
    float depth2 = pt2_trans.at<double>(2, 0);
    cv::circle(img2_plot, keypoints_2[matches[i].trainIdx].pt, 2, get_color(depth2), 2);
  }
  cv::imshow("img 1", img1_plot);
  cv::imshow("img 2", img2_plot);
  cv::waitKey();

  return 0;
}

void find_feature_matches(const Mat &img_1, const Mat &img_2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches) {
  //-- initialization
  Mat descriptors_1, descriptors_2;
  // used in OpenCV3
  Ptr<FeatureDetector> detector = ORB::create();
  Ptr<DescriptorExtractor> descriptor = ORB::create();
  // use this if you are in OpenCV2
  // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
  // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
  Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
  //-- step 1: detect Oriented FAST corner positions
  detector->detect(img_1, keypoints_1);
  detector->detect(img_2, keypoints_2);

  //-- step 2: compute BRIEF descriptors from the corner positions
  descriptor->compute(img_1, keypoints_1, descriptors_1);
  descriptor->compute(img_2, keypoints_2, descriptors_2);

  //-- step 3: match the BRIEF descriptors of the two images using Hamming distance
  vector<DMatch> match;
  // BFMatcher matcher ( NORM_HAMMING );
  matcher->match(descriptors_1, descriptors_2, match);

  //-- step 4: filter the matched point pairs
  double min_dist = 10000, max_dist = 0;

  // find the minimum and maximum distance among all matches,
  // i.e. the distances of the most similar and least similar pairs
  for (int i = 0; i < descriptors_1.rows; i++) {
    double dist = match[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist);
  printf("-- Min dist : %f \n", min_dist);

  // a match is considered wrong when the descriptor distance exceeds twice the
  // minimum distance; since the minimum distance can be very small, an empirical
  // value of 30 is used as a lower bound
  for (int i = 0; i < descriptors_1.rows; i++) {
    if (match[i].distance <= max(2 * min_dist, 30.0)) {
      matches.push_back(match[i]);
    }
  }
}

void pose_estimation_2d2d(
  const std::vector<KeyPoint> &keypoints_1,
  const std::vector<KeyPoint> &keypoints_2,
  const std::vector<DMatch> &matches,
  Mat &R, Mat &t) {
  // camera intrinsics, TUM Freiburg2
  Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);

  //-- convert the matched points into vector<Point2f>
  vector<Point2f> points1;
  vector<Point2f> points2;

  for (int i = 0; i < (int) matches.size(); i++) {
    points1.push_back(keypoints_1[matches[i].queryIdx].pt);
    points2.push_back(keypoints_2[matches[i].trainIdx].pt);
  }

  //-- compute the essential matrix
  Point2d principal_point(325.1, 249.7);  // principal point, calibrated for the TUM dataset
  int focal_length = 521;                 // focal length, calibrated for the TUM dataset
  Mat essential_matrix;
  essential_matrix = findEssentialMat(points1, points2, focal_length, principal_point);

  //-- recover rotation and translation from the essential matrix
  recoverPose(essential_matrix, points1, points2, R, t, focal_length, principal_point);
}

void triangulation(
  const vector<KeyPoint> &keypoint_1,
  const vector<KeyPoint> &keypoint_2,
  const std::vector<DMatch> &matches,
  const Mat &R, const Mat &t,
  vector<Point3d> &points) {
  Mat T1 = (Mat_<float>(3, 4) <<
    1, 0, 0, 0,
    0, 1, 0, 0,
    0, 0, 1, 0);
  Mat T2 = (Mat_<float>(3, 4) <<
    R.at<double>(0, 0), R.at<double>(0, 1), R.at<double>(0, 2), t.at<double>(0, 0),
    R.at<double>(1, 0), R.at<double>(1, 1), R.at<double>(1, 2), t.at<double>(1, 0),
    R.at<double>(2, 0), R.at<double>(2, 1), R.at<double>(2, 2), t.at<double>(2, 0)
  );

  Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  vector<Point2f> pts_1, pts_2;
  for (DMatch m : matches) {
    // convert pixel coordinates to normalized camera coordinates
    pts_1.push_back(pixel2cam(keypoint_1[m.queryIdx].pt, K));
    pts_2.push_back(pixel2cam(keypoint_2[m.trainIdx].pt, K));
  }

  Mat pts_4d;
  cv::triangulatePoints(T1, T2, pts_1, pts_2, pts_4d);

  // convert to non-homogeneous coordinates
  for (int i = 0; i < pts_4d.cols; i++) {
    Mat x = pts_4d.col(i);
    x /= x.at<float>(3, 0); // normalize
    Point3d p(
      x.at<float>(0, 0),
      x.at<float>(1, 0),
      x.at<float>(2, 0)
    );
    points.push_back(p);
  }
}

Point2f pixel2cam(const Point2d &p, const Mat &K) {
  return Point2f
    (
      (p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
      (p.y - K.at<double>(1, 2)) / K.at<double>(1, 1)
    );
}
```


3. Summary of Triangulation

Triangulation has one precondition: there must be translation, that is, the translation must not be zero. In other words, pure rotation cannot produce a triangulated depth.
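
One way to see this: with pure rotation the two optical centers coincide, so there is no parallax and the "triangle" degenerates into a single ray. In terms of the epipolar geometry above,

$$t = 0 \;\Rightarrow\; E = t^{\wedge} R = 0,$$

so the constraint carries no depth information.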

So how can the accuracy of triangulation be improved?

There are two approaches. The first is to improve the precision of feature extraction and matching. The second is to increase the translation distance. Each comes with its own trade-offs, as quantified below.
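
As a rough sketch of the trade-off, assuming a rectified stereo-style setup with focal length $f$, baseline (translation) $b$, and disparity measurement error $\delta d$: from $z = fb/d$ one gets

$$\delta z \approx \frac{z^2}{f\,b}\,\delta d,$$

so the depth error shrinks as the translation $b$ grows, while a larger $b$ makes the two images look more different and feature matching correspondingly harder.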

4. PnP

We have already covered 2D-2D motion estimation from image matches using the eight-point algorithm. But what if we also know the definite 3D coordinates of points in space? How do we then solve for the corresponding camera motion?

When the 3D points are known, as few as three point pairs suffice (the P3P case). How do we obtain the 3D points? They can come from a depth camera or a stereo camera; once the 3D points are known, PnP can be used to solve for the pose.

> PnP camera pose estimation recovers the camera's position in the world and its orientation from several feature points with known (world) coordinates, combined with their projections in the camera image (pixel coordinates); the result is expressed as a rotation matrix (R) and a translation vector (t). PnP, short for Perspective-n-Point, is a 2D-3D algorithm for this problem: its input is a set of points' 3D world coordinates and 2D pixel coordinates, and its output is the camera's rotation matrix (R) and translation vector (t).

There are many ways to solve PnP; in short, it is a 3D-2D problem. Commonly used methods include the direct linear transform (DLT), EPnP, and bundle adjustment (BA), among others.
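
As a small illustration (a sketch, assuming `pts_3d`, `pts_2d` and `K` are prepared as in the listing below), in OpenCV the method is selected through the flags argument of `cv::solvePnP`:

```cpp
// select the PnP method via the last argument of cv::solvePnP
cv::Mat r, t;
cv::solvePnP(pts_3d, pts_2d, K, cv::Mat(), r, t, false, cv::SOLVEPNP_EPNP);  // EPnP
// other options include cv::SOLVEPNP_ITERATIVE (the default, LM on the
// reprojection error) and cv::SOLVEPNP_P3P (requires exactly 4 point pairs)
cv::Mat R;
cv::Rodrigues(r, R);  // rotation vector -> rotation matrix
```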

5. BA

The main idea of bundle adjustment is to solve for the pose by minimizing the reprojection error.

That is, we compare where the 3D points are actually observed in the image with where our current estimate reprojects them, and optimize over this difference, which can be done with least-squares estimation.

> For any 3D point P in the scene, the rays emitted from the optical center of each observing camera through P's corresponding pixel should all intersect at P; over all 3D points this yields a great many such bundles of rays. In practice, because of noise, the rays almost never meet exactly at one point, so during the solve the unknowns must be continually adjusted so that the rays finally converge at P; hence "bundle adjustment".
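
Formally (the standard formulation): with the camera pose written as a Lie-algebra element $\xi$, 3D points $P_i$, their pixel observations $u_i$, depths $s_i$, and intrinsics $K$, the objective minimized in the code below is

$$\xi^{*} = \arg\min_{\xi} \frac{1}{2} \sum_{i=1}^{n} \left\| u_i - \frac{1}{s_i} K \exp(\xi^{\wedge}) P_i \right\|_2^2 .$$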

```cpp
#include <iostream>
#include <iomanip>  // std::setprecision
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <Eigen/Core>
#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/sparse_optimizer.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/solvers/dense/linear_solver_dense.h>
#include <sophus/se3.hpp>
#include <chrono>

using namespace std;
using namespace cv;

void find_feature_matches(
  const Mat &img_1, const Mat &img_2,
  std::vector<KeyPoint> &keypoints_1,
  std::vector<KeyPoint> &keypoints_2,
  std::vector<DMatch> &matches);

// convert pixel coordinates to normalized camera coordinates
Point2d pixel2cam(const Point2d &p, const Mat &K);

// BA by g2o
typedef vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d>> VecVector2d;
typedef vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d>> VecVector3d;

void bundleAdjustmentG2O(
  const VecVector3d &points_3d,
  const VecVector2d &points_2d,
  const Mat &K,
  Sophus::SE3d &pose
);

// BA by gauss-newton
void bundleAdjustmentGaussNewton(
  const VecVector3d &points_3d,
  const VecVector2d &points_2d,
  const Mat &K,
  Sophus::SE3d &pose
);

int main(int argc, char **argv) {
  if (argc != 5) {
    cout << "usage: pose_estimation_3d2d img1 img2 depth1 depth2" << endl;
    return 1;
  }
  //-- load the images
  Mat img_1 = imread(argv[1], CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread(argv[2], CV_LOAD_IMAGE_COLOR);
  assert(img_1.data && img_2.data && "Can not load images!");

  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
  cout << "found " << matches.size() << " matched point pairs in total" << endl;

  // build the 3D points
  Mat d1 = imread(argv[3], CV_LOAD_IMAGE_UNCHANGED);  // the depth map is a 16-bit unsigned, single-channel image
  Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  vector<Point3f> pts_3d;
  vector<Point2f> pts_2d;
  for (DMatch m : matches) {
    ushort d = d1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
    if (d == 0)   // bad depth
      continue;
    float dd = d / 5000.0;
    Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
    pts_3d.push_back(Point3f(p1.x * dd, p1.y * dd, dd));
    pts_2d.push_back(keypoints_2[m.trainIdx].pt);
  }

  cout << "3d-2d pairs: " << pts_3d.size() << endl;

  chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
  Mat r, t;
  solvePnP(pts_3d, pts_2d, K, Mat(), r, t, false); // call OpenCV's PnP solver; EPnP, DLS and other methods can be selected
  Mat R;
  cv::Rodrigues(r, R); // r is a rotation vector; convert it to a matrix with the Rodrigues formula
  chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
  chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "solve pnp in opencv cost time: " << time_used.count() << " seconds." << endl;

  cout << "R=" << endl << R << endl;
  cout << "t=" << endl << t << endl;

  VecVector3d pts_3d_eigen;
  VecVector2d pts_2d_eigen;
  for (size_t i = 0; i < pts_3d.size(); ++i) {
    pts_3d_eigen.push_back(Eigen::Vector3d(pts_3d[i].x, pts_3d[i].y, pts_3d[i].z));
    pts_2d_eigen.push_back(Eigen::Vector2d(pts_2d[i].x, pts_2d[i].y));
  }

  cout << "calling bundle adjustment by gauss newton" << endl;
  Sophus::SE3d pose_gn;
  t1 = chrono::steady_clock::now();
  bundleAdjustmentGaussNewton(pts_3d_eigen, pts_2d_eigen, K, pose_gn);
  t2 = chrono::steady_clock::now();
  time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "solve pnp by gauss newton cost time: " << time_used.count() << " seconds." << endl;

  cout << "calling bundle adjustment by g2o" << endl;
  Sophus::SE3d pose_g2o;
  t1 = chrono::steady_clock::now();
  bundleAdjustmentG2O(pts_3d_eigen, pts_2d_eigen, K, pose_g2o);
  t2 = chrono::steady_clock::now();
  time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "solve pnp by g2o cost time: " << time_used.count() << " seconds." << endl;
  return 0;
}

void find_feature_matches(const Mat &img_1, const Mat &img_2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches) {
  //-- initialization
  Mat descriptors_1, descriptors_2;
  // used in OpenCV3
  Ptr<FeatureDetector> detector = ORB::create();
  Ptr<DescriptorExtractor> descriptor = ORB::create();
  // use this if you are in OpenCV2
  // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
  // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
  Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
  //-- step 1: detect Oriented FAST corner positions
  detector->detect(img_1, keypoints_1);
  detector->detect(img_2, keypoints_2);

  //-- step 2: compute BRIEF descriptors from the corner positions
  descriptor->compute(img_1, keypoints_1, descriptors_1);
  descriptor->compute(img_2, keypoints_2, descriptors_2);

  //-- step 3: match the BRIEF descriptors of the two images using Hamming distance
  vector<DMatch> match;
  // BFMatcher matcher ( NORM_HAMMING );
  matcher->match(descriptors_1, descriptors_2, match);

  //-- step 4: filter the matched point pairs
  double min_dist = 10000, max_dist = 0;

  // find the minimum and maximum distance among all matches,
  // i.e. the distances of the most similar and least similar pairs
  for (int i = 0; i < descriptors_1.rows; i++) {
    double dist = match[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist);
  printf("-- Min dist : %f \n", min_dist);

  // a match is considered wrong when the descriptor distance exceeds twice the
  // minimum distance; since the minimum distance can be very small, an empirical
  // value of 30 is used as a lower bound
  for (int i = 0; i < descriptors_1.rows; i++) {
    if (match[i].distance <= max(2 * min_dist, 30.0)) {
      matches.push_back(match[i]);
    }
  }
}

Point2d pixel2cam(const Point2d &p, const Mat &K) {
  return Point2d
    (
      (p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
      (p.y - K.at<double>(1, 2)) / K.at<double>(1, 1)
    );
}

void bundleAdjustmentGaussNewton(
  const VecVector3d &points_3d,
  const VecVector2d &points_2d,
  const Mat &K,
  Sophus::SE3d &pose) {
  typedef Eigen::Matrix<double, 6, 1> Vector6d;
  const int iterations = 10;
  double cost = 0, lastCost = 0;
  double fx = K.at<double>(0, 0);
  double fy = K.at<double>(1, 1);
  double cx = K.at<double>(0, 2);
  double cy = K.at<double>(1, 2);

  for (int iter = 0; iter < iterations; iter++) {
    Eigen::Matrix<double, 6, 6> H = Eigen::Matrix<double, 6, 6>::Zero();
    Vector6d b = Vector6d::Zero();

    cost = 0;
    // compute cost
    for (int i = 0; i < points_3d.size(); i++) {
      Eigen::Vector3d pc = pose * points_3d[i];
      double inv_z = 1.0 / pc[2];
      double inv_z2 = inv_z * inv_z;
      Eigen::Vector2d proj(fx * pc[0] / pc[2] + cx, fy * pc[1] / pc[2] + cy);

      Eigen::Vector2d e = points_2d[i] - proj;

      cost += e.squaredNorm();
      Eigen::Matrix<double, 2, 6> J;
      J << -fx * inv_z,
        0,
        fx * pc[0] * inv_z2,
        fx * pc[0] * pc[1] * inv_z2,
        -fx - fx * pc[0] * pc[0] * inv_z2,
        fx * pc[1] * inv_z,
        0,
        -fy * inv_z,
        fy * pc[1] * inv_z2,
        fy + fy * pc[1] * pc[1] * inv_z2,
        -fy * pc[0] * pc[1] * inv_z2,
        -fy * pc[0] * inv_z;

      H += J.transpose() * J;
      b += -J.transpose() * e;
    }

    Vector6d dx;
    dx = H.ldlt().solve(b);

    if (isnan(dx[0])) {
      cout << "result is nan!" << endl;
      break;
    }

    if (iter > 0 && cost >= lastCost) {
      // cost increased, the update is not good
      cout << "cost: " << cost << ", last cost: " << lastCost << endl;
      break;
    }

    // update the estimate (left-multiplicative perturbation on SE3)
    pose = Sophus::SE3d::exp(dx) * pose;
    lastCost = cost;

    cout << "iteration " << iter << " cost=" << std::setprecision(12) << cost << endl;
    if (dx.norm() < 1e-6) {
      // converged
      break;
    }
  }

  cout << "pose by g-n: \n" << pose.matrix() << endl;
}

/// vertex and edges used in g2o ba
class VertexPose : public g2o::BaseVertex<6, Sophus::SE3d> {
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

  virtual void setToOriginImpl() override {
    _estimate = Sophus::SE3d();
  }

  /// left multiplication on SE3
  virtual void oplusImpl(const double *update) override {
    Eigen::Matrix<double, 6, 1> update_eigen;
    update_eigen << update[0], update[1], update[2], update[3], update[4], update[5];
    _estimate = Sophus::SE3d::exp(update_eigen) * _estimate;
  }

  virtual bool read(istream &in) override {}

  virtual bool write(ostream &out) const override {}
};

class EdgeProjection : public g2o::BaseUnaryEdge<2, Eigen::Vector2d, VertexPose> {
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

  EdgeProjection(const Eigen::Vector3d &pos, const Eigen::Matrix3d &K) : _pos3d(pos), _K(K) {}

  virtual void computeError() override {
    const VertexPose *v = static_cast<VertexPose *> (_vertices[0]);
    Sophus::SE3d T = v->estimate();
    Eigen::Vector3d pos_pixel = _K * (T * _pos3d);
    pos_pixel /= pos_pixel[2];
    _error = _measurement - pos_pixel.head<2>();
  }

  virtual void linearizeOplus() override {
    const VertexPose *v = static_cast<VertexPose *> (_vertices[0]);
    Sophus::SE3d T = v->estimate();
    Eigen::Vector3d pos_cam = T * _pos3d;
    double fx = _K(0, 0);
    double fy = _K(1, 1);
    double cx = _K(0, 2);
    double cy = _K(1, 2);
    double X = pos_cam[0];
    double Y = pos_cam[1];
    double Z = pos_cam[2];
    double Z2 = Z * Z;
    _jacobianOplusXi
      << -fx / Z, 0, fx * X / Z2, fx * X * Y / Z2, -fx - fx * X * X / Z2, fx * Y / Z,
      0, -fy / Z, fy * Y / (Z * Z), fy + fy * Y * Y / Z2, -fy * X * Y / Z2, -fy * X / Z;
  }

  virtual bool read(istream &in) override {}

  virtual bool write(ostream &out) const override {}

private:
  Eigen::Vector3d _pos3d;
  Eigen::Matrix3d _K;
};

void bundleAdjustmentG2O(
  const VecVector3d &points_3d,
  const VecVector2d &points_2d,
  const Mat &K,
  Sophus::SE3d &pose) {

  // build the graph optimization problem: set up g2o first
  typedef g2o::BlockSolver<g2o::BlockSolverTraits<6, 3>> BlockSolverType;  // pose is 6, landmark is 3
  typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType; // linear solver type
  // gradient descent method: choose among GN, LM, DogLeg
  auto solver = new g2o::OptimizationAlgorithmGaussNewton(
    g2o::make_unique<BlockSolverType>(g2o::make_unique<LinearSolverType>()));
  g2o::SparseOptimizer optimizer;   // graph model
  optimizer.setAlgorithm(solver);   // set the solver
  optimizer.setVerbose(true);       // turn on verbose output

  // vertex
  VertexPose *vertex_pose = new VertexPose(); // camera vertex_pose
  vertex_pose->setId(0);
  vertex_pose->setEstimate(Sophus::SE3d());
  optimizer.addVertex(vertex_pose);

  // K
  Eigen::Matrix3d K_eigen;
  K_eigen <<
    K.at<double>(0, 0), K.at<double>(0, 1), K.at<double>(0, 2),
    K.at<double>(1, 0), K.at<double>(1, 1), K.at<double>(1, 2),
    K.at<double>(2, 0), K.at<double>(2, 1), K.at<double>(2, 2);

  // edges
  int index = 1;
  for (size_t i = 0; i < points_2d.size(); ++i) {
    auto p2d = points_2d[i];
    auto p3d = points_3d[i];
    EdgeProjection *edge = new EdgeProjection(p3d, K_eigen);
    edge->setId(index);
    edge->setVertex(0, vertex_pose);
    edge->setMeasurement(p2d);
    edge->setInformation(Eigen::Matrix2d::Identity());
    optimizer.addEdge(edge);
    index++;
  }

  chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
  optimizer.setVerbose(true);
  optimizer.initializeOptimization();
  optimizer.optimize(10);
  chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
  chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "optimization costs time: " << time_used.count() << " seconds." << endl;
  cout << "pose estimated by g2o =\n" << vertex_pose->estimate().matrix() << endl;
  pose = vertex_pose->estimate();
}
```

6. Summary

Bundle adjustment is one of the most commonly used optimization methods, and the reprojection-error formulation appears in most major algorithms. I hope you can thoroughly absorb the algorithmic ideas behind it.
