【SLAM】G2O BA Optimization and ICP: Study and Practice

1.G2O

In an earlier chapter I already covered the principles and applications of G2O (General Graph Optimization). The BA problem here can likewise be cast as a graph optimization problem and solved with G2O.

G2O (General Graph Optimization): a general-purpose graph optimization framework.

G2O ships with a variety of solvers, and its vertex and edge types are equally diverse; we can also define our own vertices and edges. In short, if an optimization problem can be expressed as a graph (vertices and edges), it can be solved with G2O. Typical examples include bundle adjustment (the BA problem here), ICP, and data fitting.
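
Concretely, what g2o minimizes is a sum of weighted edge errors over the graph. Written loosely (this is the standard graph-optimization objective, not anything specific to this post):

$$
\min_{x} \sum_{k} e_k(x_k)^{\top} \Omega_k \, e_k(x_k)
$$

where each vertex is an optimization variable, each edge contributes an error term $e_k$ defined on the vertices it connects (collected in $x_k$), and $\Omega_k$ is that edge's information matrix.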

So how do we solve BA with graph optimization?

For this graph optimization problem we first need to define a vertex and an edge. Here the vertex (node) is the pose of the second camera frame, and each edge is the reprojection of a 3D point observed by the camera into that second frame.
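
In other words, each edge measures the reprojection error of a 3D point $P_i$ observed at pixel $u_i$ in the second image, and BA solves for the pose $T$ that minimizes the total error. A sketch of the standard formulation (with $K$ the camera intrinsics and $s_i$ the depth of the projected point):

$$
T^{*} = \arg\min_{T} \frac{1}{2} \sum_{i=1}^{n} \left\| u_i - \frac{1}{s_i} K \, T \, P_i \right\|_2^2
$$

This is the per-edge residual evaluated in `EdgeProjection::computeError()` in the code below.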

2.BA Practice



#include <iostream>
#include <iomanip>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <Eigen/Core>
#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/sparse_optimizer.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/solvers/dense/linear_solver_dense.h>
#include <sophus/se3.hpp>
#include <chrono>

using namespace std;
using namespace cv;

void find_feature_matches(
  const Mat &img_1, const Mat &img_2,
  std::vector<KeyPoint> &keypoints_1,
  std::vector<KeyPoint> &keypoints_2,
  std::vector<DMatch> &matches);

// convert pixel coordinates to normalized camera coordinates
Point2d pixel2cam(const Point2d &p, const Mat &K);

// BA by g2o
typedef vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d>> VecVector2d;
typedef vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d>> VecVector3d;

void bundleAdjustmentG2O(
  const VecVector3d &points_3d,
  const VecVector2d &points_2d,
  const Mat &K,
  Sophus::SE3d &pose
);

// BA by gauss-newton
void bundleAdjustmentGaussNewton(
  const VecVector3d &points_3d,
  const VecVector2d &points_2d,
  const Mat &K,
  Sophus::SE3d &pose
);

int main(int argc, char **argv) {
  if (argc != 5) {
    cout << "usage: pose_estimation_3d2d img1 img2 depth1 depth2" << endl;
    return 1;
  }
  //-- read images
  Mat img_1 = imread(argv[1], CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread(argv[2], CV_LOAD_IMAGE_COLOR);
  assert(img_1.data && img_2.data && "Can not load images!");

  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
  cout << "found " << matches.size() << " matched point pairs in total" << endl;

  // build the 3D points
  Mat d1 = imread(argv[3], CV_LOAD_IMAGE_UNCHANGED);       // depth image: 16-bit unsigned, single channel
  Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  vector<Point3f> pts_3d;
  vector<Point2f> pts_2d;
  for (DMatch m:matches) {
    ushort d = d1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
    if (d == 0)   // bad depth
      continue;
    float dd = d / 5000.0;
    Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
    pts_3d.push_back(Point3f(p1.x * dd, p1.y * dd, dd));
    pts_2d.push_back(keypoints_2[m.trainIdx].pt);
  }

  cout << "3d-2d pairs: " << pts_3d.size() << endl;

  chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
  Mat r, t;
  solvePnP(pts_3d, pts_2d, K, Mat(), r, t, false); // call OpenCV's PnP solver; EPNP, DLS and other methods can be selected
  Mat R;
  cv::Rodrigues(r, R); // r is a rotation vector; convert it to a matrix with the Rodrigues formula
  chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
  chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "solve pnp in opencv cost time: " << time_used.count() << " seconds." << endl;

  cout << "R=" << endl << R << endl;
  cout << "t=" << endl << t << endl;

  VecVector3d pts_3d_eigen;
  VecVector2d pts_2d_eigen;
  for (size_t i = 0; i < pts_3d.size(); ++i) {
    pts_3d_eigen.push_back(Eigen::Vector3d(pts_3d[i].x, pts_3d[i].y, pts_3d[i].z));
    pts_2d_eigen.push_back(Eigen::Vector2d(pts_2d[i].x, pts_2d[i].y));
  }

  cout << "calling bundle adjustment by gauss newton" << endl;
  Sophus::SE3d pose_gn;
  t1 = chrono::steady_clock::now();
  bundleAdjustmentGaussNewton(pts_3d_eigen, pts_2d_eigen, K, pose_gn);
  t2 = chrono::steady_clock::now();
  time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "solve pnp by gauss newton cost time: " << time_used.count() << " seconds." << endl;

  cout << "calling bundle adjustment by g2o" << endl;
  Sophus::SE3d pose_g2o;
  t1 = chrono::steady_clock::now();
  bundleAdjustmentG2O(pts_3d_eigen, pts_2d_eigen, K, pose_g2o);
  t2 = chrono::steady_clock::now();
  time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "solve pnp by g2o cost time: " << time_used.count() << " seconds." << endl;
  return 0;
}


Point2d pixel2cam(const Point2d &p, const Mat &K) {
  return Point2d
    (
      (p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
      (p.y - K.at<double>(1, 2)) / K.at<double>(1, 1)
    );
}

void bundleAdjustmentGaussNewton(
  const VecVector3d &points_3d,
  const VecVector2d &points_2d,
  const Mat &K,
  Sophus::SE3d &pose) {
  typedef Eigen::Matrix<double, 6, 1> Vector6d;
  const int iterations = 10;
  double cost = 0, lastCost = 0;
  double fx = K.at<double>(0, 0);
  double fy = K.at<double>(1, 1);
  double cx = K.at<double>(0, 2);
  double cy = K.at<double>(1, 2);

  for (int iter = 0; iter < iterations; iter++) {
    Eigen::Matrix<double, 6, 6> H = Eigen::Matrix<double, 6, 6>::Zero();
    Vector6d b = Vector6d::Zero();

    cost = 0;
    // compute cost
    for (int i = 0; i < points_3d.size(); i++) {
      Eigen::Vector3d pc = pose * points_3d[i];
      double inv_z = 1.0 / pc[2];
      double inv_z2 = inv_z * inv_z;
      Eigen::Vector2d proj(fx * pc[0] / pc[2] + cx, fy * pc[1] / pc[2] + cy);

      Eigen::Vector2d e = points_2d[i] - proj;

      cost += e.squaredNorm();
      Eigen::Matrix<double, 2, 6> J;
      J << -fx * inv_z,
        0,
        fx * pc[0] * inv_z2,
        fx * pc[0] * pc[1] * inv_z2,
        -fx - fx * pc[0] * pc[0] * inv_z2,
        fx * pc[1] * inv_z,
        0,
        -fy * inv_z,
        fy * pc[1] * inv_z2,
        fy + fy * pc[1] * pc[1] * inv_z2,
        -fy * pc[0] * pc[1] * inv_z2,
        -fy * pc[0] * inv_z;

      H += J.transpose() * J;
      b += -J.transpose() * e;
    }

    Vector6d dx;
    dx = H.ldlt().solve(b);

    if (isnan(dx[0])) {
      cout << "result is nan!" << endl;
      break;
    }

    if (iter > 0 && cost >= lastCost) {
      // cost increased, the update is not good
      cout << "cost: " << cost << ", last cost: " << lastCost << endl;
      break;
    }

    // update the estimate with the left-multiplied perturbation
    pose = Sophus::SE3d::exp(dx) * pose;
    lastCost = cost;

    cout << "iteration " << iter << " cost=" << std::setprecision(12) << cost << endl;
    if (dx.norm() < 1e-6) {
      // converge
      break;
    }
  }

  cout << "pose by g-n: \n" << pose.matrix() << endl;
}

/// vertex and edges used in g2o ba
class VertexPose : public g2o::BaseVertex<6, Sophus::SE3d> {
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

  virtual void setToOriginImpl() override {
    _estimate = Sophus::SE3d();
  }

  /// left multiplication on SE3
  virtual void oplusImpl(const double *update) override {
    Eigen::Matrix<double, 6, 1> update_eigen;
    update_eigen << update[0], update[1], update[2], update[3], update[4], update[5];
    _estimate = Sophus::SE3d::exp(update_eigen) * _estimate;
  }

  virtual bool read(istream &in) override {}

  virtual bool write(ostream &out) const override {}
};

class EdgeProjection : public g2o::BaseUnaryEdge<2, Eigen::Vector2d, VertexPose> {
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

  EdgeProjection(const Eigen::Vector3d &pos, const Eigen::Matrix3d &K) : _pos3d(pos), _K(K) {}

  virtual void computeError() override {
    const VertexPose *v = static_cast<VertexPose *> (_vertices[0]);
    Sophus::SE3d T = v->estimate();
    Eigen::Vector3d pos_pixel = _K * (T * _pos3d);
    pos_pixel /= pos_pixel[2];
    _error = _measurement - pos_pixel.head<2>();
  }

  virtual void linearizeOplus() override {
    const VertexPose *v = static_cast<VertexPose *> (_vertices[0]);
    Sophus::SE3d T = v->estimate();
    Eigen::Vector3d pos_cam = T * _pos3d;
    double fx = _K(0, 0);
    double fy = _K(1, 1);
    double cx = _K(0, 2);
    double cy = _K(1, 2);
    double X = pos_cam[0];
    double Y = pos_cam[1];
    double Z = pos_cam[2];
    double Z2 = Z * Z;
    _jacobianOplusXi
      << -fx / Z, 0, fx * X / Z2, fx * X * Y / Z2, -fx - fx * X * X / Z2, fx * Y / Z,
      0, -fy / Z, fy * Y / Z2, fy + fy * Y * Y / Z2, -fy * X * Y / Z2, -fy * X / Z;
  }

  virtual bool read(istream &in) override {}

  virtual bool write(ostream &out) const override {}

private:
  Eigen::Vector3d _pos3d;
  Eigen::Matrix3d _K;
};

void bundleAdjustmentG2O(
  const VecVector3d &points_3d,
  const VecVector2d &points_2d,
  const Mat &K,
  Sophus::SE3d &pose) {

  // build the graph optimization problem; configure g2o first
  typedef g2o::BlockSolver<g2o::BlockSolverTraits<6, 3>> BlockSolverType;  // pose is 6, landmark is 3
  typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType; // type of the linear solver
  // gradient descent method, choose from GN, LM, DogLeg
  auto solver = new g2o::OptimizationAlgorithmGaussNewton(
    g2o::make_unique<BlockSolverType>(g2o::make_unique<LinearSolverType>()));
  g2o::SparseOptimizer optimizer;   // graph model
  optimizer.setAlgorithm(solver);   // set the solver
  optimizer.setVerbose(true);       // enable verbose output

  // vertex
  VertexPose *vertex_pose = new VertexPose(); // camera vertex_pose
  vertex_pose->setId(0);
  vertex_pose->setEstimate(Sophus::SE3d());
  optimizer.addVertex(vertex_pose);

  // K
  Eigen::Matrix3d K_eigen;
  K_eigen <<
    K.at<double>(0, 0), K.at<double>(0, 1), K.at<double>(0, 2),
    K.at<double>(1, 0), K.at<double>(1, 1), K.at<double>(1, 2),
    K.at<double>(2, 0), K.at<double>(2, 1), K.at<double>(2, 2);

  // edges
  int index = 1;
  for (size_t i = 0; i < points_2d.size(); ++i) {
    auto p2d = points_2d[i];
    auto p3d = points_3d[i];
    EdgeProjection *edge = new EdgeProjection(p3d, K_eigen);
    edge->setId(index);
    edge->setVertex(0, vertex_pose);
    edge->setMeasurement(p2d);
    edge->setInformation(Eigen::Matrix2d::Identity());
    optimizer.addEdge(edge);
    index++;
  }

  chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
  optimizer.setVerbose(true);
  optimizer.initializeOptimization();
  optimizer.optimize(10);
  chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
  chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "optimization costs time: " << time_used.count() << " seconds." << endl;
  cout << "pose estimated by g2o =\n" << vertex_pose->estimate().matrix() << endl;
  pose = vertex_pose->estimate();
}

3.ICP

ICP (Iterative Closest Point). This is a 3D-3D optimization problem: once we know how the 3D feature points in the two sets are matched, we can estimate the relative pose from those matched 3D points. That is why ICP comes up so often when building SLAM systems with LiDAR.

In the literature, the ICP problem is mainly solved in two ways: first, with singular value decomposition (SVD); second, with nonlinear optimization.
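
Both approaches minimize the same alignment error. Given matched point sets $\{p_i\}$ and $\{p_i'\}$, the problem is, roughly,

$$
\min_{R, t} \; \frac{1}{2} \sum_{i=1}^{n} \left\| p_i - (R \, p_i' + t) \right\|_2^2 .
$$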

> ICP algorithm flow
For each point in one point cloud, find its matching point (the closest point) in the other point cloud
Minimize the matching error over the matched point pairs and compute the pose
Apply the computed pose to the point cloud
Recompute the matching points
Iterate until the number of iterations reaches a threshold, or the change of the minimized energy function drops below a set threshold

4.Solving with SVD

The SVD-based approach: from the two known sets of matched 3D points we compute the point-to-point error and build a least-squares problem, solving first for the rotation matrix and then for the translation vector.
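
A sketch of the SVD recipe, matching what `pose_estimation_3d3d` computes further down: subtract the centroids $p = \frac{1}{n}\sum_i p_i$ and $p' = \frac{1}{n}\sum_i p_i'$ to get the de-centered points $q_i = p_i - p$, $q_i' = p_i' - p'$, then

$$
W = \sum_{i=1}^{n} q_i \, q_i'^{\top}, \qquad W = U \Sigma V^{\top}, \qquad R = U V^{\top}, \qquad t = p - R \, p' ,
$$

flipping the sign of $R$ if its determinant is negative so that it stays a proper rotation.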

5.Nonlinear Optimization

Here we again build a least-squares estimate on the 3D point error and optimize it in much the same way as the PnP approach introduced earlier. When working with Lie groups and Lie algebras, the derivatives are taken with the perturbation model.
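
Sketching the nonlinear version: each error term is $e_i = p_i - T p_i'$ with $T \in SE(3)$, and under a left perturbation $\delta\xi$ (translation part first, then rotation, following Sophus' convention) the Jacobian is

$$
\frac{\partial e_i}{\partial \delta\xi} = \begin{bmatrix} -I & (T p_i')^{\wedge} \end{bmatrix} ,
$$

which is what `linearizeOplus()` fills into `_jacobianOplusXi` in the `EdgeProjectXYZRGBDPoseOnly` edge below.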

6.ICP Practice



/// g2o edge
class EdgeProjectXYZRGBDPoseOnly : public g2o::BaseUnaryEdge<3, Eigen::Vector3d, VertexPose> {
public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

  EdgeProjectXYZRGBDPoseOnly(const Eigen::Vector3d &point) : _point(point) {}

  virtual void computeError() override {
    const VertexPose *pose = static_cast<const VertexPose *> (_vertices[0]);
    _error = _measurement - pose->estimate() * _point;
  }

  virtual void linearizeOplus() override {
    VertexPose *pose = static_cast<VertexPose *>(_vertices[0]);
    Sophus::SE3d T = pose->estimate();
    Eigen::Vector3d xyz_trans = T * _point;
    _jacobianOplusXi.block<3, 3>(0, 0) = -Eigen::Matrix3d::Identity();
    _jacobianOplusXi.block<3, 3>(0, 3) = Sophus::SO3d::hat(xyz_trans);
  }

  bool read(istream &in) {}

  bool write(ostream &out) const {}

protected:
  Eigen::Vector3d _point;
};

// note: this listing reuses find_feature_matches and pixel2cam defined in the BA example above
int main(int argc, char **argv) {
  if (argc != 5) {
    cout << "usage: pose_estimation_3d3d img1 img2 depth1 depth2" << endl;
    return 1;
  }
  //-- read images
  Mat img_1 = imread(argv[1], CV_LOAD_IMAGE_COLOR);
  Mat img_2 = imread(argv[2], CV_LOAD_IMAGE_COLOR);

  vector<KeyPoint> keypoints_1, keypoints_2;
  vector<DMatch> matches;
  find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
  cout << "found " << matches.size() << " matched point pairs in total" << endl;

  // build the 3D points
  Mat depth1 = imread(argv[3], CV_LOAD_IMAGE_UNCHANGED);       // depth image: 16-bit unsigned, single channel
  Mat depth2 = imread(argv[4], CV_LOAD_IMAGE_UNCHANGED);       // depth image: 16-bit unsigned, single channel
  Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
  vector<Point3f> pts1, pts2;

  for (DMatch m:matches) {
    ushort d1 = depth1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
    ushort d2 = depth2.ptr<unsigned short>(int(keypoints_2[m.trainIdx].pt.y))[int(keypoints_2[m.trainIdx].pt.x)];
    if (d1 == 0 || d2 == 0)   // bad depth
      continue;
    Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
    Point2d p2 = pixel2cam(keypoints_2[m.trainIdx].pt, K);
    float dd1 = float(d1) / 5000.0;
    float dd2 = float(d2) / 5000.0;
    pts1.push_back(Point3f(p1.x * dd1, p1.y * dd1, dd1));
    pts2.push_back(Point3f(p2.x * dd2, p2.y * dd2, dd2));
  }

  cout << "3d-3d pairs: " << pts1.size() << endl;
  Mat R, t;
  pose_estimation_3d3d(pts1, pts2, R, t);

  cout << "calling bundle adjustment" << endl;

  bundleAdjustment(pts1, pts2, R, t);

  return 0;
}

void find_feature_matches(const Mat &img_1, const Mat &img_2,
                          std::vector<KeyPoint> &keypoints_1,
                          std::vector<KeyPoint> &keypoints_2,
                          std::vector<DMatch> &matches) {
  //-- initialization
  Mat descriptors_1, descriptors_2;
  // used in OpenCV3
  Ptr<FeatureDetector> detector = ORB::create();
  Ptr<DescriptorExtractor> descriptor = ORB::create();
  // use this if you are in OpenCV2
  // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
  // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
  Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
  //-- step 1: detect Oriented FAST corner positions
  detector->detect(img_1, keypoints_1);
  detector->detect(img_2, keypoints_2);

  //-- step 2: compute BRIEF descriptors from the corner positions
  descriptor->compute(img_1, keypoints_1, descriptors_1);
  descriptor->compute(img_2, keypoints_2, descriptors_2);

  //-- step 3: match the BRIEF descriptors of the two images using the Hamming distance
  vector<DMatch> match;
  // BFMatcher matcher ( NORM_HAMMING );
  matcher->match(descriptors_1, descriptors_2, match);

  //-- step 4: filter the matched point pairs
  double min_dist = 10000, max_dist = 0;

  // find the minimum and maximum distance over all matches,
  // i.e. the distances of the most similar and least similar pairs
  for (int i = 0; i < descriptors_1.rows; i++) {
    double dist = match[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist);
  printf("-- Min dist : %f \n", min_dist);

  // a match is considered wrong when the descriptor distance exceeds twice the minimum distance;
  // the minimum distance can be very small, so an empirical value of 30 is used as a lower bound
  for (int i = 0; i < descriptors_1.rows; i++) {
    if (match[i].distance <= max(2 * min_dist, 30.0)) {
      matches.push_back(match[i]);
    }
  }
}


void pose_estimation_3d3d(const vector<Point3f> &pts1,
                          const vector<Point3f> &pts2,
                          Mat &R, Mat &t) {
  Point3f p1, p2;     // center of mass
  int N = pts1.size();
  for (int i = 0; i < N; i++) {
    p1 += pts1[i];
    p2 += pts2[i];
  }
  p1 = Point3f(Vec3f(p1) / N);
  p2 = Point3f(Vec3f(p2) / N);
  vector<Point3f> q1(N), q2(N); // remove the center
  for (int i = 0; i < N; i++) {
    q1[i] = pts1[i] - p1;
    q2[i] = pts2[i] - p2;
  }

  // compute q1*q2^T
  Eigen::Matrix3d W = Eigen::Matrix3d::Zero();
  for (int i = 0; i < N; i++) {
    W += Eigen::Vector3d(q1[i].x, q1[i].y, q1[i].z) * Eigen::Vector3d(q2[i].x, q2[i].y, q2[i].z).transpose();
  }
  cout << "W=" << W << endl;

  // SVD on W
  Eigen::JacobiSVD<Eigen::Matrix3d> svd(W, Eigen::ComputeFullU | Eigen::ComputeFullV);
  Eigen::Matrix3d U = svd.matrixU();
  Eigen::Matrix3d V = svd.matrixV();

  cout << "U=" << U << endl;
  cout << "V=" << V << endl;

  Eigen::Matrix3d R_ = U * (V.transpose());
  if (R_.determinant() < 0) {
    R_ = -R_;
  }
  Eigen::Vector3d t_ = Eigen::Vector3d(p1.x, p1.y, p1.z) - R_ * Eigen::Vector3d(p2.x, p2.y, p2.z);

  // convert to cv::Mat
  R = (Mat_<double>(3, 3) <<
    R_(0, 0), R_(0, 1), R_(0, 2),
    R_(1, 0), R_(1, 1), R_(1, 2),
    R_(2, 0), R_(2, 1), R_(2, 2)
  );
  t = (Mat_<double>(3, 1) << t_(0, 0), t_(1, 0), t_(2, 0));
}

void bundleAdjustment(
  const vector<Point3f> &pts1,
  const vector<Point3f> &pts2,
  Mat &R, Mat &t) {
  // build the graph optimization problem; configure g2o first
  typedef g2o::BlockSolverX BlockSolverType;
  typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType; // type of the linear solver
  // gradient descent method, choose from GN, LM, DogLeg
  auto solver = new g2o::OptimizationAlgorithmLevenberg(
    g2o::make_unique<BlockSolverType>(g2o::make_unique<LinearSolverType>()));
  g2o::SparseOptimizer optimizer;   // graph model
  optimizer.setAlgorithm(solver);   // set the solver
  optimizer.setVerbose(true);       // enable verbose output

  // vertex
  VertexPose *pose = new VertexPose(); // camera pose
  pose->setId(0);
  pose->setEstimate(Sophus::SE3d());
  optimizer.addVertex(pose);

  // edges
  for (size_t i = 0; i < pts1.size(); i++) {
    EdgeProjectXYZRGBDPoseOnly *edge = new EdgeProjectXYZRGBDPoseOnly(
      Eigen::Vector3d(pts2[i].x, pts2[i].y, pts2[i].z));
    edge->setVertex(0, pose);
    edge->setMeasurement(Eigen::Vector3d(
      pts1[i].x, pts1[i].y, pts1[i].z));
    edge->setInformation(Eigen::Matrix3d::Identity());
    optimizer.addEdge(edge);
  }

  chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
  optimizer.initializeOptimization();
  optimizer.optimize(10);
  chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
  chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
  cout << "optimization costs time: " << time_used.count() << " seconds." << endl;

  cout << endl << "after optimization:" << endl;
  cout << "T=\n" << pose->estimate().matrix() << endl;

  // convert to cv::Mat
  Eigen::Matrix3d R_ = pose->estimate().rotationMatrix();
  Eigen::Vector3d t_ = pose->estimate().translation();
  R = (Mat_<double>(3, 3) <<
    R_(0, 0), R_(0, 1), R_(0, 2),
    R_(1, 0), R_(1, 1), R_(1, 2),
    R_(2, 0), R_(2, 1), R_(2, 2)
  );
  t = (Mat_<double>(3, 1) << t_(0, 0), t_(1, 0), t_(2, 0));
}

7.Summary

At this point we have covered solving the 2D-2D, 3D-2D (PnP), and 3D-3D problems, which should give a deeper understanding of front-end pose estimation. Many issues remain: a robot running in the real world faces all kinds of situations and error sources, and I will continue to cover these problems and their solutions in future posts.

> Reference: 视觉SLAM十四讲 (14 Lectures on Visual SLAM)
