/*
 * Steps to solve PnP with g2o:
 *   1. Define the vertex and edge data types
 *   2. Extract ORB features and match point pairs
 *   3. Construct the vertices and edges
 *   4. Define and configure the solver
 *   5. Call the solver's initializeOptimization function to solve the problem
 * If the g2o shared libraries fail to link, see the notes in this article:
 * https://blog.csdn.net/floatinglong/article/details/116159431
 */
#include <algorithm>
#include <chrono>
#include <iostream>

#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>

#include <Eigen/Core>
#include <sophus/se3.hpp>

#include <g2o/core/base_unary_edge.h>
#include <g2o/core/base_vertex.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/core/solver.h>
#include <g2o/core/sparse_optimizer.h>
#include <g2o/solvers/dense/linear_solver_dense.h>

using namespace std;
using namespace cv;
using namespace Eigen;
//提取ORB特征并匹配
void findAndMatchOrbFeature(const Mat &img1, const Mat &img2,
vector<KeyPoint> &kp1, vector<KeyPoint> &kp2,
vector<DMatch> &matches)
{
Ptr<FeatureDetector> detector = ORB::create();
Ptr<DescriptorExtractor> desc = ORB::create();
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
//检测角点
detector->detect(img1, kp1);
detector->detect(img2, kp2);
//计算描述子
Mat desc1, desc2;
desc->compute(img1, kp1, desc1);
desc->compute(img2, kp2, desc2);
//匹配点对
vector<DMatch> allMatches;
matcher->match(desc1, desc2, allMatches);
//检测误匹配
double minDist = 10000;
for(auto m : allMatches)
if(m.distance < minDist) minDist = m.distance;
double useDist = max(2*minDist, 30.0);
for(auto m : allMatches)
if(m.distance <= useDist)
matches.push_back(m);
}
// Pose vertex. Template parameters: dimension of the optimized variable
// (6, for se(3)) and its data type (Sophus::SE3d).
class Vertex : public g2o::BaseVertex<6, Sophus::SE3d> {
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    // Reset the estimate to the identity transform.
    virtual void setToOriginImpl() override { // override: this redefines a base-class virtual
        _estimate = Sophus::SE3d();
    }
    // Left-multiplicative update: T <- exp(update^) * T.
    virtual void oplusImpl(const double *update) override {
        Eigen::Matrix<double, 6, 1> updateVector;
        updateVector << update[0], update[1], update[2], update[3], update[4], update[5];
        _estimate = Sophus::SE3d::exp(updateVector) * _estimate;
    }
    // Serialization is not used; return false so the missing-return UB
    // of the original stubs is fixed and callers see a definite failure.
    virtual bool read(istream &in) { return false; }
    virtual bool write(ostream &out) const { return false; }
};
// Reprojection-error edge. Template parameters: measurement dimension (2),
// measurement type (pixel coordinates) and the connected vertex type.
class Edge : public g2o::BaseUnaryEdge<2, Eigen::Vector2d, Vertex> {
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    // p3d: 3D point in the first camera frame; k: camera intrinsic matrix
    // (Eigen type so it can be used directly in the projection below).
    Edge(const Eigen::Vector3d &p3d, const Eigen::Matrix3d &k) : _p3d(p3d), _k(k) {}
    // error = measured pixel - projection of (T * p3d)
    virtual void computeError() override {
        const Vertex *v = static_cast<const Vertex *>(_vertices[0]); // downcast to our vertex type (was mis-documented as Edge*)
        const Sophus::SE3d T = v->estimate();   // current pose estimate
        Eigen::Vector3d tp3d = T * _p3d;        // point transformed into the second camera frame
        tp3d = _k * tp3d / tp3d[2];             // project to the pixel plane; first two components are pixel coords
        Eigen::Vector2d tp2d = tp3d.head<2>();
        _error = _measurement - tp2d;
    }
    // Analytic Jacobian of the error w.r.t. the se(3) perturbation
    // (standard 2x6 pinhole-projection Jacobian).
    virtual void linearizeOplus() override {
        const Vertex *v = static_cast<const Vertex *>(_vertices[0]);
        const Sophus::SE3d T = v->estimate();   // last estimate of T
        Eigen::Vector3d tp3d = T * _p3d;        // transformed 3D point
        double invZ = 1 / tp3d[2], invZ2 = invZ * invZ;
        double fx = _k(0, 0);
        double fy = _k(1, 1);
        double cx = _k(0, 2);
        double cy = _k(1, 2);
        _jacobianOplusXi << -fx * invZ,
            0,
            fx * tp3d[0] * invZ2,
            fx * tp3d[0] * tp3d[1] * invZ2,
            -fx - fx * tp3d[0] * tp3d[0] * invZ2,
            fx * tp3d[1] * invZ,
            0,
            -fy * invZ,
            fy * tp3d[1] * invZ2,
            fy + fy * tp3d[1] * tp3d[1] * invZ2,
            -fy * tp3d[0] * tp3d[1] * invZ2,
            -fy * tp3d[0] * invZ;
    }
    // Serialization is not used; return false (the originals fell off the
    // end of a bool function, which is undefined behavior).
    virtual bool read(istream &in) { return false; }
    virtual bool write(ostream &out) const { return false; }
private:
    Eigen::Vector3d _p3d;
    Eigen::Matrix3d _k;
};
int main(int argc, char **argv)
{
if(argc != 4)
{
cout << "error!" << endl;
return 0;
}
Mat img1 = imread(argv[1], CV_LOAD_IMAGE_COLOR);
Mat img2 = imread(argv[2], CV_LOAD_IMAGE_COLOR);
vector<KeyPoint> kp1, kp2;
vector<DMatch> matches;
findAndMatchOrbFeature(img1, img2, kp1, kp2, matches);
//g2o
typedef g2o::BlockSolver<g2o::BlockSolverTraits<6, 2>> BlockSolverType; //优化变量维度为6,误差维度为2
typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType; // 线性求解器类型
//梯度下降方法
auto solver = new g2o::OptimizationAlgorithmGaussNewton(
g2o::make_unique<BlockSolverType>(g2o::make_unique<LinearSolverType>()));
g2o::SparseOptimizer optimizer; // 图模型
optimizer.setAlgorithm(solver); // 设置求解器
optimizer.setVerbose(true); // 打开调试输出
//构造顶点
Vertex *v = new Vertex();
v->setEstimate(Sophus::SE3d());
v->setId(0);
optimizer.addVertex(v);
//取3d-2d点对
Eigen::Matrix3d k;
k << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1;
vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d>> ps2d;
vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d>> ps3d;
for(auto m : matches)
{
Point2d p2d = kp1[m.queryIdx].pt;
int x = p2d.x, y = p2d.y;
unsigned short pixel = img3.ptr<unsigned short>(y)[x];
if(pixel == 0) continue;
double dPixel = pixel / 5000.0;
Point2d pNor2d = pixelToNor(p2d, k);
ps3d.push_back(Eigen::Vector3d(pNor2d.x*dPixel, pNor2d.y*dPixel, dPixel));
p2d = kp2[m.trainIdx].pt;
x = p2d.x;
y = p2d.y;
ps2d.push_back(Eigen::Vector2d(x, y));
}
//构造边
for (int i = 0; i < (int)ps3d.size(); i++) {
Edge *edge = new Edge(ps3d[i], k);
edge->setId(i);
edge->setVertex(0, v); // 设置连接的顶点
edge->setMeasurement(ps2d[i]); // 观测值
edge->setInformation(Eigen::Matrix2d::Identity()); // 信息矩阵:协方差矩阵之逆,这里没有,填单位阵,维度与误差变量一致
optimizer.addEdge(edge);
}
// 执行优化
optimizer.initializeOptimization();
optimizer.optimize(10); //迭代次数
// 输出优化值
Sophus::SE3d T = v->estimate();
cout << "estimated model: " << endl << T.matrix() << endl;
return 0;
}