视觉SLAM十四讲第七讲pnp带上3D点优化的版本
在原先只有相机位姿优化的ba基础上添加了空间3d点的优化
/home/george/LEARN_SLAM/learn_visionSLAM/slambook2/ch7/1.png
/home/george/LEARN_SLAM/learn_visionSLAM/slambook2/ch7/2.png
/home/george/LEARN_SLAM/learn_visionSLAM/slambook2/ch7/1_depth.png
#include <iostream>
#include <algorithm>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <Eigen/Core>
#include <Eigen/Dense>
#include <sophus/se3.hpp>
#include <iomanip>
using namespace std;
using namespace cv;
typedef vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d>> VecVector2d;
typedef vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d>> VecVector3d;
void find_feature_matches(const Mat &img_1, const Mat &img_2, vector<KeyPoint> &keypoints_1, vector<KeyPoint> &keypoints_2, vector<DMatch> &matches);
Point2d pixel2cam(const Point2d &keypoint, Mat & K);
void myBA(VecVector3d &pts_3d, VecVector2d &pts_2d, const Mat & K, Sophus::SE3d &pose);
int main()
{
Mat img_1 = imread("/home/george/LEARN_SLAM/learn_visionSLAM/slambook2/ch7/1.png", CV_LOAD_IMAGE_COLOR);
Mat img_2 = imread("/home/george/LEARN_SLAM/learn_visionSLAM/slambook2/ch7/2.png", CV_LOAD_IMAGE_COLOR);
assert(img_1.data && img_2.data);
vector<KeyPoint> keypoints1, keypoints2;
vector<DMatch> matches;
find_feature_matches(img_1, img_2, keypoints1, keypoints2, matches);
cout << "一共找到" << matches.size() << "组匹配点" << endl;
Mat d1 = imread("/home/george/LEARN_SLAM/learn_visionSLAM/slambook2/ch7/1_depth.png", CV_LOAD_IMAGE_UNCHANGED);
Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
vector<Point3f> pts_3d;
vector<Point2f> pts_2d;
for(DMatch m : matches)
{
ushort d = d1.ptr<unsigned short>(int(keypoints1[m.queryIdx].pt.y))[int(keypoints1[m.queryIdx].pt.x)];
if (d == 0)
{
cout << "bad depth " << endl;
continue;
}
float dd = d / 5000.0;
Point2d p1 = pixel2cam(keypoints1[m.queryIdx].pt, K);
pts_3d.push_back(Point3f(p1.x * dd, p1.y * dd, dd));
pts_2d.push_back(keypoints2[m.trainIdx].pt);
}
cout << "3d-2d pairs : " << pts_3d.size() << endl;
VecVector3d pts_3d_eigen;
VecVector2d pts_2d_eigen;
for (int i = 0; i < pts_3d.size(); i++)
{
pts_3d_eigen.push_back(Eigen::Vector3d(pts_3d[i].x, pts_3d[i].y, pts_3d[i].z));
pts_2d_eigen.push_back(Eigen::Vector2d(pts_2d[i].x, pts_2d[i].y));
}
cout << "using myBA to slove the problem : " << endl;
Sophus::SE3d pose_gn;
myBA(pts_3d_eigen, pts_2d_eigen, K, pose_gn);
return 0;
}
void find_feature_matches(const Mat &img_1, const Mat &img_2, vector<KeyPoint> &keypoints_1, vector<KeyPoint> &keypoints_2, vector<DMatch> &matches)
{
Mat descriptors_1, descriptors_2;
Ptr<FeatureDetector> detector = ORB::create();
Ptr<DescriptorExtractor> descriptor = ORB::create();
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
detector->detect(img_1, keypoints_1);
detector->detect(img_2, keypoints_2);
descriptor->compute(img_1, keypoints_1, descriptors_1);
descriptor->compute(img_2, keypoints_2, descriptors_2);
vector<DMatch> match;
matcher->match(descriptors_1, descriptors_2, match);
double min_dist = 10000, max_dist = 0;
for (int i = 0; i < descriptors_1.rows; i++)
{
double dist = match[i].distance;
if (dist > max_dist) max_dist = dist;
if (dist < min_dist) min_dist = dist;
}
cout << "--Max dist : " << max_dist << endl;
cout << "--Min dist : " << min_dist << endl;
for (int i = 0; i < descriptors_1.rows; i++)
{
if (match[i].distance <= max(2*min_dist, 30.0))
{
matches.push_back(match[i]);
}
}
}
Point2d pixel2cam(const Point2d &keypoint, Mat & K)
{
    // Back-project a pixel onto the normalized image plane:
    //   x = (u - cx) / fx,  y = (v - cy) / fy
    const double fx = K.at<double>(0, 0);
    const double fy = K.at<double>(1, 1);
    const double cx = K.at<double>(0, 2);
    const double cy = K.at<double>(1, 2);
    return Point2d((keypoint.x - cx) / fx, (keypoint.y - cy) / fy);
}
// Hand-written Gauss-Newton bundle adjustment that jointly refines one camera
// pose (6 DoF, SE3 left perturbation) and every 3D landmark (3 DoF each).
//
// State-vector layout: [ d_xi (6) | dP_0 (3) | dP_1 (3) | ... ]
//
// pts_3d : landmarks expressed in the frame-1 camera frame (refined in place)
// pts_2d : matched pixel observations in frame 2
// K      : 3x3 pinhole intrinsics
// pose   : T_21 estimate, refined in place
void myBA(VecVector3d &pts_3d, VecVector2d&pts_2d, const Mat & K, Sophus::SE3d &pose)
{
    typedef Eigen::Matrix<double, 6, 1> Vector6d;
    const int iterations = 10;
    double cost = 0, lastcost = 0;
    const double fx = K.at<double>(0, 0);
    const double cx = K.at<double>(0, 2);
    const double fy = K.at<double>(1, 1);
    const double cy = K.at<double>(1, 2);
    const int nums_of_points = static_cast<int>(pts_3d.size());
    const int row_of_H = 6 + 3 * nums_of_points;

    for (int iter = 0; iter < iterations; iter++)
    {
        Eigen::MatrixXd H = Eigen::MatrixXd::Zero(row_of_H, row_of_H);
        Eigen::VectorXd b = Eigen::VectorXd::Zero(row_of_H);
        cost = 0;
        // Rotation is constant within one iteration; hoist it out of the loop.
        const Eigen::Matrix3d R1 = pose.rotationMatrix();

        for (int i = 0; i < nums_of_points; i++)
        {
            Eigen::Vector3d pc = pose * pts_3d[i];
            double inv_z = 1.0 / pc[2];
            double inv_z2 = inv_z * inv_z;

            // Reprojection error: e = observation - projection.
            Eigen::Vector2d proj(fx * pc[0] * inv_z + cx, fy * pc[1] * inv_z + cy);
            Eigen::Vector2d e = pts_2d[i] - proj;
            cost += e.squaredNorm();

            // de/d(pc): 2x3 (sign follows e = obs - proj).
            Eigen::Matrix<double, 2, 3> eP;
            eP << -fx * inv_z, 0,            fx * pc[0] * inv_z2,
                  0,           -fy * inv_z,  fy * pc[1] * inv_z2;

            // de/dP = de/d(pc) * d(pc)/dP = eP * R   (pc = R*P + t).
            Eigen::Matrix<double, 2, 3> J_P = eP * R1;

            // de/d(xi): analytic 2x6 Jacobian for a left SE3 perturbation.
            Eigen::Matrix<double, 2, 6> J_se3;
            J_se3 << -fx * inv_z,
                     0,
                     fx * pc[0] * inv_z2,
                     fx * pc[0] * pc[1] * inv_z2,
                     -fx - fx * pc[0] * pc[0] * inv_z2,
                     fx * pc[1] * inv_z,
                     0,
                     -fy * inv_z,
                     fy * pc[1] * inv_z2,
                     fy + fy * pc[1] * pc[1] * inv_z2,
                     -fy * pc[0] * pc[1] * inv_z2,
                     -fy * pc[0] * inv_z;

            // Accumulate the normal equations PER OBSERVATION.
            // BUG FIX: the previous version summed all pose Jacobians into one
            // shared 2-row matrix and built H and b outside the loop from only
            // the LAST residual; Gauss-Newton requires H = sum_i J_i^T J_i and
            // b = -sum_i J_i^T e_i. Each point couples only with the pose, so
            // the cross blocks land at column 6 + 3*i.
            H.block<6, 6>(0, 0) += J_se3.transpose() * J_se3;
            H.block<6, 3>(0, 6 + 3 * i) += J_se3.transpose() * J_P;
            H.block<3, 6>(6 + 3 * i, 0) += J_P.transpose() * J_se3;
            H.block<3, 3>(6 + 3 * i, 6 + 3 * i) += J_P.transpose() * J_P;
            b.segment<6>(0) += -J_se3.transpose() * e;
            b.segment<3>(6 + 3 * i) += -J_P.transpose() * e;
        }

        Eigen::VectorXd dx = H.ldlt().solve(b);
        if (std::isnan(dx[0]))
        {
            cout << "result is nan " << endl;
            break;  // BUG FIX: previously kept iterating with a garbage update
        }
        if (iter > 0 && cost >= lastcost)
        {
            // Cost no longer decreasing: stop before applying this step.
            cout << "cost : " << cost << ", last cost : " << lastcost << endl;
            break;
        }

        // Apply the update: left-multiplied pose increment, additive landmarks.
        Vector6d d_se3 = dx.head<6>();
        pose = Sophus::SE3d::exp(d_se3) * pose;
        for (int i = 0; i < nums_of_points; i++)
        {
            pts_3d[i] += dx.segment<3>(6 + 3 * i);
        }

        lastcost = cost;
        cout << "iteration " << iter << " cost = " << std::setprecision(12) << cost << endl;
        if (dx.norm() < 1e-6)
        {
            break;  // converged
        }
    }

    cout << "pose by myBA : \n" << pose.matrix() << endl;
    cout << "the point pose after estimation : " << endl;
    for (size_t i = 0; i < pts_3d.size(); i++)
    {
        cout << pts_3d[i].transpose() << endl;
    }
}
CMakeLists.txt配置
cmake_minimum_required(VERSION 3.5)
project(gauss_newton1)
# Set the C++ standard to C++11
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_BUILD_TYPE "Debug")
# Extra search path for custom find_package() modules
LIST(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules)
# Output directory for built executables.
# ${PROJECT_SOURCE_DIR} is the root of the source tree;
# ${PROJECT_BINARY_DIR} is the root of the build tree.
set( EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/bin )
find_package(OpenCV REQUIRED)
find_package(Eigen3 REQUIRED)
find_package(sophus REQUIRED)
include_directories(
${PROJECT_SOURCE_DIR}
${PROJECT_SOURCE_DIR}/include
${OpenCV_INCLUDE_DIRS}
${EIGEN3_INCLUDE_DIR}
${sophus_INCLUDE_DIRS}
)
# Eigen and Sophus are header-only here, so only OpenCV needs linking.
set(lib
${OpenCV_LIBS}
)
#add_executable(gaussNewton1 ./src/gaussNewton1.cpp)
#target_link_libraries(gaussNewton1 ${lib})
add_executable(pose_estimation_3d2d ./src/pose_estimation_3d2d.cpp)
target_link_libraries(pose_estimation_3d2d ${lib})
文件夹结构
learn_ba
├── bin
├── src
│   └── pose_estimation_3d2d.cpp
└── build