视觉SLAM十四讲第七讲pnp带上3D点优化的版本

在原先只有相机位姿优化的ba基础上添加了空间3d点的优化

/home/george/LEARN_SLAM/learn_visionSLAM/slambook2/ch7/1.png //第一帧图像的绝对路径
/home/george/LEARN_SLAM/learn_visionSLAM/slambook2/ch7/2.png //第二帧图像的绝对路径
/home/george/LEARN_SLAM/learn_visionSLAM/slambook2/ch7/1_depth.png //对应第一帧图像的深度图
/*
 * @Author: your name
 * @Date: 2021-01-26 15:50:52
 * @LastEditTime: 2021-01-26 20:46:34
 * @LastEditors: Please set LastEditors
 * @Description: In User Settings Edit
 * @FilePath: /learn_ba/src/pose_estimation_3d2d.cpp
 */
#include <algorithm>
#include <cassert>
#include <cmath>
#include <iomanip>
#include <iostream>

#include <Eigen/Core>
#include <Eigen/Dense>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <sophus/se3.hpp>

using namespace std;
using namespace cv;

// std::vector of fixed-size Eigen types requires the aligned allocator.
typedef vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d>> VecVector2d;
typedef vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d>> VecVector3d;

// Detect ORB keypoints in both images and fill `matches` with the filtered good pairs.
void find_feature_matches(const Mat &img_1, const Mat &img_2, vector<KeyPoint> &keypoints_1, vector<KeyPoint> &keypoints_2, vector<DMatch> &matches);

// Convert a pixel coordinate into a normalized image-plane coordinate using intrinsics K.
Point2d pixel2cam(const Point2d &keypoint, Mat & K);

// Gauss-Newton bundle adjustment over the camera pose AND the 3-D points (both updated in place).
void myBA(VecVector3d &pts_3d, VecVector2d &pts_2d, const Mat & K, Sophus::SE3d &pose);

int main()
{
    Mat img_1 = imread("/home/george/LEARN_SLAM/learn_visionSLAM/slambook2/ch7/1.png", CV_LOAD_IMAGE_COLOR);
    Mat img_2 = imread("/home/george/LEARN_SLAM/learn_visionSLAM/slambook2/ch7/2.png", CV_LOAD_IMAGE_COLOR);

    assert(img_1.data && img_2.data);

    vector<KeyPoint> keypoints1, keypoints2;
    vector<DMatch> matches;
    find_feature_matches(img_1, img_2, keypoints1, keypoints2, matches);
    cout << "一共找到" << matches.size() << "组匹配点" << endl;

    Mat d1 = imread("/home/george/LEARN_SLAM/learn_visionSLAM/slambook2/ch7/1_depth.png", CV_LOAD_IMAGE_UNCHANGED);
    Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
    vector<Point3f> pts_3d;
    vector<Point2f> pts_2d;

    for(DMatch m : matches)
    {
        ushort d = d1.ptr<unsigned short>(int(keypoints1[m.queryIdx].pt.y))[int(keypoints1[m.queryIdx].pt.x)];
        if (d == 0)
        {
            cout << "bad depth " << endl;
            continue;
        }
        float dd = d / 5000.0;
        Point2d p1 = pixel2cam(keypoints1[m.queryIdx].pt, K);
        pts_3d.push_back(Point3f(p1.x * dd, p1.y * dd, dd));
        pts_2d.push_back(keypoints2[m.trainIdx].pt);
    }

    cout << "3d-2d pairs : " << pts_3d.size() << endl;

    VecVector3d pts_3d_eigen;
    VecVector2d pts_2d_eigen;
    for (int i = 0; i < pts_3d.size(); i++)
    {
        pts_3d_eigen.push_back(Eigen::Vector3d(pts_3d[i].x, pts_3d[i].y, pts_3d[i].z));
        pts_2d_eigen.push_back(Eigen::Vector2d(pts_2d[i].x, pts_2d[i].y));
    }
    cout << "using myBA to slove the problem : " << endl;
    Sophus::SE3d pose_gn;
    myBA(pts_3d_eigen, pts_2d_eigen, K, pose_gn);
   
    return 0;
}


void find_feature_matches(const Mat &img_1, const Mat &img_2, vector<KeyPoint> &keypoints_1, vector<KeyPoint> &keypoints_2, vector<DMatch> &matches)
{
    Mat descriptors_1, descriptors_2;
    Ptr<FeatureDetector> detector = ORB::create();
    Ptr<DescriptorExtractor> descriptor = ORB::create();
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");

    detector->detect(img_1, keypoints_1);
    detector->detect(img_2, keypoints_2);

    descriptor->compute(img_1, keypoints_1, descriptors_1);
    descriptor->compute(img_2, keypoints_2, descriptors_2);

    vector<DMatch> match;
    matcher->match(descriptors_1, descriptors_2, match);

    double min_dist = 10000, max_dist = 0;

    for (int i = 0; i < descriptors_1.rows; i++)
    {
        double dist = match[i].distance;
        if (dist > max_dist) max_dist = dist;
        if (dist < min_dist) min_dist = dist;
    }
    cout << "--Max dist : " << max_dist << endl;
    cout << "--Min dist : " << min_dist << endl;
    for (int i = 0; i < descriptors_1.rows; i++)
    {
        if (match[i].distance <= max(2*min_dist, 30.0))
        {
            matches.push_back(match[i]);
        }
    }
}

Point2d pixel2cam(const Point2d &keypoint, Mat & K)
{
    // Map a pixel (u, v) to the normalized image plane:
    //   x = (u - cx) / fx,  y = (v - cy) / fy.
    const double fx = K.at<double>(0, 0);
    const double fy = K.at<double>(1, 1);
    const double cx = K.at<double>(0, 2);
    const double cy = K.at<double>(1, 2);
    return Point2d((keypoint.x - cx) / fx, (keypoint.y - cy) / fy);
}

void myBA(VecVector3d &pts_3d, VecVector2d&pts_2d, const Mat & K, Sophus::SE3d &pose)
{
    // Gauss-Newton bundle adjustment that jointly optimizes the camera pose
    // (SE3, 6 DoF, left-perturbation) and every 3-D landmark (3 DoF each).
    // State-vector layout: [ dxi (6) | dP_0 (3) | dP_1 (3) | ... ].
    // Both `pose` and `pts_3d` are updated in place.
    const int iterations = 10;
    double cost = 0, lastcost = 0;
    const double fx = K.at<double>(0, 0);
    const double cx = K.at<double>(0, 2);
    const double fy = K.at<double>(1, 1);
    const double cy = K.at<double>(1, 2);

    const int nums_of_points = static_cast<int>(pts_3d.size());
    const int row_of_H = 6 + 3 * nums_of_points;  // dimension of the normal equations

    for (int iter = 0; iter < iterations; iter++)
    {
        Eigen::MatrixXd H = Eigen::MatrixXd::Zero(row_of_H, row_of_H);
        Eigen::VectorXd b = Eigen::VectorXd::Zero(row_of_H);

        // The rotation is constant within one iteration — hoist it out of the
        // per-point loop (the original re-extracted it via an SO3d round-trip
        // on every point).
        const Eigen::Matrix3d R = pose.rotationMatrix();

        cost = 0;
        for (int i = 0; i < nums_of_points; i++)
        {
            // Landmark transformed into the second camera's frame.
            const Eigen::Vector3d pc = pose * pts_3d[i];
            const double inv_z = 1.0 / pc[2];
            const double inv_z2 = inv_z * inv_z;

            // Reprojection residual e_i = observed - projected (2x1).
            const Eigen::Vector2d proj(fx * pc[0] * inv_z + cx, fy * pc[1] * inv_z + cy);
            const Eigen::Vector2d e = pts_2d[i] - proj;
            cost += e.squaredNorm();

            // d e / d pc (2x3): residual Jacobian w.r.t. the camera-frame point.
            Eigen::Matrix<double, 2, 3> eP;
            eP << -fx * inv_z, 0, fx * pc[0] * inv_z2,
                  0, -fy * inv_z, fy * pc[1] * inv_z2;

            // Chain rule: pc = R * P + t, so d e / d P_world = (d e / d pc) * R.
            const Eigen::Matrix<double, 2, 3> J_P = eP * R;

            // d e / d dxi (2x6), left-perturbation convention (slambook2 eq. 7.46).
            Eigen::Matrix<double, 2, 6> J_se3;
            J_se3 << -fx * inv_z,
                    0,
                    fx * pc[0] * inv_z2,
                    fx * pc[0] * pc[1] * inv_z2,
                    -fx - fx * pc[0] * pc[0] * inv_z2,
                    fx * pc[1] * inv_z,
                    0,
                    -fy * inv_z,
                    fy * pc[1] * inv_z2,
                    fy + fy * pc[1] * pc[1] * inv_z2,
                    -fy * pc[0] * pc[1] * inv_z2,
                    -fy * pc[0] * inv_z;

            // BUG FIX: accumulate H and b PER POINT. The original built a
            // single 2 x row_of_H Jacobian, summed the pose block across all
            // points, and formed H/b once AFTER the loop — so only the last
            // residual contributed to b and the normal equations were wrong.
            // Exploit the sparsity pattern instead of a dense J^T * J.
            const int col = 6 + 3 * i;
            H.block<6, 6>(0, 0)     += J_se3.transpose() * J_se3;
            H.block<6, 3>(0, col)   += J_se3.transpose() * J_P;
            H.block<3, 6>(col, 0)   += J_P.transpose() * J_se3;
            H.block<3, 3>(col, col) += J_P.transpose() * J_P;
            b.segment<6>(0)         += -J_se3.transpose() * e;
            b.segment<3>(col)       += -J_P.transpose() * e;
        }

        // Solve the normal equations H * dx = b.
        Eigen::VectorXd dx = H.ldlt().solve(b);

        if (std::isnan(dx[0]))
        {
            cout << "result is nan " << endl;
            break;  // BUG FIX: stop instead of applying a NaN update to the state
        }

        if (iter > 0 && cost >= lastcost)
        {
            // Cost stopped decreasing; keep the previous estimate.
            cout << "cost : " << cost << ", last cost : " << lastcost << endl;
            break;
        }

        // Apply the update: multiplicative (left) perturbation for the pose,
        // additive for the landmarks.
        pose = Sophus::SE3d::exp(dx.head<6>()) * pose;
        for (int i = 0; i < nums_of_points; i++)
        {
            pts_3d[i] += dx.segment<3>(6 + 3 * i);
        }
        lastcost = cost;

        cout << "iteration " << iter << " cost = " << std::setprecision(12) << cost << endl;
        if (dx.norm() < 1e-6)
        {
            break;  // converged
        }
    }
    cout << "pose by myBA : \n" << pose.matrix() << endl;
    cout << "the point pose after estimation : " << endl;
    for (int i = 0; i < nums_of_points; i++)
    {
        cout << pts_3d[i].transpose() << endl;
    }
}

CMakeLists.txt配置

cmake_minimum_required(VERSION 3.5)
project(gauss_newton1)

# Require C++11 with no silent fallback to an older standard.
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Default to a Debug build only when the user did not pick a build type,
# instead of unconditionally overriding their choice.
if(NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE "Debug")
endif()

# Extra search path for project-local Find*.cmake modules.
list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules)

# Place built executables under <source>/bin.
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/bin)

find_package(OpenCV REQUIRED)
find_package(Eigen3 REQUIRED)
# Sophus installs SophusConfig.cmake — the package name is case-sensitive on
# Linux, so use the capitalized form (the original lowercase `sophus` only
# works if a local Findsophus.cmake module exists).
find_package(Sophus REQUIRED)

include_directories(
    ${PROJECT_SOURCE_DIR}
    ${PROJECT_SOURCE_DIR}/include
    ${OpenCV_INCLUDE_DIRS}
    ${EIGEN3_INCLUDE_DIR}
    ${Sophus_INCLUDE_DIRS}
)

add_executable(pose_estimation_3d2d src/pose_estimation_3d2d.cpp)
# Explicit visibility keyword; Eigen and Sophus are header-only here, so only
# the OpenCV libraries need linking.
target_link_libraries(pose_estimation_3d2d PRIVATE ${OpenCV_LIBS})

文件夹结构

learn_ba
bin
src
 pose_estimation_3d2d.cpp
build
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值