Visual SLAM Theory and Practice, Assignment 5

I. ORB Feature Points

1.1 ORB Extraction
ORB stands for Oriented FAST. It is essentially a FAST feature with an added orientation. This exercise uses OpenCV's built-in FAST detector, but you are asked to compute the orientation yourself. The computation is described as follows [2]:

[figure: the assignment's description of the orientation computation]
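For reference, the figure's content corresponds to the standard intensity-centroid method, which the code below implements: in a small patch $B$ centred on the keypoint, compute the image moments, locate the centroid, and take the angle of the vector from the patch centre to the centroid as the keypoint orientation:

$$
m_{pq} = \sum_{x,y \in B} x^p y^q\, I(x,y), \quad p,q \in \{0,1\}, \qquad
C = \left(\frac{m_{10}}{m_{00}},\ \frac{m_{01}}{m_{00}}\right), \qquad
\theta = \arctan2\!\left(m_{01},\, m_{10}\right).
$$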
1.2 ORB Descriptor
The ORB descriptor is a BRIEF descriptor with rotation. A BRIEF descriptor is a string of 0s and 1s (typically 256 or 128 bits), where each bit encodes one intensity comparison between a pair of pixels. The algorithm proceeds as follows:
[figure: BRIEF descriptor algorithm]
Here $u_p$ and $v_p$ are the coordinates of p, and likewise for q. Denote the rotated p and q by p' and q'; compare I(p') with I(q'): if the former is larger, set d_i = 0, otherwise set d_i = 1 [1].
[figure: rotating the BRIEF pattern points by the keypoint orientation]
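For reference, rotating a pattern point $p = (u_p, v_p)$ by the keypoint orientation $\theta$ (exactly what the descriptor code below does) gives

$$
p' = \begin{bmatrix} \cos\theta & -\sin\theta \\ \sin\theta & \cos\theta \end{bmatrix}
\begin{bmatrix} u_p \\ v_p \end{bmatrix} + \mathbf{x}_{kp},
$$

where $\mathbf{x}_{kp}$ is the keypoint position; $q'$ is obtained in the same way.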

1.3 Brute-Force Matching
After the descriptors have been computed, we need to match them. Brute-force matching is a simple, exhaustive method that is useful when the number of features is small. The idea is straightforward: given two sets of descriptors P = [p1, ..., pM] and Q = [q1, ..., qN], for each descriptor in P find the descriptor in Q with the smallest distance and count that pair as a match. Since this yields a match for every feature, we usually also impose a distance threshold dmax: a pair is accepted only if its distance is no greater than dmax. Implement the function bfMatch according to this description, returning the matches for the given features; in practice we take dmax = 50.
[figure: exercise instructions]
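For reference, the descriptor distance used throughout is the Hamming distance, i.e. the number of differing bits between two 256-bit descriptors:

$$
d(\mathbf{p}, \mathbf{q}) = \sum_{k=1}^{256} p_k \oplus q_k .
$$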
Selected code snippets follow:

1) Extracting image corners and computing their orientation

// compute the angle
void computeAngle(const cv::Mat &image, vector<cv::KeyPoint> &keypoints) {
    int half_patch_size = 8;
    for (auto &kp : keypoints) {
	// START YOUR CODE HERE (~7 lines)
        kp.angle = 0; // compute kp.angle
        cv::Point2f p = kp.pt;
        // skip keypoints whose patch would fall outside the image
        if (p.x < half_patch_size || p.x >= image.cols - half_patch_size ||
            p.y < half_patch_size || p.y >= image.rows - half_patch_size) { continue; }
        double m10 = 0, m01 = 0, m00 = 0; // 1. moments of the small image patch B around the keypoint
        for (int i = -half_patch_size; i < half_patch_size; i++) {
            const uchar *row = image.ptr<uchar>((int)p.y + i);
            for (int j = -half_patch_size; j < half_patch_size; j++) {
                m00 += row[(int)p.x + j];
                m10 += j * row[(int)p.x + j];
                m01 += i * row[(int)p.x + j];
            }
        }
        double cx = m10 / m00, cy = m01 / m00; // 2. centroid of the patch, from the moments
        kp.angle = atan2(cy, cx) * 180 / pi;   // 3. orientation: direction from the patch centre O to the centroid, in degrees
        // END YOUR CODE HERE
    }
    return;
}

2) Computing the descriptor

// compute the descriptor
    void computeORBDesc(const cv::Mat &image, vector<cv::KeyPoint> &keypoints, vector<DescType> &desc) {
        for (auto &kp: keypoints) {
            DescType d(256, false);
            for (int i = 0; i < 256; i++) {
                // START YOUR CODE HERE (~7 lines)
                float cos_ = cos(kp.angle * pi / 180);
                float sin_ = sin(kp.angle * pi / 180);
                // rotate the two pattern points by the keypoint orientation
                cv::Point2f up_t(cos_ * ORB_pattern[4 * i] - sin_ * ORB_pattern[4 * i + 1],
                                 sin_ * ORB_pattern[4 * i] + cos_ * ORB_pattern[4 * i + 1]);
                cv::Point2f uq_t(cos_ * ORB_pattern[4 * i + 2] - sin_ * ORB_pattern[4 * i + 3],
                                 sin_ * ORB_pattern[4 * i + 2] + cos_ * ORB_pattern[4 * i + 3]);
                // coordinates of the two points to compare, relative to the image
                cv::Point2f up = up_t + kp.pt;
                cv::Point2f uq = uq_t + kp.pt;
                // if either point falls outside the image, clear the descriptor and skip this keypoint
                if (up.x < 0 || up.y < 0 || up.x > image.cols - 1 || up.y > image.rows - 1 ||
                    uq.x < 0 || uq.y < 0 || uq.x > image.cols - 1 || uq.y > image.rows - 1) {
                    d.clear();
                    break;
                }
                // another way of reading a pixel: at<uchar>(Point); d_i = 0 if I(p') > I(q'), else 1
                d[i] = image.at<uchar>(up) > image.at<uchar>(uq) ? 0 : 1;
                // END YOUR CODE HERE
            }
            desc.push_back(d);
        }

        int bad = 0;
        for (auto &d: desc) {
            if (d.empty()) bad++;
        }
        cout << "bad/total: " << bad << "/" << desc.size() << endl;
        return;
    }

3) Brute-force matching

// brute-force matching
    void bfMatch(const vector<DescType> &desc1, const vector<DescType> &desc2, vector<cv::DMatch> &matches) {
        int d_max = 50;

        // START YOUR CODE HERE (~12 lines)
        // find matches between desc1 and desc2.
        for (size_t i = 0; i < desc1.size(); ++i) {
            if (desc1[i].empty()) continue;

            int d_min = 256, index2 = -1;
            for (size_t j = 0; j < desc2.size(); j++) {
                if (desc2[j].empty()) continue;
                // Hamming distance: count differing bits
                int dist = 0;
                for (size_t k = 0; k < 256; k++) {
                    dist += desc1[i][k] ^ desc2[j][k];
                    if (dist > d_max) break;  // early exit: already worse than the threshold
                }
                if (dist < d_max && dist < d_min) {
                    d_min = dist;
                    index2 = j;
                }
            }
            // accept the best candidate only if it is within the distance threshold
            if (d_min < d_max) {
                matches.push_back(cv::DMatch(i, index2, d_min));
            }
        }
    }

CMakeLists.txt:

cmake_minimum_required(VERSION 2.8)
project(computeORB)

IF(NOT CMAKE_BUILD_TYPE)
    SET(CMAKE_BUILD_TYPE Release)
ENDIF()

MESSAGE("Build type: " ${CMAKE_BUILD_TYPE})

set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}  -Wall  -O3 -march=native ")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall   -O3 -march=native")

# Check C++11 or C++0x support
include(CheckCXXCompilerFlag)
CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11)
CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X)
if(COMPILER_SUPPORTS_CXX11)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
    add_definitions(-DCOMPILEDWITHC11)
    message(STATUS "Using flag -std=c++11.")
elseif(COMPILER_SUPPORTS_CXX0X)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x")
    add_definitions(-DCOMPILEDWITHC0X)
    message(STATUS "Using flag -std=c++0x.")
else()
    message(FATAL_ERROR "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.")
endif()

LIST(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules)

find_package(OpenCV 3.0 QUIET)
if(NOT OpenCV_FOUND)
    find_package(OpenCV 2.4.3 QUIET)
    if(NOT OpenCV_FOUND)
        message(FATAL_ERROR "OpenCV > 2.4.3 not found.")
    endif()
endif()

include_directories(${OpenCV_INCLUDE_DIRS})
add_executable( ORB computeORB.cpp )
target_link_libraries( ORB ${OpenCV_LIBS} )

The complete code can be obtained from the download link in the original post.

The results are shown below:
[figure: extracted ORB features]
[figure: ORB feature matching result]
1) Why is ORB called a binary feature?
Because ORB consists of Oriented FAST keypoints plus an improved BRIEF descriptor, and BRIEF is a binary descriptor whose vector is made up of 0s and 1s; hence ORB can be regarded as a binary feature.
2) Why is 50 used as the matching threshold? What happens with a larger or smaller value?
I experimented with different thresholds. With the threshold set to 10, the result is:
[figure: matching result with threshold 10]
Judging from the result, almost no matches are found.
With the threshold set to 500:
[figure: matching result with threshold 500]
The result contains a large number of false matches.

3) How does brute-force matching perform on your machine? Can you think of matching methods that reduce the amount of computation?

ORB + PROSAC can reduce false matches, and using optical flow or the direct method lowers the computational load of the front end. For the matching step itself, an approximate nearest-neighbour index such as FLANN with LSH avoids the exhaustive comparison; a sketch is given below.
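A rough sketch of that idea, not part of the assignment code and assuming OpenCV 3 (matching the CMake configuration above): binary descriptors can be indexed with FLANN's LSH so that matching avoids the exhaustive O(M x N) comparison. The function name flannLshMatch and the inputs img1/img2 are placeholders for illustration.

#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/flann/miniflann.hpp>

// Hypothetical helper: match ORB descriptors with a FLANN LSH index instead of
// an exhaustive comparison. img1/img2 are assumed to be loaded grayscale cv::Mat images.
std::vector<cv::DMatch> flannLshMatch(const cv::Mat &img1, const cv::Mat &img2) {
    cv::Ptr<cv::ORB> orb = cv::ORB::create(1000);   // up to 1000 keypoints per image
    std::vector<cv::KeyPoint> kps1, kps2;
    cv::Mat desc1, desc2;
    orb->detectAndCompute(img1, cv::noArray(), kps1, desc1);
    orb->detectAndCompute(img2, cv::noArray(), kps2, desc2);

    // LSH parameters: 12 hash tables, 20-bit keys, multi-probe level 2 (typical values)
    cv::FlannBasedMatcher matcher(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
    std::vector<cv::DMatch> matches;
    matcher.match(desc1, desc2, matches);           // approximate nearest neighbours

    // keep the same distance threshold as the exercise (d_max = 50)
    std::vector<cv::DMatch> good;
    for (const cv::DMatch &m : matches)
        if (m.distance <= 50) good.push_back(m);
    return good;
}

LSH is approximate, so a few true nearest neighbours may be missed, but for thousands of descriptors it is considerably cheaper than the brute-force loop above.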

II. Recovering R, t from E

[figure: assignment problem statement]
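For reference (the figure itself is not reproduced), the decomposition that the code below implements is the standard one: compute $E = U \Sigma V^T$, replace the singular values by $\Sigma = \mathrm{diag}\big(\tfrac{\sigma_1+\sigma_2}{2}, \tfrac{\sigma_1+\sigma_2}{2}, 0\big)$, and form the two candidate solutions

$$
\hat t_1 = U R_Z\!\left(\tfrac{\pi}{2}\right) \Sigma\, U^T, \qquad
\hat t_2 = U R_Z\!\left(-\tfrac{\pi}{2}\right) \Sigma\, U^T, \qquad
R_1 = U R_Z^T\!\left(\tfrac{\pi}{2}\right) V^T, \qquad
R_2 = U R_Z^T\!\left(-\tfrac{\pi}{2}\right) V^T,
$$

where $R_Z(\pm\pi/2)$ is a rotation of $\pm 90^\circ$ about the $Z$ axis; $\hat t R = E$ then holds up to scale.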
The code, E2Rt.cpp, is as follows:

#include <Eigen/Core>
#include <Eigen/Dense>
#include <Eigen/Geometry>

using namespace Eigen;

#include <sophus/so3.h>

#include <iostream>

using namespace std;

int main(int argc, char **argv) {

   // the given essential matrix E
   Matrix3d E;
   E << -0.0203618550523477, -0.4007110038118445, -0.03324074249824097,
           0.3939270778216369, -0.03506401846698079, 0.5857110303721015,
           -0.006788487241438284, -0.5815434272915686, -0.01438258684486258;

   // the R and t to be computed
   Matrix3d R;
   Vector3d t;

   // SVD and fix singular values
   // START YOUR CODE HERE
   Eigen::JacobiSVD<Matrix3d> svd(E, ComputeFullU | ComputeFullV); // SVD decomposition of E
   Vector3d sigma1 = svd.singularValues(); // the singular values come out as a 3x1 vector
   Matrix3d SIGMA; // rebuild the diagonal matrix SIGMA: average the first two singular values and zero the third
   cout << "sigma = \n" << sigma1 << endl;
   SIGMA << (sigma1(0, 0) + sigma1(1, 0)) / 2, 0, 0,
           0, (sigma1(0, 0) + sigma1(1, 0)) / 2, 0,
           0, 0, 0;
//    SIGMA << 1, 0, 0, 0, 1, 0, 0, 0, 0;  // equivalent up to scale
   cout << "SIGMA = \n" << SIGMA << endl;

   // set t1, t2, R1, R2
   Matrix3d t_wedge1;
   Matrix3d t_wedge2;
   Matrix3d R1;
   Matrix3d R2;

   Matrix3d R_z1 = AngleAxisd(M_PI / 2, Vector3d(0, 0, 1)).toRotationMatrix();  // rotation of +90 degrees about the Z axis
   Matrix3d R_z2 = AngleAxisd(-M_PI / 2, Vector3d(0, 0, 1)).toRotationMatrix(); // rotation of -90 degrees about the Z axis

   Matrix3d U = svd.matrixU(); // U from the SVD
   Matrix3d V = svd.matrixV(); // V from the SVD

   // END YOUR CODE HERE

   // set t1, t2, R1, R2 
   // START YOUR CODE HERE

   t_wedge1 = U * R_z1 * SIGMA * U.transpose(); // first candidate for t^
   t_wedge2 = U * R_z2 * SIGMA * U.transpose(); // second candidate for t^
   R1 = U * R_z1.transpose() * V.transpose();   // first candidate for R
   R2 = U * R_z2.transpose() * V.transpose();   // second candidate for R
   // END YOUR CODE HERE
   cout << "R1 =\n" << R1 << endl;
   cout << "R2 =\n" << R2 << endl;
   cout << "t1 =\n" << Sophus::SO3::vee(t_wedge1) << endl;
   cout << "t2 =\n" << Sophus::SO3::vee(t_wedge2) << endl; // check t^R=E up to scale

   // check t^R=E up to scale
   Matrix3d tR = t_wedge1 * R1;
   cout << "t^R = " << tR << endl;

   return 0;
}

CMakeLists.txt:

cmake_minimum_required(VERSION 2.8)
project(E2Rt)

#set(Sophus_LIBRARIES "/usr/local/libSophus.so")
set( CMAKE_BUILD_TYPE "Release" )
set( CMAKE_CXX_FLAGS "-std=c++11 -O3" )
#set( CMAKE_BUILD_TYPE "Debug" )

#set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR})
#include_directories($ {EIGEN3_INCLUDE_DIRS})

find_package(Sophus REQUIRED)

include_directories("/usr/include/eigen3")
include_directories(${Sophus_INCLUDE_DIRS})

#add_executable(eigenMatrix eigenMatrix.cpp)
add_executable(E2Rt E2Rt.cpp)
target_link_libraries(E2Rt ${Sophus_LIBRARIES})

The output is shown below:
[figure: console output with R1, R2, t1, t2 and the check t^R = E up to scale]

III. Pose Estimation in Bundle Adjustment with Gauss-Newton

[figure: assignment problem statement]
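For reference (the figure is not reproduced), the task is to estimate the camera pose from $n$ pairs of 3D points $P_i$ and pixel observations $u_i$ by minimizing the total reprojection error over the Lie-algebra parameterization $\xi$ of the pose:

$$
\xi^* = \arg\min_{\xi}\ \frac{1}{2} \sum_{i=1}^{n}
\left\| u_i - \frac{1}{s_i} K \exp(\xi^{\wedge}) P_i \right\|_2^2 .
$$

Each Gauss-Newton iteration accumulates $H = \sum_i J_i^T J_i$ and $b = -\sum_i J_i^T e_i$, solves $H\,\Delta\xi = b$, and applies the update by left multiplication, $T \leftarrow \exp(\Delta\xi^{\wedge})\, T$, as in the code below.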
1)GN-BA.cpp:

#include <Eigen/Core>
#include <Eigen/Dense>

using namespace Eigen;

#include <vector>
#include <fstream>
#include <iostream>
#include <iomanip>

#include "sophus/se3.h"

using namespace std;

typedef vector<Vector3d, Eigen::aligned_allocator<Vector3d>> VecVector3d;
typedef vector<Vector2d, Eigen::aligned_allocator<Vector2d>> VecVector2d; // note: the allocator type must match Vector2d
typedef Matrix<double, 6, 1> Vector6d;

string p3d_file = "../p3d.txt";
string p2d_file = "../p2d.txt";

int main(int argc, char **argv) {

   VecVector2d p2d;
   VecVector3d p3d;
   Matrix3d K;
   double fx = 520.9, fy = 521.0, cx = 325.1, cy = 249.7;
   K << fx, 0, cx, 0, fy, cy, 0, 0, 1;

   // load points into p3d and p2d
   // START YOUR CODE HERE
   fstream i3dFile(p3d_file);
   fstream i2dFile(p2d_file);
   string line;
   while(getline(i3dFile,line))
   {
       stringstream record(line);
       Vector3d vEle;
       for(auto i=0;i<3;i++)
           record>>vEle[i];
       p3d.push_back(vEle);
   }

   while(getline(i2dFile,line))
   {
       stringstream record(line);
       Vector2d vEle;
       for(auto i=0;i<2;i++)
           record>>vEle[i];
       p2d.push_back(vEle);
   }


   // END YOUR CODE HERE
   assert(p3d.size() == p2d.size());

   int iterations = 100;
   double cost = 0, lastCost = 0;
   int nPoints = p3d.size();
   cout << "points: " << nPoints << endl;

   Sophus::SE3 T_esti; // estimated pose

   for (int iter = 0; iter < iterations; iter++) {

       Matrix<double, 6, 6> H = Matrix<double, 6, 6>::Zero();
       Vector6d b = Vector6d::Zero();

       cost = 0;
       // compute cost
       for (int i = 0; i < nPoints; i++) {
           // compute cost for p3d[I] and p2d[I]
           // START YOUR CODE HERE 
           Vector2d p2 = p2d[i];
           Vector3d p3 = p3d[i];

           Vector3d P = T_esti * p3;
           double x = P[0];
           double y = P[1];
           double z = P[2];

           Vector2d p2_ = {fx * ( x/z ) + cx, fy * ( y/z ) + cy};
           Vector2d e = p2 - p2_;
           cost += (e[0]*e[0]+e[1]*e[1]);

       // END YOUR CODE HERE

       // compute jacobian
           Matrix<double, 2, 6> J;
           // START YOUR CODE HERE 
           J(0,0) = -(fx/z);
           J(0,1) = 0;
           J(0,2) = (fx*x/(z*z));
           J(0,3) = (fx*x*y/(z*z));
           J(0,4) = -(fx*x*x/(z*z)+fx);
           J(0,5) = (fx*y/z);
           J(1,0) = 0;
           J(1,1) = -(fy/z);
           J(1,2) = (fy*y/(z*z));
           J(1,3) = (fy*y*y/(z*z)+fy);
           J(1,4) = -(fy*x*y/(z*z));
           J(1,5) = -(fy*x/z);
       // END YOUR CODE HERE

           H += J.transpose() * J;
           b += -J.transpose() * e;
       }

   // solve dx 
       Vector6d dx;

       // START YOUR CODE HERE 
       dx = H.ldlt().solve(b);
       // END YOUR CODE HERE

       if (isnan(dx[0])) {
           cout << "result is nan!" << endl;
           break;
       }

       if (iter > 0 && cost >= lastCost) {
           // cost increase, update is not good
           cout << "cost: " << cost << ", last cost: " << lastCost << endl;
           break;
       }
       // update your estimation
       // START YOUR CODE HERE 
       T_esti = Sophus::SE3::exp(dx)*T_esti;
       // END YOUR CODE HERE   
       lastCost = cost;
        cout << "iteration " << iter << " cost=" << setprecision(12) << cost << endl;
   }
   cout << "estimated pose: \n" << T_esti.matrix() << endl;
   return 0;
}

2)CMakeLists.txt:

cmake_minimum_required(VERSION 2.8)
project(GN-BA)

set(CMAKE_BUILD_TYPE "Release")

set(CMAKE_CXX_FLAGS "-std=c++11")
#set(CMAKE_BUILD_TYPE "Debug")

find_package(Sophus REQUIRED)
include_directories(${Sophus_INCLUDE_DIRS})


include_directories("/usr/include/eigen3")

add_executable(GN-BA GN-BA.cpp)
target_link_libraries(GN-BA ${Sophus_LIBRARIES})

The output is shown below:
[figure: Gauss-Newton iteration log and the estimated pose]
1) How is the reprojection error defined?
The reprojection error is the difference between the observed projection of a real 3D point on the image plane (the measured pixel) and its reprojection, i.e. the "virtual" pixel obtained by projecting the point with the current estimate.
2) What is the Jacobian of this error with respect to the optimization variable?
The original post shows the matrix as an image; it is the standard 2x6 Jacobian of the reprojection error with respect to the left perturbation of the pose.
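Written out, with $P' = (x, y, z)^T = T P$ the point in the camera frame, the Jacobian is (consistent with the code above):

$$
\frac{\partial e}{\partial \delta\xi} = -
\begin{bmatrix}
\frac{f_x}{z} & 0 & -\frac{f_x x}{z^2} & -\frac{f_x x y}{z^2} & f_x + \frac{f_x x^2}{z^2} & -\frac{f_x y}{z} \\[4pt]
0 & \frac{f_y}{z} & -\frac{f_y y}{z^2} & -f_y - \frac{f_y y^2}{z^2} & \frac{f_y x y}{z^2} & \frac{f_y x}{z}
\end{bmatrix}.
$$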
3) After solving for the update, how is it applied to the previous estimate?
By left-multiplying the exponential of the Lie-algebra update (a quaternion left-multiplication also works, and right multiplication is possible as well). In the code above I use the Lie-algebra left multiplication: T_esti = Sophus::SE3::exp(dx) * T_esti.

IV. Trajectory Alignment with ICP

[figure: assignment problem statement]
The steps for solving the ICP problem with an SVD are as follows:
1) Compute the centroids p and p' of the two point sets, then compute the de-centered coordinates of every point (formulas below):
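For reference, with the estimated points $p_i$ and ground-truth points $p'_i$ ($i = 1, \dots, n$), the centroids and de-centered coordinates used by the code below are

$$
p = \frac{1}{n}\sum_{i=1}^{n} p_i, \qquad p' = \frac{1}{n}\sum_{i=1}^{n} p'_i, \qquad
q_i = p_i - p, \qquad q'_i = p'_i - p'.
$$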

Point3d pMid1, pMid2;     // centers of mass (centroids)
    int N = pEstimated.size();
    for ( int i=0; i<N; i++ )
    {
        pMid1 += pEstimated[i];
        pMid2 += pGroundTruth[i];
    }
    pMid1 = Point3d( pMid1/ N);
    pMid2 = Point3d( pMid2/ N);
    vector<Point3d>     q1 ( N ), q2 ( N ); // de-centered coordinates
    for ( int i=0; i<N; i++ )
    {
        q1[i] = pEstimated[i] - pMid1;
        q2[i] = pGroundTruth[i] - pMid2;
    }

2) Compute the rotation matrix by solving the following optimization problem:
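For reference, the optimization and its closed-form solution (what the snippet below computes) are

$$
R^* = \arg\min_{R}\ \frac{1}{2}\sum_{i=1}^{n} \left\| q_i - R\, q'_i \right\|^2, \qquad
W = \sum_{i=1}^{n} q_i\, {q'_i}^{T} = U \Sigma V^T, \qquad R = U V^T .
$$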

 Eigen::Matrix3d W = Eigen::Matrix3d::Zero();
    for ( int i=0; i<N; i++ )
    {
        W = W + Eigen::Vector3d ( q1[i].x, q1[i].y, q1[i].z ) * Eigen::Vector3d ( q2[i].x, q2[i].y, q2[i].z ).transpose();
    }
    // the SVD is computed once, after W has been accumulated over all points
    Eigen::JacobiSVD<Eigen::Matrix3d> svd ( W, Eigen::ComputeFullU|Eigen::ComputeFullV );
    Eigen::Matrix3d U = svd.matrixU();
    Eigen::Matrix3d V = svd.matrixV();
    cout<<"U="<<U<<endl;
    cout<<"V="<<V<<endl;
    Eigen::Matrix3d R_ = U* ( V.transpose() );

3) Using the R from step 2, compute t:
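For reference, the translation in the omitted figure is the standard expression

$$
t^* = p - R\, p' ,
$$

which is exactly what the line of code below evaluates.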

 Eigen::Vector3d t_ = Eigen::Vector3d ( pMid1.x, pMid1.y, pMid1.z ) - R_ * Eigen::Vector3d ( pMid2.x, pMid2.y, pMid2.z );

The code that draws the two trajectories without alignment, "tra_icp.cpp", is as follows:

//
// Created by chengjun on 2019/12/4.
//
#include <sophus/se3.h>
#include <string>
#include <iostream>
#include <fstream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <Eigen/Core>
#include <Eigen/Geometry>
#include <Eigen/SVD>
#include <chrono>
#include <pangolin/pangolin.h>
using namespace std;
using namespace cv;
using namespace Sophus;
// plot the two trajectories with Pangolin
void DrawTrajectory(vector<Sophus::SE3, Eigen::aligned_allocator<Sophus::SE3>> poses1,vector<Sophus::SE3, Eigen::aligned_allocator<Sophus::SE3>> poses2) {
    if (poses1.empty()||poses2.empty()) {
        cerr << "Trajectory is empty!" << endl;
        return;
    }
    // create pangolin window and plot the trajectory
    pangolin::CreateWindowAndBind("Trajectory Viewer", 1024, 768);
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    pangolin::OpenGlRenderState s_cam(
            pangolin::ProjectionMatrix(1024, 768, 500, 500, 512, 389, 0.1, 1000),
            pangolin::ModelViewLookAt(0, -0.1, -1.8, 0, 0, 0, 0.0, -1.0, 0.0)
    );
    pangolin::View &d_cam = pangolin::CreateDisplay()
            .SetBounds(0.0, 1.0, pangolin::Attach::Pix(175), 1.0, -1024.0f / 768.0f)
            .SetHandler(new pangolin::Handler3D(s_cam));
    while (pangolin::ShouldQuit() == false) {
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        d_cam.Activate(s_cam);
        glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
        glLineWidth(2);
        for (size_t i = 0; i < poses1.size() - 1; i++) {
            //glColor3f(1 - (float) i / poses1.size(), 0.0f, (float) i / poses1.size());
            glColor3f(1.0f, 0.0f, 0.0f);
            glBegin(GL_LINES);
            auto p1 = poses1[i], p2 = poses1[i + 1];
            glVertex2d(p1.translation()[0], p1.translation()[1]);//p1.translation()[2]
            glVertex2d(p2.translation()[0], p2.translation()[1]);//p2.translation()[2]
            glEnd();
        }
        for (size_t i = 0; i < poses2.size() - 1; i++) {
            //glColor3f(1 - (float) i / poses2.size(), 0.0f, (float) i / poses2.size());
            glColor3f(0.0f, 0.0f, 0.0f);
            glBegin(GL_LINES);
            auto p1 = poses2[i], p2 = poses2[i + 1];
            glVertex2d(p1.translation()[0], p1.translation()[1]);//p1.translation()[2]
            glVertex2d(p2.translation()[0], p2.translation()[1]);//p2.translation()[2]
            glEnd();
        }
        pangolin::FinishFrame();
        usleep(5000);   // sleep 5 ms
    }
}
// main
int main ( int argc, char** argv ) {
    vector<Sophus::SE3, Eigen::aligned_allocator<Sophus::SE3>> ps1, ps2;
    ifstream file("../compare.txt");
    string line; // read the data line by line
    vector<Point3d> pEstimated, pGroundTruth;
    while (getline(file, line)) {
        stringstream record(line);
        vector<double> vTmp(16, 0);
        for (auto &v:vTmp)
            record >> v;
        Point3d p1{vTmp[1], vTmp[2], vTmp[3]};                      // translation
        Eigen::Quaterniond q1{vTmp[4], vTmp[5], vTmp[6], vTmp[7]};  // rotation quaternion
        Sophus::SE3 SE3_qt1(q1, Eigen::Vector3d(p1.x, p1.y, p1.z)); // SE(3) pose -- estimated
        Point3d p2{vTmp[9], vTmp[10], vTmp[11]};
        Eigen::Quaterniond q2{vTmp[12], vTmp[13], vTmp[14], vTmp[15]};
        Sophus::SE3 SE3_qt2(q2, Eigen::Vector3d(p2.x, p2.y, p2.z)); // SE(3) pose -- groundtruth
        pEstimated.push_back(p1);  // only the positions are needed to estimate the alignment
        pGroundTruth.push_back(p2);
        ps1.push_back(SE3_qt1);
        ps2.push_back(SE3_qt2);
    }
    DrawTrajectory(ps1,ps2);
    return 0;
}

CMakeLists.txt:

cmake_minimum_required(VERSION 2.8)
project(draw_trajectory)
#set(Sophus_LIBRARIES "/usr/local/libSophus.so")
set( CMAKE_BUILD_TYPE "Release" )
set( CMAKE_CXX_FLAGS "-std=c++11 -O3" )
#set( CMAKE_BUILD_TYPE "Debug" )
#set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR})
#include_directories($ {EIGEN3_INCLUDE_DIRS})
find_package(OpenCV 3.0 QUIET)
if(NOT OpenCV_FOUND)
    find_package(OpenCV 2.4.3 QUIET)
    if(NOT OpenCV_FOUND)
        message(FATAL_ERROR "OpenCV > 2.4.3 not found.")
    endif()
endif()
find_package(Pangolin REQUIRED)
find_package(Sophus REQUIRED)
include_directories("/usr/include/eigen3")
include_directories(
        ${Pangolin_INCLUDE_DIRS}
        ${Sophus_INCLUDE_DIR}
        ${OpenCV_INCLUDE_DIRS}
)
#add_executable(eigenMatrix eigenMatrix.cpp)
add_executable(trajectory tra_icp.cpp)
target_link_libraries(trajectory
        ${Pangolin_LIBRARIES}
        ${Sophus_LIBRARIES}
        ${OpenCV_LIBS}
        )

The result is shown below:
[figure: estimated (red) and ground-truth (black) trajectories before alignment]
The code that aligns the trajectories with ICP is as follows:

//
// Created by chengjun on 2019/12/4.
//
#include <sophus/se3.h>
#include <string>
#include <iostream>
#include <fstream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <Eigen/Core>
#include <Eigen/Geometry>
#include <Eigen/SVD>
#include <chrono>
#include <pangolin/pangolin.h>
using namespace std;
using namespace cv;
using namespace Sophus;
// plot the two trajectories with Pangolin
void DrawTrajectory(vector<Sophus::SE3, Eigen::aligned_allocator<Sophus::SE3>> poses1,vector<Sophus::SE3, Eigen::aligned_allocator<Sophus::SE3>> poses2) {
    if (poses1.empty()||poses2.empty()) {
        cerr << "Trajectory is empty!" << endl;
        return;
    }
    // create pangolin window and plot the trajectory
    pangolin::CreateWindowAndBind("Trajectory Viewer", 1024, 768);
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    pangolin::OpenGlRenderState s_cam(
            pangolin::ProjectionMatrix(1024, 768, 500, 500, 512, 389, 0.1, 1000),
            pangolin::ModelViewLookAt(0, -0.1, -1.8, 0, 0, 0, 0.0, -1.0, 0.0)
    );
    pangolin::View &d_cam = pangolin::CreateDisplay()
            .SetBounds(0.0, 1.0, pangolin::Attach::Pix(175), 1.0, -1024.0f / 768.0f)
            .SetHandler(new pangolin::Handler3D(s_cam));
    while (pangolin::ShouldQuit() == false) {
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        d_cam.Activate(s_cam);
        glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
        glLineWidth(2);
        for (size_t i = 0; i < poses1.size() - 1; i++) {
            //glColor3f(1 - (float) i / poses1.size(), 0.0f, (float) i / poses1.size());
            glColor3f(1.0f, 0.0f, 0.0f);
            glBegin(GL_LINES);
            auto p1 = poses1[i], p2 = poses1[i + 1];
            glVertex2d(p1.translation()[0], p1.translation()[1]);//p1.translation()[2]
            glVertex2d(p2.translation()[0], p2.translation()[1]);//p2.translation()[2]
            glEnd();
        }
        for (size_t i = 0; i < poses2.size() - 1; i++) {
            //glColor3f(1 - (float) i / poses2.size(), 0.0f, (float) i / poses2.size());
            glColor3f(0.0f, 0.0f, 0.0f);
            glBegin(GL_LINES);
            auto p1 = poses2[i], p2 = poses2[i + 1];
            glVertex2d(p1.translation()[0], p1.translation()[1]);//p1.translation()[2]
            glVertex2d(p2.translation()[0], p2.translation()[1]);//p2.translation()[2]
            glEnd();
        }
        pangolin::FinishFrame();
        usleep(5000);   // sleep 5 ms
    }
}
// main
int main ( int argc, char** argv ) {
    vector<Sophus::SE3, Eigen::aligned_allocator<Sophus::SE3>> ps1, ps2;
    ifstream file("../compare.txt");
    string line; // read the data line by line
    vector<Point3d> pEstimated, pGroundTruth;
    while (getline(file, line)) {
        stringstream record(line);
        vector<double> vTmp(16, 0);
        for (auto &v:vTmp)
            record >> v;
        Point3d p1{vTmp[1], vTmp[2], vTmp[3]};                      // translation
        Eigen::Quaterniond q1{vTmp[4], vTmp[5], vTmp[6], vTmp[7]};  // rotation quaternion
        Sophus::SE3 SE3_qt1(q1, Eigen::Vector3d(p1.x, p1.y, p1.z)); // SE(3) pose -- estimated
        Point3d p2{vTmp[9], vTmp[10], vTmp[11]};
        Eigen::Quaterniond q2{vTmp[12], vTmp[13], vTmp[14], vTmp[15]};
        Sophus::SE3 SE3_qt2(q2, Eigen::Vector3d(p2.x, p2.y, p2.z)); // SE(3) pose -- groundtruth
        pEstimated.push_back(p1);  // only the positions are needed to estimate the alignment
        pGroundTruth.push_back(p2);
        ps1.push_back(SE3_qt1);
        ps2.push_back(SE3_qt2);
    }
    // ICP: solve p1 = R * p2 + t, i.e. the transform from the ground truth to the estimate; its inverse then maps the estimate onto the ground truth
    Point3d pMid1, pMid2;     // center of mass
    int N = pEstimated.size();
    for ( int i=0; i<N; i++ )
    {
        pMid1 += pEstimated[i];
        pMid2 += pGroundTruth[i];
    }
    pMid1 = Point3d( pMid1/ N);
    pMid2 = Point3d( pMid2/ N);
    vector<Point3d>     q1 ( N ), q2 ( N ); // remove the center
    for ( int i=0; i<N; i++ )
    {
        q1[i] = pEstimated[i] - pMid1;
        q2[i] = pGroundTruth[i] - pMid2;
    }
    // compute q1*q2^T
    Eigen::Matrix3d W = Eigen::Matrix3d::Zero();
    for ( int i=0; i<N; i++ )
    {
        W=W+Eigen::Vector3d ( q1[i].x, q1[i].y, q1[i].z ) * Eigen::Vector3d ( q2[i].x, q2[i].y, q2[i].z ).transpose();
    }
    cout<<"W="<<W<<endl;
    // SVD on W
    Eigen::JacobiSVD<Eigen::Matrix3d> svd ( W, Eigen::ComputeFullU|Eigen::ComputeFullV );
    Eigen::Matrix3d U = svd.matrixU();
    Eigen::Matrix3d V = svd.matrixV();
    cout<<"U="<<U<<endl;
    cout<<"V="<<V<<endl;
    Eigen::Matrix3d R_ = U* ( V.transpose() );
    Eigen::Vector3d t_ = Eigen::Vector3d ( pMid1.x, pMid1.y, pMid1.z ) - R_ * Eigen::Vector3d ( pMid2.x, pMid2.y, pMid2.z );
    cout<<"R_="<<R_<<endl;
    cout<<"t_="<<t_<<endl;
    // invert to get the transform from the estimated trajectory to the ground truth
    Eigen::Matrix3d R=R_.inverse();
    Eigen::Vector3d t=-R*t_;
    SE3 SE3_T(R,t);
    for(auto &p:ps1)
        p=SE3_T*p;
    DrawTrajectory(ps1,ps2);
    return 0;
}

The CMakeLists.txt is the same as above. The result is shown below:
[figure: trajectories after ICP alignment]

