Learning the Visual SLAM Lecture 9 Code with More Annotations [2]

First, thanks to the original author for the material. This post mainly references https://blog.csdn.net/robinhjwy/article/category/7301364

Since the original author's annotations are already very detailed, I could find almost nothing to add, so this one is marked as a repost.

Contents

The VisualOdometry class

visual_odometry.h

visual_odometry.cpp

run_vo.cpp

The g2o_types classes

g2o_types.h

g2o_types.cpp


The VisualOdometry class

Dr. Gao (the book's author): we only care about the motion estimate between two frames and do not optimize the positions of the feature points. Yet by stringing the estimated poses together, we still obtain a motion trajectory. This approach can be seen as a pair-wise, structureless VO.
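To make "stringing the poses together" concrete, here is a minimal sketch (my own illustration, assuming the old non-templated Sophus::SE3 used throughout this project) of chaining pairwise estimates into world-referenced poses:

#include <sophus/se3.h>
#include <vector>

// Sketch: chain pairwise estimates T_c_r (reference -> current) into poses
// T_c_w, starting from the identity (the first camera defines the world).
// All names here are illustrative, not taken from the project.
std::vector<Sophus::SE3> chainPoses ( const std::vector<Sophus::SE3>& pairwise )
{
    std::vector<Sophus::SE3> poses;
    Sophus::SE3 T_prev_w;                       // default-constructed: identity
    for ( const Sophus::SE3& T_c_r : pairwise )
    {
        Sophus::SE3 T_c_w = T_c_r * T_prev_w;   // T_c_w = T_c_r * T_r_w
        poses.push_back ( T_c_w );
        T_prev_w = T_c_w;
    }
    return poses;
}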

visual_odometry.h

#ifndef VISUALODOMETRY_H
#define VISUALODOMETRY_H

#include "myslam/common_include.h"
#include "myslam/map.h"

#include <opencv2/features2d/features2d.hpp>

namespace myslam 
{
class VisualOdometry
{
public:
    //Smart pointer to this class; later interfaces can simply pass a VisualOdometry::Ptr
    typedef shared_ptr<VisualOdometry> Ptr;
    //Enum describing the VO state: initializing, normal (OK), lost
    enum VOState {
        INITIALIZING=-1,
        OK=0,
        LOST
    };
    //The reference frame and current frame used by the frame-to-frame VO, plus the VO state and the whole map.
    VOState     state_;     // current VO status
    Map::Ptr    map_;       // map with all frames and map points
    
    Frame::Ptr  ref_;       // reference key-frame 
    Frame::Ptr  curr_;      // current frame 
    //What matching needs: keypoints, descriptors, matches. Compared with 0.2, the reference frame's 3D points, descriptors, etc. are gone.
    cv::Ptr<cv::ORB> orb_;  // orb detector and computer 
    vector<cv::KeyPoint>    keypoints_curr_;    // keypoints in current frame
    Mat                     descriptors_curr_;  // descriptor in current frame 
    //In the matcher, the correspondences are now between map points and keypoints of the current frame
    cv::FlannBasedMatcher   matcher_flann_;     // flann matcher
    vector<MapPoint::Ptr>   match_3dpts_;       // matched 3d points 
    vector<int>             match_2dkp_index_;  // matched 2d pixels (index of kp_curr)
    //The estimation result T, plus the inlier and lost counts that indicate how good it is
    SE3 T_c_w_estimated_;    // the estimated pose of current frame 
    int num_inliers_;        // number of inlier features in icp
    int num_lost_;           // number of lost times
    
    // parameters 
    int num_of_features_;   // number of features
    double scale_factor_;   // scale in image pyramid
    int level_pyramid_;     // number of pyramid levels
    float match_ratio_;     // ratio for selecting  good matches
    int max_num_lost_;      // max number of continuous lost times
    int min_inliers_;       // minimum inliers
    //Criteria for deciding whether a frame is a keyframe: simply a minimum amount of rotation and translation; motion above these thresholds makes the frame a keyframe
    double key_frame_min_rot;   // minimal rotation of two key-frames
    double key_frame_min_trans; // minimal translation of two key-frames
    double  map_point_erase_ratio_; // remove map point ratio
    
public: // functions 
    VisualOdometry();
    ~VisualOdometry();
    //The core processing function: a new frame is added here and then processed
    bool addFrame( Frame::Ptr frame );      // add a new frame 
    
protected:  
    // inner operation 
    //Internal helper functions, mainly for feature matching
    void extractKeyPoints();
    void computeDescriptors(); 
    void featureMatching();
    void poseEstimationPnP(); 
    //Newly added map-maintenance function: it prunes and grows the local map (rather than running a full backend optimization)
    void optimizeMap();
    //Keyframe-related helper functions,
    //plus the function that adds new map points
    void addKeyFrame();
    void addMapPoints();
    bool checkEstimatedPose(); 
    bool checkKeyFrame();
    //Newly added function that returns the viewing angle
    double getViewAngle( Frame::Ptr frame, MapPoint::Ptr point );
    
};
}

#endif // VISUALODOMETRY_H

visual_odometry.cpp

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <algorithm>
#include <boost/timer.hpp>

#include "myslam/config.h"
#include "myslam/visual_odometry.h"
#include "myslam/g2o_types.h"

namespace myslam
{
//Default constructor: supply defaults and read the configuration parameters
VisualOdometry::VisualOdometry() :
    state_ ( INITIALIZING ), ref_ ( nullptr ), curr_ ( nullptr ), map_ ( new Map ), num_lost_ ( 0 ), num_inliers_ ( 0 ), matcher_flann_ ( new cv::flann::LshIndexParams ( 5,10,2 ) )
{
    num_of_features_    = Config::get<int> ( "number_of_features" );
    scale_factor_       = Config::get<double> ( "scale_factor" );
    level_pyramid_      = Config::get<int> ( "level_pyramid" );
    match_ratio_        = Config::get<float> ( "match_ratio" );
    max_num_lost_       = Config::get<float> ( "max_num_lost" );
    min_inliers_        = Config::get<int> ( "min_inliers" );
    key_frame_min_rot   = Config::get<double> ( "keyframe_rotation" );
    key_frame_min_trans = Config::get<double> ( "keyframe_translation" );
    map_point_erase_ratio_ = Config::get<double> ( "map_point_erase_ratio" );
    //Previously create() was always called with its default values and no arguments; here some parameters are passed in, see the function definition
    orb_ = cv::ORB::create ( num_of_features_, scale_factor_, level_pyramid_ );
}
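//For reference, the Config::get calls above read an OpenCV FileStorage YAML
//parameter file. A minimal sketch of such a file (the values are illustrative only):
//  %YAML:1.0
//  number_of_features: 500
//  scale_factor: 1.2
//  level_pyramid: 8
//  match_ratio: 2.0
//  max_num_lost: 10
//  min_inliers: 10
//  keyframe_rotation: 0.1
//  keyframe_translation: 0.1
//  map_point_erase_ratio: 0.1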

VisualOdometry::~VisualOdometry()
{

}
//The core addFrame(): takes the new frame and, depending on the current VO state, either initializes or computes T
bool VisualOdometry::addFrame ( Frame::Ptr frame )
{
    //Branch on the VO state
    switch ( state_ )
    {
    //First frame: initialize
    case INITIALIZING:
    {
        //Switch the state to OK
        state_ = OK;
        //Since this is initialization, both the current frame and the reference frame are this first frame
        curr_ = ref_ = frame;
        // extract features from first frame and add them into map
        //Matching preparation: extract keypoints and compute descriptors
        extractKeyPoints();
        computeDescriptors();
        //Previously, adding a keyframe meant calling insertKeyFrame() of the Map class;
        //here the first frame is added into the map through the class's own addKeyFrame()
        addKeyFrame();      // the first frame is a key-frame
        break;
    }
    //Normal state: match features and compute T with poseEstimationPnP()
    case OK:
    {
        //The pipeline change: each frame's pose is now iterated directly against the map,
        //instead of matching against the reference frame's 3D points to get T
        curr_ = frame;
        //Note this line: when the new frame arrives, its pose is first set to the reference frame's pose,
        //so that if matching fails and the frame counts as lost, it still carries the reference pose.
        //If everything goes well, the estimated pose simply overwrites this value
        curr_->T_c_w_ = ref_->T_c_w_;
        extractKeyPoints();
        computeDescriptors();
        featureMatching();
        //Estimate the pose
        poseEstimationPnP();
        //Handle the result according to the quality of the estimate
        if ( checkEstimatedPose() == true ) // a good estimation
        {
            //Good estimate: commit it as the current pose
            curr_->T_c_w_ = T_c_w_estimated_;
            optimizeMap();
            num_lost_ = 0;
            //Check whether this is a keyframe; if so, add it
            if ( checkKeyFrame() == true ) // is a key-frame
            {
                addKeyFrame();
            }
        }
        else // bad estimation due to various reasons
        {
            //Bad estimate: increment the lost counter; once it exceeds the maximum, switch the VO state to LOST
            num_lost_++;
            if ( num_lost_ > max_num_lost_ )
            {
                state_ = LOST;
            }
            return false;
        }
        break;
    }
    case LOST:
    {
        cout<<"vo has lost."<<endl;
        break;
    }
    }

    return true;
}
//Extract keypoints
void VisualOdometry::extractKeyPoints()
{
    boost::timer timer;
    orb_->detect ( curr_->color_, keypoints_curr_ );
    cout<<"extract keypoints cost time: "<<timer.elapsed() <<endl;
}
//Compute descriptors
void VisualOdometry::computeDescriptors()
{
    boost::timer timer;
    orb_->compute ( curr_->color_, keypoints_curr_, descriptors_curr_ );
    cout<<"descriptor computation cost time: "<<timer.elapsed() <<endl;
}
//Feature matching
void VisualOdometry::featureMatching()
{
    boost::timer timer;
    vector<cv::DMatch> matches;
    // select the candidates in map 
    //A target Mat that collects the descriptors of the candidate map points, since matching operates on descriptors
    Mat desp_map;
    //Candidate map points for matching
    vector<MapPoint::Ptr> candidate;
    //Decide which map points are candidates: iterate over all points of the maintained local map,
    //use isInFrame() to check which of them fall inside the current frame,
    //and push those into candidate and their descriptors into desp_map
    for ( auto& allpoints: map_->map_points_ )
    {
        //Take the map point out of the pair and bind it to p
        MapPoint::Ptr& p = allpoints.second;
        // check if p in curr frame image 
        //Visibility in the current frame decides whether the point is added
        if ( curr_->isInFrame(p->pos_) )
        {
            // add to candidate 
            //Increment the point's visible count
            p->visible_times_++;
            //Push the point into candidate
            candidate.push_back( p );
            //Push its descriptor into desp_map
            desp_map.push_back( p->descriptor_ );
        }
    }
    //Matching now runs against desp_map (the local map) instead of the former reference frame
    matcher_flann_.match ( desp_map, descriptors_curr_, matches );
    // select the best matches
    //Find the minimum distance with std::min_element and a lambda;
    //in short, the smallest distance in matches is assigned to min_dis
    float min_dis = std::min_element (
                        matches.begin(), matches.end(),
                        [] ( const cv::DMatch& m1, const cv::DMatch& m2 )
    {
        return m1.distance < m2.distance;
    } )->distance;
    
    //Filter matches by the minimum distance: keep only matches whose distance is below
    //max( min_dis * match_ratio_, 30 ), giving a reliable set within a controlled distance range
    match_3dpts_.clear();
    match_2dkp_index_.clear();
    for ( cv::DMatch& m : matches )
    {
        if ( m.distance < max<float> ( min_dis*match_ratio_, 30.0 ) )
        {
            //Unlike before, a good match m is not simply pushed into feature_matches_.
            //candidate holds the map points currently visible; from it, the map points of good
            //matches are pushed into match_3dpts_, the set of map points used to compute T for
            //this frame. candidate is thus only intermediate storage, refreshed for each new frame.
            //match_2dkp_index_ works the same way, except it records indices into the current
            //frame's detected keypoint array
            match_3dpts_.push_back( candidate[m.queryIdx] );
            match_2dkp_index_.push_back( m.trainIdx );
        }
    }
    cout<<"good matches: "<<match_3dpts_.size() <<endl;
    cout<<"match cost time: "<<timer.elapsed() <<endl;
}
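//Note: matcher_flann_ was constructed with cv::flann::LshIndexParams because ORB
//descriptors are binary, and FLANN's default KD-tree index only handles float
//descriptors. As a sanity check (a sketch, not part of the project), a brute-force
//Hamming matcher would produce equivalent matches:
//  cv::BFMatcher matcher ( cv::NORM_HAMMING );
//  matcher.match ( desp_map, descriptors_curr_, matches );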
//Core function: estimate the pose with PnP
void VisualOdometry::poseEstimationPnP()
{
    // construct the 3d 2d observations
    vector<cv::Point3f> pts3d;
    vector<cv::Point2f> pts2d;
    //As seen here, the 3D points come from the map (playing the role of the reference side) and the 2D points from the current frame
    for ( int index:match_2dkp_index_ )
    {
        pts2d.push_back ( keypoints_curr_[index].pt );
    }
    for ( MapPoint::Ptr pt:match_3dpts_ )
    {
        pts3d.push_back( pt->getPositionCV() );
    }
    //Build the camera intrinsic matrix K
    Mat K = ( cv::Mat_<double> ( 3,3 ) <<
              ref_->camera_->fx_, 0, ref_->camera_->cx_,
              0, ref_->camera_->fy_, ref_->camera_->cy_,
              0,0,1
            );
    //Rotation vector, translation vector, inlier array
    Mat rvec, tvec, inliers;
    //The core call: cv::solvePnPRansac() solves the pose from the 3D-2D correspondences
    cv::solvePnPRansac ( pts3d, pts2d, K, Mat(), rvec, tvec, false, 100, 4.0, 0.99, inliers );
    //inliers stores one inlier index per row, so the inlier count is its row count
    num_inliers_ = inliers.rows;
    cout<<"pnp inliers: "<<num_inliers_<<endl;
    //Build the estimated T from the rotation and translation. Since the 3D points are map points in world coordinates, this is T_c_w, the pose of the current frame w.r.t. the world; chaining these over all frames gives the trajectory
    T_c_w_estimated_ = SE3 (
                           SO3 ( rvec.at<double> ( 0,0 ), rvec.at<double> ( 1,0 ), rvec.at<double> ( 2,0 ) ),
                           Vector3d ( tvec.at<double> ( 0,0 ), tvec.at<double> ( 1,0 ), tvec.at<double> ( 2,0 ) )
                       );
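    //Caveat (hedged, a known pitfall with the old Sophus): the SO3(rx,ry,rz)
    //constructor used above composes three single-axis exponentials, which is not
    //in general the same as treating (rx,ry,rz) as the axis-angle vector that
    //solvePnPRansac() returns (they agree only for small angles). A safer
    //conversion goes through OpenCV explicitly, e.g.:
    //  cv::Mat R;
    //  cv::Rodrigues ( rvec, R );    // axis-angle -> 3x3 rotation matrix
    //  Eigen::Matrix3d R_eig;
    //  cv::cv2eigen ( R, R_eig );    // needs <opencv2/core/eigen.hpp>
    //  SE3 T ( SO3 ( R_eig ), t );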

    // using bundle adjustment to optimize the pose
    typedef g2o::BlockSolver<g2o::BlockSolverTraits<6,2>> Block;
    Block::LinearSolverType* linearSolver = new g2o::LinearSolverDense<Block::PoseMatrixType>();
    Block* solver_ptr = new Block ( linearSolver );
    g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg ( solver_ptr );
    g2o::SparseOptimizer optimizer;
    optimizer.setAlgorithm ( solver );

    g2o::VertexSE3Expmap* pose = new g2o::VertexSE3Expmap();
    pose->setId ( 0 );
    pose->setEstimate ( g2o::SE3Quat (
        T_c_w_estimated_.rotation_matrix(), T_c_w_estimated_.translation()
    ));
    optimizer.addVertex ( pose );

    // edges
    for ( int i=0; i<inliers.rows; i++ )
    {
        int index = inliers.at<int> ( i,0 );
        // 3D -> 2D projection
        EdgeProjectXYZ2UVPoseOnly* edge = new EdgeProjectXYZ2UVPoseOnly();
        edge->setId ( i );
        edge->setVertex ( 0, pose );
        edge->camera_ = curr_->camera_.get();
        edge->point_ = Vector3d ( pts3d[index].x, pts3d[index].y, pts3d[index].z );
        edge->setMeasurement ( Vector2d ( pts2d[index].x, pts2d[index].y ) );
        edge->setInformation ( Eigen::Matrix2d::Identity() );
        optimizer.addEdge ( edge );
        // set the inlier map points 
        match_3dpts_[index]->matched_times_++;
    }

    optimizer.initializeOptimization();
    optimizer.optimize ( 10 );

    T_c_w_estimated_ = SE3 (
        pose->estimate().rotation(),
        pose->estimate().translation()
    );
    
    cout<<"T_c_w_estimated_: "<<endl<<T_c_w_estimated_.matrix()<<endl;
}

bool VisualOdometry::checkEstimatedPose()
{
    // check if the estimated pose is good
    if ( num_inliers_ < min_inliers_ )
    {
        cout<<"reject because inlier is too small: "<<num_inliers_<<endl;
        return false;
    }
    // if the motion is too large, it is probably wrong
    SE3 T_r_c = ref_->T_c_w_ * T_c_w_estimated_.inverse();
    Sophus::Vector6d d = T_r_c.log();
    if ( d.norm() > 5.0 )
    {
        cout<<"reject because motion is too large: "<<d.norm() <<endl;
        return false;
    }
    return true;
}

bool VisualOdometry::checkKeyFrame()
{
    //A note on this keyframe test: it is not, as one might first think, 'pick a frame every so often
    //along the trajectory'. Every frame's T is tested, and if the motion is large enough, i.e. the
    //translation or rotation relative to the reference keyframe exceeds a threshold, the frame becomes
    //a keyframe. Under continuously small motion, no keyframe is ever added.
    //This does not conflict with the error check above: checkEstimatedPose() rejects motion with norm
    //greater than 5.0, while the keyframe thresholds in the config file are around 0.1, so the two
    //tests operate at very different magnitudes
    SE3 T_r_c = ref_->T_c_w_ * T_c_w_estimated_.inverse();
    Sophus::Vector6d d = T_r_c.log();
    Vector3d trans = d.head<3>();
    Vector3d rot = d.tail<3>();
    if ( rot.norm() >key_frame_min_rot || trans.norm() >key_frame_min_trans )
        return true;
    return false;
}
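//Note (hedged): Sophus' SE3::log() returns its 6-vector with the translational
//part first, so d.head<3>() above is indeed the translation and d.tail<3>() the
//rotation, matching the two thresholds.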
//Add a keyframe: simply insert the current frame via insertKeyFrame().
//Compared with 0.2, there is one extra step: for the first frame, all of its map points are added into the map
void VisualOdometry::addKeyFrame()
{
    if ( map_->keyframes_.empty() )
    {
        // first key-frame, add all 3d points into map
        for ( size_t i=0; i<keypoints_curr_.size(); i++ )
        {
            double d = curr_->findDepth ( keypoints_curr_[i] );
            if ( d < 0 ) 
                continue;
            Vector3d p_world = ref_->camera_->pixel2world (
                Vector2d ( keypoints_curr_[i].pt.x, keypoints_curr_[i].pt.y ), curr_->T_c_w_, d
            );
            Vector3d n = p_world - ref_->getCamCenter();
            n.normalize();
            //The parameters needed to construct a map point are now all available: 3D position, viewing direction, descriptor, frame; construct the point
            MapPoint::Ptr map_point = MapPoint::createMapPoint(
                p_world, n, descriptors_curr_.row(i).clone(), curr_.get()
            );
            //Insert it into the map
            map_->insertMapPoint( map_point );
        }
    }
    //In every case, insert the current frame into the map as a keyframe and make it the new reference
    map_->insertKeyFrame ( curr_ );
    ref_ = curr_;
}
//New function: add points to the map. The local map works like a sliding window, continuously adding and removing points as the camera moves
void VisualOdometry::addMapPoints()
{
    // add the new map points into map
    //A bool array matched, as large as the current keypoint array, initialized to false
    vector<bool> matched(keypoints_curr_.size(), false); 
    //match_2dkp_index_ holds, for the current frame, the indices into the keypoint array of the good matches against the map;
    //here those already-matched keypoint indices are set to true
    for ( int index:match_2dkp_index_ )
        matched[index] = true;
    //Iterate over the current keypoint array
    for ( int i=0; i<keypoints_curr_.size(); i++ )
    {
        //true means a match was found in the map, i.e. the map already contains this point; skip it
        if ( matched[i] == true )   
            continue;
        //Otherwise no match was found in the map, so treat it as a new point:
        //look up its depth, build the 3D point, construct a map point and insert it into the map.
        //(Note: the original code queries the depth on ref_ even though the keypoint comes from curr_;
        //at this point ref_ is the last keyframe.)
        double d = ref_->findDepth ( keypoints_curr_[i] );
        if ( d<0 )  
            continue;
        Vector3d p_world = ref_->camera_->pixel2world (
            Vector2d ( keypoints_curr_[i].pt.x, keypoints_curr_[i].pt.y ), 
            curr_->T_c_w_, d
        );
        Vector3d n = p_world - ref_->getCamCenter();
        n.normalize();
        MapPoint::Ptr map_point = MapPoint::createMapPoint(
            p_world, n, descriptors_curr_.row(i).clone(), curr_.get()
        );
        map_->insertMapPoint( map_point );
    }
}
//New function: maintain the map. Mainly keeps the map at a manageable size: removing map points, and adding points when too few remain
void VisualOdometry::optimizeMap()
{
    // remove the hardly seen and no visible points
    //Remove map points: iterate over the points in the map and delete them case by case
    for ( auto iter = map_->map_points_.begin(); iter != map_->map_points_.end(); )
    {
        //If the point is no longer visible in the current frame, the camera has moved far away from it; delete it
        if ( !curr_->isInFrame(iter->second->pos_) )
        {
            iter = map_->map_points_.erase(iter);
            continue;
        }
        //Define the match ratio as matched count / visible count. A low ratio means the point is often seen but rarely matched, i.e. its descriptor is hard to match reliably, so delete it
        float match_ratio = float(iter->second->matched_times_)/iter->second->visible_times_;
        if ( match_ratio < map_point_erase_ratio_ )
        {
            iter = map_->map_points_.erase(iter);
            continue;
        }
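        //Remove points whose current viewing direction deviates by more than 30
        //degrees from the direction they were first observed from (their norm_);
        //their appearance has likely changed too much to match reliably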
        
        double angle = getViewAngle( curr_, iter->second );
        if ( angle > M_PI/6. )
        {
            iter = map_->map_points_.erase(iter);
            continue;
        }
        //Further deletion criteria can be added here as needed
        if ( iter->second->good_ == false )
        {
            // TODO try triangulate this map point 
        }
        iter++;
    }
    //Now the cases for adding points: if the current frame matched fewer than 100 map points,
    //usually because the motion was large and there is little overlap with earlier frames, add some
    if ( match_2dkp_index_.size()<100 )
        addMapPoints();
    //If there are too many points (more than 1000), raise the erase ratio so the map tends to shed points
    if ( map_->map_points_.size() > 1000 )  
    {
        // TODO map is too large, remove some one 
        map_point_erase_ratio_ += 0.05;
    }
    //Otherwise keep the erase ratio at 0.1, holding the map size in equilibrium
    else 
        map_point_erase_ratio_ = 0.1;
    cout<<"map points: "<<map_->map_points_.size()<<endl;
}
//Get the viewing angle of a map point as seen from a frame; returns the angle as a double
double VisualOdometry::getViewAngle ( Frame::Ptr frame, MapPoint::Ptr point )
{
    //Subtract the camera center from the point position, giving the vector from the camera center to the point
    Vector3d n = point->pos_ - frame->getCamCenter();
    //Normalize it
    n.normalize();
    //Return an angle: acos() is the arc cosine. The dot product satisfies a.b = |a||b|cos<a,b>,
    //so for unit vectors the dot product is the cosine of the angle between them, and acos() gives the angle.
    //Physically, this is the angle between the direction the point was first observed from (its norm_)
    //and the direction it is seen from in the current frame
    return acos( n.transpose()*point->norm_ );
}


}

 

run_vo.cpp

#include <fstream>
#include <boost/timer.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/viz.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include "myslam/config.h"
#include "myslam/visual_odometry.h"

int main ( int argc, char** argv )
{
    //Command line: expects the parameter file
    if ( argc != 2 )
    {
        cout<<"usage: run_vo parameter_file"<<endl;
        return 1;
    }
    //Bind the parameter file
    myslam::Config::setParameterFile ( argv[1] );
    //Construct the VO; the type is the self-pointing smart pointer defined inside VisualOdometry, allocated with new
    myslam::VisualOdometry::Ptr vo ( new myslam::VisualOdometry );
    //Get the dataset path
    string dataset_dir = myslam::Config::get<string> ( "dataset_dir" );
    cout<<"dataset: "<<dataset_dir<<endl;
    //Open the file and report an error on failure
    ifstream fin ( dataset_dir+"/associate.txt" );
    if ( !fin )
    {
        cout<<"please generate the associate file called associate.txt!"<<endl;
        return 1;
    }
    //Arrays of image file names and timestamps, holding the timestamp-aligned RGB and depth entries listed in associate.txt
    vector<string> rgb_files, depth_files;
    vector<double> rgb_times, depth_times;
    //Read in a loop until the end of the file
    while ( !fin.eof() )
    {
        string rgb_time, rgb_file, depth_time, depth_file;
        //Reading this way takes in each whitespace-separated string
        fin>>rgb_time>>rgb_file>>depth_time>>depth_file;
        //push_back into the arrays.
        //double atof (const char* str);
        //converts a C string to a floating-point number (last time I clumsily wrote a stringstream helper for this...)
        rgb_times.push_back ( atof ( rgb_time.c_str() ) );
        depth_times.push_back ( atof ( depth_time.c_str() ) );
        rgb_files.push_back ( dataset_dir+"/"+rgb_file );
        depth_files.push_back ( dataset_dir+"/"+depth_file );
        //.good() reports the stream state; it returns false at end of file, so break out
        if ( fin.good() == false )
            break;
    }
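    //For reference, associate.txt follows the TUM RGB-D format: each line pairs an
    //RGB image with the nearest depth image by timestamp. An illustrative line
    //(timestamps and paths are examples only):
    //  1305031453.359684 rgb/1305031453.359684.png 1305031453.374112 depth/1305031453.374112.png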
    //Create the camera
    myslam::Camera::Ptr camera ( new myslam::Camera );

    // visualization
    //Step 1: create a visualization window; the constructor argument is the window name
    cv::viz::Viz3d vis ( "Visual Odometry" );
    //Step 2: create the coordinate-frame widgets. The frames exist as Widget objects;
    //the constructor argument is the axis length of the small frame drawn in the window. They are configured below
    cv::viz::WCoordinateSystem world_coor ( 1.0 ), camera_coor ( 0.5 );
    //Step 3: set the viewpoint. This step is optional but makes observation easier;
    //without it a default viewpoint is used, which may feel awkward. Dragging the mouse after start also changes the view.
    //Build the three 3D points that makeCameraPose() needs:
    //camera position, camera focal point, camera y-axis direction
    //(blue = Z, red = X, green = Y)
    cv::Point3d cam_pos ( 0, -1.0, -1.0 ), cam_focal_point ( 0,0,0 ), cam_y_dir ( 0,1,0 );
    //From these three vectors, makeCameraPose() builds an Affine3d camera pose; here it is really the viewer pose, i.e. the viewpoint you start from
    cv::Affine3d cam_pose = cv::viz::makeCameraPose ( cam_pos, cam_focal_point, cam_y_dir );
    //Set the viewing pose with setViewerPose()
    vis.setViewerPose ( cam_pose );
    //Configure the coordinate-frame widgets, then add them to the window.
    //setRenderingProperty() sets a rendering property: the first argument is an enum
    //naming the property (here line width), the second is its value
    world_coor.setRenderingProperty ( cv::viz::LINE_WIDTH, 2.0 );
    camera_coor.setRenderingProperty ( cv::viz::LINE_WIDTH, 1.0 );
    //Add the widgets to the window with showWidget()
    vis.showWidget ( "World", world_coor );
    vis.showWidget ( "Camera", camera_coor );
    //At this point everything that needs showing is in the window: two coordinate frames, the world frame and the camera frame.
    //The world frame is fixed once and for all, so it is never touched again; what remains is computing each frame's camera pose.
    //The core is the for loop below, which keeps setting a new pose on the camera frame, producing the animation effect.
    //Print the number of RGB entries read
    cout<<"read total "<<rgb_files.size() <<" entries"<<endl;
    //The rapid refresh that makes the view animate is driven by this for loop
    for ( int i=0; i<rgb_files.size(); i++ )
    {
        cout<<"****** loop "<<i<<" ******"<<endl;
        //Read the images and create the frame
        Mat color = cv::imread ( rgb_files[i] );
        Mat depth = cv::imread ( depth_files[i], -1 );
        if ( color.data==nullptr || depth.data==nullptr )
            break;
        myslam::Frame::Ptr pFrame = myslam::Frame::createFrame();
        pFrame->camera_ = camera;
        pFrame->color_ = color;
        pFrame->depth_ = depth;
        pFrame->time_stamp_ = rgb_times[i];
        //Time each frame's processing to gauge real-time performance
        boost::timer timer;
        vo->addFrame ( pFrame );
        cout<<"VO costs time: "<<timer.elapsed() <<endl;
        //Break out of the loop once the VO state is LOST
        if ( vo->state_ == myslam::VisualOdometry::LOST )
            break;
        //Note: what moves in the viz window is the camera frame, so what we really need is the transform
        //taking camera-frame points to world coordinates: Pw = Twc * Pc
        SE3 Twc = pFrame->T_c_w_.inverse();

        // show the map and the camera pose
        //Build from Twc the rotation matrix and translation vector that the Affine3d pose needs
        cv::Affine3d M (
            cv::Affine3d::Mat3 (
                Twc.rotation_matrix() ( 0,0 ), Twc.rotation_matrix() ( 0,1 ), Twc.rotation_matrix() ( 0,2 ),
                Twc.rotation_matrix() ( 1,0 ), Twc.rotation_matrix() ( 1,1 ), Twc.rotation_matrix() ( 1,2 ),
                Twc.rotation_matrix() ( 2,0 ), Twc.rotation_matrix() ( 2,1 ), Twc.rotation_matrix() ( 2,2 )
            ),
            cv::Affine3d::Vec3 (
                Twc.translation() ( 0,0 ), Twc.translation() ( 1,0 ), Twc.translation() ( 2,0 )
            )
        );
        //Note the clone()
        Mat img_show = color.clone();
        for ( auto& pt:vo->map_->map_points_ )
        {
            myslam::MapPoint::Ptr p = pt.second;
            Vector2d pixel = pFrame->camera_->world2pixel ( p->pos_, pFrame->T_c_w_ );
            cv::circle ( img_show, cv::Point2f ( pixel ( 0,0 ),pixel ( 1,0 ) ), 5, cv::Scalar ( 0,255,0 ), 2 );
        }
        //Show the image and the viz window
        cv::imshow ( "image", img_show );
        cv::waitKey ( 1 );
        vis.setWidgetPose ( "Camera", M );
        vis.spinOnce ( 1, false );
        cout<<endl;
    }

    return 0;
}
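The program takes the parameter file as its single command-line argument. Assuming the usual slambook project layout (paths illustrative), a run looks like:

bin/run_vo config/default.yaml

where default.yaml must also contain the dataset_dir entry read above, pointing at a TUM RGB-D sequence that includes an associate.txt.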

The g2o_types classes

The goal is to estimate the pose rather than the structure, so we take the camera pose ξ as the optimization variable and build the problem by minimizing the reprojection error. As before, we define a custom g2o edge. It optimizes only a single pose, so it is a unary edge.
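Written out in the book's notation, the pose-only problem minimizes the total reprojection error over the Lie-algebra pose ξ (a sketch of the objective, with K the intrinsics, P_i the matched 3D points, u_i the measured pixels, and s_i the depths):

$$\xi^{*}=\arg\min_{\xi}\ \frac{1}{2}\sum_{i=1}^{n}\left\|u_{i}-\frac{1}{s_{i}}K\exp\left(\xi^{\wedge}\right)P_{i}\right\|_{2}^{2}$$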

g2o_types.h

#ifndef MYSLAM_G2O_TYPES_H
#define MYSLAM_G2O_TYPES_H

#include "myslam/common_include.h"
#include "camera.h"

#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/optimization_algorithm_levenberg.h>
#include <g2o/types/sba/types_six_dof_expmap.h>
#include <g2o/solvers/dense/linear_solver_dense.h>
#include <g2o/core/robust_kernel.h>
#include <g2o/core/robust_kernel_impl.h>

namespace myslam
{
// point and pose
class EdgeProjectXYZRGBD : public g2o::BaseBinaryEdge<3, Eigen::Vector3d, g2o::VertexSBAPointXYZ, g2o::VertexSE3Expmap>
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW;
    virtual void computeError();
    virtual void linearizeOplus();
    virtual bool read( std::istream& in ) { return false; }
    virtual bool write( std::ostream& out) const { return false; }
    
};

// only to optimize the pose, no point
class EdgeProjectXYZRGBDPoseOnly: public g2o::BaseUnaryEdge<3, Eigen::Vector3d, g2o::VertexSE3Expmap >
{
public:
    // Eigen alignment macro: classes with fixed-size Eigen members need aligned operator new
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    // Error: measure = R*point+t
    //Still the two core pieces of any edge-type definition:
    //the error-computation function, implementing how the error is evaluated
    virtual void computeError();
    //the linearization function, i.e. how the Jacobian J is computed
    virtual void linearizeOplus();
    //read/write are required overrides; unused here, so they get stub bodies and are not implemented in the source file
    virtual bool read( std::istream& in ) { return false; }
    virtual bool write( std::ostream& out) const { return false; }
    //Keep the 3D point as a member, since the error and the Jacobian both need it
    Vector3d point_;
};

class EdgeProjectXYZ2UVPoseOnly: public g2o::BaseUnaryEdge<2, Eigen::Vector2d, g2o::VertexSE3Expmap >
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    
    virtual void computeError();
    virtual void linearizeOplus();
    
    virtual bool read( std::istream& in ) { return false; }
    virtual bool write( std::ostream& os ) const { return false; }
    //Keep the 3D point and the camera model as members, since the error and the Jacobian both need them
    Vector3d point_;
    Camera* camera_;
};

}


#endif // MYSLAM_G2O_TYPES_H

g2o_types.cpp

#include "myslam/g2o_types.h"

namespace myslam
{
void EdgeProjectXYZRGBD::computeError()
{
    const g2o::VertexSBAPointXYZ* point = static_cast<const g2o::VertexSBAPointXYZ*> ( _vertices[0] );
    const g2o::VertexSE3Expmap* pose = static_cast<const g2o::VertexSE3Expmap*> ( _vertices[1] );
    _error = _measurement - pose->estimate().map ( point->estimate() );
}

void EdgeProjectXYZRGBD::linearizeOplus()
{
    g2o::VertexSE3Expmap* pose = static_cast<g2o::VertexSE3Expmap *> ( _vertices[1] );
    g2o::SE3Quat T ( pose->estimate() );
    g2o::VertexSBAPointXYZ* point = static_cast<g2o::VertexSBAPointXYZ*> ( _vertices[0] );
    Eigen::Vector3d xyz = point->estimate();
    Eigen::Vector3d xyz_trans = T.map ( xyz );
    double x = xyz_trans[0];
    double y = xyz_trans[1];
    double z = xyz_trans[2];
    
    //The Jacobians below: the pose is six-dimensional and the error three-dimensional.
    //_jacobianOplusXi is the Jacobian with respect to the point: -R
    _jacobianOplusXi = - T.rotation().toRotationMatrix();

    _jacobianOplusXj ( 0,0 ) = 0;
    _jacobianOplusXj ( 0,1 ) = -z;
    _jacobianOplusXj ( 0,2 ) = y;
    _jacobianOplusXj ( 0,3 ) = -1;
    _jacobianOplusXj ( 0,4 ) = 0;
    _jacobianOplusXj ( 0,5 ) = 0;

    _jacobianOplusXj ( 1,0 ) = z;
    _jacobianOplusXj ( 1,1 ) = 0;
    _jacobianOplusXj ( 1,2 ) = -x;
    _jacobianOplusXj ( 1,3 ) = 0;
    _jacobianOplusXj ( 1,4 ) = -1;
    _jacobianOplusXj ( 1,5 ) = 0;

    _jacobianOplusXj ( 2,0 ) = -y;
    _jacobianOplusXj ( 2,1 ) = x;
    _jacobianOplusXj ( 2,2 ) = 0;
    _jacobianOplusXj ( 2,3 ) = 0;
    _jacobianOplusXj ( 2,4 ) = 0;
    _jacobianOplusXj ( 2,5 ) = -1;
}
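//Cross-check of the block above (my derivation, hedged): with error e = z - T p
//and p' = T p, and with g2o ordering the increment as (omega, upsilon), i.e.
//rotation first, the pose Jacobian is
//  de/dxi = [ p'^  -I ]      (p'^ is the skew-symmetric matrix of p'),
//and the point Jacobian is de/dp = -R, matching the entries written out above.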

void EdgeProjectXYZRGBDPoseOnly::computeError()
{
    const g2o::VertexSE3Expmap* pose = static_cast<const g2o::VertexSE3Expmap*> ( _vertices[0] );
    _error = _measurement - pose->estimate().map ( point_ );
}

void EdgeProjectXYZRGBDPoseOnly::linearizeOplus()
{
    g2o::VertexSE3Expmap* pose = static_cast<g2o::VertexSE3Expmap*> ( _vertices[0] );
    g2o::SE3Quat T ( pose->estimate() );
    Vector3d xyz_trans = T.map ( point_ );
    double x = xyz_trans[0];
    double y = xyz_trans[1];
    double z = xyz_trans[2];

    _jacobianOplusXi ( 0,0 ) = 0;
    _jacobianOplusXi ( 0,1 ) = -z;
    _jacobianOplusXi ( 0,2 ) = y;
    _jacobianOplusXi ( 0,3 ) = -1;
    _jacobianOplusXi ( 0,4 ) = 0;
    _jacobianOplusXi ( 0,5 ) = 0;

    _jacobianOplusXi ( 1,0 ) = z;
    _jacobianOplusXi ( 1,1 ) = 0;
    _jacobianOplusXi ( 1,2 ) = -x;
    _jacobianOplusXi ( 1,3 ) = 0;
    _jacobianOplusXi ( 1,4 ) = -1;
    _jacobianOplusXi ( 1,5 ) = 0;

    _jacobianOplusXi ( 2,0 ) = -y;
    _jacobianOplusXi ( 2,1 ) = x;
    _jacobianOplusXi ( 2,2 ) = 0;
    _jacobianOplusXi ( 2,3 ) = 0;
    _jacobianOplusXi ( 2,4 ) = 0;
    _jacobianOplusXi ( 2,5 ) = -1;
}

//The third edge type: the reprojection error
void EdgeProjectXYZ2UVPoseOnly::computeError()
{
    //Fetch the vertex from the vertex array and cast it to a pose pointer (the type on the left could simply be auto)
    const g2o::VertexSE3Expmap* pose = static_cast<const g2o::VertexSE3Expmap*> ( _vertices[0] );
    //Error computation: measurement minus estimate, i.e. the reprojection error.
    //The estimate is computed as T*p, giving camera-frame coordinates, which camera2pixel() then maps to pixel coordinates
    _error = _measurement - camera_->camera2pixel ( 
        pose->estimate().map(point_) );
}

void EdgeProjectXYZ2UVPoseOnly::linearizeOplus()
{
   /**
    * The overall idea here:
    * the Jacobian of the reprojection error is given in the book on p. 164 as eq. (7.45),
    * so it is constructed directly; the construction needs the transformed
    * point coordinates, which are therefore computed first.
    */
    //As usual, fetch the pose from the vertex
    g2o::VertexSE3Expmap* pose = static_cast<g2o::VertexSE3Expmap*> ( _vertices[0] );
    //Build a quaternion-form T (SE3Quat) from the pose estimate
    g2o::SE3Quat T ( pose->estimate() );
    //Transform the 3D point with T: T*p
    Vector3d xyz_trans = T.map ( point_ );
    //Now the transformed point's x, y, z components are extracted; z_2 (z squared) is precomputed purely for convenience, since it is needed in several entries of J
    double x = xyz_trans[0];
    double y = xyz_trans[1];
    double z = xyz_trans[2];
    double z_2 = z*z;
    //Fill J element by element: a 2x6 matrix whose entries match the book's eq. (7.45) (see the note on column ordering after this function)
    _jacobianOplusXi ( 0,0 ) =  x*y/z_2 *camera_->fx_;
    _jacobianOplusXi ( 0,1 ) = - ( 1+ ( x*x/z_2 ) ) *camera_->fx_;
    _jacobianOplusXi ( 0,2 ) = y/z * camera_->fx_;
    _jacobianOplusXi ( 0,3 ) = -1./z * camera_->fx_;
    _jacobianOplusXi ( 0,4 ) = 0;
    _jacobianOplusXi ( 0,5 ) = x/z_2 * camera_->fx_;

    _jacobianOplusXi ( 1,0 ) = ( 1+y*y/z_2 ) *camera_->fy_;
    _jacobianOplusXi ( 1,1 ) = -x*y/z_2 *camera_->fy_;
    _jacobianOplusXi ( 1,2 ) = -x/z *camera_->fy_;
    _jacobianOplusXi ( 1,3 ) = 0;
    _jacobianOplusXi ( 1,4 ) = -1./z *camera_->fy_;
    _jacobianOplusXi ( 1,5 ) = y/z_2 *camera_->fy_;
}
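    //Cross-check (hedged): the book's eq. (7.45) orders the increment as
    //(translation, rotation), while g2o's VertexSE3Expmap orders it as
    //(rotation, translation). The 2x6 matrix above is therefore eq. (7.45) with
    //its two 3-column blocks swapped; the individual entries agree.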


}

 

 
