SLAM - Building the VO Framework - 2

This visual odometry operates on pairs of frames. Two notions are defined: the current frame and the reference frame. Taking the reference frame as the coordinate system, the current frame is matched against it by features, and the motion between the two is estimated.

The workflow is as follows (a minimal sketch of a driver loop that feeds frames through these steps is given after the list):

1. Extract keypoints and descriptors from the incoming current frame.

2. If the system is not initialized, take this frame as the reference frame, compute the 3D positions of its keypoints from the depth map, and return to step 1.

3. Estimate the motion between the reference frame and the current frame.

4. Check whether the estimate succeeded.

5. If it succeeded, set the current frame as the new reference frame and return to step 1.

6. If it failed, count the consecutive lost frames. Once the count exceeds a threshold, set the VO state to lost and terminate the algorithm; otherwise return to step 1.
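
Before the class itself, here is a minimal sketch of a driver loop that pushes frames through the steps above. It is only an illustration: Config::setParameterFile, Camera, Frame::createFrame, the depth_ member and the file paths are assumed from the surrounding project and are not defined in this post.

// Minimal sketch of a driver loop (assumed helpers: Config::setParameterFile, Camera, Frame::createFrame).
#include <iostream>
#include <string>
#include <vector>
#include <opencv2/highgui/highgui.hpp>
#include "myslam/config.h"
#include "myslam/camera.h"
#include "myslam/frame.h"
#include "myslam/visual_odometry.h"

int main()
{
    myslam::Config::setParameterFile ( "default.yaml" );          // assumed parameter file
    myslam::VisualOdometry::Ptr vo ( new myslam::VisualOdometry );
    myslam::Camera::Ptr camera ( new myslam::Camera );

    std::vector<std::string> rgb_files, depth_files;               // filled from the dataset index (omitted here)
    for ( size_t i = 0; i < rgb_files.size(); i++ )
    {
        myslam::Frame::Ptr frame = myslam::Frame::createFrame();   // assumed factory method of Frame
        frame->camera_ = camera;
        frame->color_  = cv::imread ( rgb_files[i] );              // RGB image of frame i
        frame->depth_  = cv::imread ( depth_files[i], -1 );        // depth image of frame i (assumed member)
        vo->addFrame ( frame );                                    // runs steps 1-6 above
        if ( vo->state_ == myslam::VisualOdometry::LOST )
            break;
        Sophus::SE3 T_w_c = frame->T_c_w_.inverse();               // estimated camera pose in the world frame
        std::cout << "camera position: " << T_w_c.translation().transpose() << std::endl;
    }
    return 0;
}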

The corresponding header and implementation:

visual_odometry.h

/*
 * <one line to give the program's name and a brief idea of what it does.>
 * Copyright (C) 2016  <copyright holder> <email>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 */

#ifndef VISUALODOMETRY_H
#define VISUALODOMETRY_H

#include "myslam/common_include.h"
#include "myslam/map.h"

#include <opencv2/features2d/features2d.hpp>

namespace myslam 
{
class VisualOdometry
{
public:
    typedef shared_ptr<VisualOdometry> Ptr;
    enum VOState {
        // enumeration of the VO state: initializing, tracking OK, lost
        INITIALIZING=-1,
        OK=0,
        LOST
    };
    // VO state, map (key-frames and map points), reference frame, current frame
    VOState     state_;     // current VO status
    Map::Ptr    map_;       // map with all frames and map points
    Frame::Ptr  ref_;       // reference frame 
    Frame::Ptr  curr_;      // current frame 
    // ORB extractor, current-frame keypoints and descriptors, reference-frame 3D points and descriptors, feature matches
    cv::Ptr<cv::ORB> orb_;  // orb detector and computer
    vector<cv::Point3f>     pts_3d_ref_;        // 3d points in reference frame
    vector<cv::KeyPoint>    keypoints_curr_;    // keypoints in current frame
    Mat                     descriptors_curr_;  // descriptors in current frame
    Mat                     descriptors_ref_;   // descriptors in reference frame
    vector<cv::DMatch>      feature_matches_;   // matches between the reference frame and the current frame
    // estimated pose, inlier count, consecutive-lost count
    SE3 T_c_r_estimated_;    // estimated transform from the reference frame to the current frame
    int num_inliers_;        // number of inlier features in PnP
    int num_lost_;           // number of consecutive lost frames
    
    // parameters: feature count, pyramid scale, good-match ratio, max consecutive lost frames, min inliers
    int num_of_features_;   // number of features to extract
    double scale_factor_;   // scale factor of the image pyramid
    int level_pyramid_;     // number of pyramid levels
    float match_ratio_;     // ratio for selecting good matches
    int max_num_lost_;      // max number of consecutive lost frames
    int min_inliers_;       // minimum number of inliers
    // key-frame selection criteria: minimum rotation and minimum translation; exceeding either one is enough
    double key_frame_min_rot;   // minimal rotation of two key-frames
    double key_frame_min_trans; // minimal translation of two key-frames
    
public: // functions 
    VisualOdometry();
    ~VisualOdometry();
    // core interface: add a new frame
    bool addFrame( Frame::Ptr frame );      // add a new frame 
    
protected:  
    // inner operation 
    // internal processing functions: feature extraction, matching, and pose estimation
    void extractKeyPoints();
    void computeDescriptors(); 
    void featureMatching();
    void poseEstimationPnP(); 
    void setRef3DPoints();
    // key-frame handling functions
    void addKeyFrame();
    bool checkEstimatedPose(); 
    bool checkKeyFrame();
    
};
}

#endif // VISUALODOMETRY_H

visual_odometry.cpp

/*
 * <one line to give the program's name and a brief idea of what it does.>
 * Copyright (C) 2016  <copyright holder> <email>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <algorithm>
#include <boost/timer.hpp>

#include "myslam/config.h"
#include "myslam/visual_odometry.h"

namespace myslam
{

VisualOdometry::VisualOdometry() :
    state_ ( INITIALIZING ), ref_ ( nullptr ), curr_ ( nullptr ), map_ ( new Map ), num_lost_ ( 0 ), num_inliers_ ( 0 )
{
    num_of_features_    = Config::get<int> ( "number_of_features" );
    scale_factor_       = Config::get<double> ( "scale_factor" );
    level_pyramid_      = Config::get<int> ( "level_pyramid" );
    match_ratio_        = Config::get<float> ( "match_ratio" );
    max_num_lost_       = Config::get<float> ( "max_num_lost" );
    min_inliers_        = Config::get<int> ( "min_inliers" );
    key_frame_min_rot   = Config::get<double> ( "keyframe_rotation" );
    key_frame_min_trans = Config::get<double> ( "keyframe_translation" );
    orb_ = cv::ORB::create ( num_of_features_, scale_factor_, level_pyramid_ );
}

VisualOdometry::~VisualOdometry()
{

}
// add a new frame and estimate its pose (dispatch on the three VO states)
bool VisualOdometry::addFrame ( Frame::Ptr frame )
{
    switch ( state_ )
    {
    case INITIALIZING: // state: initializing
    {
        state_ = OK;
        curr_ = ref_ = frame; // the first frame serves as both the current frame and the reference frame
        // insert this frame into the map as a key-frame (an entry in the keyframes_ container)
        map_->insertKeyFrame ( frame );
        // extract keypoints and compute descriptors for the first (current) frame
        extractKeyPoints();
        computeDescriptors();
        // compute the 3d position of features in ref frame 
        setRef3DPoints();
        break;
    }
    case OK:
    {
        curr_ = frame;
        extractKeyPoints();     // extract current-frame keypoints into keypoints_curr_
        computeDescriptors();   // compute current-frame descriptors into descriptors_curr_
        featureMatching();      // fill feature_matches_ with good matches
        poseEstimationPnP();    // obtain T_c_r_estimated_ and the inlier count
        if ( checkEstimatedPose() == true ) // a good estimation: enough inliers and a motion that is not too large
        {
            curr_->T_c_w_ = T_c_r_estimated_ * ref_->T_c_w_;  // T_c_w = T_c_r * T_r_w
            ref_ = curr_;       // the current frame becomes the new reference frame
            setRef3DPoints();   // set the reference 3D points for the next frame using the depth data
            num_lost_ = 0;      // reset the consecutive-lost counter
            if ( checkKeyFrame() == true ) // is a key-frame
            {
                addKeyFrame();  // insert a key-frame into the map
            }
        }
        else // bad estimation
        {
            num_lost_++;        // increment the consecutive-lost counter
            if ( num_lost_ > max_num_lost_ ) // too many consecutive lost frames: switch the VO state to LOST
            {
                state_ = LOST;
            }
            return false;
        }
        break;
    }
    case LOST: // tracking has been lost
    {
        cout<<"vo has lost."<<endl;
        break;
    }
    }

    return true;
}
// extract keypoints from the current frame
void VisualOdometry::extractKeyPoints()
{
    // detect keypoints in curr_->color_ and store them in keypoints_curr_
    orb_->detect ( curr_->color_, keypoints_curr_ );
}
// compute descriptors for the current frame
void VisualOdometry::computeDescriptors()
{
    // compute descriptors for the keypoints detected in curr_->color_
    orb_->compute ( curr_->color_, keypoints_curr_, descriptors_curr_ );
}
// match features between the reference frame and the current frame
void VisualOdometry::featureMatching()
{
    // match desp_ref and desp_curr, use OpenCV's brute force match 
    vector<cv::DMatch> matches;   // raw match results, before filtering
    cv::BFMatcher matcher ( cv::NORM_HAMMING );
    matcher.match ( descriptors_ref_, descriptors_curr_, matches );
    

    // select the best matches: find the minimum descriptor distance among all matches and store it in min_dis
    float min_dis = std::min_element (
                        matches.begin(), matches.end(),
                        [] ( const cv::DMatch& m1, const cv::DMatch& m2 )
    {
        return m1.distance < m2.distance;
    } )->distance;

    feature_matches_.clear();   // clear the matches from the previous frame
    // keep only matches whose distance is below the threshold
    for ( cv::DMatch& m : matches )
    {
        if ( m.distance < max<float> ( min_dis*match_ratio_, 30.0 ) )
        {
            feature_matches_.push_back(m);   // matches that pass the threshold go into feature_matches_
        }
    }
    cout<<"good matches: "<<feature_matches_.size()<<endl;
}

// set the reference-frame 3D points
// PnP needs 3D points in the reference frame and 2D points in the current frame,
// so whenever the current frame becomes the new reference frame, its 2D keypoints
// must be lifted to 3D using the depth data
void VisualOdometry::setRef3DPoints()
{
    // select the features with depth measurements 
    pts_3d_ref_.clear();
    descriptors_ref_ = Mat();
    // iterate over the current-frame keypoints
    for ( size_t i=0; i<keypoints_curr_.size(); i++ )
    {
        // look up the depth at this keypoint
        double d = ref_->findDepth(keypoints_curr_[i]);               
        if ( d > 0)
        {
            // back-project the pixel coordinate to a 3D point in the camera frame
            Vector3d p_cam = ref_->camera_->pixel2camera(
                Vector2d(keypoints_curr_[i].pt.x, keypoints_curr_[i].pt.y), d
            );
            // store the 3D coordinates as a cv::Point3f
            pts_3d_ref_.push_back( cv::Point3f( p_cam(0,0), p_cam(1,0), p_cam(2,0) ));
            // append the corresponding descriptor row to the reference descriptors
            descriptors_ref_.push_back(descriptors_curr_.row(i));
        }
    }
}
// pose estimation with PnP
// estimate the camera pose T from pts_3d_ref_, keypoints_curr_, feature_matches_ and the camera intrinsics
void VisualOdometry::poseEstimationPnP()
{
    // construct the 3D-2D observations:
    // 3D points from the reference frame and 2D pixel coordinates (.pt) from the current frame
    vector<cv::Point3f> pts3d;
    vector<cv::Point2f> pts2d;
    
    for ( cv::DMatch m:feature_matches_ )
    {
        pts3d.push_back( pts_3d_ref_[m.queryIdx] );//query=ref
        pts2d.push_back( keypoints_curr_[m.trainIdx].pt );//train=curr
    }
    // camera intrinsic matrix
    Mat K = ( cv::Mat_<double>(3,3)<<
        ref_->camera_->fx_, 0, ref_->camera_->cx_,
        0, ref_->camera_->fy_, ref_->camera_->cy_,
        0,0,1
    );
    // rotation vector, translation vector, and inlier index array
    Mat rvec, tvec, inliers;
    // core call: solve PnP with RANSAC
    cv::solvePnPRansac( pts3d, pts2d, K, Mat(), rvec, tvec, false, 100, 4.0, 0.99, inliers );
    // inliers.rows is the number of matches consistent with the RANSAC pose estimate
    num_inliers_ = inliers.rows;
    cout<<"pnp inliers: "<<num_inliers_<<endl;
    // build T of the current frame relative to the reference frame from rvec and tvec
    T_c_r_estimated_ = SE3(
        SO3(rvec.at<double>(0,0), rvec.at<double>(1,0), rvec.at<double>(2,0)), 
        Vector3d( tvec.at<double>(0,0), tvec.at<double>(1,0), tvec.at<double>(2,0))
    );
}
// check the estimated pose
// a simple sanity check on the number of inliers and the size of the motion:
// there must be enough inliers, and the motion must not be unreasonably large
bool VisualOdometry::checkEstimatedPose()
{
    // check if the estimated pose is good
    if ( num_inliers_ < min_inliers_ ) // too few inliers
    {
        cout<<"reject because inlier is too small: "<<num_inliers_<<endl;
        return false;
    }
    // if the motion is too large, it is probably wrong: check the norm of the se(3) log of T
    Sophus::Vector6d d = T_c_r_estimated_.log();
    if ( d.norm() > 5.0 )
    {
        cout<<"reject because motion is too large: "<<d.norm()<<endl;
        return false;
    }
    return true;
}
// check whether this is a key-frame: a sufficiently large rotation or translation in T qualifies
bool VisualOdometry::checkKeyFrame()
{
    Sophus::Vector6d d = T_c_r_estimated_.log();
    Vector3d trans = d.head<3>();
    Vector3d rot = d.tail<3>();
    if ( rot.norm() >key_frame_min_rot || trans.norm() >key_frame_min_trans )
        return true;
    return false;
}
// add the current frame to the map as a key-frame
void VisualOdometry::addKeyFrame()
{
    cout<<"adding a key-frame"<<endl;
    map_->insertKeyFrame ( curr_ );
}

}
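
The constructor reads its parameters through Config::get, so the parameter file passed to the program must define the matching keys. Below is a plausible default.yaml, assuming Config is backed by an OpenCV FileStorage YAML file; the key names are taken directly from the constructor above, while the values are only illustrative defaults, not prescribed by the original project.

%YAML:1.0
# key names match the Config::get calls in the VisualOdometry constructor; values are illustrative only
number_of_features: 500
scale_factor: 1.2
level_pyramid: 8
match_ratio: 2.0
max_num_lost: 10
min_inliers: 10
keyframe_rotation: 0.1
keyframe_translation: 0.1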

References:

《SLAM十四讲》(14 Lectures on Visual SLAM)

SLAM十四讲---前端0.2---基本的VO---两两帧的视觉里程计 (CSDN): https://blog.csdn.net/weixin_44684139/article/details/105278421

《视觉SLAM十四讲》源码详细注解---/project/0.2/visual_odometry (CSDN): https://blog.csdn.net/weixin_36673043/article/details/85703200
