ORB-SLAM3 Monocular-Inertial: ROS → System → Track

ROS

  • The code base is split into a ROS interface layer and the ROS-independent core library.
  • The ROS entry point main() lives in ROS/ORB_SLAM3/src/:

ros_mono_inertial.cc

Function

  • main(): reads the arguments, creates the SLAM system, subscribes to the two topics, and spawns the sync thread.
  • Callbacks: only push the incoming messages into queues, under a mutex.
  • Sync thread: polls every 1 ms; it first synchronizes image and IMU data, then runs tracking.

Main function

  1. Parse the command-line arguments (argc/argv).
  2. Create the SLAM system; it initializes all system threads and gets ready to process frames.
  3. Subscribe to the IMU and camera topics; the callbacks push the data into queues.
  4. Start the image-IMU data-synchronization thread.
int main(int argc, char **argv)
{
  ros::init(argc, argv, "Mono_Inertial");
  ros::NodeHandle n("~");
  ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::levels::Info);
  bool bEqual = false;
  if(argc < 3 || argc > 4)
  {
    cerr << endl << "Usage: rosrun ORB_SLAM3 Mono_Inertial path_to_vocabulary path_to_settings [do_equalize]" << endl;
    ros::shutdown();
    return 1;
  }

  if(argc==4)
  {
    std::string sbEqual(argv[3]);
    if(sbEqual == "true")
      bEqual = true;
  }

  // Create SLAM system. It initializes all system threads and gets ready to process frames.
  ORB_SLAM3::System SLAM(argv[1],argv[2],ORB_SLAM3::System::IMU_MONOCULAR,true);

  ImuGrabber imugb;
  ImageGrabber igb(&SLAM,&imugb,bEqual); // TODO
  
  // Maximum delay, 5 seconds
  ros::Subscriber sub_imu = n.subscribe("/imu", 1000, &ImuGrabber::GrabImu, &imugb); 
  ros::Subscriber sub_img0 = n.subscribe("/camera/image_raw", 100, &ImageGrabber::GrabImage,&igb);

  std::thread sync_thread(&ImageGrabber::SyncWithImu,&igb);

  ros::spin();

  return 0;
}
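Following the Usage string above, a typical invocation would be rosrun ORB_SLAM3 Mono_Inertial ORBvoc.txt EuRoC.yaml true (paths depend on your setup); the optional third argument do_equalize enables the CLAHE equalization described below, and the /imu and /camera/image_raw topics can be remapped to match your sensor or bag.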

IMU grabber

class ImuGrabber
{
public:
    ImuGrabber(){};
    void GrabImu(const sensor_msgs::ImuConstPtr &imu_msg);

    queue<sensor_msgs::ImuConstPtr> imuBuf;
    std::mutex mBufMutex;
};
// Simply push each message into imuBuf
void ImuGrabber::GrabImu(const sensor_msgs::ImuConstPtr &imu_msg)
{
  mBufMutex.lock();
  imuBuf.push(imu_msg);
  mBufMutex.unlock();
  return;
}

Image grabber

class ImageGrabber
{
public:
    ImageGrabber(ORB_SLAM3::System* pSLAM, ImuGrabber *pImuGb, const bool bClahe): mpSLAM(pSLAM), mpImuGb(pImuGb), mbClahe(bClahe){}

    void GrabImage(const sensor_msgs::ImageConstPtr& msg);
    cv::Mat GetImage(const sensor_msgs::ImageConstPtr &img_msg);
    void SyncWithImu();

    queue<sensor_msgs::ImageConstPtr> img0Buf;
    std::mutex mBufMutex;
   
    ORB_SLAM3::System* mpSLAM;
    ImuGrabber *mpImuGb;

    const bool mbClahe;
    cv::Ptr<cv::CLAHE> mClahe = cv::createCLAHE(3.0, cv::Size(8, 8));
};

// The callback just pushes the message into the queue, keeping only the most recent image
void ImageGrabber::GrabImage(const sensor_msgs::ImageConstPtr &img_msg)
{
  mBufMutex.lock();
  if (!img0Buf.empty())
    img0Buf.pop();
  img0Buf.push(img_msg);
  mBufMutex.unlock();
}

Image-IMU synchronization

Steps:

  1. Make sure the IMU buffer covers the image timestamp; otherwise continue.
  2. Pop the image and collect the IMU measurements up to its timestamp into a vector.
  3. Apply CLAHE histogram equalization to the image if the do_equalize flag was set.
void ImageGrabber::SyncWithImu()
{
  while(1)
  {
    cv::Mat im;
    double tIm = 0;
    if (!img0Buf.empty()&&!mpImuGb->imuBuf.empty())
    {
      tIm = img0Buf.front()->header.stamp.toSec();
      // Ensure the newest IMU timestamp is not older than the image timestamp
      if(tIm>mpImuGb->imuBuf.back()->header.stamp.toSec())
          continue;
      {  // Pop the image
      this->mBufMutex.lock();
      im = GetImage(img0Buf.front());
      img0Buf.pop();
      this->mBufMutex.unlock();
      }

	  // Collect the IMU samples with timestamps <= the image time: {time, linear acceleration, angular velocity}
      vector<ORB_SLAM3::IMU::Point> vImuMeas;
      mpImuGb->mBufMutex.lock();
      if(!mpImuGb->imuBuf.empty())
      {
        // Load imu measurements from buffer
        vImuMeas.clear();
        while(!mpImuGb->imuBuf.empty() && mpImuGb->imuBuf.front()->header.stamp.toSec()<=tIm)
        {
          double t = mpImuGb->imuBuf.front()->header.stamp.toSec();
          cv::Point3f acc(mpImuGb->imuBuf.front()->linear_acceleration.x, mpImuGb->imuBuf.front()->linear_acceleration.y, mpImuGb->imuBuf.front()->linear_acceleration.z);
          cv::Point3f gyr(mpImuGb->imuBuf.front()->angular_velocity.x, mpImuGb->imuBuf.front()->angular_velocity.y, mpImuGb->imuBuf.front()->angular_velocity.z);
          vImuMeas.push_back(ORB_SLAM3::IMU::Point(acc,gyr,t));
          mpImuGb->imuBuf.pop();
        }
      }
      mpImuGb->mBufMutex.unlock();
      if(mbClahe)
        mClahe->apply(im,im);
     
      mpSLAM->TrackMonocular(im,tIm,vImuMeas);
    }

    std::chrono::milliseconds tSleep(1);
    std::this_thread::sleep_for(tSleep);
  }
}
// cv::CLAHE is another histogram-equalization algorithm. CLAHE differs from AHE in that it limits the contrast amplification per region and uses interpolation to speed up the computation. It effectively enhances local image contrast and brings out more edge information, which helps segmentation, and it also mitigates the noise amplification of AHE. Another common use of CLAHE is image dehazing.
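As a minimal standalone sketch of what mClahe->apply(im, im) does above (assuming OpenCV is available; the clip limit 3.0 and 8x8 tile grid mirror the ImageGrabber member initializer; "frame.png" is a placeholder input):

#include <opencv2/opencv.hpp>

int main()
{
  // Load a grayscale test image (placeholder file name)
  cv::Mat im = cv::imread("frame.png", cv::IMREAD_GRAYSCALE);
  if(im.empty())
    return 1;
  // Same parameters as the ImageGrabber member: clip limit 3.0, 8x8 tiles
  cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(3.0, cv::Size(8, 8));
  cv::Mat out;
  clahe->apply(im, out); // contrast-limited adaptive histogram equalization
  cv::imwrite("frame_clahe.png", out);
  return 0;
}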

ROS image → cv::Mat

cv::Mat ImageGrabber::GetImage(const sensor_msgs::ImageConstPtr &img_msg)
{
  // Copy the ros image message to cv::Mat.
  cv_bridge::CvImageConstPtr cv_ptr;
  try
  {
    cv_ptr = cv_bridge::toCvShare(img_msg, sensor_msgs::image_encodings::MONO8);
  }
  catch (cv_bridge::Exception& e)
  {
    ROS_ERROR("cv_bridge exception: %s", e.what());
  }
  
  if(cv_ptr->image.type()==0)
  {
    return cv_ptr->image.clone();
  }
  else
  {
    std::cout << "Error type" << std::endl;
    return cv_ptr->image.clone();
  }
}

ORB_SLAM3::System

Function

  • main() instantiates the System class, which creates the tracking entry point, the local mapping thread, and the loop-closing thread.
    • Tracking itself is driven from the image-IMU sync thread.
  • System.cc mainly covers SLAM system initialization, the entry points into tracking, and trajectory saving.

Constructor

  1. Build the ORB vocabulary object and load the pre-trained vocabulary.
  2. Create the keyframe database and the Atlas (the multi-map container).
  3. Create the frame and map drawer classes used by the Viewer.
  4. Create the Tracking object.
  5. Create and launch the local mapping thread.
  6. Create and launch the loop-closing thread.
System::System(
const string &strVocFile  // path to the vocabulary file
, const string &strSettingsFile // path to the settings file
, const eSensor sensor  // sensor type
,const bool bUseViewer // whether to launch the viewer
, const int initFr // initial frame id
, const string &strSequence 
, const string &strLoadingFile):
    mSensor(sensor), mpViewer(static_cast<Viewer*>(NULL)), mbReset(false), mbResetActiveMap(false),
    mbActivateLocalizationMode(false), mbDeactivateLocalizationMode(false)
{
    // Print which sensor is in use
    if(mSensor==MONOCULAR) // monocular
        cout << "Monocular" << endl;
    else if(mSensor==STEREO) // stereo
        cout << "Stereo" << endl;
    else if(mSensor==RGBD)
        cout << "RGB-D" << endl;
    else if(mSensor==IMU_MONOCULAR) // monocular + IMU
        cout << "Monocular-Inertial" << endl;
    else if(mSensor==IMU_STEREO) // stereo + IMU
        cout << "Stereo-Inertial" << endl;

    //Check settings file: open the settings file read-only
    cv::FileStorage fsSettings(strSettingsFile.c_str(), cv::FileStorage::READ);
    // If it fails to open, print an error and exit
    if(!fsSettings.isOpened())
    { 
       cerr << "Failed to open settings file at: " << strSettingsFile << endl;
       exit(-1);
    }

    // Flag for loading an Atlas from disk; currently unused
    bool loadedAtlas = false;
    
    //Load ORB Vocabulary
    cout << endl << "Loading ORB Vocabulary. This could take a while..." << endl;
    // Create a new ORB vocabulary and load the pre-trained bag-of-words file
    mpVocabulary = new ORBVocabulary();
    bool bVocLoad = mpVocabulary->loadFromTextFile(strVocFile);
    // If loading fails, print an error and exit
    if(!bVocLoad)
    {
        cerr << "Wrong path to vocabulary. " << endl;
        cerr << "Falied to open at: " << strVocFile << endl;
        exit(-1);
    }
    cout << "Vocabulary loaded!" << endl << endl;

    //Create KeyFrame Database
    mpKeyFrameDatabase = new KeyFrameDatabase(*mpVocabulary);

    //Create the Atlas; the argument 0 is the initial keyframe id
    mpAtlas = new Atlas(0);

	// If an IMU is used, tell the Atlas about the inertial sensor
    if (mSensor==IMU_STEREO || mSensor==IMU_MONOCULAR)
        mpAtlas->SetInertialSensor();

    // The frame drawer and map drawer are used by the Viewer
    mpFrameDrawer = new FrameDrawer(mpAtlas);
    mpMapDrawer = new MapDrawer(mpAtlas, strSettingsFile);

    //Initialize the Tracking object
    //(it lives in the main execution thread, i.e. the thread that calls this constructor)
    cout << "Seq. Name: " << strSequence << endl;
    mpTracker = new Tracking(this, mpVocabulary, mpFrameDrawer, mpMapDrawer,
                             mpAtlas, mpKeyFrameDatabase, strSettingsFile, mSensor, strSequence);
    
    // Create and launch the local mapping thread; the thread function is LocalMapping::Run()
    mpLocalMapper = new LocalMapping(this, mpAtlas, mSensor==MONOCULAR || mSensor==IMU_MONOCULAR, mSensor==IMU_MONOCULAR || mSensor==IMU_STEREO, strSequence);
    mptLocalMapping = new thread(&ORB_SLAM3::LocalMapping::Run,mpLocalMapper);
    // Threshold separating close and far points
    mpLocalMapper->mThFarPoints = fsSettings["thFarPoints"];
    if(mpLocalMapper->mThFarPoints!=0)
    {
        cout << "Discard points further than " << mpLocalMapper->mThFarPoints << " m from current camera" << endl;
        mpLocalMapper->mbFarPoints = true;
    }
    else
        mpLocalMapper->mbFarPoints = false;

    // Create and launch the loop-closing thread; the thread function is LoopClosing::Run()
    mpLoopCloser = new LoopClosing(mpAtlas, mpKeyFrameDatabase, mpVocabulary, mSensor!=MONOCULAR); // mSensor!=MONOCULAR);
    mptLoopClosing = new thread(&ORB_SLAM3::LoopClosing::Run, mpLoopCloser);

    //Initialize the Viewer thread and launch
    if(bUseViewer) // If requested, launch the viewer thread
    {
        mpViewer = new Viewer(this, mpFrameDrawer,mpMapDrawer,mpTracker,strSettingsFile);
        mptViewer = new thread(&Viewer::Run, mpViewer);
        mpTracker->SetViewer(mpViewer);
        mpLoopCloser->mpViewer = mpViewer;
        mpViewer->both = mpFrameDrawer->both;
    }

    //Set pointers between threads
    mpTracker->SetLocalMapper(mpLocalMapper);
    mpTracker->SetLoopClosing(mpLoopCloser);

    mpLocalMapper->SetTracker(mpTracker);
    mpLocalMapper->SetLoopCloser(mpLoopCloser);

    mpLoopCloser->SetTracker(mpTracker);
    mpLoopCloser->SetLocalMapper(mpLocalMapper);

    // Fix verbosity: only messages at or below this level are printed
    Verbose::SetTh(Verbose::VERBOSITY_QUIET);

}

TrackMonocular (monocular tracking)

cv::Mat System::TrackMonocular(const cv::Mat &im, const double &timestamp, const vector<IMU::Point>& vImuMeas, string filename)
{   // Monocular and monocular-inertial only
    if(mSensor!=MONOCULAR && mSensor!=IMU_MONOCULAR)
    {
        cerr << "ERROR: you called TrackMonocular but input sensor was not set to Monocular nor Monocular-Inertial." << endl;
        exit(-1);
    }

    // Check mode change
    {
        unique_lock<mutex> lock(mMutexMode);
        if(mbActivateLocalizationMode) // Activate localization-only mode? Local mapping will be stopped
        {  
            mpLocalMapper->RequestStop(); // request the local mapping thread to stop

            // Wait until Local Mapping has effectively stopped
            while(!mpLocalMapper->isStopped())
            {
                usleep(1000);
            }
            // At this point local mapping has effectively stopped
			// Tell Tracking to perform tracking only, no mapping
            mpTracker->InformOnlyTracking(true);
            // Reset the flag to avoid repeating this
            mbActivateLocalizationMode = false;
        }
        if(mbDeactivateLocalizationMode) // Deactivate localization-only mode? Local mapping is released again
        {   // tracking-only off, release local mapping, avoid repetition
            mpTracker->InformOnlyTracking(false);
            mpLocalMapper->Release(); // local mapping resumes work
            mbDeactivateLocalizationMode = false; // avoid repeating this
        }
    }

    // Check reset
    {
        unique_lock<mutex> lock(mMutexReset);
        if(mbReset) // Full reset requested?
        {	// reset the tracker and clear both flags
            mpTracker->Reset();
            mbReset = false;
            mbResetActiveMap = false;
        }
        else if(mbResetActiveMap) // Reset only the active map?
        {
            cout << "SYSTEM-> Reseting active map in monocular case" << endl;
            mpTracker->ResetActiveMap();
            mbResetActiveMap = false;
        }
    }
	
	// In monocular-inertial mode, feed the IMU measurements to the tracker
    if (mSensor == System::IMU_MONOCULAR)
        for(size_t i_imu = 0; i_imu < vImuMeas.size(); i_imu++)
            mpTracker->GrabImuData(vImuMeas[i_imu]);
	
	// Run tracking and obtain the estimated camera pose (this is the motion-estimation entry point)
    cv::Mat Tcw = mpTracker->GrabImageMonocular(im,timestamp,filename);

    unique_lock<mutex> lock2(mMutexState);
    // Record the tracking state
    mTrackingState = mpTracker->mState; 
    // Map points tracked in the current frame
    mTrackedMapPoints = mpTracker->mCurrentFrame.mvpMapPoints;
    // Undistorted keypoints of the current frame
    mTrackedKeyPointsUn = mpTracker->mCurrentFrame.mvKeysUn;
	
	// Return the estimated camera pose
    return Tcw; 
}

TrackStereo

  • Stereo or stereo + IMU

TrackRGBD

  • RGB-D

Shutdown

void System::Shutdown()
{
	// Send finish requests to the local mapping and loop-closing threads
    mpLocalMapper->RequestFinish();
    mpLoopCloser->RequestFinish();
    // If the viewer is in use,
    if(mpViewer)
    {	// ask it to finish
        mpViewer->RequestFinish();
        // and wait until it has actually stopped
        while(!mpViewer->isFinished())
            usleep(5000);
    }

    // Wait until all thread have effectively stopped
    while(!mpLocalMapper->isFinished() || !mpLoopCloser->isFinished() || mpLoopCloser->isRunningGBA())
    {
        if(!mpLocalMapper->isFinished())
            cout << "mpLocalMapper is not finished" << endl;
        if(!mpLoopCloser->isFinished())
            cout << "mpLoopCloser is not finished" << endl;
        if(mpLoopCloser->isRunningGBA()){
            cout << "mpLoopCloser is running GBA" << endl;
            cout << "break anyway..." << endl;
            break;
        }
        usleep(5000);
    }

    if(mpViewer) 
        pangolin::BindToContext("ORB-SLAM2: Map Viewer");

#ifdef REGISTER_TIMES
    mpTracker->PrintTimeStats();
#endif
}

SaveTrajectoryTUM (save the trajectory in TUM format)

  • Saves the pose of every successfully localized frame in TUM format (timestamp tx ty tz qx qy qz qw).
  • Frame poses are stored relative to their reference keyframe (which is optimized by BA and the pose graph), so we first need the keyframe pose and then concatenate the relative transformation. Frames that were not localized (tracking failure) are not saved.
void System::SaveTrajectoryTUM(const string &filename)
{
    cout << endl << "Saving camera trajectory to " << filename << " ..." << endl;
    // Only works for stereo or RGB-D sensors, not monocular
    if(mSensor==MONOCULAR)
    {
        cerr << "ERROR: SaveTrajectoryTUM cannot be used for monocular." << endl;
        return;
    }
	// Get all keyframes in the Atlas and sort them by id
    vector<KeyFrame*> vpKFs = mpAtlas->GetAllKeyFrames();
    sort(vpKFs.begin(),vpKFs.end(),KeyFrame::lId);

    // Transform all keyframes so that the first keyframe is at the origin.
    // After a loop closure the first keyframe might not be at the origin.
    cv::Mat Two = vpKFs[0]->GetPoseInverse();

    ofstream f;
    f.open(filename.c_str());
    // Print floating-point numbers in fixed notation instead of scientific notation
    f << fixed;

    // Frame pose is stored relative to its reference keyframe (which is optimized by BA and pose graph).
    // We need to get first the keyframe pose and then concatenate the relative transformation.
    // Frames not localized (tracking failure) are not saved.

    // For each frame we have a reference keyframe (lRit), the timestamp (lT) and a flag
    // which is true when tracking failed (lbL).
    list<ORB_SLAM3::KeyFrame*>::iterator lRit = mpTracker->mlpReferences.begin();  // reference keyframes
    list<double>::iterator lT = mpTracker->mlFrameTimes.begin(); // timestamps
    list<bool>::iterator lbL = mpTracker->mlbLost.begin(); // lost flags
    for(list<cv::Mat>::iterator lit=mpTracker->mlRelativeFramePoses.begin(),
        lend=mpTracker->mlRelativeFramePoses.end();lit!=lend;lit++, lRit++, lT++, lbL++)
    {
        if(*lbL) // Skip frames where tracking failed
            continue;

        KeyFrame* pKF = *lRit; // the reference keyframe
        cv::Mat Trw = cv::Mat::eye(4,4,CV_32F);

        // If the reference keyframe was culled, traverse the spanning tree to get a suitable keyframe.
        // If the reference keyframe is bad (possibly culled as redundant), walk up to its parent
        // and accumulate the relative transform to the original reference keyframe
        while(pKF->isBad()) 
        {	// accumulate the child-to-parent transform
            Trw = Trw*pKF->mTcp;
            // and move on to the parent keyframe
            pKF = pKF->GetParent();
        }
		// Trw now maps the world frame (first keyframe) to the reference keyframe
        Trw = Trw*pKF->GetPose()*Two;
		// Compose with the stored relative pose to get the current frame w.r.t. the world
        cv::Mat Tcw = (*lit)*Trw;
        // Extract the camera-to-world rotation
        cv::Mat Rwc = Tcw.rowRange(0,3).colRange(0,3).t();
        // and the camera center in world coordinates
        cv::Mat twc = -Rwc*Tcw.rowRange(0,3).col(3);
		// Represent the rotation as a quaternion
        vector<float> q = Converter::toQuaternion(Rwc);
		// and write everything out in TUM format
        f << setprecision(6) << *lT << " " <<  setprecision(9) << twc.at<float>(0) << " " << twc.at<float>(1) << " " << twc.at<float>(2) << " " << q[0] << " " << q[1] << " " << q[2] << " " << q[3] << endl;
    }
    f.close();
    // cout << endl << "trajectory saved!" << endl;
}
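The chaining in the loop above, written compactly in LaTeX (a sketch; symbols follow the code: $T_{cr}$ is the stored relative pose *lit, $T_{p_k c_k}$ is mTcp, and $T_{wo}$ is Two):

$$T_{rw} = \Big(\prod_k T_{p_k c_k}\Big)\, T_{rw}^{KF}\, T_{wo}, \qquad T_{cw} = T_{cr}\, T_{rw}$$
$$R_{wc} = R_{cw}^{\top}, \qquad t_{wc} = -R_{cw}^{\top}\, t_{cw}$$

where $T_{rw}^{KF}$ = pKF->GetPose() is the (possibly re-parented) reference keyframe pose; each saved line is then the camera-to-world pose $(t_{wc}, q_{wc})$ at the frame's timestamp.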

Other save functions

  • Save all keyframe poses in TUM format:
    void SaveKeyFrameTrajectoryTUM(const string &filename);
  • Save all frame poses in EuRoC format:
    void SaveTrajectoryEuRoC(const string &filename);
  • Save all keyframe poses in EuRoC format:
    void SaveKeyFrameTrajectoryEuRoC(const string &filename);
  • Save all frame poses in KITTI format:
    void SaveTrajectoryKITTI(const string &filename);
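A minimal sketch of how these are typically called at the end of a run (assuming a System instance named SLAM; the file names are arbitrary):

// Stop all threads first, then dump the trajectories to disk
SLAM.Shutdown();
SLAM.SaveKeyFrameTrajectoryTUM("KeyFrameTrajectory_TUM.txt");
SLAM.SaveTrajectoryEuRoC("CameraTrajectory_EuRoC.txt");
SLAM.SaveKeyFrameTrajectoryEuRoC("KeyFrameTrajectory_EuRoC.txt");
// SaveTrajectoryTUM / SaveTrajectoryKITTI reject pure monocular runs (see above)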

Tracking


Core functions

Constructor

namespace ORB_SLAM3
{

Tracking::Tracking(
System *pSys   // the System's this pointer
, ORBVocabulary* pVoc  // BoW vocabulary (already loaded)
, FrameDrawer *pFrameDrawer // frame drawer
, MapDrawer *pMapDrawer // map drawer
, Atlas *pAtlas // Atlas handle
, KeyFrameDatabase* pKFDB  // keyframe bag-of-words database
, const string &strSettingPath // settings file path
, const int sensor // sensor type
, const string &_nameSeq):
    mState(NO_IMAGES_YET) // no image received yet
    , mSensor(sensor), mTrackedFr(0), mbStep(false),
    mbOnlyTracking(false) // SLAM mode, not localization-only
    , mbMapUpdated(false), mbVO(false), // in localization-only mode, mbVO reflects how good the tracking is
    mpORBVocabulary(pVoc), mpKeyFrameDB(pKFDB),
    mpInitializer(static_cast<Initializer*>(NULL)), // the map initializer starts as a null pointer
    mpSystem(pSys), mpViewer(NULL),
    mpFrameDrawer(pFrameDrawer), mpMapDrawer(pMapDrawer), mpAtlas(pAtlas), mnLastRelocFrameId(0), time_recently_lost(5.0), time_recently_lost_visual(2.0),
    mnInitialFrameId(0), mbCreatedMap(false), mnFirstFrameId(0), mpCamera2(nullptr)
{
    // Load camera parameters from settings file
    cv::FileStorage fSettings(strSettingPath, cv::FileStorage::READ);
	
	// Step 1: load the camera parameters from the settings file
    // Two camera models are supported:
    // 		1. PinHole: fx, fy, cx, cy, k1, k2, p1, p2 (optionally k3)
    //  	2. KannalaBrandt8: fx, fy, cx, cy, k1, k2, k3, k4; nothing else is supported
    // STEREO or IMU_STEREO: bf
    // fps , fps==0 ? 30 : fps
    // RGB ? RGB : BGR
    // STEREO || RGBD || IMU_STEREO ? ThDepth (depth threshold separating close/far points)
    // RGBD ? DepthMapFactor
    // Returns true only if all required entries exist and are valid; false otherwise
    bool b_parse_cam = ParseCamParamFile(fSettings);
    if(!b_parse_cam) // print an error if parsing failed
    {
        std::cout << "*Error with the camera parameters in the config file*" << std::endl;
    }

	// Step 2: load the ORB feature parameters and construct the ORB extractors
	// nFeatures: 1000  -- features extracted per frame
	// scaleFactor: 1.2 -- scale factor between pyramid levels
	// nLevels: 8       -- number of pyramid levels
	// iniThFAST: 20    -- default FAST threshold
	// minThFAST: 7     -- fallback threshold when the default finds too few FAST corners
	// Extractors: the left one is always built; stereo / IMU-stereo also build the right one,
	// and monocular / IMU-monocular also build mpIniORBextractor with 5x the features of the left/right ones
    bool b_parse_orb = ParseORBParamFile(fSettings);
    if(!b_parse_orb)
    {
        std::cout << "*Error with the ORB parameters in the config file*" << std::endl;
    }

    initID = 0; lastID = 0;

    // Load IMU parameters 
    bool b_parse_imu = true; // load the IMU parameters
    if(sensor==System::IMU_MONOCULAR || sensor==System::IMU_STEREO) // only when an IMU is used
    {   // extrinsics Tbc (4x4)
    	///< **IMU noise (Use those from VINS-mono)**
		// IMU.NoiseGyro: 0.00016 # rad/s^0.5 
		// IMU.NoiseAcc: 0.0028 # m/s^1.5
		// IMU.GyroWalk: 0.000022 # rad/s^1.5
		// IMU.AccWalk: 0.00086 # m/s^2.5
		// IMU.Frequency: 200
        b_parse_imu = ParseIMUParamFile(fSettings);
        if(!b_parse_imu)
        {
            std::cout << "*Error with the IMU parameters in the config file*" << std::endl;
        }

        mnFramesToResetIMU = mMaxFrames;
    }

    mbInitWith3KFs = false;

    mnNumDataset = 0;

    if(!b_parse_cam || !b_parse_orb || !b_parse_imu)
    {
        std::cerr << "**ERROR in the config file, the format is not correct**" << std::endl;
        try
        {
            throw -1;
        }
        catch(exception &e)
        {

        }
    }

#ifdef REGISTER_TIMES
    vdRectStereo_ms.clear();
    vdORBExtract_ms.clear();
    vdStereoMatch_ms.clear();
    vdIMUInteg_ms.clear();
    vdPosePred_ms.clear();
    vdLMTrack_ms.clear();
    vdNewKF_ms.clear();
    vdTrackTotal_ms.clear();

    vdUpdatedLM_ms.clear();
    vdSearchLP_ms.clear();
    vdPoseOpt_ms.clear();
#endif

    vnKeyFramesLM.clear();
    vnMapPointsLM.clear();
}
  • Example of checking that a settings entry exists and is a real number:

     // Camera calibration parameters
     cv::FileNode node = fSettings["Camera.fx"];
     if(!node.empty() && node.isReal())
     {
         fx = node.real();
     }
     else
     {
         std::cerr << "*Camera.fx parameter doesn't exist or is not a real number*" << std::endl;
         b_miss_params = true;
     }
    

GrabImageMonocular

  • Input: the left RGB or RGBA image
    1. Convert the image to grayscale (mImGray) and construct mCurrentFrame.
    2. Run the tracking pipeline.
  • Output: the transform from the world frame to the camera frame of this image (Tcw).
cv::Mat Tracking::GrabImageMonocular(const cv::Mat &im, const double &timestamp, string filename)
{
    mImGray = im;

	// Step 1: convert the RGB or RGBA image to grayscale
    if(mImGray.channels()==3)
    {
        if(mbRGB)
            cvtColor(mImGray,mImGray,cv::COLOR_RGB2GRAY);
        else
            cvtColor(mImGray,mImGray,cv::COLOR_BGR2GRAY);
    }
    else if(mImGray.channels()==4)
    {
        if(mbRGB)
            cvtColor(mImGray,mImGray,cv::COLOR_RGBA2GRAY);
        else
            cvtColor(mImGray,mImGray,cv::COLOR_BGRA2GRAY);
    }

	// Step 2: construct the Frame  
    if (mSensor == System::MONOCULAR)   // pure monocular
    {	// If not initialized, no images yet, or too few frames since the initial one,
    	// use the initialization extractor; otherwise the normal ORB extractor (they extract different feature counts)
        if(mState==NOT_INITIALIZED || mState==NO_IMAGES_YET ||(lastID - initID) < mMaxFrames)
            mCurrentFrame = Frame(mImGray,timestamp,mpIniORBextractor,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth);
        else  
            mCurrentFrame = Frame(mImGray,timestamp,mpORBextractorLeft,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth);
    }
    else if(mSensor == System::IMU_MONOCULAR) // monocular-inertial
    {
    	// If not initialized or no images yet, use the initialization extractor; otherwise the normal one
        if(mState==NOT_INITIALIZED || mState==NO_IMAGES_YET)
        {
            mCurrentFrame = Frame(mImGray,timestamp,mpIniORBextractor,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth,&mLastFrame,*mpImuCalib);
        }
        else
            mCurrentFrame = Frame(mImGray,timestamp,mpORBextractorLeft,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth,&mLastFrame,*mpImuCalib);
    }

    if (mState==NO_IMAGES_YET)
        t0=timestamp;

    mCurrentFrame.mNameFile = filename;
    mCurrentFrame.mnDataset = mnNumDataset;

#ifdef REGISTER_TIMES
    vdORBExtract_ms.push_back(mCurrentFrame.mTimeORB_Ext);
#endif

    lastID = mCurrentFrame.mnId;
	// Step 3: track
    Track();
	// Return the current frame's pose
    return mCurrentFrame.mTcw.clone();
}

Track (the core tracking routine)

  1. If the local mapper flagged bad IMU data (mbBadImu), reset the active map (ResetActiveMap) and return.
  2. Get the current map from the Atlas.
  3. If mState != NO_IMAGES_YET:
    1. If the current frame's timestamp jumps backwards, clear the IMU queue, create a new map in the Atlas (CreateMapInAtlas), and return.
    2. If the current frame is more than 1 s newer than the last frame, return (after the following, which only runs when the Atlas uses an IMU):
      1. If the IMU is not yet initialized, reset the active map (ResetActiveMap).
      2. If the IMU is initialized: when GetIniertialBA2 succeeded, create a new map in the Atlas (CreateMapInAtlas); otherwise reset the active map (ResetActiveMap).
  4. If an IMU is used and a last keyframe exists, set the current frame's bias from that keyframe.
  5. If mState == NO_IMAGES_YET, set mState = NOT_INITIALIZED.
  6. If an IMU is used and no map was just created, run the IMU preintegration (two preintegrations, see PreintegrateIMU).
  7. Bookkeeping:
    1. mLastProcessedState = mState;
    2. mbCreatedMap = false;
  8. If mState == NOT_INITIALIZED:
    1. Initialize according to the sensor, e.g. MonocularInitialization.
    2. Update the frame drawer: mpFrameDrawer->Update(this).
    3. If initialization did not succeed, set mLastFrame = mCurrentFrame and return.
    4. If the Atlas holds exactly one map, mnFirstFrameId = mCurrentFrame.mnId.
  9. Otherwise, the system is initialized:
    1. If not in localization-only mode (SLAM mode):
      1. If mState == OK:
        1. CheckReplacedInLastFrame: swap in the map points of the last frame that local mapping has replaced.
        2. Pose tracking:
          1. If the motion model is empty and the IMU is not initialized, or we relocalized within the last two frames, TrackReferenceKeyFrame.
          2. Otherwise TrackWithMotionModel, falling back to TrackReferenceKeyFrame on failure.
        3. If tracking failed:
          1. if we just relocalized and are in IMU mode, set mState = LOST;
          2. else if the map holds more than 10 keyframes, mState = RECENTLY_LOST;
          3. else mState = LOST.
      2. If mState != OK:
        1. If recently lost (mState == RECENTLY_LOST):
          1. bOK = true;
          2. In IMU mode: if the IMU is initialized, predict the pose with PredictStateIMU, otherwise bOK = false; if more than 5 s (time_recently_lost) have passed since losing track, mState = LOST and bOK = false.
          3. In visual-only mode, relocalize; if relocalization keeps failing past time_recently_lost_visual, mState = LOST and bOK = false.
        2. If mState == LOST:
          1. If the map holds fewer than 10 keyframes, reset the active map.
          2. Otherwise create a new map (CreateMapInAtlas); clear mpLastKeyFrame and return.
    2. If in localization-only mode:
      1. If LOST, relocalize (Relocalization).
      2. Otherwise, if tracking is healthy (!mbVO):
        1. use the motion model when available, bOK = TrackWithMotionModel;
        2. otherwise the reference keyframe, bOK = TrackReferenceKeyFrame.
      3. Otherwise (mbVO: few matched map points):
        1. Run the motion model when available, bOKMM = TrackWithMotionModel, saving its pose and map points.
        2. Run relocalization, bOKReloc = Relocalization.
        3. If only the motion model succeeded, restore its result into mCurrentFrame; if mbVO is still set, IncreaseFound on the matched map points.
        4. If relocalization succeeded, mbVO = false.
        5. Tracking succeeded when bOKMM || bOKReloc.
    3. If the current frame has no reference keyframe yet, assign mpReferenceKF.
    4. SLAM mode: if the initial pose tracking succeeded, TrackLocalMap (find more matches and optimize the current pose); print a message on failure. Localization mode: TrackLocalMap only when bOK && !mbVO; mbVO == true means there are few matches to map points, so the local map cannot be retrieved and TrackLocalMap is skipped until the system relocalizes.
    5. If bOK (including the local-map result), mState = OK. If bOK is false while mState == OK:
      1. In IMU mode, if the current map's IMU is not initialized or inertial BA2 has not run, reset the active map; then mState = RECENTLY_LOST.
      2. Without an IMU, mState = RECENTLY_LOST as well.
      3. Record the time of loss in mTimeStampLost.
    6. If we relocalized recently, are past the IMU reset window, use an IMU, and the map's IMU is initialized: copy the current frame and its preintegration (used for the IMU reset).
    7. If the current map's IMU is initialized, tracking succeeded, and we are past the relocalization window, update mLastBias.
    8. Update the frame drawer and set the current camera pose in the map drawer.
    9. If tracking succeeded (bOK) or mState == RECENTLY_LOST:
      1. Update the constant-velocity motion model: the pose change from the last frame to the current one.
      2. In IMU mode, update the camera pose in the map drawer (SetCurrentCameraPose).
      3. Clean the VO matches (map points without observations).
      4. Delete the temporal map points created for VO tracking.
      5. Decide whether a keyframe is needed and insert it (stereo or RGB-D also create new map points).
      6. Drop the outlier map points from the current frame.
    10. If mState == LOST:
      1. If the map holds 5 or fewer keyframes, or an IMU is used but not initialized, reset the active map and return.
      2. Otherwise create a new map.
    11. Assign the reference keyframe if it is still missing, then set mLastFrame = Frame(mCurrentFrame).
  10. If the state is OK or RECENTLY_LOST, store the frame pose information (pose relative to the reference keyframe, the reference keyframe, the timestamp, and the lost flag) so the complete camera trajectory can be retrieved afterwards.
void Tracking::Track()
{
    if (bStepByStep)
    {
        while(!mbStep)
            usleep(500);
        mbStep = false;
    }
	
	// If the local mapper flagged bad IMU data, reset the active map
    if(mpLocalMapper->mbBadImu)
    {
        cout << "TRACK: Reset map because local mapper set the bad imu flag " << endl;
        mpSystem->ResetActiveMap();
        return;
    }
	
	// Get the current map from the Atlas
    Map* pCurrentMap = mpAtlas->GetCurrentMap();
    
    // NO_IMAGES_YET if the system was reset or this is the first run
    if(mState!=NO_IMAGES_YET)  // we already have images
    {
    	// If the timestamp jumped backwards, clear the IMU queue and start a new map
        if(mLastFrame.mTimeStamp>mCurrentFrame.mTimeStamp)
        {
            cerr << "ERROR: Frame with a timestamp older than previous frame detected!" << endl;
            unique_lock<mutex> lock(mMutexImuQueue);
            mlQueueImuData.clear();
            CreateMapInAtlas();
            return;
        }
        else  // If the frame is more than 1 s newer than the last one: without an IMU, just return; with an IMU, create a new map or reset the active one
        if(mCurrentFrame.mTimeStamp>mLastFrame.mTimeStamp+1.0)
        {
            cout << "id last: " << mLastFrame.mnId << "    id curr: " << mCurrentFrame.mnId << endl;
            if(mpAtlas->isInertial()) // an IMU is used
            {

                if(mpAtlas->isImuInitialized()) // the IMU is already initialized
                {
                    cout << "Timestamp jump detected. State set to LOST. Reseting IMU integration..." << endl;
                    // If inertial BA2 has not run yet, reset the active map; otherwise create a new map in the Atlas
                    if(!pCurrentMap->GetIniertialBA2())
                    {
                        mpSystem->ResetActiveMap();
                    }
                    else
                    {
                        CreateMapInAtlas();
                    }
                }
                else
                {
                    cout << "Timestamp jump detected, before IMU initialization. Reseting..." << endl;
                    // reset the active map
                    mpSystem->ResetActiveMap();
                }
            }

            return;
        }
    }

	// In IMU mode, if a last keyframe exists, initialize the current frame's bias from it
    if ((mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO) && mpLastKeyFrame)
        mCurrentFrame.SetNewBias(mpLastKeyFrame->GetImuBias());

    if(mState==NO_IMAGES_YET)
    {
        mState = NOT_INITIALIZED;
    }

    mLastProcessedState=mState;

    if ((mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO) && !mbCreatedMap)
    {
#ifdef REGISTER_TIMES
        std::chrono::steady_clock::time_point time_StartPreIMU = std::chrono::steady_clock::now();
#endif
        PreintegrateIMU();
#ifdef REGISTER_TIMES
        std::chrono::steady_clock::time_point time_EndPreIMU = std::chrono::steady_clock::now();

        double timePreImu = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(time_EndPreIMU - time_StartPreIMU).count();
        vdIMUInteg_ms.push_back(timePreImu);
#endif

    }
    mbCreatedMap = false;

    // Get Map Mutex -> Map cannot be changed
    unique_lock<mutex> lock(pCurrentMap->mMutexMapUpdate);

    mbMapUpdated = false;

    int nCurMapChangeIndex = pCurrentMap->GetMapChangeIndex();
    int nMapChangeIndex = pCurrentMap->GetLastMapChange();
    if(nCurMapChangeIndex>nMapChangeIndex)
    {
        // cout << "Map update detected" << endl;
        pCurrentMap->SetLastMapChange(nCurMapChangeIndex);
        mbMapUpdated = true;
    }


    if(mState==NOT_INITIALIZED)
    {
    	// Not initialized yet: run initialization first
        if(mSensor==System::STEREO || mSensor==System::RGBD || mSensor==System::IMU_STEREO)
            StereoInitialization();
        else
        {
            MonocularInitialization();
        }
		// Update the frame drawer
        mpFrameDrawer->Update(this);
        if(mState!=OK) // If rightly initialized, mState=OK
        {	// not OK: store the current frame as the last frame and return
            mLastFrame = Frame(mCurrentFrame);
            return;
        }

        if(mpAtlas->GetAllMaps().size() == 1)
        {
            mnFirstFrameId = mCurrentFrame.mnId;
        }
    }
    else
    {
        // System is initialized. Track Frame.
        bool bOK;

#ifdef REGISTER_TIMES
        std::chrono::steady_clock::time_point time_StartPosePred = std::chrono::steady_clock::now();
#endif

        // Initial camera pose estimation using motion model or relocalization (if tracking is lost)
        if(!mbOnlyTracking)  // SLAM mode (not localization-only)
        {  

            // State OK
            // Local Mapping is activated. This is the normal behaviour, unless
            // you explicitly activate the "only tracking" mode.
            if(mState==OK)
            {

                // Local Mapping might have changed some MapPoints tracked in last frame
                // Replace map points in the last frame that local mapping has updated
                CheckReplacedInLastFrame();
				
				// If there is no motion model and the IMU is not initialized, or we relocalized within the last two frames, track the reference keyframe
                if((mVelocity.empty() && !pCurrentMap->isImuInitialized()) || mCurrentFrame.mnId<mnLastRelocFrameId+2)
                {
                    //Verbose::PrintMess("TRACK: Track with respect to the reference KF ", Verbose::VERBOSITY_DEBUG);
                    // track the reference keyframe
                    bOK = TrackReferenceKeyFrame();
                }
                else
                {
                    //Verbose::PrintMess("TRACK: Track with motion model", Verbose::VERBOSITY_DEBUG);
                    // track with the constant-velocity motion model
                    bOK = TrackWithMotionModel();
                    if(!bOK)
                        bOK = TrackReferenceKeyFrame();
                }


                if (!bOK) // tracking failed
                {
                	// If we relocalized very recently and are in IMU mode, go straight to LOST
                    if ( mCurrentFrame.mnId<=(mnLastRelocFrameId+mnFramesToResetIMU) &&
                         (mSensor==System::IMU_MONOCULAR || mSensor==System::IMU_STEREO))
                    {
                        mState = LOST;
                    } // otherwise, if the map holds more than 10 keyframes, RECENTLY_LOST
                    else if(pCurrentMap->KeyFramesInMap()>10)
                    {
                        cout << "KF in map: " << pCurrentMap->KeyFramesInMap() << endl;
                        mState = RECENTLY_LOST;
                        mTimeStampLost = mCurrentFrame.mTimeStamp;
                        //mCurrentFrame.SetPose(mLastFrame.mTcw);
                    }
                    else
                    {
                        mState = LOST;
                    }
                }
            }
            else // the state was not OK
            {
				// recently lost
                if (mState == RECENTLY_LOST)
                {
                    Verbose::PrintMess("Lost for a short time", Verbose::VERBOSITY_NORMAL);

                    bOK = true;
                    // In IMU mode, track with the IMU for at most time_recently_lost (5 s); after that, mark as LOST
                    if((mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO))
                    {
                        if(pCurrentMap->isImuInitialized())
                            PredictStateIMU();
                        else
                            bOK = false;

                        if (mCurrentFrame.mTimeStamp-mTimeStampLost>time_recently_lost)
                        {
                            mState = LOST;
                            Verbose::PrintMess("Track Lost...", Verbose::VERBOSITY_NORMAL);
                            bOK=false;
                        }
                    }
                    else 
                    {	// Visual-only: try to relocalize; if it keeps failing, mark as LOST
                        // TODO fix relocalization
                        bOK = Relocalization();
                        if(!bOK && mCurrentFrame.mTimeStamp-mTimeStampLost>time_recently_lost_visual)
                        {
                            mState = LOST;
                            Verbose::PrintMess("Track Lost...", Verbose::VERBOSITY_NORMAL);
                            bOK=false;
                        }
                    }
                }
                else if (mState == LOST) // the state is LOST
                {

                    Verbose::PrintMess("A new map is started...", Verbose::VERBOSITY_NORMAL);
					// If the map holds fewer than 10 keyframes, reset the active map
                    if (pCurrentMap->KeyFramesInMap()<10)
                    {
                        mpSystem->ResetActiveMap();
                        cout << "Reseting current map..." << endl;
                    }else // otherwise start a new map in the Atlas (mpLastKeyFrame is cleared below)
                        CreateMapInAtlas();

                    if(mpLastKeyFrame)
                        mpLastKeyFrame = static_cast<KeyFrame*>(NULL);

                    Verbose::PrintMess("done", Verbose::VERBOSITY_NORMAL);

                    return;
                }
            }

        }
        else // localization-only mode (local mapping is disabled)
        {
            // Localization Mode: Local Mapping is deactivated (TODO Not available in inertial mode)
            if(mState==LOST) // if LOST, relocalize
            {
                if(mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO)
                    Verbose::PrintMess("IMU. State LOST", Verbose::VERBOSITY_NORMAL);
                bOK = Relocalization();
            }
            else
            {
                if(!mbVO) // tracking is healthy
                {
                    // In last frame we tracked enough MapPoints in the map
                    // use the motion model if a velocity estimate exists, otherwise the reference keyframe
                    if(!mVelocity.empty())
                    {	
                        bOK = TrackWithMotionModel();
                    }
                    else
                    {
                        bOK = TrackReferenceKeyFrame();
                    }
                }
                else // mbVO: few tracked map points
                {	
                    // In last frame we tracked mainly "visual odometry" points.

                    // We compute two camera poses, one from motion model and one doing relocalization.
                    // If relocalization is sucessfull we choose that solution, otherwise we retain
                    // the "visual odometry" solution.
                    bool bOKMM = false;
                    bool bOKReloc = false;
                    vector<MapPoint*> vpMPsMM;
                    vector<bool> vbOutMM;
                    cv::Mat TcwMM;
                    // run the motion model when a velocity estimate exists
                    if(!mVelocity.empty())
                    {
                        bOKMM = TrackWithMotionModel();
                        vpMPsMM = mCurrentFrame.mvpMapPoints;
                        vbOutMM = mCurrentFrame.mvbOutlier;
                        TcwMM = mCurrentFrame.mTcw.clone();
                    }
                    // relocalize
                    bOKReloc = Relocalization();

                    if(bOKMM && !bOKReloc)
                    {
                        mCurrentFrame.SetPose(TcwMM);
                        mCurrentFrame.mvpMapPoints = vpMPsMM;
                        mCurrentFrame.mvbOutlier = vbOutMM;

                        if(mbVO)
                        {
                            for(int i =0; i<mCurrentFrame.N; i++)
                            {
                                if(mCurrentFrame.mvpMapPoints[i] && !mCurrentFrame.mvbOutlier[i])
                                {
                                    mCurrentFrame.mvpMapPoints[i]->IncreaseFound();
                                }
                            }
                        }
                    }
                    else if(bOKReloc)
                    {
                        mbVO = false;
                    }

                    bOK = bOKReloc || bOKMM;
                }
            }
        }
		// If the current frame has no reference keyframe yet, assign one
        if(!mCurrentFrame.mpReferenceKF)
            mCurrentFrame.mpReferenceKF = mpReferenceKF;

#ifdef REGISTER_TIMES
        std::chrono::steady_clock::time_point time_EndPosePred = std::chrono::steady_clock::now();

        double timePosePred = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(time_EndPosePred - time_StartPosePred).count();
        vdPosePred_ms.push_back(timePosePred);
#endif


#ifdef REGISTER_TIMES
        std::chrono::steady_clock::time_point time_StartLMTrack = std::chrono::steady_clock::now();
#endif
        // If we have an initial estimation of the camera pose and matching. Track the local map.
        if(!mbOnlyTracking) // SLAM mode
        {	// If the initial pose tracking succeeded, track the local map: find more matches and optimize the current pose
            if(bOK) 
            {
                bOK = TrackLocalMap();

            }
            if(!bOK)
                cout << "Fail to track local map!" << endl;
        }
        else
        {
            // mbVO true means that there are few matches to MapPoints in the map. We cannot retrieve
            // a local map and therefore we do not perform TrackLocalMap(). Once the system relocalizes
            // the camera we will use the local map again.
            if(bOK && !mbVO) // track the local map only when tracking succeeded and enough map points are matched
                bOK = TrackLocalMap();
        }
		
		// If local-map tracking succeeded, mState = OK. If it failed although the initial pose tracking succeeded, the state becomes RECENTLY_LOST (after a possible active-map reset in IMU mode)
        if(bOK)
            mState = OK;
        else if (mState == OK)
        {
            if (mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO)
            {
                Verbose::PrintMess("Track lost for less than one second...", Verbose::VERBOSITY_NORMAL);
                if(!pCurrentMap->isImuInitialized() || !pCurrentMap->GetIniertialBA2())
                {
                    cout << "IMU is not or recently initialized. Reseting active map..." << endl;
                    mpSystem->ResetActiveMap();
                }

                mState = RECENTLY_LOST;
            }
            else
                mState = RECENTLY_LOST; // visual to lost

            if(mCurrentFrame.mnId>mnLastRelocFrameId+mMaxFrames)
            {
                mTimeStampLost = mCurrentFrame.mTimeStamp;
            }
        }

        // Save frame if recent relocalization, since they are used for IMU reset (as we are making a copy, it should be once mCurrFrame is completely modified)
        // Conditions: recently relocalized, past the IMU reset window, IMU mode, and the map's IMU initialized
        if((mCurrentFrame.mnId<(mnLastRelocFrameId+mnFramesToResetIMU)) && (mCurrentFrame.mnId > mnFramesToResetIMU) && ((mSensor == System::IMU_MONOCULAR) || (mSensor == System::IMU_STEREO)) && pCurrentMap->isImuInitialized())
        {
            // TODO check this situation
            Verbose::PrintMess("Saving pointer to frame. imu needs reset...", Verbose::VERBOSITY_NORMAL);
            Frame* pF = new Frame(mCurrentFrame);
            pF->mpPrevFrame = new Frame(mLastFrame);

            // Load preintegration
            pF->mpImuPreintegratedFrame = new IMU::Preintegrated(mCurrentFrame.mpImuPreintegratedFrame);
        }

        if(pCurrentMap->isImuInitialized())
        {
            if(bOK)
            {
                if(mCurrentFrame.mnId==(mnLastRelocFrameId+mnFramesToResetIMU))
                {
                    cout << "RESETING FRAME!!!" << endl;
                }
                else if(mCurrentFrame.mnId>(mnLastRelocFrameId+30))
                    mLastBias = mCurrentFrame.mImuBias;
            }
        }

#ifdef REGISTER_TIMES
        std::chrono::steady_clock::time_point time_EndLMTrack = std::chrono::steady_clock::now();

        double timeLMTrack = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(time_EndLMTrack - time_StartLMTrack).count();
        vdLMTrack_ms.push_back(timeLMTrack);
#endif

        // Update drawer
        mpFrameDrawer->Update(this);
        if(!mCurrentFrame.mTcw.empty())
            mpMapDrawer->SetCurrentCameraPose(mCurrentFrame.mTcw);
		
		// If tracking succeeded or RECENTLY_LOST, update the motion model: the pose change from the last frame to the current one
        if(bOK || mState==RECENTLY_LOST)
        {
            // Update motion model
            if(!mLastFrame.mTcw.empty() && !mCurrentFrame.mTcw.empty())
            {
                cv::Mat LastTwc = cv::Mat::eye(4,4,CV_32F);
                mLastFrame.GetRotationInverse().copyTo(LastTwc.rowRange(0,3).colRange(0,3));
                mLastFrame.GetCameraCenter().copyTo(LastTwc.rowRange(0,3).col(3));
                mVelocity = mCurrentFrame.mTcw*LastTwc;
            }
            else
                mVelocity = cv::Mat();

            if(mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO)
                mpMapDrawer->SetCurrentCameraPose(mCurrentFrame.mTcw);

            // Clean VO matches: drop map points that have no observations
            for(int i=0; i<mCurrentFrame.N; i++)
            {
                MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
                if(pMP)
                    if(pMP->Observations()<1)
                    {
                        mCurrentFrame.mvbOutlier[i] = false;
                        mCurrentFrame.mvpMapPoints[i]=static_cast<MapPoint*>(NULL);
                    }
            }

            // Delete the temporal MapPoints created for VO tracking
            for(list<MapPoint*>::iterator lit = mlpTemporalPoints.begin(), lend =  mlpTemporalPoints.end(); lit!=lend; lit++)
            {
                MapPoint* pMP = *lit;
                delete pMP;
            }
            mlpTemporalPoints.clear();

#ifdef REGISTER_TIMES
            std::chrono::steady_clock::time_point time_StartNewKF = std::chrono::steady_clock::now();
#endif
			// Decide whether a new keyframe is needed
            bool bNeedKF = NeedNewKeyFrame();




            // Check if we need to insert a new keyframe (stereo/RGB-D also create new map points)
            if(bNeedKF && (bOK|| (mState==RECENTLY_LOST && (mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO))))
                CreateNewKeyFrame();

#ifdef REGISTER_TIMES
            std::chrono::steady_clock::time_point time_EndNewKF = std::chrono::steady_clock::now();

            double timeNewKF = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(time_EndNewKF - time_StartNewKF).count();
            vdNewKF_ms.push_back(timeNewKF);
#endif

            // We allow points with high innovation (considererd outliers by the Huber Function)
            // pass to the new keyframe, so that bundle adjustment will finally decide
            // if they are outliers or not. We don't want next frame to estimate its position
            // with those points so we discard them in the frame. Only has effect if lastframe is tracked
            for(int i=0; i<mCurrentFrame.N;i++)
            {
                if(mCurrentFrame.mvpMapPoints[i] && mCurrentFrame.mvbOutlier[i])
                    mCurrentFrame.mvpMapPoints[i]=static_cast<MapPoint*>(NULL);
            }
        }

        // Reset if the camera get lost soon after initialization
        // If the map holds at most 5 keyframes, or the IMU is used but not yet initialized, reset the active map;
        // otherwise create a new map
        if(mState==LOST) 
        {
            if(pCurrentMap->KeyFramesInMap()<=5)
            {
                mpSystem->ResetActiveMap();
                return;
            }
            if ((mSensor == System::IMU_MONOCULAR) || (mSensor == System::IMU_STEREO))
                if (!pCurrentMap->isImuInitialized())
                {
                    Verbose::PrintMess("Track lost before IMU initialisation, reseting...", Verbose::VERBOSITY_QUIET);
                    mpSystem->ResetActiveMap();
                    return;
                }

            CreateMapInAtlas();
        }

        if(!mCurrentFrame.mpReferenceKF)
            mCurrentFrame.mpReferenceKF = mpReferenceKF;

        mLastFrame = Frame(mCurrentFrame);
    }



	// Store the current frame's pose relative to its reference keyframe, together with the reference KF, the timestamp, and the lost flag
    if(mState==OK || mState==RECENTLY_LOST)
    {
        // Store frame pose information to retrieve the complete camera trajectory afterwards.
        if(!mCurrentFrame.mTcw.empty())
        {
            cv::Mat Tcr = mCurrentFrame.mTcw*mCurrentFrame.mpReferenceKF->GetPoseInverse();
            mlRelativeFramePoses.push_back(Tcr);
            mlpReferences.push_back(mCurrentFrame.mpReferenceKF);
            mlFrameTimes.push_back(mCurrentFrame.mTimeStamp);
            mlbLost.push_back(mState==LOST);
        }
        else
        {
            // This can happen if tracking is lost
            mlRelativeFramePoses.push_back(mlRelativeFramePoses.back());
            mlpReferences.push_back(mlpReferences.back());
            mlFrameTimes.push_back(mlFrameTimes.back());
            mlbLost.push_back(mState==LOST);
        }

    }
}

Related functions: Atlas

CreateMapInAtlas

  • Creates a new map in the Atlas and resets all tracking state.
void Tracking::CreateMapInAtlas()
{
	// Record the current frame id as the last-init frame id
    mnLastInitFrameId = mCurrentFrame.mnId;
    // Create a new map in the Atlas
    mpAtlas->CreateNewMap();
    // If an IMU is used,
    if (mSensor==System::IMU_STEREO || mSensor == System::IMU_MONOCULAR)
        mpAtlas->SetInertialSensor(); // tell the Atlas it is inertial
    mbSetInit=false;
	
	// Reset the initial frame id and the state
    mnInitialFrameId = mCurrentFrame.mnId+1;
    mState = NO_IMAGES_YET;

    // Restart the variable with information about the last KF
    mVelocity = cv::Mat();
    mnLastRelocFrameId = mnLastInitFrameId; // The last relocation KF_id is the current id, because it is the new starting point for new map
    Verbose::PrintMess("First frame id in map: " + to_string(mnLastInitFrameId+1), Verbose::VERBOSITY_NORMAL);
    mbVO = false; // Init value for know if there are enough MapPoints in the last KF
    if(mSensor == System::MONOCULAR || mSensor == System::IMU_MONOCULAR)
    {
        if(mpInitializer)
            delete mpInitializer;
        mpInitializer = static_cast<Initializer*>(NULL);
    }

    if((mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO ) && mpImuPreintegratedFromLastKF)
    {
        delete mpImuPreintegratedFromLastKF;
        mpImuPreintegratedFromLastKF = new IMU::Preintegrated(IMU::Bias(),*mpImuCalib);
    }

    if(mpLastKeyFrame)
        mpLastKeyFrame = static_cast<KeyFrame*>(NULL);

    if(mpReferenceKF)
        mpReferenceKF = static_cast<KeyFrame*>(NULL);

    mLastFrame = Frame();
    mCurrentFrame = Frame();
    mvIniMatches.clear();

    mbCreatedMap = true;

}

ResetActiveMap

  • Throws away the current (active) map and resets the related state.
void Tracking::ResetActiveMap(bool bLocMap)
{
    Verbose::PrintMess("Active map Reseting", Verbose::VERBOSITY_NORMAL);
	// If the viewer is running, ask it to stop and wait until it has stopped
    if(mpViewer)
    {
        mpViewer->RequestStop();
        while(!mpViewer->isStopped())
            usleep(3000);
    }
	
	// Get the current map
    Map* pMap = mpAtlas->GetCurrentMap();

    if (!bLocMap)	// unless told to skip the local mapper
    {	
        Verbose::PrintMess("Reseting Local Mapper...", Verbose::VERBOSITY_NORMAL);
        // request the local mapper to reset the active map
        mpLocalMapper->RequestResetActiveMap(pMap);
        Verbose::PrintMess("done", Verbose::VERBOSITY_NORMAL);
    }

    // Reset Loop Closing
    Verbose::PrintMess("Reseting Loop Closing...", Verbose::VERBOSITY_NORMAL);
    mpLoopClosing->RequestResetActiveMap(pMap);
    Verbose::PrintMess("done", Verbose::VERBOSITY_NORMAL);

    // Clear BoW Database
    Verbose::PrintMess("Reseting Database", Verbose::VERBOSITY_NORMAL);
    mpKeyFrameDB->clearMap(pMap); // Only clear the active map references
    Verbose::PrintMess("done", Verbose::VERBOSITY_NORMAL);

    // Clear Map (this erase MapPoints and KeyFrames)
    mpAtlas->clearMap();

    mnLastInitFrameId = Frame::nNextId;
    mnLastRelocFrameId = mnLastInitFrameId;
    mState = NO_IMAGES_YET;

    if(mpInitializer)
    {
        delete mpInitializer;
        mpInitializer = static_cast<Initializer*>(NULL);
    }

    list<bool> lbLost;
    unsigned int index = mnFirstFrameId;
    cout << "mnFirstFrameId = " << mnFirstFrameId << endl;
    for(Map* pMap : mpAtlas->GetAllMaps())
    {
        if(pMap->GetAllKeyFrames().size() > 0)
        {
            if(index > pMap->GetLowerKFID())
                index = pMap->GetLowerKFID();
        }
    }

    int num_lost = 0;
    cout << "mnInitialFrameId = " << mnInitialFrameId << endl;

    for(list<bool>::iterator ilbL = mlbLost.begin(); ilbL != mlbLost.end(); ilbL++)
    {
        if(index < mnInitialFrameId)
            lbLost.push_back(*ilbL);
        else
        {
            lbLost.push_back(true);
            num_lost += 1;
        }

        index++;
    }
    cout << num_lost << " Frames set to lost" << endl;

    mlbLost = lbLost;

    mnInitialFrameId = mCurrentFrame.mnId;
    mnLastRelocFrameId = mCurrentFrame.mnId;

    mCurrentFrame = Frame();
    mLastFrame = Frame();
    mpReferenceKF = static_cast<KeyFrame*>(NULL);
    mpLastKeyFrame = static_cast<KeyFrame*>(NULL);
    mvIniMatches.clear();

    if(mpViewer)
        mpViewer->Release();

    Verbose::PrintMess("   End reseting! ", Verbose::VERBOSITY_NORMAL);
}

Related functions: parameter parsing

ParseCamParamFile (camera parameters)

  • Reads the file with cv::FileStorage and validates every entry:

     // Camera calibration parameters
     cv::FileNode node = fSettings["Camera.fx"];
     if(!node.empty() && node.isReal())
     {
         fx = node.real();
     }
     else
     {
         std::cerr << "*Camera.fx parameter doesn't exist or is not a real number*" << std::endl;
         b_miss_params = true;
     }
    
  • Main parameters:

    • Camera.type: PinHole or KannalaBrandt8
      • PinHole: fx, fy, cx, cy, k1, k2, p1, p2, k3
      • KannalaBrandt8: fx, fy, cx, cy, k1, k2, k3, k4
    • STEREO or IMU_STEREO ?: bf
    • fps (default 30)
    • RGB?: 0: BGR, 1: RGB.
    • STEREO or RGBD or IMU_STEREO ?: ThDepth
      • mThDepth = mbf*ThDepth/fx (the close/far depth threshold)
    • RGBD ?: DepthMapFactor

ParseORBParamFile (ORB parameters)

ORBextractor.nFeatures: 1500  # number of features per image
ORBextractor.scaleFactor: 1.2  # scale factor between levels in the scale pyramid
ORBextractor.nLevels: 8 # number of levels in the scale pyramid
# ORB extractor: FAST thresholds. You can lower these values if your images have low contrast.
# The image is divided into a grid; FAST is extracted in each cell, imposing a minimum response.
# First we impose iniThFAST. If no corners are detected, we impose the lower value minThFAST.
ORBextractor.iniThFAST: 20 
ORBextractor.minThFAST: 7 
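A sketch of how ParseORBParamFile turns these values into extractors (assuming an open cv::FileStorage named fSettings; validity checks as in the Camera.fx example are omitted; the constructor signature follows the ORBextractor header, and the initialization extractor requests 5x the features, matching the note in the Tracking constructor above):

int nFeatures = fSettings["ORBextractor.nFeatures"];
float fScaleFactor = fSettings["ORBextractor.scaleFactor"];
int nLevels = fSettings["ORBextractor.nLevels"];
int fIniThFAST = fSettings["ORBextractor.iniThFAST"];
int fMinThFAST = fSettings["ORBextractor.minThFAST"];

// The left extractor is always built
mpORBextractorLeft = new ORBextractor(nFeatures, fScaleFactor, nLevels, fIniThFAST, fMinThFAST);
// Stereo / IMU-stereo also build the right extractor
if(mSensor==System::STEREO || mSensor==System::IMU_STEREO)
    mpORBextractorRight = new ORBextractor(nFeatures, fScaleFactor, nLevels, fIniThFAST, fMinThFAST);
// Monocular / IMU-monocular build the initialization extractor with 5x the features
if(mSensor==System::MONOCULAR || mSensor==System::IMU_MONOCULAR)
    mpIniORBextractor = new ORBextractor(5*nFeatures, fScaleFactor, nLevels, fIniThFAST, fMinThFAST);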

ParseIMUParamFile (IMU parameters)

# Transformation from camera to body-frame (IMU)
Tbc: !!opencv-matrix
   rows: 4
   cols: 4
   dt: f
   data: [-0.9995250378696743, 0.0075019185074052044, -0.02989013031643309, 0.045574835649698026, 
          0.029615343885863205, -0.03439736061393144, -0.998969345370175, -0.071161801837997044,
         -0.008522328211654736, -0.9993800792498829, 0.03415885127385616, -0.044681254117144367,
          0.0, 0.0, 0.0, 1.0]
# IMU noise (Use those from VINS-mono)
IMU.NoiseGyro: 0.00016 # rad/s^0.5 
IMU.NoiseAcc: 0.0028 # m/s^1.5
IMU.GyroWalk: 0.000022 # rad/s^1.5
IMU.AccWalk: 0.00086 # m/s^2.5
IMU.Frequency: 200
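A minimal sketch of reading these entries with cv::FileStorage (mirroring what ParseIMUParamFile does; the sqrt(frequency) scaling converts the continuous-time noise densities into the discrete-time sigmas used by the preintegration):

cv::FileStorage fSettings("EuRoC.yaml", cv::FileStorage::READ);
cv::Mat Tbc;
fSettings["Tbc"] >> Tbc;                  // 4x4 camera-to-body extrinsics
float freq = fSettings["IMU.Frequency"];
float Ng   = fSettings["IMU.NoiseGyro"];  // gyro noise density
float Na   = fSettings["IMU.NoiseAcc"];   // accelerometer noise density
float Ngw  = fSettings["IMU.GyroWalk"];   // gyro random walk
float Naw  = fSettings["IMU.AccWalk"];    // accelerometer random walk

const float sf = sqrt(freq);
mpImuCalib = new IMU::Calib(Tbc, Ng*sf, Na*sf, Ngw/sf, Naw/sf);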

Related functions: IMU

PreintegrateIMU (IMU preintegration)

Steps:

  1. If the current frame has no previous frame, return.
  2. Otherwise clear the measurement container first.
  3. If the IMU queue is empty, return.
  4. Loop: pop the measurements in the interval [last frame, current frame] from the queue.
  5. Construct an IMU::Preintegrated object.
  6. Integrate both preintegrations: since the last keyframe and between the two frames.
void Tracking::PreintegrateIMU()
{
    //cout << "start preintegration" << endl;

	// No previous frame: return
    if(!mCurrentFrame.mpPrevFrame)
    {
        Verbose::PrintMess("non prev frame ", Verbose::VERBOSITY_NORMAL);
        mCurrentFrame.setIntegrated();
        return;
    }
	// Clear the results of the previous run (clearing at the start avoids a clear racing an unfinished computation in multi-threaded use)
    mvImuFromLastFrame.clear();
    mvImuFromLastFrame.reserve(mlQueueImuData.size());
    
    // Empty IMU queue: return
    if(mlQueueImuData.size() == 0)
    {
        Verbose::PrintMess("Not IMU data in mlQueueImuData!!", Verbose::VERBOSITY_NORMAL);
        mCurrentFrame.setIntegrated();
        return;
    }

	// Loop: pop the measurements between the last frame and the current frame from the queue 
    while(true)
    {
        bool bSleep = false;
        {
            unique_lock<mutex> lock(mMutexImuQueue);
            if(!mlQueueImuData.empty())
            {	// peek at the front of mlQueueImuData
                IMU::Point* m = &mlQueueImuData.front();
                cout.precision(17);
                // Older than the last frame (with a 1 ms tolerance): discard
                if(m->t<mCurrentFrame.mpPrevFrame->mTimeStamp-0.001l)
                {
                    mlQueueImuData.pop_front();
                } 
                // Otherwise, if older than the current frame: collect it and pop
                else if(m->t<mCurrentFrame.mTimeStamp-0.001l)
                {
                    mvImuFromLastFrame.push_back(*m);
                    mlQueueImuData.pop_front();
                }
                else  // first sample at/after the current frame: collect it and stop
                {
                    mvImuFromLastFrame.push_back(*m);
                    break;
                }
            }
            else
            {
                break;
                bSleep = true;
            }
        }
        if(bSleep) // poll every 500 us
            usleep(500);
    }


    const int n = mvImuFromLastFrame.size()-1;
    // Build the frame-to-frame preintegration object
    IMU::Preintegrated* pImuPreintegratedFromLastFrame = new IMU::Preintegrated(mLastFrame.mImuBias,mCurrentFrame.mImuCalib);

    for(int i=0; i<n; i++)
    {
        float tstep;
        cv::Point3f acc, angVel;
        // Compute the effective acceleration and angular velocity for this step
        if((i==0) && (i<(n-1))) // first sample but not the last
        {   
            float tab = mvImuFromLastFrame[i+1].t-mvImuFromLastFrame[i].t;
            float tini = mvImuFromLastFrame[i].t-mCurrentFrame.mpPrevFrame->mTimeStamp;
            acc = (mvImuFromLastFrame[i].a+mvImuFromLastFrame[i+1].a-
                    (mvImuFromLastFrame[i+1].a-mvImuFromLastFrame[i].a)*(tini/tab))*0.5f;
            angVel = (mvImuFromLastFrame[i].w+mvImuFromLastFrame[i+1].w-
                    (mvImuFromLastFrame[i+1].w-mvImuFromLastFrame[i].w)*(tini/tab))*0.5f;
            tstep = mvImuFromLastFrame[i+1].t-mCurrentFrame.mpPrevFrame->mTimeStamp;
        }
        else if(i<(n-1)) // a middle sample
        {
            acc = (mvImuFromLastFrame[i].a+mvImuFromLastFrame[i+1].a)*0.5f;
            angVel = (mvImuFromLastFrame[i].w+mvImuFromLastFrame[i+1].w)*0.5f;
            tstep = mvImuFromLastFrame[i+1].t-mvImuFromLastFrame[i].t;
        }
        else if((i>0) && (i==(n-1))) // the last sample but not the first
        {
            float tab = mvImuFromLastFrame[i+1].t-mvImuFromLastFrame[i].t;
            float tend = mvImuFromLastFrame[i+1].t-mCurrentFrame.mTimeStamp;
            acc = (mvImuFromLastFrame[i].a+mvImuFromLastFrame[i+1].a-
                    (mvImuFromLastFrame[i+1].a-mvImuFromLastFrame[i].a)*(tend/tab))*0.5f;
            angVel = (mvImuFromLastFrame[i].w+mvImuFromLastFrame[i+1].w-
                    (mvImuFromLastFrame[i+1].w-mvImuFromLastFrame[i].w)*(tend/tab))*0.5f;
            tstep = mCurrentFrame.mTimeStamp-mvImuFromLastFrame[i].t;
        }
        else if((i==0) && (i==(n-1))) // only one sample
        {
            acc = mvImuFromLastFrame[i].a;
            angVel = mvImuFromLastFrame[i].w;
            tstep = mCurrentFrame.mTimeStamp-mCurrentFrame.mpPrevFrame->mTimeStamp;
        }

        if (!mpImuPreintegratedFromLastKF)
            cout << "mpImuPreintegratedFromLastKF does not exist" << endl;
        // Update both preintegrations: since the last keyframe and since the last frame
        mpImuPreintegratedFromLastKF->IntegrateNewMeasurement(acc,angVel,tstep);
        pImuPreintegratedFromLastFrame->IntegrateNewMeasurement(acc,angVel,tstep);
    }

    mCurrentFrame.mpImuPreintegratedFrame = pImuPreintegratedFromLastFrame;
    mCurrentFrame.mpImuPreintegrated = mpImuPreintegratedFromLastKF;
    mCurrentFrame.mpLastKeyFrame = mpLastKeyFrame;
	// Mark the current frame as preintegrated
    mCurrentFrame.setIntegrated();

    Verbose::PrintMess("Preintegration is finished!! ", Verbose::VERBOSITY_DEBUG);
}
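The boundary cases above interpolate the first and last IMU samples to the frame timestamps. For the first interval, with $t_{ab} = t_{i+1} - t_i$ and $t_{ini} = t_i - t_{\text{prev frame}}$ as in the code (a sketch in LaTeX):

$$a = \frac{1}{2}\Big[\, a_i + a_{i+1} - (a_{i+1} - a_i)\,\frac{t_{ini}}{t_{ab}} \,\Big]$$

i.e. the average of $a_{i+1}$ and the sample linearly extrapolated back to the previous frame's timestamp. The last interval is handled symmetrically with $t_{end} = t_{i+1} - t_{\text{frame}}$, and the angular velocity is treated the same way.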

PredictStateIMU (pose prediction from the IMU)

  • Predicts the pose from the IMU: if the map was just updated and a last keyframe exists, it integrates from that keyframe; otherwise it integrates from the last frame.
  • The function is used in two situations:
    • predicting the pose with the IMU when visual tracking is lost;
    • providing the initial pose estimate for constant-velocity tracking in IMU mode.
  • Both branches apply the same kinematic model, sketched below.
      
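In LaTeX, the model is (a sketch; $g = (0, 0, -9.81)^{\top}$, $t_{12}$ is the preintegration interval dT, and $\Delta R_{12}, \Delta v_{12}, \Delta p_{12}$ are the preintegrated deltas evaluated at the bias $b$):

$$R_{wb_2} = \mathrm{Normalize}\big(R_{wb_1}\, \Delta R_{12}(b)\big)$$
$$v_{wb_2} = v_{wb_1} + g\, t_{12} + R_{wb_1}\, \Delta v_{12}(b)$$
$$p_{wb_2} = p_{wb_1} + v_{wb_1}\, t_{12} + \tfrac{1}{2}\, g\, t_{12}^{2} + R_{wb_1}\, \Delta p_{12}(b)$$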
bool Tracking::PredictStateIMU()
{
	// Without a previous frame, return false immediately
    if(!mCurrentFrame.mpPrevFrame)
    {
        Verbose::PrintMess("No last frame", Verbose::VERBOSITY_NORMAL);
        return false;
    }
	
	// The map was updated and a last keyframe exists
    if(mbMapUpdated && mpLastKeyFrame)
    {
    	// IMU pose + velocity of the last keyframe
        const cv::Mat twb1 = mpLastKeyFrame->GetImuPosition();
        const cv::Mat Rwb1 = mpLastKeyFrame->GetImuRotation();
        const cv::Mat Vwb1 = mpLastKeyFrame->GetVelocity();
		
		// Gravity vector, and the elapsed time from the last keyframe to now
        const cv::Mat Gz = (cv::Mat_<float>(3,1) << 0,0,-IMU::GRAVITY_VALUE);
        const float t12 = mpImuPreintegratedFromLastKF->dT;
		
		// Predict the current rotation + position + velocity
        cv::Mat Rwb2 = IMU::NormalizeRotation(Rwb1*mpImuPreintegratedFromLastKF->GetDeltaRotation(mpLastKeyFrame->GetImuBias()));
        cv::Mat twb2 = twb1 + Vwb1*t12 + 0.5f*t12*t12*Gz+ Rwb1*mpImuPreintegratedFromLastKF->GetDeltaPosition(mpLastKeyFrame->GetImuBias());
        cv::Mat Vwb2 = Vwb1 + t12*Gz + Rwb1*mpImuPreintegratedFromLastKF->GetDeltaVelocity(mpLastKeyFrame->GetImuBias());
        // Write the prediction into the current frame
        mCurrentFrame.SetImuPoseVelocity(Rwb2,twb2,Vwb2);
        mCurrentFrame.mPredRwb = Rwb2.clone();
        mCurrentFrame.mPredtwb = twb2.clone();
        mCurrentFrame.mPredVwb = Vwb2.clone();
        mCurrentFrame.mImuBias = mpLastKeyFrame->GetImuBias();
        mCurrentFrame.mPredBias = mCurrentFrame.mImuBias;
        return true;
    }
    else if(!mbMapUpdated) // map not updated: integrate from the last frame instead
    {	// Take the last frame's position + rotation + velocity
        const cv::Mat twb1 = mLastFrame.GetImuPosition();
        const cv::Mat Rwb1 = mLastFrame.GetImuRotation();
        const cv::Mat Vwb1 = mLastFrame.mVw;
        const cv::Mat Gz = (cv::Mat_<float>(3,1) << 0,0,-IMU::GRAVITY_VALUE);
        const float t12 = mCurrentFrame.mpImuPreintegratedFrame->dT;
		// Predict the current rotation + position + velocity
        cv::Mat Rwb2 = IMU::NormalizeRotation(Rwb1*mCurrentFrame.mpImuPreintegratedFrame->GetDeltaRotation(mLastFrame.mImuBias));
        cv::Mat twb2 = twb1 + Vwb1*t12 + 0.5f*t12*t12*Gz+ Rwb1*mCurrentFrame.mpImuPreintegratedFrame->GetDeltaPosition(mLastFrame.mImuBias);
        cv::Mat Vwb2 = Vwb1 + t12*Gz + Rwb1*mCurrentFrame.mpImuPreintegratedFrame->GetDeltaVelocity(mLastFrame.mImuBias);
		// Write the prediction into the current frame
        mCurrentFrame.SetImuPoseVelocity(Rwb2,twb2,Vwb2);
        mCurrentFrame.mPredRwb = Rwb2.clone();
        mCurrentFrame.mPredtwb = twb2.clone();
        mCurrentFrame.mPredVwb = Vwb2.clone();
        mCurrentFrame.mImuBias =mLastFrame.mImuBias;
        mCurrentFrame.mPredBias = mCurrentFrame.mImuBias;
        return true;
    }
    else
        cout << "not IMU prediction!!" << endl;

    return false;
}
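
Both branches implement the same on-manifold IMU propagation. Writing $\Delta R_{12}$, $\Delta v_{12}$, $\Delta p_{12}$ for the bias-corrected preintegrated measurements over the interval $t_{12}$, and $g = (0, 0, -9.81)^\top$, the prediction computed above is:

\begin{aligned}
R_{wb_2} &= \mathrm{Normalize}\big(R_{wb_1}\,\Delta R_{12}\big)\\
v_{wb_2} &= v_{wb_1} + g\,t_{12} + R_{wb_1}\,\Delta v_{12}\\
p_{wb_2} &= p_{wb_1} + v_{wb_1}\,t_{12} + \tfrac{1}{2}\,g\,t_{12}^{2} + R_{wb_1}\,\Delta p_{12}
\end{aligned}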

ComputeGyroBias: gyroscope bias

  • Over multiple frames, the error term is the difference between the inter-frame rotation observed by vision and the preintegrated IMU rotation.
  • A single bias is shared by all frames. One Gauss-Newton step yields the bias components bwx, bwy, bwz (the normal equations are summarized after the code).
void Tracking::ComputeGyroBias(const vector<Frame*> &vpFs, float &bwx,  float &bwy, float &bwz)
{
    const int N = vpFs.size();
    vector<float> vbx;
    vbx.reserve(N);
    vector<float> vby;
    vby.reserve(N);
    vector<float> vbz;
    vbz.reserve(N);

    cv::Mat H = cv::Mat::zeros(3,3,CV_32F);
    cv::Mat grad  = cv::Mat::zeros(3,1,CV_32F);
    for(int i=1;i<N;i++)
    {
        Frame* pF2 = vpFs[i];
        Frame* pF1 = vpFs[i-1];
        // Observed inter-frame rotation from vision
        cv::Mat VisionR = pF1->GetImuRotation().t()*pF2->GetImuRotation();
        // Jacobian of the preintegrated rotation w.r.t. the gyro bias
        cv::Mat JRg = pF2->mpImuPreintegratedFrame->JRg;
        // Error between the preintegrated rotation and the observed one
        cv::Mat E = pF2->mpImuPreintegratedFrame->GetUpdatedDeltaRotation().t()*VisionR;
        cv::Mat e = IMU::LogSO3(E);
        assert(fabs(pF2->mTimeStamp-pF1->mTimeStamp-pF2->mpImuPreintegratedFrame->dT)<0.01);
        cv::Mat J = -IMU::InverseRightJacobianSO3(e)*E.t()*JRg;
        // Accumulate the Gauss-Newton normal equations: H = Σ JᵀJ, grad = Σ Jᵀe
        grad += J.t()*e;
        H += J.t()*J;
    }
	
    cv::Mat bg = -H.inv(cv::DECOMP_SVD)*grad;
    bwx = bg.at<float>(0);
    bwy = bg.at<float>(1);
    bwz = bg.at<float>(2);
	
	// Write the bias into every frame and reintegrate
    for(int i=1;i<N;i++)
    {
        Frame* pF = vpFs[i];
        pF->mImuBias.bwx = bwx;
        pF->mImuBias.bwy = bwy;
        pF->mImuBias.bwz = bwz;
        pF->mpImuPreintegratedFrame->SetNewBias(pF->mImuBias);
        pF->mpImuPreintegratedFrame->Reintegrate();
    }
}
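
For reference, the loop accumulates one shared 3x3 Gauss-Newton system over all frame pairs. With $E_i = \Delta R_i^{\top} R_i^{vis}$ the per-pair rotation error, $J_{R_g}$ the Jacobian of the preintegrated rotation w.r.t. the gyro bias (JRg above), and $J_r$ the right Jacobian of SO(3), the step solved by the code is:

\begin{aligned}
e_i &= \mathrm{Log}\!\left(\Delta R_i^{\top} R_i^{vis}\right), \qquad
J_i = -J_r^{-1}(e_i)\, E_i^{\top} J_{R_g},\\
b_g &= -\Big(\textstyle\sum_i J_i^{\top} J_i\Big)^{-1} \sum_i J_i^{\top} e_i .
\end{aligned}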


ComputeVelocitiesAccBias: accelerometer bias

  • Over multiple frames, the error terms are the differences between the inter-frame position/velocity observed by vision and the preintegrated IMU values.
  • A single accelerometer bias is shared by all frames. Solving one linear least-squares problem yields the per-frame velocities and the bias components bax, bay, baz (the system is summarized after the code).
void Tracking::ComputeVelocitiesAccBias(const vector<Frame*> &vpFs, float &bax,  float &bay, float &baz)
{
    const int N = vpFs.size();
    const int nVar = 3*N +3; // 3 velocities/frame + acc bias
    const int nEqs = 6*(N-1);
	
	// Jacobian J, residual e, and gravity vector g
    cv::Mat J(nEqs,nVar,CV_32F,cv::Scalar(0));
    cv::Mat e(nEqs,1,CV_32F,cv::Scalar(0));
    cv::Mat g = (cv::Mat_<float>(3,1)<<0,0,-IMU::GRAVITY_VALUE);

    for(int i=0;i<N-1;i++)
    {
        Frame* pF2 = vpFs[i+1];
        Frame* pF1 = vpFs[i];
        // IMU positions of both frames and the rotation of pF1
        cv::Mat twb1 = pF1->GetImuPosition();
        cv::Mat twb2 = pF2->GetImuPosition();
        cv::Mat Rwb1 = pF1->GetImuRotation();
        // Inter-frame IMU preintegration terms and their Jacobians w.r.t. the accelerometer bias
        cv::Mat dP12 = pF2->mpImuPreintegratedFrame->GetUpdatedDeltaPosition();
        cv::Mat dV12 = pF2->mpImuPreintegratedFrame->GetUpdatedDeltaVelocity();
        cv::Mat JP12 = pF2->mpImuPreintegratedFrame->JPa;
        cv::Mat JV12 = pF2->mpImuPreintegratedFrame->JVa;
        float t12 = pF2->mpImuPreintegratedFrame->dT;
        // Position p2=p1+v1*t+0.5*g*t^2+R1*dP12
        J.rowRange(6*i,6*i+3).colRange(3*i,3*i+3) += cv::Mat::eye(3,3,CV_32F)*t12;
        J.rowRange(6*i,6*i+3).colRange(3*N,3*N+3) += Rwb1*JP12;
        e.rowRange(6*i,6*i+3) = twb2-twb1-0.5f*g*t12*t12-Rwb1*dP12;
        // Velocity v2=v1+g*t+R1*dV12
        J.rowRange(6*i+3,6*i+6).colRange(3*i,3*i+3) += -cv::Mat::eye(3,3,CV_32F);
        J.rowRange(6*i+3,6*i+6).colRange(3*(i+1),3*(i+1)+3) += cv::Mat::eye(3,3,CV_32F);
        J.rowRange(6*i+3,6*i+6).colRange(3*N,3*N+3) -= Rwb1*JV12;
        e.rowRange(6*i+3,6*i+6) = g*t12+Rwb1*dV12;
    }
	
	// Solve the normal equations for the velocities + accelerometer bias
    cv::Mat H = J.t()*J;
    cv::Mat B = J.t()*e;
    cv::Mat x(nVar,1,CV_32F);
    cv::solve(H,B,x);

    bax = x.at<float>(3*N);
    bay = x.at<float>(3*N+1);
    baz = x.at<float>(3*N+2);

    for(int i=0;i<N;i++)
    {
        Frame* pF = vpFs[i];
        x.rowRange(3*i,3*i+3).copyTo(pF->mVw);
        if(i>0)
        {
            pF->mImuBias.bax = bax;
            pF->mImuBias.bay = bay;
            pF->mImuBias.baz = baz;
            pF->mpImuPreintegratedFrame->SetNewBias(pF->mImuBias);
        }
    }
}
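
The linear system assembled above has unknowns $x = [v_1,\dots,v_N,\, b_a] \in \mathbb{R}^{3N+3}$. For each consecutive pair, the position and velocity rows encode (with $J_{p,a}$, $J_{v,a}$ the preintegration Jacobians JPa, JVa w.r.t. the accelerometer bias):

\begin{aligned}
p_2 &= p_1 + v_1\, t_{12} + \tfrac{1}{2}\, g\, t_{12}^{2} + R_1\big(\Delta p_{12} + J_{p,a}\, b_a\big)\\
v_2 &= v_1 + g\, t_{12} + R_1\big(\Delta v_{12} + J_{v,a}\, b_a\big)
\end{aligned}

and the stacked least-squares problem is solved through the normal equations $J^{\top}J\,x = J^{\top}e$ (the cv::solve call above).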

Related functions: initialization

MonocularInitialization: monocular initialization

  • Computes a fundamental matrix and a homography in parallel, selects one of the two models, and recovers the relative pose between the first two frames plus a point cloud, yielding the initial matches, relative motion, and initial MapPoints.
  • This check runs on every tracking iteration until initialization succeeds.
void Tracking::MonocularInitialization()
{
	// If the monocular initializer has not been created yet, create it
	// NOTICE only the monocular pipeline uses this
    if(!mpInitializer)	
    {
        // Set Reference Frame: only if the current frame has more than 100 keypoints
        if(mCurrentFrame.mvKeys.size()>100)
        {
			// The current frame becomes both the initial frame and the last frame
            mInitialFrame = Frame(mCurrentFrame);
            mLastFrame = Frame(mCurrentFrame);
            // mvbPrevMatched is sized for the best case, where every keypoint is tracked;
            // since the current frame will become the "last frame", its keypoints are stored here
            mvbPrevMatched.resize(mCurrentFrame.mvKeysUn.size());
            for(size_t i=0; i<mCurrentFrame.mvKeysUn.size(); i++)
                mvbPrevMatched[i]=mCurrentFrame.mvKeysUn[i].pt;
			// Discard any stale initializer
            if(mpInitializer)
                delete mpInitializer;
            // Construct the initializer from the current frame; sigma: 1.0, iterations: 200
            mpInitializer =  new Initializer(mCurrentFrame,1.0,200);
            // -1 means no match; this vector stores the indices of matched points
            fill(mvIniMatches.begin(),mvIniMatches.end(),-1);
			// If using an IMU, reset the IMU preintegration state
            if (mSensor == System::IMU_MONOCULAR)
            {
                if(mpImuPreintegratedFromLastKF)
                {
                    delete mpImuPreintegratedFromLastKF;
                }
                mpImuPreintegratedFromLastKF = new IMU::Preintegrated(IMU::Bias(),*mpImuCalib);
                mCurrentFrame.mpImuPreintegrated = mpImuPreintegratedFromLastKF;

            }
            return;
        }
    }
    else  // mpInitializer already exists
    {	// Initialization can continue only if the current frame also has more than 100 keypoints and,
    	// in IMU-monocular mode, less than 1 s separates mInitialFrame from mLastFrame
        if (((int)mCurrentFrame.mvKeys.size()<=100)||((mSensor == System::IMU_MONOCULAR)&&(mLastFrame.mTimeStamp-mInitialFrame.mTimeStamp>1.0)))
        {	// Otherwise reset mpInitializer to uninitialized and return
            delete mpInitializer;
            mpInitializer = static_cast<Initializer*>(NULL);
            fill(mvIniMatches.begin(),mvIniMatches.end(),-1);
            return;
        }
		
		// Step 3: find matching keypoint pairs between mInitialFrame and mCurrentFrame
        // Find correspondences: construct the ORB matcher
        ORBmatcher matcher(0.9 // ratio threshold between the best and second-best match scores
        				,true); // check keypoint orientation
        // Step 4: perform the keypoint matching for monocular initialization
        int nmatches = matcher.SearchForInitialization(
        	mInitialFrame,mCurrentFrame // reference frame and current frame for initialization
        	,mvbPrevMatched, // keypoints extracted in the initialization reference frame
        	mvIniMatches, // stores the match relations
        	100); // search window size

        // Too few matches between the two frames: restart initialization
        if(nmatches<100)
        {
            delete mpInitializer;
            mpInitializer = static_cast<Initializer*>(NULL);
            fill(mvIniMatches.begin(),mvIniMatches.end(),-1);
            return;
        }

        cv::Mat Rcw; // Current Camera Rotation
        cv::Mat tcw; // Current Camera Translation
        vector<bool> vbTriangulated; // Triangulated Correspondences (mvIniMatches)
		
		// ReconstructWithTwoViews() is a virtual function; the implementation depends on the camera model in use
		// Params: undistorted keypoints of frame 0 and frame 1, matches mvIniMatches (-1 when unmatched),
		// output pose Rcw + tcw of frame 1, 3D coordinates of the frame-0 keypoints, and triangulation success flags
        if(mpCamera->ReconstructWithTwoViews(mInitialFrame.mvKeysUn,mCurrentFrame.mvKeysUn,mvIniMatches,Rcw,tcw,mvIniP3D,vbTriangulated))
        {	// Discard matches that failed triangulation and decrement the match count
            for(size_t i=0, iend=mvIniMatches.size(); i<iend;i++)
            {
                if(mvIniMatches[i]>=0 && !vbTriangulated[i])
                {
                    mvIniMatches[i]=-1;
                    nmatches--;
                }
            }

            // Set Frame Poses
            mInitialFrame.SetPose(cv::Mat::eye(4,4,CV_32F));
            cv::Mat Tcw = cv::Mat::eye(4,4,CV_32F);
            Rcw.copyTo(Tcw.rowRange(0,3).colRange(0,3));
            tcw.copyTo(Tcw.rowRange(0,3).col(3));
            mCurrentFrame.SetPose(Tcw);
			// Create the initial monocular map
            CreateInitialMapMonocular();

        }
    }
}

CreateInitialMapMonocular: create the initial monocular map

  • Wraps the points triangulated during monocular initialization into MapPoints and builds the initial map (the scale normalization applied partway through is summarized after the code).
void Tracking::CreateInitialMapMonocular() 
{
    // Create KeyFrames: both the reference frame and the current frame of monocular initialization become keyframes
    KeyFrame* pKFini = new KeyFrame(mInitialFrame,mpAtlas->GetCurrentMap(),mpKeyFrameDB);
    KeyFrame* pKFcur = new KeyFrame(mCurrentFrame,mpAtlas->GetCurrentMap(),mpKeyFrameDB);
	
	// If using an IMU, reset the IMU preintegration of the initial keyframe
    if(mSensor == System::IMU_MONOCULAR)
        pKFini->mpImuPreintegrated = (IMU::Preintegrated*)(NULL);
	
	// Compute bag-of-words for both keyframes
    pKFini->ComputeBoW();
    pKFcur->ComputeBoW();

    // Insert KFs in the map (atlas)
    mpAtlas->AddKeyFrame(pKFini);
    mpAtlas->AddKeyFrame(pKFcur);

    for(size_t i=0; i<mvIniMatches.size();i++)
    {
    	// Skip unmatched points
        if(mvIniMatches[i]<0)
            continue;

        // Create MapPoint
        cv::Mat worldPos(mvIniP3D[i]);
        MapPoint* pMP = new MapPoint(worldPos,pKFcur,mpAtlas->GetCurrentMap());
		
		// Add the MapPoint to both keyframes
        pKFini->AddMapPoint(pMP,i);
        pKFcur->AddMapPoint(pMP,mvIniMatches[i]);
		
		// Register the observations in the MapPoint
        pMP->AddObservation(pKFini,i);
        pMP->AddObservation(pKFcur,mvIniMatches[i]);

        pMP->ComputeDistinctiveDescriptors();
        pMP->UpdateNormalAndDepth();

        //Fill Current Frame structure
        mCurrentFrame.mvpMapPoints[mvIniMatches[i]] = pMP;
        mCurrentFrame.mvbOutlier[mvIniMatches[i]] = false;

        //Add to Map
        mpAtlas->AddMapPoint(pMP);
    }


    // Update Connections
    pKFini->UpdateConnections();
    pKFcur->UpdateConnections();

    std::set<MapPoint*> sMPs;
    sMPs = pKFini->GetMapPoints();

    // Bundle Adjustment
    Verbose::PrintMess("New Map created with " + to_string(mpAtlas->MapPointsInMap()) + " points", Verbose::VERBOSITY_QUIET);
    Optimizer::GlobalBundleAdjustemnt(mpAtlas->GetCurrentMap(),20);

    pKFcur->PrintPointDistribution();

    float medianDepth = pKFini->ComputeSceneMedianDepth(2);
    float invMedianDepth;
    if(mSensor == System::IMU_MONOCULAR)
        invMedianDepth = 4.0f/medianDepth; // 4.0f
    else
        invMedianDepth = 1.0f/medianDepth;

    if(medianDepth<0 || pKFcur->TrackedMapPoints(1)<50) // TODO Check, originally 100 tracks
    {
        Verbose::PrintMess("Wrong initialization, reseting...", Verbose::VERBOSITY_NORMAL);
        mpSystem->ResetActiveMap();
        return;
    }

    // Scale initial baseline
    cv::Mat Tc2w = pKFcur->GetPose();
    Tc2w.col(3).rowRange(0,3) = Tc2w.col(3).rowRange(0,3)*invMedianDepth;
    pKFcur->SetPose(Tc2w);

    // Scale points
    vector<MapPoint*> vpAllMapPoints = pKFini->GetMapPointMatches();
    for(size_t iMP=0; iMP<vpAllMapPoints.size(); iMP++)
    {
        if(vpAllMapPoints[iMP])
        {
            MapPoint* pMP = vpAllMapPoints[iMP];
            pMP->SetWorldPos(pMP->GetWorldPos()*invMedianDepth);
            pMP->UpdateNormalAndDepth();
        }
    }

    if (mSensor == System::IMU_MONOCULAR)
    {
        pKFcur->mPrevKF = pKFini;
        pKFini->mNextKF = pKFcur;
        pKFcur->mpImuPreintegrated = mpImuPreintegratedFromLastKF;

        mpImuPreintegratedFromLastKF = new IMU::Preintegrated(pKFcur->mpImuPreintegrated->GetUpdatedBias(),pKFcur->mImuCalib);
    }


    mpLocalMapper->InsertKeyFrame(pKFini);
    mpLocalMapper->InsertKeyFrame(pKFcur);
    mpLocalMapper->mFirstTs=pKFcur->mTimeStamp;

    mCurrentFrame.SetPose(pKFcur->GetPose());
    mnLastKeyFrameId=mCurrentFrame.mnId;
    mpLastKeyFrame = pKFcur;
    mnLastRelocFrameId = mInitialFrame.mnId;

    mvpLocalKeyFrames.push_back(pKFcur);
    mvpLocalKeyFrames.push_back(pKFini);
    mvpLocalMapPoints=mpAtlas->GetAllMapPoints();
    mpReferenceKF = pKFcur;
    mCurrentFrame.mpReferenceKF = pKFcur;

    // Compute here initial velocity
    vector<KeyFrame*> vKFs = mpAtlas->GetAllKeyFrames();

    cv::Mat deltaT = vKFs.back()->GetPose()*vKFs.front()->GetPoseInverse();
    mVelocity = cv::Mat();
    Eigen::Vector3d phi = LogSO3(Converter::toMatrix3d(deltaT.rowRange(0,3).colRange(0,3)));


    double aux = (mCurrentFrame.mTimeStamp-mLastFrame.mTimeStamp)/(mCurrentFrame.mTimeStamp-mInitialFrame.mTimeStamp);
    phi *= aux;

    mLastFrame = Frame(mCurrentFrame);

    mpAtlas->SetReferenceMapPoints(mvpLocalMapPoints);

    mpMapDrawer->SetCurrentCameraPose(pKFcur->GetPose());

    mpAtlas->GetCurrentMap()->mvpKeyFrameOrigins.push_back(pKFini);

    mState=OK;

    initID = pKFcur->mnId;
}
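
A note on the scale normalization in the middle of this function: monocular reconstruction is only determined up to scale, so the whole initial map is rescaled by the inverse median depth of the first keyframe. Reading the factor 4 in the IMU branch as setting a larger target baseline to help IMU initialization is an interpretation, not stated in the source:

$$
s = \frac{k}{\operatorname{median}_i z_i},\qquad
k=\begin{cases}4 & \text{IMU-monocular}\\ 1 & \text{otherwise}\end{cases},\qquad
t_{c_2 w}\leftarrow s\,t_{c_2 w},\quad X_j \leftarrow s\,X_j
$$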

Related functions: tracking

  • Constant-velocity model tracking, reference-keyframe tracking, and local-map tracking

TrackWithMotionModel: constant-velocity tracking

When the IMU is usable, predict the pose with the IMU; otherwise track by matching against the last frame's map points.

Tracks the last frame's MapPoints under a constant-velocity motion model (a sketch of the velocity bookkeeping follows the code):
1. In the non-monocular cases, generate some new temporary MapPoints for the last frame
2. Project the last frame's MapPoints onto the current image plane and match within a window around each projection
3. Estimate the current frame's pose from the matches
4. Reject mismatches based on the estimated pose
- see V-B Initial Pose Estimation From Previous Frame
bool Tracking::TrackWithMotionModel()
{	
	// Construct the ORB matcher
    ORBmatcher matcher(0.9,true);

	// Step 1: update the last frame's pose from its reference keyframe and, when the conditions
	// are met, create new temporary MapPoints from depth values to ease tracking
    // Update last frame pose according to its reference keyframe
    // Create "visual odometry" points if in Localization Mode
    UpdateLastFrame();
	
	// IMU already initialized, and no recent relocalization
    if (mpAtlas->isImuInitialized() && (mCurrentFrame.mnId>mnLastRelocFrameId+mnFramesToResetIMU))
    {
        // Predict state with IMU if it is initialized and it doesn't need reset
        PredictStateIMU();
        return true;
    }
    else
    {	// Constant-velocity model: assume the relative motion between these two frames
    	// equals the relative motion between the previous two frames
        mCurrentFrame.SetPose(mVelocity*mLastFrame.mTcw);
    }

    fill(mCurrentFrame.mvpMapPoints.begin(),mCurrentFrame.mvpMapPoints.end(),static_cast<MapPoint*>(NULL));

    // Project points seen in the previous frame;
    // search-window threshold: 7 for stereo, 15 otherwise
    int th;

    if(mSensor==System::STEREO)
        th=7;
    else
        th=15;
        
	// Step 2: track the last frame's MapPoints under the constant-velocity model,
	// narrowing the match search to windows around the projected 3D points
    int nmatches = matcher.SearchByProjection(mCurrentFrame,mLastFrame,th,mSensor==System::MONOCULAR || mSensor==System::IMU_MONOCULAR);

    // If few matches, retry with a wider search window
    if(nmatches<20)
    {
        Verbose::PrintMess("Not enough matches, wider window search!!", Verbose::VERBOSITY_NORMAL);
        fill(mCurrentFrame.mvpMapPoints.begin(),mCurrentFrame.mvpMapPoints.end(),static_cast<MapPoint*>(NULL));

        nmatches = matcher.SearchByProjection(mCurrentFrame,mLastFrame,2*th,mSensor==System::MONOCULAR || mSensor==System::IMU_MONOCULAR);
        Verbose::PrintMess("Matches with wider search: " + to_string(nmatches), Verbose::VERBOSITY_NORMAL);

    }
	
	/// Still fewer than 20 matches even after widening: return true if using an IMU, false otherwise
    if(nmatches<20)
    {
        Verbose::PrintMess("Not enough matches!!", Verbose::VERBOSITY_NORMAL);
        if (mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO)
            return true;
        else
            return false;
    }

	// Step 3: optimize the pose
    // Optimize frame pose with all matches
    Optimizer::PoseOptimization(&mCurrentFrame);

    // Discard outliers
    // Step 4: after pose optimization, drop the outlier mvpMapPoints
    int nmatchesMap = 0;
    for(int i =0; i<mCurrentFrame.N; i++)
    {
        if(mCurrentFrame.mvpMapPoints[i])
        {
            if(mCurrentFrame.mvbOutlier[i])
            {
                MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];

                mCurrentFrame.mvpMapPoints[i]=static_cast<MapPoint*>(NULL);
                mCurrentFrame.mvbOutlier[i]=false;
                if(i < mCurrentFrame.Nleft){
                    pMP->mbTrackInView = false;
                }
                else{
                    pMP->mbTrackInViewR = false;
                }
                pMP->mnLastFrameSeen = mCurrentFrame.mnId;
                nmatches--;
            }
            else if(mCurrentFrame.mvpMapPoints[i]->Observations()>0)
                nmatchesMap++;
        }
    }
	
	// Localization-only mode: success requires more than 20 matches
    if(mbOnlyTracking)
    {
        mbVO = nmatchesMap<10;
        return nmatches>20;
    }
	
	// SLAM mode: succeed if using an IMU, or if at least 10 map-point matches remain
    if (mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO)
        return true;
    else
        return nmatchesMap>=10;
}
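
For context, mVelocity is the camera motion between the previous two frames. It is maintained in Track() roughly as in this sketch, based on the corresponding lines elsewhere in Tracking.cc; treat it as illustrative rather than a verbatim quote:

// After a successful pose estimate, cache T_{c_k c_{k-1}} as the "velocity":
// mVelocity = T_{c_k w} * T_{w c_{k-1}}
cv::Mat LastTwc = cv::Mat::eye(4,4,CV_32F);
mLastFrame.GetRotationInverse().copyTo(LastTwc.rowRange(0,3).colRange(0,3)); // R_{wc}
mLastFrame.GetCameraCenter().copyTo(LastTwc.rowRange(0,3).col(3));           // t_{wc}
mVelocity = mCurrentFrame.mTcw*LastTwc;

// The prediction at the top of TrackWithMotionModel() then chains it once more:
// T_{c_{k+1} w} ≈ mVelocity * T_{c_k w}
mCurrentFrame.SetPose(mVelocity*mLastFrame.mTcw);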

UpdateLastFrame: update the last Frame

  1. Update the pose of the most recent frame (a note on the pose chaining follows the code)
  2. When the conditions are met, temporarily generate new MapPoints for the last frame
    1. These map points are not added to the Map; they are deleted at the end of tracking
    2. Projecting the last frame's MapPoints into the current frame narrows the match search, speeding up keypoint matching between the two frames
void Tracking::UpdateLastFrame()
{
	/// Step 1: update the pose of the most recent frame
    // Update pose according to reference keyframe
    KeyFrame* pRef = mLastFrame.mpReferenceKF;
    cv::Mat Tlr = mlRelativeFramePoses.back();
    mLastFrame.SetPose(Tlr*pRef->GetPose());

	// Nothing more to do if the last frame was a keyframe, the sensor is monocular / monocular-inertial, or we are in SLAM (not localization-only) mode
    if(mnLastKeyFrameId==mLastFrame.mnId || mSensor==System::MONOCULAR || mSensor==System::IMU_MONOCULAR || !mbOnlyTracking)
        return;

    // Create "visual odometry" MapPoints
    // We sort points according to their measured depth by the stereo/RGB-D sensor
	// Step 2.1: collect the last frame's keypoints that have a valid depth
    vector<pair<float,int> > vDepthIdx;
    vDepthIdx.reserve(mLastFrame.N);
    for(int i=0; i<mLastFrame.N;i++)
    {
        float z = mLastFrame.mvDepth[i];
        if(z>0)
        {
            vDepthIdx.push_back(make_pair(z,i));
        }
    }

    if(vDepthIdx.empty())
        return;
	// Step 2.2: sort by depth, ascending
    sort(vDepthIdx.begin(),vDepthIdx.end());

    // We insert all close points (depth<mThDepth)
    // If less than 100 close points, we insert the 100 closest ones.
    // Step 2.3: wrap the nearby points into MapPoints
    int nPoints = 0;
    for(size_t j=0; j<vDepthIdx.size();j++)
    {
        int i = vDepthIdx[j].second;

        bool bCreateNew = false;

        MapPoint* pMP = mLastFrame.mvpMapPoints[i];
        if(!pMP)
            bCreateNew = true;
        else if(pMP->Observations()<1)
        {
            bCreateNew = true;
        }

        if(bCreateNew)
        {
        	// These MapPoints are created without the usual property setup:
	            // a. AddMapPoint,
	            // b. AddObservation,
	            // c. ComputeDistinctiveDescriptors,
	            // d. UpdateNormalAndDepth;
	            // they exist only to improve the tracking success rate for stereo and RGB-D
            cv::Mat x3D = mLastFrame.UnprojectStereo(i);
            MapPoint* pNewMP = new MapPoint(x3D,mpAtlas->GetCurrentMap(),&mLastFrame,i);

            mLastFrame.mvpMapPoints[i]=pNewMP;
			// Mark as a temporary MapPoint; all of these are deleted before CreateNewKeyFrame
            mlpTemporalPoints.push_back(pNewMP);
            nPoints++;
        }
        else
        {
            nPoints++;
        }

        if(vDepthIdx[j].first>mThDepth && nPoints>100)
        {
            break;
        }
    }
}
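
Why step 1 works: mlRelativeFramePoses stores, for every frame, its pose relative to its reference keyframe, so the last frame's absolute pose can always be re-derived from the keyframe's current (possibly re-optimized) pose:

$$ T_{lw} = T_{lr}\, T_{rw} $$

where $T_{lr}$ is the stored relative pose and $T_{rw}$ is the reference keyframe's pose. This propagates any keyframe corrections made by local mapping or loop closing into the last frame before it is used for matching.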

TrackReferenceKeyFrame: track the reference keyframe

  1. Compute the current frame's bag of words, assigning its keypoints to nodes at a specific vocabulary level
  2. Match descriptors that fall under the same node
  3. Estimate the current frame's pose from the matches
  4. Reject mismatches based on the estimated pose
bool Tracking::TrackReferenceKeyFrame()
{
    // Compute Bag of Words vector
    // Step 1: convert the current frame's descriptors into a BoW vector
    mCurrentFrame.ComputeBoW();

    // We perform first an ORB matching with the reference keyframe
    // If enough matches are found we setup a PnP solver
    ORBmatcher matcher(0.7,true);
    vector<MapPoint*> vpMapPointMatches;

	// Step 2: use BoW to speed up keypoint matching between the current frame and the reference keyframe
	// The match relations are maintained through MapPoints
    int nmatches = matcher.SearchByBoW(mpReferenceKF,mCurrentFrame,vpMapPointMatches);

    if(nmatches<15)	// fewer than 15 matches: return false
    {
        cout << "TRACK_REF_KF: Less than 15 matches!!\n";
        return false;
    }
	
	// Step 3: use the last frame's pose as the initial guess for the current frame's pose
    mCurrentFrame.mvpMapPoints = vpMapPointMatches;
    mCurrentFrame.SetPose(mLastFrame.mTcw);

    //mCurrentFrame.PrintPointDistribution();


    // cout << " TrackReferenceKeyFrame mLastFrame.mTcw:  " << mLastFrame.mTcw << endl;
    // Step 4: obtain the pose by optimizing the 3D-2D reprojection error
    Optimizer::PoseOptimization(&mCurrentFrame);

    // Discard outliers
    // Step 5: drop the outlier matches (MapPoints) after optimization
    int nmatchesMap = 0;
    for(int i =0; i<mCurrentFrame.N; i++)
    {
        if(mCurrentFrame.mvpMapPoints[i])
        {
            if(mCurrentFrame.mvbOutlier[i])
            {
                MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];

                mCurrentFrame.mvpMapPoints[i]=static_cast<MapPoint*>(NULL);
                mCurrentFrame.mvbOutlier[i]=false;
                if(i < mCurrentFrame.Nleft){
                    pMP->mbTrackInView = false;
                }
                else{
                    pMP->mbTrackInViewR = false;
                }
                pMP->mbTrackInView = false; // NOTE: redundant after the branch above (kept as in the original source)
                pMP->mnLastFrameSeen = mCurrentFrame.mnId;
                nmatches--;
            }
            else if(mCurrentFrame.mvpMapPoints[i]->Observations()>0)
                nmatchesMap++;
        }
    }

    // TODO check these conditions
    if (mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO)
        return true;
    else
        return nmatchesMap>=10;
}

TrackLocalMap: track the local map

  • At this point we have an estimate of the camera pose and some map points tracked in the frame.
    The local map is retrieved and searched for additional point matches.
  • Steps:
    • Update the local keyframes mvpLocalKeyFrames and local map points mvpLocalMapPoints
    • Search the local map for MapPoints matching the current frame
    • After updating all local MapPoints, optimize the pose again
      • This distinguishes between optimizing against the last frame and against the last keyframe
    • Decide whether tracking succeeded
bool Tracking::TrackLocalMap()
{

    // We have an estimation of the camera pose and some map points tracked in the frame.
    // We retrieve the local map and try to find matches to points in the local map.
    mTrackedFr++;

#ifdef REGISTER_TIMES
    std::chrono::steady_clock::time_point time_StartLMUpdate = std::chrono::steady_clock::now();
#endif
	/// Step 1: update the local keyframes mvpLocalKeyFrames and local map points mvpLocalMapPoints
    UpdateLocalMap();
#ifdef REGISTER_TIMES
    std::chrono::steady_clock::time_point time_StartSearchLP = std::chrono::steady_clock::now();

    double timeUpdatedLM_ms = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(time_StartSearchLP - time_StartLMUpdate).count();
    vdUpdatedLM_ms.push_back(timeUpdatedLM_ms);
#endif
	// Step 2: search the local map for MapPoints matching the current frame
    SearchLocalPoints();
#ifdef REGISTER_TIMES
    std::chrono::steady_clock::time_point time_StartPoseOpt = std::chrono::steady_clock::now();

    double timeSearchLP_ms = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(time_StartPoseOpt - time_StartSearchLP).count();
    vdSearchLP_ms.push_back(timeSearchLP_ms);
#endif

    // TODO check outliers before PO (pose optimization)
    // Only counts the current frame's map points and how many of them are outliers
    int aux1 = 0, aux2=0;
    for(int i=0; i<mCurrentFrame.N; i++)
        if( mCurrentFrame.mvpMapPoints[i])
        {
            aux1++;
            if(mCurrentFrame.mvbOutlier[i])
                aux2++;
        }

    int inliers;
    
    // Step 3: after updating all local MapPoints, optimize the pose again
    if (!mpAtlas->isImuInitialized()) // IMU not yet initialized: visual-only pose optimization
        Optimizer::PoseOptimization(&mCurrentFrame);
    else
    {	// Recently relocalized: also run visual-only pose optimization
        if(mCurrentFrame.mnId<=mnLastRelocFrameId+mnFramesToResetIMU)
        {
            Verbose::PrintMess("TLM: PoseOptimization ", Verbose::VERBOSITY_DEBUG);
            Optimizer::PoseOptimization(&mCurrentFrame);
        }
        else
        {	 // IMU in use, no recent relocalization: visual-inertial optimization against the last frame or last keyframe
            if(!mbMapUpdated)
            {
                Verbose::PrintMess("TLM: PoseInertialOptimizationLastFrame ", Verbose::VERBOSITY_DEBUG);
                inliers = Optimizer::PoseInertialOptimizationLastFrame(&mCurrentFrame); // , !mpLastKeyFrame->GetMap()->GetIniertialBA1());
            }
            else
            {
                Verbose::PrintMess("TLM: PoseInertialOptimizationLastKeyFrame ", Verbose::VERBOSITY_DEBUG);
                inliers = Optimizer::PoseInertialOptimizationLastKeyFrame(&mCurrentFrame); // , !mpLastKeyFrame->GetMap()->GetIniertialBA1());
            }
        }
    }
#ifdef REGISTER_TIMES
    std::chrono::steady_clock::time_point time_EndPoseOpt = std::chrono::steady_clock::now();

    double timePoseOpt_ms = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(time_EndPoseOpt - time_StartPoseOpt).count();
    vdPoseOpt_ms.push_back(timePoseOpt_ms);
#endif

    vnKeyFramesLM.push_back(mvpLocalKeyFrames.size());
    vnMapPointsLM.push_back(mvpLocalMapPoints.size());

    aux1 = 0, aux2 = 0;
    for(int i=0; i<mCurrentFrame.N; i++)
        if( mCurrentFrame.mvpMapPoints[i])
        {
            aux1++;
            if(mCurrentFrame.mvbOutlier[i])
                aux2++;
        }

    mnMatchesInliers = 0;

    // Update MapPoints statistics
    for(int i=0; i<mCurrentFrame.N; i++)
    {
        if(mCurrentFrame.mvpMapPoints[i])
        {
            if(!mCurrentFrame.mvbOutlier[i])
            {
                mCurrentFrame.mvpMapPoints[i]->IncreaseFound();
                if(!mbOnlyTracking)
                {
                    if(mCurrentFrame.mvpMapPoints[i]->Observations()>0)
                        mnMatchesInliers++;
                }
                else
                    mnMatchesInliers++;
            }
            else if(mSensor==System::STEREO)
                mCurrentFrame.mvpMapPoints[i] = static_cast<MapPoint*>(NULL);
        }
    }

    // Decide if the tracking was successful
    // More restrictive if there was a relocalization recently
    // Record how many MapPoints the current frame tracked, for assessing tracking quality
    mpLocalMapper->mnMatchesInliers=mnMatchesInliers;

	// Step 4: decide whether tracking succeeded
    if(mCurrentFrame.mnId<mnLastRelocFrameId+mMaxFrames && mnMatchesInliers<50)
        return false;

	// If recently lost, tracking counts as successful with more than 10 inliers
    if((mnMatchesInliers>10)&&(mState==RECENTLY_LOST))
        return true;
	
	// With an IMU, 15 inliers count as success; otherwise 30 are required
    if (mSensor == System::IMU_MONOCULAR)
    {
        if(mnMatchesInliers<15)
        {
            return false;
        }
        else
            return true;
    }
    else if (mSensor == System::IMU_STEREO)
    {
        if(mnMatchesInliers<15)
        {
            return false;
        }
        else
            return true;
    }
    else
    {
        if(mnMatchesInliers<30)
            return false;
        else
            return true;
    }
}

Related functions: keyframes

Relocalization

Steps:

  1. Using the BoW mapping of the current frame's keypoints, find candidate keyframes vpCandidateKFs in the current map
  2. Run ORB matching (ratio 0.75) against each candidate; discard dissimilar candidates (<15 matches)
  3. Set up a PnP solver for each remaining candidate (the RANSAC parameters are annotated after the code)
  4. Evaluate each candidate with a few RANSAC iterations; discard those that fail
  5. Optimize the pose with PoseOptimization, obtaining nGood inlier matches
    1. If nGood < 50, search more widely by projection; if the total reaches 50, run PoseOptimization again
    2. If after re-optimization the inliers land in (30, 50), search and optimize once more with a narrower window
  6. Return success once the final inlier count reaches 50
bool Tracking::Relocalization()
{
    Verbose::PrintMess("Starting relocalization", Verbose::VERBOSITY_NORMAL);
    // Compute Bag of Words Vector
    /// Step 1: compute the BoW mapping of the current frame's keypoints
    mCurrentFrame.ComputeBoW();

    // Relocalization is performed when tracking is lost
    // Track Lost: Query KeyFrame Database for keyframe candidates for relocalisation
    /// Step 2: find candidate keyframes similar to the current frame
    vector<KeyFrame*> vpCandidateKFs = mpKeyFrameDB->DetectRelocalizationCandidates(&mCurrentFrame, mpAtlas->GetCurrentMap());
	// No candidates: return false
    if(vpCandidateKFs.empty()) {
        Verbose::PrintMess("There are not candidates", Verbose::VERBOSITY_NORMAL);
        return false;
    }

    const int nKFs = vpCandidateKFs.size();

    // We perform first an ORB matching with each candidate
    // If enough matches are found we setup a PnP solver
    ORBmatcher matcher(0.75,true);

	// Pre-allocate one PnP solver per candidate
    vector<MLPnPsolver*> vpMLPnPsolvers;
    vpMLPnPsolvers.resize(nKFs);

    vector<vector<MapPoint*> > vvpMapPointMatches;
    vvpMapPointMatches.resize(nKFs);

    vector<bool> vbDiscarded;
    vbDiscarded.resize(nKFs);

    int nCandidates=0;

	/// Step 3: initial candidate screening
    for(int i=0; i<nKFs; i++)
    {
        KeyFrame* pKF = vpCandidateKFs[i];
        if(pKF->isBad())
            vbDiscarded[i] = true;
        else
        {	// Match the current frame against the candidate via BoW
            int nmatches = matcher.SearchByBoW(pKF,mCurrentFrame,vvpMapPointMatches[i]);
            // Fewer than 15 matches: discard this candidate
            if(nmatches<15)
            {
                vbDiscarded[i] = true;
                continue;
            }
            else
            {	// Set up a PnP solver between the current frame and the candidate's map points
                MLPnPsolver* pSolver = new MLPnPsolver(mCurrentFrame,vvpMapPointMatches[i]);
                pSolver->SetRansacParameters(0.99,10,300,6,0.5,5.991);  //This solver needs at least 6 points
                vpMLPnPsolvers[i] = pSolver;
                nCandidates++;
            }
        }
    }

    // Alternatively perform some iterations of P4P RANSAC
    // until we find a camera pose supported by enough inliers
    bool bMatch = false; 
    ORBmatcher matcher2(0.9,true);     /// looser ratio (0.9): accept more matches in the projection searches below
	
	// While candidates remain and no match has succeeded
    while(nCandidates>0 && !bMatch)
    {
        for(int i=0; i<nKFs; i++)
        {
            if(vbDiscarded[i])
                continue;

            // Perform 5 RANSAC iterations
            vector<bool> vbInliers;
            int nInliers;
            bool bNoMore;

            MLPnPsolver* pSolver = vpMLPnPsolvers[i];
            cv::Mat Tcw = pSolver->iterate(5,bNoMore,vbInliers,nInliers);

            // If RANSAC reaches the maximum number of iterations, discard the keyframe
            if(bNoMore)
            {
                vbDiscarded[i]=true;
                nCandidates--;
            }

            // If a Camera Pose is computed, optimize
            if(!Tcw.empty())
            {
                Tcw.copyTo(mCurrentFrame.mTcw);

                set<MapPoint*> sFound;

                const int np = vbInliers.size();

                for(int j=0; j<np; j++)
                {
                    if(vbInliers[j])
                    {
                        mCurrentFrame.mvpMapPoints[j]=vvpMapPointMatches[i][j];
                        sFound.insert(vvpMapPointMatches[i][j]);
                    }
                    else
                        mCurrentFrame.mvpMapPoints[j]=NULL;
                }
                
                // Step 5: optimize the pose with PoseOptimization
                int nGood = Optimizer::PoseOptimization(&mCurrentFrame);

                if(nGood<10)
                    continue;

                for(int io =0; io<mCurrentFrame.N; io++)
                    if(mCurrentFrame.mvbOutlier[io])
                        mCurrentFrame.mvpMapPoints[io]=static_cast<MapPoint*>(NULL);

                // If few inliers, search by projection in a coarse window and optimize again
                if(nGood<50)
                {
                    int nadditional =matcher2.SearchByProjection(mCurrentFrame,vpCandidateKFs[i],sFound,10,100);

                    if(nadditional+nGood>=50)
                    {
                        nGood = Optimizer::PoseOptimization(&mCurrentFrame);

                        // If many inliers but still not enough, search by projection again in a narrower window
                        // the camera has been already optimized with many points
                        if(nGood>30 && nGood<50)
                        {
                            sFound.clear();
                            for(int ip =0; ip<mCurrentFrame.N; ip++)
                                if(mCurrentFrame.mvpMapPoints[ip])
                                    sFound.insert(mCurrentFrame.mvpMapPoints[ip]);
                            nadditional =matcher2.SearchByProjection(mCurrentFrame,vpCandidateKFs[i],sFound,3,64);

                            // Final optimization
                            if(nGood+nadditional>=50)
                            {
                                nGood = Optimizer::PoseOptimization(&mCurrentFrame);

                                for(int io =0; io<mCurrentFrame.N; io++)
                                    if(mCurrentFrame.mvbOutlier[io])
                                        mCurrentFrame.mvpMapPoints[io]=NULL;
                            }
                        }
                    }
                }


                // If the pose is supported by enough inliers stop ransacs and continue
                if(nGood>=50)
                {
                    bMatch = true;
                    break;
                }
            }
        }
    }

    if(!bMatch)
    {
        return false;
    }
    else
    {
        mnLastRelocFrameId = mCurrentFrame.mnId;
        cout << "Relocalized!!" << endl;
        return true;
    }

}
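
A note on the MLPnP configuration in step 3: matching the positional arguments against MLPnPsolver::SetRansacParameters, the call reads as below. The parameter names follow my reading of the solver's header; treat them as an assumption.

pSolver->SetRansacParameters(
    0.99,   // probability: desired confidence of sampling an all-inlier subset
    10,     // minInliers: minimum inliers to accept a hypothesis
    300,    // maxIterations: cap on RANSAC iterations
    6,      // minSet: points per hypothesis (MLPnP needs at least 6)
    0.5,    // epsilon: assumed inlier ratio, used to derive the iteration count
    5.991); // th2: chi-square threshold (2 DoF, 95%) on squared reprojection error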

NeedNewKeyFrame: keyframe selection policy

  • Cases where no new keyframe is needed:

    • In localization-only mode, no new keyframes are added
    • If local mapping is frozen by loop closing, do not insert keyframes
    • If the IMU is initializing, no keyframe is needed
    • If there are already many keyframes and less than 1 s has passed since the last relocalization, do not insert
  • Obtain the number of MapPoints tracked in the reference keyframe (nRefMatches) and whether the local mapper is busy (bLocalMappingIdle)

  • For stereo or RGB-D sensors, count how many close MapPoints are tracked and how many could potentially be created

  • Decision conditions for inserting a keyframe:

    • bNeedToInsertClose: fewer than 100 close points tracked and more than 70 close points could be created
    • Condition 1a: more than "MaxFrames" have passed since the last keyframe insertion
    • Condition 1b: more than "MinFrames" have passed and local mapping is idle
    • Condition 1c (stereo/RGB-D only): tracking is weak, i.e. the inlier ratio drops below 0.25 or bNeedToInsertClose holds
    • Condition 2: more than 15 inliers, and the inlier ratio is below a threshold or bNeedToInsertClose holds; this ratio threshold is higher than the one in c1c
    • Condition 3: time threshold; with an IMU, more than 500 ms since the last keyframe
    • Condition 4: monocular-inertial, and inliers in (15, 75) or recently lost
  • If (((c1a||c1b||c1c) && c2)||c3||c4) holds and the keyframe queue is not too congested, insert a keyframe

    • Tracking does not insert keyframes directly: they first go into mlNewKeyFrames, then the local mapper pops them one by one into mspKeyFrames
bool Tracking::NeedNewKeyFrame()
{
///< Step 1: rule out the cases where no new keyframe is needed
// In localization-only mode, no new keyframes are added
if(mbOnlyTracking)
    return false;

// If Local Mapping is frozen by a Loop Closure do not insert keyframes
if(mpLocalMapper->isStopped() || mpLocalMapper->stopRequested())
{
    return false;
}

// Return false if the IMU is initializing
if (mpLocalMapper->IsInitializing())
    return false;

const int nKFs = mpAtlas->KeyFramesInMap();
// Do not insert keyframes if not enough frames have passed from last relocalisation
// i.e. many keyframes already exist and the last relocalization was within mMaxFrames frames (~1 s)
if(mCurrentFrame.mnId<mnLastRelocFrameId+mMaxFrames && nKFs>mMaxFrames)
{
    return false;
}


///< Step 2: get the number of MapPoints tracked in the reference keyframe and whether the local mapper is busy
int nMinObs = 3;	
if(nKFs<=2)
    nMinObs=2;

// MapPoints tracked in the reference keyframe with at least nMinObs observations
int nRefMatches = mpReferenceKF->TrackedMapPoints(nMinObs);

// Does Local Mapping accept keyframes?
bool bLocalMappingIdle = mpLocalMapper->AcceptKeyFrames();

// Check how many "close" points are being tracked and how many could be potentially created.
int nNonTrackedClose = 0;
int nTrackedClose= 0;

///< Step 3: for stereo or RGB-D sensors, count the close MapPoints that are tracked and those that could still be created
if(mSensor!=System::MONOCULAR && mSensor!=System::IMU_MONOCULAR)
{
    int N = (mCurrentFrame.Nleft == -1) ? mCurrentFrame.N : mCurrentFrame.Nleft;
    for(int i =0; i<N; i++)
    {
        if(mCurrentFrame.mvDepth[i]>0 && mCurrentFrame.mvDepth[i]<mThDepth)
        {
            if(mCurrentFrame.mvpMapPoints[i] && !mCurrentFrame.mvbOutlier[i])
                nTrackedClose++;
            else
                nNonTrackedClose++;

        }
    }
}


///< Step 4: decide whether a keyframe needs to be inserted

// Insert if fewer than 100 close points are tracked while more than 70 could still be created
bool bNeedToInsertClose;
bNeedToInsertClose = (nTrackedClose<100) && (nNonTrackedClose>70);

// Thresholds
float thRefRatio = 0.75f;
if(nKFs<2)
    thRefRatio = 0.4f;

if(mSensor==System::MONOCULAR)
    thRefRatio = 0.9f;

if(mpCamera2) thRefRatio = 0.75f;

if(mSensor==System::IMU_MONOCULAR)
{
    if(mnMatchesInliers>350) // Points tracked from the local map
        thRefRatio = 0.75f;
    else
        thRefRatio = 0.90f;
}

// Condition 1a: More than "MaxFrames" have passed from last keyframe insertion
const bool c1a = mCurrentFrame.mnId>=mnLastKeyFrameId+mMaxFrames;
// Condition 1b: More than "MinFrames" have passed and Local Mapping is idle
const bool c1b = ((mCurrentFrame.mnId>=mnLastKeyFrameId+mMinFrames) && bLocalMappingIdle);
// Condition 1c: tracking is weak (stereo/RGB-D only: inlier ratio below 0.25, or close points need inserting)
const bool c1c = mSensor!=System::MONOCULAR && mSensor!=System::IMU_MONOCULAR && mSensor!=System::IMU_STEREO && (mnMatchesInliers<nRefMatches*0.25 || bNeedToInsertClose) ;
// Condition 2: Few tracked points compared to reference keyframe. Lots of visual odometry compared to map matches.
// (more than 15 inliers, and the inlier ratio is below thRefRatio or close points need inserting; this threshold is higher than in c1c)
const bool c2 = (((mnMatchesInliers<nRefMatches*thRefRatio || bNeedToInsertClose)) && mnMatchesInliers>15);

// Condition 3: time threshold; with an IMU, more than 500 ms since the last keyframe
bool c3 = false;
if(mpLastKeyFrame)
{	
    // 500 ms when an IMU is present
    if (mSensor==System::IMU_MONOCULAR)
    {
        if ((mCurrentFrame.mTimeStamp-mpLastKeyFrame->mTimeStamp)>=0.5)
            c3 = true;
    }
    else if (mSensor==System::IMU_STEREO)
    {
        if ((mCurrentFrame.mTimeStamp-mpLastKeyFrame->mTimeStamp)>=0.5)
            c3 = true;
    }
}

bool c4 = false;
/// Condition 4: monocular-inertial, and inliers in (15, 75) or recently lost
if ((((mnMatchesInliers<75) && (mnMatchesInliers>15)) || mState==RECENTLY_LOST) && ((mSensor == System::IMU_MONOCULAR))) // MODIFICATION_2, originally ((((mnMatchesInliers<75) && (mnMatchesInliers>15)) || mState==RECENTLY_LOST) && ((mSensor == System::IMU_MONOCULAR)))
    c4=true;
else
    c4=false;

if(((c1a||c1b||c1c) && c2)||c3 ||c4)
{
    // If the mapping accepts keyframes, insert keyframe.
    // Otherwise send a signal to interrupt BA
    if(bLocalMappingIdle)
    {
        return true;
    }
    else
    {
        mpLocalMapper->InterruptBA();
        if(mSensor!=System::MONOCULAR  && mSensor!=System::IMU_MONOCULAR)
        {	// Do not let too many keyframes pile up in the queue:
            // tracking does not insert keyframes directly; they go into mlNewKeyFrames first, and the local mapper pops them one by one into mspKeyFrames
            if(mpLocalMapper->KeyframesInQueue()<3)
                return true;
            else
                return false;
        }
        else
            return false;
    }
}
else
    return false;
}

CreateNewKeyFrame: create a new keyframe

  • If local mapping is initializing, or setting local mapping to not-stop fails, return
  • Build a keyframe from the current frame:
    • Construct the keyframe from the current frame
    • Set the new keyframe as the current frame's reference keyframe
    • If a last keyframe exists, link it: the current keyframe's previous KF + the previous KF's next KF
    • Create a new preintegration object starting from this keyframe, with its bias + extrinsics
  • If the sensor is neither monocular nor monocular-inertial, generate new MapPoints for the current frame:
    • Compute mRcw, mtcw, mRwc, mOw from Tcw
    • Traverse the keypoints and collect those with depth below the threshold
    • Sort the keypoints by depth, ascending
    • Wrap the nearby points into MapPoints
  • Update state:
    • insert the keyframe into the local mapper
    • local mapper SetNotStop(false)
    • record the new keyframe as the last keyframe
void Tracking::CreateNewKeyFrame()
{	
    // Step 0: return if local mapping is initializing, or setting it to not-stop fails
    if(mpLocalMapper->IsInitializing())
        return;
    if(!mpLocalMapper->SetNotStop(true))
        return;
	
    // Step 1: construct a keyframe from the current frame
    KeyFrame* pKF = new KeyFrame(mCurrentFrame,mpAtlas->GetCurrentMap(),mpKeyFrameDB);

    if(mpAtlas->isImuInitialized()) // flag whether the IMU is in use
        pKF->bImu = true;
	
    pKF->SetNewBias(mCurrentFrame.mImuBias); // set the bias
    // Step 2: set the new keyframe as the current frame's reference keyframe
    mpReferenceKF = pKF;
    mCurrentFrame.mpReferenceKF = pKF;

    // If a last keyframe exists, link the previous/next keyframe pointers
    if(mpLastKeyFrame)
    {
        pKF->mPrevKF = mpLastKeyFrame;
        mpLastKeyFrame->mNextKF = pKF;
    }
    else
        Verbose::PrintMess("No last KF in KF creation!!", Verbose::VERBOSITY_NORMAL);

    // Reset preintegration from last KF (Create new object)
    // Create a new preintegration object starting from this keyframe, with its bias + extrinsics
    if (mSensor == System::IMU_MONOCULAR || mSensor == System::IMU_STEREO)
    {
        mpImuPreintegratedFromLastKF = new IMU::Preintegrated(pKF->GetImuBias(),pKF->mImuCalib);
    }
	
    // Step 3: if the sensor is neither monocular nor monocular-inertial, generate new MapPoints for the current frame
    if(mSensor!=System::MONOCULAR && mSensor != System::IMU_MONOCULAR) // TODO check if include imu_stereo
    {	
        // Compute mRcw, mtcw, mRwc, mOw from Tcw
        mCurrentFrame.UpdatePoseMatrices();
        // cout << "create new MPs" << endl;
        // We sort points by the measured depth by the stereo/RGBD sensor.
        // We create all those MapPoints whose depth < mThDepth.
        // If there are less than 100 close points we create the 100 closest.
        int maxPoint = 100;
        if(mSensor == System::IMU_STEREO)
            maxPoint = 100;
		
        // Step 3.1: collect the current frame's keypoints that have a valid depth
        vector<pair<float,int> > vDepthIdx;
        int N = (mCurrentFrame.Nleft != -1) ? mCurrentFrame.Nleft : mCurrentFrame.N;
        vDepthIdx.reserve(mCurrentFrame.N);
        for(int i=0; i<N; i++)
        {
            float z = mCurrentFrame.mvDepth[i];
            if(z>0)
            {
                vDepthIdx.push_back(make_pair(z,i));
            }
        }

        
        if(!vDepthIdx.empty())
        {	
            // Step 3.2: sort by depth, ascending
            sort(vDepthIdx.begin(),vDepthIdx.end());

            // Step 3.3: wrap the nearby points into MapPoints
            int nPoints = 0;
            for(size_t j=0; j<vDepthIdx.size();j++)
            {
                int i = vDepthIdx[j].second;

                bool bCreateNew = false;

                MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
                if(!pMP)
                    bCreateNew = true;
                else if(pMP->Observations()<1)
                {
                    bCreateNew = true;
                    mCurrentFrame.mvpMapPoints[i] = static_cast<MapPoint*>(NULL);
                }

                if(bCreateNew)
                {
                    cv::Mat x3D;

                    if(mCurrentFrame.Nleft == -1){
                        x3D = mCurrentFrame.UnprojectStereo(i);
                    }
                    else{
                        x3D = mCurrentFrame.UnprojectStereoFishEye(i);
                    }

                    MapPoint* pNewMP = new MapPoint(x3D,pKF,mpAtlas->GetCurrentMap());
                    pNewMP->AddObservation(pKF,i);

                    //Check if it is a stereo observation in order to not
                    //duplicate mappoints
                    if(mCurrentFrame.Nleft != -1 && mCurrentFrame.mvLeftToRightMatch[i] >= 0){
                        mCurrentFrame.mvpMapPoints[mCurrentFrame.Nleft + mCurrentFrame.mvLeftToRightMatch[i]]=pNewMP;
                        pNewMP->AddObservation(pKF,mCurrentFrame.Nleft + mCurrentFrame.mvLeftToRightMatch[i]);
                        pKF->AddMapPoint(pNewMP,mCurrentFrame.Nleft + mCurrentFrame.mvLeftToRightMatch[i]);
                    }
					// These property-setting calls are required after every MapPoint creation
                    pKF->AddMapPoint(pNewMP,i);
                    pNewMP->ComputeDistinctiveDescriptors();
                    pNewMP->UpdateNormalAndDepth();
                    mpAtlas->AddMapPoint(pNewMP);

                    mCurrentFrame.mvpMapPoints[i]=pNewMP;
                    nPoints++;
                }
                else
                {
                    nPoints++;
                }
                
                // This controls how dense the map point cloud is for stereo and RGB-D.
                // Changing these values just to densify the map is unwise,
                // because these MapPoints participate in the whole SLAM pipeline afterwards
                if(vDepthIdx[j].first>mThDepth && nPoints>maxPoint)
                {
                    break;
                }
            }

            Verbose::PrintMess("new mps for stereo KF: " + to_string(nPoints), Verbose::VERBOSITY_NORMAL);

        }
    }

	
    mpLocalMapper->InsertKeyFrame(pKF);

    mpLocalMapper->SetNotStop(false);

    mnLastKeyFrameId = mCurrentFrame.mnId;
    mpLastKeyFrame = pKF;
    //cout  << "end creating new KF" << endl;
}