(1)VINS-Fusion的代码前端 (笔记)

VINS-Fusion的前端我们可以将其主要分为三个线程去进行解读,分别为:
主线程main():主要订阅图像、imu数据等。
特征匹配线程sync_process():主要进行光流等操作。
测量线程processMeasurements():主要进行IMU预积分,特征点处理,VIO初始化等,这个线程包括了VIO的后端优化

相应地按照顺序从主程序看起:

主线程

int main(int argc, char **argv)
{
	......
	......
    readParameters(config_file);
    estimator.setParameter();//这一步在估计器中设置完参数后,开启processMeasurements()线程

#ifdef EIGEN_DONT_PARALLELIZE
    ROS_DEBUG("EIGEN_DONT_PARALLELIZE");
#endif

    ROS_WARN("waiting for image and imu...");

    registerPub(n);

    ros::Subscriber sub_imu;
    //1 、若使用IMU,订阅imu_callback话题
    if(USE_IMU)
    {
        sub_imu = n.subscribe(IMU_TOPIC, 2000, imu_callback, ros::TransportHints().tcpNoDelay());
    }
    //2、订阅特征点处理话题  
    ros::Subscriber sub_feature = n.subscribe("/feature_tracker/feature", 2000, feature_callback);
    //3、订阅左相机信息话题
    ros::Subscriber sub_img0 = n.subscribe(IMAGE0_TOPIC, 100, img0_callback);
    //4、定义右相机订阅方的变量
    ros::Subscriber sub_img1;

    //如果是双目模式
    if(STEREO)
    {
        //订阅右相机信息话题
        sub_img1 = n.subscribe(IMAGE1_TOPIC, 100, img1_callback);
    }

    ros::Subscriber sub_restart = n.subscribe("/vins_restart", 100, restart_callback);
    //接受模式切换
    ros::Subscriber sub_imu_switch = n.subscribe("/vins_imu_switch", 100, imu_switch_callback);
    ros::Subscriber sub_cam_switch = n.subscribe("/vins_cam_switch", 100, cam_switch_callback);

    创建sync_thread线程,指向sync_process,进行特征匹配了
    std::thread sync_thread{sync_process};
    ros::spin();

    return 0;
}

特征匹配


// Image-synchronization loop, run in its own thread (spawned from main()).
// NOTE: this snippet is truncated by the article — the mono branch and the
// closing braces of the while(1) loop / function are not shown here.
void sync_process()
{
    while(1)
    {
        // Stereo case: extract a pair of images with (nearly) identical timestamps
        if(STEREO)
        {
            cv::Mat image0, image1;
            std_msgs::Header header;
            double time = 0;
            m_buf.lock();
            // Simple timestamp synchronization between the two image queues
            if (!img0_buf.empty() && !img1_buf.empty())
            {

                // front(): reference to the oldest buffered message in each queue
                double time0 = img0_buf.front()->header.stamp.toSec();
                double time1 = img1_buf.front()->header.stamp.toSec();

                // Compare the two front timestamps: only when they differ by less than
                // 0.003 s are both frames accepted (converted via getImageFromMsg into
                // image0/image1 and handed to estimator.inputImage); otherwise the
                // older frame is discarded.
                // 0.003s sync tolerance
                if(time0 < time1 - 0.003)
                {
                    // Left frame is too old relative to the right one -> drop it.
                    // queue::pop() returns void here; it just removes the front
                    // element that failed the time check.
                    img0_buf.pop();
                    printf("throw img0\n");
                }
                else if(time0 > time1 + 0.003)
                {
                    img1_buf.pop();
                    printf("throw img1\n");
                }
                // Timestamps match within tolerance: consume both frames
                else
                {
                    time = img0_buf.front()->header.stamp.toSec();
                    header = img0_buf.front()->header;
                    // getImageFromMsg(): cv_bridge conversion from the ROS image
                    // message to a cv::Mat, so OpenCV's LK optical-flow API can be
                    // called on it directly
                    image0 = getImageFromMsg(img0_buf.front());
                    img0_buf.pop();
                    image1 = getImageFromMsg(img1_buf.front());
                    img1_buf.pop();
                    //printf("find img0 and img1\n");
                }
            }
            m_buf.unlock();

            if(!image0.empty())
                // Hand the synchronized pair to the estimator (starts optical flow)
                estimator.inputImage(time, image0, image1);
        }

进入到inputImage()函数中

// Feed one frame (mono or stereo) into the Estimator.
// Runs featureTracker.trackImage to obtain featureFrame, optionally publishes the
// tracking visualization, then either queues the features for the
// processMeasurements() thread (downsampled 2:1 in multi-thread mode) or
// processes them synchronously right away.
void Estimator::inputImage(double t, const cv::Mat &_img, const cv::Mat &_img1)
{
    inputImageCnt++;

    // feature_id -> list of (camera_id, [x, y, z, u, v, vx, vy]) observations
    map<int, vector<pair<int, Eigen::Matrix<double, 7, 1>>>> featureFrame;
    TicToc featureTrackerTime;

    // Optical-flow tracking: mono when no right image is supplied, stereo otherwise.
    featureFrame = _img1.empty() ? featureTracker.trackImage(t, _img)
                                 : featureTracker.trackImage(t, _img, _img1);
    //printf("featureTracker time: %f\n", featureTrackerTime.toc());

    // Publish the annotated tracking image for visualization.
    if (SHOW_TRACK)
    {
        pubTrackImage(featureTracker.getTrackImage(), t);
    }

    if (MULTIPLE_THREAD)
    {
        // Multi-thread mode: keep only every second frame (2:1 downsampling);
        // the processMeasurements() thread consumes featureBuf on its own.
        if (inputImageCnt % 2 == 0)
        {
            mBuf.lock();
            featureBuf.push(make_pair(t, featureFrame));
            mBuf.unlock();
        }
    }
    else
    {
        // Single-thread mode: queue the frame and process it immediately.
        mBuf.lock();
        featureBuf.push(make_pair(t, featureFrame));
        mBuf.unlock();

        TicToc processTime;
        processMeasurements();
        printf("process time: %f\n", processTime.toc());
    }
}
// Track features from the previous frame into the current one with LK optical
// flow, top up with newly detected corners, undistort to the normalized camera
// plane and compute per-feature velocities; in stereo mode, additionally match
// left -> right with a bidirectional-flow consistency check.
// Returns featureFrame: feature_id -> vector of (camera_id, [x,y,z, u,v, vx,vy]).
// NOTE: this snippet is truncated by the article — "return featureFrame;" and the
// closing brace of the function are not shown here.
map<int, vector<pair<int, Eigen::Matrix<double, 7, 1>>>> FeatureTracker::trackImage(double _cur_time, const cv::Mat &_img, const cv::Mat &_img1)
{
    TicToc t_r;
    cur_time = _cur_time;
    cur_img = _img;// left image
    row = cur_img.rows;
    col = cur_img.cols;
    cv::Mat rightImg = _img1;// right image

    cur_pts.clear();// cur_pts holds the current left-camera feature points; clear before tracking
    // Optical-flow tracking against the previous frame
    if (prev_pts.size() > 0) // only runs from the second frame onwards
    {
        TicToc t_o;
        vector<uchar> status;
        vector<float> err;


        // If predicted positions exist, use them as the initial guess: LK tracking
        // is a highly non-linear optimization, so a good prior allows fewer pyramid
        // levels while keeping high tracking accuracy.
        if(hasPrediction)
        {
            cur_pts = predict_pts;
            // OpenCV sparse LK optical flow with maxLevel = 1 (2-level pyramid),
            // seeded with the predicted points (OPTFLOW_USE_INITIAL_FLOW)
            cv::calcOpticalFlowPyrLK(prev_img, cur_img, prev_pts, cur_pts, status, err, cv::Size(21, 21), 1, 
            cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 0.01), cv::OPTFLOW_USE_INITIAL_FLOW);
            
            int succ_num = 0;
            for (size_t i = 0; i < status.size(); i++)
            {
                if (status[i])
                    succ_num++;
            }
            // Fewer than 10 successful matches: retry with a 4-level pyramid (maxLevel = 3)
            if (succ_num < 10)
               cv::calcOpticalFlowPyrLK(prev_img, cur_img, prev_pts, cur_pts, status, err, cv::Size(21, 21), 3);
        }
        else// no good prior available: plain tracking with a 4-level pyramid
            cv::calcOpticalFlowPyrLK(prev_img, cur_img, prev_pts, cur_pts, status, err, cv::Size(21, 21), 3);

        // Reverse optical-flow check
        if(FLOW_BACK)
        {
            vector<uchar> reverse_status;
            vector<cv::Point2f> reverse_pts = prev_pts;
            // Swap the two images and run the flow backwards
            cv::calcOpticalFlowPyrLK(cur_img, prev_img, cur_pts, reverse_pts, reverse_status, err, cv::Size(21, 21), 1, 
            cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 0.01), cv::OPTFLOW_USE_INITIAL_FLOW);
            //cv::calcOpticalFlowPyrLK(cur_img, prev_img, cur_pts, reverse_pts, reverse_status, err, cv::Size(21, 21), 3); 
            for(size_t i = 0; i < status.size(); i++)
            {
                // Keep a track only when forward AND backward flow both succeed and
                // the round-trip point lands within 0.5 px of the previous-frame
                // point (improves tracking precision)
                if(status[i] && reverse_status[i] && distance(prev_pts[i], reverse_pts[i]) <= 0.5)
                {
                    status[i] = 1;
                }
                else
                    status[i] = 0;
            }
        }
        
        // Reject points tracked outside the image border
        for (int i = 0; i < int(cur_pts.size()); i++)
            if (status[i] && !inBorder(cur_pts[i]))
                status[i] = 0;           
        // Remove the rejected points according to the status flags
        reduceVector(prev_pts, status);
        reduceVector(cur_pts, status);
        reduceVector(ids, status);
        reduceVector(track_cnt, status);
        ROS_DEBUG("temporal optical flow costs: %fms", t_o.toc());
        //printf("track cnt %d\n", (int)ids.size());
    }

    // Every surviving feature has now been tracked for one more frame
    for (auto &n : track_cnt)
        n++;
    // Detect new feature points to keep the count topped up to MAX_CNT
    if (1)
    {
        //rejectWithF();
        ROS_DEBUG("set mask begins");
        TicToc t_m;
        // Build a mask that suppresses detection near existing features
        setMask();
        ROS_DEBUG("set mask costs %fms", t_m.toc());

        ROS_DEBUG("detect feature begins");
        TicToc t_t;
        int n_max_cnt = MAX_CNT - static_cast<int>(cur_pts.size());
        if (n_max_cnt > 0)
        {
            if(mask.empty())
                cout << "mask is empty " << endl;
            if (mask.type() != CV_8UC1)
                cout << "mask type wrong " << endl;
            cv::goodFeaturesToTrack(cur_img, n_pts, MAX_CNT - cur_pts.size(), 0.01, MIN_DIST, mask);
        }
        else
            n_pts.clear();
        ROS_DEBUG("detect feature costs: %f ms", t_t.toc());

        // Register the newly detected points with fresh ids and track count 1
        for (auto &p : n_pts)
        {
            cur_pts.push_back(p);
            ids.push_back(n_id++);
            track_cnt.push_back(1);
        }
        //printf("feature cnt after add %d\n", (int)ids.size());
    }

    // Undistort all current points to the normalized camera plane (camera0 model)
    cur_un_pts = undistortedPts(cur_pts, m_camera[0]);
    // Per-feature velocity on the normalized plane (used for time-delay estimation)
    pts_velocity = ptsVelocity(ids, cur_un_pts, cur_un_pts_map, prev_un_pts_map);

    
    // Stereo handling: track the left-camera features into the right image
    if(!_img1.empty() && stereo_cam)
    {
        // Reset the right-camera state
        ids_right.clear();
        cur_right_pts.clear();
        cur_un_right_pts.clear();
        right_pts_velocity.clear();
        cur_un_right_pts_map.clear();
        if(!cur_pts.empty())
        {
            //printf("stereo image; track feature on right image\n");
            vector<cv::Point2f> reverseLeftPts;
            vector<uchar> status, statusRightLeft;
            vector<float> err;
            // cur left ---- cur right
            // LK optical flow from the left image to the right image
            cv::calcOpticalFlowPyrLK(cur_img, rightImg, cur_pts, cur_right_pts, status, err, cv::Size(21, 21), 3);
            // reverse check cur right ---- cur left
            // Bidirectional consistency check, same idea as the temporal FLOW_BACK above
            if(FLOW_BACK)
            {
                cv::calcOpticalFlowPyrLK(rightImg, cur_img, cur_right_pts, reverseLeftPts, statusRightLeft, err, cv::Size(21, 21), 3);
                for(size_t i = 0; i < status.size(); i++)
                {
                    if(status[i] && statusRightLeft[i] && inBorder(cur_right_pts[i]) && distance(cur_pts[i], reverseLeftPts[i]) <= 0.5)
                        status[i] = 1;
                    else
                        status[i] = 0;
                }
            }

            ids_right = ids;
            reduceVector(cur_right_pts, status);
            reduceVector(ids_right, status);
            // only keep left-right pts
            /*
            reduceVector(cur_pts, status);
            reduceVector(ids, status);
            reduceVector(track_cnt, status);
            reduceVector(cur_un_pts, status);
            reduceVector(pts_velocity, status);
            */
            // Undistort right-camera points (camera1 model) and compute velocities
            cur_un_right_pts = undistortedPts(cur_right_pts, m_camera[1]);
            right_pts_velocity = ptsVelocity(ids_right, cur_un_right_pts, cur_un_right_pts_map, prev_un_right_pts_map);
        }
        prev_un_right_pts_map = cur_un_right_pts_map;
    }
    // Draw the tracked points for visualization
    if(SHOW_TRACK)
        drawTrack(cur_img, rightImg, ids, cur_pts, cur_right_pts, prevLeftPtsMap);
    // Roll the current-frame state over to "previous" for the next call
    prev_img = cur_img;
    prev_pts = cur_pts;
    prev_un_pts = cur_un_pts;
    prev_un_pts_map = cur_un_pts_map;
    prev_time = cur_time;
    hasPrediction = false;

    prevLeftPtsMap.clear();
    for(size_t i = 0; i < cur_pts.size(); i++)
        prevLeftPtsMap[ids[i]] = cur_pts[i];
    // Assemble the per-frame feature map:
    //   map<feature_id, vector<pair<camera_id, observation>>>
    // where camera_id is 0 (left) or 1 (right) and the observation vector is
    // [x, y, z, u, v, vx, vy].
    map<int, vector<pair<int, Eigen::Matrix<double, 7, 1>>>> featureFrame;
    for (size_t i = 0; i < ids.size(); i++)
    {
        // Feature-point id — the key of the outer map (note: NOT an image-frame id)
        int feature_id = ids[i];
        // Normalized camera coordinates (z fixed to 1)
        double x, y ,z;
        x = cur_un_pts[i].x;
        y = cur_un_pts[i].y;
        z = 1;
        // Pixel coordinates
        double p_u, p_v;
        p_u = cur_pts[i].x;
        p_v = cur_pts[i].y;
        // Left camera: camera_id = 0
        int camera_id = 0;
        // Feature-point velocity on the normalized plane
        double velocity_x, velocity_y;
        velocity_x = pts_velocity[i].x;
        velocity_y = pts_velocity[i].y;

        Eigen::Matrix<double, 7, 1> xyz_uv_velocity;
        xyz_uv_velocity << x, y, z, p_u, p_v, velocity_x, velocity_y;
        featureFrame[feature_id].emplace_back(camera_id,  xyz_uv_velocity);
    }
    // In stereo mode also store the right-camera observations
    if (!_img1.empty() && stereo_cam)
    {
        for (size_t i = 0; i < ids_right.size(); i++)
        {
            int feature_id = ids_right[i];
            double x, y ,z;
            x = cur_un_right_pts[i].x;
            y = cur_un_right_pts[i].y;
            z = 1;
            double p_u, p_v;
            p_u = cur_right_pts[i].x;
            p_v = cur_right_pts[i].y;
            // Right camera: camera_id = 1
            int camera_id = 1;
            double velocity_x, velocity_y;
            velocity_x = right_pts_velocity[i].x;
            velocity_y = right_pts_velocity[i].y;

            Eigen::Matrix<double, 7, 1> xyz_uv_velocity;
            xyz_uv_velocity << x, y, z, p_u, p_v, velocity_x, velocity_y;
            // Only features that also found a right-camera match are appended here
            featureFrame[feature_id].emplace_back(camera_id,  xyz_uv_velocity);
        }
    }

这里我们就可以知道了Eigen::Matrix<double, 7, 1>容器存放的是什么了

 map<int, vector<pair<int, Eigen::Matrix<double, 7, 1>>>> featureFrame;
 //特征点的id(并非图像帧的id):对应外层map的第一个int(key)
 //左目的相机id:  0代表左目 1代表右目,对应第二个int
 Matrix<double, 7, 1> xyz_uv_velocity;
 xyz_uv_velocity << x, y, z, p_u, p_v, velocity_x, velocity_y;
 //归一化的相机坐标(x,y,z=1)
 //图像的像素坐标(u,v)
 //特征点在x,y方向上的速度

测量线程

// Measurement-processing loop: consumes featureBuf, performs IMU pre-integration
// and feature/image processing (including VIO initialization and the back-end
// optimization triggered inside processImage), then publishes all output topics.
// In MULTIPLE_THREAD mode this runs forever in its own thread; otherwise it is
// called once per frame from inputImage() and handles a single measurement.
void Estimator::processMeasurements()
{
    while (1)
    {
        //printf("process measurments\n");
        /*
        Layout of `feature`:
          double                                         frame timestamp
          map key (int)                                  feature id
          vector<pair<int, Eigen::Matrix<double,7,1>>>   camera id (0 = left,
                                                         1 = right) + observation
                                                         [x, y, z, u, v, vx, vy]
        */
        pair<double, map<int, vector<pair<int, Eigen::Matrix<double, 7, 1> > > > > feature;
        vector<pair<double, Eigen::Vector3d>> accVector, gyrVector;
        if(!featureBuf.empty())
        {
            // Take the oldest optical-flow result.
            // NOTE(review): front() is read here without holding mBuf — presumably
            // safe because this thread is the only consumer, but worth confirming.
            feature = featureBuf.front();
            curTime = feature.first + td;// td: time offset added to the image stamp (camera-IMU sync, presumably)
            while(1)// spin until IMU data covering this frame is available
            {
                // When IMU is not used, or IMU data up to this timestamp has
                // arrived, proceed; otherwise wait 5 ms and retry
                if ((!USE_IMU  || IMUAvailable(feature.first + td)))
                    break;
                else
                {
                    printf("wait for imu ... \n");
                    if (! MULTIPLE_THREAD)
                        return;
                    std::chrono::milliseconds dura(5);
                    std::this_thread::sleep_for(dura);
                }
            }
            mBuf.lock();
            if(USE_IMU)
                // Collect all IMU samples between the previous and current image time
                getIMUInterval(prevTime, curTime, accVector, gyrVector);

            featureBuf.pop();
            mBuf.unlock();

            if(USE_IMU)
            {
                if(!initFirstPoseFlag)
                    // First frame: estimate a rough initial attitude from the IMU
                    // gravity direction (yaw is simply set to zero)
                    initFirstIMUPose(accVector);
                for(size_t i = 0; i < accVector.size(); i++)
                {
                    // Integration step: clip the first/last interval to the
                    // [prevTime, curTime] window
                    double dt;
                    if(i == 0)
                        dt = accVector[i].first - prevTime;
                    else if (i == accVector.size() - 1)
                        dt = curTime - accVector[i - 1].first;
                    else
                        dt = accVector[i].first - accVector[i - 1].first;
                    // IMU pre-integration
                    processIMU(accVector[i].first, dt, accVector[i].second, gyrVector[i].second);
                }
            }
            mProcess.lock();
            // Image-frame processing and odometry initialization
            processImage(feature.second, feature.first);
            prevTime = curTime;

            printStatistics(*this, 0);

            std_msgs::Header header;
            header.frame_id = "world";
            header.stamp = ros::Time(feature.first);

            // Publish odometry, keyframe poses, camera pose, point cloud and TF
            pubOdometry(*this, header);
            pubKeyPoses(*this, header);
            pubCameraPose(*this, header);
            pubPointCloud(*this, header);
            pubKeyframe(*this);
            pubTF(*this, header);
            mProcess.unlock();
        }

        if (! MULTIPLE_THREAD)
            break;

        std::chrono::milliseconds dura(2);
        std::this_thread::sleep_for(dura);
    }
}

  • 1
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

徽州SLAM李

如果觉得不错,打赏一下哦,嘻

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值