Vins-Mono Paper && Coding 7(2). pose_graph: Loop Detection && Relocalization

I. Keyframe processing flow

void PoseGraph::addKeyFrame(KeyFrame* cur_kf, bool flag_detect_loop)

1. Shift to base frame

Transform the current frame's pose into the drift-free world frame.

2. Loop detection

Detect loop closures between the current keyframe and historical keyframes.

3. Relocalization

Solve the relative pose between the current keyframe and the loop keyframe, and feed the relocalization result into the VIO nonlinear optimization.

Update the shift.

4. Add to the 4-DoF pose graph and the keyframe list

The PoseGraph::optimize4DoF thread keeps optimizing the pose graph in the background.

II. Shift to base frame

1. Steps

Transform the current sequence's poses into the world frame of the first (base) sequence.

2. Implementation

cur_kf->getVioPose(vio_P_cur, vio_R_cur);
vio_P_cur = w_r_vio * vio_P_cur + w_t_vio;
vio_R_cur = w_r_vio *  vio_R_cur;
cur_kf->updateVioPose(vio_P_cur, vio_R_cur);
cur_kf->index = global_index;
global_index++;
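
Here w_r_vio / w_t_vio are the rotation / translation that align the current sequence's VIO world frame with the world frame of the base sequence (identity / zero for the first sequence; in the VINS-Mono code they are updated from the shift computed in IV.(4) when a loop ties the current sequence to an earlier one). A worked restatement of the two lines above in transform notation:

T^w_cur = T^w_vio * T^vio_cur,  i.e.  R^w_cur = w_r_vio * R^vio_cur,  P^w_cur = w_r_vio * P^vio_cur + w_t_vio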

III. Loop detection

1. Implementation

Call the DBoW2 API (the database setup is sketched after the function below).

Match using the BRIEF descriptors of the corners re-extracted in the pose-graph node, and return the index of a historical frame that forms a loop closure with the current frame.

int PoseGraph::detectLoop(KeyFrame* keyframe, int frame_index)
{
    //first query; then add this frame into database!
    QueryResults ret;
    db.query(keyframe->brief_descriptors, ret, 4, frame_index - 50);
    db.add(keyframe->brief_descriptors);

    // ret holds the best matches; db.query only searches frames older than frame_index - 50
    // prefer the earliest (smallest-index) candidate, requiring Score > 0.015
    if (frame_index > 50)  {
        int min_index = -1;
        for (unsigned int i = 0; i < ret.size(); i++)  {
            if (min_index == -1 || (ret[i].Id < min_index && ret[i].Score > 0.015))
                min_index = ret[i].Id;
        }
        return min_index;
    }
    else
        return -1;
}
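
For reference, db above is a DBoW2 BriefDatabase built from a pre-trained BRIEF vocabulary. A minimal setup sketch, following the VINS-Mono pose-graph node (the vocabulary file name is the one shipped in support_files and may differ in your setup):

void PoseGraph::loadVocabulary(std::string voc_path)
{
    // voc_path typically points to support_files/brief_k10L6.bin
    voc = new BriefVocabulary(voc_path);
    // no direct index is needed; we only run place-recognition queries
    db.setVocabulary(*voc, false, 0);
}

In detectLoop above, db.query(keyframe->brief_descriptors, ret, 4, frame_index - 50) returns the 4 best matches among database entries whose id is at most frame_index - 50, so the 50 most recent keyframes are never proposed as loop candidates.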

2. Loop detection visualization


IV. Relocalization

bool KeyFrame::findConnection(KeyFrame* old_kf)

1. BRIEF descriptor matching

Current frame: window_brief_descriptors (descriptors of the corners tracked by the feature-tracker node).

Reference frame: brief_descriptors (descriptors of all corners extracted by the pose-graph node).

searchByBRIEFDes(matched_2d_old, matched_2d_old_norm, status, old_kf->brief_descriptors, old_kf->keypoints, old_kf->keypoints_norm);
void KeyFrame::searchByBRIEFDes(std::vector<cv::Point2f> &matched_2d_old,
                                std::vector<cv::Point2f> &matched_2d_old_norm,
                                std::vector<uchar> &status,
                                const std::vector<BRIEF::bitset> &descriptors_old,
                                const std::vector<cv::KeyPoint> &keypoints_old,
                                const std::vector<cv::KeyPoint> &keypoints_old_norm) {
    for(int i = 0; i < (int)window_brief_descriptors.size(); i++)
    {
        cv::Point2f pt(0.f, 0.f);
        cv::Point2f pt_norm(0.f, 0.f);
        // nearest-neighbour match by Hamming distance over BRIEF descriptors (sketch below)
        if (searchInAera(window_brief_descriptors[i], descriptors_old, keypoints_old, keypoints_old_norm, pt, pt_norm))
          status.push_back(1);
        else
          status.push_back(0);
        matched_2d_old.push_back(pt);
        matched_2d_old_norm.push_back(pt_norm);
    }

}
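
searchInAera is a brute-force nearest-neighbour search in Hamming distance over all BRIEF descriptors of the old keyframe. A sketch of its logic, following the VINS-Mono implementation (the acceptance threshold of roughly 80 differing bits is quoted from memory):

int KeyFrame::HammingDis(const BRIEF::bitset &a, const BRIEF::bitset &b)
{
    BRIEF::bitset xor_of_bitset = a ^ b;
    return xor_of_bitset.count();          // number of differing bits
}

bool KeyFrame::searchInAera(const BRIEF::bitset window_descriptor,
                            const std::vector<BRIEF::bitset> &descriptors_old,
                            const std::vector<cv::KeyPoint> &keypoints_old,
                            const std::vector<cv::KeyPoint> &keypoints_old_norm,
                            cv::Point2f &best_match,
                            cv::Point2f &best_match_norm)
{
    int bestDist = 128;
    int bestIndex = -1;
    for (int i = 0; i < (int)descriptors_old.size(); i++)
    {
        int dis = HammingDis(window_descriptor, descriptors_old[i]);
        if (dis < bestDist)
        {
            bestDist = dis;
            bestIndex = i;
        }
    }
    if (bestIndex != -1 && bestDist < 80)  // accept only sufficiently close matches
    {
        best_match = keypoints_old[bestIndex].pt;
        best_match_norm = keypoints_old_norm[bestIndex].pt;
        return true;
    }
    return false;
}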

2. PnP solving

matched_2d_old_norm: matched corners of the old keyframe in normalized camera-plane coordinates (K below is the identity).

matched_3d: 3D positions of the current frame's features in the VIO world frame.

PnP_T_old, PnP_R_old: the old keyframe's pose expressed in the VIO world frame.

void KeyFrame::PnPRANSAC(const vector<cv::Point2f> &matched_2d_old_norm,
                         const std::vector<cv::Point3f> &matched_3d,
                         std::vector<uchar> &status,
                         Eigen::Vector3d &PnP_T_old, Eigen::Matrix3d &PnP_R_old) {
    cv::Mat r, rvec, t, D, tmp_r;
    cv::Mat K = (cv::Mat_<double>(3, 3) << 1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0);
    Matrix3d R_inital;
    Vector3d P_inital;
   
    // apply the IMU-camera extrinsics (qic, tic): T_w_c = T_w_i * T_i_c
    Matrix3d R_w_c = origin_vio_R * qic;
    Vector3d T_w_c = origin_vio_T + origin_vio_R * tic;

    // initial guess (world -> camera) from the current VIO pose
    R_inital = R_w_c.inverse();
    P_inital = -(R_inital * T_w_c);
    cv::eigen2cv(R_inital, tmp_r);
    cv::Rodrigues(tmp_r, rvec);
    cv::eigen2cv(P_inital, t);

    cv::Mat inliers;
    
    // OpenCV RANSAC PnP; the 2D points are normalized (K = I), so the reprojection
    // threshold 10.0 / 460.0 corresponds to roughly 10 px at a focal length of 460
    solvePnPRansac(matched_3d, matched_2d_old_norm, K, D, rvec, t, true, 100, 10.0 / 460.0, 100, inliers);

    for (int i = 0; i < (int)matched_2d_old_norm.size(); i++)
        status.push_back(0);
    for( int i = 0; i < inliers.rows; i++) {
        int n = inliers.at<int>(i);
        status[n] = 1;
    }

    cv::Rodrigues(rvec, r);
    Matrix3d R_pnp, R_w_c_old;
    cv::cv2eigen(r, R_pnp);
    R_w_c_old = R_pnp.transpose();
    Vector3d T_pnp, T_w_c_old;
    cv::cv2eigen(t, T_pnp);
    T_w_c_old = R_w_c_old * (-T_pnp);

    // convert the camera pose back to the IMU (body) pose
    PnP_R_old = R_w_c_old * qic.transpose();
    PnP_T_old = T_w_c_old - PnP_R_old * tic;
}
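
The two coordinate conversions inside PnPRANSAC, written out as worked equations (qic / tic are the camera-to-IMU extrinsic rotation / translation, i.e. T_i_c; variable names follow the code above):

// world -> camera, used as the RANSAC initial guess (invert T_w_c = T_w_i * T_i_c):
R_w_c = origin_vio_R * qic              T_w_c = origin_vio_T + origin_vio_R * tic
R_inital = R_w_c^T                      P_inital = -R_w_c^T * T_w_c

// solvePnPRansac returns the world -> camera transform (R_pnp, T_pnp); invert it and
// strip the extrinsics again to recover the old keyframe's IMU pose (T_w_i = T_w_c * T_i_c^-1):
R_w_c_old = R_pnp^T                     T_w_c_old = -R_w_c_old * T_pnp
PnP_R_old = R_w_c_old * qic^T           PnP_T_old = T_w_c_old - PnP_R_old * tic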

3. Solving the relative transform

(1). Relative transform between the current frame and the loop frame

T_{I_jI_i}, where F_i is the current keyframe and F_j is the historical (loop) keyframe.

relative_t = PnP_R_old.transpose() * (origin_vio_T - PnP_T_old);
relative_q = PnP_R_old.transpose() * origin_vio_R;
relative_yaw = Utility::normalizeAngle(Utility::R2ypr(origin_vio_R).x() - Utility::R2ypr(PnP_R_old).x());
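
These three lines are just the composition of the two world poses: writing T^w_i = (origin_vio_R, origin_vio_T) for the current keyframe and T^w_j = (PnP_R_old, PnP_T_old) for the loop keyframe, both expressed in the current VIO world frame,

T_{I_jI_i} = (T^w_j)^-1 * T^w_i
  => relative_q = PnP_R_old^T * origin_vio_R
     relative_t = PnP_R_old^T * (origin_vio_T - PnP_T_old)

and relative_yaw is the yaw of the current pose minus the yaw of the loop pose, normalized to (-180°, 180°].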

(2). Updating the loop information

if (abs(relative_yaw) < 30.0 && relative_t.norm() < 20.0) {
    // accept the loop closure and record it
    has_loop = true;
    loop_index = old_kf->index;
    loop_info << relative_t.x(), relative_t.y(), relative_t.z(), relative_q.w(), relative_q.x(), relative_q.y(), relative_q.z(), relative_yaw;
    if (FAST_RELOCALIZATION) {
        ...... // publish the FAST_RELOCALIZATION msg, see (3) below
    }
    return true;
}

(3). Publish the reference keyframe's pose in the drift-free world frame (together with the matched points)

if (FAST_RELOCALIZATION) {
    sensor_msgs::PointCloud msg_match_points;
    msg_match_points.header.stamp = ros::Time(time_stamp);
    for (int i = 0; i < (int)matched_2d_old_norm.size(); i++) {
        geometry_msgs::Point32 p;
        p.x = matched_2d_old_norm[i].x;
        p.y = matched_2d_old_norm[i].y;
        p.z = matched_id[i];
        msg_match_points.points.push_back(p);
    }
    Eigen::Vector3d T = old_kf->T_w_i; // drift-free pose
    Eigen::Matrix3d R = old_kf->R_w_i;
    Quaterniond Q(R);
    sensor_msgs::ChannelFloat32 t_q_index;
    t_q_index.values.push_back(T.x());
    t_q_index.values.push_back(T.y());
    t_q_index.values.push_back(T.z());
    t_q_index.values.push_back(Q.w());
    t_q_index.values.push_back(Q.x());
    t_q_index.values.push_back(Q.y());
    t_q_index.values.push_back(Q.z());
    t_q_index.values.push_back(index);
    msg_match_points.channels.push_back(t_q_index);
    pub_match_points.publish(msg_match_points);
}

The vins_estimator node subscribes to this message, processes it, and adds the relocalization constraint to its sliding-window nonlinear optimization (see the sketch below).
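
A condensed sketch of the estimator side, based on the VINS-Mono estimator_node (relo_buf, relocalization_callback and Estimator::setReloFrame are names from that code base, quoted from memory, not verified here):

// buffer the incoming match-points message
void relocalization_callback(const sensor_msgs::PointCloudConstPtr &points_msg)
{
    m_buf.lock();
    relo_buf.push(points_msg);
    m_buf.unlock();
}

// in the measurement-processing loop: unpack the matched normalized points, the loop
// frame's pose (T, Q) and its index from the channels, then hand them to the estimator,
// which treats them as extra reprojection residuals in the sliding-window optimization
estimator.setReloFrame(frame_stamp, frame_index, match_points, relo_t, relo_r);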

(4). Computing the shift from the relocalization result

Vector3d w_P_old, w_P_cur, vio_P_cur;
Matrix3d w_R_old, w_R_cur, vio_R_cur;

old_kf->getVioPose(w_P_old, w_R_old);      // pose of the old (loop) keyframe, treated as the drift-free reference
cur_kf->getVioPose(vio_P_cur, vio_R_cur);  // pose of the current keyframe in the VIO world frame

Vector3d relative_t;
Quaterniond relative_q;
relative_t = cur_kf->getLoopRelativeT();
relative_q = (cur_kf->getLoopRelativeQ()).toRotationMatrix();
w_P_cur = w_R_old * relative_t + w_P_old;
w_R_cur = w_R_old * relative_q;  // current frame's pose in the drift-free world frame

// compute the shift (4-DoF: yaw + translation)
double shift_yaw;
Matrix3d shift_r;
Vector3d shift_t; 
shift_yaw = Utility::R2ypr(w_R_cur).x() - Utility::R2ypr(vio_R_cur).x();
shift_r = Utility::ypr2R(Vector3d(shift_yaw, 0, 0));
shift_t = w_P_cur - w_R_cur * vio_R_cur.transpose() * vio_P_cur; 
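
The full 6-DoF correction that maps the VIO pose onto the loop-corrected pose would be shift_R_full = w_R_cur * vio_R_cur^T with w_P_cur = shift_R_full * vio_P_cur + shift_t. Because accumulated drift only affects yaw and translation (roll and pitch are observable thanks to gravity), shift_r keeps just the yaw component, while shift_t is still taken from the full expression; restating the code above:

shift_yaw = yaw(w_R_cur) - yaw(vio_R_cur)
shift_r   = R(yaw = shift_yaw, pitch = 0, roll = 0)
shift_t   = w_P_cur - (w_R_cur * vio_R_cur^T) * vio_P_cur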

(5). Applying the drift to the current keyframe

Vector3d P;
Matrix3d R;
cur_kf->getVioPose(P, R);
P = r_drift * P + t_drift;
R = r_drift * R;
cur_kf->updatePose(P, R);

r_drift / t_drift are maintained by the 4-DoF pose-graph optimization.

V. Adding the keyframe to the pose graph and the keyframe list

m_optimize_buf.lock();
optimize_buf.push(cur_kf->index);
m_optimize_buf.unlock();

Reference: Vins-Mono Paper && Coding 7(3). pose_graph: 4-DoF pose graph

m_keyframelist.lock();
keyframelist.push_back(cur_kf);
m_keyframelist.unlock();