R2live代码:estimator_node
-
Step1:同步image信息和imu信息,打包为measurements变量;
measurements = getMeasurements();
-
Step2:遍历每一个measurements变量:
for (auto &measurement : measurements)
Step2.1:使用lidar和cam各自buf的时间戳来判断是否有可用的lidar帧和cam帧;
while (g_camera_lidar_queue.if_camera_can_process() == false)
Step2.2:同步lio和vio,并将lio的bias、重力向量、速度传递给vins系统;
sync_lio_to_vio(estimator);
Step2.3:VIO预积分
estimator.processIMU(dt, Vector3d(dx, dy, dz), Vector3d(rx, ry, rz));
Step2.4:VINS重定位
estimator.setReloFrame(frame_stamp, frame_index, match_points, relo_t, relo_r);
Step2.5:lio和vio根据时间来判断两个系统的状态是否差的太大了,如果太大了,则将滑窗中lio的位姿和时间戳time强制赋值;
double start_dt = g_lio_state.last_update_time - imu_queue.front()->header.stamp.toSec(); // ^ lio上一帧的时间 - imu的起始时间
double end_dt = cam_update_tim - imu_queue.back()->header.stamp.toSec(); // ^ cam的当前时间 - imu的终止时间
if ((start_dt > -0.02) && (fabs(end_dt) < 0.02)) { g_lio_state = state_aft_integration; g_lio_state.last_update_time = cam_update_tim; }
Step2.6:根据时间来判断Lidar和Cam是否能同步上,如果同步上了,才会进行ESKF计算卡尔曼增益;
if (esikf_update_valid && (estimator.solver_flag == Estimator::SolverFlag::NON_LINEAR) && (g_lio_state.last_update_time - g_camera_lidar_queue.m_visual_init_time > g_camera_lidar_queue.m_lidar_drag_cam_tim) )
Step2.7:明确变量
state_aft_integration:LIO预积分的位姿
diff_vins_lio_q:LIO预积分的预测位姿与VINS位姿的旋转差值;
diff_vins_lio_t:LIO预积分的预测位姿与VINS位姿的平移差值;
Step2.8:将LIO预积分的位姿 * 旋转差值,得到的结果相当于VINS的优化初值;
/* * diff_vins_lio_q.inverse():LIO的IMU预积分得到的状态值与VINS的估计值的差值 * q_pose_last = lio的位姿变换到vins下 */ Eigen::Quaterniond q_pose_last = Eigen::Quaterniond(state_aft_integration.rot_end * diff_vins_lio_q.inverse()); Eigen::Vector3d t_pose_last = state_aft_integration.pos_end - diff_vins_lio_t; estimator.m_para_Pose[win_idx][0] = t_pose_last(0); estimator.m_para_Pose[win_idx][1] = t_pose_last(1); estimator.m_para_Pose[win_idx][2] = t_pose_last(2); estimator.m_para_Pose[win_idx][3] = q_pose_last.x(); estimator.m_para_Pose[win_idx][4] = q_pose_last.y(); estimator.m_para_Pose[win_idx][5] = q_pose_last.z(); estimator.m_para_Pose[win_idx][6] = q_pose_last.w();
Step2.9:利用“上一步VINS的初值”,构建重投影误差,并计算雅可比矩阵
construct_camera_measure(win_idx, estimator, reppro_err_vec, J_mat_vec);
Step2.10:根据雅可比矩阵,求解卡尔曼增益K
for (int residual_idx = 0; residual_idx < reppro_err_vec.size(); residual_idx++) { meas_vec.block(residual_idx * 2, 0, 2, 1) = -1 * reppro_err_vec[residual_idx]; // J_mat_vec[residual_idx].block(0,0,2,3) = J_mat_vec[residual_idx].block(0,0,2,3) * extrinsic_vins_lio_q.toRotationMatrix().transpose(); Hsub.block(residual_idx * 2, 0, 2, 6) = J_mat_vec[residual_idx]; } K_1.setZero(); auto Hsub_T = Hsub.transpose(); H_T_H.setZero(); H_T_H.block<6, 6>(0, 0) = Hsub_T * Hsub; K_1 = (H_T_H + (state_aft_integration.cov * CAM_MEASUREMENT_COV).inverse()).inverse(); // * 卡尔曼增益 K = K_1.block<DIM_OF_STATES, 6>(0, 0) * Hsub_T; auto vec = state_prediction - state_aft_integration; solution = K * (meas_vec - Hsub * vec.block<6, 1>(0, 0) );
Step2.11 根据卡尔曼增益K,计算增量solution,然后根据增量来判断eskf是否收敛;
solution = state_aft_integration - state_prediction; rot_add = (solution).block<3, 1>(0, 0); t_add = solution.block<3, 1>(3, 0); flg_EKF_converged = false; // * 判断lio的ekf是否收敛 if (((rot_add.norm() * 57.3 - deltaR) < 0.01) && ((t_add.norm() - deltaT) < 0.015)) { flg_EKF_converged = true; } deltaR = rot_add.norm() * 57.3; deltaT = t_add.norm() ; }
Step2.12 小Trick:根据计算的增量,设置阈值,角度/平移变化量大于阈值,才会更新LIO的位姿;
if ((t_diff > 0.2) || (angular_diff > 2.0)) { g_lio_state = state_before_esikf; }
Step2.13 正式开始优化
estimator.solve_image_pose(img_msg->header);
void Estimator::optimization_LM()
需要注意,R2live中的最小二乘求解,和VINS-Mono已经不一样了,VINS-Mono中最小二乘是利用ceres自动求导,R2live是手写的求导+求解,所以看起来会非常恶心;
论文中也指出,考虑计算量,因子图优化只包含LIO的位姿,不优化Lidar的点云,实际上代码也是这么写的;
// * Add LiDAR prior residual for(int idx = 0 ; idx < lidar_prior_factor_vec.size(); idx++) { lidar_prior_factor_vec[idx].Evaluate(); res_pos_ros = margin_res_size + 15 * (number_of_imu_res) + idx * 15; residual.block(res_pos_ros, 0, 15, 1) = lidar_prior_factor_vec[idx].m_residual ; jacobian_pos_col = lidar_prior_factor_vec[idx].m_index_i * 15; // Pos[i] jacobian_mat.block(res_pos_ros, jacobian_pos_col, 6, 6) = lidar_prior_factor_vec[idx].m_jacobian_mat_vec[0].block(0, 0, 6, 6); jacobian_pos_col = lidar_prior_factor_vec[idx].m_index_i * 15 + 6; // speed_bias[i] jacobian_mat.block(res_pos_ros, jacobian_pos_col, 15, 9) = lidar_prior_factor_vec[idx].m_jacobian_mat_vec[1].block(0, 0, 15, 9); }
Step2.14 如果lidar和cam没有同步上,那么就不优化VINS的部分,也不进行ESKF计算增益,直接就输出Fast lio的位姿(注意,这个输出的位姿是上一lidar帧 + IMU预积分得到的预测位姿,其实就是Fast lio的输出位姿);
pub_LiDAR_Odometry(estimator, state_aft_integration, header);
Step2.15 如果同步上了,就会进行Step2.6—Step2.12,然后将VINS优化后的bias、速度、协方差再传递给LIO;
当然,VINS的位姿(R和t)传递给LIO的时候,也得需要用到diff_vins_lio_q和diff_vins_lio_t(LIO预积分与VINS的旋转差值、平移差值);
也就是说,送给LIO的,也是作为LIO的优化初值;
//* 将优化后的bias传递给lio if(estimator.Bas[WINDOW_SIZE].norm() < 0.5) { g_lio_state.bias_a = estimator.Bas[WINDOW_SIZE]; } // * 将优化后的bias、速度、协方差传递给lio g_lio_state.bias_g = estimator.Bgs[WINDOW_SIZE]; g_lio_state.vel_end = diff_vins_lio_q.toRotationMatrix() * estimator.Vs[WINDOW_SIZE]; g_lio_state.cov = state_aft_integration.cov; Eigen::Matrix3d temp_R = estimator.Rs[WINDOW_SIZE] * diff_vins_lio_q.toRotationMatrix(); Eigen::Vector3d temp_T = estimator.Ps[WINDOW_SIZE] + diff_vins_lio_t; eigen_q q_I = eigen_q(1.0, 0, 0, 0); double angular_diff = eigen_q(temp_R.transpose() * state_before_esikf.rot_end).angularDistance(q_I) * 57.3; double t_diff = (temp_T - state_before_esikf.pos_end).norm(); if ((t_diff < 0.2) && (angular_diff < 2.0)) { g_lio_state.cov = state_aft_integration.cov; g_lio_state.last_update_time = cam_update_tim; g_lio_state.rot_end = temp_R; g_lio_state.pos_end = temp_T; }
Step2.16 细化diff_vins_lio_q和diff_vins_lio_t??
estimator.refine_vio_system(diff_vins_lio_q, diff_vins_lio_t);
Step2.17 发布话题
if (g_camera_lidar_queue.m_if_have_lidar_data == false) { pubOdometry(estimator, header); // * estimator:VINS的优化器 } else { pub_LiDAR_Odometry(estimator, state_aft_integration, header); // * state_aft_integration:当前lio的位姿 } pubCameraPose(estimator, header); pubKeyPoses(estimator, header); pubPointCloud(estimator, header); pubTF(estimator, header); pubKeyframe(estimator);
-
总的来说,VIO的部分就是使用的VINS-Mono,但是使用了LIO的预积分来作为VINS优化的初值,然后VINS优化的结果,也传递给LIO,作为LIO优化的初值;
-
有一个非常需要注意的点,代码中设置了多个“Lidar和Camera同步的函数”,也就意味着,只有Lidar和Camera同步上了,才能“使用了LIO的预积分来作为VINS优化的初值,然后VINS优化的结果,也传递给LIO,作为LIO优化的初值”;
如果Lidar和Camera不能同步上,那就没有耦合的意义了,直接就是fast-lio;
-
综上所述,r2live和LVI-SAM的思想有点像,都是用LIO的位姿来推算VIO优化的初值;
不过呢,比LVI-SAM强的是:VIO的结果会传递给LIO,作为LIO优化的初值;
而且,确实在代码里,把LIO的位姿作为因子,加入到了优化中;