由于网上大部分的程序都是针对原来的VINS-fusion的单双目,很少有针对RGBD的解释,正好项目需求,因此在这里对该程序进行分析。当然写的时候一定存在问题,存在的问题请大家指出来。
相对于单双目的程序,本程序仅仅针对RGBD的情况,代码也基本上继承了原来的程序。
首先还是从主函数入手:
当然,由于我使用的OpenCV版本是4.6,我对原程序中与OpenCV版本相关的内容进行了替换与相应修改。
// Node entry point: initialize ROS, load the config file passed on the
// command line, register the subscribers and start the image-sync thread.
// Returns 1 on bad usage, otherwise blocks in ros::spin().
int main(int argc, char **argv)
{
    ros::init(argc, argv, "vins_estimator");
    ros::NodeHandle n("~");
    ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::levels::Info);

    if (argc != 2)
    {
        // Fixed typo in the usage text ("intput" -> "input").
        printf("please input: rosrun vins vins_node [config file] \n"
               "for example: rosrun vins vins_node "
               "~/catkin_ws/src/VINS-Fusion/config/euroc/euroc_stereo_imu_config.yaml \n");
        return 1;
    }

    string config_file = argv[1];
    printf("config_file: %s\n", argv[1]);
    /*
    std::string config_file;
    config_file = readParam<std::string>(n, "config_file");
    cout << "config_file: "<< config_file <<endl;
    */

    readParameters(config_file);     // fill the global parameter variables
    estimator.setParameter();        // push them into the estimator, start threads

#ifdef EIGEN_DONT_PARALLELIZE
    ROS_DEBUG("EIGEN_DONT_PARALLELIZE");
#endif

    ROS_WARN("waiting for image and imu...");

    registerPub(n);  // advertise all output topics

    // tcpNoDelay lowers latency on the high-rate IMU stream.
    ros::Subscriber sub_imu = n.subscribe(IMU_TOPIC, 2000, imu_callback, ros::TransportHints().tcpNoDelay());
    ros::Subscriber sub_feature = n.subscribe("/feature_tracker/feature", 2000, feature_callback);
    ros::Subscriber sub_img0 = n.subscribe(IMAGE0_TOPIC, 100, img0_callback);
    ros::Subscriber sub_img1 = n.subscribe(IMAGE1_TOPIC, 100, img1_callback);

    // sync_process (defined elsewhere in this file) presumably pairs the two
    // image streams and feeds the estimator -- confirm against its definition.
    std::thread sync_thread{sync_process};
    ros::spin();
    return 0;
}
显然,主函数还是很简单,在main函数之前,首先是头文件与全局变量
#include <stdio.h>
#include <queue>
#include <map>
#include <thread>
#include <mutex>
#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/opencv.hpp>
#include "estimator/estimator.h"
#include "estimator/parameters.h"
#include "utility/visualization.h"
// Global estimator instance -- its constructor runs during static
// initialization, before main() starts.
Estimator estimator;
// Raw measurement buffers filled by the ROS subscriber callbacks.
queue<sensor_msgs::ImuConstPtr> imu_buf;
queue<sensor_msgs::PointCloudConstPtr> feature_buf;
queue<sensor_msgs::ImageConstPtr> img0_buf;
queue<sensor_msgs::ImageConstPtr> img1_buf;
// Protects the four buffers above.
std::mutex m_buf;
在这里可以发现其实已经定义了一个Estimator estimator;估计器变量了,当然,构造函数此时也开始运行了,可以先看一下Estimator类的定义:
// Sliding-window visual-inertial estimator (declaration).
// Holds WINDOW_SIZE+1 frames of state plus the measurement buffers,
// pre-integrations and raw parameter arrays used by the optimizer.
class Estimator
{
public:
Estimator();/// constructor
void setParameter();
// interface
void initFirstPose(Eigen::Vector3d p, Eigen::Matrix3d r);
void inputIMU(double t, const Vector3d &linearAcceleration, const Vector3d &angularVelocity);
void inputFeature(double t, const map<int, vector<pair<int, Eigen::Matrix<double, 7, 1>>>> &featureFrame);
void inputImage(double t, const cv::Mat &_img, const cv::Mat &_img1 = cv::Mat());
void processIMU(double t, double dt, const Vector3d &linear_acceleration, const Vector3d &angular_velocity);
void processImage(const map<int, vector<pair<int, Eigen::Matrix<double, 7, 1>>>> &image, const double header);
void processMeasurements();
// internal
void clearState();
bool initialStructure();
bool visualInitialAlign();
// Presumably the RGB-D counterpart of visualInitialAlign (uses measured
// depth) -- declaration only, confirm against the definition.
bool visualInitialAlignWithDepth();
bool relativePose(Matrix3d &relative_R, Vector3d &relative_T, int &l);
void slideWindow();
void slideWindowNew();
void slideWindowOld();
void optimization();
// Copy state into / out of the raw para_* arrays below.
void vector2double();
void double2vector();
bool failureDetection();
bool getIMUInterval(double t0, double t1, vector<pair<double, Eigen::Vector3d>> &accVector,
vector<pair<double, Eigen::Vector3d>> &gyrVector);
void getPoseInWorldFrame(Eigen::Matrix4d &T);
void getPoseInWorldFrame(int index, Eigen::Matrix4d &T);
void predictPtsInNextFrame();
void outliersRejection(set<int> &removeIndex);
double reprojectionError(Matrix3d &Ri, Vector3d &Pi, Matrix3d &rici, Vector3d &tici,
Matrix3d &Rj, Vector3d &Pj, Matrix3d &ricj, Vector3d &ticj,
double depth, Vector3d &uvi, Vector3d &uvj);
void updateLatestStates();
void fastPredictIMU(double t, Eigen::Vector3d linear_acceleration, Eigen::Vector3d angular_velocity);
bool IMUAvailable(double t);
void initFirstIMUPose(vector<pair<double, Eigen::Vector3d>> &accVector);
// INITIAL until initialization succeeds, then NON_LINEAR.
enum SolverFlag
{
INITIAL,
NON_LINEAR
};
// Which frame is dropped when the window slides.
enum MarginalizationFlag
{
MARGIN_OLD = 0,
MARGIN_SECOND_NEW = 1
};
// Guards accBuf / gyrBuf / featureBuf (see processMeasurements()).
std::mutex mBuf;
queue<pair<double, Eigen::Vector3d>> accBuf;
queue<pair<double, Eigen::Vector3d>> gyrBuf;
// <timestamp, feature_id -> [(camera_id, 7x1 observation)]>
queue<pair<double, map<int, vector<pair<int, Eigen::Matrix<double, 7, 1> > > > > > featureBuf;
double prevTime, curTime;
bool openExEstimation;
std::thread trackThread;
// Runs processMeasurements() when MULTIPLE_THREAD is set (see setParameter()).
std::thread processThread;
FeatureTracker featureTracker;
SolverFlag solver_flag;
MarginalizationFlag marginalization_flag;
Vector3d g;
// Camera-to-IMU extrinsics, up to two cameras.
Matrix3d ric[2];
Vector3d tic[2];
// Sliding-window states: position, velocity, rotation, IMU biases.
Vector3d Ps[(WINDOW_SIZE + 1)];
Vector3d Vs[(WINDOW_SIZE + 1)];
Matrix3d Rs[(WINDOW_SIZE + 1)];
Vector3d Bas[(WINDOW_SIZE + 1)];
Vector3d Bgs[(WINDOW_SIZE + 1)];
// Camera-IMU time offset (added to feature timestamps, see processMeasurements()).
double td;
Matrix3d back_R0, last_R, last_R0;
Vector3d back_P0, last_P, last_P0;
// Per-frame timestamps of the window.
double Headers[(WINDOW_SIZE + 1)];
// One IMU pre-integration per window frame, plus the raw samples.
IntegrationBase *pre_integrations[(WINDOW_SIZE + 1)];
Vector3d acc_0, gyr_0;
vector<double> dt_buf[(WINDOW_SIZE + 1)];
vector<Vector3d> linear_acceleration_buf[(WINDOW_SIZE + 1)];
vector<Vector3d> angular_velocity_buf[(WINDOW_SIZE + 1)];
int frame_count;
int sum_of_outlier, sum_of_back, sum_of_front, sum_of_invalid;
int inputImageCnt;
FeatureManager f_manager;
MotionEstimator m_estimator;
InitialEXRotation initial_ex_rotation;
bool first_imu;
bool is_valid, is_key;
bool failure_occur;
vector<Vector3d> point_cloud;
vector<Vector3d> margin_cloud;
vector<Vector3d> key_poses;
double initial_timestamp;
// Raw parameter blocks for the solver (see vector2double/double2vector).
double para_Pose[WINDOW_SIZE + 1][SIZE_POSE];
double para_SpeedBias[WINDOW_SIZE + 1][SIZE_SPEEDBIAS];
double para_Feature[NUM_OF_F][SIZE_FEATURE];
double para_Ex_Pose[2][SIZE_POSE];
double para_Retrive_Pose[SIZE_POSE];
double para_Td[1][1];
double para_Tr[1][1];
int loop_window_index;
// Marginalization prior kept between optimizations.
MarginalizationInfo *last_marginalization_info;
vector<double *> last_marginalization_parameter_blocks;
map<double, ImageFrame> all_image_frame;
IntegrationBase *tmp_pre_integration;
// First pose used to anchor the world frame (see initFirstPose()).
Eigen::Vector3d initP;
Eigen::Matrix3d initR;
// Latest IMU-rate propagated state (see fastPredictIMU/updateLatestStates).
double latest_time;
Eigen::Vector3d latest_P, latest_V, latest_Ba, latest_Bg, latest_acc_0, latest_gyr_0;
Eigen::Quaterniond latest_Q;
bool initFirstPoseFlag;
};
非常多的变量与函数。当然,在初始化对象的时候运行的是构造函数。
///2.1.0 Constructor: reset all estimator state.
// The member initializer hands f_manager a pointer to the window rotations Rs.
Estimator::Estimator(): f_manager{Rs}
{
    ROS_INFO("init begins");
    // clearState() conditionally `delete`s these raw pointer members. At this
    // point they have never been initialized, so even reading them (let alone
    // deleting) is undefined behavior -- null them out before the first call.
    for (int i = 0; i < WINDOW_SIZE + 1; i++)
        pre_integrations[i] = nullptr;
    tmp_pre_integration = nullptr;
    last_marginalization_info = nullptr;

    clearState();
    prevTime = -1;
    curTime = 0;
    openExEstimation = false;  // bool member: use false, not 0
    initP = Eigen::Vector3d(0, 0, 0);
    initR = Eigen::Matrix3d::Identity();
    inputImageCnt = 0;
    initFirstPoseFlag = false;
}
在构造函数后面跟的冒号是C++的成员初始化列表,这里就是用{Rs}来初始化f_manager,其中f_manager是FeatureManager类的对象。FeatureManager管理所有特征点,通过list容器存储特征点属性(list feature)。
而这个构造函数里面调用了
///2.1.10 Reset the estimator to its pre-initialization state.
// Clears every per-frame state, frees the pre-integrations and the
// marginalization prior, and resets the bookkeeping flags/counters.
void Estimator::clearState()
{
    // Per-frame sliding-window state back to identity/zero.
    for (int i = 0; i < WINDOW_SIZE + 1; i++)
    {
        Rs[i].setIdentity();
        Ps[i].setZero();
        Vs[i].setZero();
        Bas[i].setZero();
        Bgs[i].setZero();
        dt_buf[i].clear();
        linear_acceleration_buf[i].clear();
        angular_velocity_buf[i].clear();
        delete pre_integrations[i];   // delete on nullptr is a no-op
        pre_integrations[i] = nullptr;
    }

    // Camera-IMU extrinsics back to identity.
    for (int i = 0; i < NUM_OF_CAM; i++)
    {
        tic[i] = Vector3d::Zero();
        ric[i] = Matrix3d::Identity();
    }

    first_imu = false;  // original had `false,` -- comma-operator typo, now a proper statement
    sum_of_back = 0;
    sum_of_front = 0;
    frame_count = 0;
    solver_flag = INITIAL;
    initial_timestamp = 0;
    all_image_frame.clear();

    delete tmp_pre_integration;
    delete last_marginalization_info;
    tmp_pre_integration = nullptr;
    last_marginalization_info = nullptr;
    last_marginalization_parameter_blocks.clear();

    f_manager.clearState();
    failure_occur = false;  // bool member: use false, not 0
}
这个函数的意思就是所有的变量都清零。
在这之后是main函数,main函数里面首先是初始化ROS:
ros::init(argc, argv, "vins_estimator");
ros::NodeHandle n("~");
ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::levels::Info);
然后是读入参数,大家可以参考这一位大哥的内容
VINS参数描述
https://blog.csdn.net/weixin_40084095/article/details/124770050
///1.1.1 Read all configuration parameters from the YAML config file.
// Fills the global parameter variables: topics, tracker settings, IMU noise,
// camera-IMU extrinsics, calib file paths, depth flag and image size.
// Aborts (ROS_BREAK) when the config file cannot be opened.
void readParameters(std::string config_file)
{
    // Cheap existence check before handing the path to cv::FileStorage.
    FILE *fh = fopen(config_file.c_str(), "r");
    if (fh == NULL)
    {
        // Fixed typo in the log message ("dosen't" -> "doesn't").
        ROS_WARN("config_file doesn't exist; wrong config_file path");
        ROS_BREAK();
        return;
    }
    fclose(fh);

    cv::FileStorage fsSettings(config_file, cv::FileStorage::READ);
    if (!fsSettings.isOpened())
    {
        std::cerr << "ERROR: Wrong path to settings" << std::endl;
        // Bail out instead of reading from an unopened FileStorage
        // (the original fell through and kept going).
        ROS_BREAK();
        return;
    }

    /// string values are read with >>, numeric values with =
    fsSettings["image0_topic"] >> IMAGE0_TOPIC;
    fsSettings["image1_topic"] >> IMAGE1_TOPIC;
    MAX_CNT = fsSettings["max_cnt"];          // max number of tracked features
    MIN_DIST = fsSettings["min_dist"];        // min pixel distance between features
    F_THRESHOLD = fsSettings["F_threshold"];
    SHOW_TRACK = fsSettings["show_track"];
    FLOW_BACK = fsSettings["flow_back"];      // enable reverse optical-flow check
    MULTIPLE_THREAD = fsSettings["multiple_thread"];

    USE_IMU = fsSettings["imu"];
    printf("USE_IMU: %d\n", USE_IMU);
    if (USE_IMU)
    {
        fsSettings["imu_topic"] >> IMU_TOPIC;
        printf("IMU_TOPIC: %s\n", IMU_TOPIC.c_str());
        ACC_N = fsSettings["acc_n"];   // accelerometer noise density
        ACC_W = fsSettings["acc_w"];   // accelerometer bias random walk
        GYR_N = fsSettings["gyr_n"];   // gyroscope noise density
        GYR_W = fsSettings["gyr_w"];   // gyroscope bias random walk
        G.z() = fsSettings["g_norm"];  // gravity magnitude
    }

    SOLVER_TIME = fsSettings["max_solver_time"];       // max optimization time
    NUM_ITERATIONS = fsSettings["max_num_iterations"]; // max solver iterations
    MIN_PARALLAX = fsSettings["keyframe_parallax"];    // keyframe parallax threshold
    MIN_PARALLAX = MIN_PARALLAX / FOCAL_LENGTH;        // pixels -> normalized-plane parallax

    fsSettings["output_path"] >> OUTPUT_FOLDER;
    VINS_RESULT_PATH = OUTPUT_FOLDER + "/vio.csv";
    std::cout << "result path " << VINS_RESULT_PATH << std::endl;
    std::ofstream fout(VINS_RESULT_PATH, std::ios::out);  // truncate old result file
    fout.close();

    ESTIMATE_EXTRINSIC = fsSettings["estimate_extrinsic"];  // camera-IMU extrinsic mode
    if (ESTIMATE_EXTRINSIC == 2)
    {
        // No prior at all: start from identity and calibrate online.
        ROS_WARN("have no prior about extrinsic param, calibrate extrinsic param");
        RIC.push_back(Eigen::Matrix3d::Identity());
        TIC.push_back(Eigen::Vector3d::Zero());
        EX_CALIB_RESULT_PATH = OUTPUT_FOLDER + "/extrinsic_parameter.csv";
    }
    else
    {
        if (ESTIMATE_EXTRINSIC == 1)
        {
            ROS_WARN(" Optimize extrinsic param around initial guess!");
            EX_CALIB_RESULT_PATH = OUTPUT_FOLDER + "/extrinsic_parameter.csv";
        }
        if (ESTIMATE_EXTRINSIC == 0)
            ROS_WARN(" fix extrinsic param ");

        // body_T_cam0: 4x4 transform from camera 0 to the body (IMU) frame.
        cv::Mat cv_T;
        fsSettings["body_T_cam0"] >> cv_T;
        Eigen::Matrix4d T;
        cv::cv2eigen(cv_T, T);
        RIC.push_back(T.block<3, 3>(0, 0));
        TIC.push_back(T.block<3, 1>(0, 3));
    }

    NUM_OF_CAM = fsSettings["num_of_cam"];
    printf("camera number %d\n", NUM_OF_CAM);
    if (NUM_OF_CAM != 1 && NUM_OF_CAM != 2)
    {
        printf("num_of_cam should be 1 or 2\n");
        assert(0);
    }

    // Camera intrinsic calib files live next to the config file.
    int pn = config_file.find_last_of('/');
    std::string configPath = config_file.substr(0, pn);
    std::string cam0Calib;
    fsSettings["cam0_calib"] >> cam0Calib;
    std::string cam0Path = configPath + "/" + cam0Calib;
    CAM_NAMES.push_back(cam0Path);

    if (NUM_OF_CAM == 2)
    {
        STEREO = 1;
        std::string cam1Calib;
        fsSettings["cam1_calib"] >> cam1Calib;
        std::string cam1Path = configPath + "/" + cam1Calib;
        //printf("%s cam1 path\n", cam1Path.c_str() );
        CAM_NAMES.push_back(cam1Path);

        cv::Mat cv_T;
        fsSettings["body_T_cam1"] >> cv_T;
        Eigen::Matrix4d T;
        cv::cv2eigen(cv_T, T);
        RIC.push_back(T.block<3, 3>(0, 0));
        TIC.push_back(T.block<3, 1>(0, 3));
    }

    DEPTH = fsSettings["depth"];
    if (DEPTH == 1)
    {
        // RGB-D: reuse the body_T_cam1 slot as the depth-sensor extrinsic and
        // count the depth sensor as an extra "camera".
        cv::Mat cv_T;
        fsSettings["body_T_cam1"] >> cv_T;
        Eigen::Matrix4d T;
        cv::cv2eigen(cv_T, T);
        RIC.push_back(T.block<3, 3>(0, 0));
        TIC.push_back(T.block<3, 1>(0, 3));
        NUM_OF_CAM++;
    }

    INIT_DEPTH = 5.0;  // default depth assigned to features without one
    BIAS_ACC_THRESHOLD = 0.1;
    BIAS_GYR_THRESHOLD = 0.1;

    TD = fsSettings["td"];  // initial camera-IMU time offset
    ESTIMATE_TD = fsSettings["estimate_td"];
    if (ESTIMATE_TD)
        ROS_INFO_STREAM("Unsynchronized sensors, online estimate time offset, initial td: " << TD);
    else
        ROS_INFO_STREAM("Synchronized sensors, fix time offset: " << TD);

    ROW = fsSettings["image_height"];
    COL = fsSettings["image_width"];
    ROS_INFO("ROW: %d COL: %d ", ROW, COL);

    if (!USE_IMU)
    {
        // Without an IMU neither the extrinsic nor td can be estimated.
        ESTIMATE_EXTRINSIC = 0;
        ESTIMATE_TD = 0;
        printf("no imu, fix extrinsic param; no time offset calibration\n");
    }
    fsSettings.release();
}
基本上就是在原有的参数上加入了深度相机的外参数。
接下来同样的
estimator.setParameter();
这个函数在estimator.cpp里面,它读取了每一个相机到IMU坐标系的旋转/平移外参数,并设置了非线性优化中重投影误差部分的信息矩阵。
//2.1.1设置参数
void Estimator::setParameter()
{
for (int i = 0; i < NUM_OF_CAM; i++)
{
tic[i] = TIC[i];
ric[i] = RIC[i];
cout << " exitrinsic cam " << i << endl << ric[i] << endl << tic[i].transpose() << endl;
}
f_manager.setRic(ric);
ProjectionTwoFrameOneCamFactor::sqrt_info = FOCAL_LENGTH / 1.5 * Matrix2d::Identity();
ProjectionTwoFrameTwoCamFactor::sqrt_info = FOCAL_LENGTH / 1.5 * Matrix2d::Identity();
ProjectionOneFrameTwoCamFactor::sqrt_info = FOCAL_LENGTH / 1.5 * Matrix2d::Identity();
td = TD;
g = G;
cout << "set g " << g.transpose() << endl;
featureTracker.readIntrinsicParameter(CAM_NAMES, DEPTH);
std::cout << "MULTIPLE_THREAD is " << MULTIPLE_THREAD << '\n';
if (MULTIPLE_THREAD)
{
processThread = std::thread(&Estimator::processMeasurements, this);
}
}
读取每一个相机的外参数。当然这里有一个有趣的地方:在双目情况下机体系的原点是左目相机,但是在这里(RGBD)原点对应的位置取决于配置文件给出的外参。
featureTracker.readIntrinsicParameter(CAM_NAMES, DEPTH);
在这里读取了相机的内参数
///3.1.11
void FeatureTracker::readIntrinsicParameter(const vector<string> &calib_file,const int depth)
{
for (size_t i = 0; i < calib_file.size(); i++)
{
ROS_INFO("reading paramerter of camera %s", calib_file[i].c_str());
camodocal::CameraPtr camera = CameraFactory::instance()->generateCameraFromYamlFile(calib_file[i]);///读取相机内参的文件名字
m_camera.push_back(camera);
}
if (calib_file.size() == 2)
stereo_cam = 1;
if(depth)
depth_cam = 1;
}
这里有个很重要的参数是, stereo_cam = 1; 与 depth_cam = 1;这两个参数决定了本文算法是双目还是RGBD,RGBD将深度图替代了原来的右目。这里面主要的工作就是读取相机的内参数,当然是调用了其他文件夹里面的函数
// Factory: build a camodocal camera model from a calibration YAML file.
// Returns a null CameraPtr when the file cannot be opened or the model
// type string is unknown.
CameraPtr
CameraFactory::generateCameraFromYamlFile( const std::string& filename )
{
cv::FileStorage fs( filename, cv::FileStorage::READ );
if ( !fs.isOpened( ) )
{
// Null pointer signals "could not read calibration".
return CameraPtr( );
}
// MEI (catadioptric) is the default when no model_type key is present.
Camera::ModelType modelType = Camera::MEI;
if ( !fs["model_type"].isNone( ) )
{
std::string sModelType;
fs["model_type"] >> sModelType;
// Case-insensitive match of the model_type string.
if ( boost::iequals( sModelType, "kannala_brandt" ) )
{
modelType = Camera::KANNALA_BRANDT;
}
else if ( boost::iequals( sModelType, "mei" ) )
{
modelType = Camera::MEI;
}
else if ( boost::iequals( sModelType, "scaramuzza" ) )
{
modelType = Camera::SCARAMUZZA;
}
else if ( boost::iequals( sModelType, "pinhole" ) )
{
modelType = Camera::PINHOLE;
}
else if ( boost::iequals( sModelType, "PINHOLE_FULL" ) )
{
modelType = Camera::PINHOLE_FULL;
}
else
{
std::cerr << "# ERROR: Unknown camera model: " << sModelType << std::endl;
return CameraPtr( );
}
}
// Instantiate the concrete model and let it parse its own parameters
// from the same YAML file.
switch ( modelType )
{
case Camera::KANNALA_BRANDT:
{
EquidistantCameraPtr camera( new EquidistantCamera );
EquidistantCamera::Parameters params = camera->getParameters( );
params.readFromYamlFile( filename );
camera->setParameters( params );
return camera;
}
case Camera::PINHOLE:
{
PinholeCameraPtr camera( new PinholeCamera );
PinholeCamera::Parameters params = camera->getParameters( );
params.readFromYamlFile( filename );
camera->setParameters( params );
return camera;
}
case Camera::PINHOLE_FULL:
{
PinholeFullCameraPtr camera( new PinholeFullCamera );
PinholeFullCamera::Parameters params = camera->getParameters( );
params.readFromYamlFile( filename );
camera->setParameters( params );
return camera;
}
case Camera::SCARAMUZZA:
{
OCAMCameraPtr camera( new OCAMCamera );
OCAMCamera::Parameters params = camera->getParameters( );
params.readFromYamlFile( filename );
camera->setParameters( params );
return camera;
}
case Camera::MEI:
default:
{
CataCameraPtr camera( new CataCamera );
CataCamera::Parameters params = camera->getParameters( );
params.readFromYamlFile( filename );
camera->setParameters( params );
return camera;
}
}
// Unreachable: every switch case returns above.
return CameraPtr( );
}
// NOTE(review): this brace appears to close an enclosing scope (likely
// namespace camodocal) whose opening lies outside this excerpt -- confirm.
}
这里面实际上利用cam0_calib: "cam0_pinhole.yaml"的文件读取参数文件,而不是直接读取txt里面的矩阵
接下来就是
std::cout << "MULTIPLE_THREAD is " << MULTIPLE_THREAD << '\n';
if (MULTIPLE_THREAD)
{
processThread = std::thread(&Estimator::processMeasurements, this);
}
当然在void Estimator::setParameter()的最后,在这个里面实际上开启了线程processMeasurements。
而这个线程是最重要的一个线程,所以咱们第二节再说
///2.1.7
// Main measurement-processing loop. When MULTIPLE_THREAD is set this runs
// forever in its own thread; otherwise a single pass is executed per call.
// For each feature packet: wait for covering IMU data, pre-integrate the
// IMU samples, process the image features, then publish the results.
void Estimator::processMeasurements()
{
while (1)
{
//printf("process measurments\n");
// One image-feature packet: <timestamp, feature_id -> observations>.
pair<double, map<int, vector<pair<int, Eigen::Matrix<double, 7, 1> > > > > feature;
vector<pair<double, Eigen::Vector3d>> accVector, gyrVector;
// NOTE(review): featureBuf is read here (empty()/front()) without holding
// mBuf -- only the pop below is locked. Looks racy against the producer
// side; confirm against inputFeature()/inputImage().
if(!featureBuf.empty())
{
feature = featureBuf.front();
// Compensate the camera-IMU time offset td.
curTime = feature.first + td;
// Spin (5 ms naps) until IMU data covering the image time is available.
while(1)
{
if ((!USE_IMU || IMUAvailable(feature.first + td)))
break;
else
{
printf("wait for imu ... \n");
if (! MULTIPLE_THREAD)
return;
std::chrono::milliseconds dura(5);
std::this_thread::sleep_for(dura);
}
}
mBuf.lock();
if(USE_IMU)
getIMUInterval(prevTime, curTime, accVector, gyrVector);
featureBuf.pop();
mBuf.unlock();
if(USE_IMU)
{
// Anchor the world frame from the first accelerometer readings.
if(!initFirstPoseFlag)
initFirstIMUPose(accVector);
// Propagate each IMU sample; the first and last dt are clipped to
// the [prevTime, curTime] interval.
for(size_t i = 0; i < accVector.size(); i++)
{
double dt;
if(i == 0)
dt = accVector[i].first - prevTime;
else if (i == accVector.size() - 1)
dt = curTime - accVector[i - 1].first;
else
dt = accVector[i].first - accVector[i - 1].first;
processIMU(accVector[i].first, dt, accVector[i].second, gyrVector[i].second);
}
}
processImage(feature.second, feature.first);
prevTime = curTime;
printStatistics(*this, 0);
// Publish the updated estimator state over ROS.
std_msgs::Header header;
header.frame_id = "world";
header.stamp = ros::Time(feature.first);
pubOdometry(*this, header);
pubKeyPoses(*this, header);
pubCameraPose(*this, header);
pubPointCloud(*this, header);
pubKeyframe(*this);
pubTF(*this, header);
}
// Single-threaded mode: one pass only.
if (! MULTIPLE_THREAD)
break;
std::chrono::milliseconds dura(2);
std::this_thread::sleep_for(dura);
}
}