1.先看 vins_estimator 下 CMakeLists.txt 代码详解
# CMake 3.1+ is required so that CMAKE_CXX_STANDARD is honored
# (the previous 2.8.3 minimum predates that variable).
cmake_minimum_required(VERSION 3.1)
project(vins)

# Default to an optimized build; -g in the release flags keeps debug
# symbols available for profiling.
set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS_RELEASE "-O3 -Wall -g")

# Use the standard-variable mechanism instead of overwriting
# CMAKE_CXX_FLAGS with "-std=c++11", which clobbered any user-supplied flags.
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# ROS (catkin) components this package builds against.
# NOTE: the original file had the closing parenthesis of this call inside a
# trailing comment, which left the find_package() call unterminated.
find_package(catkin REQUIRED COMPONENTS
    roscpp          # ROS C++ client library
    std_msgs        # standard message types
    geometry_msgs   # geometric primitives (poses, points, ...)
    nav_msgs        # Path / Odometry messages
    tf              # coordinate-frame transforms
    cv_bridge       # ROS <-> OpenCV image conversion
    camera_models   # camera models shipped with VINS
    image_transport # image publishing / subscribing
)

find_package(OpenCV REQUIRED)
find_package(Ceres REQUIRED)  # nonlinear least-squares solver

# Project-local find modules (e.g. FindEigen3.cmake) live in cmake/.
set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
# Eigen is mandatory for the estimator, so make the lookup REQUIRED
# (it was previously allowed to fail silently).
find_package(Eigen3 REQUIRED)

# Single merged include list; ${catkin_INCLUDE_DIRS} was previously added twice.
include_directories(
    ${catkin_INCLUDE_DIRS}
    ${CERES_INCLUDE_DIRS}
    ${EIGEN3_INCLUDE_DIR}
)

catkin_package()

# Core VINS library shared by every executable below.
add_library(vins_lib
    src/estimator/parameters.cpp
    src/estimator/estimator.cpp
    src/estimator/feature_manager.cpp
    src/factor/pose_local_parameterization.cpp
    src/factor/projectionTwoFrameOneCamFactor.cpp
    src/factor/projectionTwoFrameTwoCamFactor.cpp
    src/factor/projectionOneFrameTwoCamFactor.cpp
    src/factor/marginalization_factor.cpp
    src/utility/utility.cpp
    src/utility/visualization.cpp
    src/utility/CameraPoseVisualization.cpp
    src/initial/solve_5pts.cpp
    src/initial/initial_aligment.cpp
    src/initial/initial_sfm.cpp
    src/initial/initial_ex_rotation.cpp
    src/featureTracker/feature_tracker.cpp
)
target_link_libraries(vins_lib ${catkin_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES})

# Executables: the live ROS node plus two KITTI offline test drivers.
add_executable(vins_node src/rosNodeTest.cpp)
target_link_libraries(vins_node vins_lib)

add_executable(kitti_odom_test src/KITTIOdomTest.cpp)
target_link_libraries(kitti_odom_test vins_lib)

add_executable(kitti_gps_test src/KITTIGPSTest.cpp)
target_link_libraries(kitti_gps_test vins_lib)
2.再看 vins_estimator/src 下 rosNodeTest.cpp 代码详解（由上面 CMakeLists 中 add_executable(vins_node src/rosNodeTest.cpp) 可知该文件位于 src/ 目录下，而非 src/utility）
#include <stdio.h>
#include <queue>
#include <map>
#include <thread>
#include <mutex>
#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/opencv.hpp>
#include "estimator/estimator.h"
#include "estimator/parameters.h"
#include "utility/visualization.h"
// Single global estimator instance; every callback below feeds data into it.
Estimator estimator;
// Message buffers shared between the ROS callback threads and sync_process();
// all accesses are guarded by m_buf.
// NOTE(review): imu_buf and feature_buf are never referenced elsewhere in this
// file -- IMU and feature messages are forwarded to the estimator directly
// from their callbacks, so these two queues appear to be dead state.
queue<sensor_msgs::ImuConstPtr> imu_buf;
queue<sensor_msgs::PointCloudConstPtr> feature_buf;
queue<sensor_msgs::ImageConstPtr> img0_buf;
queue<sensor_msgs::ImageConstPtr> img1_buf;
std::mutex m_buf;
// Buffers an incoming left-camera (cam0) image for later consumption by
// sync_process().  Thread-safe: the queue is guarded by m_buf.
void img0_callback(const sensor_msgs::ImageConstPtr &img_msg)
{
    std::lock_guard<std::mutex> guard(m_buf);
    img0_buf.push(img_msg);
}
// Buffers an incoming right-camera (cam1) image for later consumption by
// sync_process().  Thread-safe: the queue is guarded by m_buf.
void img1_callback(const sensor_msgs::ImageConstPtr &img_msg)
{
    std::lock_guard<std::mutex> guard(m_buf);
    img1_buf.push(img_msg);
}
// Converts a ROS image message into a grayscale cv::Mat.
// Messages tagged with the nonstandard "8UC1" encoding carry mono data, so a
// copy is relabeled "mono8" before handing it to cv_bridge; everything else is
// converted to MONO8 directly.  The pixel data is cloned so the returned Mat
// does not alias the cv_bridge buffer.
cv::Mat getImageFromMsg(const sensor_msgs::ImageConstPtr &img_msg)
{
    cv_bridge::CvImageConstPtr cv_ptr;
    if (img_msg->encoding == "8UC1")
    {
        // Whole-message copy, then override only the encoding string.
        sensor_msgs::Image relabeled = *img_msg;
        relabeled.encoding = "mono8";
        cv_ptr = cv_bridge::toCvCopy(relabeled, sensor_msgs::image_encodings::MONO8);
    }
    else
    {
        cv_ptr = cv_bridge::toCvCopy(img_msg, sensor_msgs::image_encodings::MONO8);
    }
    return cv_ptr->image.clone();
}
// Synchronization loop, run on a dedicated thread (started from main()).
// Drains the image buffers filled by img0_callback/img1_callback and feeds
// frames to the estimator.  All buffer access happens under m_buf.
void sync_process()
{
while(1)
{
if(STEREO)
{
cv::Mat image0, image1;
std_msgs::Header header;
double time = 0;
m_buf.lock();
if (!img0_buf.empty() && !img1_buf.empty())
{
double time0 = img0_buf.front()->header.stamp.toSec();
double time1 = img1_buf.front()->header.stamp.toSec();
// The two frames count as a stereo pair only if their stamps agree
// within 3 ms; otherwise the older frame is discarded and the loop
// retries on the next iteration.
if(time0 < time1 - 0.003)
{
img0_buf.pop();
printf("throw img0\n");
}
else if(time0 > time1 + 0.003)
{
img1_buf.pop();
printf("throw img1\n");
}
else
{
// Stamps match: consume one frame from each buffer.
time = img0_buf.front()->header.stamp.toSec();
header = img0_buf.front()->header; // NOTE(review): header is assigned but never used below
image0 = getImageFromMsg(img0_buf.front());
img0_buf.pop();
image1 = getImageFromMsg(img1_buf.front());
img1_buf.pop();
}
}
m_buf.unlock();
// inputImage runs outside the lock so the image callbacks stay unblocked.
if(!image0.empty())
estimator.inputImage(time, image0, image1);
}
else
{
// Monocular mode: forward the next cam0 image as soon as one is buffered.
cv::Mat image;
std_msgs::Header header;
double time = 0;
m_buf.lock();
if(!img0_buf.empty())
{
time = img0_buf.front()->header.stamp.toSec();
header = img0_buf.front()->header; // NOTE(review): header is assigned but never used below
image = getImageFromMsg(img0_buf.front());
img0_buf.pop();
}
m_buf.unlock();
if(!image.empty())
estimator.inputImage(time, image);
}
// Short sleep so the loop does not busy-spin when the buffers are empty.
std::chrono::milliseconds dura(2);
std::this_thread::sleep_for(dura);
}
}
// Forwards a single IMU sample (timestamp, linear acceleration, angular
// velocity) straight to the estimator; nothing is buffered here.
void imu_callback(const sensor_msgs::ImuConstPtr &imu_msg)
{
    const double t = imu_msg->header.stamp.toSec();
    const auto &a = imu_msg->linear_acceleration;
    const auto &w = imu_msg->angular_velocity;
    estimator.inputIMU(t, Vector3d(a.x, a.y, a.z), Vector3d(w.x, w.y, w.z));
}
// Receives tracked feature points published by an external feature tracker
// and forwards them to the estimator, grouped per feature id.
// Expected channel layout: [0]=feature_id, [1]=camera_id, [2]=u, [3]=v,
// [4]=velocity_x, [5]=velocity_y, and optionally [6..8]=ground-truth x/y/z.
void feature_callback(const sensor_msgs::PointCloudConstPtr &feature_msg)
{
    map<int, vector<pair<int, Eigen::Matrix<double, 7, 1>>>> featureFrame;
    for (unsigned int i = 0; i < feature_msg->points.size(); i++)
    {
        int feature_id = feature_msg->channels[0].values[i];
        int camera_id = feature_msg->channels[1].values[i];
        double x = feature_msg->points[i].x;
        double y = feature_msg->points[i].y;
        double z = feature_msg->points[i].z;
        double p_u = feature_msg->channels[2].values[i];
        double p_v = feature_msg->channels[3].values[i];
        double velocity_x = feature_msg->channels[4].values[i];
        double velocity_y = feature_msg->channels[5].values[i];
        // Fix: the original tested channels.size() > 5, which reads
        // channels[6..8] out of bounds when only 6-8 channels are present.
        // Ground-truth channels require at least 9 entries.
        if (feature_msg->channels.size() >= 9)
        {
            double gx = feature_msg->channels[6].values[i];
            double gy = feature_msg->channels[7].values[i];
            double gz = feature_msg->channels[8].values[i];
            // pts_gt: presumably a global ground-truth map defined elsewhere
            // in the project -- not visible in this file.
            pts_gt[feature_id] = Eigen::Vector3d(gx, gy, gz);
        }
        // Points are expected on the normalized image plane (z == 1).
        ROS_ASSERT(z == 1);
        Eigen::Matrix<double, 7, 1> xyz_uv_velocity;
        xyz_uv_velocity << x, y, z, p_u, p_v, velocity_x, velocity_y;
        featureFrame[feature_id].emplace_back(camera_id, xyz_uv_velocity);
    }
    double t = feature_msg->header.stamp.toSec();
    estimator.inputFeature(t, featureFrame);
    return;
}
// Resets the estimator when a "true" flag arrives on the restart topic;
// a "false" message is ignored.
void restart_callback(const std_msgs::BoolConstPtr &restart_msg)
{
    if (!restart_msg->data)
        return;
    ROS_WARN("restart the estimator!");
    estimator.clearState();
    estimator.setParameter();
}
// Toggles the estimator's first sensor-type flag at runtime
// (presumably IMU on/off -- changeSensorType is defined elsewhere);
// the stereo setting is passed through unchanged.
void imu_switch_callback(const std_msgs::BoolConstPtr &switch_msg)
{
    estimator.changeSensorType(switch_msg->data ? 1 : 0, STEREO);
}
// Toggles the estimator's second sensor-type flag at runtime
// (presumably mono/stereo -- changeSensorType is defined elsewhere);
// the IMU setting is passed through unchanged.
void cam_switch_callback(const std_msgs::BoolConstPtr &switch_msg)
{
    estimator.changeSensorType(USE_IMU, switch_msg->data ? 1 : 0);
}
// Entry point: reads the YAML config given on the command line, configures the
// estimator, registers all ROS subscriptions, starts the image-sync thread,
// and spins until shutdown.
int main(int argc, char **argv)
{
    ros::init(argc, argv, "vins_estimator");
    ros::NodeHandle n("~");
    ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::levels::Info);

    // Exactly one argument (the config file path) is required.
    if(argc != 2)
    {
        // Fix: corrected the "intput" typo in the usage message.
        printf("please input: rosrun vins vins_node [config file] \n"
               "for example: rosrun vins vins_node "
               "~/catkin_ws/src/VINS-Fusion/config/euroc/euroc_stereo_imu_config.yaml \n");
        return 1;
    }

    string config_file = argv[1];
    printf("config_file: %s\n", argv[1]);
    readParameters(config_file);
    estimator.setParameter();

#ifdef EIGEN_DONT_PARALLELIZE
    ROS_DEBUG("EIGEN_DONT_PARALLELIZE");
#endif

    ROS_WARN("waiting for image and imu...");
    registerPub(n);

    // IMU subscription only when configured; tcpNoDelay reduces latency on
    // the high-rate IMU stream.
    ros::Subscriber sub_imu;
    if(USE_IMU)
    {
        sub_imu = n.subscribe(IMU_TOPIC, 2000, imu_callback, ros::TransportHints().tcpNoDelay());
    }
    ros::Subscriber sub_feature = n.subscribe("/feature_tracker/feature", 2000, feature_callback);
    ros::Subscriber sub_img0 = n.subscribe(IMAGE0_TOPIC, 100, img0_callback);
    ros::Subscriber sub_img1;
    if(STEREO)
    {
        sub_img1 = n.subscribe(IMAGE1_TOPIC, 100, img1_callback);
    }
    ros::Subscriber sub_restart = n.subscribe("/vins_restart", 100, restart_callback);
    ros::Subscriber sub_imu_switch = n.subscribe("/vins_imu_switch", 100, imu_switch_callback);
    ros::Subscriber sub_cam_switch = n.subscribe("/vins_cam_switch", 100, cam_switch_callback);

    // Image/feature synchronization runs on its own thread.
    std::thread sync_thread{sync_process};
    // Fix: the thread was left joinable when main returned, so std::thread's
    // destructor would call std::terminate() on shutdown.  sync_process loops
    // forever, so joining would hang; detach it instead.
    sync_thread.detach();

    ros::spin();
    return 0;
}