LVI-SAM Code Study - 1

Lidar odometry:

The lidar odometry largely reuses the LIO-SAM code, with only minor modifications.

Visual odometry:

Feature extraction and tracking:

In feature_tracker_node.cpp, a callback for lidar data is added alongside the image callback:

  ros::Subscriber sub_img = n.subscribe(IMAGE_TOPIC, 5, img_callback);
  ros::Subscriber sub_lidar = n.subscribe(POINT_CLOUD_TOPIC, 5, lidar_callback);

In lidar_callback:

Step 0: listen via TF for the transform between "vins_world" and "vins_body_ros" at the scan timestamp, and convert it into transNow:

  static tf::TransformListener listener;
  static tf::StampedTransform transform;
  try {
    listener.waitForTransform("vins_world", "vins_body_ros",
                              laser_msg->header.stamp, ros::Duration(0.01));
    listener.lookupTransform("vins_world", "vins_body_ros",
                             laser_msg->header.stamp, transform);
  } catch (tf::TransformException ex) {
    // ROS_ERROR("lidar no tf");
    return;
  }

  double xCur, yCur, zCur, rollCur, pitchCur, yawCur;
  xCur = transform.getOrigin().x();
  yCur = transform.getOrigin().y();
  zCur = transform.getOrigin().z();
  tf::Matrix3x3 m(transform.getRotation());
  m.getRPY(rollCur, pitchCur, yawCur);
  Eigen::Affine3f transNow =
      pcl::getTransformation(xCur, yCur, zCur, rollCur, pitchCur, yawCur);

Step 1: convert the lidar point cloud message to PCL format:

  // 1. convert laser cloud message to pcl
  pcl::PointCloud<PointType>::Ptr laser_cloud_in(
      new pcl::PointCloud<PointType>());
  pcl::fromROSMsg(*laser_msg, *laser_cloud_in);

Step 2: downsample the incoming lidar cloud (to save memory):


  // 2. downsample new cloud (save memory)
  pcl::PointCloud<PointType>::Ptr laser_cloud_in_ds(
      new pcl::PointCloud<PointType>());
  static pcl::VoxelGrid<PointType> downSizeFilter;
  downSizeFilter.setLeafSize(0.2, 0.2, 0.2);
  downSizeFilter.setInputCloud(laser_cloud_in);
  downSizeFilter.filter(*laser_cloud_in_ds);
  *laser_cloud_in = *laser_cloud_in_ds;

Step 3: keep only the points in the camera's field of view. The ratio thresholds amount to an angular cutoff around the forward (x) axis; see the note after the code:

// 3. filter lidar points (only keep points in camera view)
pcl::PointCloud<PointType>::Ptr laser_cloud_in_filter(
    new pcl::PointCloud<PointType>());
for (int i = 0; i < (int)laser_cloud_in->size(); ++i) {
  PointType p = laser_cloud_in->points[i];
  if (p.x >= 0 && abs(p.y / p.x) <= 10 && abs(p.z / p.x) <= 10)
    laser_cloud_in_filter->push_back(p);
}
*laser_cloud_in = *laser_cloud_in_filter;
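
The ratio test is an angular cutoff: a point passes when its ray lies within roughly 84° of the forward axis, since

$$ |y/x| \le 10 \iff |\arctan(y/x)| \le \arctan(10) \approx 84.3^\circ $$

and likewise for z/x, so the filter keeps a generous frustum in front of the camera.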

Step 4: frame conversion; apply the lidar-to-camera extrinsics to move the cloud into the camera frame:

// TODO: transform to IMU body frame
// 4. offset T_lidar -> T_camera
pcl::PointCloud<PointType>::Ptr laser_cloud_offset(
    new pcl::PointCloud<PointType>());
Eigen::Affine3f transOffset =
    pcl::getTransformation(L_C_TX, L_C_TY, L_C_TZ, L_C_RX, L_C_RY, L_C_RZ);
pcl::transformPointCloud(*laser_cloud_in, *laser_cloud_offset, transOffset);
*laser_cloud_in = *laser_cloud_offset;

Step 5: using transNow obtained from the earlier TF lookup, transform the cloud into the vins_world frame:

  // 5. transform new cloud into global odom frame
  pcl::PointCloud<PointType>::Ptr laser_cloud_global(
      new pcl::PointCloud<PointType>());
  pcl::transformPointCloud(*laser_cloud_in, *laser_cloud_global, transNow);

Steps 6 and 7: append the new cloud to the queue, and drop clouds older than 5 seconds:

// 6. save new cloud
double timeScanCur = laser_msg->header.stamp.toSec();
cloudQueue.push_back(*laser_cloud_global);
timeQueue.push_back(timeScanCur);
// 7. pop old cloud
while (!timeQueue.empty()) {
  if (timeScanCur - timeQueue.front() > 5.0) {
    cloudQueue.pop_front();
    timeQueue.pop_front();
  } else {
    break;
  }
}

Steps 8 and 9: fuse the clouds in cloudQueue into a single cloud, then downsample the fused result:

std::lock_guard<std::mutex> lock(mtx_lidar);
// 8. fuse global cloud
depthCloud->clear();
for (int i = 0; i < (int)cloudQueue.size(); ++i) *depthCloud += cloudQueue[i];

// 9. downsample global cloud
pcl::PointCloud<PointType>::Ptr depthCloudDS(
    new pcl::PointCloud<PointType>());
downSizeFilter.setLeafSize(0.2, 0.2, 0.2);
downSizeFilter.setInputCloud(depthCloud);
downSizeFilter.filter(*depthCloudDS);
*depthCloud = *depthCloudDS;

In img_callback:

The optical-flow tracking is nearly identical to VINS-Mono.

double cur_img_time = img_msg->header.stamp.toSec();

// check whether this is the first frame
if (first_image_flag) {
  first_image_flag = false;
  first_image_time = cur_img_time;
  last_image_time = cur_img_time;
  return;
}
// detect unstable camera stream
// use the time gap to decide whether the camera stream is stable; restart if not
if (cur_img_time - last_image_time > 1.0 || cur_img_time < last_image_time) {
  ROS_WARN("image discontinue! reset the feature tracker!");
  first_image_flag = true;
  last_image_time = 0;
  pub_count = 1;
  std_msgs::Bool restart_flag;
  restart_flag.data = true;
  pub_restart.publish(restart_flag);  //PROJECT_NAME + "/vins/feature/restart"
  return;
}
last_image_time = cur_img_time;
// frequency control
// not every incoming frame triggers a feature publication;
// publish only while the achieved rate pub_count / elapsed is still <= FREQ
if (round(1.0 * pub_count / (cur_img_time - first_image_time)) <= FREQ) {
  PUB_THIS_FRAME = true;
  // reset the frequency control
  if (abs(1.0 * pub_count / (cur_img_time - first_image_time) - FREQ) <
      0.01 * FREQ) {
    first_image_time = cur_img_time;
    pub_count = 0;
  }
} else {
  PUB_THIS_FRAME = false;
}

cv_bridge::CvImageConstPtr ptr;
// convert 8UC1-encoded images to mono8
if (img_msg->encoding == "8UC1") {
  sensor_msgs::Image img;
  img.header = img_msg->header;
  img.height = img_msg->height;
  img.width = img_msg->width;
  img.is_bigendian = img_msg->is_bigendian;
  img.step = img_msg->step;
  img.data = img_msg->data;
  img.encoding = "mono8";
  ptr = cv_bridge::toCvCopy(img, sensor_msgs::image_encodings::MONO8);
} else
  ptr = cv_bridge::toCvCopy(img_msg, sensor_msgs::image_encodings::MONO8);

cv::Mat show_img = ptr->image;
TicToc t_r;
for (int i = 0; i < NUM_OF_CAM; i++) {
  ROS_DEBUG("processing camera %d", i);
  // readImage() ingests and processes the image;
  // in the monocular case it tracks features from the previous frame via optical flow
  if (i != 1 || !STEREO_TRACK)
    trackerData[i].readImage(ptr->image.rowRange(ROW * i, ROW * (i + 1)),
                             cur_img_time);
  else {
    if (EQUALIZE) {
      cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE();
      clahe->apply(ptr->image.rowRange(ROW * i, ROW * (i + 1)),
                   trackerData[i].cur_img);
    } else
      trackerData[i].cur_img = ptr->image.rowRange(ROW * i, ROW * (i + 1));
  }

#if SHOW_UNDISTORTION
  trackerData[i].showUndistortion("undistrotion_" + std::to_string(i));
#endif
}

// update global feature IDs
for (unsigned int i = 0;; i++) {
  bool completed = false;
  for (int j = 0; j < NUM_OF_CAM; j++)
    if (j != 1 || !STEREO_TRACK) completed |= trackerData[j].updateID(i);
  if (!completed) break;
}

During publishing, feature_points is assembled exactly as in VINS-Mono:


  if (PUB_THIS_FRAME) {
    pub_count++;
    sensor_msgs::PointCloudPtr feature_points(new sensor_msgs::PointCloud);
    sensor_msgs::ChannelFloat32 id_of_point;
    sensor_msgs::ChannelFloat32 u_of_point;  // pixel coordinate u
    sensor_msgs::ChannelFloat32 v_of_point;  // pixel coordinate v
    sensor_msgs::ChannelFloat32 velocity_x_of_point;
    sensor_msgs::ChannelFloat32 velocity_y_of_point;

    feature_points->header.stamp = img_msg->header.stamp;
    feature_points->header.frame_id = "vins_body";

    vector<set<int>> hash_ids(NUM_OF_CAM);
    for (int i = 0; i < NUM_OF_CAM; i++) {
      auto &un_pts = trackerData[i].cur_un_pts;  // normalized-plane coordinates
      auto &cur_pts = trackerData[i].cur_pts;    // pixel coordinates
      auto &ids = trackerData[i].ids;
      auto &pts_velocity = trackerData[i].pts_velocity;
      for (unsigned int j = 0; j < ids.size(); j++) {
        if (trackerData[i].track_cnt[j] > 1) {  // only publish features tracked more than once
          int p_id = ids[j];
          hash_ids[i].insert(p_id);
          geometry_msgs::Point32 p;
          p.x = un_pts[j].x;
          p.y = un_pts[j].y;
          p.z = 1;

          feature_points->points.push_back(p);
          id_of_point.values.push_back(p_id * NUM_OF_CAM + i);
          u_of_point.values.push_back(cur_pts[j].x);
          v_of_point.values.push_back(cur_pts[j].y);
          velocity_x_of_point.values.push_back(pts_velocity[j].x);
          velocity_y_of_point.values.push_back(pts_velocity[j].y);
        }
      }
    }

    feature_points->channels.push_back(id_of_point);
    feature_points->channels.push_back(u_of_point);
    feature_points->channels.push_back(v_of_point);
    feature_points->channels.push_back(velocity_x_of_point);
    feature_points->channels.push_back(velocity_y_of_point);

In LVI-SAM, lidar depth for the feature points is attached at the same time the visual features are generated:

    // get feature depth from lidar point cloud
    pcl::PointCloud<PointType>::Ptr depth_cloud_temp(
        new pcl::PointCloud<PointType>());
    mtx_lidar.lock();
    *depth_cloud_temp = *depthCloud;
    mtx_lidar.unlock();

    sensor_msgs::ChannelFloat32 depth_of_points = depthRegister->get_depth(
        img_msg->header.stamp, show_img, depth_cloud_temp,
        trackerData[0].m_camera, feature_points->points);
    feature_points->channels.push_back(depth_of_points);

Here depthRegister->get_depth is the core function that retrieves lidar depth for the feature points.

depthRegister->get_depth:

Steps 0.1 and 0.2: initialize the return channel with one entry per feature point (all depths default to -1), and check that the incoming depth cloud is non-empty:

    // 0.1 initialize depth for return
    sensor_msgs::ChannelFloat32 depth_of_point;
    depth_of_point.name = "depth";
    depth_of_point.values.resize(features_2d.size(), -1);

    // 0.2  check if depthCloud available
    if (depthCloud->size() == 0) return depth_of_point;

Step 0.3: look up via TF the transform between "vins_world" and "vins_body_ros" at the image timestamp:

// 0.3 look up transform at current image time
try {
  listener.waitForTransform("vins_world", "vins_body_ros", stamp_cur,
                            ros::Duration(0.01));
  listener.lookupTransform("vins_world", "vins_body_ros", stamp_cur,
                           transform);
} catch (tf::TransformException ex) {
  // ROS_ERROR("image no tf");
  return depth_of_point;
}

double xCur, yCur, zCur, rollCur, pitchCur, yawCur;
xCur = transform.getOrigin().x();
yCur = transform.getOrigin().y();
zCur = transform.getOrigin().z();
tf::Matrix3x3 m(transform.getRotation());
m.getRPY(rollCur, pitchCur, yawCur);
Eigen::Affine3f transNow =
  pcl::getTransformation(xCur, yCur, zCur, rollCur, pitchCur, yawCur);

Step 0.4: using the camera's global pose, transform the depth cloud from the global frame back into the local body frame (the inverse of transNow):

// 0.4 transform cloud from global frame to camera frame
pcl::PointCloud<PointType>::Ptr depth_cloud_local(
    new pcl::PointCloud<PointType>());
pcl::transformPointCloud(*depthCloud, *depth_cloud_local,
                         transNow.inverse());

Step 0.5: project the feature points onto a unit sphere, converting their representation to the ROS axis convention; see the mapping after the code:

// 0.5 project undistorted normalized (z) 2d features onto a unit sphere
pcl::PointCloud<PointType>::Ptr features_3d_sphere(
    new pcl::PointCloud<PointType>());
for (int i = 0; i < (int)features_2d.size(); ++i) {
  // normalize 2d feature to a unit sphere
  Eigen::Vector3f feature_cur(features_2d[i].x, features_2d[i].y,
                              features_2d[i].z);  // z always equal to 1
  feature_cur.normalize();
  // convert to ROS standard
  PointType p;
  p.x = feature_cur(2);
  p.y = -feature_cur(0);
  p.z = -feature_cur(1);
  p.intensity = -1;  // intensity will be used to save depth
  features_3d_sphere->push_back(p);
}
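
The sign flips in the loop are the standard camera-to-ROS axis remapping (camera: x right, y down, z forward; ROS: x forward, y left, z up):

$$ (x,\ y,\ z)_{\text{ROS}} = (z_{\text{cam}},\ -x_{\text{cam}},\ -y_{\text{cam}}) $$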

Step 3: compute the range of every qualifying lidar point and keep the closest point per cell of rangeImage. (The step numbering in the source comments jumps straight from 0.5 to 3.)

// 3. project depth cloud on a range image, filter points satcked in the
// same region
float bin_res = 180.0 / (float)num_bins;  // currently only cover the space
                                          // in front of lidar (-90 ~ 90)
cv::Mat rangeImage =
    cv::Mat(num_bins, num_bins, CV_32F, cv::Scalar::all(FLT_MAX));

for (int i = 0; i < (int)depth_cloud_local->size(); ++i) {
  PointType p = depth_cloud_local->points[i];
  // filter out points outside the camera's field of view
  if (p.x < 0 || abs(p.y / p.x) > 10 || abs(p.z / p.x) > 10) continue;
  // find row id in range image
  float row_angle = atan2(p.z, sqrt(p.x * p.x + p.y * p.y)) * 180.0 / M_PI +
                    90.0;  // degrees, bottom -> up, range 0 -> 180
  int row_id = round(row_angle / bin_res);
  // find column id in range image
  float col_angle =
      atan2(p.x, p.y) * 180.0 / M_PI;  // degrees, left -> right, range 0 -> 180
  int col_id = round(col_angle / bin_res);
  // id may be out of boundary
  if (row_id < 0 || row_id >= num_bins || col_id < 0 || col_id >= num_bins)
    continue;
  // only keep the closest point in each bin
  float dist = pointDistance(p);
  if (dist < rangeImage.at<float>(row_id, col_id)) {
    rangeImage.at<float>(row_id, col_id) = dist;
    pointsArray[row_id][col_id] = p;
  }
}
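
pointDistance() is a utility defined elsewhere in the project; a minimal equivalent, assuming it simply returns the Euclidean norm of the point (as in the LIO-SAM utility header), would be:

  // assumed helper: Euclidean distance of a point from the sensor origin
  float pointDistance(const PointType &p) {
    return std::sqrt(p.x * p.x + p.y * p.y + p.z * p.z);
  }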

Step 4: extract the downsampled depth cloud from the range image and publish it (presumably for RViz visualization):

// 4. extract downsampled depth cloud from range image
pcl::PointCloud<PointType>::Ptr depth_cloud_local_filter2(
    new pcl::PointCloud<PointType>());
for (int i = 0; i < num_bins; ++i) {
  for (int j = 0; j < num_bins; ++j) {
    if (rangeImage.at<float>(i, j) != FLT_MAX)
      depth_cloud_local_filter2->push_back(pointsArray[i][j]);
  }
}
*depth_cloud_local = *depth_cloud_local_filter2;
publishCloud(&pub_depth_cloud, depth_cloud_local, stamp_cur,
             "vins_body_ros");

Step 5: project the filtered depth cloud onto a unit sphere, storing each point's range in its intensity field; if fewer than 10 points remain, skip further processing:

// 5. project depth cloud onto a unit sphere
pcl::PointCloud<PointType>::Ptr depth_cloud_unit_sphere(
    new pcl::PointCloud<PointType>());
for (int i = 0; i < (int)depth_cloud_local->size(); ++i) {
  PointType p = depth_cloud_local->points[i];
  float range = pointDistance(p);
  p.x /= range;
  p.y /= range;
  p.z /= range;
  p.intensity = range;
  depth_cloud_unit_sphere->push_back(p);
}
if (depth_cloud_unit_sphere->size() < 10) return depth_of_point;

Step 6: build a KD-tree from the spherical depth cloud:

// 6. create a kd-tree using the spherical depth cloud
pcl::KdTreeFLANN<PointType>::Ptr kdtree(new pcl::KdTreeFLANN<PointType>());
kdtree->setInputCloud(depth_cloud_unit_sphere);

Step 7: compute each feature's depth. Search the unit sphere for the 3 lidar points nearest to the feature; if the farthest of the three is within dist_sq_threshold, recover their 3D positions A, B, C, take the cross product of two edge vectors to get the plane normal N, and solve for the scale s at which the feature ray intersects that plane (the derivation is sketched below). If the three depths differ by more than 2 m (the neighbors likely straddle an occlusion boundary) or s <= 0.5 m, the match is rejected; otherwise s is clamped into [min_depth, max_depth].
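
The intersection scale comes from requiring the scaled ray sV to lie on the plane through A with normal N:

$$ N \cdot (sV - A) = 0 \;\Rightarrow\; s = \frac{N \cdot A}{N \cdot V} $$

Since V is a unit vector, s is exactly the feature's range along the ray.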

// 7. find the feature depth using kd-tree
vector<int> pointSearchInd;
vector<float> pointSearchSqDis;
float dist_sq_threshold = pow(sin(bin_res / 180.0 * M_PI) * 5.0, 2);
for (int i = 0; i < (int)features_3d_sphere->size(); ++i) {
  kdtree->nearestKSearch(features_3d_sphere->points[i], 3, pointSearchInd,
                         pointSearchSqDis);
  if (pointSearchInd.size() == 3 &&
      pointSearchSqDis[2] < dist_sq_threshold) {
    float r1 = depth_cloud_unit_sphere->points[pointSearchInd[0]].intensity;
    Eigen::Vector3f A(
        depth_cloud_unit_sphere->points[pointSearchInd[0]].x * r1,
        depth_cloud_unit_sphere->points[pointSearchInd[0]].y * r1,
        depth_cloud_unit_sphere->points[pointSearchInd[0]].z * r1);

    float r2 = depth_cloud_unit_sphere->points[pointSearchInd[1]].intensity;
    Eigen::Vector3f B(
        depth_cloud_unit_sphere->points[pointSearchInd[1]].x * r2,
        depth_cloud_unit_sphere->points[pointSearchInd[1]].y * r2,
        depth_cloud_unit_sphere->points[pointSearchInd[1]].z * r2);

    float r3 = depth_cloud_unit_sphere->points[pointSearchInd[2]].intensity;
    Eigen::Vector3f C(
        depth_cloud_unit_sphere->points[pointSearchInd[2]].x * r3,
        depth_cloud_unit_sphere->points[pointSearchInd[2]].y * r3,
        depth_cloud_unit_sphere->points[pointSearchInd[2]].z * r3);

    // https://math.stackexchange.com/questions/100439/determine-where-a-vector-will-intersect-a-plane
    Eigen::Vector3f V(features_3d_sphere->points[i].x,
                      features_3d_sphere->points[i].y,
                      features_3d_sphere->points[i].z);

    Eigen::Vector3f N = (A - B).cross(B - C);  // normal of the plane through A, B, C
    float s = (N(0) * A(0) + N(1) * A(1) + N(2) * A(2)) /
              (N(0) * V(0) + N(1) * V(1) + N(2) * V(2));  // range along the feature ray V to the plane

    float min_depth = min(r1, min(r2, r3));
    float max_depth = max(r1, max(r2, r3));
    if (max_depth - min_depth > 2 || s <= 0.5) {
      continue;
    } else if (s - max_depth > 0) {
      s = max_depth;
    } else if (s - min_depth < 0) {
      s = min_depth;
    }
    // convert feature into cartesian space if depth is available
    features_3d_sphere->points[i].x *= s;
    features_3d_sphere->points[i].y *= s;
    features_3d_sphere->points[i].z *= s;
    // the obtained depth here is for unit sphere, VINS estimator needs
    // depth for normalized feature (by value z), (lidar x = camera z)
    features_3d_sphere->points[i].intensity =
        features_3d_sphere->points[i].x;
  }
}

Finally: the feature cloud with matched depths is published. Only lidar-matched depths greater than 3 meters are trusted; features without a match keep the default depth of -1. A minimal sketch of this closing step of get_depth is shown below.
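
  // final step of get_depth (a sketch based on the description above;
  // the publisher name pub_depth_feature is an assumption, not confirmed
  // from this excerpt): keep only trusted depths beyond 3 m
  if (pub_depth_feature.getNumSubscribers() != 0)
    publishCloud(&pub_depth_feature, features_3d_sphere, stamp_cur,
                 "vins_body_ros");
  for (int i = 0; i < (int)features_3d_sphere->size(); ++i) {
    if (features_3d_sphere->points[i].intensity > 3.0)
      depth_of_point.values[i] = features_3d_sphere->points[i].intensity;
  }
  return depth_of_point;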

Back in img_callback, the feature cloud and the annotated image are published after processing:

    // skip the first image, since there is no optical-flow velocity for it
    if (!init_pub) {
      init_pub = 1;
    } else
      pub_feature.publish(feature_points);

    // publish features in image
    if (pub_match.getNumSubscribers() != 0) {
      ptr = cv_bridge::cvtColor(ptr, sensor_msgs::image_encodings::RGB8);
      // cv::Mat stereo_img(ROW * NUM_OF_CAM, COL, CV_8UC3);
      cv::Mat stereo_img = ptr->image;

      for (int i = 0; i < NUM_OF_CAM; i++) {
        cv::Mat tmp_img = stereo_img.rowRange(i * ROW, (i + 1) * ROW);
        cv::cvtColor(show_img, tmp_img, CV_GRAY2RGB);

        for (unsigned int j = 0; j < trackerData[i].cur_pts.size(); j++) {
          if (SHOW_TRACK) {
            // track count
            double len =
                std::min(1.0, 1.0 * trackerData[i].track_cnt[j] / WINDOW_SIZE);
            cv::circle(tmp_img, trackerData[i].cur_pts[j], 4,
                       cv::Scalar(255 * (1 - len), 255 * len, 0), 4);
          } else {
            // depth
            if (j < depth_of_points.values.size()) {
              if (depth_of_points.values[j] > 0)
                cv::circle(tmp_img, trackerData[i].cur_pts[j], 4,
                           cv::Scalar(0, 255, 0), 4);
              else
                cv::circle(tmp_img, trackerData[i].cur_pts[j], 4,
                           cv::Scalar(0, 0, 255), 4);
            }
          }
        }
      }

      pub_match.publish(ptr->toImageMsg());
    }
