Reading Autoware's ndt_mapping node

Building a map with a lidar starts from the dense point cloud produced by each scan, which is downsampled with a voxel-grid filter to obtain a lighter cloud that still preserves the scene structure. Each filtered scan is then registered against the map with NDT and the scans are stitched together frame by frame, yielding a complete point cloud map along the path driven by the lidar.
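Before diving into the node, a minimal sketch of this pipeline with plain PCL may help; everything here (the function name, parameter values, variable names) is illustrative and not taken from the Autoware source:

// Minimal sketch of the mapping loop with plain PCL (illustrative only, not the Autoware source;
// function name, parameter values and variable names are assumptions).
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/registration/ndt.h>
#include <pcl/common/transforms.h>

pcl::PointCloud<pcl::PointXYZI> map;  // the map accumulated so far

void add_scan_to_map(const pcl::PointCloud<pcl::PointXYZI>::Ptr& scan,
                     const Eigen::Matrix4f& init_guess)
{
  if (map.empty())  // seed the map with the very first scan
  {
    map += *scan;
    return;
  }

  // 1. Downsample the new scan so NDT works on a lighter cloud that keeps the structure.
  pcl::PointCloud<pcl::PointXYZI>::Ptr filtered(new pcl::PointCloud<pcl::PointXYZI>());
  pcl::VoxelGrid<pcl::PointXYZI> voxel;
  voxel.setLeafSize(2.0f, 2.0f, 2.0f);
  voxel.setInputCloud(scan);
  voxel.filter(*filtered);

  // 2. Register the filtered scan against the current map with NDT.
  pcl::NormalDistributionsTransform<pcl::PointXYZI, pcl::PointXYZI> ndt;
  ndt.setResolution(1.0);
  ndt.setStepSize(0.1);
  ndt.setTransformationEpsilon(0.01);
  ndt.setMaximumIterations(30);
  ndt.setInputSource(filtered);
  ndt.setInputTarget(map.makeShared());

  pcl::PointCloud<pcl::PointXYZI> aligned;
  ndt.align(aligned, init_guess);

  // 3. Transform the full-resolution scan with the estimated pose and append it to the map.
  pcl::PointCloud<pcl::PointXYZI> transformed;
  pcl::transformPointCloud(*scan, transformed, ndt.getFinalTransformation());
  map += transformed;
}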

ndt_mapping

First, look at the ndt_mapping launch file, which sets the relevant parameters and runs the ndt_mapping and queue_counter nodes.

<!-- -->
<launch>
  <arg name="method_type" default="0" /> <!-- pcl_generic=0, pcl_anh=1, pcl_anh_gpu=2, pcl_openmp=3 -->
  <arg name="use_odom" default="false" />
  <arg name="use_imu" default="false" />
  <arg name="imu_upside_down" default="false" />
  <arg name="imu_topic" default="/imu_raw" />
  <arg name="incremental_voxel_update" default="false" />
  <!-- rosrun lidar_localizer ndt_mapping  -->
  <node pkg="lidar_localizer" type="queue_counter" name="queue_counter" output="screen"/>
  <node pkg="lidar_localizer" type="ndt_mapping" name="ndt_mapping" output="screen">
    <param name="method_type" value="$(arg method_type)" />
    <param name="use_imu" value="$(arg use_imu)" />
    <param name="use_odom" value="$(arg use_odom)" />
    <param name="imu_upside_down" value="$(arg imu_upside_down)" />
    <param name="imu_topic" value="$(arg imu_topic)" />
    <param name="incremental_voxel_update" value="$(arg incremental_voxel_update)" />
  </node>
</launch>
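As a usage note (assuming this file ships as ndt_mapping.launch in the lidar_localizer package, as in the standard Autoware tree), the arguments can be overridden on the command line, for example: roslaunch lidar_localizer ndt_mapping.launch use_imu:=true imu_topic:=/imu/data.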

2. The main function
The main function first initializes the various pose variables: previous_pose is the vehicle pose at the previous frame, current_pose the pose at the current frame, ndt_pose the pose estimated by NDT registration, guess_pose the initial guess supplied to the registration, diff_* the change in position and yaw between two consecutive frames, and the offset_* variables the accumulated position/orientation corrections from the IMU and odometry.

  previous_pose.x = 0.0;
  previous_pose.y = 0.0;
  previous_pose.z = 0.0;
  previous_pose.roll = 0.0;
  previous_pose.pitch = 0.0;
  previous_pose.yaw = 0.0;

  ndt_pose.x = 0.0;
  ndt_pose.y = 0.0;
  ndt_pose.z = 0.0;
  ndt_pose.roll = 0.0;
  ndt_pose.pitch = 0.0;
  ndt_pose.yaw = 0.0;

  current_pose.x = 0.0;
  current_pose.y = 0.0;
  current_pose.z = 0.0;
  current_pose.roll = 0.0;
  current_pose.pitch = 0.0;
  current_pose.yaw = 0.0;

  current_pose_imu.x = 0.0;
  current_pose_imu.y = 0.0;
  current_pose_imu.z = 0.0;
  current_pose_imu.roll = 0.0;
  current_pose_imu.pitch = 0.0;
  current_pose_imu.yaw = 0.0;

  guess_pose.x = 0.0;
  guess_pose.y = 0.0;
  guess_pose.z = 0.0;
  guess_pose.roll = 0.0;
  guess_pose.pitch = 0.0;
  guess_pose.yaw = 0.0;

  added_pose.x = 0.0;
  added_pose.y = 0.0;
  added_pose.z = 0.0;
  added_pose.roll = 0.0;
  added_pose.pitch = 0.0;
  added_pose.yaw = 0.0;

  diff_x = 0.0;
  diff_y = 0.0;
  diff_z = 0.0;
  diff_yaw = 0.0;

  offset_imu_x = 0.0;
  offset_imu_y = 0.0;
  offset_imu_z = 0.0;
  offset_imu_roll = 0.0;
  offset_imu_pitch = 0.0;
  offset_imu_yaw = 0.0;

  offset_odom_x = 0.0;
  offset_odom_y = 0.0;
  offset_odom_z = 0.0;
  offset_odom_roll = 0.0;
  offset_odom_pitch = 0.0;
  offset_odom_yaw = 0.0;

  offset_imu_odom_x = 0.0;
  offset_imu_odom_y = 0.0;
  offset_imu_odom_z = 0.0;
  offset_imu_odom_roll = 0.0;
  offset_imu_odom_pitch = 0.0;
  offset_imu_odom_yaw = 0.0;

The point cloud map is published on the /ndt_map topic and the estimated pose on /current_pose; callbacks subscribe to the mapping configuration topics, the raw point cloud, the vehicle odometry and the IMU.

  ndt_map_pub = nh.advertise<sensor_msgs::PointCloud2>("/ndt_map", 1000);
  current_pose_pub = nh.advertise<geometry_msgs::PoseStamped>("/current_pose", 1000);
  ros::Subscriber param_sub = nh.subscribe("config/ndt_mapping", 10, param_callback);
  ros::Subscriber output_sub = nh.subscribe("config/ndt_mapping_output", 10, output_callback);
  ros::Subscriber points_sub = nh.subscribe("points_raw", 100000, points_callback);
  ros::Subscriber odom_sub = nh.subscribe("/vehicle/odom", 100000, odom_callback);
  ros::Subscriber imu_sub = nh.subscribe(_imu_topic, 100000, imu_callback);

A struct represents a 6-DoF pose, and an enum class lists the available NDT implementations; the method type can be selected when running Autoware.

struct pose//struct describing a 6-DoF pose
{
  double x;
  double y;
  double z;
  double roll;
  double pitch;
  double yaw;
};
enum class MethodType//which NDT implementation to use
{
  PCL_GENERIC = 0,
  PCL_ANH = 1,
  PCL_ANH_GPU = 2,
  PCL_OPENMP = 3,
};

The parameter callback reads its settings from a ConfigNDTMapping message: the NDT resolution, step size, transformation epsilon, maximum number of iterations, voxel leaf size, the minimum/maximum scan range (used to drop points that are too close to or too far from the vehicle), and min_add_scan_shift, the distance the vehicle must travel before a new scan is appended to the map (a sketch of how this threshold is typically used follows the callback below).

static void param_callback(const autoware_config_msgs::ConfigNDTMapping::ConstPtr& input)
{
  ndt_res = input->resolution;
  step_size = input->step_size;
  trans_eps = input->trans_epsilon;
  max_iter = input->max_iterations;
  voxel_leaf_size = input->leaf_size;
  min_scan_range = input->min_scan_range;
  max_scan_range = input->max_scan_range;
  min_add_scan_shift = input->min_add_scan_shift;
  
  std::cout << "param_callback" << std::endl;
  std::cout << "ndt_res: " << ndt_res << std::endl;
  std::cout << "step_size: " << step_size << std::endl;
  std::cout << "trans_epsilon: " << trans_eps << std::endl;
  std::cout << "max_iter: " << max_iter << std::endl;
  std::cout << "voxel_leaf_size: " << voxel_leaf_size << std::endl;
  std::cout << "min_scan_range: " << min_scan_range << std::endl;
  std::cout << "max_scan_range: " << max_scan_range << std::endl;
  std::cout << "min_add_scan_shift: " << min_add_scan_shift << std::endl;
}
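min_add_scan_shift is only stored here; it is consumed later when deciding whether to append a newly registered scan to the map. A minimal sketch of that check, assuming the added_pose and current_pose variables initialized in main (the snippet is illustrative, not the verbatim source):

  // Append the registered scan to the map only after the vehicle has moved
  // at least min_add_scan_shift metres since the last insertion.
  double shift = std::sqrt(std::pow(current_pose.x - added_pose.x, 2.0) +
                           std::pow(current_pose.y - added_pose.y, 2.0));  // planar distance travelled since the last map update
  if (shift >= min_add_scan_shift)
  {
    map += *transformed_scan_ptr;  // grow the map with the newly aligned scan
    added_pose = current_pose;     // remember where the last scan was added
  }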

output_callback saves and publishes the map. It first reads the filter resolution and output filename from the configuration message and declares pointers to the raw map and a filtered copy. It then applies a voxel-grid filter: if the filter resolution is 0, the raw map is used and its size is printed; otherwise the map is downsampled first. The selected cloud is converted to a ROS PointCloud2 message, published via ndt_map_pub, and written to a PCD file.

static void output_callback(const autoware_config_msgs::ConfigNDTMappingOutput::ConstPtr& input)
{
  double filter_res = input->filter_res;
  std::string filename = input->filename;
  std::cout << "output_callback" << std::endl;
  std::cout << "filter_res: " << filter_res << std::endl;
  std::cout << "filename: " << filename << std::endl;

  pcl::PointCloud<pcl::PointXYZI>::Ptr map_ptr(new pcl::PointCloud<pcl::PointXYZI>(map));//pointers to the raw map and to the filtered map
  pcl::PointCloud<pcl::PointXYZI>::Ptr map_filtered(new pcl::PointCloud<pcl::PointXYZI>());
  map_ptr->header.frame_id = "map";
  map_filtered->header.frame_id = "map";
  sensor_msgs::PointCloud2::Ptr map_msg_ptr(new sensor_msgs::PointCloud2);

  // Apply voxelgrid filter
  if (filter_res == 0.0)
  {
    std::cout << "Original: " << map_ptr->points.size() << " points." << std::endl;//如果分辨率为零,则输出原始点云地图的尺寸
    pcl::toROSMsg(*map_ptr, *map_msg_ptr);
  }
  else
  {
    pcl::VoxelGrid<pcl::PointXYZI> voxel_grid_filter;//declare the voxel-grid filter
    voxel_grid_filter.setLeafSize(filter_res, filter_res, filter_res);//set the voxel (leaf) size
    voxel_grid_filter.setInputCloud(map_ptr);//use the accumulated map as the input cloud
    voxel_grid_filter.filter(*map_filtered);//downsample and store the result in map_filtered
    std::cout << "Original: " << map_ptr->points.size() << " points." << std::endl;
    std::cout << "Filtered: " << map_filtered->points.size() << " points." << std::endl;//report the size of the filtered cloud
    pcl::toROSMsg(*map_filtered, *map_msg_ptr);
  }

  ndt_map_pub.publish(*map_msg_ptr);

  // Writing Point Cloud data to PCD file
  if (filter_res == 0.0)
  {
    pcl::io::savePCDFileASCII(filename, *map_ptr);//if no voxel filtering was applied, write the raw map to the PCD file
    std::cout << "Saved " << map_ptr->points.size() << " data points to " << filename << "." << std::endl;
  }
  else
  {
    pcl::io::savePCDFileASCII(filename, *map_filtered);//otherwise write the filtered map to the PCD file
    std::cout << "Saved " << map_filtered->points.size() << " data points to " << filename << "." << std::endl;
  }
}

Every NDT registration needs an initial guess. In this node the guess can be obtained in several ways: (1) plain extrapolation, the previous pose plus the last inter-frame motion; (2) prediction from the IMU; (3) prediction from the wheel odometry; or (4) prediction from the combined IMU and odometry data.

odom_calc computes the initial guess from the odometry. It first takes the time difference between two frames and, assuming the angular velocity is roughly constant over that short interval, integrates it to obtain the change in orientation, which is added to the previous odometry orientation to update current_pose_odom. It then accumulates the translational and rotational offsets and finally forms the guess: guess_pose_odom = previous pose + accumulated odometry offset.

static void odom_calc(ros::Time current_time)
{
  static ros::Time previous_time = current_time;
  double diff_time = (current_time - previous_time).toSec();//time elapsed between the two frames

  double diff_odom_roll = odom.twist.twist.angular.x * diff_time;//rotation accumulated from the odometry over this interval
  double diff_odom_pitch = odom.twist.twist.angular.y * diff_time;
  double diff_odom_yaw = odom.twist.twist.angular.z * diff_time;

  current_pose_odom.roll += diff_odom_roll;//update the current odometry orientation: previous orientation plus the increment
  current_pose_odom.pitch += diff_odom_pitch;
  current_pose_odom.yaw += diff_odom_yaw;

  double diff_distance = odom.twist.twist.linear.x * diff_time;//distance travelled along the vehicle x axis; the next lines project it into the map frame using pitch and yaw
  offset_odom_x += diff_distance * cos(-current_pose_odom.pitch) * cos(current_pose_odom.yaw);
  offset_odom_y += diff_distance * cos(-current_pose_odom.pitch) * sin(current_pose_odom.yaw);
  offset_odom_z += diff_distance * sin(-current_pose_odom.pitch);

  offset_odom_roll += diff_odom_roll;
  offset_odom_pitch += diff_odom_pitch;
  offset_odom_yaw += diff_odom_yaw;

  guess_pose_odom.x = previous_pose.x + offset_odom_x;//corrected guess = previous pose + accumulated odometry offset
  guess_pose_odom.y = previous_pose.y + offset_odom_y;
  guess_pose_odom.z = previous_pose.z + offset_odom_z;
  guess_pose_odom.roll = previous_pose.roll + offset_odom_roll;
  guess_pose_odom.pitch = previous_pose.pitch + offset_odom_pitch;
  guess_pose_odom.yaw = previous_pose.yaw + offset_odom_yaw;
  previous_time = current_time;
}

imu_calc computes the initial guess from the IMU: the angular velocities are integrated to update the IMU orientation, the measured acceleration is rotated from the sensor frame into the map frame through roll, pitch and yaw, and the position offset is propagated by constant-acceleration integration (v·dt + a·dt²/2) while the velocity is updated with a·dt.

static void imu_calc(ros::Time current_time)
{
  static ros::Time previous_time = current_time;
  double diff_time = (current_time - previous_time).toSec();

  double diff_imu_roll = imu.angular_velocity.x * diff_time;
  double diff_imu_pitch = imu.angular_velocity.y * diff_time;
  double diff_imu_yaw = imu.angular_velocity.z * diff_time;

  current_pose_imu.roll += diff_imu_roll;
  current_pose_imu.pitch += diff_imu_pitch;
  current_pose_imu.yaw += diff_imu_yaw;

  double accX1 = imu.linear_acceleration.x;
  double accY1 = std::cos(current_pose_imu.roll) * imu.linear_acceleration.y -
                 std::sin(current_pose_imu.roll) * imu.linear_acceleration.z;
  double accZ1 = std::sin(current_pose_imu.roll) * imu.linear_acceleration.y +
                 std::cos(current_pose_imu.roll) * imu.linear_acceleration.z;

  double accX2 = std::sin(current_pose_imu.pitch) * accZ1 + std::cos(current_pose_imu.pitch) * accX1;
  double accY2 = accY1;
  double accZ2 = std::cos(current_pose_imu.pitch) * accZ1 - std::sin(current_pose_imu.pitch) * accX1;

  double accX = std::cos(current_pose_imu.yaw) * accX2 - std::sin(current_pose_imu.yaw) * accY2;
  double accY = std::sin(current_pose_imu.yaw) * accX2 + std::cos(current_pose_imu.yaw) * accY2;
  double accZ = accZ2;

  offset_imu_x += current_velocity_imu_x * diff_time + accX * diff_time * diff_time / 2.0;
  offset_imu_y += current_velocity_imu_y * diff_time + accY * diff_time * diff_time / 2.0;
  offset_imu_z += current_velocity_imu_z * diff_time + accZ * diff_time * diff_time / 2.0;

  current_velocity_imu_x += accX * diff_time;
  current_velocity_imu_y += accY * diff_time;
  current_velocity_imu_z += accZ * diff_time;

  offset_imu_roll += diff_imu_roll;
  offset_imu_pitch += diff_imu_pitch;
  offset_imu_yaw += diff_imu_yaw;

  guess_pose_imu.x = previous_pose.x + offset_imu_x;
  guess_pose_imu.y = previous_pose.y + offset_imu_y;
  guess_pose_imu.z = previous_pose.z + offset_imu_z;
  guess_pose_imu.roll = previous_pose.roll + offset_imu_roll;
  guess_pose_imu.pitch = previous_pose.pitch + offset_imu_pitch;
  guess_pose_imu.yaw = previous_pose.yaw + offset_imu_yaw;

  previous_time = current_time;
}

imu_odom_calc combines IMU and odometry: the orientation increment comes from the IMU angular velocities, while the travelled distance comes from the odometry linear velocity, projected into the map frame as in odom_calc.

static void imu_odom_calc(ros::Time current_time)
{
  static ros::Time previous_time = current_time;
  double diff_time = (current_time - previous_time).toSec();

  double diff_imu_roll = imu.angular_velocity.x * diff_time;
  double diff_imu_pitch = imu.angular_velocity.y * diff_time;
  double diff_imu_yaw = imu.angular_velocity.z * diff_time;

  current_pose_imu_odom.roll += diff_imu_roll;
  current_pose_imu_odom.pitch += diff_imu_pitch;
  current_pose_imu_odom.yaw += diff_imu_yaw;

  double diff_distance = odom.twist.twist.linear.x * diff_time;
  offset_imu_odom_x += diff_distance * cos(-current_pose_imu_odom.pitch) * cos(current_pose_imu_odom.yaw);
  offset_imu_odom_y += diff_distance * cos(-current_pose_imu_odom.pitch) * sin(current_pose_imu_odom.yaw);
  offset_imu_odom_z += diff_distance * sin(-current_pose_imu_odom.pitch);

  offset_imu_odom_roll += diff_imu_roll;
  offset_imu_odom_pitch += diff_imu_pitch;
  offset_imu_odom_yaw += diff_imu_yaw;

  guess_pose_imu_odom.x = previous_pose.x + offset_imu_odom_x;
  guess_pose_imu_odom.y = previous_pose.y + offset_imu_odom_y;
  guess_pose_imu_odom.z = previous_pose.z + offset_imu_odom_z;
  guess_pose_imu_odom.roll = previous_pose.roll + offset_imu_odom_roll;
  guess_pose_imu_odom.pitch = previous_pose.pitch + offset_imu_odom_pitch;
  guess_pose_imu_odom.yaw = previous_pose.yaw + offset_imu_odom_yaw;

  previous_time = current_time;
}

The IMU callback processes each incoming IMU message. It reads the message timestamp and computes the time elapsed since the previous message, extracts roll/pitch/yaw from the orientation quaternion and wraps the angles into [-π, π], keeps only the linear acceleration along the x axis, derives instantaneous angular velocities from the angle differences (setting them to zero when diff_time is 0), calls imu_calc to update the pose guess, and finally stores the current time and angles as the previous values. (A sketch of the wrapToPmPi and calcDiffForRadian helpers used here follows the callback below.)

static void imu_callback(const sensor_msgs::Imu::Ptr& input)//callback argument: the received IMU message
{
  // std::cout << __func__ << std::endl;

  if (_imu_upside_down)//if the IMU is mounted upside down, flip its measurements first
    imuUpsideDown(input);

  const ros::Time current_time = input->header.stamp;//timestamp of this IMU message
  static ros::Time previous_time = current_time;
  const double diff_time = (current_time - previous_time).toSec();//time elapsed since the previous message

  double imu_roll, imu_pitch, imu_yaw;
  tf::Quaternion imu_orientation;//IMU orientation quaternion
  tf::quaternionMsgToTF(input->orientation, imu_orientation);//convert the quaternion message into imu_orientation
  tf::Matrix3x3(imu_orientation).getRPY(imu_roll, imu_pitch, imu_yaw);//extract the current roll/pitch/yaw of the IMU

  imu_roll = wrapToPmPi(imu_roll);//wrap the angles into [-pi, pi]
  imu_pitch = wrapToPmPi(imu_pitch);
  imu_yaw = wrapToPmPi(imu_yaw);

  static double previous_imu_roll = imu_roll, previous_imu_pitch = imu_pitch, previous_imu_yaw = imu_yaw;
  const double diff_imu_roll = calcDiffForRadian(imu_roll, previous_imu_roll);
  const double diff_imu_pitch = calcDiffForRadian(imu_pitch, previous_imu_pitch);
  const double diff_imu_yaw = calcDiffForRadian(imu_yaw, previous_imu_yaw);

  imu.header = input->header;
  imu.linear_acceleration.x = input->linear_acceleration.x;//keep only the linear acceleration along the x axis
  // imu.linear_acceleration.y = input->linear_acceleration.y;
  // imu.linear_acceleration.z = input->linear_acceleration.z;
  imu.linear_acceleration.y = 0;
  imu.linear_acceleration.z = 0;

  if (diff_time != 0)
  {
    imu.angular_velocity.x = diff_imu_roll / diff_time;//instantaneous angular velocity from the angle difference
    imu.angular_velocity.y = diff_imu_pitch / diff_time;
    imu.angular_velocity.z = diff_imu_yaw / diff_time;
  }
  else
  {
    imu.angular_velocity.x = 0;
    imu.angular_velocity.y = 0;
    imu.angular_velocity.z = 0;
  }

  imu_calc(input->header.stamp);//update the pose guess from the IMU

  previous_time = current_time;//remember the current time and angles for the next callback
  previous_imu_roll = imu_roll;
  previous_imu_pitch = imu_pitch;
  previous_imu_yaw = imu_yaw;
}
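The two helpers wrapToPmPi and calcDiffForRadian are not shown above. As a hedged sketch (the actual Autoware implementations may differ in detail), they wrap an angle into [-π, π] and compute the signed difference of two angles while handling the ±π wrap-around:

// Illustrative sketch of the angle helpers used above (not the verbatim Autoware source).
#include <cmath>

// Wrap an angle in radians into the range [-pi, pi).
static double wrapToPmPi(double angle_rad)
{
  double wrapped = std::fmod(angle_rad + M_PI, 2.0 * M_PI);
  if (wrapped < 0.0)
    wrapped += 2.0 * M_PI;
  return wrapped - M_PI;
}

// Signed difference lhs - rhs, corrected so that crossing +/-pi does not produce a ~2*pi jump.
static double calcDiffForRadian(double lhs_rad, double rhs_rad)
{
  double diff = lhs_rad - rhs_rad;
  if (diff >= M_PI)
    diff -= 2.0 * M_PI;
  else if (diff < -M_PI)
    diff += 2.0 * M_PI;
  return diff;
}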

Next is points_callback, the callback that processes every incoming point cloud message.

void points_callback(const sensor_msgs::PointCloud2::ConstPtr& input)

It first declares two point clouds, tmp and scan, and instantiates the pointers filtered_scan_ptr and transformed_scan_ptr for the filtered and transformed clouds. It records the scan timestamp, converts the incoming ROS PointCloud2 message into a PCL cloud stored in tmp, and then processes every point, discarding those whose planar distance to the lidar lies outside the configured range, i.e. points that are too close to or too far from the vehicle.

  double r;//planar distance from the point to the lidar
  pcl::PointXYZI p;
  pcl::PointCloud<pcl::PointXYZI> tmp, scan;//declare the point clouds tmp and scan
  pcl::PointCloud<pcl::PointXYZI>::Ptr filtered_scan_ptr(new pcl::PointCloud<pcl::PointXYZI>());//filtered scan
  pcl::PointCloud<pcl::PointXYZI>::Ptr transformed_scan_ptr(new pcl::PointCloud<pcl::PointXYZI>());//transformed scan
  tf::Quaternion q;

  Eigen::Matrix4f t_localizer(Eigen::Matrix4f::Identity());
  Eigen::Matrix4f t_base_link(Eigen::Matrix4f::Identity());
  static tf::TransformBroadcaster br;//tf broadcaster
  tf::Transform transform;

  current_scan_time = input->header.stamp;//timestamp of the current scan

  pcl::fromROSMsg(*input, tmp);//convert the ROS PointCloud2 message into a PCL cloud stored in tmp

  for (pcl::PointCloud<pcl::PointXYZI>::const_iterator item = tmp.begin(); item != tmp.end(); item++)//process each point in tmp, dropping those outside the allowed distance range
  {
    p.x = (double)item->x;
    p.y = (double)item->y;
    p.z = (double)item->z;
    p.intensity = (double)item->intensity;

    r = sqrt(pow(p.x, 2.0) + pow(p.y, 2.0));//planar distance from the lidar; keep the point only if min_scan_range < r < max_scan_range
    if (min_scan_range < r && r < max_scan_range)
    {
      scan.push_back(p);
    }
  }

Next, the initial point cloud is added to the map: if the map has not been loaded yet, the first scan is transformed with the tf_btol matrix and used to seed it, and every subsequently registered scan is appended frame by frame. tf_btol is the static mounting transform between the vehicle base_link frame and the lidar (localizer) frame, so the scan is expressed in the vehicle frame rather than the sensor frame (a sketch of how this matrix can be built appears after the code below). The incoming scan is then downsampled with a voxel-grid filter, and the NDT parameters are configured for whichever implementation was selected.

pcl::PointCloud<pcl::PointXYZI>::Ptr scan_ptr(new pcl::PointCloud<pcl::PointXYZI>(scan));

  // Add initial point cloud to velodyne_map: seed the map with the first scan
  if (initial_scan_loaded == 0)//if the map has not been initialized yet
  {
    pcl::transformPointCloud(*scan_ptr, *transformed_scan_ptr, tf_btol);//transform the first scan from the lidar frame into the vehicle frame using the static mounting transform tf_btol
    map += *transformed_scan_ptr;
    initial_scan_loaded = 1;
  }

  // Apply voxelgrid filter: downsample the raw input scan
  pcl::VoxelGrid<pcl::PointXYZI> voxel_grid_filter;
  voxel_grid_filter.setLeafSize(voxel_leaf_size, voxel_leaf_size, voxel_leaf_size);
  voxel_grid_filter.setInputCloud(scan_ptr);
  voxel_grid_filter.filter(*filtered_scan_ptr);

  pcl::PointCloud<pcl::PointXYZI>::Ptr map_ptr(new pcl::PointCloud<pcl::PointXYZI>(map));//wrap the accumulated map in a shared pointer

  if (_method_type == MethodType::PCL_GENERIC)//configure the NDT parameters for the selected implementation
  {
    ndt.setTransformationEpsilon(trans_eps);
    ndt.setStepSize(step_size);
    ndt.setResolution(ndt_res);
    ndt.setMaximumIterations(max_iter);
    ndt.setInputSource(filtered_scan_ptr);
  }
  else if (_method_type == MethodType::PCL_ANH)
  {
    anh_ndt.setTransformationEpsilon(trans_eps);
    anh_ndt.setStepSize(step_size);
    anh_ndt.setResolution(ndt_res);
    anh_ndt.setMaximumIterations(max_iter);
    anh_ndt.setInputSource(filtered_scan_ptr);
  }
#ifdef CUDA_FOUND
  else if (_method_type == MethodType::PCL_ANH_GPU)
  {
    anh_gpu_ndt.setTransformationEpsilon(trans_eps);
    anh_gpu_ndt.setStepSize(step_size);
    anh_gpu_ndt.setResolution(ndt_res);
    anh_gpu_ndt.setMaximumIterations(max_iter);
    anh_gpu_ndt.setInputSource(filtered_scan_ptr);
  }
#endif
#ifdef USE_PCL_OPENMP
  else if (_method_type == MethodType::PCL_OPENMP)
  {
    omp_ndt.setTransformationEpsilon(trans_eps);
    omp_ndt.setStepSize(step_size);
    omp_ndt.setResolution(ndt_res);
    omp_ndt.setMaximumIterations(max_iter);
    omp_ndt.setInputSource(filtered_scan_ptr);
  }
#endif
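The tf_btol matrix used when seeding the map is built once in main from the sensor mounting parameters. As a hedged sketch (the parameter names _tf_x, _tf_roll, etc. follow the usual Autoware convention but are assumptions here, and the snippet is not the verbatim source), it can be assembled with Eigen like this:

// Illustrative sketch: building the static base_link -> lidar transform as a 4x4 matrix.
// _tf_x.._tf_yaw describe the lidar mounting pose on the vehicle (assumed parameter names).
#include <Eigen/Geometry>

Eigen::Matrix4f make_tf_btol(float _tf_x, float _tf_y, float _tf_z,
                             float _tf_roll, float _tf_pitch, float _tf_yaw)
{
  Eigen::Translation3f tl_btol(_tf_x, _tf_y, _tf_z);                    // translation part
  Eigen::AngleAxisf rot_x_btol(_tf_roll, Eigen::Vector3f::UnitX());     // rotation about x (roll)
  Eigen::AngleAxisf rot_y_btol(_tf_pitch, Eigen::Vector3f::UnitY());    // rotation about y (pitch)
  Eigen::AngleAxisf rot_z_btol(_tf_yaw, Eigen::Vector3f::UnitZ());      // rotation about z (yaw)
  // Compose translation and z-y-x rotations into a homogeneous 4x4 matrix.
  return (tl_btol * rot_z_btol * rot_y_btol * rot_x_btol).matrix();
}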

guess_pose is the initial guess for the NDT registration. By default it is the previous pose plus the motion observed over the last small time interval; when the IMU and/or odometry are enabled, their predictions are used instead to refine the guess. If neither is enabled, the plain guess_pose is used.

  guess_pose.x = previous_pose.x + diff_x;//initial guess = previous pose + last inter-frame motion
  guess_pose.y = previous_pose.y + diff_y;
  guess_pose.z = previous_pose.z + diff_z;
  guess_pose.roll = previous_pose.roll;
  guess_pose.pitch = previous_pose.pitch;
  guess_pose.yaw = previous_pose.yaw + diff_yaw;

  if (_use_imu == true && _use_odom == true)//choose which method computes the initial guess
    imu_odom_calc(current_scan_time);
  if (_use_imu == true && _use_odom == false)
    imu_calc(current_scan_time);
  if (_use_imu == false && _use_odom == true)
    odom_calc(current_scan_time);

  pose guess_pose_for_ndt;
  if (_use_imu == true && _use_odom == true)
    guess_pose_for_ndt = guess_pose_imu_odom;
  else if (_use_imu == true && _use_odom == false)
    guess_pose_for_ndt = guess_pose_imu;
  else if (_use_imu == false && _use_odom == true)
    guess_pose_for_ndt = guess_pose_odom;
  else
    guess_pose_for_ndt = guess_pose;

The registration is then run with whichever NDT implementation was selected.

if (_method_type == MethodType::PCL_GENERIC)
  {
    ndt.align(*output_cloud, init_guess);//run the NDT registration starting from the initial guess
    fitness_score = ndt.getFitnessScore();//fitness score of the alignment
    t_localizer = ndt.getFinalTransformation();
    has_converged = ndt.hasConverged();
    final_num_iteration = ndt.getFinalNumIteration();
    transformation_probability = ndt.getTransformationProbability();
  }
  else if (_method_type == MethodType::PCL_ANH)
  {
    anh_ndt.align(init_guess);
    fitness_score = anh_ndt.getFitnessScore();
    t_localizer = anh_ndt.getFinalTransformation();
    has_converged = anh_ndt.hasConverged();
    final_num_iteration = anh_ndt.getFinalNumIteration();
  }
#ifdef CUDA_FOUND
  else if (_method_type == MethodType::PCL_ANH_GPU)
  {
    anh_gpu_ndt.align(init_guess);
    fitness_score = anh_gpu_ndt.getFitnessScore();
    t_localizer = anh_gpu_ndt.getFinalTransformation();
    has_converged = anh_gpu_ndt.hasConverged();
    final_num_iteration = anh_gpu_ndt.getFinalNumIteration();
  }
#endif
#ifdef USE_PCL_OPENMP
  else if (_method_type == MethodType::PCL_OPENMP)
  {
    omp_ndt.align(*output_cloud, init_guess);
    fitness_score = omp_ndt.getFitnessScore();
    t_localizer = omp_ndt.getFinalTransformation();
    has_converged = omp_ndt.hasConverged();
    final_num_iteration = omp_ndt.getFinalNumIteration();
  }
#endif

The pose obtained from the NDT registration is taken as the current pose, which is used to build the map -> base_link transform broadcast on tf. (A sketch of how ndt_pose is recovered from the final 4x4 transformation matrix follows the code below.)

  current_pose.x = ndt_pose.x;//take the NDT registration result as the current pose
  current_pose.y = ndt_pose.y;
  current_pose.z = ndt_pose.z;
  current_pose.roll = ndt_pose.roll;
  current_pose.pitch = ndt_pose.pitch;
  current_pose.yaw = ndt_pose.yaw;

  transform.setOrigin(tf::Vector3(current_pose.x, current_pose.y, current_pose.z));//use the current position as the translation of the map -> base_link transform
  q.setRPY(current_pose.roll, current_pose.pitch, current_pose.yaw);//build the rotation quaternion q from the current roll/pitch/yaw
  transform.setRotation(q);//set the rotation from q

  br.sendTransform(tf::StampedTransform(transform, current_scan_time, "map", "base_link"));//broadcast the transform
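ndt_pose itself comes from the final transformation matrix returned by the registration. As a hedged sketch (t_base_link is the 4x4 base_link matrix declared earlier; in the actual source it is derived from t_localizer and the static lidar-to-base_link transform, and details may differ), the translation is read directly from the matrix and the Euler angles are recovered with tf::Matrix3x3:

  // Illustrative sketch: recovering ndt_pose from the 4x4 base_link transformation matrix.
  tf::Matrix3x3 mat_b;
  mat_b.setValue(static_cast<double>(t_base_link(0, 0)), static_cast<double>(t_base_link(0, 1)), static_cast<double>(t_base_link(0, 2)),
                 static_cast<double>(t_base_link(1, 0)), static_cast<double>(t_base_link(1, 1)), static_cast<double>(t_base_link(1, 2)),
                 static_cast<double>(t_base_link(2, 0)), static_cast<double>(t_base_link(2, 1)), static_cast<double>(t_base_link(2, 2)));

  ndt_pose.x = t_base_link(0, 3);  // translation part of the transform
  ndt_pose.y = t_base_link(1, 3);
  ndt_pose.z = t_base_link(2, 3);
  mat_b.getRPY(ndt_pose.roll, ndt_pose.pitch, ndt_pose.yaw, 1);  // roll/pitch/yaw from the rotation part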

Summary: this is still a fairly shallow reading of the node; further study is needed.
