1、Principle
Each incoming point-cloud frame is registered against the map with NDT to obtain the current frame's pose; that pose is then used as the guess_pose for registering the next frame. If the current frame is a keyframe (how are keyframes selected? By the distance travelled since the last keyframe, or by taking every N-th frame), the map is updated and the current frame becomes the new keyframe.
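A compact outline of this per-frame flow is sketched below (comments only; the actual implementation is listed in section 5):
// Per-frame flow of the scan-to-map NDT odometry (outline only)
void points_callback(const sensor_msgs::PointCloud2::ConstPtr& input)
{
  // 1. Convert the ROS message to a PCL cloud, range-filter it and voxel-downsample it.
  // 2. First frame only: add the scan to the map and use the map as the initial NDT target.
  // 3. Build init_guess from the current_pose_ estimated for the previous frame.
  // 4. Run ndt.align(*output_cloud, init_guess) against the map target.
  // 5. Update current_pose_ from ndt.getFinalTransformation() and broadcast map -> base_link.
  // 6. If the shift since the last keyframe exceeds min_add_scan_shift_, add the transformed
  //    scan to the map, reset the NDT target and publish /ndt_map.
}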
2、A few points to note
1. Keyframe selection
If the current frame has moved more than a threshold distance (1 m) from the last keyframe, it is treated as a keyframe.
// If the translation between the current frame and the previous (key)frame exceeds
// min_add_scan_shift_, update the map, update the previous pose, and publish the map;
// otherwise the previous pose stays unchanged.
double shift = sqrt(pow(current_pose_.x - previous_pose_.x, 2.0) + pow(current_pose_.y - previous_pose_.y, 2.0));
if (shift >= min_add_scan_shift_)
{
  map_ += *transformed_scan_ptr;
  previous_pose_.x = current_pose_.x;
  previous_pose_.y = current_pose_.y;
  previous_pose_.z = current_pose_.z;
  previous_pose_.roll = current_pose_.roll;
  previous_pose_.pitch = current_pose_.pitch;
  previous_pose_.yaw = current_pose_.yaw;
  ndt.setInputTarget(map_ptr);
  sensor_msgs::PointCloud2::Ptr map_msg_ptr(new sensor_msgs::PointCloud2);
  pcl::toROSMsg(*map_ptr, *map_msg_ptr);
  ndt_map_pub_.publish(*map_msg_ptr);
}
2. Updating the current frame's pose
After NDT converges, the current frame's pose is recovered from the output transformation matrix.
// Output cloud of the registration
pcl::PointCloud<pcl::PointXYZI>::Ptr output_cloud(new pcl::PointCloud<pcl::PointXYZI>);
ndt.align(*output_cloud, init_guess);  // this cloud is only the downsampled scan, so we do not use it
t_localizer = ndt.getFinalTransformation();
t_base_link = t_localizer * tf_ltob_;  // lidar pose -> base_link pose in the map (first-frame) frame
pcl::transformPointCloud(*scan_ptr, *transformed_scan_ptr, t_localizer);
tf::Matrix3x3 mat_b;
mat_b.setValue(static_cast<double>(t_base_link(0, 0)), static_cast<double>(t_base_link(0, 1)),
               static_cast<double>(t_base_link(0, 2)), static_cast<double>(t_base_link(1, 0)),
               static_cast<double>(t_base_link(1, 1)), static_cast<double>(t_base_link(1, 2)),
               static_cast<double>(t_base_link(2, 0)), static_cast<double>(t_base_link(2, 1)),
               static_cast<double>(t_base_link(2, 2)));
// Update current_pose_.
current_pose_.x = t_base_link(0, 3);
current_pose_.y = t_base_link(1, 3);
current_pose_.z = t_base_link(2, 3);
mat_b.getRPY(current_pose_.roll, current_pose_.pitch, current_pose_.yaw, 1);  // rotation matrix -> roll/pitch/yaw
transform.setOrigin(tf::Vector3(current_pose_.x, current_pose_.y, current_pose_.z));
q.setRPY(current_pose_.roll, current_pose_.pitch, current_pose_.yaw);  // quaternion from roll/pitch/yaw
transform.setRotation(q);
br_.sendTransform(tf::StampedTransform(transform, input->header.stamp, "map", "base_link"));
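Besides the TF broadcast, the pose can also be published on /current_pose through the current_pose_pub_ publisher created in the constructor (section 5). A minimal sketch reusing the current_pose_ and q values set above (the exact message layout is an assumption; it is not shown in the excerpt):
geometry_msgs::PoseStamped pose_msg;
pose_msg.header.stamp = input->header.stamp;
pose_msg.header.frame_id = "map";
pose_msg.pose.position.x = current_pose_.x;
pose_msg.pose.position.y = current_pose_.y;
pose_msg.pose.position.z = current_pose_.z;
pose_msg.pose.orientation.x = q.x();   // q was built from roll/pitch/yaw above
pose_msg.pose.orientation.y = q.y();
pose_msg.pose.orientation.z = q.z();
pose_msg.pose.orientation.w = q.w();
current_pose_pub_.publish(pose_msg);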
3. Setting the guess_pose
The guess_pose between the second frame and the first frame (which initializes the map) is the identity (zero) transform.
// Initial pose estimate used for registration
Eigen::Matrix4f init_guess =
    (init_translation * init_rotation_z * init_rotation_y * init_rotation_x).matrix() * tf_btol_;  // identity for the first registration
For every later frame, the guess_pose of the scan-to-map registration is built from the current_pose_ produced by the previous frame's registration.
// current_pose_ starts at (0, 0, 0, 0, 0, 0)
Eigen::Translation3f init_translation(current_pose_.x, current_pose_.y, current_pose_.z);
Eigen::AngleAxisf init_rotation_x(current_pose_.roll, Eigen::Vector3f::UnitX());
Eigen::AngleAxisf init_rotation_y(current_pose_.pitch, Eigen::Vector3f::UnitY());
Eigen::AngleAxisf init_rotation_z(current_pose_.yaw, Eigen::Vector3f::UnitZ());
// Initial pose estimate used for registration
Eigen::Matrix4f init_guess =
    (init_translation * init_rotation_z * init_rotation_y * init_rotation_x).matrix() * tf_btol_;
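For context, before align() is called the NDT object is configured with the parameters read in the constructor (max_iter_, step_size_, ndt_res_, trans_eps_). A typical setup with pcl::NormalDistributionsTransform looks like the sketch below; the filtered_scan_ptr name for the downsampled current scan is an assumption:
// requires #include <pcl/registration/ndt.h>
pcl::NormalDistributionsTransform<pcl::PointXYZI, pcl::PointXYZI> ndt;
ndt.setTransformationEpsilon(trans_eps_);  // convergence threshold; smaller is stricter
ndt.setStepSize(step_size_);               // maximum step of the More-Thuente line search
ndt.setResolution(ndt_res_);               // NDT voxel grid cell size
ndt.setMaximumIterations(max_iter_);       // iteration cap
ndt.setInputSource(filtered_scan_ptr);     // downsampled current scan (assumed name)
ndt.setInputTarget(map_ptr);               // current map
ndt.align(*output_cloud, init_guess);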
3、Results
Video: a simple lidar odometry implemented with NDT registration.
4、Reference
https://github.com/rsasaki0109/ndt_mapping
5、Code
https://github.com/suyunzzz/aiimooc_lesson/tree/master/aiimooc_syz5
ndt_mapping.cpp
/*
* Copyright 2015-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Localization and mapping program using Normal Distributions Transform
Yuki KITSUKAWA
*/
#include "ndt_mapping.h"
using namespace std;
ndt_mapping::ndt_mapping()
{
// Initialize subscribers and publishers
points_sub_ = nh_.subscribe("/rslidar_points", 100000, &ndt_mapping::points_callback,this);
ndt_map_pub_ = nh_.advertise<sensor_msgs::PointCloud2>("/ndt_map", 1000);
current_pose_pub_ = nh_.advertise<geometry_msgs::PoseStamped>("/current_pose", 1000);
// Default values
nh_.param("max_iter", max_iter_, 30); // ndt最大迭代次数
nh_.param("step_size", step_size_, 0.1); // ndt步长
nh_.param("ndt_res", ndt_res_, 5.0); // ndt分辨率
nh_.param("trans_eps", trans_eps_, 0.01); // ndt容许的误差 越小越严苛
nh_.param("voxel_leaf_size", voxel_leaf_size_, 2.0); // voxel的分辨率
nh_.param("scan_rate", scan_rate_, 10.0); // scan rate
nh_.param("min_scan_range", min_scan_range_, 5.0); // min_scan_range
nh_.param("max_scan_range", max_scan_range_, 200.0); // max scan range
nh_.param("use_imu", use_imu_, false); // 不使用imu
initial_scan_loaded = 0; // 未载入地图
min_add_scan_shift_ = 1.0; // 超过1m设置为关键帧
_tf_x=0.0, _tf_y=0.0, _tf_z=0.0, _tf_roll=0.0, _tf_pitch=0.0, _tf_yaw=0.0;
std::cout << "incremental_voxel_update: " << _incremental_voxel_update << std::endl; // voxel 增量
std::cout << "(tf_x,tf_y,tf_z,tf_roll,tf_pitch,tf_yaw): (" << _tf_x << ", " << _tf_y << ", " << _tf_z << ", "
<< _tf_roll << ", " << _tf_pitch << ", " << _tf_yaw << ")" << std::endl;
Eigen::Translation3f tl_btol(_tf_x, _tf_y, _tf_z); // tl: translation base--->lidar
Eigen::AngleAxisf rot_x_btol(_tf_roll, Eigen::Vector3f::UnitX()); // rot: rotation
Eigen::AngleAxisf rot_y_btol(_tf_pitch, Eigen::Vector3f::UnitY()