点云建图流程
第一轮优化:使用LIO作为相邻帧运动观测,使用RTK位姿作为绝对坐标观测,优化整条轨迹并判定各关键帧的RTK有效性。
第二轮优化:把得到的信息放到统一的位姿图中进行优化,消除累计误差带来的重影,同时移除RTK失效区域。
回环检测:检测算法可以简单地使用基于欧氏距离的检测,并使用NDT或常见的配准算法计算它们的相对位姿。本例使用多分辨率的NDT匹配。
前端实现
src/ch9/frontend.cc
- 数据包解算
- 提取RTK数据
- RTK位姿观测
- 提取关键帧
void Frontend::Run() {
sad::RosbagIO rosbag_io(bag_path_, DatasetType::NCLT);
// 先提取RTK pose,注意NCLT只有平移部分
rosbag_io
.AddAutoRTKHandle([this](GNSSPtr gnss) {
gnss_.emplace(gnss->unix_time_, gnss);
return true;
})
.Go();
rosbag_io.CleanProcessFunc(); // 不再需要处理RTK
RemoveMapOrigin();
// 再运行LIO
rosbag_io
.AddAutoPointCloudHandle([&](sensor_msgs::PointCloud2::Ptr cloud) -> bool {
lio_->PCLCallBack(cloud);
ExtractKeyFrame(lio_->GetCurrentState());
return true;
})
.AddImuHandle([&](IMUPtr imu) {
lio_->IMUCallBack(imu);
return true;
})
.Go();
lio_->Finish();
// 保存运行结果
SaveKeyframes();
LOG(INFO) << "done.";
}
提取RTK数据:将ROS数据包中的RTK数据提取出来,放在RTK消息队列中,按采集时间排序。
/// Register a handler that extracts RTK/GNSS messages according to the dataset
/// type. For NCLT, each NavSatFix is converted to a UTM pose (translation only)
/// before being forwarded to the user callback `f`.
/// @param f callback invoked with each valid GNSS reading
/// @return *this, so handler registrations can be chained
RosbagIO &AddAutoRTKHandle(GNSSHandle f) {
    if (dataset_type_ == DatasetType::NCLT) {
        return AddHandle(nclt_rtk_topic, [f, this](const rosbag::MessageInstance &m) -> bool {
            auto msg = m.instantiate<sensor_msgs::NavSatFix>();
            if (msg == nullptr) {
                return false;
            }
            GNSSPtr gnss(new GNSS(msg));
            ConvertGps2UTMOnlyTrans(*gnss);  // NCLT RTK has no valid orientation
            if (std::isnan(gnss->lat_lon_alt_[2])) {
                // the altitude occasionally comes in as NaN; drop such readings
                return false;
            }
            return f(gnss);
        });
    } else {
        // TODO: RTK conversion for other datasets
        LOG(WARNING) << "RTK handle is not implemented for this dataset type";
        // BUGFIX: previously fell off the end of a non-void function here (UB).
        return *this;
    }
}
RTK位姿观测:取第一个有效的RTK数据作为地图原点,并将其他RTK读数减去该原点,使所有位姿相对于原点表示。
void Frontend::RemoveMapOrigin() {
if (gnss_.empty()) {
return;
}
bool origin_set = false;
for (auto& p : gnss_) {
if (p.second->status_ == GpsStatusType::GNSS_FIXED_SOLUTION) {
map_origin_ = p.second->utm_pose_.translation();
origin_set = true;
LOG(INFO) << "map origin is set to " << map_origin_.transpose();
auto yaml = YAML::LoadFile(config_yaml_);
std::vector<double> ori{map_origin_[0], map_origin_[1], map_origin_[2]};
yaml["origin"] = ori;
std::ofstream fout(config_yaml_);
fout << yaml;
break;
}
}
if (origin_set) {
LOG(INFO) << "removing origin from rtk";
for (auto& p : gnss_) {
p.second->utm_pose_.translation() -= map_origin_;
}
}
}
提取关键帧:用IMU和激光数据运行LIO,按照距离和角度阈值抽取关键帧,保存关键帧结果。抽取关键帧时,也会从LIO中获取它的位姿和点云。
/// Extract a keyframe from the current LIO state. The first valid scan always
/// becomes a keyframe; afterwards a new keyframe is created whenever the motion
/// since the last keyframe exceeds the distance or angle threshold. Each
/// keyframe gets an interpolated RTK pose, and its scan is saved to disk and
/// unloaded to bound memory usage.
/// @param state current LIO navigation state (pose + timestamp)
void Frontend::ExtractKeyFrame(const sad::NavStated& state) {
    if (last_kf_ == nullptr && !lio_->GetCurrentScan()) {
        // LIO has not finished initialization yet
        return;
    }

    // Shared creation path for both the first and subsequent keyframes
    // (previously duplicated with inconsistent statement order in both branches).
    auto create_kf = [this, &state]() {
        auto kf = std::make_shared<Keyframe>(state.timestamp_, kf_id_++, state.GetSE3(), lio_->GetCurrentScan());
        FindGPSPose(kf);                       // attach the interpolated RTK observation
        kf->SaveAndUnloadScan("./data/ch9/");  // save scan to disk, free memory
        keyframes_.emplace(kf->id_, kf);
        last_kf_ = kf;
        return kf;
    };

    if (last_kf_ == nullptr) {
        // first keyframe
        create_kf();
    } else {
        // keyframe test: relative motion with respect to the last keyframe
        SE3 delta = last_kf_->lidar_pose_.inverse() * state.GetSE3();
        if (delta.translation().norm() > kf_dis_th_ || delta.so3().log().norm() > kf_ang_th_deg_ * math::kDEG2RAD) {
            auto kf = create_kf();
            LOG(INFO) << "生成关键帧" << kf->id_;
        }
    }
}
查询位姿
void Frontend::FindGPSPose(std::shared_ptr<Keyframe> kf) {
SE3 pose;
GNSSPtr match;
if (math::PoseInterp<GNSSPtr>(
kf->timestamp_, gnss_, [](const GNSSPtr& gnss) -> SE3 { return gnss->utm_pose_; }, pose, match)) {
kf->rtk_pose_ = pose;
kf->rtk_valid_ = true;
} else {
kf->rtk_valid_ = false;
}
}
位姿插值
/**
 * Pose interpolation.
 * @tparam T element type stored in the container
 * @tparam C container type (map-like, sorted by time)
 * @tparam FT functor extracting a timestamp from an element
 * @tparam FP functor extracting a pose (SE3) from an element
 * @param query_time time to query
 * @param data data container, sorted by time
 * @param take_time_func takes an element, returns its timestamp
 * @param take_pose_func takes an element, returns its SE3 pose
 * @param result interpolated pose output
 * @param best_match the element closest in time to query_time
 * @param time_th tolerance outside the data time range, in seconds
 *
 * NOTE query_time should lie within the data time range; up to time_th of slack
 *      is tolerated at either end, in which case the boundary pose is returned.
 * @return true if an interpolated (or clamped) pose was produced
 */
template <typename T, typename C, typename FT, typename FP>
inline bool PoseInterp(double query_time, C&& data, FT&& take_time_func, FP&& take_pose_func, SE3& result,
                       T& best_match, float time_th = 0.5) {
    if (data.empty()) {
        LOG(INFO) << "cannot interp because data is empty. ";
        return false;
    }

    // Clamp to the newest pose when the query is slightly past the newest datum.
    double last_time = take_time_func(*data.rbegin());
    if (query_time > last_time) {
        if (query_time < (last_time + time_th)) {
            result = take_pose_func(*data.rbegin());
            best_match = *data.rbegin();
            return true;
        }
        return false;
    }

    // BUGFIX: clamp to the oldest pose when the query precedes the oldest datum;
    // previously such queries extrapolated with a negative interpolation factor.
    double first_time = take_time_func(*data.begin());
    if (query_time < first_time) {
        if (query_time > (first_time - time_th)) {
            result = take_pose_func(*data.begin());
            best_match = *data.begin();
            return true;
        }
        return false;
    }

    // Find the pair [match_iter, match_iter_n] bracketing query_time.
    auto match_iter = data.begin();
    for (auto iter = data.begin(); iter != data.end(); ++iter) {
        auto next_iter = iter;
        next_iter++;
        // BUGFIX: guard against dereferencing data.end() when iter is the last element.
        if (next_iter == data.end()) {
            break;
        }
        if (take_time_func(*iter) < query_time && take_time_func(*next_iter) >= query_time) {
            match_iter = iter;
            break;
        }
    }

    auto match_iter_n = match_iter;
    match_iter_n++;
    // BUGFIX: single-element container (query_time equal to that element's time).
    if (match_iter_n == data.end()) {
        best_match = *match_iter;
        result = take_pose_func(*match_iter);
        return true;
    }

    // Degenerate interval (duplicate timestamps): return the first pose directly.
    // BUGFIX: the dt check now precedes the division that uses dt.
    double dt = take_time_func(*match_iter_n) - take_time_func(*match_iter);
    if (fabs(dt) < 1e-6) {
        best_match = *match_iter;
        result = take_pose_func(*match_iter);
        return true;
    }
    double s = (query_time - take_time_func(*match_iter)) / dt;  // s=0 -> first frame, s=1 -> next frame

    SE3 pose_first = take_pose_func(*match_iter);
    SE3 pose_next = take_pose_func(*match_iter_n);
    result = {pose_first.unit_quaternion().slerp(s, pose_next.unit_quaternion()),
              pose_first.translation() * (1 - s) + pose_next.translation() * s};
    best_match = s < 0.5 ? *match_iter : *match_iter_n;
    return true;
}
后端位姿优化
使用后端位姿图优化算法。
定义因子
分别定义以下因子:
- RTK因子
- 雷达里程计
- 回环检测因子
src/common/g2o_types.h
/**
 * Translation-only GNSS observation.
 * The RTK extrinsic TBG (body-to-GNSS-antenna) must be supplied so the
 * constraint is applied at the antenna position rather than the body origin.
 */
class EdgeGNSSTransOnly : public g2o::BaseUnaryEdge<3, Vec3d, VertexPose> {
   public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW;

    /**
     * Construct from the pose vertex, the RTK observation t_WG and the extrinsic TBG.
     * @param v pose vertex being constrained
     * @param obs observed antenna translation in the world frame
     * @param TBG body-to-GNSS extrinsic
     */
    EdgeGNSSTransOnly(VertexPose* v, const Vec3d& obs, const SE3& TBG) : TBG_(TBG) {
        setVertex(0, v);
        setMeasurement(obs);
    }

    /// error = predicted antenna position (T_WB * T_BG) minus the measured one
    void computeError() override {
        VertexPose* v = (VertexPose*)_vertices[0];
        _error = (v->estimate() * TBG_).translation() - _measurement;
    }

    // Analytic Jacobian intentionally omitted; g2o falls back to numeric
    // differentiation (the previously commented-out version had 6x6 dimensions,
    // which does not match this 3-dimensional edge).

    // g2o's serialization interface is pure virtual in the base class; without
    // these overrides the edge cannot be instantiated.
    virtual bool read(std::istream& in) { return true; }
    virtual bool write(std::ostream& out) const { return true; }

   private:
    SE3 TBG_;  // BUGFIX: member was used in the ctor/computeError but never declared
};
/**
 * 6-DoF relative motion constraint between two pose vertices.
 * Error layout: translation first, rotation last.
 * Measurement: T12, the relative pose from vertex 1 to vertex 2.
 * NOTE(review): class definition continues beyond this excerpt (closing brace
 * and any remaining members are not visible here).
 */
class EdgeRelativeMotion : public g2o::BaseBinaryEdge<6, SE3, VertexPose, VertexPose> {
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW;
    EdgeRelativeMotion() = default;

    // Construct from the two pose vertices and the observed relative pose T12.
    EdgeRelativeMotion(VertexPose* v1, VertexPose* v2, const SE3& obs) {
        setVertex(0, v1);
        setVertex(1, v2);
        setMeasurement(obs);
    }

    // error = log( T12_meas^{-1} * T_1^{-1} * T_2 ), a 6-vector in se(3)
    void computeError() override {
        VertexPose* v1 = (VertexPose*)_vertices[0];
        VertexPose* v2 = (VertexPose*)_vertices[1];
        // NOTE(review): T12 is computed but unused; the next line recomputes the
        // same product inline.
        SE3 T12 = v1->estimate().inverse() * v2->estimate();
        _error = (_measurement.inverse() * v1->estimate().inverse() * v2->estimate()).log();
    };
构建优化问题
VertexPose:表达位姿的顶点。
EdgeGNSSTransOnly:单天线的GNSS观测。
EdgeRelativeMotion:相对运动约束,描述雷达和回环检测的观测。
src/ch9/optimization.cc
/// Assemble the pose-graph optimization problem: a Levenberg-Marquardt solver
/// over an Eigen linear solver, plus the pose vertices and the three kinds of
/// edges (RTK observations, lidar odometry, loop closures).
void Optimization::BuildProblem() {
    using BlockSolverType = g2o::BlockSolverX;
    using LinearSolverType = g2o::LinearSolverEigen<BlockSolverType::PoseMatrixType>;

    // g2o takes ownership of the algorithm passed to setAlgorithm().
    auto* algorithm = new g2o::OptimizationAlgorithmLevenberg(
        g2o::make_unique<BlockSolverType>(g2o::make_unique<LinearSolverType>()));
    optimizer_.setAlgorithm(algorithm);

    AddVertices();
    AddRTKEdges();
    AddLidarEdges();
    AddLoopEdges();
}
回环检测
回环检测步骤
1、遍历第一轮优化轨迹中的关键帧。
对每两个在空间上相隔较近,但在时间上存在一定距离的关键帧进行一次回环检测。这样一对关键帧称为检查点。为了防止检查点数据太大,设置一个ID间隔(这里设定为5),即每隔多少个关键帧检查一次。
2、对每个关键帧对,使用Scan to Map的配准方式:在关键帧附近抽取一定数量的点云构成子地图,再将扫描与子地图进行配准。这种方式可以避免单个扫描数据量太少、点云纹理结构不充分的问题,缺点是计算量较大。
3、由于每个检查点的计算都是独立的,使用并发编程让第2步能够在机器上并发执行,以加快速度。
4、配准的实际执行由NDT完成,使用NDT的分值作为回环有效性的判断依据。与里程计方法不同,回环检测配准经常要面对初值位姿估计很差的情况,因此希望算法不过分依赖于给定的位姿初值。为此,给NDT方法增加一个由粗到精的配准过程。
5、记录所有成功的回环,并在下次调用优化算法时,读取回环检测的结果。
回环检测结果的描述形式
src/ch9/loopclosure.h
/**
 * Loop-closure candidate: a pair of keyframe ids together with their relative
 * pose and the NDT score assigned during verification.
 */
struct LoopCandidate {
    LoopCandidate() {}
    LoopCandidate(IdType id1, IdType id2, SE3 Tij) : idx1_(id1), idx2_(id2), Tij_(Tij) {}

    IdType idx1_ = 0;         // id of the first keyframe
    IdType idx2_ = 0;         // id of the second keyframe
    SE3 Tij_;                 // relative pose T_i_j between the two keyframes
    double ndt_score_ = 0.0;  // NDT registration score; used to accept or reject the loop
};
检测算法
src/ch9/loopclosure.cc
/// Loop-closure pipeline: propose candidate pairs from the first-round poses,
/// verify each candidate with multi-resolution NDT registration, then save the
/// surviving loops for the pose-graph optimizer. The three stages must run in
/// this order.
void LoopClosure::Run() {
    DetectLoopCandidates();   // stage 1: spatial-proximity candidate pairs
    ComputeLoopCandidates();  // stage 2: parallel NDT verification + filtering
    SaveResults();            // persist successful loops
}
void LoopClosure::DetectLoopCandidates() {
KFPtr check_first = nullptr;
KFPtr check_second = nullptr;
LOG(INFO) << "detecting loop candidates from pose in stage 1";
// 本质上是两重循环
for (auto iter_first = keyframes_.begin(); iter_first != keyframes_.end(); ++iter_first) {
auto kf_first = iter_first->second;
if (check_first != nullptr && abs(int(kf_first->id_) - int(check_first->id_)) <= skip_id_) {
// 两个关键帧之前ID太近
continue;
}
for (auto iter_second = iter_first; iter_second != keyframes_.end(); ++iter_second) {
auto kf_second = iter_second->second;
if (check_second != nullptr && abs(int(kf_second->id_) - int(check_second->id_)) <= skip_id_) {
// 两个关键帧之前ID太近
continue;
}
if (abs(int(kf_first->id_) - int(kf_second->id_)) < min_id_interval_) {
/// 在同一条轨迹中,如果间隔太近,就不考虑回环
continue;
}
Vec3d dt = kf_first->opti_pose_1_.translation() - kf_second->opti_pose_1_.translation();
double t2d = dt.head<2>().norm(); // x-y distance
double range_th = min_distance_;
if (t2d < range_th) {
LoopCandidate c(kf_first->id_, kf_second->id_,
kf_first->opti_pose_1_.inverse() * kf_second->opti_pose_1_);
loop_candiates_.emplace_back(c);
check_first = kf_first;
check_second = kf_second;
}
}
}
LOG(INFO) << "detected candidates: " << loop_candiates_.size();
}
void LoopClosure::ComputeLoopCandidates() {
// 执行计算
std::for_each(std::execution::par_unseq, loop_candiates_.begin(), loop_candiates_.end(),
[this](LoopCandidate& c) { ComputeForCandidate(c); });
// 保存成功的候选
std::vector<LoopCandidate> succ_candidates;
for (const auto& lc : loop_candiates_) {
if (lc.ndt_score_ > ndt_score_th_) {
succ_candidates.emplace_back(lc);
}
}
LOG(INFO) << "success: " << succ_candidates.size() << "/" << loop_candiates_.size();
loop_candiates_.swap(succ_candidates);
}
计算回环
/// Verify one loop candidate by scan-to-map NDT registration.
/// A submap is built around kf1 in the world frame; the single scan of kf2 is
/// then registered against it with a coarse-to-fine multi-resolution NDT.
/// The final NDT score is stored in the candidate for later thresholding.
void LoopClosure::ComputeForCandidate(sad::LoopCandidate& c) {
    LOG(INFO) << "aligning " << c.idx1_ << " with " << c.idx2_;
    const int submap_idx_range = 40;
    KFPtr kf1 = keyframes_.at(c.idx1_), kf2 = keyframes_.at(c.idx2_);

    // Build a submap around `given_id`: every 4th keyframe scan within
    // +/- submap_idx_range ids, ground removed, transformed into the world frame
    // (or into given_id's body frame when build_in_world is false).
    // NOTE: submap_idx_range is a const int and is readable inside the lambda
    // without being captured.
    auto build_submap = [this](int given_id, bool build_in_world) -> CloudPtr {
        CloudPtr submap(new PointCloudType);
        for (int idx = -submap_idx_range; idx < submap_idx_range; idx += 4) {
            int id = idx + given_id;
            if (id < 0) {
                // keyframe ids start at zero
                continue;
            }
            auto iter = keyframes_.find(id);
            if (iter == keyframes_.end()) {
                continue;
            }
            auto kf = iter->second;
            CloudPtr cloud(new PointCloudType);
            pcl::io::loadPCDFile("./data/ch9/" + std::to_string(id) + ".pcd", *cloud);
            // ground points carry little structure for NDT registration
            sad::RemoveGround(cloud, 0.1);
            if (cloud->empty()) {
                continue;
            }
            // transform into the world frame using the first-round optimized pose
            SE3 Twb = kf->opti_pose_1_;
            if (!build_in_world) {
                Twb = keyframes_.at(given_id)->opti_pose_1_.inverse() * Twb;
            }
            CloudPtr cloud_trans(new PointCloudType);
            pcl::transformPointCloud(*cloud, *cloud_trans, Twb.matrix());
            *submap += *cloud_trans;
        }
        return submap;
    };

    auto submap_kf1 = build_submap(kf1->id_, true);  // registration target: submap in world frame
    CloudPtr submap_kf2(new PointCloudType);
    // registration source: the single scan of kf2
    pcl::io::loadPCDFile("./data/ch9/" + std::to_string(kf2->id_) + ".pcd", *submap_kf2);
    if (submap_kf1->empty() || submap_kf2->empty()) {
        c.ndt_score_ = 0;  // nothing to align -> candidate will fail the score threshold
        return;
    }

    pcl::NormalDistributionsTransform<PointType, PointType> ndt;
    ndt.setTransformationEpsilon(0.05);
    ndt.setStepSize(0.7);
    ndt.setMaximumIterations(40);

    // initial guess: the first-round optimized pose of kf2
    Mat4f Tw2 = kf2->opti_pose_1_.matrix().cast<float>();

    /// coarse-to-fine matching over decreasing NDT resolutions
    CloudPtr output(new PointCloudType);
    std::vector<double> res{10.0, 5.0, 4.0, 3.0};
    for (auto& r : res) {
        ndt.setResolution(r);
        // downsample both clouds proportionally to the current resolution
        auto rough_map1 = VoxelCloud(submap_kf1, r * 0.1);
        auto rough_map2 = VoxelCloud(submap_kf2, r * 0.1);
        ndt.setInputTarget(rough_map1);
        ndt.setInputSource(rough_map2);
        ndt.align(*output, Tw2);
        // feed this level's result into the next (finer) level as the initial guess
        Tw2 = ndt.getFinalTransformation();
    }

    Mat4d T = Tw2.cast<double>();
    Quatd q(T.block<3, 3>(0, 0));
    q.normalize();  // re-orthonormalize after the float round-trip
    Vec3d t = T.block<3, 1>(0, 3);
    c.Tij_ = kf1->opti_pose_1_.inverse() * SE3(q, t);  // relative pose observation T_1_2
    c.ndt_score_ = ndt.getTransformationProbability();  // used as the loop-validity score
}
地图导出
对建好的点云进行分块,比如按100米边长来切分。
点云切分:根据点坐标来计算点所在的网格,把点投入到对应的网格中。
根据实际情况,需要设计地图块的加载和卸载接口。
src/ch9/split_map.cc
int main(int argc, char** argv) {
google::InitGoogleLogging(argv[0]);
FLAGS_stderrthreshold = google::INFO;
FLAGS_colorlogtostderr = true;
google::ParseCommandLineFlags(&argc, &argv, true);
using namespace sad;
std::map<IdType, KFPtr> keyframes;
if (!LoadKeyFrames("./data/ch9/keyframes.txt", keyframes)) {
LOG(ERROR) << "failed to load keyframes";
return 0;
}
std::map<Vec2i, CloudPtr, less_vec<2>> map_data; // 以网格ID为索引的地图数据
pcl::VoxelGrid<PointType> voxel_grid_filter;
float resolution = FLAGS_voxel_size;
voxel_grid_filter.setLeafSize(resolution, resolution, resolution);
// 逻辑和dump map差不多,但每个点个查找它的网格ID,没有的话会创建
for (auto& kfp : keyframes) {
auto kf = kfp.second;
kf->LoadScan("./data/ch9/");
CloudPtr cloud_trans(new PointCloudType);
pcl::transformPointCloud(*kf->cloud_, *cloud_trans, kf->opti_pose_2_.matrix());
// voxel size
CloudPtr kf_cloud_voxeled(new PointCloudType);
voxel_grid_filter.setInputCloud(cloud_trans);
voxel_grid_filter.filter(*kf_cloud_voxeled);
LOG(INFO) << "building kf " << kf->id_ << " in " << keyframes.size();
// add to grid
for (const auto& pt : kf_cloud_voxeled->points) {
int gx = floor((pt.x - 50.0) / 100);
int gy = floor((pt.y - 50.0) / 100);
Vec2i key(gx, gy);
auto iter = map_data.find(key);
if (iter == map_data.end()) {
// create point cloud
CloudPtr cloud(new PointCloudType);
cloud->points.emplace_back(pt);
cloud->is_dense = false;
cloud->height = 1;
map_data.emplace(key, cloud);
} else {
iter->second->points.emplace_back(pt);
}
}
}
// 存储点云和索引文件
LOG(INFO) << "saving maps, grids: " << map_data.size();
std::system("mkdir -p ./data/ch9/map_data/");
std::system("rm -rf ./data/ch9/map_data/*"); // 清理一下文件夹
std::ofstream fout("./data/ch9/map_data/map_index.txt");
for (auto& dp : map_data) {
fout << dp.first[0] << " " << dp.first[1] << std::endl;
dp.second->width = dp.second->size();
sad::VoxelGrid(dp.second, 0.1);
sad::SaveCloudToFile(
"./data/ch9/map_data/" + std::to_string(dp.first[0]) + "_" + std::to_string(dp.first[1]) + ".pcd",
*dp.second);
}
fout.close();
LOG(INFO) << "done.";
return 0;
}