A Brief Look at the hector Source Code

I have been studying the hector algorithm for some time now, so here is a document to organize my notes:

1. The main function

hector's main function lives in the hector_mapping/src folder. The file itself contains very little: a standard ROS-style main that instantiates the HectorMappingRos class.
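For orientation, here is a minimal sketch of that standard ROS main pattern (the node name and include path are illustrative, not copied from the actual file):

#include <ros/ros.h>
#include "HectorMappingRos.h" // illustrative include; the real header name may differ

int main(int argc, char** argv)
{
  ros::init(argc, argv, "hector_mapping"); // register the node with the ROS master
  HectorMappingRos hectorMapping;          // constructor wires up subscribers, publishers, and the SLAM processor
  ros::spin();                             // hand control to ROS; all real work happens in callbacks
  return 0;
}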

2. HectorMappingRos

HectorMappingRos.cc is a very long file, but on closer inspection there is not actually that much to it. The most important and substantial part is the laser callback, void HectorMappingRos::scanCallback.

The file is too long to quote in full, so here is the scanCallback function with annotations:

// Laser scan callback
void HectorMappingRos::scanCallback(const sensor_msgs::LaserScan& scan)
{
  if (hectorDrawings) // drawing helper; the visualization appears to be driven from here
  {
    // HectorDrawings* hectorDrawings; points to the HectorDrawings class
    hectorDrawings->setTime(scan.header.stamp);
  }

  ros::WallTime startTime = ros::WallTime::now();
  // Taken only when p_use_tf_scan_transformation_ is false; since it was set to true
  // earlier, this branch is normally not executed.
  if (!p_use_tf_scan_transformation_)
  {
    // Converts the scan's range readings into x,y coordinates in the laser frame
    // (already scaled to the map resolution) and stores them in a DataContainer.
    if (rosLaserScanToDataContainer(scan, laserScanContainer,slamProcessor->getScaleToMap()))
    {
      // update() takes (1) the laser data packaged in a DataContainer and
      // (2) the robot's previous pose (x, y, theta) in the world frame.
      // slamProcessor was declared earlier as hectorslam::HectorSlamProcessor*.
      slamProcessor->update(laserScanContainer,slamProcessor->getLastScanMatchPose());
    }
  }
    }
  }
  else
  {  // wait up to 0.5 s for tf
    ros::Duration dur (0.5);
    // Wait for the transform between p_base_frame_ (usually base_link, the robot center)
    // and scan.header.frame_id (the laser frame) at the scan's timestamp scan.header.stamp.
    if (tf_.waitForTransform(p_base_frame_,scan.header.frame_id, scan.header.stamp,dur))
    {
      tf::StampedTransform laserTransform; // tf::StampedTransform extends tf::Transform with a timestamp
      // Look up the transform between the two frames; the returned transform is directed
      // from target_frame to source_frame, and the call may throw a tf exception.
      // void tf::TransformListener::lookupTransform (const std::string &target_frame, const std::string &source_frame, const ros::Time &time, StampedTransform &transform) const
      tf_.lookupTransform(p_base_frame_,scan.header.frame_id, scan.header.stamp, laserTransform);

      //projector_.transformLaserScanToPointCloud(p_base_frame_ ,scan, pointCloud,tf_);
      // convert the scan into a point cloud
      projector_.projectLaser(scan, laser_point_cloud_,30.0);
      // publish the point cloud if anyone subscribes
      if (scan_point_cloud_publisher_.getNumSubscribers() > 0){
        scan_point_cloud_publisher_.publish(laser_point_cloud_);
      }
      // start estimate as a 3D vector (x, y, theta)

      Eigen::Vector3f startEstimate(Eigen::Vector3f::Zero());

      // rosPointCloudToDataContainer filters the cloud, discarding points outside the
      // min/max range thresholds; valid points are stored in the data container and
      // the function returns true.
      if(rosPointCloudToDataContainer(laser_point_cloud_, laserTransform, laserScanContainer, slamProcessor->getScaleToMap()))
      {  
        // If an externally supplied initial pose is pending, consume it as startEstimate.
        // initial_pose_set_ was initialized to false, so execution normally falls through.
        if (initial_pose_set_){
          initial_pose_set_ = false;
          startEstimate = initial_pose_;
        }else if (p_use_tf_pose_start_estimate_){ // taken when p_use_tf_pose_start_estimate_ is true; it was set to false earlier

          try
          {
            tf::StampedTransform stamped_pose;
            // wait for the map->base tf
            tf_.waitForTransform(p_map_frame_,p_base_frame_, scan.header.stamp, ros::Duration(0.5));
            tf_.lookupTransform(p_map_frame_, p_base_frame_,  scan.header.stamp, stamped_pose);
            // the orientation is represented with Euler angles here
            tfScalar yaw, pitch, roll; // per the tf headers, tfScalar is btScalar, a single- or double-precision float
            stamped_pose.getBasis().getEulerYPR(yaw, pitch, roll); // extract the Euler angles from the known rotation matrix stamped_pose.getBasis()
            // startEstimate is the initial mapping pose, obtained here from the tf lookup
            startEstimate = Eigen::Vector3f(stamped_pose.getOrigin().getX(),stamped_pose.getOrigin().getY(), yaw);
          }
          catch(tf::TransformException e)
          { // on failure, report that the p_map_frame_ -> p_base_frame_ transform could not be obtained, usually because it is not being published
            ROS_ERROR("Transform from %s to %s failed\n", p_map_frame_.c_str(), p_base_frame_.c_str());
            startEstimate = slamProcessor->getLastScanMatchPose();
          }

        }else{
          // hector_mapping/include/hector_slam_lib/slam_main/HectorSlamProcessor.h
          // corresponds to: const Eigen::Vector3f& getLastScanMatchPose() const { return lastScanMatchPose; }
          // i.e. the most recently estimated pose
          startEstimate = slamProcessor->getLastScanMatchPose();
        }

        // Pose update: this update() call is the core of the whole algorithm.
        if (p_map_with_known_poses_){ // mapping with known poses; this was set to false earlier
          // slamProcessor was declared earlier as hectorslam::HectorSlamProcessor*
          slamProcessor->update(laserScanContainer, startEstimate, true);
        }else{
          slamProcessor->update(laserScanContainer, startEstimate);
        }
      }

    }else{
      ROS_INFO("lookupTransform %s to %s timed out. Could not transform laser scan into base_frame.", p_base_frame_.c_str(), scan.header.frame_id.c_str());
      return;
    }
  }

  if (p_timing_output_) // p_timing_output_ defaults to false
  {
    ros::WallDuration duration = ros::WallTime::now() - startTime; // elapsed wall time
    ROS_INFO("HectorSLAM Iter took: %f milliseconds", duration.toSec()*1000.0f );
  }

  //If we're just building a map with known poses, we're finished now. Code below this point publishes the localization results.
  if (p_map_with_known_poses_)
  {
    return;
  }
  // update the pose info container with the latest scan-match pose and covariance
  poseInfoContainer_.update(slamProcessor->getLastScanMatchPose(), slamProcessor->getLastScanMatchCovariance(), scan.header.stamp, p_map_frame_);
  // publish the pose, with and without covariance
  poseUpdatePublisher_.publish(poseInfoContainer_.getPoseWithCovarianceStamped());
  posePublisher_.publish(poseInfoContainer_.getPoseStamped());
  // optional odometry output; p_pub_odometry_ defaults to false, so this is skipped by default
  if(p_pub_odometry_)
  {
    nav_msgs::Odometry tmp;
    tmp.pose = poseInfoContainer_.getPoseWithCovarianceStamped().pose;

    tmp.header = poseInfoContainer_.getPoseWithCovarianceStamped().header;
    tmp.child_frame_id = p_base_frame_;
    odometryPublisher_.publish(tmp);
  }
  // Publish the map->odom transform. Note this is independent of the odometry topic above:
  // p_pub_map_odom_transform_ defaults to true even though p_pub_odometry_ is false,
  // because the tf tree still needs the map->odom link.
  if (p_pub_map_odom_transform_)
  {
    tf::StampedTransform odom_to_base;

    try
    {
      tf_.waitForTransform(p_odom_frame_, p_base_frame_, scan.header.stamp, ros::Duration(0.5));
      tf_.lookupTransform(p_odom_frame_, p_base_frame_, scan.header.stamp, odom_to_base);
    }
    catch(tf::TransformException e)
    {
      ROS_ERROR("Transform failed during publishing of map_odom transform: %s",e.what());
      odom_to_base.setIdentity();
    }
    map_to_odom_ = tf::Transform(poseInfoContainer_.getTfTransform() * odom_to_base.inverse());
    tfB_->sendTransform( tf::StampedTransform (map_to_odom_, scan.header.stamp, p_map_frame_, p_odom_frame_));
  }

  if (p_pub_map_scanmatch_transform_){
    tfB_->sendTransform( tf::StampedTransform(poseInfoContainer_.getTfTransform(), scan.header.stamp, p_map_frame_, p_tf_map_scanmatch_transform_frame_name_));
  }
}

This function is essentially a pose-estimation routine. It performs three main steps:
1. Call rosPointCloudToDataContainer (or rosLaserScanToDataContainer in the non-tf branch; both are defined in HectorMappingRos.cc) to filter the scan, discarding points that do not meet the requirements, e.g. points that are too far away, much like a filter, and store the surviving points in a DataContainer.
2. Obtain the previous pose via startEstimate = slamProcessor->getLastScanMatchPose();. Note that this is the pose from the last scan match, which is refreshed on every scan; a separate lastMapUpdatePose tracks the last map update, because as we will see below, not every scan-matched pose actually triggers a map update.
3. Finally, update the pose via slamProcessor->update(laserScanContainer, startEstimate);. This function is defined in hector_mapping/include/hector_slam_lib/slam_main/HectorSlamProcessor.h, so let us turn to that file:

3. HectorSlamProcessor

In this file, we mainly look at one function:

update:

  void update(const DataContainer& dataContainer, const Eigen::Vector3f& poseHintWorld, bool map_without_matching = false)
  {
    //std::cout << "\nph:\n" << poseHintWorld << "\n";
    // the new pose estimate in world coordinates
    Eigen::Vector3f newPoseEstimateWorld;
    // map_without_matching defaults to false, so mapRep->matchData is normally executed
    if (!map_without_matching){
        // arg1 : robot pose in the world frame at time t-1
        // arg2 : laser data
        // arg3 : scan-match covariance at time t-1
        // matchData, declared in ScanMatcher.h, returns a new pose.
        // mapRep is a MapRepresentationInterface*, which only declares virtual functions,
        // so the actual implementation is not here; the computation presumably lives in
        // hector_mapping/include/hector_slam_lib/matcher/ScanMatcher.h
        newPoseEstimateWorld = (mapRep->matchData(poseHintWorld, dataContainer, lastScanMatchCov));
    }else{ // otherwise newPoseEstimateWorld = poseHintWorld
        newPoseEstimateWorld = poseHintWorld;
    }
    // store the pose obtained above as the latest scan-match pose
    lastScanMatchPose = newPoseEstimateWorld;

    //std::cout << "\nt1:\n" << newPoseEstimateWorld << "\n";

    //std::cout << "\n1";
    //std::cout << "\n" << lastScanMatchPose << "\n";
    // hector_mapping/include/hector_slam_lib/util/UtilFunctions.h
    // Checks whether the translation and rotation increments are large enough; if so
    // the function returns true and the if block runs.
    // From setMapUpdateMinDistDiff and setMapUpdateMinAngleDiff below,
    // paramMinDistanceDiffForMapUpdate = 0.4 and paramMinAngleDiffForMapUpdate = 0.13.
    if(util::poseDifferenceLargerThan(newPoseEstimateWorld, lastMapUpdatePose, paramMinDistanceDiffForMapUpdate, paramMinAngleDiffForMapUpdate) || map_without_matching){
      // hector_mapping/include/hector_slam_lib/map/OccGridMapBase.h
      mapRep->updateByScan(dataContainer, newPoseEstimateWorld);
      // hector_mapping/include/hector_slam_lib/slam_main/MapRepMultiMap.h
      mapRep->onMapUpdated();
      lastMapUpdatePose = newPoseEstimateWorld; // note lastScanMatchPose and lastMapUpdatePose are distinct
    }


    // hector_marker_drawing/include/hector_marker_drawing/HectorDrawings.h?
    // hector_mapping/src/HectorDrawings.h?
    // hector_marker_drawing/include/hector_marker_drawing/DrawInterface.h
    if(drawInterface){ // if a draw interface is attached, send debug drawings
      // drawing parameters
      const GridMap& gridMapRef (mapRep->getGridMap());
      drawInterface->setColor(1.0, 0.0, 0.0);
      drawInterface->setScale(0.15);

	  //hector_mapping/src/HectorDrawings.h
      drawInterface->drawPoint(gridMapRef.getWorldCoords(Eigen::Vector2f::Zero()));
      drawInterface->drawPoint(gridMapRef.getWorldCoords((gridMapRef.getMapDimensions().array()-1).cast<float>()));
      drawInterface->drawPoint(Eigen::Vector2f(1.0f, 1.0f));

      drawInterface->sendAndResetData();
    }
	//hector_mapping/include/hector_slam_lib/matcher/ScanMatcher.h
    if (debugInterface)
    {
      debugInterface->sendAndResetData();
    }
  }

The update function can be divided into two parts:
1. Pose estimation
if (!map_without_matching){
        // arg1 : robot pose in the world frame at time t-1
        // arg2 : laser data
        // arg3 : scan-match covariance at time t-1
        // matchData, declared in ScanMatcher.h, returns a new pose.
        // mapRep is a MapRepresentationInterface*, which only declares virtual functions,
        // so the actual implementation presumably lives in
        // hector_mapping/include/hector_slam_lib/matcher/ScanMatcher.h
        newPoseEstimateWorld = (mapRep->matchData(poseHintWorld, dataContainer, lastScanMatchCov));
    }else{ // otherwise newPoseEstimateWorld = poseHintWorld
        newPoseEstimateWorld = poseHintWorld;
    }

This part takes the previous robot pose passed in as an argument, together with the filtered laser data, and computes a new pose in world coordinates. The function is defined in hector_mapping/include/hector_slam_lib/matcher/ScanMatcher.h; we will analyze it shortly (section 4).
2. Deciding whether to update the map

if(util::poseDifferenceLargerThan(newPoseEstimateWorld, lastMapUpdatePose, paramMinDistanceDiffForMapUpdate, paramMinAngleDiffForMapUpdate) || map_without_matching){
      // hector_mapping/include/hector_slam_lib/map/OccGridMapBase.h
      mapRep->updateByScan(dataContainer, newPoseEstimateWorld);
      // hector_mapping/include/hector_slam_lib/slam_main/MapRepMultiMap.h
      mapRep->onMapUpdated();
      lastMapUpdatePose = newPoseEstimateWorld; // note lastScanMatchPose and lastMapUpdatePose are distinct
    }

This is the crux of the map update: the update only runs when util::poseDifferenceLargerThan returns true. In other words, although every incoming scan produces a new pose, the map is not updated unless that pose passes this check. See section 5 for the function itself.
3. The remaining code deals with map visualization parameters and is not analyzed in detail here.
Next, let us look at ScanMatcher:

4. ScanMatcher

As noted above, this is the function that returns the new pose. Let us expand it:

/*
arg1 : robot pose in the world frame at time t-1
arg2 : occupancy grid map utility
arg3 : laser data
arg4 : covariance matrix output (the Hessian H is written into it at the end)
arg5 : maximum number of iterations
return : robot pose in the world frame at time t
*/
  Eigen::Vector3f matchData(const Eigen::Vector3f& beginEstimateWorld, ConcreteOccGridMapUtil& gridMapUtil, const DataContainer& dataContainer, Eigen::Matrix3f& covMatrix, int maxIterations)
  {
    if (drawInterface){
      drawInterface->setScale(0.05f);
      drawInterface->setColor(0.0f,1.0f, 0.0f);
      drawInterface->drawArrow(beginEstimateWorld);

      Eigen::Vector3f beginEstimateMap(gridMapUtil.getMapCoordsPose(beginEstimateWorld));

      drawScan(beginEstimateMap, gridMapUtil, dataContainer);

      drawInterface->setColor(1.0,0.0,0.0);
    }
    // convert the world-frame pose into grid-map coordinates
    if (dataContainer.getSize() != 0) {

      Eigen::Vector3f beginEstimateMap(gridMapUtil.getMapCoordsPose(beginEstimateWorld));

      Eigen::Vector3f estimate(beginEstimateMap);

      // This call is the key step: it computes the Hessian matrix and refines the
      // robot pose estimate for time t.
      estimateTransformationLogLh(estimate, gridMapUtil, dataContainer);
      //bool notConverged = estimateTransformationLogLh(estimate, gridMapUtil, dataContainer);

      /*
      const Eigen::Matrix2f& hessian (H.block<2,2>(0,0));


      Eigen::SelfAdjointEigenSolver<Eigen::Matrix2f> eig(hessian);

      const Eigen::Vector2f& eigValues (eig.eigenvalues());

      float cond = eigValues[1] / eigValues[0];
      float determinant = (hessian.determinant());
      */
      //std::cout << "\n cond: " << cond << " det: " << determinant << "\n";

      // iterate numIter = maxIterations more times to refine the result
      int numIter = maxIterations;


      for (int i = 0; i < numIter; ++i) {
        //std::cout << "\nest:\n" << estimate;

        estimateTransformationLogLh(estimate, gridMapUtil, dataContainer);
        //notConverged = estimateTransformationLogLh(estimate, gridMapUtil, dataContainer);

        if(drawInterface){
          float invNumIterf = 1.0f/static_cast<float> (numIter);
          drawInterface->setColor(static_cast<float>(i)*invNumIterf,0.0f, 0.0f);
          drawInterface->drawArrow(gridMapUtil.getWorldCoordsPose(estimate));
          //drawInterface->drawArrow(Eigen::Vector3f(0.0f, static_cast<float>(i)*0.05, 0.0f));
        }

        if(debugInterface){
          debugInterface->addHessianMatrix(H);
        }
      }

      if (drawInterface){
        drawInterface->setColor(0.0,0.0,1.0);
        drawScan(estimate, gridMapUtil, dataContainer);
      }
        /*
        Eigen::Matrix2f testMat(Eigen::Matrix2f::Identity());
        testMat(0,0) = 2.0f;

        float angleWorldCoords = util::toRad(30.0f);
        float sinAngle = sin(angleWorldCoords);
        float cosAngle = cos(angleWorldCoords);

        Eigen::Matrix2f rotMat;
        rotMat << cosAngle, -sinAngle, sinAngle, cosAngle;
        Eigen::Matrix2f covarianceRotated (rotMat * testMat * rotMat.transpose());

        drawInterface->setColor(0.0,0.0,1.0,0.5);
        drawInterface->drawCovariance(gridMapUtil.getWorldCoordsPoint(estimate.start<2>()), covarianceRotated);
        */



        /*
        Eigen::Matrix3f covMatMap (gridMapUtil.getCovarianceForPose(estimate, dataContainer));
        std::cout << "\nestim:" << estimate;
        std::cout << "\ncovMap\n" << covMatMap;
        drawInterface->setColor(0.0,0.0,1.0,0.5);


        Eigen::Matrix3f covMatWorld(gridMapUtil.getCovMatrixWorldCoords(covMatMap));
         std::cout << "\ncovWorld\n" << covMatWorld;

        drawInterface->drawCovariance(gridMapUtil.getWorldCoordsPoint(estimate.start<2>()), covMatMap.block<2,2>(0,0));

        drawInterface->setColor(1.0,0.0,0.0,0.5);
        drawInterface->drawCovariance(gridMapUtil.getWorldCoordsPoint(estimate.start<2>()), covMatWorld.block<2,2>(0,0));

        std::cout << "\nH:\n" << H;

        float determinant = H.determinant();
        std::cout << "\nH_det: " << determinant;
        */

        /*
        Eigen::Matrix2f covFromHessian(H.block<2,2>(0,0) * 1.0f);
        //std::cout << "\nCovFromHess:\n" << covFromHessian;

        drawInterface->setColor(0.0, 1.0, 0.0, 0.5);
        drawInterface->drawCovariance(gridMapUtil.getWorldCoordsPoint(estimate.start<2>()),covFromHessian.inverse());

        Eigen::Matrix3f covFromHessian3d(H * 1.0f);
        //std::cout << "\nCovFromHess:\n" << covFromHessian;

        drawInterface->setColor(1.0, 0.0, 0.0, 0.8);
        drawInterface->drawCovariance(gridMapUtil.getWorldCoordsPoint(estimate.start<2>()),(covFromHessian3d.inverse()).block<2,2>(0,0));
        */


      estimate[2] = util::normalize_angle(estimate[2]);

      covMatrix = Eigen::Matrix3f::Zero();
      //covMatrix.block<2,2>(0,0) = (H.block<2,2>(0,0).inverse());
      //covMatrix.block<2,2>(0,0) = (H.block<2,2>(0,0));


      /*
      covMatrix(0,0) = 1.0/(0.1*0.1);
      covMatrix(1,1) = 1.0/(0.1*0.1);
      covMatrix(2,2) = 1.0/((M_PI / 18.0f) * (M_PI / 18.0f));
      */


      covMatrix = H;

      return gridMapUtil.getWorldCoordsPose(estimate);
    }

    return beginEstimateWorld;
  }

protected:

  bool estimateTransformationLogLh(Eigen::Vector3f& estimate, ConcreteOccGridMapUtil& gridMapUtil, const DataContainer& dataPoints)
  {
    // Compute the Hessian H; the vector dTr holds the summation term of equation (12),
    // not yet multiplied by H^-1.
    gridMapUtil.getCompleteHessianDerivs(estimate, dataPoints, H, dTr);
    //std::cout << "\nH\n" << H  << "\n";
    //std::cout << "\ndTr\n" << dTr  << "\n";


    if ((H(0, 0) != 0.0f) && (H(1, 1) != 0.0f)) {

      // solve for the optimization step, equation (12) in the paper
      //H += Eigen::Matrix3f::Identity() * 1.0f;
      Eigen::Vector3f searchDir (H.inverse() * dTr);

      //std::cout << "\nsearchdir\n" << searchDir  << "\n";

      if (searchDir[2] > 0.2f) {
        searchDir[2] = 0.2f;
        std::cout << "SearchDir angle change too large\n";
      } else if (searchDir[2] < -0.2f) {
        searchDir[2] = -0.2f;
        std::cout << "SearchDir angle change too large\n";
      }
      // apply the update: estimate += searchDir
      updateEstimatedPose(estimate, searchDir);
      return true;
    }
    return false;
  }

  void updateEstimatedPose(Eigen::Vector3f& estimate, const Eigen::Vector3f& change)
  {
    estimate += change;
  }

As we can see, matchData is an iterative optimization loop, and each iteration is carried out by the call estimateTransformationLogLh(estimate, gridMapUtil, dataContainer);.

Inside that function, gridMapUtil.getCompleteHessianDerivs(estimate, dataPoints, H, dTr); (section 6) first computes a Hessian matrix (standard optimization machinery), from which the direction and step from the current pose toward the optimum are derived; updateEstimatedPose then applies this step, producing the newly computed pose.

This step feeds back into matchData, and after several iterations we obtain what the program considers a sufficiently accurate pose: the new robot pose computed from the current pose and the latest laser data.
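For reference, the "equation (12)" that the comments keep pointing at is the Gauss-Newton step from the hector paper (Kohlbrecher et al., in the paper's notation):

\[
\Delta\xi = H^{-1} \sum_{i=1}^{n} \left[\nabla M\big(S_i(\xi)\big)\,\frac{\partial S_i(\xi)}{\partial \xi}\right]^{T} \left[1 - M\big(S_i(\xi)\big)\right],
\qquad
H = \sum_{i=1}^{n} \left[\nabla M\big(S_i(\xi)\big)\,\frac{\partial S_i(\xi)}{\partial \xi}\right]^{T} \left[\nabla M\big(S_i(\xi)\big)\,\frac{\partial S_i(\xi)}{\partial \xi}\right]
\]

where M is the interpolated map value and S_i(\xi) maps beam endpoint s_i into the world under pose \xi = (p_x, p_y, \psi), with (equation 14)

\[
\frac{\partial S_i(\xi)}{\partial \xi} =
\begin{pmatrix}
1 & 0 & -\sin\psi\, s_{i,x} - \cos\psi\, s_{i,y} \\
0 & 1 & \phantom{-}\cos\psi\, s_{i,x} - \sin\psi\, s_{i,y}
\end{pmatrix}.
\]

In the code, dTr is exactly the sum in \Delta\xi before the H^{-1} factor, and rotDeriv in getCompleteHessianDerivs (section 6) is the third column of \partial S_i/\partial \xi combined with the map gradient.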

The next step is to decide whether to update the map, i.e. to hand this pose to util::poseDifferenceLargerThan for analysis.

5. UtilFunctions

The most important part of this file:

static bool poseDifferenceLargerThan(const Eigen::Vector3f& pose1, const Eigen::Vector3f& pose2, float distanceDiffThresh, float angleDiffThresh)
{
  //check distance
  // returns true if the distance between pose1 and pose2 exceeds the distance threshold
  // head<2> takes the first two components of the pose. A 2D pose has three parameters
  // (x, y, heading): the first two capture the translation between two poses, the third
  // the rotation between them.
  if ( ( (pose1.head<2>() - pose2.head<2>()).norm() ) > distanceDiffThresh){
    return true;
  }
  // compute the angle difference
  float angleDiff = (pose1.z() - pose2.z());
  // wrap angleDiff into [-PI, PI]
  if (angleDiff > M_PI) {
    // if greater than PI, subtract 2*PI
    angleDiff -= M_PI * 2.0f;
  } else if (angleDiff < -M_PI) {
    // if less than -PI, add 2*PI
    angleDiff += M_PI * 2.0f;
  }
  // return true if |angleDiff| exceeds the angle threshold; abs because angleDiff may be negative
  if (abs(angleDiff) > angleDiffThresh){
    return true;
  }
  return false;
}

Simple and clear: the function defines two thresholds, one for distance and one for angle, and the map is only updated when the translation or the rotation increment between the previous pose and the new pose exceeds its threshold. In other words, if the robot sits still, every new scan fed into hector still yields a new pose, but since the increment between the new and old poses never exceeds the thresholds, the map is not updated, which reduces map drift.
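A minimal standalone sketch of this gating logic, assuming the default thresholds of 0.4 m and 0.13 rad quoted above (the function is re-declared here only so the example compiles on its own):

#include <cmath>
#include <iostream>
#include <Eigen/Dense>

static bool poseDifferenceLargerThan(const Eigen::Vector3f& pose1, const Eigen::Vector3f& pose2,
                                     float distanceDiffThresh, float angleDiffThresh)
{
  // translation check on the (x, y) part of the pose
  if ((pose1.head<2>() - pose2.head<2>()).norm() > distanceDiffThresh) return true;
  // rotation check, with the angle difference wrapped into [-PI, PI]
  float angleDiff = pose1.z() - pose2.z();
  if (angleDiff > M_PI)       angleDiff -= M_PI * 2.0f;
  else if (angleDiff < -M_PI) angleDiff += M_PI * 2.0f;
  return std::abs(angleDiff) > angleDiffThresh;
}

int main()
{
  const Eigen::Vector3f lastMapUpdatePose(0.0f, 0.0f, 0.0f);
  // 0.3 m forward, 0.05 rad turn: below both thresholds, map is NOT updated
  std::cout << poseDifferenceLargerThan(Eigen::Vector3f(0.3f, 0.0f, 0.05f),
                                        lastMapUpdatePose, 0.4f, 0.13f) << "\n"; // prints 0
  // 0.5 m forward: distance threshold exceeded, map IS updated
  std::cout << poseDifferenceLargerThan(Eigen::Vector3f(0.5f, 0.0f, 0.0f),
                                        lastMapUpdatePose, 0.4f, 0.13f) << "\n"; // prints 1
  return 0;
}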

6. OccGridMapUtil

This is where the pose is actually computed, i.e. where the Hessian matrix is built from the laser data. It has to be read alongside the formulas in the original paper, and I have not analyzed it in full detail yet; for now I paste a partially annotated version (these annotations come from another "hector source analysis" write-up), to be completed later.
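For reference while reading interpMapValueWithDerivatives below, the bilinear interpolation formulas from the paper (equations 4-6, in the paper's notation, with P_{00} at (x_0, y_0) through P_{11} at (x_1, y_1) being the four grid points around the sample point P_m) are:

\[
M(P_m) \approx \frac{y-y_0}{y_1-y_0}\left(\frac{x-x_0}{x_1-x_0}M(P_{11}) + \frac{x_1-x}{x_1-x_0}M(P_{01})\right) + \frac{y_1-y}{y_1-y_0}\left(\frac{x-x_0}{x_1-x_0}M(P_{10}) + \frac{x_1-x}{x_1-x_0}M(P_{00})\right)
\]

\[
\frac{\partial M}{\partial x}(P_m) \approx \frac{y-y_0}{y_1-y_0}\big(M(P_{11}) - M(P_{01})\big) + \frac{y_1-y}{y_1-y_0}\big(M(P_{10}) - M(P_{00})\big)
\]

\[
\frac{\partial M}{\partial y}(P_m) \approx \frac{x-x_0}{x_1-x_0}\big(M(P_{11}) - M(P_{10})\big) + \frac{x_1-x}{x_1-x_0}\big(M(P_{01}) - M(P_{00})\big)
\]

Grid cells are unit-spaced in map coordinates, so x_1 - x_0 = y_1 - y_0 = 1 in the code.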

#ifndef __OccGridMapUtil_h_
#define __OccGridMapUtil_h_

#include <cmath>

#include "../scan/DataPointContainer.h"
#include "../util/UtilFunctions.h"

namespace hectorslam {

template<typename ConcreteOccGridMap, typename ConcreteCacheMethod>
class OccGridMapUtil
{
public:

  OccGridMapUtil(const ConcreteOccGridMap* gridMap)
    : concreteGridMap(gridMap)
    , size(0)
  {
    mapObstacleThreshold = gridMap->getObstacleThreshold();
    cacheMethod.setMapSize(gridMap->getMapDimensions());
  }

  ~OccGridMapUtil()
  {}

public:

  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  inline Eigen::Vector3f getWorldCoordsPose(const Eigen::Vector3f& mapPose) const { return concreteGridMap->getWorldCoordsPose(mapPose); };
  inline Eigen::Vector3f getMapCoordsPose(const Eigen::Vector3f& worldPose) const { return concreteGridMap->getMapCoordsPose(worldPose); };

  inline Eigen::Vector2f getWorldCoordsPoint(const Eigen::Vector2f& mapPoint) const { return concreteGridMap->getWorldCoords(mapPoint); };

  void getCompleteHessianDerivs(const Eigen::Vector3f& pose, const DataContainer& dataPoints, Eigen::Matrix3f& H, Eigen::Vector3f& dTr)
  {
    int size = dataPoints.getSize();

    // 2D translation+rotation transform, transform = Po(t-1) x R(t-1); it maps scan
    // endpoints from the body frame into the common map frame
    Eigen::Affine2f transform(getTransformForState(pose));

    float sinRot = sin(pose[2]);
    float cosRot = cos(pose[2]);

    H = Eigen::Matrix3f::Zero();
    dTr = Eigen::Vector3f::Zero();

    for (int i = 0; i < size; ++i) {

      const Eigen::Vector2f& currPoint (dataPoints.getVecEntry(i));

      // sample the map value and its partial derivatives; cf. equations (4), (5), (6), (14) in the paper
      Eigen::Vector3f transformedPointData(interpMapValueWithDerivatives(transform * currPoint));

      float funVal = 1.0f - transformedPointData[0];
      // dTr accumulates the summation term of equation (12), without the H^-1 factor
      dTr[0] += transformedPointData[1] * funVal;
      dTr[1] += transformedPointData[2] * funVal;
      // equations (13), (14) in the paper: the map gradient multiplied by the derivative
      // of S with respect to the rotation angle
      float rotDeriv = ((-sinRot * currPoint.x() - cosRot * currPoint.y()) * transformedPointData[1] + (cosRot * currPoint.x() - sinRot * currPoint.y()) * transformedPointData[2]);

      dTr[2] += rotDeriv * funVal;
      // util::sqr squares its argument; the lines below correspond to equation (13) in the
      // paper (which is missing a summation sign there; the code is correct) and build the Hessian
      H(0, 0) += util::sqr(transformedPointData[1]);
      H(1, 1) += util::sqr(transformedPointData[2]);
      H(2, 2) += util::sqr(rotDeriv);

      H(0, 1) += transformedPointData[1] * transformedPointData[2];
      H(0, 2) += transformedPointData[1] * rotDeriv;
      H(1, 2) += transformedPointData[2] * rotDeriv;
    }

    H(1, 0) = H(0, 1);
    H(2, 0) = H(0, 2);
    H(2, 1) = H(1, 2);

  }

  Eigen::Matrix3f getCovarianceForPose(const Eigen::Vector3f& mapPose, const DataContainer& dataPoints)
  {

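    // Sigma-point style covariance estimate: evaluate the scan likelihood at the pose
    // itself and at +/- offsets along x, y, and yaw, then form the likelihood-weighted
    // mean and covariance of these seven sample poses.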
    float deltaTransX = 1.5f;
    float deltaTransY = 1.5f;
    float deltaAng = 0.05f;

    float x = mapPose[0];
    float y = mapPose[1];
    float ang = mapPose[2];

    Eigen::Matrix<float, 3, 7> sigmaPoints;

    sigmaPoints.block<3, 1>(0, 0) = Eigen::Vector3f(x + deltaTransX, y, ang);
    sigmaPoints.block<3, 1>(0, 1) = Eigen::Vector3f(x - deltaTransX, y, ang);
    sigmaPoints.block<3, 1>(0, 2) = Eigen::Vector3f(x, y + deltaTransY, ang);
    sigmaPoints.block<3, 1>(0, 3) = Eigen::Vector3f(x, y - deltaTransY, ang);
    sigmaPoints.block<3, 1>(0, 4) = Eigen::Vector3f(x, y, ang + deltaAng);
    sigmaPoints.block<3, 1>(0, 5) = Eigen::Vector3f(x, y, ang - deltaAng);
    sigmaPoints.block<3, 1>(0, 6) = mapPose;

    Eigen::Matrix<float, 7, 1> likelihoods;

    likelihoods[0] = getLikelihoodForState(Eigen::Vector3f(x + deltaTransX, y, ang), dataPoints);
    likelihoods[1] = getLikelihoodForState(Eigen::Vector3f(x - deltaTransX, y, ang), dataPoints);
    likelihoods[2] = getLikelihoodForState(Eigen::Vector3f(x, y + deltaTransY, ang), dataPoints);
    likelihoods[3] = getLikelihoodForState(Eigen::Vector3f(x, y - deltaTransY, ang), dataPoints);
    likelihoods[4] = getLikelihoodForState(Eigen::Vector3f(x, y, ang + deltaAng), dataPoints);
    likelihoods[5] = getLikelihoodForState(Eigen::Vector3f(x, y, ang - deltaAng), dataPoints);
    likelihoods[6] = getLikelihoodForState(Eigen::Vector3f(x, y, ang), dataPoints);

    float invLhNormalizer = 1 / likelihoods.sum();

    std::cout << "\n lhs:\n" << likelihoods;

    Eigen::Vector3f mean(Eigen::Vector3f::Zero());

    for (int i = 0; i < 7; ++i) {
      mean += (sigmaPoints.block<3, 1>(0, i) * likelihoods[i]);
    }

    mean *= invLhNormalizer;

    Eigen::Matrix3f covMatrixMap(Eigen::Matrix3f::Zero());

    for (int i = 0; i < 7; ++i) {
      Eigen::Vector3f sigPointMinusMean(sigmaPoints.block<3, 1>(0, i) - mean);
      covMatrixMap += (likelihoods[i] * invLhNormalizer) * (sigPointMinusMean * (sigPointMinusMean.transpose()));
    }

    return covMatrixMap;

    //covMatrix.cwise() * invLhNormalizer;
    //transform = getTransformForState(Eigen::Vector3f(x-deltaTrans, y, ang);
  }

  Eigen::Matrix3f getCovMatrixWorldCoords(const Eigen::Matrix3f& covMatMap)
  {

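    // Convert a covariance from map (cell) units to world (meter) units: the
    // translation-translation entries scale by cellLength^2, the mixed
    // rotation-translation entries by cellLength, and the pure rotation entry
    // is unit-free and left unchanged.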
    //std::cout << "\nCovMap:\n" << covMatMap;

    Eigen::Matrix3f covMatWorld;

    float scaleTrans = concreteGridMap->getCellLength();
    float scaleTransSq = util::sqr(scaleTrans);

    covMatWorld(0, 0) = covMatMap(0, 0) * scaleTransSq;
    covMatWorld(1, 1) = covMatMap(1, 1) * scaleTransSq;

    covMatWorld(1, 0) = covMatMap(1, 0) * scaleTransSq;
    covMatWorld(0, 1) = covMatWorld(1, 0);

    covMatWorld(2, 0) = covMatMap(2, 0) * scaleTrans;
    covMatWorld(0, 2) = covMatWorld(2, 0);

    covMatWorld(2, 1) = covMatMap(2, 1) * scaleTrans;
    covMatWorld(1, 2) = covMatWorld(2, 1);

    covMatWorld(2, 2) = covMatMap(2, 2);

    return covMatWorld;
  }

  float getLikelihoodForState(const Eigen::Vector3f& state, const DataContainer& dataPoints)
  {
    float resid = getResidualForState(state, dataPoints);

    return getLikelihoodForResidual(resid, dataPoints.getSize());
  }

  float getLikelihoodForResidual(float residual, int numDataPoints)
  {
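    // residual is the sum of (1 - M(endpoint)) over all beam endpoints, so this
    // returns the mean map value at the endpoints: 1.0 would be a perfect match.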
    float numDataPointsA = static_cast<int>(numDataPoints);
    float sizef = static_cast<float>(numDataPointsA);

    return 1 - (residual / sizef);
  }

  float getResidualForState(const Eigen::Vector3f& state, const DataContainer& dataPoints)
  {
    int size = dataPoints.getSize();

    int stepSize = 1;
    float residual = 0.0f;


    Eigen::Affine2f transform(getTransformForState(state));

    for (int i = 0; i < size; i += stepSize) {

      float funval = 1.0f - interpMapValue(transform * dataPoints.getVecEntry(i));
      residual += funval;
    }

    return residual;
  }

  float getUnfilteredGridPoint(Eigen::Vector2i& gridCoords) const
  {
    return (concreteGridMap->getGridProbabilityMap(gridCoords.x(), gridCoords.y()));
  }

  float getUnfilteredGridPoint(int index) const
  {
    return (concreteGridMap->getGridProbabilityMap(index));
  }

  float interpMapValue(const Eigen::Vector2f& coords)
  {
    //check if coords are within map limits.
    if (concreteGridMap->pointOutOfMapBounds(coords)){
      return 0.0f;
    }

    //map coords are always positive, floor them by casting to int
    Eigen::Vector2i indMin(coords.cast<int>());

    //get factors for bilinear interpolation
    Eigen::Vector2f factors(coords - indMin.cast<float>());

    int sizeX = concreteGridMap->getSizeX();

    int index = indMin[1] * sizeX + indMin[0];

    // get grid values for the 4 grid points surrounding the current coords. Check cached data first, if not contained
    // filter gridPoint with gaussian and store in cache.
    if (!cacheMethod.containsCachedData(index, intensities[0])) {
      intensities[0] = getUnfilteredGridPoint(index);
      cacheMethod.cacheData(index, intensities[0]);
    }

    ++index;

    if (!cacheMethod.containsCachedData(index, intensities[1])) {
      intensities[1] = getUnfilteredGridPoint(index);
      cacheMethod.cacheData(index, intensities[1]);
    }

    index += sizeX-1;

    if (!cacheMethod.containsCachedData(index, intensities[2])) {
      intensities[2] = getUnfilteredGridPoint(index);
      cacheMethod.cacheData(index, intensities[2]);
    }

    ++index;

    if (!cacheMethod.containsCachedData(index, intensities[3])) {
      intensities[3] = getUnfilteredGridPoint(index);
      cacheMethod.cacheData(index, intensities[3]);
    }

    float xFacInv = (1.0f - factors[0]);
    float yFacInv = (1.0f - factors[1]);

    return
      ((intensities[0] * xFacInv + intensities[1] * factors[0]) * (yFacInv)) +
      ((intensities[2] * xFacInv + intensities[3] * factors[0]) * (factors[1]));

  }

  Eigen::Vector3f interpMapValueWithDerivatives(const Eigen::Vector2f& coords)
  {
    //check if coords are within map limits.
    /* Check whether the target point (the laser endpoint) lies outside the map.
    coords = Po(t-1) x R(t-1) x Pe(t):
    the endpoint transformed from the body frame into the common map frame.
    */
    if (concreteGridMap->pointOutOfMapBounds(coords)){
      return Eigen::Vector3f(0.0f, 0.0f, 0.0f);
    }

    //map coords are always positive, floor them by casting to int
    Eigen::Vector2i indMin(coords.cast<int>());

    //get factors for bilinear interpolation
    Eigen::Vector2f factors(coords - indMin.cast<float>());

    int sizeX = concreteGridMap->getSizeX();
    // The map is stored as a one-dimensional array; each element has two values:
    // val (the probability) and index (the element index, initialized to -1).
    // The next line therefore computes the endpoint's index into this 1D array.
    int index = indMin[1] * sizeX + indMin[0];

    // get grid values for the 4 grid points surrounding the current coords. Check cached data first, if not contained
    // filter gridPoint with gaussian and store in cache.
    // The four ifs below fetch the four grid cells surrounding the current real-valued
    // coordinate and read their occupancy probabilities (free < 0.4, occupied > 0.6).
    // A small caching trick is used: if the cached entry for this index is valid, read
    // the cache; otherwise read the value from the grid map directly.
    if (!cacheMethod.containsCachedData(index, intensities[0])) {
      intensities[0] = getUnfilteredGridPoint(index);
      cacheMethod.cacheData(index, intensities[0]);
    }
	/*
Everything below serves the bilinear interpolation; compare equations (4), (5), (6) in
the paper (notation follows the paper):
M(P00) = intensities[0]
M(P10) = intensities[1]
M(P01) = intensities[2]
M(P11) = intensities[3]
x1-x0 = y1-y0 = 1
x-x0 = factors[0]
y-y0 = factors[1]

dx1 = M(P00) - M(P10)
dx2 = M(P01) - M(P11)
dy1 = M(P00) - M(P01)
dy2 = M(P10) - M(P11)
xFacInv = x1 - x
yFacInv = y1 - y
The rest is plugging these into the formulas.
(The return value should be the interpolated map value plus its partial derivatives. The
map value matches the paper, but the derivatives look wrong: following the paper they
should be
dx = -(dx1*yFacInv + dx2*factors[1])
dy = -(dy1*xFacInv + dy2*factors[0])
i.e. the x-derivative weighted by the y factors and vice versa, whereas the code weights
each by its own axis. Where is the problem?)
*/
    ++index;

    if (!cacheMethod.containsCachedData(index, intensities[1])) {
      intensities[1] = getUnfilteredGridPoint(index);
      cacheMethod.cacheData(index, intensities[1]);
    }

    index += sizeX-1;

    if (!cacheMethod.containsCachedData(index, intensities[2])) {
      intensities[2] = getUnfilteredGridPoint(index);
      cacheMethod.cacheData(index, intensities[2]);
    }

    ++index;

    if (!cacheMethod.containsCachedData(index, intensities[3])) {
      intensities[3] = getUnfilteredGridPoint(index);
      cacheMethod.cacheData(index, intensities[3]);
    }

    float dx1 = intensities[0] - intensities[1];
    float dx2 = intensities[2] - intensities[3];

    float dy1 = intensities[0] - intensities[2];
    float dy2 = intensities[1] - intensities[3];

    float xFacInv = (1.0f - factors[0]);
    float yFacInv = (1.0f - factors[1]);

    return Eigen::Vector3f(
      ((intensities[0] * xFacInv + intensities[1] * factors[0]) * (yFacInv)) +
      ((intensities[2] * xFacInv + intensities[3] * factors[0]) * (factors[1])),
      -((dx1 * xFacInv) + (dx2 * factors[0])),
      -((dy1 * yFacInv) + (dy2 * factors[1]))
    );
  }

  Eigen::Affine2f getTransformForState(const Eigen::Vector3f& transVector) const
  {
    return Eigen::Translation2f(transVector[0], transVector[1]) * Eigen::Rotation2Df(transVector[2]);
  }

  Eigen::Translation2f getTranslationForState(const Eigen::Vector3f& transVector) const
  {
    return Eigen::Translation2f(transVector[0], transVector[1]);
  }

  void resetCachedData()
  {
    cacheMethod.resetCache();
  }

  void resetSamplePoints()
  {
    samplePoints.clear();
  }

  const std::vector<Eigen::Vector3f>& getSamplePoints() const
  {
    return samplePoints;
  }

protected:

  Eigen::Vector4f intensities;

  ConcreteCacheMethod cacheMethod;

  const ConcreteOccGridMap* concreteGridMap;

  std::vector<Eigen::Vector3f> samplePoints;

  int size;

  float mapObstacleThreshold;
};

}


#endif
