本文将介绍 OMPL 库中 RRT* 算法在路径导航中的应用。
设置2D导航地图中的边界条件,机器人的状态可以用(x,y,theta)来描述
// Set the bounds of the planning state space. The robot state is (x, y, theta)
// (see the text above), so three dimensions are bounded here; `n` is the space
// dimension -- presumably 3, though the original comment said R^6 -- TODO confirm.
ob::RealVectorBounds Qbounds(n);
Qbounds.setLow(0, MIN_X); // x_min
Qbounds.setHigh(0, MAX_X); // x_max
Qbounds.setLow(1, MIN_Y); // y_min
Qbounds.setHigh(1, MAX_Y); // y_max
Qbounds.setLow(2, -PI); // theta_min
Qbounds.setHigh(2, PI); // theta_max
为起始点和目标点赋值
// create a random start state
ob::ScopedState<ob::RealVectorStateSpace> start(Qspace);
for (int i = 0; i < n; i++)
start->as<ob::RealVectorStateSpace::StateType>()->values[i] = q_start[i];
// create a random goal state
ob::ScopedState<ob::RealVectorStateSpace> goal(Qspace);
for (int i = 0; i < n; i++)
goal->as<ob::RealVectorStateSpace::StateType>()->values[i] = q_goal[i];
设定优化目标,这里选用路径长度(path length)作为优化指标
// Select the planner (RRT*) and the optimization objective (path length).
p_type = PLANNER_RRTSTAR;
o_type = OBJECTIVE_PATHLENGTH;
// Attach the objective to the problem definition so the planner optimizes it.
pdef->setOptimizationObjective(allocateObjective(si, o_type));
为该空间设置规划器
// create a planner for the defined space
// To add a planner, the #include library must be added above
ob::PlannerPtr planner = allocatePlanner(si, p_type); // the map data is passed into the planner here
在 allocatePlanner() 中可以选择 RRT 或 RRT* 作为规划器
/// @brief Allocate a planner instance of the requested type for the given space.
/// @param si     Space information (bounds, state validity checker) handed to the planner.
/// @param p_type Enum selecting which planner implementation to construct.
/// @return A shared planner pointer, or an empty ob::PlannerPtr for unknown types.
/// @note MD, FloorMap and feature_thres appear to be plan_slam members forwarded
///       into the project-modified RRT/RRTstar constructors -- confirm against the class.
ob::PlannerPtr plan_slam::allocatePlanner(ob::SpaceInformationPtr si, plannerType p_type)
{
    // Each case returns directly; the original `break` statements after
    // `return` were unreachable dead code and have been removed.
    switch (p_type)
    {
        case PLANNER_RRT:
        {
            // Plain RRT: finds feasible paths, no cost optimization.
            return std::make_shared<og::RRT>(si, MD, FloorMap, feature_thres);
        }
        case PLANNER_RRTSTAR:
        {
            // RRT*: asymptotically optimal w.r.t. the configured objective.
            return std::make_shared<og::RRTstar>(si, MD, FloorMap, feature_thres);
        }
        default:
        {
            OMPL_ERROR("Planner-type enum is not implemented in allocation function.");
            return ob::PlannerPtr(); // Address compiler warning re: no return value.
        }
    }
}
上面 return 的 RRTstar 构造函数具体如下
ompl::geometric::RRTstar::RRTstar(const base::SpaceInformationPtr &si, map_data MD, ppMatrix FloorMap, int thres) :
base::Planner(si, "RRTstar"),
goalBias_(0.05),
maxDistance_(0.0),
useKNearest_(true),
rewireFactor_(1.1),
k_rrg_(0u),
r_rrg_(0.0),
delayCC_(true),
lastGoalMotion_(nullptr),
useTreePruning_(false),
pruneThreshold_(0.05),
usePrunedMeasure_(false),
useInformedSampling_(false),
useRejectionSampling_(false),
useNewStateRejection_(false),
useAdmissibleCostToCome_(true),
numSampleAttempts_ (100u),
bestCost_(std::numeric_limits<double>::quiet_NaN()),
prunedCost_(std::numeric_limits<double>::quiet_NaN()),
prunedMeasure_(0.0),
iterations_(0u),
StateValidChecker(si, MD, FloorMap, thres)
判断是否满足f(x)>thres
// Check whether the pose (x_w, y_w, theta_rad_w) satisfies f(x) > thres,
// i.e. strictly more than `ths` map features are visible from it.
// Passing ths == -1 falls back to the camera's default member threshold
// `feature_threshold`.
bool camera::IsStateVisiblilty(double x_w, double y_w, double theta_rad_w, int ths) {
    const int threshold = (ths == -1) ? feature_threshold : ths;
    return countVisible(x_w, y_w, theta_rad_w) > threshold;
}
(x,y,theta)处f(x)的计算,筛除掉视角不好的点
/// @brief Test whether a world-frame map point projects into the camera frustum,
///        filtering by depth, image bounds, distance range and viewing direction.
/// @param MapPoint_s  Map point in the world/sensor frame. NOTE(review): the
///        Eigen constructor below reads 4 floats from .data(); this assumes the
///        vector has at least 4 elements (the 4th is overwritten with 1) -- TODO confirm.
/// @param upper_limit/lower_limit  Allowed bearing-angle interval (rad) of the point
///        relative to the camera position -- step 4 below.
/// @param T_sc  Camera-to-world transform (used for the camera position).
/// @param T_cs  World-to-camera transform (used for the projection).
/// @param max_range/min_range  Caller-supplied distance bounds (checked in addition
///        to the member bounds min_dist/max_dist).
/// @return proj_info with .success = true and grid coordinates on success;
///         a default proj_info (success presumably false) otherwise.
proj_info camera::isInFrustum(std::vector<float> MapPoint_s, float upper_limit, float lower_limit, Eigen::Matrix4f T_sc, Eigen::Matrix4f T_cs, float max_range, float min_range) const {
// convert the map point into the camera frame (homogeneous coordinates)
Eigen::Matrix<float, 4, 1> map_point_s(MapPoint_s.data());
map_point_s(3,0) = 1;
Eigen::Matrix<float, 4, 1> map_point_c=T_cs*map_point_s;
float PcX = map_point_c(0,0);
float PcY= map_point_c(1,0);
float PcZ = map_point_c(2,0);
proj_info map_projection;
// step 1: rule out the points with depth smaller than 0.05
if(PcZ<0.05)
return map_projection;
// step 2: rule out the points which are out of current view
// Pinhole projection using the camera intrinsics fx, fy, cx, cy (members).
float inv_z = 1.0f/PcZ;
float u=fx*PcX*inv_z+cx;
float v=fy*PcY*inv_z+cy;
if(u<MinX || u>MaxX)
return map_projection;
if(v<MinY || v>MaxY)
return map_projection;
// step 3: rule out the points which are too close or too far away
// Checked twice: once against the member bounds, once against the caller's range.
float dist=sqrt(PcZ*PcZ+PcY*PcY+PcX*PcX);
if(dist<min_dist || dist>max_dist)
return map_projection;
if(dist<min_range || dist>max_range)
return map_projection;
// step 4: rule out the points whose viewing direction is out of the 95% t-distribution
// Bearing of the point relative to the camera position (T_sc translation), in the x-z plane.
float delta_x_s=map_point_s(0,0)-T_sc(0,3);
float delta_z_s=map_point_s(2,0)-T_sc(2,3);
float theta_robot_s=atan2(delta_x_s,delta_z_s);
if(theta_robot_s<lower_limit || theta_robot_s>upper_limit)
return map_projection;
// All tests passed: record the image-grid cell the point falls into.
map_projection.success = true;
std::vector<int>pos_xy = posInGrid(u,v);
map_projection.x_grid = pos_xy[0];
map_projection.y_grid = pos_xy[1];
return map_projection;
}