Baidu Apollo Autonomous Driving Planning Code Study: The TrajectoryEvaluator Class (Apollo\modules\planning\common) Explained in Detail


Overview

The TrajectoryEvaluator class belongs to the Apollo planning module and is implemented in modules\planning\common\trajectory_evaluator.cc/.h.

Judging from the class name, this should be a trajectory interpolation class (in Apollo, "Evaluate" usually means interpolation).

From the code, TrajectoryEvaluator mainly does the following: given a trajectory (the ego vehicle's planned ADCTrajectory, an obstacle's history trajectory, or a prediction trajectory), it recomputes each point's relative time with respect to a given start timestamp, interpolates new points on the original trajectory at a given time interval, and stores the resampled points (new relative time plus interpolated point data) either in a new trajectory container or back in the input trajectory.

In short, it resamples the original trajectory at a desired time interval to obtain a new trajectory, and at the same time can move the trajectory's time origin (the point whose relative time is 0). A minimal conceptual sketch of this resampling is given below.
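
To fix the idea before reading the code, here is a minimal, self-contained sketch of this kind of time-based resampling. It is not Apollo code: the Sample struct and the Resample function are hypothetical, and the real class works on protobuf trajectory points and interpolates all point fields, not just one value.

// Conceptual sketch only (hypothetical types): uniform-time resampling of a
// timestamped sequence, with relative time measured from start_time.
#include <cmath>
#include <vector>

struct Sample { double t; double x; };  // absolute time + one state value

std::vector<Sample> Resample(const std::vector<Sample>& traj,  // sorted by t
                             double start_time, double delta_time) {
  std::vector<Sample> out;
  if (traj.size() < 2) return out;
  const double t_first = traj.front().t - start_time;  // first relative time
  const double t_last = traj.back().t - start_time;    // last relative time
  for (int i = static_cast<int>(std::ceil(t_first / delta_time));
       i <= static_cast<int>(std::floor(t_last / delta_time)); ++i) {
    const double rt = i * delta_time;  // new relative time on the uniform grid
    // Find the bracketing pair of original samples and linearly interpolate.
    size_t k = 1;
    while (k + 1 < traj.size() && traj[k].t - start_time < rt) ++k;
    const double ta = traj[k - 1].t - start_time;
    const double tb = traj[k].t - start_time;
    const double w = (rt - ta) / (tb - ta);  // assumes strictly increasing t
    out.push_back({rt, (1.0 - w) * traj[k - 1].x + w * traj[k].x});
  }
  return out;
}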

trajectory_evaluator.h

#pragma once

#include <string>
#include <utility>
#include <vector>

#include "modules/planning/proto/learning_data.pb.h"

namespace apollo {
namespace planning {

class TrajectoryEvaluator {
 public:
  ~TrajectoryEvaluator() = default;

  // Resample the ADC (ego) trajectory.
  // Input: start_point_timestamp_sec - timestamp of the reference start point
  // Input: delta_time - resampling time interval
  // In/out: learning_data_frame - learning data frame (used for learning-based
  //         planning). Essentially: take the original trajectory stored in
  //         learning_data_frame, recompute its relative times from
  //         start_point_timestamp_sec, interpolate new points at intervals of
  //         delta_time, and write them back into learning_data_frame.
  void EvaluateADCTrajectory(const double start_point_timestamp_sec,
                             const double delta_time,
                             LearningDataFrame* learning_data_frame);

  // Resample the ego vehicle's future trajectory: the portion with relative
  // time in (0, FLAGS_trajectory_time_length] (8.0 s by default) is re-timed
  // at the given interval and the interpolated points are written into
  // evaluated_adc_future_trajectory.
  void EvaluateADCFutureTrajectory(
      const int frame_num,
      const std::vector<TrajectoryPointFeature>& adc_future_trajectory,
      const double start_point_timestamp_sec, const double delta_time,
      std::vector<TrajectoryPointFeature>* evaluated_adc_future_trajectory);

  // Resample obstacle trajectories: for each obstacle in learning_data_frame,
  // its obstacle_trajectory (perception history) is re-timed from the given
  // start timestamp at intervals of delta_time, and the interpolated points
  // are written back into learning_data_frame.
  void EvaluateObstacleTrajectory(const double start_point_timestamp_sec,
                                  const double delta_time,
                                  LearningDataFrame* learning_data_frame);

  // Resample each obstacle's prediction trajectories in the same way: re-time
  // them from the start timestamp at intervals of delta_time and write the
  // interpolated points back.
  void EvaluateObstaclePredictionTrajectory(
      const double start_point_timestamp_sec, const double delta_time,
      LearningDataFrame* learning_data_frame);

 private:
  // Interpolate trajectory points by time: the input trajectory is resampled
  // at intervals of delta_time (relative to start_point_timestamp_sec) and the
  // resulting points are written into *evaluated_trajectory.
  void EvaluateTrajectoryByTime(
      const int frame_num, const std::string& obstacle_id,
      const std::vector<std::pair<double, CommonTrajectoryPointFeature>>&
          trajectory,
      const double start_point_timestamp_sec, const double delta_time,
      std::vector<TrajectoryPointFeature>* evaluated_trajectory);


  // Conversion: build a common::TrajectoryPoint from a
  // CommonTrajectoryPointFeature plus a relative time; the result is written
  // into *trajectory_point.
  void Convert(const CommonTrajectoryPointFeature& tp,
               const double relative_time,
               common::TrajectoryPoint* trajectory_point);
 
  // Conversion: build a TrajectoryPointFeature from a common::TrajectoryPoint
  // plus a timestamp; the result is written into *trajectory_point.
  void Convert(const common::TrajectoryPoint& tp,
               const double timestamp_sec,
               TrajectoryPointFeature* trajectory_point);

  void WriteLog(const std::string& msg);
};

}  // namespace planning
}  // namespace apollo
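
To make the interface concrete, a hypothetical call-site sketch follows. It is not taken from Apollo; the wrapper function, the variable names (frame, start_timestamp) and the 0.2 s interval are assumptions, and only the TrajectoryEvaluator method signatures come from the header above.

// Hypothetical usage sketch; the wrapper and the chosen delta_time are
// illustrative, only the TrajectoryEvaluator calls mirror the header above.
#include "modules/planning/common/trajectory_evaluator.h"

namespace apollo {
namespace planning {

void ResampleLearningFrame(LearningDataFrame* frame, double start_timestamp) {
  const double delta_time = 0.2;  // assumed resampling interval [s]
  TrajectoryEvaluator evaluator;
  // Re-time the ego's recorded (past) trajectory in place.
  evaluator.EvaluateADCTrajectory(start_timestamp, delta_time, frame);
  // Re-time each obstacle's perception history and prediction trajectories.
  evaluator.EvaluateObstacleTrajectory(start_timestamp, delta_time, frame);
  evaluator.EvaluateObstaclePredictionTrajectory(start_timestamp, delta_time,
                                                 frame);
}

}  // namespace planning
}  // namespace apollo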

trajectory_evaluator.cc

#include "modules/planning/common/trajectory_evaluator.h"

#include <algorithm>
#include <limits>

#include "absl/strings/str_cat.h"
#include "cyber/common/file.h"
#include "modules/planning/common/planning_gflags.h"
#include "modules/planning/common/trajectory/discretized_trajectory.h"
#include "modules/planning/pipeline/evaluator_logger.h"

namespace apollo {
namespace planning {

// Log helper: always prints via AERROR, and additionally writes to the
// evaluator log stream when FLAGS_planning_offline_learning is enabled.
void TrajectoryEvaluator::WriteLog(const std::string& msg) {
  AERROR << msg;
  if (FLAGS_planning_offline_learning) {
    EvaluatorLogger::GetStream() << msg << std::endl;
  }
}

// In Apollo, a function named "Evaluate" is usually doing interpolation.
// This function resamples the input trajectory by time: points are
// re-interpolated at intervals of delta_time relative to
// start_point_timestamp_sec and collected into *evaluated_trajectory.
// Input: frame_num - frame number (used for logging)
// Input: obstacle_id - obstacle id (used for logging)
// Input: trajectory - vector of (absolute timestamp, trajectory point feature) pairs
// Input: start_point_timestamp_sec - timestamp of the reference start point
// Input: delta_time - resampling time interval
// Output: evaluated_trajectory - the resampled trajectory
void TrajectoryEvaluator::EvaluateTrajectoryByTime(
    const int frame_num, const std::string& obstacle_id,
    const std::vector<std::pair<double, CommonTrajectoryPointFeature>>&
        trajectory,
    const double start_point_timestamp_sec, const double delta_time,
    std::vector<TrajectoryPointFeature>* evaluated_trajectory) {
  // Return early if the trajectory is empty, or if both its first and last
  // timestamps are within delta_time of the start point (the trajectory spans
  // too short a time to be worth resampling).
  if (trajectory.empty() || (fabs(trajectory.front().first -
                                  start_point_timestamp_sec) < delta_time &&
                             fabs(trajectory.back().first -
                                  start_point_timestamp_sec) < delta_time)) {
    return;
  }

  // updated_trajectory: the input points converted to common::TrajectoryPoint,
  // each carrying its relative time.
  std::vector<apollo::common::TrajectoryPoint> updated_trajectory;
  // Iterate over the input trajectory.
  for (const auto& tp : trajectory) {
    // CommonTrajectoryPointFeature => common::TrajectoryPoint
    // Relative time of this point with respect to the start point.
    double relative_time = tp.first - start_point_timestamp_sec;
    // An empty trajectory point to be filled in.
    apollo::common::TrajectoryPoint trajectory_point;
    // Convert the input feature plus its relative time into trajectory_point.
    Convert(tp.second, relative_time, &trajectory_point);
    // Append trajectory_point to updated_trajectory, so updated_trajectory
    // holds the points together with their start-point-relative times.
    updated_trajectory.push_back(trajectory_point);
  }

  // If the first point's timestamp is later than the last point's, reverse
  // updated_trajectory so that its points are ordered by ascending relative time.
  if (trajectory.front().first > trajectory.back().first) {
    std::reverse(updated_trajectory.begin(), updated_trajectory.end());
  }

  // A DiscretizedTrajectory to be filled with monotonically increasing points.
  DiscretizedTrajectory discretized_trajectory;
  // Relative time of the most recently accepted point, initialized to the
  // lowest representable double (a very large negative number).
  double last_relative_time = std::numeric_limits<double>::lowest();
  // Go through updated_trajectory and keep only points with strictly
  // increasing relative time.
  for (const auto& tp : updated_trajectory) {
    // Filter out abnormal perception data: accept the point only if its
    // relative time is strictly greater than the previous one.
    if (tp.relative_time() > last_relative_time) {
      // Append the accepted point to discretized_trajectory.
      discretized_trajectory.AppendTrajectoryPoint(tp);
      last_relative_time = tp.relative_time();
    } else {
      // Otherwise the relative time is not increasing: discard the point and log it.
      const std::string msg = absl::StrCat(
          "DISCARD trajectory point: frame_num[", frame_num, "] obstacle_id[",
          obstacle_id, "] last_relative_time[", last_relative_time,
          "] relatice_time[", tp.relative_time(), "] relative_time_diff[",
          tp.relative_time() - last_relative_time, "]");
      WriteLog(msg);
    }
  }

  // low_bound: index of the first sample on the new time grid, i.e. the first
  // point's relative time divided by delta_time, rounded up (ceil) and clamped
  // to be no less than -150.
  const int low_bound =
      std::max(-150.0,
               ceil(updated_trajectory.front().relative_time() / delta_time));
  // high_bound: index of the last sample, the last point's relative time
  // divided by delta_time, rounded down (floor) and clamped to be no more than 150.
  const int high_bound =
      std::min(150.0,
               floor(updated_trajectory.back().relative_time() / delta_time));
  // Together these allow at most 150 resampled points before and 150 after the
  // start point, bounding the output size and dropping points too far away in time.
  ADEBUG << "frame_num[" << frame_num << "] obstacle_id[" << obstacle_id
         << "] low[" << low_bound << "] high[" << high_bound << "]";
  // Resample: interpolate a point at each multiple of delta_time on the new
  // grid and append it to the output.
  for (int i = low_bound; i <= high_bound; ++i) {
    double timestamp_sec = start_point_timestamp_sec + i * delta_time;
    double relative_time = i * delta_time;
    auto tp = discretized_trajectory.Evaluate(relative_time);

    // common::TrajectoryPoint => TrajectoryPointFeature
    TrajectoryPointFeature trajectory_point;
    Convert(tp, timestamp_sec, &trajectory_point);

    evaluated_trajectory->push_back(trajectory_point);
  }
}
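
// Worked example for the low_bound / high_bound computation above (the numbers
// are illustrative, not from Apollo): suppose delta_time = 0.2 s and the
// updated trajectory spans relative times [-1.05 s, +0.83 s]. Then
//   low_bound  = max(-150, ceil(-1.05 / 0.2)) = ceil(-5.25) = -5
//   high_bound = min( 150, floor( 0.83 / 0.2)) = floor(4.15) =  4
// so the loop samples relative times -1.0, -0.8, ..., 0.8 (10 points), each
// interpolated from discretized_trajectory and stamped with
// timestamp_sec = start_point_timestamp_sec + i * 0.2. The +/-150 clamp caps
// the number of resampled points on each side of the start point.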

// Resample the ADC (ego) trajectory stored in learning_data_frame.
// Input: start_point_timestamp_sec - timestamp of the reference start point
// Input: delta_time - resampling time interval
// In/out: learning_data_frame - learning data frame (used for learning-based
//         planning); its adc_trajectory_point list is replaced in place.
void TrajectoryEvaluator::EvaluateADCTrajectory(
    const double start_point_timestamp_sec, const double delta_time,
    LearningDataFrame* learning_data_frame) {
  // Collect (absolute timestamp, trajectory point feature) pairs from the frame.
  std::vector<std::pair<double, CommonTrajectoryPointFeature>> trajectory;
  // Iterate over the ADC trajectory points in learning_data_frame.
  for (const auto& adc_tp : learning_data_frame->adc_trajectory_point()) {
    // Push each point, paired with its absolute timestamp, into trajectory.
    trajectory.push_back(
        std::make_pair(adc_tp.timestamp_sec(), adc_tp.trajectory_point()));
  }

  // If the trajectory has at most one point, it is too short: log and return.
  if (trajectory.size() <= 1) {
    const std::string msg = absl::StrCat(
        "too short adc_trajectory_point. frame_num[",
        learning_data_frame->frame_num(), "] size[", trajectory.size(), "]");
    WriteLog(msg);
    return;
  }

  // Clear the ADC trajectory points in learning_data_frame; they will be
  // refilled with the resampled points.
  learning_data_frame->clear_adc_trajectory_point();

  // If the first recorded point is within delta_time of the start point, the
  // recorded ADC trajectory covers too short a time span to resample.
  if (fabs(trajectory.front().first - start_point_timestamp_sec) <=
      delta_time) {
    // In that case, keep a single point: add one adc_trajectory_point back
    // into learning_data_frame,
    auto adc_trajectory_point = learning_data_frame->add_adc_trajectory_point();
    // set it from the last point of the original trajectory (timestamp and
    // point data),
    adc_trajectory_point->set_timestamp_sec(trajectory.back().first);
    adc_trajectory_point->mutable_trajectory_point()->CopyFrom(
        trajectory.back().second);
    // then log the situation and return.
    const std::string msg =
        absl::StrCat("too short adc_trajectory. frame_num[",
                     learning_data_frame->frame_num(), "] size[",
                     trajectory.size(), "] timestamp_diff[",
                     start_point_timestamp_sec - trajectory.front().first, "]");
    WriteLog(msg);
    return;
  }

  // evaluated_trajectory will receive the resampled points.
  std::vector<TrajectoryPointFeature> evaluated_trajectory;
  // EvaluateTrajectoryByTime recomputes the relative times from
  // start_point_timestamp_sec, interpolates at intervals of delta_time, and
  // fills evaluated_trajectory.
  EvaluateTrajectoryByTime(learning_data_frame->frame_num(), "adc_trajectory",
                           trajectory, start_point_timestamp_sec, delta_time,
                           &evaluated_trajectory);
  ADEBUG << "frame_num[" << learning_data_frame->frame_num()
         << "] orig adc_trajectory[" << trajectory.size()
         << "] evaluated_trajectory_size[" << evaluated_trajectory.size()
         << "]";

  // Go through the resampled points.
  for (const auto& tp : evaluated_trajectory) {
    // Keep only points whose relative time lies in
    // [-FLAGS_trajectory_time_length, 0.0] (by default the past 8 s up to and
    // including the start point).
    if (tp.trajectory_point().relative_time() <= 0.0 &&
        tp.trajectory_point().relative_time() >=
            -FLAGS_trajectory_time_length) {
      // Write the kept point back into learning_data_frame, rebuilding
      // adc_trajectory_point with the past (relative time <= 0) part of the
      // resampled trajectory.
      auto adc_trajectory_point =
          learning_data_frame->add_adc_trajectory_point();
      adc_trajectory_point->set_timestamp_sec(tp.timestamp_sec());
      adc_trajectory_point->mutable_trajectory_point()->CopyFrom(
          tp.trajectory_point());
    } else {
      // Points outside that window are discarded and logged.
      const std::string msg =
          absl::StrCat("DISCARD adc_trajectory_point. frame_num[",
                       learning_data_frame->frame_num(), "] size[",
                       evaluated_trajectory.size(), "] relative_time[",
                       tp.trajectory_point().relative_time(), "]");
      WriteLog(msg);
    }
  }
}
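
// Illustrative note on the window kept above: with the default
// FLAGS_trajectory_time_length of 8.0 s, only resampled points whose relative
// time lies in [-8.0 s, 0.0 s] are written back, i.e. EvaluateADCTrajectory
// keeps the already-driven (past) part of the ego trajectory up to the start
// point. With delta_time = 0.2 s (an assumed value) that is at most 41 points
// (i = -40 ... 0).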

// Resample the ego vehicle's future trajectory: the portion with relative time
// in (0, FLAGS_trajectory_time_length] (8.0 s by default) is re-timed at the
// given interval and the interpolated points are written into
// evaluated_adc_future_trajectory.
void TrajectoryEvaluator::EvaluateADCFutureTrajectory(
    const int frame_num,
    const std::vector<TrajectoryPointFeature>& adc_future_trajectory,
    const double start_point_timestamp_sec, const double delta_time,
    std::vector<TrajectoryPointFeature>* evaluated_adc_future_trajectory) {
  evaluated_adc_future_trajectory->clear();

  std::vector<std::pair<double, CommonTrajectoryPointFeature>> trajectory;
  for (const auto& adc_future_trajectory_point : adc_future_trajectory) {
    trajectory.push_back(
        std::make_pair(adc_future_trajectory_point.timestamp_sec(),
                       adc_future_trajectory_point.trajectory_point()));
  }

  if (trajectory.size() <= 1) {
    const std::string msg =
        absl::StrCat("too short adc_future_trajectory. frame_num[", frame_num,
                     "] size[", trajectory.size(), "]");
    WriteLog(msg);
    return;
  }

  if (fabs(trajectory.back().first - start_point_timestamp_sec) <= delta_time) {
    const std::string msg =
        absl::StrCat("too short adc_future_trajectory. frame_num[", frame_num,
                     "] size[", trajectory.size(), "] time_range[",
                     trajectory.back().first - start_point_timestamp_sec, "]");
    WriteLog(msg);
    return;
  }

  std::vector<TrajectoryPointFeature> evaluated_trajectory;
  EvaluateTrajectoryByTime(frame_num, "adc_future_trajectory", trajectory,
                           start_point_timestamp_sec, delta_time,
                           &evaluated_trajectory);

  ADEBUG << "frame_num[" << frame_num << "] orig adc_future_trajectory["
         << trajectory.size() << "] evaluated_trajectory_size["
         << evaluated_trajectory.size() << "]";

  if (evaluated_trajectory.empty()) {
    const std::string msg = absl::StrCat(
        "WARNING: adc_future_trajectory not long enough. ", "frame_num[",
        frame_num, "] size[", evaluated_trajectory.size(), "]");
    WriteLog(msg);
  } else {
    const double time_range =
        evaluated_trajectory.back().timestamp_sec() - start_point_timestamp_sec;
    if (time_range < FLAGS_trajectory_time_length) {
      const std::string msg = absl::StrCat(
          "WARNING: adc_future_trajectory not long enough. ", "frame_num[",
          frame_num, "] size[", evaluated_trajectory.size(), "] time_range[",
          time_range, "]");
      WriteLog(msg);
    }
  }

  for (const auto& tp : evaluated_trajectory) {
    if (tp.trajectory_point().relative_time() > 0.0 &&
        tp.trajectory_point().relative_time() <= FLAGS_trajectory_time_length) {
      evaluated_adc_future_trajectory->push_back(tp);
    } else {
      const std::string msg = absl::StrCat(
          "DISCARD adc_future_trajectory_point. frame_num[", frame_num,
          "] size[", evaluated_trajectory.size(), "] relative_time[",
          tp.trajectory_point().relative_time(), "]");
      WriteLog(msg);
    }
  }
}

// Resample obstacle trajectories: for each obstacle in learning_data_frame, the
// perception history in its obstacle_trajectory is re-timed from the start
// timestamp at intervals of delta_time, and the interpolated points are written
// into the obstacle's evaluated_trajectory_point list in learning_data_frame.
void TrajectoryEvaluator::EvaluateObstacleTrajectory(
    const double start_point_timestamp_sec, const double delta_time,
    LearningDataFrame* learning_data_frame) {
  for (int i = 0; i < learning_data_frame->obstacle_size(); ++i) {
    const int obstacle_id = learning_data_frame->obstacle(i).id();
    const auto obstacle_trajectory =
        learning_data_frame->obstacle(i).obstacle_trajectory();
    std::vector<std::pair<double, CommonTrajectoryPointFeature>> trajectory;
    for (const auto& perception_obstacle :
         obstacle_trajectory.perception_obstacle_history()) {
      CommonTrajectoryPointFeature trajectory_point;
      trajectory_point.mutable_path_point()->set_x(
          perception_obstacle.position().x());
      trajectory_point.mutable_path_point()->set_y(
          perception_obstacle.position().y());
      trajectory_point.mutable_path_point()->set_z(
          perception_obstacle.position().z());
      trajectory_point.mutable_path_point()->set_theta(
          perception_obstacle.theta());

      const double v = std::sqrt(perception_obstacle.velocity().x() *
                                     perception_obstacle.velocity().x() +
                                 perception_obstacle.velocity().y() *
                                     perception_obstacle.velocity().y());
      trajectory_point.set_v(v);

      const double a = std::sqrt(perception_obstacle.acceleration().x() *
                                     perception_obstacle.acceleration().x() +
                                 perception_obstacle.acceleration().y() *
                                     perception_obstacle.acceleration().y());
      trajectory_point.set_a(a);

      trajectory.push_back(std::make_pair(perception_obstacle.timestamp_sec(),
                                          trajectory_point));
    }

    std::vector<TrajectoryPointFeature> evaluated_trajectory;
    if (trajectory.size() == 1 ||
        fabs(trajectory.front().first - start_point_timestamp_sec)
            <= delta_time ||
        fabs(trajectory.front().first - trajectory.back().first)
            <= delta_time) {
      ADEBUG << "too short obstacle_trajectory. frame_num["
             << learning_data_frame->frame_num() << "] obstacle_id["
             << obstacle_id << "] size[" << trajectory.size()
             << "] timestamp_diff["
             << start_point_timestamp_sec - trajectory.front().first
             << "] time_range["
             << fabs(trajectory.front().first - trajectory.back().first) << "]";

      // Pick at least one point regardless of the short time span,
      // to avoid model failure.
      TrajectoryPointFeature trajectory_point;
      trajectory_point.set_timestamp_sec(trajectory.front().first);
      trajectory_point.mutable_trajectory_point()->CopyFrom(
          trajectory.front().second);
      evaluated_trajectory.push_back(trajectory_point);
    } else {
      EvaluateTrajectoryByTime(learning_data_frame->frame_num(),
                               std::to_string(obstacle_id), trajectory,
                               start_point_timestamp_sec, delta_time,
                               &evaluated_trajectory);

      ADEBUG << "frame_num[" << learning_data_frame->frame_num()
             << "] obstacle_id[" << obstacle_id
             << "] orig obstacle_trajectory[" << trajectory.size()
             << "] evaluated_trajectory_size[" << evaluated_trajectory.size()
             << "]";
    }

    // update learning_data
    learning_data_frame->mutable_obstacle(i)
        ->mutable_obstacle_trajectory()
        ->clear_evaluated_trajectory_point();
    for (const auto& tp : evaluated_trajectory) {
      auto evaluated_trajectory_point = learning_data_frame->mutable_obstacle(i)
                                            ->mutable_obstacle_trajectory()
                                            ->add_evaluated_trajectory_point();
      evaluated_trajectory_point->set_timestamp_sec(tp.timestamp_sec());
      evaluated_trajectory_point->mutable_trajectory_point()->CopyFrom(
          tp.trajectory_point());
    }
  }
}
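
// Note on the perception-history conversion above: only the magnitudes of
// velocity and acceleration are kept, v = sqrt(vx^2 + vy^2) and
// a = sqrt(ax^2 + ay^2). For example, vx = 3 m/s and vy = 4 m/s give
// v = 5 m/s; the motion direction is carried separately by theta.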

// Resample each obstacle's prediction trajectories in the same way: re-time
// them from the start timestamp at intervals of delta_time and write the
// interpolated points back into learning_data_frame.
void TrajectoryEvaluator::EvaluateObstaclePredictionTrajectory(
    const double start_point_timestamp_sec, const double delta_time,
    LearningDataFrame* learning_data_frame) {
  for (int i = 0; i < learning_data_frame->obstacle_size(); ++i) {
    const int obstacle_id = learning_data_frame->obstacle(i).id();
    const auto obstacle_prediction =
        learning_data_frame->obstacle(i).obstacle_prediction();
    for (int j = 0; j < obstacle_prediction.trajectory_size(); ++j) {
      const auto obstacle_prediction_trajectory =
          obstacle_prediction.trajectory(j);

      std::vector<std::pair<double, CommonTrajectoryPointFeature>> trajectory;
      for (const auto& trajectory_point :
           obstacle_prediction_trajectory.trajectory_point()) {
        const double timestamp_sec =
            obstacle_prediction.timestamp_sec() +
            trajectory_point.trajectory_point().relative_time();
        trajectory.push_back(
            std::make_pair(timestamp_sec, trajectory_point.trajectory_point()));
      }
      if (fabs(trajectory.back().first - start_point_timestamp_sec) <=
          delta_time) {
        ADEBUG << "too short obstacle_prediction_trajectory. frame_num["
               << learning_data_frame->frame_num() << "] obstacle_id["
               << obstacle_id << "] size[" << trajectory.size()
               << "] timestamp_diff["
               << trajectory.back().first - start_point_timestamp_sec << "]";
        continue;
      }

      std::vector<TrajectoryPointFeature> evaluated_trajectory;
      EvaluateTrajectoryByTime(learning_data_frame->frame_num(),
                               std::to_string(obstacle_id), trajectory,
                               start_point_timestamp_sec, delta_time,
                               &evaluated_trajectory);

      ADEBUG << "frame_num[" << learning_data_frame->frame_num()
             << "] obstacle_id[" << obstacle_id
             << "orig obstacle_prediction_trajectory[" << trajectory.size()
             << "] evaluated_trajectory_size[" << evaluated_trajectory.size()
             << "]";

      // update learning_data
      learning_data_frame->mutable_obstacle(i)
          ->mutable_obstacle_prediction()
          ->mutable_trajectory(j)
          ->clear_trajectory_point();
      for (const auto& tp : evaluated_trajectory) {
        auto obstacle_prediction_trajectory_point =
            learning_data_frame->mutable_obstacle(i)
                ->mutable_obstacle_prediction()
                ->mutable_trajectory(j)
                ->add_trajectory_point();
        obstacle_prediction_trajectory_point->set_timestamp_sec(
            tp.timestamp_sec());
        obstacle_prediction_trajectory_point->mutable_trajectory_point()
            ->CopyFrom(tp.trajectory_point());
      }
    }
  }
}

// Conversion: build a common::TrajectoryPoint from a CommonTrajectoryPointFeature
// plus a relative time; the result is written into *trajectory_point.
// Inputs: trajectory point feature, relative time; output: trajectory point.
void TrajectoryEvaluator::Convert(const CommonTrajectoryPointFeature& tp,
                                  const double relative_time,
                                  common::TrajectoryPoint* trajectory_point) {
  // Path point of the output trajectory point (a trajectory = path + speed profile).
  auto path_point = trajectory_point->mutable_path_point();
  // Copy x, y, z, theta, s and lane_id from the feature's path point, then v,
  // a, the relative time, and gaussian_info.
  path_point->set_x(tp.path_point().x());
  path_point->set_y(tp.path_point().y());
  path_point->set_z(tp.path_point().z());
  path_point->set_theta(tp.path_point().theta());
  path_point->set_s(tp.path_point().s());
  path_point->set_lane_id(tp.path_point().lane_id());
  trajectory_point->set_v(tp.v());
  trajectory_point->set_a(tp.a());
  trajectory_point->set_relative_time(relative_time);
  trajectory_point->mutable_gaussian_info()->CopyFrom(tp.gaussian_info());
}

// Conversion: build a TrajectoryPointFeature from a common::TrajectoryPoint
// plus a timestamp; the result is written into *trajectory_point.
void TrajectoryEvaluator::Convert(const common::TrajectoryPoint& tp,
                                  const double timestamp_sec,
                                  TrajectoryPointFeature* trajectory_point) {
  trajectory_point->set_timestamp_sec(timestamp_sec);
  auto path_point =
      trajectory_point->mutable_trajectory_point()->mutable_path_point();
  path_point->set_x(tp.path_point().x());
  path_point->set_y(tp.path_point().y());
  path_point->set_z(tp.path_point().z());
  path_point->set_theta(tp.path_point().theta());
  path_point->set_s(tp.path_point().s());
  path_point->set_lane_id(tp.path_point().lane_id());
  trajectory_point->mutable_trajectory_point()->set_v(tp.v());
  trajectory_point->mutable_trajectory_point()->set_a(tp.a());
  trajectory_point->mutable_trajectory_point()->set_relative_time(
      tp.relative_time());
  trajectory_point->mutable_trajectory_point()
      ->mutable_gaussian_info()
      ->CopyFrom(tp.gaussian_info());
}

}  // namespace planning
}  // namespace apollo
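
Finally, the step that actually produces each resampled point is the call discretized_trajectory.Evaluate(relative_time) in EvaluateTrajectoryByTime; it is implemented in modules/planning/common/trajectory/discretized_trajectory.cc (included at the top of this file). Conceptually it locates the two stored points that bracket the query time and interpolates between them. The following is a simplified, self-contained sketch of that idea with a hypothetical Pt struct; it is not the Apollo implementation, which interpolates the full trajectory point via its linear-interpolation helpers.

// Simplified sketch (hypothetical Pt struct, not the Apollo proto) of what a
// time-based Evaluate() amounts to: find the bracketing stored points and
// linearly interpolate between them.
#include <algorithm>
#include <vector>

struct Pt { double relative_time, x, y, v; };  // illustrative fields only

Pt EvaluateByRelativeTime(const std::vector<Pt>& points, double t) {
  // Assumes points is non-empty and sorted by relative_time, as guaranteed by
  // the strictly-increasing-time filter in EvaluateTrajectoryByTime above.
  auto it = std::lower_bound(
      points.begin(), points.end(), t,
      [](const Pt& p, double query) { return p.relative_time < query; });
  if (it == points.begin()) return points.front();
  if (it == points.end()) return points.back();
  const Pt& a = *(it - 1);
  const Pt& b = *it;
  const double w = (t - a.relative_time) / (b.relative_time - a.relative_time);
  return {t, a.x + w * (b.x - a.x), a.y + w * (b.y - a.y),
          a.v + w * (b.v - a.v)};
}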
