Apollo2.0自动驾驶之apollo/modules/perception/obstacle/camera/common/cnn_adapter.h

/****************Apollo源码分析****************************

Copyright 2018 The File Authors & zouyu. All Rights Reserved.
Contact with: 1746430162@qq.com 181663309504

源码主要是c++实现的,也有少量python,git下载几百兆,其实代码不太多,主要是地图和数据占用了大量空间,主要程序
在apollo/modules目录中,
我们把它分成以下几部分(具体说明见各目录下的modules):
感知:感知当前位置,速度,障碍物等等
Apollo/modules/perception
预测:对场景下一步的变化做出预测
Apollo/modules/prediction
规划:
(1) 全局路径规划:通过起点终点计算行驶路径
Apollo/modules/routing
(2) 规划当前轨道:通过感知,预测,路径规划等信息计算轨道
Apollo/modules/planning
(3) 规划转换成命令:将轨道转换成控制汽车的命令(加速,制动,转向等)
Apollo/modules/control
其它
(1) 输入输出
i. Apollo/modules/drivers 设备驱动
ii. Apollo/modules/localization 位置信息
iii. Apollo/modules/monitor 监控模块
iv. Apollo/modules/canbus 与汽车硬件交互
v. Apollo/modules/map 地图数据
vi. Apollo/modules/third_party_perception 三方感知器支持
(2) 交互
i. Apollo/modules/dreamview 可视化模块
ii. Apollo/modules/hmi 把汽车当前状态显示给用户
(3) 工具
i. Apollo/modules/calibration 标注工具
ii. Apollo/modules/common 支持其它模块的公共工具
iii. Apollo/modules/data 数据工具
iv. Apollo/modules/tools 一些Python工具
(4) 其它
i. Apollo/modules/elo 高精度定位系统,无源码,但有文档
ii. Apollo/modules/e2e 收集传感器数据给PX2,ROS

自动驾驶系统先通过起点终点规划出整体路径(routing);然后在行驶过程中感知(perception)当前环境
(识别车辆行人路况标志等),并预测下一步发展;然后把已知信息都传入规划模块(planning),规划出之后的轨道;
控制模块(control)将轨道数据转换成对车辆的控制信号,通过汽车交互模块(canbus)控制汽车.
我觉得这里面算法技术含量最高的是感知perception和规划planning,具体请见本博客中各模块的分析代码。
****************************************************************************************/



/******************************************************************************
 * Copyright 2018 The Apollo Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *****************************************************************************/

#include "modules/perception/obstacle/camera/common/cnn_adapter.h"

#include <string>
#include <vector>

namespace apollo {
namespace perception {

// CNNCaffe
// Initializes the Caffe-backed CNN adapter: selects the compute device,
// builds the network from the prototxt definition, and loads trained weights.
//
// @param input_names  names of the network input blobs (not used by this
//                     backend — Caffe resolves blobs by name at lookup time).
// @param output_names names of the network output blobs (unused here, same
//                     reason as input_names).
// @param proto_file   path to the network definition (.prototxt).
// @param weight_file  path to the trained weights (.caffemodel).
// @param gpu_id       CUDA device id; a negative value selects CPU mode.
// @param model_root   model directory (unused by the Caffe backend; kept for
//                     interface parity with other CNNAdapter implementations).
// @return always true (a bad proto/weight file aborts inside Caffe/CHECK).
bool CNNCaffe::init(const std::vector<std::string> &input_names,
                    const std::vector<std::string> &output_names,
                    const std::string &proto_file,
                    const std::string &weight_file, int gpu_id,
                    const std::string &model_root) {
  if (gpu_id >= 0) {
    // Bind this thread to the requested GPU before constructing the net.
    caffe::Caffe::SetDevice(gpu_id);
    caffe::Caffe::set_mode(caffe::Caffe::GPU);
    caffe::Caffe::DeviceQuery();  // logs device properties for diagnostics
  } else {
    caffe::Caffe::set_mode(caffe::Caffe::CPU);
  }

  // init Net: TEST phase disables training-only layers (e.g. dropout).
  net_.reset(new caffe::Net<float>(proto_file, caffe::TEST));
  CHECK((net_ != nullptr));
  net_->CopyTrainedLayersFrom(weight_file);
  gpu_id_ = gpu_id;
  return true;
}

// Runs one forward pass of the loaded network on the current input blobs.
void CNNCaffe::forward() {
  net_->Forward();
}

// Fetches a network blob by name; the returned handle shares ownership with
// the underlying Caffe net (empty pointer if the name is unknown to the net).
boost::shared_ptr<caffe::Blob<float>> CNNCaffe::get_blob_by_name(
    const std::string &name) {
  const auto blob = net_->blob_by_name(name);
  return blob;
}
// CNNCaffe END

/*
// CNNTensorRT
bool CNNTensorRT::init(const std::vector<std::string> &input_names,
                       const std::vector<std::string> &output_names,
                       const std::string &proto_file,
                       const std::string &weight_file, int gpu_id,
                       const std::string &model_root) {
  // input_names_ = input_names;
  // output_names_ = output_names;
  // init GPU
  gpu_id_ = gpu_id;

  if (gpu_id >= 0) {
    CUDA_CHECK(cudaSetDevice(gpu_id));
  } else {
    AINFO << "must use gpu mode";
    return false;
  }
  anakin::BatchStream calibrationStream;
  nvinfer1::Int8EntropyCalibrator calibrator(calibrationStream, 0, true,
                                             model_root);
  if (int8_flag_) {
    AINFO << model_root;
    inference_.initNet(proto_file.c_str(), weight_file.c_str(), &calibrator);
  } else {
    inference_.initNet(proto_file.c_str(), weight_file.c_str(), nullptr);
  }
  const std::vector<anakin::Tensor<float> *> &input_tensors =
      inference_.getInputTensors();
  const std::vector<anakin::Tensor<float> *> &output_tensors =
      inference_.getOutputTensors();
  //  CHECK(input_tensors.size() == input_names.size()) << "number of input
  //  tensor and name should be equal!"; CHECK(output_tensors.size() ==
  //  output_names.size()) << "number of output tensor and name should be
  //  equal!";

  inference_.execute();
  std::vector<void *> buffers = inference_.getBuffers();
  int cnt = 0;
  for (int i = 0; i < input_tensors.size(); i++, cnt++) {
    boost::shared_ptr<caffe::Blob<float>> blob;
    blob.reset(new caffe::Blob<float>);
    blob->Reshape(input_tensors[i]->dims());

    blobs_.insert(std::make_pair(input_names[i], blob));
    name_buffers_.insert(std::make_pair(input_names[i], buffers[cnt]));
    input_names_.push_back(input_names[i]);
  }
  for (int i = 0; i < output_tensors.size(); i++, cnt++) {
    boost::shared_ptr<caffe::Blob<float>> blob;
    blob.reset(new caffe::Blob<float>);
    blob->Reshape(output_tensors[i]->dims());

    name_buffers_.insert(std::make_pair(output_names[i], buffers[cnt]));
    blobs_.insert(std::make_pair(output_names[i], blob));
    output_names_.push_back(output_names[i]);
  }

  for (auto tmp : input_names) {
    AINFO << tmp;
  }
  for (auto tmp : output_names) {
    AINFO << tmp;
  }
  return true;
}

void CNNTensorRT::forward() {
  CUDA_CHECK(cudaSetDevice(gpu_id_));
  sync_blob(input_names_, false);
  inference_.execute();
  cudaDeviceSynchronize();
  sync_blob(output_names_, true);
}

boost::shared_ptr<caffe::Blob<float>> CNNTensorRT::get_blob_by_name(
    const std::string &name) {
  auto iter = blobs_.find(name);
  if (iter == blobs_.end()) {
    return nullptr;
  }
  return iter->second;
}
// CNNTensorRT END
*/

}  // namespace perception
}  // namespace apollo



/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/

#include "modules/perception/obstacle/camera/common/cnn_adapter.h"

#include <string>
#include <vector>

namespace apollo {
namespace perception {

// CNNCaffe 阿波罗中的深度学习框架用的是CAFFE
// CNNCaffe — Apollo's perception camera module uses Caffe as its deep
// learning backend.
//
// Sets up the compute device (GPU when gpu_id >= 0, otherwise CPU), then
// constructs the inference network from proto_file and loads the trained
// weights from weight_file.
//
// @param input_names  input blob names (unused by the Caffe backend).
// @param output_names output blob names (unused by the Caffe backend).
// @param proto_file   network definition (.prototxt).
// @param weight_file  trained model weights (.caffemodel).
// @param gpu_id       GPU device index to run on; negative means CPU mode.
// @param model_root   model directory (unused here; part of the common
//                     adapter interface).
// @return true on completion.
bool CNNCaffe::init(const std::vector<std::string> &input_names,
                    const std::vector<std::string> &output_names,
                    const std::string &proto_file,
                    const std::string &weight_file, int gpu_id,
                    const std::string &model_root) {
  const bool use_gpu = (gpu_id >= 0);
  if (use_gpu) {
    // Select which GPU executes the network when several are present.
    caffe::Caffe::SetDevice(gpu_id);
    caffe::Caffe::set_mode(caffe::Caffe::GPU);
    // Query and log the selected device's capabilities.
    caffe::Caffe::DeviceQuery();
  } else {
    // No usable GPU requested — fall back to CPU inference.
    caffe::Caffe::set_mode(caffe::Caffe::CPU);
  }

  // Build the net in TEST phase and populate it with the trained weights.
  net_.reset(new caffe::Net<float>(proto_file, caffe::TEST));
  CHECK((net_ != nullptr));
  net_->CopyTrainedLayersFrom(weight_file);
  gpu_id_ = gpu_id;
  return true;
}

// Executes a single inference pass over the network.
void CNNCaffe::forward() { net_->Forward(); }

// Returns the blob registered under `name` in the underlying net, sharing
// ownership with the net; an empty pointer signals an unknown blob name.
boost::shared_ptr<caffe::Blob<float>> CNNCaffe::get_blob_by_name(
    const std::string &name) {
  return net_->blob_by_name(name);
}
// CNNCaffe END

/*
// CNNTensorRT
bool CNNTensorRT::init(const std::vector<std::string> &input_names,
const std::vector<std::string> &output_names,
const std::string &proto_file,
const std::string &weight_file, int gpu_id,
const std::string &model_root) {
// input_names_ = input_names;
// output_names_ = output_names;
// init GPU
gpu_id_ = gpu_id;

if (gpu_id >= 0) {
CUDA_CHECK(cudaSetDevice(gpu_id));
} else {
AINFO << "must use gpu mode";
return false;
}
anakin::BatchStream calibrationStream;
nvinfer1::Int8EntropyCalibrator calibrator(calibrationStream, 0, true,
model_root);
if (int8_flag_) {
AINFO << model_root;
inference_.initNet(proto_file.c_str(), weight_file.c_str(), &calibrator);
} else {
inference_.initNet(proto_file.c_str(), weight_file.c_str(), nullptr);
}
const std::vector<anakin::Tensor<float> *> &input_tensors =
inference_.getInputTensors();
const std::vector<anakin::Tensor<float> *> &output_tensors =
inference_.getOutputTensors();
// CHECK(input_tensors.size() == input_names.size()) << "number of input
// tensor and name should be equal!"; CHECK(output_tensors.size() ==
// output_names.size()) << "number of output tensor and name should be
// equal!";

inference_.execute();
std::vector<void *> buffers = inference_.getBuffers();
int cnt = 0;
for (int i = 0; i < input_tensors.size(); i++, cnt++) {
boost::shared_ptr<caffe::Blob<float>> blob;
blob.reset(new caffe::Blob<float>);
blob->Reshape(input_tensors[i]->dims());

blobs_.insert(std::make_pair(input_names[i], blob));
name_buffers_.insert(std::make_pair(input_names[i], buffers[cnt]));
input_names_.push_back(input_names[i]);
}
for (int i = 0; i < output_tensors.size(); i++, cnt++) {
boost::shared_ptr<caffe::Blob<float>> blob;
blob.reset(new caffe::Blob<float>);
blob->Reshape(output_tensors[i]->dims());

name_buffers_.insert(std::make_pair(output_names[i], buffers[cnt]));
blobs_.insert(std::make_pair(output_names[i], blob));
output_names_.push_back(output_names[i]);
}

for (auto tmp : input_names) {
AINFO << tmp;
}
for (auto tmp : output_names) {
AINFO << tmp;
}
return true;
}

void CNNTensorRT::forward() {
CUDA_CHECK(cudaSetDevice(gpu_id_));
sync_blob(input_names_, false);
inference_.execute();
cudaDeviceSynchronize();
sync_blob(output_names_, true);
}

boost::shared_ptr<caffe::Blob<float>> CNNTensorRT::get_blob_by_name(
const std::string &name) {
auto iter = blobs_.find(name);
if (iter == blobs_.end()) {
return nullptr;
}
return iter->second;
}
// CNNTensorRT END
*/

} // namespace perception
} // namespace apollo


帮我修改下面的代码。要求建立一次weboscket链接,链接到wss://autopilot-test.t3go.cn:443/api/v1/vehicle/push/message/LFB1FV696M2L43840,当订阅到感知话题调用perceptionCallback时,通过wss发送serialized_data:#include "ros/ros.h" #include "std_msgs/String.h" #include <boost/thread/locks.hpp> #include <boost/thread/shared_mutex.hpp> #include "third_party/apollo/proto/perception/perception_obstacle.pb.h" #include "t3_perception.pb.h" #include <iostream> #include <websocketpp/config/asio_client.hpp> #include <websocketpp/client.hpp> #include <websocketpp/common/thread.hpp> apollo::perception::PerceptionObstacles perception_obstacles_; typedef websocketpp::clientwebsocketpp::config::asio_tls_client client; void perceptionCallback(const std_msgs::String& msg) { ROS_WARN("t3 perceptionCallback parse"); if (perception_obstacles_.ParseFromString(msg.data)) { double timestamp = perception_obstacles_.header().timestamp_sec(); ROS_INFO("t3 perceptionCallback timestamp %f count:%d", timestamp, perception_obstacles_.perception_obstacle().size()); std::string data; perception_obstacles_.SerializeToString(&data); VehData veh_data; veh_data.set_messagetype(5); veh_data.set_messagedes("PerceptionObstacles"); veh_data.set_contents(data); std::string serialized_data; veh_data.SerializeToString(&serialized_data); } else { ROS_ERROR("t3 perceptionCallback parse fail!"); } } int main(int argc, char **argv) { ros::init(argc, argv, "listener"); ros::NodeHandle n; ros::Subscriber sub = n.subscribe("/perception_node/perception_objects", 1000, perceptionCallback); ros::spin(); return 0; }
06-09
In file included from /home/acceler/code/apollo_ros/apollo_ros/src/apollo.ros-1.0.0-master/apollo_common/include/apollo_common/apollo_app.h:46:0, from /home/acceler/code/apollo_ros/apollo_ros/src/apollo.ros-1.0.0-master/apollo_common/src/apollo_app.cc:33: /home/acceler/code/apollo_ros/apollo_ros/src/apollo.ros-1.0.0-master/apollo_common/include/apollo_common/log.h:40:10: fatal error: glog/logging.h: No such file or directory #include <glog/logging.h> ^~~~~~~~~~~~~~~~ compilation terminated. apollo.ros-1.0.0-master/apollo_common/CMakeFiles/apollo_common.dir/build.make:62: recipe for target 'apollo.ros-1.0.0-master/apollo_common/CMakeFiles/apollo_common.dir/src/apollo_app.cc.o' failed make[2]: *** [apollo.ros-1.0.0-master/apollo_common/CMakeFiles/apollo_common.dir/src/apollo_app.cc.o] Error 1 make[2]: *** Waiting for unfinished jobs.... In file included from /home/acceler/code/apollo_ros/apollo_ros/src/apollo.ros-1.0.0-master/apollo_common/include/apollo_common/adapters/adapter_manager.h:48:0, from /home/acceler/code/apollo_ros/apollo_ros/src/apollo.ros-1.0.0-master/apollo_common/src/adapters/adapter_manager.cc:33: /home/acceler/code/apollo_ros/apollo_ros/src/apollo.ros-1.0.0-master/apollo_common/include/apollo_common/adapters/adapter.h:49:10: fatal error: glog/logging.h: No such file or directory #include <glog/logging.h> ^~~~~~~~~~~~~~~~ compilation terminated. apollo.ros-1.0.0-master/apollo_common/CMakeFiles/apollo_common.dir/build.make:110: recipe for target 'apollo.ros-1.0.0-master/apollo_common/CMakeFiles/apollo_common.dir/src/adapters/adapter_manager.cc.o' failed make[2]: *** [apollo.ros-1.0.0-master/apollo_common/CMakeFiles/apollo_common.dir/src/adapters/adapter_manager.cc.o] Error 1 CMakeFiles/Makefile2:3894: recipe for target 'apollo.ros-1.0.0-master/apollo_common/CMakeFiles/apollo_common.dir/all' failed make[1]: *** [apollo.ros-1.0.0-master/apollo_common/CMakeFiles/apollo_common.dir/all] Error 2 make[1]: *** Waiting for unfinished jobs.... 
[ 54%] Linking CXX executable /home/acceler/code/apollo_ros/apollo_ros/devel/lib/IntegratedNavigation/IntegratedNavigation_node [ 54%] Built target IntegratedNavigation_node [ 55%] Linking CXX executable /home/acceler/code/apollo_ros/apollo_ros/devel/lib/TimeSynchronierProcess/timeSynchronierProcess_node [ 55%] Built target timeSynchronierProcess_node Makefile:140: recipe for target 'all' failed make: *** [all] Error 2 Invoking "make -j4 -l4" failed
07-23
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值