/****************Apollo源码分析****************************
Copyright 2018 The File Authors & zouyu. All Rights Reserved.
Contact with: 1746430162@qq.com 181663309504
源码主要是c++实现的,也有少量python,git下载几百兆,其实代码不太多,主要是地图和数据占用了大量空间,主要程序
在apollo/modules目录中,
我们把它分成以下几部分(具体说明见各目录下的modules):
感知:感知当前位置,速度,障碍物等等
Apollo/modules/perception
预测:对场景下一步的变化做出预测
Apollo/modules/prediction
规划:
(1) 全局路径规划:通过起点终点计算行驶路径
Apollo/modules/routing
(2) 规划当前轨道:通过感知,预测,路径规划等信息计算轨道
Apollo/modules/planning
(3) 规划转换成命令:将轨道转换成控制汽车的命令(加速,制动,转向等)
Apollo/modules/control
其它
(1) 输入输出
i. Apollo/modules/drivers 设备驱动
ii. Apollo/modules/localization 位置信息
iii. Apollo/modules/monitor 监控模块
iv. Apollo/modules/canbus 与汽车硬件交互
v. Apollo/modules/map 地图数据
vi. Apollo/modules/third_party_perception 三方感知器支持
(2) 交互
i. Apollo/modules/dreamview 可视化模块
ii. Apollo/modules/hmi 把汽车当前状态显示给用户
(3) 工具
i. Apollo/modules/calibration 标注工具
ii. Apollo/modules/common 支持其它模块的公共工具
iii. Apollo/modules/data 数据工具
iv. Apollo/modules/tools 一些Python工具
(4) 其它
i. Apollo/modules/elo 高精度定位系统,无源码,但有文档
ii. Apollo/modules/e2e 收集传感器数据给PX2,ROS
自动驾驶系统先通过起点终点规划出整体路径(routing);然后在行驶过程中感知(perception)当前环境
(识别车辆行人路况标志等),并预测下一步发展;然后把已知信息都传入规划模块(planning),规划出之后的轨道;
控制模块(control)将轨道数据转换成对车辆的控制信号,通过汽车交互模块(canbus)控制汽车.
我觉得这里面算法技术含量最高的是感知perception和规划planning,具体请见本博客中各模块的分析代码。
/****************************************************************************************
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/perception/obstacle/camera/common/cnn_adapter.h"
#include <string>
#include <vector>
namespace apollo {
namespace perception {
// CNNCaffe
// Initializes the Caffe backend: selects the execution device, builds the
// network from the prototxt description, and loads the trained weights.
// input_names / output_names / model_root are unused by this backend.
// Always returns true (CHECK aborts on allocation failure).
bool CNNCaffe::init(const std::vector<std::string> &input_names,
                    const std::vector<std::string> &output_names,
                    const std::string &proto_file,
                    const std::string &weight_file, int gpu_id,
                    const std::string &model_root) {
  // Remember the requested device id for later use.
  gpu_id_ = gpu_id;

  // Any non-negative id selects GPU mode; otherwise fall back to CPU.
  if (gpu_id < 0) {
    caffe::Caffe::set_mode(caffe::Caffe::CPU);
  } else {
    caffe::Caffe::SetDevice(gpu_id);
    caffe::Caffe::set_mode(caffe::Caffe::GPU);
    caffe::Caffe::DeviceQuery();
  }

  // Build the network in inference (TEST) phase, then pull in the
  // trained layer parameters from the weight file.
  net_.reset(new caffe::Net<float>(proto_file, caffe::TEST));
  CHECK(net_ != nullptr);
  net_->CopyTrainedLayersFrom(weight_file);
  return true;
}
// Runs one full inference pass over the loaded network.
void CNNCaffe::forward() {
  net_->Forward();
}
// Looks up a named blob in the network. Delegates to Caffe, which yields
// an empty pointer when no blob with this name exists.
boost::shared_ptr<caffe::Blob<float>> CNNCaffe::get_blob_by_name(
    const std::string &name) {
  auto blob = net_->blob_by_name(name);
  return blob;
}
// CNNCaffe END
/*
// CNNTensorRT
bool CNNTensorRT::init(const std::vector<std::string> &input_names,
const std::vector<std::string> &output_names,
const std::string &proto_file,
const std::string &weight_file, int gpu_id,
const std::string &model_root) {
// input_names_ = input_names;
// output_names_ = output_names;
// init GPU
gpu_id_ = gpu_id;
if (gpu_id >= 0) {
CUDA_CHECK(cudaSetDevice(gpu_id));
} else {
AINFO << "must use gpu mode";
return false;
}
anakin::BatchStream calibrationStream;
nvinfer1::Int8EntropyCalibrator calibrator(calibrationStream, 0, true,
model_root);
if (int8_flag_) {
AINFO << model_root;
inference_.initNet(proto_file.c_str(), weight_file.c_str(), &calibrator);
} else {
inference_.initNet(proto_file.c_str(), weight_file.c_str(), nullptr);
}
const std::vector<anakin::Tensor<float> *> &input_tensors =
inference_.getInputTensors();
const std::vector<anakin::Tensor<float> *> &output_tensors =
inference_.getOutputTensors();
// CHECK(input_tensors.size() == input_names.size()) << "number of input
// tensor and name should be equal!"; CHECK(output_tensors.size() ==
// output_names.size()) << "number of output tensor and name should be
// equal!";
inference_.execute();
std::vector<void *> buffers = inference_.getBuffers();
int cnt = 0;
for (int i = 0; i < input_tensors.size(); i++, cnt++) {
boost::shared_ptr<caffe::Blob<float>> blob;
blob.reset(new caffe::Blob<float>);
blob->Reshape(input_tensors[i]->dims());
blobs_.insert(std::make_pair(input_names[i], blob));
name_buffers_.insert(std::make_pair(input_names[i], buffers[cnt]));
input_names_.push_back(input_names[i]);
}
for (int i = 0; i < output_tensors.size(); i++, cnt++) {
boost::shared_ptr<caffe::Blob<float>> blob;
blob.reset(new caffe::Blob<float>);
blob->Reshape(output_tensors[i]->dims());
name_buffers_.insert(std::make_pair(output_names[i], buffers[cnt]));
blobs_.insert(std::make_pair(output_names[i], blob));
output_names_.push_back(output_names[i]);
}
for (auto tmp : input_names) {
AINFO << tmp;
}
for (auto tmp : output_names) {
AINFO << tmp;
}
return true;
}
void CNNTensorRT::forward() {
CUDA_CHECK(cudaSetDevice(gpu_id_));
sync_blob(input_names_, false);
inference_.execute();
cudaDeviceSynchronize();
sync_blob(output_names_, true);
}
boost::shared_ptr<caffe::Blob<float>> CNNTensorRT::get_blob_by_name(
const std::string &name) {
auto iter = blobs_.find(name);
if (iter == blobs_.end()) {
return nullptr;
}
return iter->second;
}
// CNNTensorRT END
*/
} // namespace perception
} // namespace apollo
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include
"modules/perception/obstacle/camera/common/cnn_adapter.h"
#include
<string>
#include
<vector>
namespace
apollo
{
namespace
perception
{
// CNNCaffe: the deep-learning framework used here is Caffe.
//
// Initializes the backend: picks the execution device, loads the network
// definition (proto_file) and the trained weights (weight_file) — the
// trained model is the critical artifact that end-to-end tests rely on.
bool CNNCaffe::init(const std::vector<std::string> &input_names,   // input blob names
                    const std::vector<std::string> &output_names,  // output blob names
                    const std::string &proto_file,                 // network definition
                    const std::string &weight_file, int gpu_id,    // weights / device id
                    const std::string &model_root) {
  if (gpu_id >= 0) {
    // A GPU is present: choose which device id to run on and switch
    // Caffe into GPU mode.
    caffe::Caffe::SetDevice(gpu_id);
    caffe::Caffe::set_mode(caffe::Caffe::GPU);
    caffe::Caffe::DeviceQuery();  // report GPU device information
  } else {
    // No GPU available — run on the CPU instead.
    caffe::Caffe::set_mode(caffe::Caffe::CPU);
  }
  // Initialize the network in TEST (inference) phase from the prototxt.
  net_.reset(new caffe::Net<float>(proto_file, caffe::TEST));
  CHECK((net_ != nullptr));
  // Load the trained layer weights into the freshly built network.
  net_->CopyTrainedLayersFrom(weight_file);
  gpu_id_ = gpu_id;
  return true;
}
// Executes one forward (inference) pass of the network.
void CNNCaffe::forward() { net_->Forward(); }
// Fetches a blob by name from the network; empty pointer if not found.
boost::shared_ptr<caffe::Blob<float>> CNNCaffe::get_blob_by_name(
    const std::string &name) {
  return net_->blob_by_name(name);
}
// CNNCaffe END
/*
// CNNTensorRT
bool CNNTensorRT::init(const std::vector<std::string> &input_names,
const std::vector<std::string> &output_names,
const std::string &proto_file,
const std::string &weight_file, int gpu_id,
const std::string &model_root) {
// input_names_ = input_names;
// output_names_ = output_names;
// init GPU
gpu_id_ = gpu_id;
if (gpu_id >= 0) {
CUDA_CHECK(cudaSetDevice(gpu_id));
} else {
AINFO << "must use gpu mode";
return false;
}
anakin::BatchStream calibrationStream;
nvinfer1::Int8EntropyCalibrator calibrator(calibrationStream, 0, true,
model_root);
if (int8_flag_) {
AINFO << model_root;
inference_.initNet(proto_file.c_str(), weight_file.c_str(), &calibrator);
} else {
inference_.initNet(proto_file.c_str(), weight_file.c_str(), nullptr);
}
const std::vector<anakin::Tensor<float> *> &input_tensors =
inference_.getInputTensors();
const std::vector<anakin::Tensor<float> *> &output_tensors =
inference_.getOutputTensors();
// CHECK(input_tensors.size() == input_names.size()) << "number of input
// tensor and name should be equal!"; CHECK(output_tensors.size() ==
// output_names.size()) << "number of output tensor and name should be
// equal!";
inference_.execute();
std::vector<void *> buffers = inference_.getBuffers();
int cnt = 0;
for (int i = 0; i < input_tensors.size(); i++, cnt++) {
boost::shared_ptr<caffe::Blob<float>> blob;
blob.reset(new caffe::Blob<float>);
blob->Reshape(input_tensors[i]->dims());
blobs_.insert(std::make_pair(input_names[i], blob));
name_buffers_.insert(std::make_pair(input_names[i], buffers[cnt]));
input_names_.push_back(input_names[i]);
}
for (int i = 0; i < output_tensors.size(); i++, cnt++) {
boost::shared_ptr<caffe::Blob<float>> blob;
blob.reset(new caffe::Blob<float>);
blob->Reshape(output_tensors[i]->dims());
name_buffers_.insert(std::make_pair(output_names[i], buffers[cnt]));
blobs_.insert(std::make_pair(output_names[i], blob));
output_names_.push_back(output_names[i]);
}
for (auto tmp : input_names) {
AINFO << tmp;
}
for (auto tmp : output_names) {
AINFO << tmp;
}
return true;
}
void CNNTensorRT::forward() {
CUDA_CHECK(cudaSetDevice(gpu_id_));
sync_blob(input_names_, false);
inference_.execute();
cudaDeviceSynchronize();
sync_blob(output_names_, true);
}
boost::shared_ptr<caffe::Blob<float>> CNNTensorRT::get_blob_by_name(
const std::string &name) {
auto iter = blobs_.find(name);
if (iter == blobs_.end()) {
return nullptr;
}
return iter->second;
}
// CNNTensorRT END
*/
}  // namespace perception
}  // namespace apollo