在此贴出apollo源代码，本篇只是源码解读，想要查看此文件依赖的相关文件可访问：https://github.com/ApolloAuto/apollo/tree/master/cyber/base
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#ifndef CYBER_BASE_BOUNDED_QUEUE_H_
#define CYBER_BASE_BOUNDED_QUEUE_H_
#include <unistd.h>
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <utility>
#include "cyber/base/macros.h"
#include "cyber/base/wait_strategy.h"
namespace apollo {
namespace cyber {
namespace base {
// Lock-free, fixed-capacity MPMC queue backed by a contiguous ring buffer.
// Storage is allocated by Init(); Enqueue on a full queue and Dequeue on an
// empty queue fail immediately, while the Wait* variants retry according to
// the installed WaitStrategy until success, timeout, or BreakAllWait().
template <typename T>
class BoundedQueue {
 public:
  using value_type = T;
  using size_type = uint64_t;

 public:
  BoundedQueue() {}
  BoundedQueue& operator=(const BoundedQueue& other) = delete;
  BoundedQueue(const BoundedQueue& other) = delete;
  ~BoundedQueue();

  // Allocate storage for `size` elements (plus two sentinel slots) and
  // install the wait strategy. Must be called before any other member.
  // The queue takes ownership of `strategy`.
  bool Init(uint64_t size);
  bool Init(uint64_t size, WaitStrategy* strategy);

  bool Enqueue(const T& element);
  bool Enqueue(T&& element);
  bool WaitEnqueue(const T& element);
  bool WaitEnqueue(T&& element);
  bool Dequeue(T* element);
  bool WaitDequeue(T* element);
  uint64_t Size();
  bool Empty();
  // Replace the wait strategy; the queue takes ownership. Parameter renamed
  // from `WaitStrategy` (which shadowed the type name) to match the
  // out-of-class definition.
  void SetWaitStrategy(WaitStrategy* strategy);
  void BreakAllWait();
  uint64_t Head() { return head_.load(); }
  uint64_t Tail() { return tail_.load(); }
  uint64_t Commit() { return commit_.load(); }

 private:
  uint64_t GetIndex(uint64_t num);

  // Indices grow monotonically; GetIndex() folds them into the pool.
  // Each counter is cache-line aligned to avoid false sharing between
  // producers and consumers.
  alignas(CACHELINE_SIZE) std::atomic<uint64_t> head_ = {0};
  alignas(CACHELINE_SIZE) std::atomic<uint64_t> tail_ = {1};
  // commit_ trails tail_: slots in [commit_, tail_) are reserved by some
  // producer but not yet written, so consumers must not read past commit_.
  alignas(CACHELINE_SIZE) std::atomic<uint64_t> commit_ = {1};
  // alignas(CACHELINE_SIZE) std::atomic<uint64_t> size_ = {0};
  uint64_t pool_size_ = 0;
  T* pool_ = nullptr;
  std::unique_ptr<WaitStrategy> wait_strategy_ = nullptr;
  volatile bool break_all_wait_ = false;
};
// Tear down the queue: release blocked waiters, then destroy every
// placement-constructed element and free the raw buffer.
template <typename T>
BoundedQueue<T>::~BoundedQueue() {
  // Only meaningful once Init() has installed a strategy.
  if (wait_strategy_ != nullptr) {
    BreakAllWait();
  }
  if (pool_ != nullptr) {
    // Objects were placement-new'ed into calloc'ed memory, so each one
    // needs an explicit destructor call before the buffer is freed.
    for (uint64_t slot = 0; slot != pool_size_; ++slot) {
      pool_[slot].~T();
    }
    std::free(pool_);
  }
}
// Convenience overload: initialize with the default SleepWaitStrategy.
template <typename T>
inline bool BoundedQueue<T>::Init(uint64_t size) {
  auto* default_strategy = new SleepWaitStrategy();
  return Init(size, default_strategy);
}
// Allocate the ring buffer and install the wait strategy.
// Returns false (and leaves the queue unusable) if allocation fails.
template <typename T>
bool BoundedQueue<T>::Init(uint64_t size, WaitStrategy* strategy) {
  // Take ownership of `strategy` up front so it is not leaked when the
  // allocation below fails (the original returned false without freeing it).
  wait_strategy_.reset(strategy);
  // Head and tail each occupy a sentinel slot, hence size + 2.
  pool_size_ = size + 2;
  pool_ = reinterpret_cast<T*>(std::calloc(pool_size_, sizeof(T)));
  if (pool_ == nullptr) {
    return false;
  }
  // calloc only zero-fills; run T's constructor in every slot so that
  // assignment in Enqueue operates on live objects.
  for (uint64_t i = 0; i < pool_size_; ++i) {
    new (&(pool_[i])) T();
  }
  return true;
}
// Non-blocking multi-producer enqueue by copy.
// Returns false immediately if the queue is full; on success wakes one
// waiter via the wait strategy.
template <typename T>
bool BoundedQueue<T>::Enqueue(const T& element) {
  uint64_t new_tail = 0;
  uint64_t old_commit = 0;
  uint64_t old_tail = tail_.load(std::memory_order_acquire);
  do {
    new_tail = old_tail + 1;
    // Full check: advancing tail would land on head's slot.
    if (GetIndex(new_tail) == GetIndex(head_.load(std::memory_order_acquire))) {
      return false;
    }
    // Reserve slot old_tail by advancing tail_; on CAS failure old_tail is
    // reloaded and the loop retries from the new tail.
  } while (!tail_.compare_exchange_weak(old_tail, new_tail,
                                        std::memory_order_acq_rel,
                                        std::memory_order_relaxed));
  // The slot is now exclusively ours; write the element.
  pool_[GetIndex(old_tail)] = element;
  // Publish in reservation order: spin until commit_ equals our reserved
  // position (old_tail), then advance it past our slot so consumers may
  // read it. Producers that reserved earlier slots publish first.
  do {
    old_commit = old_tail;
  } while (cyber_unlikely(!commit_.compare_exchange_weak(
      old_commit, new_tail, std::memory_order_acq_rel,
      std::memory_order_relaxed)));
  wait_strategy_->NotifyOne();
  return true;
}
// Non-blocking multi-producer enqueue by move.
// Returns false immediately if the queue is full; `element` is only
// moved-from on success, so callers (e.g. WaitEnqueue) may retry safely.
template <typename T>
bool BoundedQueue<T>::Enqueue(T&& element) {
  uint64_t new_tail = 0;
  uint64_t old_commit = 0;
  uint64_t old_tail = tail_.load(std::memory_order_acquire);
  do {
    new_tail = old_tail + 1;
    // Full check: advancing tail would land on head's slot.
    if (GetIndex(new_tail) == GetIndex(head_.load(std::memory_order_acquire))) {
      return false;
    }
    // Reserve slot old_tail by advancing tail_; retry on CAS failure.
  } while (!tail_.compare_exchange_weak(old_tail, new_tail,
                                        std::memory_order_acq_rel,
                                        std::memory_order_relaxed));
  // Slot reserved exclusively for us; move the element into it.
  pool_[GetIndex(old_tail)] = std::move(element);
  // Publish in reservation order: wait for commit_ to reach old_tail, then
  // advance it past our slot so consumers may read the element.
  do {
    old_commit = old_tail;
  } while (cyber_unlikely(!commit_.compare_exchange_weak(
      old_commit, new_tail, std::memory_order_acq_rel,
      std::memory_order_relaxed)));
  wait_strategy_->NotifyOne();
  return true;
}
// Non-blocking multi-consumer dequeue into *element.
// Returns false immediately if no committed element is available.
template <typename T>
bool BoundedQueue<T>::Dequeue(T* element) {
  uint64_t new_head = 0;
  uint64_t old_head = head_.load(std::memory_order_acquire);
  do {
    new_head = old_head + 1;
    // Empty check: only slots strictly before commit_ hold published data.
    if (new_head == commit_.load(std::memory_order_acquire)) {
      return false;
    }
    // Speculatively copy the element out. If the CAS below fails, another
    // consumer claimed this slot first; old_head is reloaded and the copy
    // is redone for the new candidate slot.
    *element = pool_[GetIndex(new_head)];
  } while (!head_.compare_exchange_weak(old_head, new_head,
                                        std::memory_order_acq_rel,
                                        std::memory_order_relaxed));
  return true;
}
// Blocking enqueue by copy: retries until the element fits, the wait
// strategy gives up (timeout), or BreakAllWait() is called.
template <typename T>
bool BoundedQueue<T>::WaitEnqueue(const T& element) {
  while (!break_all_wait_) {
    if (Enqueue(element)) {
      return true;
    }
    // EmptyWait() returning false means the strategy timed out; give up.
    if (!wait_strategy_->EmptyWait()) {
      break;
    }
  }
  return false;
}
// Blocking enqueue by move. Enqueue(T&&) consumes `element` only on
// success, so retrying after a failed attempt is safe.
template <typename T>
bool BoundedQueue<T>::WaitEnqueue(T&& element) {
  while (!break_all_wait_) {
    if (Enqueue(std::move(element))) {
      return true;
    }
    // A false EmptyWait() means the strategy timed out; stop retrying.
    if (!wait_strategy_->EmptyWait()) {
      break;
    }
  }
  return false;
}
// Blocking dequeue: retries until an element arrives, the wait strategy
// gives up (timeout), or BreakAllWait() is called.
template <typename T>
bool BoundedQueue<T>::WaitDequeue(T* element) {
  for (;;) {
    if (break_all_wait_) {
      return false;
    }
    if (Dequeue(element)) {
      return true;
    }
    // A false EmptyWait() means the strategy timed out; stop retrying.
    if (!wait_strategy_->EmptyWait()) {
      return false;
    }
  }
}
// Number of committed-or-reserved elements. tail_ starts one past head_,
// hence the -1. Under concurrent access this is only an approximate
// snapshot, since the two counters are read at different instants.
template <typename T>
inline uint64_t BoundedQueue<T>::Size() {
  return tail_.load() - head_.load() - 1;
}
// True when the queue currently holds no elements (approximate under
// concurrent access, like Size()).
template <typename T>
inline bool BoundedQueue<T>::Empty() {
  return 0 == Size();
}
// Fold a monotonically increasing counter onto a ring-buffer slot.
// For unsigned operands `num % pool_size_` is by definition
// num - (num / pool_size_) * pool_size_, and compilers emit identical
// code for both forms, so the idiomatic modulo is used (the original
// spelled out the subtraction with a misleading "faster than %" note).
template <typename T>
inline uint64_t BoundedQueue<T>::GetIndex(uint64_t num) {
  return num % pool_size_;
}
// Replace the wait strategy. The queue takes ownership of `strategy`;
// the previously installed strategy is destroyed.
template <typename T>
inline void BoundedQueue<T>::SetWaitStrategy(WaitStrategy* strategy) {
  std::unique_ptr<WaitStrategy> owned(strategy);
  wait_strategy_ = std::move(owned);
}
// Signal every Wait* loop to exit, then wake all threads blocked inside
// the wait strategy. Guard against being called before Init(), when no
// strategy is installed yet (the original dereferenced a null
// unique_ptr in that case; the destructor guards this, but this public
// method could be called directly).
template <typename T>
inline void BoundedQueue<T>::BreakAllWait() {
  break_all_wait_ = true;
  if (wait_strategy_ != nullptr) {
    wait_strategy_->BreakAllWait();
  }
}
} // namespace base
} // namespace cyber
} // namespace apollo
#endif // CYBER_BASE_BOUNDED_QUEUE_H_
1.整个类是个数组,指定大小的数组,意味着数据是连续存储的。
2.Init的时候会申请好相关的内存,大小为(N+2)*单个大小。为什么是N+2?环形队列需要额外的空位来区分"空"和"满"两种状态:代码中head_初始为0、tail_初始为1,head和tail各占一个哨兵槽位,这两个槽位同样会被构造出T对象但不存放有效数据,所以实际可用容量仍是N。
3.数据的存储如下图所示(图片被旋转了,好烦)
队列大小是固定的,超过指定数量再进行加入会失败。
数据是循环往复利用的,感觉上就是环形内存池
4.之所以是无锁队列是因为底层有原子操作
5.commit_成员是干什么的?
从Enqueue的实现看,入队分两步:先用CAS推进tail_预定一个槽位,再向该槽位写入数据,最后把commit_推进到自己预定的位置之后,表示数据已经真正写完;而Dequeue只读取commit_之前的槽位。这样在多生产者并发时,消费者不会读到"已被预定但尚未写完"的数据。
当队列已满再进行加入数据的时候,调用Enqueue会返回false,调用WaitEnqueue会进行等待之后再进行处理;队列为空时取数据的操作策略相同。
来看看等待策略,以写数据为例进行分析(默认是SleepWaitStrategy)
1.SleepWaitStrategy 写数据失败,线程进行sleep,之后再进行写,直到成功
2.YieldWaitStrategy 写数据失败,线程进行yield,之后再进行写,直到成功
3.BusySpinWaitStrategy 写数据失败,直接进行再次写,直到成功,(自旋读写,CPU负载较高)
4.TimeoutBlockWaitStrategy 写数据失败,阻塞等待指定时长;若在超时前被唤醒则重试写入,若等待超时仍未成功则WaitEnqueue返回false,放弃写入
测试代码:
#include <iostream>
#include "cyber/cyber.h"
using namespace apollo::cyber::base;

// Demo: a capacity-3 queue with a 1000 ms timeout strategy. The first
// three WaitEnqueue calls succeed and print 1; the fourth blocks until
// the timeout expires and prints 0.
int main(int argc, char* argv[]) {
  std::cout << "---------------->start" << std::endl;
  BoundedQueue<int> queue;
  queue.Init(3, new TimeoutBlockWaitStrategy(1000));
  for (int value = 1; value <= 4; ++value) {
    bool ok = queue.WaitEnqueue(value);
    std::cout << ok << std::endl;
  }
  std::cout << "---------------->end" << std::endl;
  return 0;
}