第五章 基于锁的并发数据结构
5.1 并发设计指南
- 设计并发数据结构要考虑两点,一是确保访问 thread-safe,二是提高并发度
- thread-safe 基本要求如下
- 数据结构的不变量(invariant)被一个线程破坏时,确保不被线程看到此状态
- 提供操作完整的函数来避免数据结构接口中固有的 race condition
- 注意数据结构出现异常时的行为,以确保不变量不被破坏
- 限制锁的范围,避免可能的嵌套锁,最小化死锁的概率
- 作为数据结构的设计者,要提高数据结构的并发度,可以从以下角度考虑
- 部分操作能否在锁的范围外执行
- 数据结构的不同部分是否被不同的 mutex 保护
- 是否所有操作需要同级别的保护
- 在不影响操作语义的前提下,能否对数据结构做简单的修改提高并发度
- 总结为一点,即最小化线程对共享数据的轮流访问,最大化真实的并发量
5.2 基于锁的并发数据结构
5.2.1 线程安全栈
#include <exception>
#include <mutex>
#include <stack>
#include <memory>
#include <iostream>
#include <thread>
using namespace std;
// Thread-safe stack: a std::stack guarded by a single mutex. The combined
// "check empty + top + pop" operations close the interface race that separate
// empty()/top()/pop() calls would have.
template<typename T>
class thread_safe_stack {
private:
    std::stack<T> data;
    mutable std::mutex m; // mutable so const empty() can lock it
public:
    thread_safe_stack() {}
    // Copy under the source's lock so a consistent snapshot is taken.
    thread_safe_stack(const thread_safe_stack& other) {
        std::lock_guard<std::mutex> lock(other.m);
        data = other.data;
    }
    thread_safe_stack& operator=(const thread_safe_stack&) = delete;
    void push(T value) {
        std::lock_guard<std::mutex> lock(m);
        data.push(std::move(value));
    }
    // Pop and return the top element; throws std::logic_error when empty.
    std::shared_ptr<T> pop() {
        std::lock_guard<std::mutex> lock(m);
        if (data.empty()) {
            throw logic_error("empty");
        }
        auto ptr = std::make_shared<T>(std::move(data.top()));
        data.pop();
        return ptr;
    }
    // Overload writing the popped value into a caller-provided slot;
    // throws std::logic_error when empty.
    void pop(T& value) {
        std::lock_guard<std::mutex> lock(m);
        if (data.empty()) {
            throw logic_error("empty");
        }
        value = std::move(data.top());
        data.pop();
    }
    bool empty() const {
        std::lock_guard<std::mutex> lock(m);
        return data.empty();
    }
};
// Demo producer: sleeps for a thread-dependent 0-5 s delay, then pushes
// `value` onto the shared stack.
// Fixed: the original called srand(time(nullptr)) in every thread, which
// seeds all concurrently started threads with the same timestamp (identical
// rand() sequences) and races on rand()'s shared hidden state. A hash of the
// thread id yields a per-thread delay with no shared state at all.
template<typename T>
void push_thread(thread_safe_stack<T>& stack, T value) {
    std::size_t id_hash = std::hash<std::thread::id>{}(std::this_thread::get_id());
    int sec = static_cast<int>(id_hash % 6);
    std::this_thread::sleep_for(std::chrono::seconds(sec));
    stack.push(value);
}
int main() {
thread_safe_stack<int> my_stack;
thread t1(push_thread<int>, ref(my_stack), 1);
thread t2(push_thread<int>, ref(my_stack), 2);
thread t3(push_thread<int>, ref(my_stack), 3);
thread t4(push_thread<int>, ref(my_stack), 4);
thread t5(push_thread<int>, ref(my_stack), 5);
thread t6(push_thread<int>, ref(my_stack), 6);
t1.join();
t2.join();
t3.join();
t4.join();
t5.join();
t6.join();
while (!my_stack.empty()) {
cout << *my_stack.pop() << " ";
}
}
输出结果:
4 2 6 3 1 5
5.2.2 线程安全队列
- 使用锁和条件变量
- 元素以 std::shared_ptr 存储,内存分配提前到 push 中进行,避免在 pop 中调用 make_shared 时发生异常
#include <exception>
#include <mutex>
#include <queue>
#include <memory>
#include <iostream>
#include <thread>
#include <condition_variable>
using namespace std;
// Thread-safe queue built on std::queue + one mutex + a condition variable.
// Elements are stored as std::shared_ptr<T> so allocation happens in push,
// outside any consumer's critical section, and popping cannot throw while
// transferring the result.
template<typename T>
class thread_safe_queue {
private:
    mutable std::mutex mut; // mutable so const empty() can lock it
    std::queue<std::shared_ptr<T>> data_queue;
    std::condition_variable data_cond;
public:
    thread_safe_queue() {}
    // Copy under the source's lock; stored shared_ptrs are shared between
    // the two queues, not deep-copied.
    thread_safe_queue(const thread_safe_queue& other) {
        std::lock_guard<std::mutex> lock(other.mut);
        data_queue = other.data_queue;
    }
    thread_safe_queue& operator=(thread_safe_queue& other) = delete;
    void push(T data) {
        // fixed: allocate before taking the lock (shorter critical section)
        // and move `data` instead of copying it into make_shared
        auto p = std::make_shared<T>(std::move(data));
        std::lock_guard<std::mutex> lk(mut);
        data_queue.push(std::move(p));
        data_cond.notify_one(); // wake exactly one waiting consumer
    }
    // Blocks until an element is available, then moves it into `value`.
    void wait_and_pop(T& value) {
        std::unique_lock<std::mutex> lk(mut);
        data_cond.wait(lk, [this] {return !data_queue.empty();});
        value = std::move(*data_queue.front());
        data_queue.pop();
    }
    // Non-blocking pop; returns false when the queue is empty.
    bool try_pop(T& value) {
        std::lock_guard<std::mutex> lk(mut);
        if (data_queue.empty())
            return false;
        value = std::move(*data_queue.front());
        data_queue.pop();
        return true;
    }
    // Blocks until an element is available and returns it by shared_ptr.
    std::shared_ptr<T> wait_and_pop() {
        std::unique_lock<std::mutex> lk(mut);
        data_cond.wait(lk, [this] {return !data_queue.empty();});
        std::shared_ptr<T> res = data_queue.front();
        data_queue.pop();
        return res;
    }
    // Non-blocking pop; returns nullptr when the queue is empty.
    std::shared_ptr<T> try_pop() {
        std::lock_guard<std::mutex> lk(mut);
        if (data_queue.empty())
            return std::shared_ptr<T>();
        std::shared_ptr<T> res = data_queue.front();
        data_queue.pop();
        return res;
    }
    // fixed: empty() is now const (mut was already mutable)
    bool empty() const {
        std::lock_guard<std::mutex> lk(mut);
        return data_queue.empty();
    }
};
// Demo producer: sleeps for a thread-dependent 0-5 s delay, then pushes
// `value` into the shared queue.
// Fixed: the original called srand(time(nullptr)) in every thread, which
// seeds all concurrently started threads with the same timestamp (identical
// rand() sequences) and races on rand()'s shared hidden state. A hash of the
// thread id yields a per-thread delay with no shared state at all.
template<typename T>
void push_thread(thread_safe_queue<T>& queue, T value) {
    std::size_t id_hash = std::hash<std::thread::id>{}(std::this_thread::get_id());
    int sec = static_cast<int>(id_hash % 6);
    std::this_thread::sleep_for(std::chrono::seconds(sec));
    queue.push(value);
}
int main() {
thread_safe_queue<int> my_queue;
thread t1(push_thread<int>, ref(my_queue), 1);
thread t2(push_thread<int>, ref(my_queue), 2);
thread t3(push_thread<int>, ref(my_queue), 3);
thread t4(push_thread<int>, ref(my_queue), 4);
thread t5(push_thread<int>, ref(my_queue), 5);
thread t6(push_thread<int>, ref(my_queue), 6);
t1.join();
t2.join();
t3.join();
t4.join();
t5.join();
t6.join();
while (!my_queue.empty()) {
cout << *my_queue.wait_and_pop() << " ";
}
}
输出结果:
3 1 6 5 2 4
5.2.3 线程安全队列(链表实现细粒度)
- 之前实现过的 thread-safe stack 和 thread-safe queue 都是用一把锁保护整个数据结构,这限制了并发性:多线程在成员函数中阻塞时,同一时间只有一个线程能工作。这种限制主要是因为内部实现使用的是 std::queue,为了支持更高的并发,需要更换内部的实现方式,使用细粒度的(fine-grained)锁。最简单的实现方式是包含头尾指针的单链表,不考虑并发的单链表实现如下
#include <memory>
#include <iostream>
using namespace std;
// Single-threaded singly linked queue with head/tail pointers (the starting
// point for the fine-grained locked version).
// Fixed: the members were declared as `head`/`tail` but used as
// `head_`/`tail_` (did not compile), `tail` was left uninitialized, try_pop
// read a non-existent field `v` instead of `value`, and `tail_` was left
// dangling after the last element was popped (a later push would then write
// through a freed node).
template<typename T>
class my_queue {
private:
    struct Node {
        T value;
        std::unique_ptr<Node> next;
        Node(T _value) : value(std::move(_value)), next(nullptr) {}
    };
    std::unique_ptr<Node> head_;   // owns the whole chain
    Node* tail_ = nullptr;         // raw back-pointer; null when empty
public:
    my_queue() = default;
    my_queue(const my_queue& other) = delete;
    my_queue& operator=(const my_queue& other) = delete;
    // Append at the tail; the first element also becomes the head.
    void push(T x) {
        auto new_node = std::make_unique<Node>(std::move(x));
        Node* new_tail_node = new_node.get();
        if (tail_) {
            tail_->next = std::move(new_node);
        }
        else {
            head_ = std::move(new_node);
        }
        tail_ = new_tail_node;
    }
    // Remove and return the front element; nullptr when empty.
    std::shared_ptr<T> try_pop() {
        if (!head_) {
            return nullptr;
        }
        auto res = std::make_shared<T>(std::move(head_->value));
        std::unique_ptr<Node> head_node = std::move(head_);
        head_ = std::move(head_node->next);
        if (!head_) {
            tail_ = nullptr; // popped the last node: keep tail_ from dangling
        }
        return res;
    }
};
- 即使用两个 mutex 分别保护头尾指针,这个实现在多线程下也有明显问题。push 可以同时修改头尾指针,会对两个 mutex 上锁,另外仅有一个元素时头尾指针相等,push 写和 try_pop 读的 next 节点是同一对象,产生了竞争,锁的也是同一个 mutex
- 该问题很容易解决,在头节点前初始化一个 dummy 节点即可,这样 push 只访问尾节点,不会再与 try_pop 竞争头节点
#include <memory>
#include <iostream>
using namespace std;
// Single-threaded queue with a dummy node: push only ever touches the tail
// side and pop only the head side, which is what later lets the two ends be
// locked independently. head_ == tail_ means the queue is empty.
template <typename T>
class Queue {
private:
    struct Node {
        std::shared_ptr<T> v;       // payload; null in the dummy node
        std::unique_ptr<Node> next;
    };
    std::unique_ptr<Node> head_;
    Node* tail_ = nullptr;
public:
    // Start with one dummy node shared by head_ and tail_.
    Queue() : head_(new Node), tail_(head_.get()) {}
    Queue(const Queue&) = delete;
    Queue& operator=(const Queue&) = delete;
    // Fill the current dummy with the value and append a fresh dummy.
    void push(T x) {
        auto new_val = std::make_shared<T>(std::move(x));
        auto new_node = std::make_unique<Node>();
        Node* new_tail_node = new_node.get();
        tail_->v = new_val;
        tail_->next = std::move(new_node);
        tail_ = new_tail_node;
    }
    // Remove and return the front element; nullptr when empty.
    std::shared_ptr<T> try_pop() {
        if (head_.get() == tail_) {
            return nullptr; // only the dummy left
        }
        std::shared_ptr<T> res = head_->v; // fixed: was `head->v` (typo, did not compile)
        std::unique_ptr<Node> head_node = std::move(head_);
        head_ = std::move(head_node->next);
        return res;
    }
};
- 接着加上锁,锁的范围应该尽可能小
#include <memory>
#include <mutex>
#include <utility>
// Fine-grained thread-safe queue: head and tail are guarded by separate
// mutexes, and a permanent dummy node guarantees push (tail side) never
// touches the node that try_pop (head side) reads.
template <typename T>
class ConcurrentQueue {
private:
    struct Node {
        std::shared_ptr<T> v;       // payload; null in the dummy node
        std::unique_ptr<Node> next;
    };
    std::unique_ptr<Node> head_;
    Node* tail_ = nullptr;
    std::mutex head_mutex_;
    std::mutex tail_mutex_;
    // Read the tail pointer under its own lock so the comparison in
    // pop_head sees a consistent value.
    Node* get_tail() {
        std::lock_guard<std::mutex> tail_lock(tail_mutex_);
        return tail_;
    }
    // Detach and return the front node, or nullptr when only the dummy
    // remains (queue empty). Only pointer assignments happen under the lock.
    std::unique_ptr<Node> pop_head() {
        std::lock_guard<std::mutex> head_lock(head_mutex_);
        if (head_.get() == get_tail()) {
            return nullptr;
        }
        std::unique_ptr<Node> old_head = std::move(head_);
        head_ = std::move(old_head->next);
        return old_head;
    }
public:
    // The queue always holds one dummy node; head_ == tail_ means empty.
    ConcurrentQueue() : head_(new Node), tail_(head_.get()) {}
    ConcurrentQueue(const ConcurrentQueue&) = delete;
    ConcurrentQueue& operator=(const ConcurrentQueue&) = delete;
    // Fill the current dummy node and append a fresh dummy; value and node
    // are allocated before the tail lock is taken.
    void push(T x) {
        auto value = std::make_shared<T>(std::move(x));
        auto fresh_dummy = std::make_unique<Node>();
        Node* next_tail = fresh_dummy.get();
        std::lock_guard<std::mutex> tail_lock(tail_mutex_);
        tail_->v = value;
        tail_->next = std::move(fresh_dummy);
        tail_ = next_tail;
    }
    // Non-blocking pop; nullptr when empty. The node itself is destroyed
    // here, outside any lock.
    std::shared_ptr<T> try_pop() {
        std::unique_ptr<Node> old_head = pop_head();
        if (!old_head) {
            return nullptr;
        }
        return old_head->v;
    }
};
- push 中创建新值和新节点都没上锁,多线程可并发创建新值和新节点。虽然同时只有一个线程能添加新节点,但这只需要一个指针赋值操作,锁住尾节点的时间很短。try_pop 中对尾节点只是用来做一次比较,持有尾部锁的时间同样很短,因此 try_pop 和 push 几乎可以同时调用。try_pop 中锁住头节点所做的也只是指针赋值操作,开销较大的析构在锁外进行,这意味着虽然同时只有一个线程能 pop_head,但允许多线程删除节点并返回数据,提升了 try_pop 的并发调用数量
- 最后再结合 std::condition_variable 实现 wait_and_pop,即得到与之前接口相同但并发度更高的 thread-safe queue
#include <condition_variable>
#include <memory>
#include <mutex>
#include <utility>
// Fine-grained thread-safe queue combining the two-mutex dummy-node design
// with a std::condition_variable so consumers can block until data arrives.
// A dummy node always sits at the tail: head_ == tail_ means empty, and
// push (tail side) never touches the node that pop (head side) reads.
template <typename T>
class ConcurrentQueue {
public:
    // Start with one dummy node shared by head_ and tail_.
    ConcurrentQueue() : head_(new Node), tail_(head_.get()) {}
    ConcurrentQueue(const ConcurrentQueue&) = delete;
    ConcurrentQueue& operator=(const ConcurrentQueue&) = delete;
    // Store the value in the current dummy node and append a new dummy.
    // Allocation happens before locking; only two pointer stores occur
    // under tail_mutex_.
    void push(T x) {
        auto new_val = std::make_shared<T>(std::move(x));
        auto new_node = std::make_unique<Node>();
        Node* new_tail_node = new_node.get();
        {
            std::lock_guard<std::mutex> l(tail_mutex_);
            tail_->v = new_val;
            tail_->next = std::move(new_node);
            tail_ = new_tail_node;
        }
        cv_.notify_one(); // notify after the scope releases the tail lock
    }
    // Non-blocking pop; returns nullptr when the queue is empty.
    std::shared_ptr<T> try_pop() {
        std::unique_ptr<Node> head_node = try_pop_head();
        return head_node ? head_node->v : nullptr;
    }
    // Non-blocking pop into `res`; returns false when the queue is empty.
    bool try_pop(T& res) {
        std::unique_ptr<Node> head_node = try_pop_head(res);
        return head_node != nullptr;
    }
    // Blocking pop: waits on cv_ until an element is available.
    std::shared_ptr<T> wait_and_pop() {
        std::unique_ptr<Node> head_node = wait_pop_head();
        return head_node->v;
    }
    // Blocking pop into `res`.
    void wait_and_pop(T& res) { wait_pop_head(res); }
    bool empty() const {
        std::lock_guard<std::mutex> l(head_mutex_);
        return head_.get() == get_tail();
    }
private:
    struct Node {
        std::shared_ptr<T> v;       // payload; null in the dummy node
        std::unique_ptr<Node> next; // owning link to the next node
    };
private:
    std::unique_ptr<Node> try_pop_head() {
        std::lock_guard<std::mutex> l(head_mutex_);
        if (head_.get() == get_tail()) {
            return nullptr; // only the dummy left: empty
        }
        return pop_head();
    }
    std::unique_ptr<Node> try_pop_head(T& res) {
        std::lock_guard<std::mutex> l(head_mutex_);
        if (head_.get() == get_tail()) {
            return nullptr;
        }
        res = std::move(*head_->v);
        return pop_head();
    }
    // The lock returned by wait_for_data() is adopted here, so the wait and
    // the pop form one critical section on head_mutex_.
    std::unique_ptr<Node> wait_pop_head() {
        std::unique_lock<std::mutex> l(wait_for_data());
        return pop_head();
    }
    std::unique_ptr<Node> wait_pop_head(T& res) {
        std::unique_lock<std::mutex> l(wait_for_data());
        res = std::move(*head_->v);
        return pop_head();
    }
    // Blocks until the queue is non-empty, then returns the still-held head
    // lock to the caller.
    std::unique_lock<std::mutex> wait_for_data() {
        std::unique_lock<std::mutex> l(head_mutex_);
        cv_.wait(l, [this] { return head_.get() != get_tail(); });
        return l;
    }
    // Precondition: caller holds head_mutex_ and the queue is non-empty.
    // Only pointer moves happen here; node destruction is done by the caller
    // outside the lock.
    std::unique_ptr<Node> pop_head() {
        std::unique_ptr<Node> head_node = std::move(head_);
        head_ = std::move(head_node->next);
        return head_node;
    }
    // Read tail_ under its own mutex so the head side sees a consistent value.
    Node* get_tail() const {
        std::lock_guard<std::mutex> l(tail_mutex_);
        return tail_;
    }
private:
    std::unique_ptr<Node> head_;    // owns the whole chain from the front
    Node* tail_ = nullptr;          // raw pointer to the dummy tail node
    mutable std::mutex head_mutex_; // mutable: locked in const empty()/get_tail()
    mutable std::mutex tail_mutex_;
    std::condition_variable cv_;    // signaled by push, waited on by wait_and_pop
};
5.2.4 线程安全查询表
- 并发访问 std::map 和 std::unordered_map 的接口的问题在于迭代器:其他线程删除元素时会导致迭代器失效,因此 thread-safe map 的接口设计就要跳过迭代器
- 为了使用细粒度锁,就不应该使用标准库容器。可选的关联容器数据结构有三种
- 一是二叉树(如红黑树),但每次查找修改都要从访问根节点开始,也就表示根节点需要上锁,尽管沿着树向下访问节点时会解锁,但这个比起覆盖整个数据结构的单个锁好不了多少
- 第二种方式是有序数组,这比二叉树还差,因为无法提前得知一个给定的值应该放在哪,于是同样需要一个覆盖整个数组的锁
- 第三种方式是哈希表。假如有一个固定数量的桶,一个 key 属于哪个桶取决于 key 的属性和哈希函数,这意味着可以安全地分开锁住每个桶。如果使用读写锁,就能将并发度提高相当于桶数量的倍数
#include <algorithm>
#include <functional>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <shared_mutex>
#include <utility>
#include <vector>
#include <string>
#include <iostream>
// Thread-safe hash map: a fixed array of buckets, each a list guarded by its
// own shared_mutex, so readers share a bucket and different buckets never
// contend at all.
// Fixed: To_map locked a non-existent member `x->m` (the mutex is named
// `mutex`), and erase's find_if lambda took `Key&` instead of the stored
// pair — both failed to compile once instantiated.
template <typename Key, typename Value, typename Hash = std::hash<Key>>
class Thread_safe_map {
public:
    // 19 buckets by default: a prime bucket count spreads keys evenly when
    // the bucket index is hash % bucket-count.
    Thread_safe_map(std::size_t n = 19, const Hash& h = Hash{}) : buckets(n), hasher(h) {
        for (auto& x : buckets) {
            x.reset(new bucket);
        }
    }
    Thread_safe_map(const Thread_safe_map& other) = delete;
    Thread_safe_map& operator=(const Thread_safe_map& other) = delete;
    // Read a value; returns default_value when the key is absent.
    Value Get(const Key& key, const Value& default_value = Value{}) const {
        return get_bucket(key).get(key, default_value);
    }
    // Insert or overwrite.
    void Set(const Key& key, const Value& value) {
        get_bucket(key).set(key, value);
    }
    // Remove the key if present; no-op otherwise.
    void Erase(const Key& key) {
        get_bucket(key).erase(key);
    }
    // Snapshot the whole table into a std::map. Takes every bucket lock
    // exclusively for the duration, so use sparingly.
    std::map<Key, Value> To_map() const {
        std::vector<std::unique_lock<std::shared_mutex>> locks;
        for (auto& x : buckets) {
            locks.emplace_back(std::unique_lock<std::shared_mutex>(x->mutex));
        }
        std::map<Key, Value> res;
        for (auto& x : buckets) {
            for (auto& y : x->data) {
                res.emplace(y);
            }
        }
        return res;
    }
private:
    struct bucket {
        std::list<std::pair<Key, Value>> data;
        // Per-bucket reader/writer lock; mutable so const get() can lock it.
        mutable std::shared_mutex mutex;
        Value get(const Key& key, const Value& default_value) const {
            std::shared_lock<std::shared_mutex> lock(mutex); // shared: read-only
            auto res = std::find_if(data.begin(), data.end(),
                [&](auto& x) {return x.first == key;});
            return res == data.end() ? default_value : res->second;
        }
        void set(const Key& key, const Value& value) {
            std::unique_lock<std::shared_mutex> lock(mutex); // exclusive: write
            auto res = std::find_if(data.begin(), data.end(),
                [&](auto& x) {return x.first == key;});
            if (res != data.end()) {
                res->second = value;
            }
            else {
                data.emplace_back(key, value);
            }
        }
        void erase(const Key& key) {
            std::unique_lock<std::shared_mutex> lock(mutex); // exclusive: write
            auto res = std::find_if(data.begin(), data.end(),
                [&](auto& x) {return x.first == key;});
            if (res != data.end()) {
                data.erase(res);
            }
        }
    };
private:
    std::vector<std::unique_ptr<bucket>> buckets;
    Hash hasher;
    // Const because the bucket array itself never changes after construction.
    bucket& get_bucket(const Key& key) const {
        return *buckets[hasher(key) % buckets.size()];
    }
};
5.2.5 线程安全链表
#include <memory>
#include <mutex>
#include <utility>
#include <iostream>
// Thread-safe singly linked list using hand-over-hand (lock-coupling)
// locking: every node carries its own mutex, and traversal always holds the
// current node's lock before taking the next one, so concurrent traversals
// and removals interleave safely.
template <typename T>
class Thread_safe_list {
public:
    Thread_safe_list() = default;
    Thread_safe_list(const Thread_safe_list&) = delete;
    Thread_safe_list& operator=(const Thread_safe_list&) = delete;
    ~Thread_safe_list() {
        // fixed: the predicate now takes const T& — the old const Node&
        // version implicitly built a throw-away Node (and a shared_ptr<T>)
        // from every element via the non-explicit Node(const T&) ctor
        remove_if([](const T&) { return true; });
    }
    // Insert at the front; only the head mutex is needed.
    void push_front(const T& value) {
        std::unique_ptr<Node> new_node(new Node(value));
        std::lock_guard<std::mutex> lock(head.mutex);
        new_node->next = std::move(head.next);
        head.next = std::move(new_node);
    }
    // Apply f to every element. The current lock is released before f runs,
    // so f must not re-enter this list.
    template <typename F>
    void for_each(F f) {
        Node* cur = &head;
        std::unique_lock<std::mutex> cur_lock(head.mutex);
        while (Node* next = cur->next.get()) {
            std::unique_lock<std::mutex> next_lock(next->mutex);
            cur_lock.unlock();
            f(*next->value);
            cur = next;
            // fixed: qualified std::move — the bare `move` only resolved
            // through ADL on namespace std, which is fragile
            cur_lock = std::move(next_lock);
        }
    }
    // Return the first element satisfying f, or nullptr if none does.
    template <typename F>
    std::shared_ptr<T> find_first_if(F f) {
        Node* cur = &head;
        std::unique_lock<std::mutex> cur_lock(head.mutex);
        while (Node* next = cur->next.get()) {
            std::unique_lock<std::mutex> next_lock(next->mutex);
            cur_lock.unlock();
            if (f(*next->value)) {
                return next->value;
            }
            cur = next;
            cur_lock = std::move(next_lock);
        }
        return nullptr;
    }
    // Remove every element satisfying f.
    template <typename F>
    void remove_if(F f) {
        Node* cur = &head;
        std::unique_lock<std::mutex> cur_lock(head.mutex);
        while (Node* next = cur->next.get()) {
            std::unique_lock<std::mutex> next_lock(next->mutex);
            if (f(*next->value)) {
                // Unlink while holding both locks; the node is destroyed
                // after next_lock is released.
                std::unique_ptr<Node> next_node(std::move(cur->next));
                cur->next = std::move(next_node->next);
                next_lock.unlock();
            }
            else {
                cur_lock.unlock();
                cur = next;
                cur_lock = std::move(next_lock);
            }
        }
    }
private:
    struct Node {
        std::mutex mutex;            // per-node lock for hand-over-hand traversal
        std::shared_ptr<T> value;    // null only in the sentinel head
        std::unique_ptr<Node> next;
        Node() = default;
        Node(const T& _value) : value(std::make_shared<T>(_value)) {}
    };
    Node head; // sentinel: carries the list-head lock, holds no value
};
// Demo driver: push 1..3 (list ends up 3,2,1 front-to-back) and print each
// element on its own line.
int main() {
    Thread_safe_list<int> list;
    int values[] = {1, 2, 3};
    for (int v : values) {
        list.push_front(v);
    }
    list.for_each([](int& x) {std::cout << x << std::endl;});
}