Contents of this post:
Notes on common C++ multithreading interview questions and their code.
1. Singleton pattern
Lazy initialization
The instance is created only on first use. Note that this naive version is not thread-safe: two threads can race on the nullptr check.
class singleton {
private:
    static singleton* p;
    singleton() {}                      // private constructor: no outside instantiation
public:
    static singleton* instance();
};

singleton* singleton::p = nullptr;      // static member definition

singleton* singleton::instance() {
    if (p == nullptr) {                 // check and creation are not atomic
        p = new singleton();
    }
    return p;
}
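To make the lazy version safe under concurrent first calls, one option is std::call_once. A minimal sketch, assuming C++11 <mutex>; the class name singleton_safe is illustrative, not part of the original:

#include <mutex>

class singleton_safe {
private:
    static singleton_safe* p;
    static std::once_flag flag;
    singleton_safe() {}
public:
    static singleton_safe* instance()
    {
        // call_once runs the lambda exactly once, even if several
        // threads reach instance() at the same time
        std::call_once(flag, [] { p = new singleton_safe(); });
        return p;
    }
};
singleton_safe* singleton_safe::p = nullptr;
std::once_flag singleton_safe::flag;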
Eager initialization
The instance is created during static initialization, before main() runs, so instance() itself involves no race.
class singleton2 {
private:
    static singleton2* p;
    singleton2() {}
public:
    static singleton2* instance();
};

// created eagerly at static initialization time
singleton2* singleton2::p = new singleton2();
singleton2* singleton2::instance() { return p; }
A better C++11 implementation (Meyers singleton): C++11 guarantees that a function-local static is initialized exactly once, even under concurrent calls, so no explicit locking is needed.
class singleton_th {
private:
    singleton_th() {}
    ~singleton_th() {}
    singleton_th(const singleton_th&);              // declared but never defined: no copying
    singleton_th& operator=(const singleton_th&);
public:
    static singleton_th& instance();
};

singleton_th& singleton_th::instance() {
    static singleton_th inst;   // initialized exactly once, thread-safe since C++11
    return inst;
}
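The same idea with the copy operations removed explicitly via = delete, which is the more common C++11 style; a minimal sketch, behaviourally equivalent to the class above (singleton_cpp11 is an illustrative name):

class singleton_cpp11 {
private:
    singleton_cpp11() {}
    ~singleton_cpp11() {}
public:
    singleton_cpp11(const singleton_cpp11&) = delete;
    singleton_cpp11& operator=(const singleton_cpp11&) = delete;
    static singleton_cpp11& instance()
    {
        static singleton_cpp11 inst;   // thread-safe lazy initialization
        return inst;
    }
};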
2. Producer-consumer
Implementation with condition variables
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>
using namespace std;
class Producer_Consumer
{
private:
    size_t begin;               // index of the oldest element
    size_t end;                 // index of the next free slot
    size_t cur;                 // number of elements currently stored
    vector<int> buffer;         // fixed-size ring buffer
    condition_variable not_full;
    condition_variable not_empty;
    mutex mut;
public:
    // prevent copying
    Producer_Consumer(const Producer_Consumer& rhs) = delete;
    Producer_Consumer& operator=(const Producer_Consumer& rhs) = delete;
    // init
    Producer_Consumer(size_t sz): begin(0), end(0), cur(0), buffer(sz) {}
    void Produce(int n)
    {
        {
            unique_lock<mutex> lock(mut);
            // wait until there is at least one free slot
            not_full.wait(lock, [this]{ return cur < buffer.size(); });
            buffer[end] = n;
            end = (end + 1) % buffer.size();
            ++cur;
        }
        // notify outside the lock so the woken consumer is not blocked immediately
        not_empty.notify_one();
    }
    int Consume()
    {
        unique_lock<mutex> lock(mut);
        // wait until there is at least one element
        not_empty.wait(lock, [this]{ return cur > 0; });
        int n = buffer[begin];
        begin = (begin + 1) % buffer.size();
        --cur;
        lock.unlock();
        not_full.notify_one();
        return n;
    }
};
Producer_Consumer buffers(2);
mutex io_mutex;
void Producer()
{
    int n = 0;
    while (n < 10)
    {
        buffers.Produce(n);
        unique_lock<mutex> lock(io_mutex);
        cout << "Produce --- " << n << endl;
        lock.unlock();
        n++;
    }
    // -1 is a sentinel telling a consumer to stop
    buffers.Produce(-1);
}
void Consumer()
{
    thread::id thread_id = this_thread::get_id();
    int n = 0;
    do
    {
        n = buffers.Consume();
        unique_lock<mutex> lock(io_mutex);
        cout << "Consume --- " << n << " (thread " << thread_id << ")" << endl;
        lock.unlock();
    } while (n != -1);
    // put the sentinel back so the remaining consumers can also exit
    buffers.Produce(-1);
}
int main(int argc, char const *argv[])
{
    vector<thread> threads;
    threads.push_back(thread(&Producer));
    threads.push_back(thread(&Consumer));
    threads.push_back(thread(&Consumer));
    threads.push_back(thread(&Consumer));
    for (auto & t : threads)
    {
        t.join();
    }
    return 0;
}
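Besides condition variables, the same bounded buffer is often asked for with semaphores. A minimal sketch, assuming C++20 <semaphore> is available; SemBuffer and its member names are illustrative, not part of the code above:

#include <cstddef>
#include <mutex>
#include <semaphore>
#include <vector>

class SemBuffer {
private:
    std::vector<int> buffer;                 // fixed-size ring buffer
    size_t begin = 0, end = 0;
    std::mutex mut;                          // protects the indices and the buffer
    std::counting_semaphore<> free_slots;    // counts empty slots
    std::counting_semaphore<> filled_slots;  // counts filled slots
public:
    explicit SemBuffer(size_t sz)
        : buffer(sz),
          free_slots(static_cast<std::ptrdiff_t>(sz)),
          filled_slots(0) {}
    void Produce(int n)
    {
        free_slots.acquire();                // block while the buffer is full
        {
            std::lock_guard<std::mutex> lock(mut);
            buffer[end] = n;
            end = (end + 1) % buffer.size();
        }
        filled_slots.release();              // one more item available
    }
    int Consume()
    {
        filled_slots.acquire();              // block while the buffer is empty
        int n;
        {
            std::lock_guard<std::mutex> lock(mut);
            n = buffer[begin];
            begin = (begin + 1) % buffer.size();
        }
        free_slots.release();                // one more slot available
        return n;
    }
};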
3. C++ thread pool
Reference: https://github.com/progschj/ThreadPool/blob/master/ThreadPool.h
#ifndef THREAD_POOL_H
#define THREAD_POOL_H
#include <vector>
#include <queue>
#include <memory>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <future>
#include <functional>
#include <stdexcept>
class ThreadPool {
public:
    ThreadPool(size_t);
    template<class F, class... Args>
    auto enqueue(F&& f, Args&&... args)
        -> std::future<typename std::result_of<F(Args...)>::type>;
    ~ThreadPool();
private:
    // need to keep track of threads so we can join them
    std::vector< std::thread > workers;
    // the task queue
    std::queue< std::function<void()> > tasks;
    // synchronization
    std::mutex queue_mutex;
    std::condition_variable condition;
    bool stop;
};
// the constructor just launches some amount of workers
inline ThreadPool::ThreadPool(size_t threads)
    : stop(false)
{
    for(size_t i = 0;i<threads;++i)
        workers.emplace_back(
            [this]
            {
                for(;;)
                {
                    std::function<void()> task;
                    {
                        std::unique_lock<std::mutex> lock(this->queue_mutex);
                        this->condition.wait(lock,
                            [this]{ return this->stop || !this->tasks.empty(); });
                        if(this->stop && this->tasks.empty())
                            return;
                        task = std::move(this->tasks.front());
                        this->tasks.pop();
                    }
                    task();
                }
            }
        );
}
// add new work item to the pool
template<class F, class... Args>
auto ThreadPool::enqueue(F&& f, Args&&... args)
    -> std::future<typename std::result_of<F(Args...)>::type>
{
    using return_type = typename std::result_of<F(Args...)>::type;
    auto task = std::make_shared< std::packaged_task<return_type()> >(
        std::bind(std::forward<F>(f), std::forward<Args>(args)...)
    );
    std::future<return_type> res = task->get_future();
    {
        std::unique_lock<std::mutex> lock(queue_mutex);
        // don't allow enqueueing after stopping the pool
        if(stop)
            throw std::runtime_error("enqueue on stopped ThreadPool");
        tasks.emplace([task](){ (*task)(); });
    }
    condition.notify_one();
    return res;
}
// the destructor joins all threads
inline ThreadPool::~ThreadPool()
{
    {
        std::unique_lock<std::mutex> lock(queue_mutex);
        stop = true;
    }
    condition.notify_all();
    for(std::thread &worker: workers)
        worker.join();
}
#endif
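A small usage sketch for the pool above. The file name ThreadPool.h, the task lambda, and main() are assumptions for illustration; enqueue() returns a std::future, so results can be collected after all tasks have been submitted:

#include <iostream>
#include <vector>
#include <future>
#include "ThreadPool.h"   // assumption: the header above is saved under this name

int main()
{
    ThreadPool pool(4);                       // four worker threads
    std::vector<std::future<int>> results;
    for (int i = 0; i < 8; ++i)
        results.emplace_back(pool.enqueue([i] { return i * i; }));
    for (auto& r : results)
        std::cout << r.get() << ' ';          // blocks until each task finishes
    std::cout << std::endl;
    return 0;
}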