源代码地址: https://github.com/heisai/threadpool
ThreadPool c++17
- 采用多线程多队列,每个线程控制一个队列,替代老的多个线程共用一个队列。
- 将任务拆分多个下发给每个线程,每个线程掌管 M(tasks) / N(threads) 个任务
- 每 M(tasks) / N(threads) 个任务共用一个队列,减少锁竞争。
使用方法:
-
初始化线程池
ThreadPool pool(4); // 4 个线程
std::vector<std::future<int>> vec;
-
添加任务
// 案例1: lambda 表达式
for (int i = 0; i < 20; i++) {
    vec.emplace_back(pool.AddTask([i] {
        std::this_thread::sleep_for(std::chrono::seconds(5));
        return i * i;
    }));
}
// 案例2: 普通函数
for (int i = 0; i < 20; i++) {
    auto taskfun = pool.AddTask(task, 123 + i, "data");
    vec.emplace_back(std::move(taskfun));
}
// 案例3: 类成员函数
TestObj obj;
for (int i = 0; i < 20; i++) {
    std::function<int(int, string)> func = bind(&TestObj::task, &obj, placeholders::_1, placeholders::_2);
    auto taskfun = pool.AddTask(func, 123 + i, "1234567");
    vec.emplace_back(std::move(taskfun));
}
// 案例4: 静态函数
for (int i = 0; i < 20; i++) {
    std::function<int(int, string)> func = bind(&TestObj::taskstatic, placeholders::_1, placeholders::_2);
    auto taskfun = pool.AddTask(func, 123 + i, "1234567");
    vec.emplace_back(std::move(taskfun));
}
-
获取返回值 //没有返回值 可忽略
for(auto &&result:vec) { std::cout<<"get_data:" << result.get()<<std::endl; }
-
#include"threadpool.h"
// Demo: submit 20 slow tasks (5 s sleep each) to a 4-thread pool,
// then block on each future and print the squared result.
int main()
{
    cout << "Hello World!" << endl;
    std::vector<std::future<int>> results;
    ThreadPool pool(4);
    for (int task_no = 0; task_no < 20; ++task_no)
    {
        // Each task captures its index by value and returns its square.
        auto fut = pool.AddTask([task_no] {
            std::this_thread::sleep_for(std::chrono::seconds(5));
            return task_no * task_no;
        });
        results.push_back(std::move(fut));
    }
    // get() blocks until the corresponding task has finished.
    for (auto &&result : results)
    {
        std::cout << "get_data:" << result.get() << std::endl;
    }
    return 0;
}
#include <atomic>
#include <condition_variable>
#include <functional>
#include <future>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>
using namespace std;
//自定义加锁队列。业务中不用关心加锁、解锁。
//职责分化: 业务只负责逻辑,不用过多操心加锁、解锁操作。
//加锁、解锁操作封装进队列中,可复用。
// Thread-safe FIFO queue: all locking/unlocking is encapsulated here so
// callers never touch the mutex directly (reusable building block).
template <typename T> class Queue {
public:
    // Copy an item into the queue and wake one waiting consumer.
    void push(const T &item)
    {
        {
            std::unique_lock<std::mutex> guard(mtx_);
            queue_.push(item);
        }
        cond_.notify_one();
    }
    // Move an item into the queue and wake one waiting consumer.
    void push(T &&item)
    {
        {
            std::unique_lock<std::mutex> guard(mtx_);
            queue_.push(std::move(item));
        }
        cond_.notify_one();
    }
    // Block until an item is available or stop() has been called.
    // After stop(), remaining items are still handed out; once drained,
    // returns false and leaves `item` untouched.
    bool pop(T &item)
    {
        std::unique_lock<std::mutex> guard(mtx_);
        cond_.wait(guard, [this] { return stop_ || !queue_.empty(); });
        if (queue_.empty())
        {
            return false; // stopped and nothing left to deliver
        }
        item = std::move(queue_.front());
        queue_.pop();
        return true;
    }
    // Number of queued items (snapshot taken under the lock).
    std::size_t size() const
    {
        std::unique_lock<std::mutex> guard(mtx_);
        return queue_.size();
    }
    // True when no items are queued (snapshot taken under the lock).
    bool empty() const
    {
        std::unique_lock<std::mutex> guard(mtx_);
        return queue_.empty();
    }
    // Signal shutdown: wake every waiter so pop() can drain and then
    // start returning false.
    void stop()
    {
        {
            std::unique_lock<std::mutex> guard(mtx_);
            stop_ = true;
        }
        cond_.notify_all();
    }
private:
    std::condition_variable cond_;
    mutable std::mutex mtx_;
    std::queue<T> queue_;
    bool stop_ = false;
};
// Thread pool with one task queue per worker thread (reduces lock
// contention compared with a single shared queue).
class ThreadPool {
public:
    // Spin up `thread_num` workers. hardware_concurrency() may return 0,
    // and `id % thread_num` with thread_num == 0 is UB, so clamp to 1.
    explicit ThreadPool(size_t thread_num = std::thread::hardware_concurrency())
        : queues(thread_num ? thread_num : 1), thread_num(thread_num ? thread_num : 1)
    {
        // Create the worker threads; each one drains its own queue.
        for (size_t i = 0; i < this->thread_num; i++)
        {
            workers.emplace_back(&ThreadPool::Run, this, static_cast<int>(i));
        }
    }
    // Owns threads — copying makes no sense; make the deletion explicit.
    ThreadPool(const ThreadPool &) = delete;
    ThreadPool &operator=(const ThreadPool &) = delete;
    // Worker loop (consumer): run tasks from queue `id` until stop() is
    // signalled and the queue has been drained.
    void Run(int id)
    {
        for(;;)
        {
            std::function<void()> task;
            if (!queues[id].pop(task))
            {
                break; // pool is shutting down and queue is empty
            }
#ifdef debuglog
            std::cout<<"消费者ID:"<<std::this_thread::get_id()<<std::endl;
#endif
            task();
        }
    }
    // Submit a task (producer). Returns a future for the task's result.
    // Tasks are distributed round-robin over the per-thread queues via an
    // atomic counter; the old implementation wrote `rand() % thread_num`
    // into an unsynchronized member, which was a data race (and rand()
    // itself is not guaranteed thread-safe).
    template<class F, class... Args>
    auto AddTask(F&& f, Args&&... args)
    {
        using return_type = std::invoke_result_t<F, Args...>;
        auto task = std::make_shared< std::packaged_task<return_type()> >(
            std::bind(std::forward<F>(f), std::forward<Args>(args)...)
        );
        std::future<return_type> res = task->get_future();
        // Pick the target queue; fetch_add keeps this safe under
        // concurrent AddTask calls. relaxed order is enough: only the
        // counter value matters, it publishes no other data.
        const size_t id = next_queue.fetch_add(1, std::memory_order_relaxed) % thread_num;
#ifdef debuglog
        std::cout<<"生产者ID:"<<std::this_thread::get_id()
                 <<" 线程ID:"<<id
                 <<" 线程队列大小:"<<queues[id].size()
                 <<std::endl;
#endif
        // packaged_task is move-only; wrap the shared_ptr so the queued
        // std::function stays copyable.
        queues[id].push([task = std::move(task)] { (*task)(); });
        return res;
    }
    // Stop every queue (pending tasks still run), then join all workers.
    ~ThreadPool()
    {
        for (auto &queue : queues)
        {
            queue.stop();
        }
        for (auto &worker : workers)
        {
            worker.join();
        }
    }
private:
    std::vector<Queue<std::function<void()>>> queues; // one task queue per worker
    size_t thread_num;                                // number of worker threads
    std::vector<std::thread> workers;                 // worker threads
    std::atomic<size_t> next_queue{0};                // round-robin dispatch counter
};