Exploring Thread Pools in C++11

I've recently been studying thread pools (based on C++11). After getting a rough idea of how a thread pool is structured, I started writing one myself, tried out quite a few different designs, and took plenty of detours along the way. What follows is a record of that learning process:

zii ThreadPool 1.0

#pragma once
#include<mutex>
#include<condition_variable>
#include<thread>
#include<assert.h>
#include<vector>
#include<memory>
#include<atomic>
#include<queue>
#include<chrono>
#include<iostream>

template<class Task>
class ThreadPool
{
public:
    explicit ThreadPool(const int& max_task_num)
        :thread_num(8),task_num(max_task_num),is_run(false),is_detection(true),used_thread_num(0)
        {}
    ThreadPool(const ThreadPool&) = delete;
    ThreadPool& operator=(const ThreadPool&) = delete;
    //ThreadPool(ThreadPool&&) = delete;
    ~ThreadPool()
    {
        if(is_run)
            Stop();
    }
    void Start();
    void GetTasks(const std::shared_ptr<Task>&);
    void Stop();
private:
    void ThreadLoop();
    void TakeTasks();
    void DetectionTask();
private:
    const int thread_num;
    const int task_num;
    std::atomic_bool is_run;
    std::atomic_bool is_detection;
    std::atomic_int used_thread_num;
    mutable std::mutex mu_1;
    mutable std::mutex mu_2;
    mutable std::condition_variable cv;
    std::vector<std::shared_ptr<std::thread>>threadpool;
    std::priority_queue<std::shared_ptr<Task>>taskqueue;
};
template<class Task> inline void ThreadPool<Task>::GetTasks(const std::shared_ptr<Task>& task){
    while(taskqueue.size() == (size_t)task_num)
        std::this_thread::sleep_for(std::chrono::seconds(1));
    taskqueue.push(task);// does this push need to be protected by a lock?
    cv.notify_one();
}
template<class Task> inline void ThreadPool<Task>::TakeTasks(){
    std::shared_ptr<Task>task;
    {
        std::lock_guard<std::mutex>lock(mu_2);
        task = taskqueue.top();
        taskqueue.pop();
        task->Work();
        std::cout<<"Thread id:"<<std::this_thread::get_id()<<std::endl;
    }
    --used_thread_num;
}
template<class Task> inline void ThreadPool<Task>::ThreadLoop(){
    //printf("Function ThreadLoop\n");
    while(is_run)
    {
        {
            std::unique_lock<std::mutex>lock(mu_1);
            cv.wait(lock/*,[this]{return used_thread_num < (int)taskqueue.size();}*/);
        }
        ++used_thread_num;
        this->TakeTasks();
    }
}
template<class Task> inline void ThreadPool<Task>::Start(){
    assert(threadpool.empty());// the pool can only be started once
    threadpool.reserve(thread_num);
    is_run = true;
    for(int i = 0;i < thread_num;++i){
        threadpool.emplace_back(std::make_shared<std::thread>(&ThreadPool::ThreadLoop,this));
    }
}
template<class Task> inline void ThreadPool<Task>::Stop(){
    is_run = false;
    is_detection = false;
    cv.notify_all();
    for(auto& thread:threadpool){
        thread->join();
    }
    threadpool.clear();
}

The main problems with 1.0:

  • Why does it use two mutexes?
  • A thread is woken only at the moment a task is submitted. If tasks enter the queue much faster than the threads can process them, the backlog of pending tasks grows quickly; if submission then suddenly stops, the large number of tasks still buffered in the queue is never processed. (The root cause is that the only thing that wakes a thread is a call to GetTasks; a predicate-based fix is sketched right after this list.)
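One way out of this is to make the workers themselves responsible for draining the queue: each worker waits on a predicate that is re-checked after every wakeup, so tasks already buffered in the queue keep being processed even if no further submission call arrives. Below is a minimal, self-contained sketch of that idea; the names (DrainingPool, Submit, the std::function task type) are mine and are not part of the versions discussed in this post.

#include<condition_variable>
#include<functional>
#include<mutex>
#include<queue>
#include<thread>
#include<vector>

class DrainingPool
{
public:
    explicit DrainingPool(int n)
    {
        for(int i = 0;i < n;++i)
            workers.emplace_back([this]{ Loop(); });
    }
    ~DrainingPool()
    {
        {
            std::lock_guard<std::mutex>lock(mu);
            stop = true;
        }
        cv.notify_all();
        for(auto& w : workers) w.join();
    }
    void Submit(std::function<void()> task)
    {
        {
            std::lock_guard<std::mutex>lock(mu);
            tasks.push(std::move(task));
        }
        cv.notify_one();
    }
private:
    void Loop()
    {
        for(;;)
        {
            std::function<void()> task;
            {
                std::unique_lock<std::mutex>lock(mu);
                //the predicate is re-evaluated after every wakeup, so a worker keeps
                //pulling buffered tasks even if no new Submit() call ever arrives
                cv.wait(lock,[this]{ return stop || !tasks.empty(); });
                if(stop && tasks.empty()) return;
                task = std::move(tasks.front());
                tasks.pop();
            }
            task();//run the task outside the lock
        }
    }
    std::vector<std::thread> workers;
    std::queue<std::function<void()>> tasks;
    std::mutex mu;
    std::condition_variable cv;
    bool stop = false;
};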

zii ThreadPool 2.0

#pragma once
#include<iostream>
#include<thread>
#include<mutex>
#include<condition_variable>
#include<vector>
#include<memory>
#include<queue>
#include<future>
#include<assert.h>
#include<atomic>
#include<string>

#define Max_Task_Num 1000

template<class Task> class ThreadPool;

template<class Task>
class Thread
{
public:
    Thread():task_to_be_processed(nullptr),is_run(false),flag(true)
    {
        m_thread = new std::thread(&Thread::Loop,this);
        m_thread->detach();
    }
    ~Thread()
    {

    }
private:
    void Loop();
    void Wait();
    void Wake();
    void TakeTask(Task*);
private:
    friend class ThreadPool<Task>;
    std::thread* m_thread;
    std::mutex mu;
    std::condition_variable cv;
    Task* task_to_be_processed;
    bool is_run;
    bool flag;
};
template<class Task> inline void Thread<Task>::Wait(){
    std::unique_lock<std::mutex>lock(mu);
    cv.wait(lock);
    is_run = true;
}
template<class Task> inline void Thread<Task>::Wake(){
    std::unique_lock<std::mutex>lock(mu);
    cv.notify_one();
}
template<class Task> inline void Thread<Task>::TakeTask(Task* task){
    task_to_be_processed = task;
}
template<class Task> inline void Thread<Task>::Loop(){
    while(flag)
    {
        Wait();
        task_to_be_processed->work();
        delete task_to_be_processed;
        task_to_be_processed = nullptr;
        is_run = false;
    }
}

template<class Task>
class ThreadPool
{
public:
    static ThreadPool<Task>* get_threadpool(){
        if(ThreadPool<Task>::m_threadpool == nullptr){
            ThreadPool<Task>::m_threadpool = new ThreadPool<Task>();
        }
        return ThreadPool<Task>::m_threadpool;
    }
private:
    static ThreadPool<Task>* m_threadpool;
    class Deletor
    {
        public:
            Deletor() = default;
            ~Deletor()
            {
                if(ThreadPool<Task>::m_threadpool != nullptr){
                    delete ThreadPool<Task>::m_threadpool;
                    ThreadPool<Task>::m_threadpool = nullptr;
                }
            }
    };
    static Deletor deletor;
public:
    void Start();
    void GetTask(Task*);
private:
    void AssignedTasks();
    int CountThreadNum()
    {
        int temp_count = 0;
        for(const auto& thread : threadpool){
            if(thread->is_run) ++temp_count;
        }
        return temp_count;
    }
private:
    std::priority_queue<Task*>taskqueue;
    std::vector<std::shared_ptr<Thread<Task>>>threadpool;
    std::thread* m_thread;
    mutable std::mutex mu;
    const int max_thread_num = 8;
private:
    ThreadPool() = default;
    ~ThreadPool() = default;
    ThreadPool(const ThreadPool&) = delete;
    ThreadPool& operator=(const ThreadPool&) = delete;
};
template<class Task> ThreadPool<Task>* ThreadPool<Task>::m_threadpool = nullptr;
template<class Task> inline void ThreadPool<Task>::Start(){
    assert(threadpool.empty());
    threadpool.reserve(max_thread_num);
    for(int i = 0;i < max_thread_num;++i){
        threadpool.emplace_back(std::make_shared<Thread<Task>>());
    }
    m_thread = new std::thread(&ThreadPool::AssignedTasks,this);
    m_thread->detach();
}
template<class Task> inline void ThreadPool<Task>::AssignedTasks(){
    //assert(!taskqueue.empty());
    while(true)
    {
        if(!taskqueue.empty() && CountThreadNum() < max_thread_num)
        {
            for(auto& thread : threadpool){
                if(!thread->is_run){
                    mu.lock();
                    auto task = taskqueue.top();
                    thread->TakeTask(task);
                    taskqueue.pop();
                    mu.unlock();
                    thread->Wake();
                    break;
                }
            }
        }
    }
}
template<class Task> inline void ThreadPool<Task>::GetTask(Task* task){
    printf("taskqueue size:%d\n",(int)taskqueue.size());
    printf("current_work_thread_num:%d\n",CountThreadNum());
    if((int)taskqueue.size() < Max_Task_Num)
    {
        mu.lock();
        taskqueue.push(task);
        mu.unlock();
    }else{
        //Treat it as packet loss
        delete task;
        task = nullptr;
    }
}

Version 2.0 took a long time to think through and the amount of code ballooned, but looking back it actually headed in the wrong direction:

  • Every thread in the pool is abstracted into its own Thread object, and each Thread carries its own mutex and condition variable. ThreadPool is made a friend class of Thread, and by iterating over the vector<Thread> inside ThreadPool it tries to wake a "specific" thread in an "orderly" fashion. (This is fundamentally pointless and misunderstands what a thread pool is for.)
  • To patch the problem from version 1.0, an extra thread is forcibly created just to "monitor" whether the task queue holds pending tasks; that thread sits in a busy loop.
  • Every thread is detached right after creation via std::thread::detach(), which makes it impossible to write a meaningful destructor at all… (a joinable alternative is sketched right after this list)
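As a point of comparison, here is a minimal sketch of the joinable alternative to detach(): pair the std::thread with a stop flag and a condition variable so the destructor can signal shutdown, wake the thread, and join it. The class and member names below are mine, added only for illustration.

#include<condition_variable>
#include<mutex>
#include<thread>

class JoinableWorker
{
public:
    JoinableWorker():worker([this]{ Loop(); }) {}
    ~JoinableWorker()
    {
        {
            std::lock_guard<std::mutex>lock(mu);
            stop = true;//request shutdown
        }
        cv.notify_one();//wake the worker if it is waiting
        worker.join();//safe because the thread was never detached
    }
private:
    void Loop()
    {
        std::unique_lock<std::mutex>lock(mu);
        //a real worker would pop and run tasks here; this sketch only
        //demonstrates the shutdown handshake that detach() makes impossible
        cv.wait(lock,[this]{ return stop; });
    }
    std::mutex mu;
    std::condition_variable cv;
    bool stop = false;
    std::thread worker;//declared last so mu, cv and stop exist before the thread starts
};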

Version 3.0 goes back to basics, and genuinely solves the problem of how to wake threads to process the tasks buffered in the queue even when the task-submission function (GetTasks() in 1.0, append() below) is not being called:

zii ThreadPool 3.0

#pragma once
#include<iostream>
#include<thread>
#include<mutex>
#include<condition_variable>
#include<vector>
#include<memory>
#include<queue>
#include<future>
#include<assert.h>
#include<atomic>
#include<string>

#define max_tasks_num 1000

template<class Task>
class ThreadPool
{
public:
    ThreadPool():is_run(true)
    {
        for(int i = 0;i < thread_num;++i)
        {
            threadpool.emplace_back(std::thread(
                [this]
                {
                    while(is_run)
                    {
                        Task* task = nullptr;
                        {
                            std::unique_lock<std::mutex>lock(mu);
                            //wait on a predicate: it is re-checked after every wakeup, so
                            //buffered tasks keep being drained and shutdown is never missed
                            cv.wait(lock,[this]{return !is_run || !taskqueue.empty();});
                            if(!is_run && taskqueue.empty())
                                return;
                            task = taskqueue.front();
                            taskqueue.pop();
                        }
                        task->work();//run the task outside the lock
                        delete task;
                        task = nullptr;
                    }
                }
            ));
        }
    }
    ~ThreadPool()
    {
        {
            std::lock_guard<std::mutex>lock(mu);
            is_run = false;//set under the lock so a worker checking the predicate cannot miss it
        }
        cv.notify_all();//wake every worker, otherwise join() below would block forever
        for(auto& thread : threadpool)
            thread.join();
        threadpool.clear();
        printf("ThreadPool dtor\n");
    }
    ThreadPool(const ThreadPool&) = delete;
    ThreadPool& operator=(const ThreadPool&) = delete;
    void append(Task* task)
    {
        {
            std::lock_guard<std::mutex>lock(mu);
            if((int)taskqueue.size() >= max_tasks_num)
            {
                //queue is full: treat it as packet loss
                delete task;
                task = nullptr;
                return;
            }
            taskqueue.push(task);
        }
        cv.notify_all();//wake all workers and let them compete for the buffered tasks
    }
private:
    std::vector<std::thread>threadpool;
    std::queue<Task*>taskqueue;
    std::mutex mu;
    std::condition_variable cv;
    std::atomic_bool is_run;//read by workers outside the lock, so keep it atomic
    const int thread_num = 8;
};

The change of approach: whenever a new task comes in, wake all the threads and let them compete for the tasks buffered in the queue until it is empty. A small usage sketch follows.
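To round things off, here is a small usage sketch of the 3.0 pool, assuming the ThreadPool template above is in scope (e.g. the header is included). It relies on a task type exposing the work() method that the pool calls; the PrintTask class and the main() function below are my own additions for illustration and are not part of the original post.

#include<chrono>
#include<cstdio>
#include<thread>

class PrintTask
{
public:
    explicit PrintTask(int id):id(id){}
    void work(){ printf("task %d done\n",id); }
private:
    int id;
};

int main()
{
    ThreadPool<PrintTask> pool;//the constructor launches 8 worker threads
    for(int i = 0;i < 100;++i)
        pool.append(new PrintTask(i));//the pool deletes each task after running it
    //give the workers a moment to drain the queue; tasks still buffered when the
    //pool is destroyed are not processed
    std::this_thread::sleep_for(std::chrono::seconds(1));
    return 0;//~ThreadPool() signals shutdown, wakes all workers and joins them
}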
