A Simple Thread Pool Implementation

This article introduces the concept of a thread pool, used to manage thread resources and execute large numbers of tasks efficiently. It walks through a simple C++ thread pool implementation, adds template support so tasks carrying different data types can be handled, and shows how to create a pool, push tasks into it, and finally build a pool on top of the C++11 standard thread library.

Thread Pool: Concept and Use Cases

A thread pool is simply a group of threads waiting for you to hand them work; it is called a "pool" because the number of threads in it is fixed and finite.
For example: suppose you have 1000 tasks to run. They all do the same thing, only the data differs slightly. If resources allowed, you could of course spawn 1000 threads to run them; in reality resources are limited, and with the thread pool approach those 1000 tasks can be handled by, say, 4 threads: create four threads, then schedule and reuse them until all the tasks are done.
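As a rough illustration of this idea (a minimal sketch of my own, not the pool implementation used later in this post), four worker threads can drain a shared counter of 1000 numbered tasks:

// sketch: 4 workers draining 1000 numbered tasks
#include <atomic>
#include <thread>
#include <vector>
#include <cstdio>

int main()
{
    std::atomic<int> next{0};                 // next task index to hand out
    auto worker = [&next] {
        for (;;) {
            int i = next.fetch_add(1);        // claim one task
            if (i >= 1000) break;             // all 1000 tasks have been claimed
            // ... process task i ...
        }
    };
    std::vector<std::thread> threads;
    for (int t = 0; t < 4; ++t) threads.emplace_back(worker);
    for (auto &t : threads) t.join();
    std::printf("all 1000 tasks done by 4 threads\n");
    return 0;
}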

A Basic Thread Pool Implementation

For this part, please refer to this post by another author: Linux线程池概念与实现 (Linux thread pool: concept and implementation).
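So that this article stays self-contained, here is a minimal sketch of the kind of pthread-based pool that post describes: a mutex, a condition variable, and a task queue that worker threads drain. The names here are my own; the templated version in the next section follows the same structure.

// minimal pthread-based pool sketch (hypothetical names)
#include <cstdio>
#include <queue>
#include <pthread.h>
#include <unistd.h>

typedef void (*task_handler)(int);

struct Task {
    int data;
    task_handler handler;
};

class SimplePool {
public:
    SimplePool(int thr_num = 4) {
        pthread_mutex_init(&_mutex, NULL);
        pthread_cond_init(&_cond, NULL);
        for (int i = 0; i < thr_num; i++) {
            pthread_t tid;
            pthread_create(&tid, NULL, thr_start, this);
            pthread_detach(tid);
        }
    }
    void taskPush(const Task &t) {
        pthread_mutex_lock(&_mutex);
        _queue.push(t);
        pthread_mutex_unlock(&_mutex);
        pthread_cond_signal(&_cond);   // wake one waiting worker
    }
private:
    // worker loop: wait for a task, pop it under the lock, run it outside the lock
    static void *thr_start(void *arg) {
        SimplePool *p = (SimplePool *)arg;
        while (1) {
            pthread_mutex_lock(&p->_mutex);
            while (p->_queue.empty())
                pthread_cond_wait(&p->_cond, &p->_mutex);
            Task t = p->_queue.front();
            p->_queue.pop();
            pthread_mutex_unlock(&p->_mutex);
            t.handler(t.data);
        }
        return NULL;
    }
    std::queue<Task> _queue;
    pthread_mutex_t _mutex;
    pthread_cond_t _cond;
};

// usage
void print_task(int id) { printf("task %d\n", id); }

int main() {
    SimplePool pool;
    for (int i = 0; i < 10; i++) {
        Task t = { i, print_task };
        pool.taskPush(t);
    }
    sleep(1);   // keep main alive so the workers can finish
    return 0;
}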

A Templated Thread Pool Implementation

This version is also adapted from that author's code.

1. First, define a custom struct type to hold the task data
#ifndef MYDATA_H
#define MYDATA_H

struct myData
{
    int id;

};


#endif // MYDATA_H

2. Modify the thread pool code to support templates
#ifndef THREADPOOL_HPP
#define THREADPOOL_HPP
//threadpool.hpp
#include <iostream>
#include <cstdio>
#include <queue>
#include <stdlib.h>
#include <pthread.h>
using namespace std;


#define MAX_THREAD 4
//task class: bundles one piece of data with the function used to process it
template<typename T>
class ThreadTask
{
public:
    typedef void (*handler_t)(T);

    ThreadTask()
    {
    }
    //pack the data together with the way it should be handled
    void setTask(T data, handler_t handler)
    {
        _data = data;
        _handler = handler;
    }
    //run the task: hand the data to its handler
    void run()
    {
        return _handler(_data);
    }
private:
    T _data;            //data processed by this task
    handler_t _handler; //function that processes the data
};

//thread pool class
template<typename  T >
class ThreadPool
{
    public:
        ThreadPool(int thr_max = MAX_THREAD)
            :_thr_max(thr_max)
        {
            pthread_mutex_init(&_mutex, NULL);
            pthread_cond_init(&_cond, NULL);
            for (int i = 0; i < _thr_max; i++)
            {
                pthread_t tid;
                int ret = pthread_create(&tid, NULL, thr_start, this);
                if (ret != 0)
                {
                    printf("thread create error\n");
                    exit(-1);
                }
            }
        }
        ~ThreadPool()
        {
            //note: this simple pool never stops or joins its worker threads;
            //it relies on the whole process exiting (hence the sleep() in the test below)
            pthread_mutex_destroy(&_mutex);
            pthread_cond_destroy(&_cond);
        }
        bool taskPush(ThreadTask<T> &task)
        {
            pthread_mutex_lock(&_mutex);
            _queue.push(task);
            pthread_mutex_unlock(&_mutex);
            pthread_cond_signal(&_cond);
            return true;
        }
        //a member function has a hidden this-pointer parameter, so it cannot be used
        //directly as a pthread entry; making it static removes this, and the pool
        //object is passed in explicitly through arg instead
        static void *thr_start(void *arg)
        {
            ThreadPool *p = (ThreadPool*)arg;
            while (1)
            {
                pthread_mutex_lock(&p->_mutex);
                while (p->_queue.empty())
                {
                    pthread_cond_wait(&p->_cond, &p->_mutex);
                }
                ThreadTask<T> task;
                task = p->_queue.front();
                p->_queue.pop();
                pthread_mutex_unlock(&p->_mutex);
                task.run(); //run the task outside the lock so other workers are not blocked
            }
            return NULL;
        }
    private:
        int _thr_max;                 //maximum number of threads in the pool
        queue<ThreadTask<T> > _queue; //task buffer queue
        pthread_mutex_t _mutex;       //mutex protecting queue operations
        pthread_cond_t _cond;         //condition variable for waiting on queue entries
};

#endif // THREADPOOL_HPP

3. Run a test
#include <unistd.h>
#include "threadpool.hpp"
#include "myData.h"
#include <mutex>

void printData(myData data);
//test function: simply forwards the data to printData
void test_func(myData data)
{
    printData(data);
}

static std::mutex cout_mutex;
void printData(myData data)
{
    cout_mutex.lock();
    printf("tid:%p -- id = %d \n", pthread_self(),data.id);
    fflush(stdout);
    cout_mutex.unlock();
}

int main()
{
    fflush(stdout);
    ThreadPool<myData> pool;
    for (int i = 0; i < 100; i++)
    {
        ThreadTask<myData> task;
        myData data;
        data.id = i;
        task.setTask(data, test_func);
        pool.taskPush(task);
    }
    sleep(1000); //keep main alive; the pool never joins its workers, so give them time to run
    return 0;
}

You can swap in your own data struct and use this as-is. The code above uses a C++11 std::mutex for the log output, so if your compiler does not support C++11 you can simply drop that locking code (or use a pthread mutex instead, as sketched below). If your compiler does support C++11, read on.
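For reference, here is a minimal sketch of that pre-C++11 variant of the logging helper, protecting the printf with a pthread mutex instead of std::mutex (my own sketch, not part of the original code; print_mutex is a hypothetical name):

//printData without C++11: protect the printf with a pthread mutex instead of std::mutex
#include <cstdio>
#include <pthread.h>
#include "myData.h"

static pthread_mutex_t print_mutex = PTHREAD_MUTEX_INITIALIZER;

void printData(myData data)
{
    pthread_mutex_lock(&print_mutex);
    printf("tid:%p -- id = %d \n", (void *)pthread_self(), data.id);
    fflush(stdout);
    pthread_mutex_unlock(&print_mutex);
}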

Implementing the Thread Pool with the C++11 Standard Thread Library

Thread pool code:

//threadpool.hpp
#include <queue>
#include <vector>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <functional>

class ThreadPool {
public:
    ThreadPool(size_t numThreads) : stop(false) {
        for (size_t i = 0; i < numThreads; ++i)
            threads.emplace_back([this] {
                for (;;) {
                    std::function<void()> task;
                    {
                        std::unique_lock<std::mutex> lock(this->queue_mutex);
                        this->condition.wait(lock, [this] { return this->stop || !this->tasks.empty(); });
                        if (this->stop && this->tasks.empty())
                            return;
                        task = std::move(this->tasks.front());
                        this->tasks.pop();
                    }
                    task();
                }
            });
    }

    template<class F>
    void enqueue(F&& f) {
        {
            std::unique_lock<std::mutex> lock(queue_mutex);
            tasks.emplace(std::forward<F>(f));
        }
        condition.notify_one();
    }

    ~ThreadPool() {
        {
            std::unique_lock<std::mutex> lock(queue_mutex);
            stop = true;
        }
        condition.notify_all();
        for (std::thread& thread : threads)
            thread.join();
    }

private:
    // need to keep track of threads so we can join them
    std::vector<std::thread> threads;
    // the task queue
    std::queue<std::function<void()>> tasks;

    // synchronization
    std::mutex queue_mutex;
    std::condition_variable condition;
    bool stop;
};

Test code:

#include "threadpool.hpp"
#include <atomic>
static atomic<int> id ;

void some_function() {
    // do something
    printf("id = %d \n",id++);
}

int main() {
    ThreadPool pool(4); // create a pool with 4 threads
    for (int i = 0; i < 8; ++i) {
        
        pool.enqueue(some_function); // add 8 tasks to the queue
    }
    return 0;
}
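One note on the implementation above: even though enqueue only accepts a callable with no parameters, data can still be passed in by capturing it in a lambda. A small usage sketch (my own example, assuming the ThreadPool just defined):

//main.cpp: passing data to the argument-less enqueue by capturing it in a lambda
#include <cstdio>
#include "threadpool.hpp"

int main() {
    ThreadPool pool(4);
    for (int i = 0; i < 8; ++i) {
        pool.enqueue([i] {                      // i is captured by value
            printf("task %d running\n", i);
        });
    }
    return 0;                                   // ~ThreadPool drains the queue, then joins
}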

Even so, it still feels a bit unsatisfying: can't the task function itself take arguments directly? Let's keep exploring. Thinking it over, supporting just one extra argument would be a half-measure; better to go all the way and support variadic arguments. The combined code is below.

#ifndef THREADPOOL_HPP
#define THREADPOOL_HPP
//threadpool.hpp
#include <iostream>
#include <queue>
#include <vector>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <functional>
#include <tuple>
#include <type_traits>
#include <utility>

// stores the captured variadic argument pack in a std::tuple
template<typename... Args>
struct SavedArgs {
    std::tuple<Args...> args;

    explicit SavedArgs(Args&&... args)
        : args(std::forward<Args>(args)...) {}
};



template<size_t N>
struct Apply {
    template<typename F, typename T, typename... A>
    static inline auto apply(F && f, T && t, A &&... a)
    -> decltype(Apply<N-1>::apply(
        ::std::forward<F>(f), ::std::forward<T>(t),
        ::std::get<N-1>(::std::forward<T>(t)),
        ::std::forward<A>(a)...
    ))
    {
        return Apply<N-1>::apply(::std::forward<F>(f),
            ::std::forward<T>(t),
            ::std::get<N-1>(::std::forward<T>(t)),
            ::std::forward<A>(a)...
        );
    }
};

template<>
struct Apply<0> {
    template<typename F, typename T, typename... A>
    static inline auto apply(F && f, T &&, A &&... a)
    -> decltype(::std::forward<F>(f)
    (::std::forward<A>(a)...))
    {
        return ::std::forward<F>(f)(::std::forward<A>
        (a)...);
    }
};

template<typename F, typename T>
inline auto apply(F && f, T && t)
-> decltype(Apply< ::std::tuple_size<typename ::std::decay<T>::type>::value>::apply(::std::forward<F>(f),
::std::forward<T>(t)))
{
    return Apply< ::std::tuple_size<
    typename ::std::decay<T>::type
    >::value>::apply(::std::forward<F>(f),
    ::std::forward<T>(t));
}


class task
{
public:
    task() {}

    struct _funcImpl
    {
        virtual ~_funcImpl() {};
        virtual void _M_run() = 0;
    };
    //using _funcImpl_ptr = unique_ptr<_funcImpl>;
    using _funcImpl_ptr = std::shared_ptr<_funcImpl>;

    template<typename Function,typename... Args>
    struct _funcImpl_ : public _funcImpl
    {
        Function		_M_func;
        SavedArgs<Args...>       _M_Args;

        _funcImpl_(Function&& __f, Args... args)
            : _M_func(std::forward<Function>(__f)), _M_Args(std::forward<Args>(args)...)
        { }

        virtual void
        _M_run() override {
            apply(_M_func,_M_Args.args);
        }
    };

    template<typename Function,typename... Args>
    static _funcImpl_ptr
    _S_make_funcImpl(Function&& __f,Args ... args)
    {
        using _Impl = _funcImpl_<Function, Args...>;
        return _funcImpl_ptr{new _Impl{std::forward<Function>(__f), std::forward<Args>(args)...}};
    }

    template< class Function, class... Args >
    explicit task( Function&& f, Args&&... args )
    {

        //auto bound_function = std::bind(std::forward<Function>(f), std::forward<Args>(args)...);

        //bound_function();
        func_ = _S_make_funcImpl(f,args...);
    }
    //debug stub left in from development; the pool calls run(), not operator()
    void operator()()
    {
        std::cout << "task operator()()" << std::endl;
    }

    void run()
    {
        this->func_->_M_run();
    }

    ~task() {}
    task( task& __t)
    {
        func_ = __t.func_;
    };
    //    task(const task&) = delete;
    //    task(const task&&) = delete;

    task(task&& __t) noexcept
    { func_ = __t.func_; }

    task& operator=(const task& __t)
    {
        func_ = __t.func_;
        return *this;
    };



private:
    _funcImpl_ptr func_;
};

class ThreadPool {
public:
    ThreadPool(size_t numThreads) : stop(false) {
        for (size_t i = 0; i < numThreads; ++i)
            threads.emplace_back([this] {
                for (;;) {
                    task t;
                    {
                        std::unique_lock<std::mutex> lock(this->queue_mutex);
                        this->condition.wait(lock, [this] { return this->stop || !this->tasks.empty(); });
                        if (this->stop && this->tasks.empty())
                            return;
                        t = std::move(this->tasks.front());
                        this->tasks.pop();
                    }
                    t.run();
                }
            });
    }

    template< class Function, class... Args >
    void enqueue(Function&& f, Args&&... args ) {
        {
            std::unique_lock<std::mutex> lock(queue_mutex);
            task t = task(f, args...);
            tasks.emplace(t);
        }
        condition.notify_one();
    }

    ~ThreadPool() {
        {
            std::unique_lock<std::mutex> lock(queue_mutex);
            stop = true;
        }
        condition.notify_all();
        for (std::thread& thread : threads)
            thread.join();
    }

private:
    // need to keep track of threads so we can join them
    std::vector<std::thread> threads;
    // the task queue

    std::queue<task> tasks;

    // synchronization
    std::mutex queue_mutex;
    std::condition_variable condition;
    bool stop;
};


#endif // THREADPOOL_HPP
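A quick aside on the hand-rolled Apply/apply helpers in this header: they unpack a std::tuple into a function call, which is exactly what std::apply provides out of the box in C++17 and later. Here is a tiny standalone sanity check of the helper (my own example with a hypothetical add function; under C++17 an unqualified call could clash with std::apply found by argument-dependent lookup, hence the :: qualification):

// sanity check: unpack a tuple into a call with the apply() template above
#include <cstdio>
#include <tuple>
#include "threadpool.hpp"   // brings in the Apply/apply helpers defined above

int add(int a, int b) { return a + b; }

int main() {
    std::tuple<int, int> t(2, 3);
    printf("::apply(add, t) = %d\n", ::apply(add, t));   // prints 5
    return 0;
}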


Test code:


//main.cpp
#include <unistd.h>
#include <cstdio>
#include <pthread.h>
#include "threadpool.hpp"
#include "myData.h"
#include <mutex>


static std::mutex cout_mutex;
void printData(myData data)
{
    cout_mutex.lock();
    printf("tid:%p -- id = %d \n", pthread_self(),data.id);
    fflush(stdout);
    cout_mutex.unlock();
}


int main() {
    ThreadPool pool(4); // create a pool with 4 threads
    for (int i = 0; i < 1000; ++i) {
        myData data;
        data.id = i;

        pool.enqueue(printData, data); // push 1000 tasks into the queue, one per iteration
    }

    return 0;
}
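For comparison, the std::bind route that is left commented out inside the task constructor sidesteps the SavedArgs/apply machinery entirely: binding the arguments up front yields a callable that takes no parameters, so it can be queued as a plain std::function<void()> by the first C++11 pool (and the variadic pool accepts it just as well). A minimal sketch of that idea, using a stand-in print function (my own variant, not the original code):

// variant: bind the arguments up front and enqueue a no-argument callable
// (assumes the simpler C++11 ThreadPool from earlier, whose enqueue takes just a callable)
#include <cstdio>
#include <functional>
#include "threadpool.hpp"
#include "myData.h"

void printId(myData data) { printf("id = %d\n", data.id); }   // stand-in task function

int main() {
    ThreadPool pool(4);
    for (int i = 0; i < 1000; ++i) {
        myData data;
        data.id = i;
        pool.enqueue(std::bind(printId, data));   // data is copied into the bound callable
    }
    return 0;   // ~ThreadPool lets the workers drain the queue, then joins them
}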
