提示:文章写完后,目录可以自动生成,如何生成可参考右边的帮助文档
前言
我的技术栈主要是在 Windows 平台上积累的,然而 C++ 后端开发大多在 Linux 平台,这篇文章是我技术栈迁移的一次实践。
一、线程池的结构
1. 一条线程安全的任务队列,负责为线程分配任务。
2. 一个线程管理类 handler,负责连接线程池与任务队列,以及线程之间的同步;
3. 一个 job 结构体,描述单个任务的回调函数、参数和运行状态;
4. 一个 job_list,记录已提交的任务,便于统一等待与回收;
5. 线程池中的任务分为同步和异步两类:主线程可以发起一个异步任务,异步任务中再发起同步任务,以提高逻辑处理效率。
本质上通过一条任务队列的入队出队,为线程池中的空闲线程分配任务。
二、代码示例
1.前置数据结构
#ifndef THREADJOB_H_
#define THREADJOB_H_
#include<semaphore.h>
#include<string>

// Signature every pool task must have: an opaque argument in, an opaque
// pointer out (matches the pthread start-routine shape).
typedef void* (*JobFunc)(void*);

// Lifecycle states of a Job.  The pool code shown in this article only ever
// sets INIT/RUNNING; FREE/DIE are presumably managed by the Handler -- TODO confirm.
typedef enum {INIT=0,RUNNING,FREE,DIE} JobStatus;

// THREAD_JOB: synchronous -- the submitter blocks on consume_sem until done.
// ASYIO_JOB : asynchronous -- the worker thread frees the Job itself.
typedef enum {THREAD_JOB=0,ASYIO_JOB} JobType;

// A work function bundled with its argument.
struct Parems{
    void *args;
    JobFunc work_func;
};

// One schedulable unit of work handed to the pool.
struct Job{
    JobStatus job_status;
    Parems parame;
    sem_t *consume_sem;          // posted by the worker when a THREAD_JOB finishes
    std::string consumer_name;   // presumably names the submitting consumer -- TODO confirm usage
    JobType type;
};
#endif
2.线程池
1.类声明
#ifndef THREADPOOL_H_
#define THREADPOOL_H_
#include<pthread.h>
#include<string>
#include<list>
#include "Job.h"
#include "ThreadJobQueue.h"

// Fixed-size pool of detached pthreads that consume Jobs from one shared
// thread-safe queue (ThreadJobQueue.h).
// NOTE(review): guard macro renamed (leading-underscore names are reserved)
// and the header-scope `using namespace std;` removed -- every .cpp in the
// article declares its own.
class threadPool{
private:
    int thread_num;                      // number of worker threads to spawn
    std::string thread_pool_name;        // label for this pool instance
    JobQueue<Job *> job_queue;           // shared, thread-safe job queue
    std::list<pthread_t *> thread_list;  // handles of successfully created threads
    bool stopFlag;                       // true while workers should keep running
public:
    threadPool(int thread_num, std::string thread_pool_name);
    ~threadPool();
    // Enqueue a job; returns false if job is NULL.
    bool pushJob(Job *);
    // Block until a job is available, then return it.
    Job *popJob();
    void setStopFlag(bool flag);
    bool getStopFlag();
    // Create the worker threads (they detach themselves).
    void init();
    // Ask workers to stop and free the stored pthread_t handles.
    void stop();
};
// Create the process-wide singleton pool with thread_count workers.
void InitPoolPtr(int thread_count);
// Access the singleton created by InitPoolPtr(); NULL before that call.
threadPool *getThreadPoolPtr();
#endif
2.类定义
#include<iostream>
#include <semaphore.h>
#include "threadPool.h"
#include <list>
#include "ThreadJobQueue.h"
using namespace std;
// Process-wide singleton pool, created by InitPoolPtr() (zero-initialized
// to NULL by static storage duration before that).
static threadPool * thread_pool_ptr;
// Default name given to the singleton pool.
static string THREAD_POOL_LHL = "lhl_test";
// Construct a pool; no threads are started until init() is called.
// stopFlag starts true so the worker loop in work() will run once started.
threadPool::threadPool(int thread_num, std::string thread_pool_name)
    : thread_num(thread_num),
      thread_pool_name(thread_pool_name),
      stopFlag(true) {}
// Nothing to release here: pthread_t handles are freed in stop(), and the
// worker threads detach themselves (see work()).
threadPool::~threadPool() = default;

// Flip the run flag observed by the worker loop in work().
// NOTE(review): plain bool written here and read by worker threads without
// synchronization -- technically a data race; confirm whether that matters.
void threadPool::setStopFlag(bool flag){
    stopFlag = flag;
}

// True while workers should keep pulling jobs from the queue.
bool threadPool::getStopFlag(){
    return stopFlag;
}
void * work(void *arg){
pthread_t tid = pthread_self();
pthread_detach(tid);
threadPool *lhl = (threadPool*)arg;
while(lhl->getStopFlag()){
Job *job = lhl->popJob();
if(job == NULL)
continue;
switch(job->type){
case THREAD_JOB:
job->job_status = RUNNING;
job->parame.work_func(job->parame.args);
sem_post(job->consume_sem);
break;
case ASYIO_JOB:
job->job_status = RUNNING;
job->parame.work_func(job->parame.args);
delete job;
break;
}
}
}
// Block on the shared queue until a job is available, then hand it back.
Job* threadPool::popJob(){
    Job *job = NULL;
    this->job_queue.wait_and_pop(job);
    return job;
}
// Enqueue a job for the workers.  NULL is rejected up front because the
// worker loop dereferences every job it pops.
bool threadPool::pushJob(Job *job){
    if(!job)
        return false;
    job_queue.push(job);
    return true;
}
// Spawn thread_num worker threads running work(); each successfully created
// thread's handle is tracked in thread_list for later cleanup in stop().
void threadPool::init(){
    for(int i = 0; i < thread_num; i++){
        pthread_t *thread = new pthread_t;
        int err = pthread_create(thread,NULL,work,(void*)this);
        if(err != 0){
            // BUG FIX: the freshly allocated pthread_t was leaked whenever
            // pthread_create failed.
            delete thread;
            continue;
        }
        thread_list.push_back(thread);
    }
}
// Signal the (detached) workers to leave their loop and free the stored
// pthread_t handles.  The threads themselves are not joined -- they detach
// in work().
// NOTE(review): a worker blocked inside popJob()/wait_and_pop() will not
// observe the cleared flag until another job arrives; consider pushing
// wake-up jobs if prompt shutdown matters.
void threadPool::stop(){
    this->setStopFlag(false);
    std::list<pthread_t *>::iterator iter = this->thread_list.begin();
    for(; iter != this->thread_list.end(); ++iter){
        delete *iter;
    }
    // BUG FIX: the list kept the dangling pointers, so a second stop() call
    // would double-delete them.  Clear it to make stop() safely repeatable.
    this->thread_list.clear();
}
// Accessor for the process-wide pool created by InitPoolPtr();
// returns NULL if InitPoolPtr() has not been called yet.
threadPool *getThreadPoolPtr(){
return thread_pool_ptr;
}
void InitPoolPtr(int thread_count){
thread_pool_ptr = new threadPool(thread_count,THREAD_POOL_LHL);
thread_pool_ptr->init();
}
3.线程安全的队列
#ifndef THREADJOBQUEUE_H_
#define THREADJOBQUEUE_H_
#include<pthread.h>
#include<queue>
#include<iostream>

// Thread-safe FIFO built on std::queue: a pthread mutex guards the queue
// and a condition variable lets consumers sleep until a producer pushes.
template<class T>
class JobQueue{
private:
    std::queue<T> job_queue;    // protected by mutex
    pthread_mutex_t mutex;
    pthread_cond_t condition;   // signalled on every push
public:
    JobQueue(){
        // BUG FIX: the original assigned PTHREAD_MUTEX_INITIALIZER /
        // PTHREAD_COND_INITIALIZER in the constructor body.  POSIX only
        // defines those macros as initializers for static-duration objects;
        // runtime initialization must go through the *_init functions.
        pthread_mutex_init(&mutex, NULL);
        pthread_cond_init(&condition, NULL);
    }
    ~JobQueue(){
        // BUG FIX: the primitives were never destroyed, leaking any
        // resources the implementation allocated for them.
        pthread_cond_destroy(&condition);
        pthread_mutex_destroy(&mutex);
    }
    // Copying would duplicate a live mutex, which is meaningless -- forbid it.
    JobQueue(const JobQueue&) = delete;
    JobQueue& operator=(const JobQueue&) = delete;
    // Number of queued elements at the moment of the call.
    int queue_size(){
        pthread_mutex_lock(&mutex);
        int size = (int)job_queue.size();
        pthread_mutex_unlock(&mutex);
        return size;
    }
    // Enqueue an element and wake one waiting consumer.
    // (const& is backward compatible with the original non-const reference.)
    void push(const T &item){
        pthread_mutex_lock(&mutex);
        job_queue.push(item);
        pthread_mutex_unlock(&mutex);
        pthread_cond_signal(&condition);
    }
    // Block until an element is available, then pop it into `out`.
    void wait_and_pop(T &out){
        pthread_mutex_lock(&mutex);
        while(job_queue.empty()){
            // pthread_cond_wait atomically releases the mutex while sleeping
            // and re-acquires it before returning; the while-loop re-check
            // guards against spurious wakeups.
            pthread_cond_wait(&condition,&mutex);
        }
        out = job_queue.front();
        job_queue.pop();
        pthread_mutex_unlock(&mutex);
    }
};
#endif
4.threadHandler
#ifndef THREADHANDLER_H_
#define THREADHANDLER_H_
#include <semaphore.h>
#include <list>   // added: std::list is used below (was only available transitively)
#include "Job.h"
#include "threadPool.h"
// NOTE(review): kept for source compatibility -- other translation units in
// this article pick up namespace std through this header.  New code should
// qualify std:: and drop this.
using namespace std;

// Bridges user code and the thread pool: hands Jobs to the pool and tracks
// them in job_list.  The implementation is omitted in the original article,
// so the per-method comments below are inferred from the declarations.
class Handler{
public:
    Handler(threadPool *thread_pool_ptr);
    ~Handler();
    // Submit a job to the pool -- presumably forwards to pushJob(); TODO confirm.
    bool push(Job *job);
    // Presumably blocks on handler_sem until submitted synchronous jobs
    // complete -- TODO confirm.
    void wait();
    // Presumably the number of jobs currently tracked in job_list -- TODO confirm.
    int JobSize();
    // Allocate and hand out a blank Job for the caller to fill in.
    Job *emptyJob();
private:
    void deleteJob();                // internal cleanup of job_list entries
    threadPool *thread_pool_ptr;     // pool this handler submits to (not owned)
    sem_t *handler_sem;              // presumably used as Job::consume_sem -- TODO confirm
    std::list<Job *> job_list;       // jobs created through this handler
};
#endif
懒得贴代码,诸位自己想想吧。
5.测试用例
#include "threadHandler.h"
#include <iostream>
#include "threadPool.h"
#include <unistd.h>
// Job work function: prints the std::string pointed to by its argument.
// (Parameter renamed from `test`, which confusingly shadowed the function's
// own name.)
void *test(void *arg){
    std::cout << *(std::string *)arg << std::endl;
    // BUG FIX: a value-returning function must return a value; flowing off
    // the end is undefined behavior.
    return NULL;
}
void *testAsyio(void *ll){
string str = "this is test";
threadPool *ptr = getThreadPoolPtr();
Handler handler(ptr);
Job * job = handler.emptyJob();
job->type = THREAD_JOB;
job->parame.args = (void *) &str;
job->parame.work_func = test;
handler.push(job);
handler.wait();
}
int main(){
    // Build the singleton pool with 5 workers and submit one async job.
    InitPoolPtr(5);
    threadPool *ptr = getThreadPoolPtr();
    Handler handler(ptr);
    Job * job = handler.emptyJob();
    job->type = ASYIO_JOB;
    job->parame.args = NULL;
    job->parame.work_func = testAsyio;
    handler.push(job);
    // Keep main alive long enough for the detached workers to run the job.
    // BUG FIX: the original locked a function-local mutex around this sleep;
    // the mutex was shared with no other thread, so it protected nothing,
    // and assigning PTHREAD_MUTEX_INITIALIZER to an already-constructed
    // object is not a portable way to initialize a mutex.  Removed.
    sleep(10);
    return 0;
}
总结
为了熟悉 Linux 下的线程同步机制,我练习写了这个线程池。