1.什么是线程池,为什么需要线程池?
当今计算机软硬件发展得极其迅猛,计算机大神们都各显神通地将计算机的性能发挥到极致。一般情况下,线程在执行结束之后会被操作系统回收;而线程池的核心思想是减少用户层向内核申请和释放线程资源的频率,从而降低线程创建和销毁的开销。线程池一次性向操作系统申请固定数量的线程,并在用户层管理这些线程:某个线程的业务逻辑执行完后并不立即释放该线程,而是通过应用层提供的一系列管理接口,重复利用已经向操作系统内核申请的线程。
2.线程池实现代码
/* Guard renamed: identifiers starting with `_` + uppercase are reserved
 * for the implementation (C standard 7.1.3). */
#ifndef THREAD_POOL_H
#define THREAD_POOL_H

/* Opaque pool handle; the definition lives in the implementation file. */
typedef struct thrdpool_s thrdpool_t;

/* Task callback signature: receives the user-supplied context pointer. */
typedef void (*handler_pt)(void * /* ctx */);

#ifdef __cplusplus
extern "C"
{
#endif

/* Symmetric create/terminate API. */

/* Create a pool running thrd_count worker threads; NULL on failure. */
thrdpool_t *thrdpool_create(int thrd_count);
/* Ask the pool to stop: workers finish their current task and exit. */
void thrdpool_terminate(thrdpool_t * pool);
/* Queue one task; returns 0 on success, -1 if the pool is quitting or OOM. */
int thrdpool_post(thrdpool_t *pool, handler_pt func, void *arg);
/* Join all workers and release the pool's resources. */
void thrdpool_waitdone(thrdpool_t *pool);

#ifdef __cplusplus
}
#endif
#endif /* THREAD_POOL_H */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>
#include "thread_pool.h"
#include "spinlock.h"
/**
* shell: gcc thrd_pool.c -c -fPIC
* shell: gcc -shared thrd_pool.o -o libthrd_pool.so -I./ -L./ -lpthread
* usage: include thrd_pool.h & link libthrd_pool.so
*/
/* Alias for the project-local spinlock (declared in spinlock.h). */
typedef struct spinlock spinlock_t ;
/* Extended task node with a second argument slot.
 * NOTE(review): unused in the visible code — kept for a two-arg task variant. */
typedef struct taskex_s{
void *xnext;
handler_pt func;
void *arg;// heap-allocated by the poster
void *arg2;
} taskex_t;
/* One queued task. The `next` link MUST be the first member: the queue
 * code reinterprets a node pointer as void** to reach it generically. */
typedef struct task_s{
void* next;// link to the next task in the queue
handler_pt func;
void *arg;// heap-allocated by the poster
}task_t ;
/* Intrusive singly linked FIFO of tasks.
 * `tail` points at the last node's next slot (or at `head` when empty),
 * which makes appending O(1) with no special empty-queue case. */
typedef struct task_queue_s{
void *head;
void **tail;
int block;// 1: consumers block when empty; 0: pop returns NULL immediately
spinlock_t lock;    // protects head/tail (short critical sections)
pthread_mutex_t mutex;  // pairs with cond for blocking consumers
pthread_cond_t cond;
}task_queue_t ;
/* The pool object behind the opaque thrdpool_t handle. */
struct thrdpool_s {
task_queue_t *task_queue;
atomic_int quit;    // set to 1 to ask workers to exit
int thrd_count;     // number of threads actually started
pthread_t *threads; // heap array of worker thread ids
};
/* Resource creation */
// resource creation: rollback-style error handling (undo on failure)
// business logic: defensive programming (validate before use)
/*
 * Allocate and initialize an empty task queue.
 * Returns NULL on any failure; every resource acquired before the failing
 * step is released (goto-based cleanup, equivalent to the rollback style).
 */
static task_queue_t* _taskqueue_create(){
    task_queue_t *q = (task_queue_t *)malloc(sizeof(task_queue_t));
    if (q == NULL)
        return NULL;
    if (pthread_mutex_init(&q->mutex, NULL) != 0)
        goto fail_free;
    if (pthread_cond_init(&q->cond, NULL) != 0)
        goto fail_mutex;
    spinlock_init(&q->lock);
    q->head = NULL;
    q->tail = &q->head;   /* empty queue: tail points at head's slot */
    q->block = 1;         /* consumers block by default */
    return q;

fail_mutex:
    pthread_mutex_destroy(&q->mutex);
fail_free:
    free(q);
    return NULL;
}
/* Switch the queue to non-blocking mode and wake every sleeping consumer. */
static void _nonblock(task_queue_t *queue){
    pthread_mutex_lock(&queue->mutex);
    queue->block = 0;   /* published under the mutex the waiters hold */
    pthread_mutex_unlock(&queue->mutex);
    /* Broadcast so ALL waiters re-check block and return. */
    pthread_cond_broadcast(&queue->cond);
}
/*
 * Append a task node to the tail of the queue and wake one consumer.
 * The node type is not constrained: the only requirement is that the
 * struct's FIRST member is the pointer linking to the next task, so the
 * node can be manipulated through a void** view.
 *
 * BUG FIX: the original called spinlock_lock twice — the second call must
 * be spinlock_unlock, otherwise the first producer deadlocks.
 */
static inline void _add_task(task_queue_t* queue,void *task){
    void **link = (void **)task;  /* view the node's first word as its next link */
    *link = NULL;                 /* new node terminates the list */
    spinlock_lock(&queue->lock);
    *queue->tail = link;          /* previous tail slot now points at the node */
    queue->tail = link;           /* tail advances to the new node's next slot */
    spinlock_unlock(&queue->lock);
    pthread_cond_signal(&queue->cond);  /* one task: wake one worker */
}
/*
 * Remove and return the task at the head of the queue, or NULL if empty.
 *
 * BUG FIXES vs. the original:
 *  - both exit paths re-LOCKED the spinlock instead of unlocking (deadlock);
 *  - head was advanced to `link` (the node being removed) instead of
 *    `*link` (the node's next pointer), so the same node was returned
 *    forever and later double-freed.
 */
static inline void* _pop_task(task_queue_t*queue){
    spinlock_lock(&queue->lock);
    if (queue->head == NULL) {
        spinlock_unlock(&queue->lock);
        return NULL;
    }
    task_t *task = queue->head;
    void **link = (void **)task;   /* first word of the node is its next link */
    queue->head = *link;           /* advance head to the NEXT node */
    if (queue->head == NULL) {     /* queue drained: reset tail to head's slot */
        queue->tail = &queue->head;
    }
    spinlock_unlock(&queue->lock);
    return task;
}
/*
 * Block until a task is available (or the queue is switched to
 * non-blocking mode, in which case NULL is returned).
 *
 * BUG FIX: the original condition `task=_pop_task(queue)==NULL` binds as
 * `task = (_pop_task(queue) == NULL)` — the comparison result (0/1) was
 * stored in `task` and then dereferenced by the caller. Parentheses
 * restore the intended assignment-then-compare.
 */
static inline void* _get_task(task_queue_t*queue){
    task_t *task;
    /* Re-check in a loop: pthread_cond_wait permits spurious wakeups. */
    while ((task = _pop_task(queue)) == NULL) {
        pthread_mutex_lock(&queue->mutex);
        if (queue->block == 0) {
            pthread_mutex_unlock(&queue->mutex);
            return NULL;   /* queue closed for blocking consumers */
        }
        /* cond_wait atomically: unlock mutex -> sleep -> relock on wakeup
         * (woken by _add_task's signal or _nonblock's broadcast). */
        pthread_cond_wait(&queue->cond, &queue->mutex);
        pthread_mutex_unlock(&queue->mutex);
    }
    return task;
}
/* Drain and free every pending task, then destroy the queue's
 * synchronization objects and the queue itself. */
static void _taskqueue_destory(task_queue_t*queue){
    task_t *node;
    while ((node = _pop_task(queue)) != NULL) {
        free(node);
    }
    spinlock_destroy(&queue->lock);
    pthread_cond_destroy(&queue->cond);
    pthread_mutex_destroy(&queue->mutex);
    free(queue);
}
static void * _thrdpool_worker(void*arg){
thrdpool_t *pool=(thrdpool_t* )arg;
task_t *task;
void *ctx;
while (atomic_load(&pool->quit)==0) {
task =(task_t*)_get_task(pool->task_queue);
if(!task)break;
handler_pt func=task->func;
ctx=task->arg;
free(task);
func(ctx);
}
return NULL;
}
/* Signal shutdown, wake any sleeping workers, and join them all. */
static void _threads_terminate(thrdpool_t *pool){
    atomic_store(&pool->quit, 1);
    _nonblock(pool->task_queue);  /* broadcast so sleepers observe quit */
    for (int i = 0; i < pool->thrd_count; i++) {
        pthread_join(pool->threads[i], NULL);
    }
}
/*
 * Start thrd_count worker threads. Returns 0 on full success; on partial
 * failure, joins whatever was started, frees the thread array, and
 * returns -1.
 *
 * BUG FIXES vs. the original:
 *  - the post-loop bookkeeping (thrd_count update, attr destroy, success
 *    test, rollback) was INSIDE the for loop, so the pool tore itself
 *    down right after creating the first thread;
 *  - when pthread_attr_init failed the function fell off the end with no
 *    return value (undefined behavior);
 *  - the attr object leaked when the malloc of the thread array failed.
 */
static int _threads_create(thrdpool_t *pool,size_t thrd_count){
    pthread_attr_t attr;
    int ret = pthread_attr_init(&attr);  /* thread creation attributes */
    if (ret == 0) {
        pool->threads = (pthread_t *)malloc(sizeof(pthread_t) * thrd_count);
        if (pool->threads) {
            size_t i = 0;
            for (; i < thrd_count; i++) {
                if (pthread_create(&pool->threads[i], &attr, _thrdpool_worker, pool) != 0) {
                    break;  /* stop at the first failure */
                }
            }
            pool->thrd_count = (int)i;   /* join only threads actually started */
            pthread_attr_destroy(&attr);
            if (i == thrd_count) {
                return 0;
            }
            _threads_terminate(pool);    /* roll back partially created threads */
            free(pool->threads);
        } else {
            pthread_attr_destroy(&attr);
        }
        ret = -1;
    }
    return ret;
}
/*
 * Create a thread pool with thrd_count workers.
 * Rollback style: each acquired resource is released when a later step
 * fails. Returns the pool handle, or NULL on any failure.
 *
 * BUG FIX: the original called _taskqueue_create(pool, thrd_count) where
 * _threads_create was intended (wrong function AND wrong arity), so no
 * worker threads were ever started.
 */
thrdpool_t * thrdpool_create(int thrd_count){
    thrdpool_t *pool = (thrdpool_t *)malloc(sizeof(*pool));
    if (pool) {
        task_queue_t *queue = _taskqueue_create();
        if (queue) {
            pool->task_queue = queue;
            atomic_init(&pool->quit, 0);
            if (_threads_create(pool, (size_t)thrd_count) == 0) {
                return pool;
            }
            _taskqueue_destory(queue);  /* thread start-up failed: undo queue */
        }
        free(pool);
    }
    return NULL;
}
/*
 * Queue one task for execution by the pool.
 * Returns 0 on success; -1 if the pool is shutting down or the node
 * allocation failed. The task node is freed by the worker that runs it.
 */
int thrdpool_post(thrdpool_t*pool,handler_pt func,void *arg){
    if (atomic_load(&pool->quit) == 1)
        return -1;  /* refuse new work during shutdown */
    task_t *node = (task_t *)malloc(sizeof(task_t));
    if (node == NULL)
        return -1;
    node->func = func;
    node->arg = arg;
    _add_task(pool->task_queue, node);
    return 0;
}
/*
 * Join every worker, then release the queue, the thread array and the
 * pool object itself. Callers are expected to have requested shutdown
 * (thrdpool_terminate) beforehand so the joins can complete.
 */
void thrdpool_waitdone(thrdpool_t *pool){
    for (int i = 0; i < pool->thrd_count; i++) {
        pthread_join(pool->threads[i], NULL);
    }
    _taskqueue_destory(pool->task_queue);
    free(pool->threads);
    free(pool);
}