对于处理socket任务,线程池有两种处理方式:
1.把fd直接扔给线程池
2.把解析数据和中间业务处理扔给线程池,读写由主线程自己处理
线程一些问题
- 多线程共用fd,如何避免多线程使用的脏数据?
第一种方式会出现这种情况:线程A在准备fd数据做处理时,线程B可能已经关闭了fd,或者读到脏数据。
- 线程池的伸缩策略,如何做比较好?
可以统计一段时间内的任务量:当任务队列占用率超过60%时增加线程,低于30%时减少线程。
/*14-2locker.h代码清单*/
#ifndef LOCKER_H
#define LOCKER_H
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <cstddef>
#include <exception>
/* RAII wrapper around a POSIX unnamed semaphore.
 * The semaphore is created with an initial count of 0 and is private to
 * the threads of this process; construction throws std::exception on
 * failure and the destructor releases the semaphore. */
class sem
{
public:
    sem()
    {
        // Second argument 0: shared between threads of this process only;
        // initial value 0, so wait() blocks until someone calls post().
        if (sem_init(&m_sem, 0, 0) != 0)
        {
            throw std::exception();
        }
    }
    ~sem()
    {
        sem_destroy(&m_sem);
    }
    // Block until the semaphore value is positive, then decrement it.
    // Fix: sem_wait() may be interrupted by a signal and fail with EINTR;
    // retry in that case so a stray signal does not look like an error.
    bool wait()
    {
        int ret;
        while ((ret = sem_wait(&m_sem)) != 0 && errno == EINTR)
        {
        }
        return ret == 0;
    }
    // Increment the semaphore, waking one blocked waiter if any.
    bool post()
    {
        return sem_post(&m_sem) == 0;
    }
private:
    sem_t m_sem;
};
/* Thin RAII wrapper around pthread_mutex_t. The mutex is created on
 * construction (throwing std::exception on failure) and destroyed on
 * destruction; lock()/unlock() report success as a bool. */
class locker
{
public:
    locker()
    {
        int rc = pthread_mutex_init(&m_mutex, NULL);
        if (rc != 0)
        {
            throw std::exception();
        }
    }
    ~locker()
    {
        pthread_mutex_destroy(&m_mutex);
    }
    // Acquire the mutex; returns true on success.
    bool lock()
    {
        return pthread_mutex_lock(&m_mutex) == 0;
    }
    // Release the mutex; returns true on success.
    bool unlock()
    {
        return pthread_mutex_unlock(&m_mutex) == 0;
    }
private:
    pthread_mutex_t m_mutex;
};
/* Condition variable bundled with its own internal mutex.
 * NOTE(review): because the mutex is private to this class, callers
 * cannot test a shared predicate under the same lock before waiting, so
 * a signal() issued between that test and wait() can be lost — confirm
 * callers tolerate this (the thread pool below uses sem instead). */
class cond
{
public:
    cond()
    {
        if (pthread_mutex_init(&m_mutex, NULL) != 0)
        {
            throw std::exception();
        }
        if (pthread_cond_init(&m_cond, NULL) != 0)
        {
            // Roll back the mutex so a half-constructed object leaks nothing.
            pthread_mutex_destroy(&m_mutex);
            throw std::exception();
        }
    }
    ~cond()
    {
        pthread_mutex_destroy(&m_mutex);
        pthread_cond_destroy(&m_cond);
    }
    // Block until another thread calls signal(). The internal mutex is
    // held only around pthread_cond_wait itself.
    bool wait()
    {
        pthread_mutex_lock(&m_mutex);
        int rc = pthread_cond_wait(&m_cond, &m_mutex);
        pthread_mutex_unlock(&m_mutex);
        return rc == 0;
    }
    // Wake up one thread blocked in wait(), if any.
    bool signal()
    {
        return pthread_cond_signal(&m_cond) == 0;
    }
private:
    pthread_mutex_t m_mutex;
    pthread_cond_t m_cond;
};
#endif
/*15-3threadpool.h代码清单*/
#ifndef THREADPOOL_H
#define THREADPOOL_H
#include <list>
#include <cstdio>
#include <exception>
#include <pthread.h>
/*引用第14章介绍的线程同步机制的包装类*/
#include "../14/14-2locker.h"
/*线程池类,将它定义为模板类是为了代码复用。模板参数T是任务类*/
/* Thread pool class template. Defining it as a template allows code
reuse; the template parameter T is the task type, which must provide a
process() member function (invoked by run() below). */
template <typename T>
class threadpool
{
public:
/* thread_number is the number of threads in the pool; max_requests is
the maximum number of pending requests the queue may hold. */
threadpool(int thread_number = 8, int max_requests = 10000);
~threadpool();
/* Append a task to the request queue. */
bool append(T *request);
private:
/* Function run by the worker threads; it keeps taking tasks off the
work queue and executing them. */
static void *worker(void *arg);
void run();
private:
bool m_stop; /* whether the threads should terminate. NOTE(review):
written and read by different threads without synchronization — confirm
an atomic flag is not required here */
int m_thread_number; /* number of threads in the pool */
int m_max_requests; /* maximum number of requests allowed in the queue */
pthread_t *m_threads; /* array describing the pool, size m_thread_number */
std::list<T *> m_workqueue; /* request queue */
locker m_queuelocker; /* mutex protecting the request queue */
sem m_queuestat; /* signals whether there is a task to process */
// sem_t is simpler than pthread_cond_t here: atomic post/wait with no
// separate mutex or predicate to manage.
//cond m_cond;
};
/* Construct the pool: validate the configuration, then create
 * thread_number detached worker threads, each running worker(this).
 * Throws std::exception on invalid arguments or thread-creation failure. */
template <typename T>
threadpool<T>::threadpool(int thread_number, int max_requests) :
    m_thread_number(thread_number), m_max_requests(max_requests), m_stop(false), m_threads(NULL)
{
    if ((thread_number <= 0) || (max_requests <= 0))
    {
        throw std::exception();
    }
    // new[] throws std::bad_alloc on failure and never returns null,
    // so the original post-allocation null check was dead code.
    m_threads = new pthread_t[m_thread_number];
    /* Create thread_number worker threads and detach each one so its
       resources are reclaimed automatically when it exits. */
    for (int i = 0; i < thread_number; ++i)
    {
        printf("create the %dth thread\n", i);
        // Pass `this` so the static trampoline can reach the pool's
        // queue and synchronization members.
        if (pthread_create(m_threads + i, NULL, worker, this) != 0)
        {
            delete[] m_threads;
            throw std::exception();
        }
        if (pthread_detach(m_threads[i]))
        {
            // NOTE(review): threads created in earlier iterations are
            // already detached and still reference this half-built pool;
            // a failure here can leave them touching freed memory.
            delete[] m_threads;
            throw std::exception();
        }
    }
}
/* Destroy the pool. Fix: the original freed the thread array before
 * setting m_stop, and never woke workers blocked in m_queuestat.wait(),
 * so they could never observe the stop flag. Set the flag first, then
 * post the semaphore once per thread so every worker wakes, re-checks
 * m_stop, and exits its loop. */
template <typename T>
threadpool<T>::~threadpool()
{
    m_stop = true;
    for (int i = 0; i < m_thread_number; ++i)
    {
        m_queuestat.post();
    }
    delete[] m_threads;
    // NOTE(review): the detached workers are not joined; a worker still
    // running when the pool is destroyed would touch destroyed members —
    // callers must ensure the pool outlives all in-flight work.
}
/* Append a task to the request queue; returns false if the queue is
 * already full, true otherwise. Safe to call from any thread. */
template <typename T>
bool threadpool<T>::append(T *request)
{
    /* The work queue is shared by all threads, so it must only be
       touched while holding the queue lock. */
    m_queuelocker.lock();
    // Fix: use >= (with a matching unsigned type) so the queue never
    // exceeds m_max_requests; the original `>` admitted one extra entry
    // and compared signed against unsigned.
    if (m_workqueue.size() >= static_cast<std::size_t>(m_max_requests))
    {
        m_queuelocker.unlock();
        return false;
    }
    m_workqueue.push_back(request);
    m_queuelocker.unlock();
    // Post outside the lock so the woken worker can grab it immediately.
    m_queuestat.post();
    return true;
}
/* Static trampoline: pthread_create can only call a plain function, so
 * the pool instance is smuggled in through the void* argument and the
 * real loop runs in the member function run(). */
template <typename T>
void *threadpool<T>::worker(void *arg)
{
    threadpool *self = static_cast<threadpool *>(arg);
    self->run();
    return self;
}
/* Worker loop: sleep on the semaphore until append() publishes a task,
 * pop one request under the queue lock, then execute it with the lock
 * released. Runs until m_stop becomes true. */
template <typename T>
void threadpool<T>::run()
{
    while (!m_stop)
    {
        // Blocks until a task is available (or the pool is shut down).
        m_queuestat.wait();
        m_queuelocker.lock();
        if (m_workqueue.empty())
        {
            m_queuelocker.unlock();
            continue;
        }
        // Take one request while holding the lock; release it before
        // doing the (potentially slow) work.
        T *job = m_workqueue.front();
        m_workqueue.pop_front();
        m_queuelocker.unlock();
        if (job)
        {
            job->process();
        }
    }
}
#endif