This article is compiled from material found online.
According to Pattern-Oriented Software Architecture, Volume 2 (POSA2), thread pools come in two patterns: HS/HA (Half-Sync/Half-Async) and L/F (Leader/Followers).
Rather than discussing the concepts in the abstract, we go straight to examples.
1. HS/HA (Half-Sync/Half-Async) pattern
Also known as the producer/consumer pattern.
In the HS/HA pattern the queue holds plain data; producers sit at one end of the queue and consumers at the other.
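Before the concrete versions below, here is a minimal sketch of the pattern (an illustration of my own, not code from the original article): the asynchronous half pushes data into a shared queue, while the synchronous half blocks on a condition variable and consumes items one by one.
#include <pthread.h>
#include <queue>

static std::queue<int> g_queue;
static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  g_cond  = PTHREAD_COND_INITIALIZER;

// Producer (the "async" half): enqueue one item and wake a consumer.
void produce(int data)
{
    pthread_mutex_lock(&g_mutex);
    g_queue.push(data);
    pthread_cond_signal(&g_cond);
    pthread_mutex_unlock(&g_mutex);
}

// Consumer (the "sync" half): block until an item is available.
int consume()
{
    pthread_mutex_lock(&g_mutex);
    while (g_queue.empty())           // loop guards against spurious wakeups
        pthread_cond_wait(&g_cond, &g_mutex);
    int data = g_queue.front();
    g_queue.pop();
    pthread_mutex_unlock(&g_mutex);
    return data;
}
Every version in this section is a variation on this queue-in-the-middle structure.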
1.1 Original version
With raw multithreading, the application logic is tightly bound to the thread-concurrency strategy.
In a typical server program, a client request can carry many different logical commands, so a single thread function has to switch on the client's command code and dispatch the matching business logic.
Business logic and threading logic end up tightly coupled. The implementation is simple, but hard to extend:
// Thread entry: the business dispatch is hard-wired into the thread function.
int thread_main(int cmd_id, char *data)
{
    switch(cmd_id)
    {
    case 1:
        ...  // business logic for command 1
        break;
    case 2:
        ...  // business logic for command 2
        break;
    }
}
1.2 Plain C++ with a simple callback
The consumer becomes an active object that receives a callback function at construction time, which makes the consumer's processing logic configurable.
ThreadTest.cpp
#include <stdio.h>
#include <unistd.h>
#include "CThreadManager.h"
using namespace std;
// The function each worker thread executes for one work item.
int Count(int nWork)
{
    int nResult = nWork * nWork;
    printf("count result is %d\n", nResult);
    return 0;
}
int main() {
    // Create the thread manager, passing in the worker function
    // and the maximum number of threads.
    CThreadManager* pManager = new CThreadManager(Count, 3);
    // Put the numbers to be processed onto the work queue.
    pManager->PushWorkQue(5);
    pManager->PushWorkQue(20);
    // Post the semaphore once per item to wake the worker threads.
    pManager->PostSem();
    pManager->PostSem();
    // Crude wait for the worker threads to finish (demo only).
    sleep(1);
    return 0;
}
CThreadManager.h
#ifndef CTHREADMANAGER_H_
#define CTHREADMANAGER_H_
#include <stdio.h>
#include <list>
#include <queue>
#include <semaphore.h>
#include "CThread.h"
using namespace std;
class CThreadManager {
friend void* ManageFuction(void*);
private:
sem_t m_sem;                 // semaphore counting pending work items
pthread_mutex_t m_mutex;     // mutex protecting the work queue
queue<int> m_queWork;        // work queue
list<CThread*> m_lstThread;  // list of worker threads
int (*m_threadFuction)(int); // pointer to the worker function passed in from main
public:
CThreadManager(int (*threadFuction)(int), int nMaxThreadCnt);
virtual ~CThreadManager();
int WaitSem();
int PostSem();
int LockMutex();
int UnlockMutex();
void PushWorkQue(int nWork);
int PopWorkQue();
int RunThreadFunction(int nWork);
};
#endif /* CTHREADMANAGER_H_ */
CThreadManager.cpp
#include "CThreadManager.h"
// The worker-thread entry. It is only a shell that handles the
// semaphore and the mutex, and finally calls the worker function
// passed in from main to do the real business processing.
void* ManageFuction(void* argv)
{
    CThreadManager* pManager = (CThreadManager*)argv;
    // Loop forever: the threads are never destroyed, only reused.
    while(true)
    {
        // Block here until main posts the semaphore.
        pManager->WaitSem();
        printf("thread wakeup.\n");
        // Take one work item off the queue under the lock.
        pManager->LockMutex();
        int nWork = pManager->PopWorkQue();
        pManager->UnlockMutex();
        printf("call Count function.\n");
        pManager->RunThreadFunction(nWork);
    }
    return 0;
}
CThreadManager::CThreadManager(int (*threadFuction)(int), int nMaxThreadCnt) {
sem_init(&m_sem, 0, 0);
pthread_mutex_init(&m_mutex, NULL);
m_threadFuction = threadFuction;
for(int i=0; i<nMaxThreadCnt; i++)
{
CThread* pThread = new CThread(ManageFuction, this);
printf("thread started.\n");
m_lstThread.push_back(pThread);
}
}
CThreadManager::~CThreadManager()
{
    // Join the workers before destroying the primitives they use.
    // (In this demo the workers loop forever and main never deletes
    // the manager, so this destructor is not actually reached.)
    list<CThread*>::iterator it;
    for(it=m_lstThread.begin(); it!=m_lstThread.end(); it++)
    {
        (*it)->JoinThread();
        delete *it;
    }
    sem_destroy(&m_sem);
    pthread_mutex_destroy(&m_mutex);
}
// Wait on the semaphore.
int CThreadManager::WaitSem()
{
    return sem_wait(&m_sem);
}
// Post the semaphore.
int CThreadManager::PostSem()
{
    return sem_post(&m_sem);
}
// Acquire the lock.
int CThreadManager::LockMutex()
{
    return pthread_mutex_lock(&m_mutex);
}
// Release the lock.
int CThreadManager::UnlockMutex()
{
    return pthread_mutex_unlock(&m_mutex);
}
// Push a work item onto the work queue. Locked so that producers
// can safely run while consumers are draining the queue.
void CThreadManager::PushWorkQue(int nWork)
{
    LockMutex();
    m_queWork.push(nWork);
    UnlockMutex();
}
// Pop a work item off the queue (the caller must hold the lock).
int CThreadManager::PopWorkQue()
{
    int nWork = m_queWork.front();
    m_queWork.pop();
    return nWork;
}
// Invoke the worker function passed in from main.
int CThreadManager::RunThreadFunction(int nWork)
{
    return (*m_threadFuction)(nWork);
}
CThread.h
#ifndef CTHREAD_H_
#define CTHREAD_H_
#include <pthread.h>
class CThread {
private:
pthread_t m_thread; // the thread handle
public:
CThread(void* (*threadFuction)(void*),void* threadArgv);
virtual ~CThread();
void JoinThread();
};
#endif /* CTHREAD_H_ */
CThread.cpp
#include "CThread.h"
CThread::CThread(void* (*threadFuction)(void*), void* threadArgv) {
    // Initialize default thread attributes and start the thread.
    pthread_attr_t threadAttr;
    pthread_attr_init(&threadAttr);
    pthread_create(&m_thread, &threadAttr, threadFuction, threadArgv);
}
CThread::~CThread() {
}
void CThread::JoinThread()
{
    // Block until the thread terminates.
    pthread_join(m_thread, NULL);
}
1.3 C++ virtual-function version
Building on the simple callback version in 1.2, this version uses a virtual function to make the worker-thread handler configurable, as sketched below.
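The original article gives no code for this version, so here is a minimal sketch of the idea (class names are mine): the manager calls a virtual hook for each work item instead of a stored function pointer, and a subclass supplies the business logic by overriding it.
#include <stdio.h>

// The worker loop would call pManager->OnWork(nWork) where the 1.2
// version called RunThreadFunction(nWork).
class CThreadManagerBase
{
public:
    virtual ~CThreadManagerBase() {}
    // Override this instead of passing a callback into the constructor.
    virtual int OnWork(int nWork) = 0;
};

class CCountManager : public CThreadManagerBase
{
public:
    virtual int OnWork(int nWork)
    {
        printf("count result is %d\n", nWork * nWork);
        return 0;
    }
};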
1.4 C++ virtual functions plus a template
Building on the virtual-function version in 1.3, this version adds a template parameter so that the type of the data in the queue is also configurable.
Source code from a colleague of mine:
#include <pthread.h>
#include <unistd.h>
#include <time.h>
#include <list>
using namespace std;
template<typename _Ty>
class share_queue_thread
{
private:
list<_Ty> m_queue;
list<pthread_t> m_list_tid;
pthread_mutex_t m_mutex_obj;
pthread_mutex_t m_mutex_data;
pthread_cond_t m_cond;
bool b_running;
public:
share_queue_thread() : b_running(false)
{
pthread_mutex_init(&m_mutex_obj, NULL);
pthread_mutex_init(&m_mutex_data, NULL);
pthread_cond_init(&m_cond, NULL);
}
~share_queue_thread()
{
stop();
pthread_mutex_destroy(&m_mutex_obj);
pthread_mutex_destroy(&m_mutex_data);
pthread_cond_destroy(&m_cond);
}
private:
// Non-copyable: declared private and left undefined.
share_queue_thread(const share_queue_thread& ref);
share_queue_thread& operator=(const share_queue_thread& ref);
public:
virtual void on_thread_start(pthread_t id){ };
virtual void on_thread_exit(pthread_t id){ };
virtual void on_service_start_success(int nthread){ };
virtual void on_service_start_error(){ };
virtual void on_service_exit(){ };
virtual void on_process(const _Ty& obj, pthread_t id) = 0;
public:
static void* proc(void* arg)
{
share_queue_thread* obj = (share_queue_thread*)arg;
pthread_mutex_t* lp_mutex = &(obj->m_mutex_data);
pthread_cond_t* lp_cond = &(obj->m_cond);
list<_Ty>* lp_queue = &(obj->m_queue);
pthread_t tid = pthread_self();
while(obj->b_running)
{
pthread_mutex_lock(lp_mutex);
if(lp_queue->empty())
{
// Wait up to ~3 seconds for work, then loop around and
// re-check b_running so that stop() is noticed promptly.
struct timespec timeout;
timeout.tv_sec = time(NULL) + 3;
timeout.tv_nsec = 500000;
if(pthread_cond_timedwait(lp_cond, lp_mutex, &timeout))
{
pthread_mutex_unlock(lp_mutex);
continue;
}
if(lp_queue->empty())
{
pthread_mutex_unlock(lp_mutex);
continue;
}
}
_Ty data = lp_queue->front();
lp_queue->pop_front();
pthread_mutex_unlock(lp_mutex);
obj->on_process(data, tid);
}
return NULL;
}
public:
int start(int nthread)
{
pthread_mutex_lock(&m_mutex_obj);
if(b_running)
{
pthread_mutex_unlock(&m_mutex_obj);
return 1;
}
b_running = true;
for(int i=0; i<nthread; i++)
{
pthread_t tid;
if(pthread_create(&tid, NULL, &share_queue_thread::proc, (void*)this))
continue;
m_list_tid.push_back(tid);
on_thread_start(tid);
}
if(!m_list_tid.size())
{
b_running = false;
pthread_mutex_unlock(&m_mutex_obj);
on_service_start_error();
return 2;
}
on_service_start_success(m_list_tid.size());
pthread_mutex_unlock(&m_mutex_obj);
return 0;
}
int stop()
{
pthread_mutex_lock(&m_mutex_obj);
if(!b_running)
{
pthread_mutex_unlock(&m_mutex_obj);
return 1;
}
b_running = false;
while(!m_list_tid.empty())
{
pthread_t tid = m_list_tid.front();
m_list_tid.pop_front();
pthread_join(tid, NULL);
on_thread_exit(tid);
}
m_queue.clear();
pthread_mutex_unlock(&m_mutex_obj);
on_service_exit();
return 0;
}
public:
void push(const _Ty& obj)
{
if(!b_running)
return;
pthread_mutex_lock(&m_mutex_data);
// Signal only on the empty -> non-empty transition; idle threads
// also wake from the timed wait, so no wakeup is permanently lost.
if(m_queue.empty())
pthread_cond_signal(&m_cond);
m_queue.push_back(obj);
pthread_mutex_unlock(&m_mutex_data);
}
public:
int size()
{
// The queue is guarded by m_mutex_data (not m_mutex_obj),
// so that is the lock to take here.
pthread_mutex_lock(&m_mutex_data);
int n = m_queue.size();
pthread_mutex_unlock(&m_mutex_data);
return n;
}
};
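A minimal usage sketch (my own, assuming it is compiled together with the template above): derive from share_queue_thread, override the pure virtual on_process() with the business logic, then start the pool and push work into it.
#include <stdio.h>

class int_printer : public share_queue_thread<int>
{
public:
    // Runs on one of the pool threads, once per queued item.
    virtual void on_process(const int& obj, pthread_t id)
    {
        printf("thread %lu got %d\n", (unsigned long)id, obj);
    }
};

int main()
{
    int_printer svc;
    svc.start(3);            // spawn three consumer threads
    for(int i = 0; i < 10; i++)
        svc.push(i);         // producer side
    sleep(1);                // crude wait for the demo, as in 1.2
    svc.stop();              // joins all threads
    return 0;
}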
1.5 Shuo Chen's muduo implementation
This is essentially a Boost-flavored take on the plain C++ versions above:
the queue stores boost::function objects, each binding the handler together with the data it should process.
ThreadPool_test.cc
#include <muduo/base/ThreadPool.h>
#include <muduo/base/CountDownLatch.h>
#include <boost/bind.hpp>
#include <stdio.h>
void print()
{
printf("tid=%d\n", muduo::CurrentThread::tid());
}
void printString(const std::string& str)
{
printf("tid=%d, str=%s\n", muduo::CurrentThread::tid(), str.c_str());
}
int main()
{
muduo::ThreadPool pool("MainThreadPool");
pool.start(5);
pool.run(print);
pool.run(print);
for (int i = 0; i < 100; ++i)
{
char buf[32];
snprintf(buf, sizeof buf, "task %d", i);
pool.run(boost::bind(printString, std::string(buf)));
}
muduo::CountDownLatch latch(1);
pool.run(boost::bind(&muduo::CountDownLatch::countDown, &latch));
latch.wait();
pool.stop();
}
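Note the shutdown idiom here: the CountDownLatch::countDown task is queued after the 100 print tasks, so latch.wait() returns only after the queue has drained past them, and only then is stop() called to join the pool threads.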
ThreadPool.h
// Use of this source code is governed by a BSD-style license
// that can be found in the License file.
//
// Author: Shuo Chen (chenshuo at chenshuo dot com)
#ifndef MUDUO_BASE_THREADPOOL_H
#define MUDUO_BASE_THREADPOOL_H
#include <muduo/base/Condition.h>
#include <muduo/base/Mutex.h>
#include <muduo/base/Thread.h>
#include <muduo/base/Types.h>
#include <boost/function.hpp>
#include <boost/noncopyable.hpp>
#include <boost/ptr_container/ptr_vector.hpp>
#include <deque>
namespace muduo
{
class ThreadPool : boost::noncopyable
{
public:
typedef boost::function<void ()> Task;
explicit ThreadPool(const string& name = string());
~ThreadPool();
void start(int numThreads);
void stop();
void run(const Task& f);
private:
void runInThread();
Task take();
MutexLock mutex_;
Condition cond_;
string name_;
boost::ptr_vector<muduo::Thread> threads_;
std::deque<Task> queue_;
bool running_;
};
}
#endif
ThreadPool.cc
// Use of this source code is governed by a BSD-style license
// that can be found in the License file.
//
// Author: Shuo Chen (chenshuo at chenshuo dot com)
#include <muduo/base/ThreadPool.h>
#include <muduo/base/Exception.h>
#include <boost/bind.hpp>
#include <assert.h>
#include <stdio.h>
using namespace muduo;
ThreadPool::ThreadPool(const string& name)
: mutex_(),
cond_(mutex_),
name_(name),
running_(false)
{
}
ThreadPool::~ThreadPool()
{
}
void ThreadPool::start(int numThreads)
{
assert(threads_.empty());
running_ = true;
threads_.reserve(numThreads);
for (int i = 0; i < numThreads; ++i)
{
char id[32];
snprintf(id, sizeof id, "%d", i);
threads_.push_back(new muduo::Thread(
boost::bind(&ThreadPool::runInThread, this), name_+id));
threads_[i].start();
}
}
void ThreadPool::stop()
{
running_ = false;
cond_.notifyAll();
for_each(threads_.begin(),
threads_.end(),
boost::bind(&muduo::Thread::join, _1));
}
void ThreadPool::run(const Task& task)
{
if (threads_.empty())
{
task();
}
else
{
MutexLockGuard lock(mutex_);
queue_.push_back(task);
cond_.notify();
}
}
ThreadPool::Task ThreadPool::take()
{
MutexLockGuard lock(mutex_);
while (queue_.empty() && running_)
{
cond_.wait();
}
Task task;
if(!queue_.empty())
{
task = queue_.front();
queue_.pop_front();
}
return task;
}
void ThreadPool::runInThread()
{
try
{
while (running_)
{
Task task(take());
if (task)
{
task();
}
}
}
catch (const Exception& ex)
{
fprintf(stderr, "exception caught in ThreadPool %s\n", name_.c_str());
fprintf(stderr, "reason: %s\n", ex.what());
fprintf(stderr, "stack trace: %s\n", ex.stackTrace());
abort();
}
catch (const std::exception& ex)
{
fprintf(stderr, "exception caught in ThreadPool %s\n", name_.c_str());
fprintf(stderr, "reason: %s\n", ex.what());
abort();
}
catch (...)
{
fprintf(stderr, "unknown exception caught in ThreadPool %s\n", name_.c_str());
abort();
}
}
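The shutdown handshake deserves a close look: stop() flips running_ to false and calls notifyAll(), waking every thread blocked in take(). Each woken thread finds the queue empty, so take() returns a default-constructed (empty) Task; runInThread() skips it via the if (task) check, re-tests running_, and falls out of its loop, at which point the for_each in stop() can join each thread.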
Thread.h
#ifndef MUDUO_BASE_THREAD_H
#define MUDUO_BASE_THREAD_H
#include <muduo/base/Atomic.h>
#include <muduo/base/Types.h>
#include <boost/function.hpp>
#include <boost/noncopyable.hpp>
#include <pthread.h>
namespace muduo
{
class Thread : boost::noncopyable
{
public:
typedef boost::function<void ()> ThreadFunc;
explicit Thread(const ThreadFunc&, const string& name = string());
~Thread();
void start();
void join();
bool started() const { return started_; }
// pthread_t pthreadId() const { return pthreadId_; }
pid_t tid() const { return tid_; }
const string& name() const { return name_; }
static int numCreated() { return numCreated_.get(); }
private:
static void* startThread(void* thread);
void runInThread();
bool started_;
pthread_t pthreadId_;
pid_t tid_;
ThreadFunc func_;
string name_;
static AtomicInt32 numCreated_;
};
namespace CurrentThread
{
pid_t tid();
const char* name();
bool isMainThread();
}
}
#endif
Thread.cc
#include <muduo/base/Thread.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/unistd.h>
namespace muduo
{
namespace CurrentThread
{
__thread const char* t_threadName = "unknown";
}
namespace detail
{
__thread pid_t t_cachedTid = 0;
pid_t gettid()
{
return static_cast<pid_t>(::syscall(SYS_gettid));
}
void afterFork()
{
t_cachedTid = gettid();
muduo::CurrentThread::t_threadName = "main";
// no need to call pthread_atfork(NULL, NULL, &afterFork);
}
class ThreadNameInitializer
{
public:
ThreadNameInitializer()
{
muduo::CurrentThread::t_threadName = "main";
pthread_atfork(NULL, NULL, &afterFork);
}
};
ThreadNameInitializer init;
}
}
using namespace muduo;
using namespace muduo::detail;
pid_t CurrentThread::tid()
{
if (t_cachedTid == 0)
{
t_cachedTid = gettid();
}
return t_cachedTid;
}
const char* CurrentThread::name()
{
return t_threadName;
}
bool CurrentThread::isMainThread()
{
return tid() == ::getpid();
}
AtomicInt32 Thread::numCreated_;
Thread::Thread(const ThreadFunc& func, const string& n)
: started_(false),
pthreadId_(0),
tid_(0),
func_(func),
name_(n)
{
numCreated_.increment();
}
Thread::~Thread()
{
}
void Thread::start()
{
assert(!started_);
started_ = true;
pthread_create(&pthreadId_, NULL, &startThread, this);
}
void Thread::join()
{
assert(started_);
pthread_join(pthreadId_, NULL);
}
void* Thread::startThread(void* obj)
{
Thread* thread = static_cast<Thread*>(obj);
thread->runInThread();
return NULL;
}
void Thread::runInThread()
{
tid_ = CurrentThread::tid();
muduo::CurrentThread::t_threadName = name_.c_str();
func_();
muduo::CurrentThread::t_threadName = "finished";
}
2. L/F (Leader/Followers) pattern
One thread is the leader; the remaining threads wait in the pool as followers. When a request arrives, the leader picks it up, promotes one of the followers to be the new leader, and then goes on to process the request itself.
Pros:
+ Better performance, because the request is handled without a thread-to-thread context switch.
Cons:
- Bursts of clients are harder to absorb, because there is not necessarily an explicit queueing layer.
- The implementation is more complex.
Here is a simple implementation found online (apparently excerpted from the spserver project's SP_LFServer):
/**
 * @brief Start the thread pool.
 */
int SP_LFServer::run()
{
    //......
    mThreadPool = new SP_ThreadPool(mMaxThreads);
    for(int i = 0; i < mMaxThreads; i++)
    {
        mThreadPool->dispatch(lfHandler, this);
    }
    //......
}
/**
 * @brief Start a single thread, passing in the worker-function pointer.
 */
typedef void* (*CALLBACK)(void*);
void SP_ThreadPool::dispatch( CALLBACK cb, void * arg )
{
    pthread_t tid;
    pthread_create(&tid, NULL, cb, arg);
}
/**
 * @brief The thread main function.
 */
void * SP_LFServer::lfHandler(void * arg)
{
    SP_LFServer * server = (SP_LFServer*) arg;
    for(; 0 == server->mIsShutdown;)
    {
        /* follower begin */
        server->handleOneEvent();
    }
    return NULL;
}
/**
 * @brief The processing logic inside a single thread.
 */
void SP_LFServer::handleOneEvent()
{
    SP_Task * task = NULL;
    SP_Message * msg = NULL;
    /* follower wait: only the thread that gets the mutex becomes leader */
    pthread_mutex_lock(&mMutex);
    /* follower end */
    /* leader begin */
    for(; 0 == mIsShutdown && NULL == task && NULL == msg;)
    {
        if(mEventArg->getInputResultQueue()->getLength() > 0)
        {
            // a request is ready to be processed
            task = (SP_Task*) mEventArg->getInputResultQueue()->pop();
        }
        else if(mEventArg->getOutputResultQueue()->getLength() > 0)
        {
            // a response is ready to be sent
            msg = (SP_Message*) mEventArg->getOutputResultQueue()->pop();
        }
        if(NULL == task && NULL == msg)
        {
            // nothing to do yet: run one iteration of the event loop
            event_base_loop(mEventArg->getEventBase(), EVLOOP_ONCE);
        }
    }
    /* leader end */
    /* unlocking promotes the next waiting follower to leader */
    pthread_mutex_unlock(&mMutex);
    /* worker begin */
    if(NULL != task)
        task->run();
    if(NULL != msg)
        mCompletionHandler->completionMessage(msg);
    /* worker end */
}
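Note that this implementation needs no explicit leader-election code: the mutex itself plays that role. Whichever thread acquires mMutex is the leader and runs the event loop; once it has dequeued a task or a message, releasing the mutex promotes one of the blocked followers to leader, while the old leader goes on to process its item as a worker. Because the request is handled by the very thread that detected it, there is no handoff between threads, which is exactly where the pattern's performance advantage comes from.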