A Simple High-Concurrency Model for Linux: Epoll + Thread Pool

First comes locker.h, a small header that wraps a semaphore, a mutex, and a condition variable.

The thread pool's task queue must be protected by a mutex. When a task arrives in the queue, one thread blocked in pthread_cond_wait() is woken up; when the pool shuts down, all threads have to be woken, which is what pthread_cond_broadcast() is for.
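As background, the standard way to combine a mutex and a condition variable is to check the queue state inside the locked region and loop around pthread_cond_wait(). The sketch below only illustrates that general pattern; the names (wait_for_task, stop_all_workers) are hypothetical and it is not the exact structure of the classes that follow.

#include <pthread.h>
#include <queue>

static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  g_cond  = PTHREAD_COND_INITIALIZER;
static std::queue<int> g_tasks;
static bool g_stop = false;

int wait_for_task()   // returns -1 when the pool is shutting down
{
    pthread_mutex_lock(&g_mutex);
    while (g_tasks.empty() && !g_stop)         // re-check the predicate after every wakeup
        pthread_cond_wait(&g_cond, &g_mutex);  // atomically releases the mutex, re-locks on return
    int task = -1;
    if (!g_tasks.empty())
    {
        task = g_tasks.front();
        g_tasks.pop();
    }
    pthread_mutex_unlock(&g_mutex);
    return task;
}

void stop_all_workers()
{
    pthread_mutex_lock(&g_mutex);
    g_stop = true;
    pthread_mutex_unlock(&g_mutex);
    pthread_cond_broadcast(&g_cond);           // wake every waiting worker so it can exit
}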


locker.h:

#ifndef _LOCKER_H_
#define _LOCKER_H_

#include <pthread.h>
#include <stdio.h>
#include <semaphore.h>

/* semaphore wrapper */
class sem_locker
{
private:
    sem_t m_sem;

public:
    // initialize the semaphore
    sem_locker()
    {
	if(sem_init(&m_sem, 0, 0) != 0)
	    printf("sem init error\n");
    }
    // destroy the semaphore
    ~sem_locker()
    {
	sem_destroy(&m_sem);
    }

    // wait on the semaphore (P operation)
    bool wait()
    {
	return sem_wait(&m_sem) == 0;
    }
    // post the semaphore (V operation)
    bool add()
    {
	return sem_post(&m_sem) == 0;
    }
};


/* mutex locker */
class mutex_locker
{
private:
    pthread_mutex_t m_mutex;

public:
    mutex_locker()
    {
    	if(pthread_mutex_init(&m_mutex, NULL) != 0)
	    printf("mutex init error!");
    }
    ~mutex_locker()
    {
	pthread_mutex_destroy(&m_mutex);
    }

    bool mutex_lock()  //lock mutex
    {
	return pthread_mutex_lock(&m_mutex) == 0;
    }
    bool mutex_unlock()   //unlock
    {
	return pthread_mutex_unlock(&m_mutex) == 0;
    }
};

/* condition-variable locker */
class cond_locker
{
private:
    pthread_mutex_t m_mutex;
    pthread_cond_t m_cond;

public:
    // initialize m_mutex and m_cond
    cond_locker()
    {
	if(pthread_mutex_init(&m_mutex, NULL) != 0)
	    printf("mutex init error");
	if(pthread_cond_init(&m_cond, NULL) != 0)
	{   // if the condition variable fails to init, release the already-initialized mutex
	    pthread_mutex_destroy(&m_mutex);
	    printf("cond init error");
	}
    }
    // destroy mutex and cond
    ~cond_locker()
    {
	pthread_mutex_destroy(&m_mutex);
	pthread_cond_destroy(&m_cond);
    }
    // wait on the condition variable
    bool wait()
    {
	int ans = 0;
	pthread_mutex_lock(&m_mutex);
	ans = pthread_cond_wait(&m_cond, &m_mutex);
	pthread_mutex_unlock(&m_mutex);
	return ans == 0;
    }
    // wake one thread waiting on the condition variable
    bool signal()
    {
	return pthread_cond_signal(&m_cond) == 0;
    }

    // wake all threads waiting on the condition variable
    bool broadcast()
    {
            return pthread_cond_broadcast(&m_cond) == 0;
    }
};

#endif


thread_pool.h:

It creates threadnum threads and calls pthread_detach() to detach them, so each thread's resources are reclaimed automatically when it exits. (The thread pool in my previous post had a bug and was incomplete: when the pool shut down, not all threads exited cleanly.)
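As a side note, the same fire-and-forget behaviour can also be obtained by creating the threads already detached through a pthread_attr_t, instead of calling pthread_detach() after pthread_create(). A minimal standalone sketch, with a hypothetical spawn_detached helper that is not part of the pool below:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static void *detached_worker(void *arg)
{
    printf("worker %ld running\n", (long)(intptr_t)arg);
    return NULL;   // resources are reclaimed automatically when a detached thread returns
}

int spawn_detached(long id)
{
    pthread_t tid;
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);  // detached from the start
    int ret = pthread_create(&tid, &attr, detached_worker, (void *)(intptr_t)id);
    pthread_attr_destroy(&attr);
    return ret;    // 0 on success
}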


#ifndef _PTHREAD_POOL_
#define _PTHREAD_POOL_

#include "locker.h"
#include <queue>
#include <stdio.h>
#include <exception>
#include <new>        // std::nothrow
#include <errno.h>
#include <pthread.h>
#include <iostream>

template<class T>
class threadpool
{
private:
    int thread_number;  // number of threads in the pool
    //int max_task_number;  // maximum number of tasks in the queue
    pthread_t *all_threads;   // array of worker thread IDs
    std::queue<T *> task_queue; // task queue
    mutex_locker queue_mutex_locker;  // mutex protecting the task queue
    //sem_locker queue_sem_locker;   // semaphore (unused)
    cond_locker queue_cond_locker; // condition variable
    bool is_stop; // flag telling the workers to exit
public:
    threadpool(int thread_num = 20);
    ~threadpool();
    bool append_task(T *task);  // add a task
    void start();              // start the pool
    void stop();               // stop the pool
private:
    // function each worker thread runs; it just calls run()
    static void *worker(void *arg);
    void run();
    T *getTask();   // fetch a task
};

template <class T>
threadpool<T>::threadpool(int thread_num):
	thread_number(thread_num),is_stop(false), all_threads(NULL)
{       // constructor
    if(thread_num <= 0)
	printf("threadpool can't init because thread_number <= 0\n");

    all_threads = new (std::nothrow) pthread_t[thread_number];
    if(all_threads == NULL)
    	printf("can't init threadpool because the thread array can't be allocated\n");
}

template <class T>
threadpool<T>::~threadpool()
{
    stop();               // wake any waiting workers before tearing down
    delete []all_threads;
}

template <class T>
void threadpool<T>::stop() // stop the pool: set the flag and wake all workers
{
        is_stop = true;
        //queue_sem_locker.add();
        queue_cond_locker.broadcast();
}

template <class T>
void threadpool<T>::start()  // start the pool: create and detach the worker threads
{
    for(int i = 0; i < thread_number; ++i)
    {
	//printf("create the %dth pthread\n", i);
	if(pthread_create(all_threads + i, NULL, worker, this) != 0)
	{   // thread creation failed: release the allocated array and throw
	    delete []all_threads;
	    all_threads = NULL;    // avoid a double delete in the destructor
	    throw std::exception();
	}
	if(pthread_detach(all_threads[i]))
	{   // detaching failed: release the allocated array and throw
	    delete []all_threads;
	    all_threads = NULL;    // avoid a double delete in the destructor
	    throw std::exception();
	}
    }
}
// add a task to the task queue
template <class T>
bool threadpool<T>::append_task(T *task)   // add a task
{   // acquire the queue mutex
    queue_mutex_locker.mutex_lock();
    
    bool is_signal = task_queue.empty();  // only signal if the queue was empty
    // push the task into the queue
    task_queue.push(task);
    queue_mutex_locker.mutex_unlock();
    // wake one thread waiting for a task
    if(is_signal)
    {
            queue_cond_locker.signal();
    }
    return true;
}

template <class T>
void *threadpool<T>::worker(void *arg)  // worker thread entry: runs the pool's loop
{
    threadpool *pool = (threadpool *)arg;
    pool->run();
    return pool;
}

template <class T>
T* threadpool<T>::getTask()   // fetch a task from the queue; returns NULL if it is empty
{
    T *task = NULL;
    queue_mutex_locker.mutex_lock();
    if(!task_queue.empty())
    {
        task = task_queue.front();
        task_queue.pop();
    }
    queue_mutex_locker.mutex_unlock();
    return task;
}

template <class T>
void threadpool<T>::run()
{
    while(!is_stop){
        T *task = getTask();
        if(task == NULL)  // queue is empty: wait for a new task
                queue_cond_locker.wait();
        else              // run the task
                task->doit();
    }
    //for test
    //printf("exit%d\n", (unsigned long)pthread_self());
}

#endif
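Before moving on to the server, here is a quick standalone usage sketch of the pool. PrintTask is a hypothetical example type, not something defined later in this post; anything with a doit() member works as the template parameter.

#include "thread_pool.h"
#include <stdio.h>
#include <unistd.h>

class PrintTask
{
public:
    void doit()
    {
        printf("task run by thread %lu\n", (unsigned long)pthread_self());
    }
};

int main()
{
    threadpool<PrintTask> pool(4);   // 4 worker threads
    pool.start();

    PrintTask tasks[8];
    for (int i = 0; i < 8; ++i)
        pool.append_task(&tasks[i]); // the pool stores raw pointers; the caller owns the tasks

    sleep(1);                        // crude: give the detached workers time to drain the queue
    pool.stop();
    sleep(1);                        // and time to observe is_stop before the pool is destroyed
    return 0;
}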



Next comes a thin wrapper around epoll.

Strictly speaking, the BaseTask and Task classes inside EpollServer.h belong in their own headers (BaseTask.h and Task.h); they are kept here purely for convenience.

#ifndef _EPOLL_SERVER_H_
#define _EPOLL_SERVER_H_

#include <sys/socket.h>
#include <sys/types.h>
#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/wait.h>
#include <sys/epoll.h>
//#include <pthread.h>

#include "thread_pool.h"

#define MAX_EVENT 1024   // maximum number of epoll events returned per epoll_wait
#define MAX_BUFFER 2048  // maximum buffer size in bytes

class BaseTask
{
public:
	virtual void doit() = 0;
};

class Task : public BaseTask
{
private:
	int sockfd;
	char order[MAX_BUFFER];
public:
	Task(char *str, int fd) : sockfd(fd)
	{
		memset(order, '\0', MAX_BUFFER);
		strncpy(order, str, MAX_BUFFER - 1);   // bounded copy; order stays NUL-terminated
	}
	void doit()  // the task's work function
	{
		//do something of the order
		//printf("%s\n", order);
		snprintf(order, MAX_BUFFER - 1, "somedata\n");
		write(sockfd, order, strlen(order));
	}
};

class EpollServer
{
private:
	bool is_stop;   // flag that stops the epoll_wait loop
	int threadnum;   // number of worker threads
	int sockfd;     // listening fd
	int port;      // listening port
	int epollfd;    // epoll fd
	threadpool<BaseTask> *pool;   // pointer to the thread pool
	//char address[20];
	epoll_event events[MAX_EVENT];  // epoll event array
	struct sockaddr_in bindAddr;   // address the server binds to

public:  // constructors
	EpollServer()
	{}
	EpollServer(int ports, int thread) : is_stop(false) , threadnum(thread) ,
		port(ports), pool(NULL)
	{
	}
	~EpollServer()  // destructor
	{
		delete pool;
	}

	void init();

	void epoll();

	static int setnonblocking(int fd)  // set fd to non-blocking
	{
		int old_option = fcntl(fd, F_GETFL);
		int new_option = old_option | O_NONBLOCK;
		fcntl(fd, F_SETFL, new_option);
		return old_option;
	}

	static void addfd(int epollfd, int sockfd, bool oneshot)  // add a fd to epoll
	{   // oneshot decides whether EPOLLONESHOT is set, i.e. whether only one thread
	    // may handle the fd at a time; all reads happen in the main thread here,
	    // so every caller passes false
		epoll_event event;
		event.data.fd = sockfd;
		event.events = EPOLLIN | EPOLLET;
		if(oneshot)
		{
			event.events |= EPOLLONESHOT;
		}
		epoll_ctl(epollfd, EPOLL_CTL_ADD, sockfd, &event); // register the fd
		EpollServer::setnonblocking(sockfd);
	}

};

void EpollServer::init()   // set up the listening socket, the epoll instance and the thread pool
{
	bzero(&bindAddr, sizeof(bindAddr));
	bindAddr.sin_family = AF_INET;
	bindAddr.sin_port = htons(port);
	bindAddr.sin_addr.s_addr = htonl(INADDR_ANY);
        // create the listening socket
	sockfd = socket(AF_INET, SOCK_STREAM, 0);
	if(sockfd < 0)
	{
		printf("EpollServer socket init error\n");
		return;
	}
	int ret = bind(sockfd, (struct sockaddr *)&bindAddr, sizeof(bindAddr));
	if(ret < 0)
	{
		printf("EpollServer bind init error\n");
		return;
	}
	ret = listen(sockfd, 10);
	if(ret < 0)
	{
		printf("EpollServer listen init error\n");
		return;
	}
        //create Epoll
	epollfd = epoll_create(1024);
	if(epollfd < 0)
	{
		printf("EpollServer epoll_create init error\n");
		return;
	}
	pool = new threadpool<BaseTask>(threadnum);  // create the thread pool
}

void EpollServer::epoll()
{
	pool->start();   // start the thread pool
	// register the listening socket with epoll
	addfd(epollfd, sockfd, false);
	while(!is_stop)
	{   // block in epoll_wait until events arrive
		int ret = epoll_wait(epollfd, events, MAX_EVENT, -1);
		if(ret < 0)  // error handling
		{
			if(errno == EINTR)   // interrupted by a signal: just retry
				continue;
			printf("epoll_wait error\n");
			break;
		}
		for(int i = 0; i < ret; ++i)
		{
			int fd = events[i].data.fd;
			if(fd == sockfd)  // a new connection is arriving on the listening socket
			{
				struct sockaddr_in clientAddr;
				socklen_t len = sizeof(clientAddr);
				int confd = accept(sockfd, (struct sockaddr *)
					&clientAddr, &len);
				if(confd < 0)   // accept failed: skip this event
					continue;
				EpollServer::addfd(epollfd, confd, false);
			}
			else if(events[i].events & EPOLLIN)  // data is readable on a connected fd
			{
				char buffer[MAX_BUFFER];
		readagain:	memset(buffer, 0, sizeof(buffer));
				int ret = read(fd, buffer, MAX_BUFFER - 1);
				if(ret == 0)  // the peer closed the connection: remove it from epoll and close the fd
				{
					struct epoll_event ev;
					ev.events = EPOLLIN;
					ev.data.fd = fd;
					epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, &ev);
					shutdown(fd, SHUT_RDWR);
					close(fd);   // release the descriptor
					printf("%d logout\n", fd);
					continue;
				}
				else if(ret < 0)  // read error
				{
					if(errno == EINTR)  // interrupted by a signal: read again
					{
						goto readagain;
					}
					// EAGAIN/EWOULDBLOCK just means no more data for now;
					// wait for the next EPOLLIN event instead of spinning
				}
				else  // read succeeded: hand the data to the thread pool as a task
				{
					BaseTask *task = new Task(buffer, fd);
					pool->append_task(task);
				}
			}
			else
			{
				printf("something else had happened\n");
			}
		}
	}
	close(sockfd);  // done

	pool->stop();
}

#endif
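One caveat about the EPOLLIN branch above: the fds are registered with EPOLLET (edge-triggered) and set non-blocking, so a fully robust handler keeps calling read() until it returns EAGAIN/EWOULDBLOCK; otherwise data still sitting in the kernel buffer will not trigger another event. A minimal sketch of that drain loop, using a hypothetical drain_fd helper that EpollServer.h does not actually use:

#include <errno.h>
#include <unistd.h>

// Returns the number of bytes read (>= 0), or -1 on a real error.
// *peer_closed is set to 1 when read() returned 0, i.e. the other end shut down.
ssize_t drain_fd(int fd, char *buf, size_t buflen, int *peer_closed)
{
    ssize_t total = 0;
    *peer_closed = 0;
    while ((size_t)total < buflen - 1)
    {
        ssize_t n = read(fd, buf + total, buflen - 1 - total);
        if (n > 0)
            total += n;                    // keep reading until the socket is drained
        else if (n == 0)
        {
            *peer_closed = 1;              // EOF: the peer closed the connection
            break;
        }
        else if (errno == EINTR)
            continue;                      // interrupted by a signal: retry
        else if (errno == EAGAIN || errno == EWOULDBLOCK)
            break;                         // no more data for now
        else
            return -1;                     // real error
    }
    buf[total] = '\0';
    return total;
}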


Finally, a simple demo to test it all:

#include "EpollServer.h"

int main(int argc, char const *argv[])
{
	if(argc != 3)
	{
		printf("usage %s port threadnum\n", argv[0]);
		return -1;
	}
	int port = atoi(argv[1]);
	if(port == 0)
	{
		printf("port must be a positive integer\n");
		return -1;
	}
	int threadnum = atoi(argv[2]);
	if(threadnum == 0)
	{
		printf("threadnum must be a positive integer\n");
		return -1;
	}
	EpollServer *epoll = new EpollServer(port, threadnum);

	epoll->init();

	epoll->epoll();
	return 0;
}


The code compiles cleanly on Ubuntu. Next time I'll follow up with measurements of how much concurrency it can actually handle.
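For reference, assuming the demo above is saved as main.cpp next to the two headers, a single command along the lines of g++ -o epoll_server main.cpp -pthread should be enough to build it; the -pthread flag pulls in the pthread library.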

