接前面教程,日志类和tcp_server基类见(1)。
常见的IO并发-select、poll、epoll,当然,最常用的是epoll,因为其采用底层事件回调函数机制,将就绪状态IO加入到就绪链表,提高了并发性能。epoll接口使用主要是三步:epoll_create,epoll_ctl,epoll_wait。
epoll-IO并发服务tcp_server_epoll.h
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <fcntl.h>
#include <thread>
#include <pthread.h>
#include <string>
#include <mutex>
#include "tcp_server.h"
//using namespace std;
#define MAXLINE 4096
#define MAXEPOLL 10000
// Bundle handed to the epoll worker thread: the epoll instance created in
// EpollServer::start() plus the event storage the worker polls into.
struct threadEpoll{
int epoll_fd;                 // epoll instance descriptor (from epoll_create)
struct epoll_event *evs;      // array of MAXEPOLL entries filled by epoll_wait
struct epoll_event ev;        // scratch event passed to epoll_ctl(DEL) calls
};
// TCP server that multiplexes all client connections through a single
// edge-triggered epoll instance serviced by one worker thread.
// Implementation lives in tcp_server_epoll.cpp.
class EpollServer : public Server{
public:
// Forwards the listen address to the Server base; no extra state here.
EpollServer(string ip, int port):Server(ip, port){
}
// Worker-thread entry point; args must point to a threadEpoll struct.
static void *threadWorker(void *args);
// Creates/binds/listens the socket, spawns the worker, then accept-loops forever.
int start();
};
类的实现tcp_server_epoll.cpp
// C system headers
#include <errno.h>          // errno / EAGAIN / EINTR used throughout
#include <netinet/tcp.h>    // TCP_KEEPIDLE / TCP_KEEPINTVL / TCP_KEEPCNT
#include <pthread.h>
#include <sys/wait.h>
#include <unistd.h>         // close()
// C++ standard library
#include <mutex>
#include <string>
#include <thread>
// project
#include "tcp_server_epoll.h"
void *EpollServer::threadWorker(void * args){
char buff[MAXLINE];
//tcp info check socket info
struct tcp_info info;
int len = sizeof(info);
struct threadEpoll *t_epoll = (struct threadEpoll*) args;
while(true){
int fds = epoll_wait(t_epoll->epoll_fd,t_epoll->evs,MAXEPOLL,0);
if(fds > 0){
m_log->outputLog("epoll number of fds:%d",fds);
}
sleep(1);
for(int i=0;i<fds;i++){
int connfd = t_epoll->evs[i].data.fd;
//int n = recv(connfd,buff,MAXLINE,MSG_DONTWAIT);
while(1){
int n = recv(connfd,buff,MAXLINE,MSG_DONTWAIT);
if(n>0){
buff[n] = '\0';
m_log->outputLog("thread:%lu,connfd:%d,recv msg:%s",pthread_self(),connfd,buff);
}else{
if(errno == EINTR || errno == EAGAIN){
//getsockopt(connfd,IPPROTO_TCP,TCP_INFO,&info,(socklen_t *)&len);
//if(info.tcpi_state == TCP_ESTABLISHED){
m_log->outputLog("established recv error: %s, errno:%d",strerror(errno),errno);
//break;
}else{
//del connfd and close
if(epoll_ctl(t_epoll->epoll_fd,EPOLL_CTL_DEL,connfd,&t_epoll->ev) != 0){
m_log->outputLog("epoll_ctl error: %s, errno:%d",strerror(errno),errno);
//continue;
}
m_log->outputLog("close connfd: %d error: %s, errno:%d",connfd,strerror(errno),errno);
m_log->outputLog("recv error: %s, errno:%d",strerror(errno),errno);
close(connfd);
//break;
}
m_log->outputLog("thread:%lu,connfd:%d,recv ok",pthread_self(),connfd);
break;
}
}
}
}
return NULL;
}
// Creates the listening socket, registers every accepted connection with a
// single edge-triggered epoll instance, and accept-loops forever; events are
// consumed by the threadWorker thread. Returns 0 if setup fails (socket /
// bind / listen / epoll_create / pthread_create); never returns on success.
int EpollServer::start(){
    int sockfd,connfd;
    struct sockaddr_in cliaddr;
    if((sockfd = socket(AF_INET, SOCK_STREAM, 0)) < 0){
        m_log->outputLog("socket create failed: error %s, errno:%d",strerror(errno),errno);
        return 0;
    }
    m_log->outputLog("socket create %d", sockfd);
    // Allow immediate restart of the server without waiting out TIME_WAIT.
    int reuse = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, (void *)&reuse, sizeof(reuse));
    int ret = ::bind(sockfd, (struct sockaddr*) &addr, sizeof(addr));
    if(ret == -1){
        m_log->outputLog("bind socket error: %s, errno:%d",strerror(errno),errno);
        return 0;
    }
    if((listen(sockfd,10)) == -1){
        m_log->outputLog("listen error: %s, errno:%d",strerror(errno),errno);
        return 0;
    }
    int epoll_fd;
    // Zero-initialize: the old code copied an uninitialized ev into t_epoll.
    struct epoll_event ev = {};
    struct epoll_event evs[MAXEPOLL];
    struct threadEpoll t_epoll;
    if((epoll_fd = epoll_create(MAXEPOLL)) <= 0){
        m_log->outputLog("epoll create error: %s, errno:%d",strerror(errno),errno);
        return 0;
    }
    // t_epoll (and evs) stay valid for the worker's lifetime because this
    // function never returns once the accept loop starts.
    t_epoll.epoll_fd = epoll_fd;
    t_epoll.evs = evs;
    t_epoll.ev = ev;
    pthread_t threadworker;
    // pthread_create reports failure through its return value, not errno
    // (the old code logged "fork error" with a stale errno and kept going).
    int perr = pthread_create(&threadworker,NULL,threadWorker,(void*)&t_epoll);
    if(perr != 0){
        m_log->outputLog("pthread_create error:%s, error:%d", strerror(perr), perr);
        close(sockfd);
        return 0;
    }
    while(true){
        socklen_t len = sizeof(cliaddr);
        if((connfd = accept(sockfd,(struct sockaddr*)&cliaddr,&len)) == -1){
            m_log->outputLog("accept error: %s, errno:%d",strerror(errno),errno);
            continue;
        }
        // Edge-triggered epoll requires a non-blocking socket so the worker's
        // recv loop can drain until EAGAIN without ever blocking the thread.
        int flags = fcntl(connfd, F_GETFL, 0);
        if(flags != -1){
            fcntl(connfd, F_SETFL, flags | O_NONBLOCK);
        }
        // TCP keepalive: probe idle connections so dead peers are detected
        // and their resources reclaimed (see tcp(7)).
        int keepAlive = 1;      // enable keepalive probing
        int keepIdle = 60;      // seconds of idleness before the first probe
        int keepInterval = 5;   // seconds between probes
        int keepCount = 3;      // failed probes before the connection is dropped
        setsockopt(connfd, SOL_SOCKET, SO_KEEPALIVE, (void *)&keepAlive, sizeof(keepAlive));
        setsockopt(connfd, SOL_TCP, TCP_KEEPIDLE, (void*)&keepIdle, sizeof(keepIdle));
        setsockopt(connfd, SOL_TCP, TCP_KEEPINTVL, (void *)&keepInterval, sizeof(keepInterval));
        setsockopt(connfd, SOL_TCP, TCP_KEEPCNT, (void *)&keepCount, sizeof(keepCount));
        ev.events = EPOLLIN | EPOLLET;
        ev.data.fd = connfd;
        if(epoll_ctl(epoll_fd,EPOLL_CTL_ADD,connfd,&ev) != 0){
            m_log->outputLog("epoll_ctl error: %s, errno:%d",strerror(errno),errno);
            close(connfd); // the old code leaked the descriptor here
            continue;
        }
    }
    close(sockfd); // unreachable; kept for symmetry with the error paths
}
服务器测试程序test_server.cpp
#include "logger.h"
#include "tcp_server.h"
#include "tcp_server_thread.h"
#include "tcp_server_process.h"
#include "tcp_server_epoll.h"
#include "tcp_server_libevent.h"
#include "tcp_server_threadpoolevent.h"
#include <signal.h>
#include <sys/wait.h>
using namespace std;
logger *m_log = logger::get_instance();
int main(){
signal(SIGCHLD, &func_waitpid);
//logger *m_log = logger::get_instance();
m_log->openLogFile("./","test_server.log",true);
//Server *m_server = new Server("111.206.73.111",12345);
//Server *m_server = new ThreadServer("111.206.73.111",12345);
//Server *m_server = new ProcessServer("111.206.73.111",12345);
Server *m_server = new EpollServer("111.206.73.111",12345);
//Server *m_server = new poolEventServer("111.206.73.111",12345);
m_server->init(m_log);
m_server->start();
}
服务结构比较简单,我想强调的两点是:
1、通过setsockopt函数可以根据服务,对socket连接进行保活计时器的设置,可以有效清理无效的连接,释放资源。
2、通过读取socket数据,检测socket连接是否处于established状态的两种方法:1)对于接收端,如果接收返回的字节数小于0,那么,recv返回errno错误EAGAIN(非阻塞接收)和EINTR(中断信号),对于这两种情况可以认为连接是有效的,可以继续处理,其他情况认为连接异常可以关闭;2)可以通过getsockopt(connfd,IPPROTO_TCP,TCP_INFO,&info,(socklen_t *)&len)函数来获取连接真正的状态,如果连接状态是TCP_ESTABLISHED,那么连接是正常建立的,其他情况认为连接异常,可以关闭。