netstat -anop | grep 9999 监听端口9999
应用层有多少个fd，对应的epoll集合中就得有多少个fd，一一对应
阻塞io和非阻塞io
阻塞 是等待某个条件满足
int flags = fcntl(sockfd, F_GETFL, 0); // read the fd's current status flags
flags |= O_NONBLOCK; // add the non-blocking flag without disturbing the others
fcntl(sockfd, F_SETFL, flags); // write the modified flags back to the fd
accept();出现新客户端连接时处于非阻塞 未有新的连接前处于阻塞 程序停止不往下走
recv();接收函数 在没有连接的客户端时处于阻塞
单次收发
struct sockaddr_in clientaddr;
socklen_t len = sizeof(clientaddr);
int clientfd = accept(sockfd, (struct sockaddr*)&clientaddr, &len);
char buffer[BUFFER_LENGTH] = {0};
// receive once from the client
int ret = recv(clientfd, buffer, BUFFER_LENGTH, 0);
// echo the bytes back (demo only: ret may be <= 0 on error/close and is not checked)
send(clientfd, buffer, ret, 0);
反复收发 缺点：第二个客户端可以连接，但它发送的数据无法被接收（主循环阻塞在第一个客户端的recv上）
// demo: accept one client per iteration, then a single recv/send —
// a second client can connect but its data is never serviced
struct sockaddr_in clientaddr;
socklen_t len = sizeof(clientaddr);
while (1) { // master loop
int clientfd = accept(sockfd, (struct sockaddr*)&clientaddr, &len);
char buffer[BUFFER_LENGTH] = {0};
int ret = recv(clientfd, buffer, BUFFER_LENGTH, 0);
printf("ret: %d, buffer: %s\n", ret, buffer);
send(clientfd, buffer, ret, 0);
}
使用多线程 实现 多客户端同时发送接收数据 一请求 一线程 但并发量很难变庞大
struct sockaddr_in clientaddr;
socklen_t len = sizeof(clientaddr);
while (1) { // master: accept loop, one thread per connection
    int clientfd = accept(sockfd, (struct sockaddr*)&clientaddr, &len);
    if (clientfd < 0) continue; // accept failed (e.g. EINTR) — keep serving
    // BUGFIX: the original passed &clientfd, a stack variable overwritten by
    // the next accept(), so a fast second connection could change the value
    // before the new thread read it. Hand each thread its own heap copy.
    int *fdcopy = malloc(sizeof *fdcopy);
    if (fdcopy == NULL) { close(clientfd); continue; }
    *fdcopy = clientfd;
    pthread_t threadid;
    // args: thread handle out-param, default attributes, entry function, its argument
    if (pthread_create(&threadid, NULL, client_thread, fdcopy) != 0) {
        free(fdcopy);
        close(clientfd);
        continue;
    }
    pthread_detach(threadid); // never joined — detach so thread resources are reclaimed
    // NOTE(review): client_thread reads *(int*)arg; the small heap copy leaks
    // unless client_thread is also updated to free(arg) before exiting.
}
// 1 connection 1 thread
// 1 connection 1 thread
// Per-client worker: echoes everything received until the peer closes the
// connection or a socket error occurs, then closes the fd and exits.
// arg points to an int holding the client fd (see the accept loop).
void *client_thread(void *arg) {
    int clientfd = *(int*)arg;
    while (1) { // slave: per-client echo loop
        char buffer[BUFFER_LENGTH] = {0};
        int ret = recv(clientfd, buffer, BUFFER_LENGTH, 0);
        if (ret <= 0) { // 0: orderly shutdown by peer; <0: socket error
            // BUGFIX: the original only handled ret == 0; on a recv error it
            // kept looping and called send() with a negative length.
            close(clientfd);
            break;
        }
        printf("ret: %d, buffer: %s\n", ret, buffer);
        send(clientfd, buffer, ret, 0);
    }
    return NULL; // BUGFIX: falling off the end of a non-void function is UB
}
TCP服务如何写
绑定服务器的IP地址和端口
int sockfd = socket(AF_INET, SOCK_STREAM, 0); // TCP (stream) socket; sockfd is the listening io handle
struct sockaddr_in servaddr;
memset(&servaddr, 0, sizeof(struct sockaddr_in)); // zero every field before filling in
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = htonl(INADDR_ANY); // 0.0.0.0 — accept connections on any local address
servaddr.sin_port = htons(9999);
// bind the address/port to the listening socket
if (-1 == bind(sockfd, (struct sockaddr*)&servaddr, sizeof(struct sockaddr))) {
printf("bind failed: %s", strerror(errno));
return -1;
}
一个客户端对应一个FD
// every new client connection gets its own brand-new fd
clientfd = accept(sockfd, (struct sockaddr*)&clientaddr, &len);
// accept blocks until a client connects, then execution continues
网络io如何管理
io多路复用
1. select (数量有限)
缺点 需要提前设置io的集合 然后拷贝到内核中
循环判断集合中的io
io的数量有限(数组构成数量存储
//select(maxfd+1, rfds, wfds, efds, timeout);
// maxfd+1: highest watched fd + 1; rfds/wfds/efds: readable/writable/error sets; timeout: NULL blocks forever
// fd_set is a fixed-size bitmap (FD_SETSIZE, typically 1024) — this caps how many fds select can manage
fd_set rfds, rset; // rfds: master set; rset: per-iteration working copy (select mutates its argument)
FD_ZERO(&rfds); // start with an empty set
FD_SET(sockfd, &rfds); // watch the listening socket
int maxfd = sockfd;
int clientfd = 0;
while (1) { // master loop: poll every watched io
rset = rfds; // select overwrites the set it is given, so hand it a copy
// returns how many fds have a pending event
int nready = select(maxfd+1, &rset, NULL, NULL, NULL);
if (nready < 0) continue; // BUGFIX: an interrupted/failed select must not fall through
if (FD_ISSET(sockfd, &rset)) { // listener readable => pending connection
clientfd = accept(sockfd, (struct sockaddr*)&clientaddr, &len);
if (clientfd >= 0) { // BUGFIX: only register successfully accepted fds
printf("accept: %d\n", clientfd);
FD_SET(clientfd, &rfds);
// track the largest fd so select() scans far enough
if (clientfd > maxfd) maxfd = clientfd;
}
if (-- nready == 0) continue; // nothing else ready this round
}
int i = 0;
for (i = sockfd+1; i <= maxfd;i ++) {
// find which client fds fired
if (FD_ISSET(i, &rset)) {
char buffer[BUFFER_LENGTH] = {0};
int ret = recv(i, buffer, BUFFER_LENGTH, 0);
if (ret <= 0) { // peer closed (0) or socket error (<0)
FD_CLR(i, &rfds); // BUGFIX: stop watching a closed fd, otherwise select keeps failing with EBADF
close(i);
continue; // BUGFIX: was `break`, which skipped the remaining ready fds
}
printf("ret: %d, buffer: %s\n", ret, buffer);
send(i, buffer, ret, 0);
}
}
}
2. epoll
不删除io会一直存在
水平触发:io有数据就触发 适合大块数据传输 不会丢数据 一般会套用循环用来读完数据
边沿触发:io从无数据到有数据只触发一次 适合小规模数据块传输 数据能一次性读完
epoll 默认是水平触发（LT）；需要边沿触发时必须显式设置 EPOLLET
// network io managed by epoll instead of select/poll
int epfd = epoll_create(1);//1000 // any value > 0 works on modern kernels; historically the size argument was only a capacity hint
struct epoll_event ev;
ev.events = EPOLLIN; // event of interest: readable
ev.data.fd = sockfd;
epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev); // register the listening socket with the epoll instance
struct epoll_event events[1024] = {0};// output buffer epoll_wait fills with ready events
while (1) { // mainloop
// returns the number of events that need handling
int nready = epoll_wait(epfd, events, 1024, -1); //-1: block forever; 0: return immediately
if (nready < 0) continue;
// walk the ready-event list
int i = 0;
for (i = 0;i < nready;i ++) {
int connfd = events[i].data.fd; // the fd this event belongs to
if (sockfd == connfd) { // event on the listener => new connection
int clientfd = accept(sockfd, (struct sockaddr*)&clientaddr, &len);
if (clientfd <= 0) {
continue;
}
printf(" clientfd: %d\n", clientfd);
ev.events = EPOLLIN | EPOLLET; // client fds use edge-triggered mode
ev.data.fd = clientfd; // tag the event with the new client fd
epoll_ctl(epfd, EPOLL_CTL_ADD, clientfd, &ev);
} else if (events[i].events & EPOLLIN) { // readable client fd
char buffer[10] = {0};
short len = 0;
// demo: read a 2-byte length header first
// NOTE(review): this recv's return value is unchecked — a short or failed read leaves len garbage
recv(connfd, &len, 2, 0);
len = ntohs(len);
int n = recv(connfd, buffer, 10, 0);// then read up to 10 payload bytes per event
if (n > 0) {
printf("recv : %s\n", buffer);
send(connfd, buffer, n, 0);
} else if (n == 0) {
printf("close\n");
epoll_ctl(epfd, EPOLL_CTL_DEL, connfd, NULL); // unregister before closing
close(connfd);
}
}
}
}
3. poll
相对于 select
struct pollfd fds[POLL_SIZE] = {0};
// NOTE: fds is indexed by the fd number itself, so slot i describes fd i
fds[sockfd].fd = sockfd;
fds[sockfd].events = POLLIN; // interested in readability
int maxfd = sockfd;
int clientfd = 0;
while (1) {
int nready = poll(fds, maxfd+1, -1);
// revents is filled in by the kernel with the events that actually fired
if (fds[sockfd].revents & POLLIN) {
// listener readable => accept and register the new client's slot
clientfd = accept(sockfd, (struct sockaddr*)&clientaddr, &len);
printf("accept: %d\n", clientfd);
fds[clientfd].fd = clientfd;
fds[clientfd].events = POLLIN;
if (clientfd > maxfd) maxfd = clientfd;
if (-- nready == 0) continue;
}
// scan every registered slot for fds with pending data
int i = 0;
for (i = 0;i <= maxfd;i ++) {
if (fds[i].revents & POLLIN) {
char buffer[BUFFER_LENGTH] = {0};
int ret = recv(i, buffer, BUFFER_LENGTH, 0);
if (ret == 0) {
// peer closed: clear the slot so poll stops watching it
fds[i].fd = -1;
fds[i].events = 0;
close(i);
break; // NOTE(review): `continue` would be safer — break skips the other ready fds this round
}
printf("ret: %d, buffer: %s\n", ret, buffer);
send(i, buffer, ret, 0);
}
}
}
io事件管理--reactor
1. 事件驱动 EPOLLIN/EPOLLOUT
2. 回调函数 listenfd--->accept_cb (包含clientfd
clientfd--->recv_cb (recv_cb/send_cb两者同级
clientfd--->send_cb
3.业务网络相隔离
4. EPOLL ---->mainloop
//函数指针
// callback signature shared by the accept/recv/send event handlers
typedef int (*ZVCALLBACK)(int fd, int events, void *arg);
// per-connection state ("a" in the c-contains-b-contains-a layering below)
typedef struct zv_connect_s {
int fd;
// callback invoked when an event fires on this fd
ZVCALLBACK cb;
char rbuffer[BUFFER_LENGTH]; // read channel
int rc; // bytes currently held in rbuffer
int count;// how many bytes the next recv should request
char wbuffer[BUFFER_LENGTH]; // write channel
int wc; // bytes queued in wbuffer waiting to be sent
char resource[BUFFER_LENGTH];
int enable_sendfile;
struct zv_kvstore_s *kvheader;
} zv_connect_t;
// connection block: a fixed-size array of connections, chained as a linked list ("b")
typedef struct zv_connblock_s {
zv_connect_t *block;
struct zv_connblock_s *next;
} zv_connblock_t;
// the reactor ("c"): owns the epoll instance and the chain of connection blocks
typedef struct zv_reactor_s {
int epfd;
int blkcnt; // number of blocks in the chain
zv_connblock_t *blockheader; // head of the block list
} zv_reactor_t;
//初始化 一分配就有一释放
// Initialize the reactor: allocate the first connection block (header plus
// EVENT_LENGTH connection slots in a single allocation) and create the epoll
// instance. Returns 0 on success, -1 on failure. Pair every successful call
// with zv_dest_reactor().
int zv_init_reactor(zv_reactor_t *reactor) {
    if (!reactor) return -1;
    // one malloc carries both the block header and its connection array
    reactor->blockheader = malloc(sizeof(zv_connblock_t) + EVENT_LENGTH * sizeof(zv_connect_t));
    if (reactor->blockheader == NULL) return -1;
    // the connection array lives immediately after the header struct
    reactor->blockheader->block = (zv_connect_t*)(reactor->blockheader + 1);
    reactor->blkcnt = 1;
    reactor->blockheader->next = NULL;
    reactor->epfd = epoll_create(1);
    if (reactor->epfd < 0) { // BUGFIX: epoll_create can fail; don't leak the block
        free(reactor->blockheader);
        reactor->blockheader = NULL;
        return -1;
    }
    return 0; // BUGFIX: original fell off the end of a non-void function (UB when the value is used)
}
//释放 一分配就有一释放
// Release everything zv_init_reactor() acquired — one free per allocation.
void zv_dest_reactor(zv_reactor_t *reactor) {
    if (!reactor) return ;
    // BUGFIX: the original tested `if (!reactor->blockheader) free(...)` —
    // the condition was inverted, so the block was NEVER freed (a leak on
    // every reactor). free(NULL) is a no-op, so no guard is needed at all.
    free(reactor->blockheader);
    reactor->blockheader = NULL; // defend against double-destroy
    // NOTE(review): only the head block is freed; if additional blocks are
    // ever chained via ->next, they would need to be walked and freed too.
    close(reactor->epfd);
}
// Write-ready callback: flush the connection's staged write buffer, then
// switch the fd back to read mode so the echo cycle continues.
// Returns 0.
int send_cb(int fd, int event, void *arg) {
    zv_reactor_t *reactor = (zv_reactor_t*)arg;
    zv_connect_t *conn = zv_connect_idx(reactor, fd);
    //zv_http_response(conn); // disabled: plain echo build, no http/ftp handling
    // echo: send back whatever recv_cb queued
    // NOTE(review): a short send is not handled — fewer than wc bytes may go out
    send(fd, conn->wbuffer, conn->wc, 0); // send header
#if 0
    if (conn->enable_sendfile) { // sendbody
        int filefd = open(conn->resource, O_RDONLY);
        if (filefd == -1) {
            printf("errno: %d\n", errno);
            return -1;
        }
        struct stat stat_buf;
        fstat(filefd, &stat_buf);
        int ret = sendfile(fd, filefd, NULL, stat_buf.st_size); // sendbody
        if (ret == -1) {
            printf("errno: %d\n", errno);
        }
        close(filefd);
    }
#endif
    // hand the fd back to the read path
    conn->cb = recv_cb;
    struct epoll_event ev;
    ev.events = EPOLLIN; // watch for readability again
    ev.data.fd = fd;
    epoll_ctl(reactor->epfd, EPOLL_CTL_MOD, fd, &ev);
    return 0; // BUGFIX: original fell off the end of a non-void function
}
// Read-ready callback: pull bytes into rbuffer, stage an echo into wbuffer,
// and flip the fd to write mode so send_cb runs next.
// Returns 0, or -1 when the peer has closed the connection.
int recv_cb(int fd, int event, void *arg) {
    zv_reactor_t *reactor = (zv_reactor_t*)arg;
    zv_connect_t *conn = zv_connect_idx(reactor, fd);
    int ret = recv(fd, conn->rbuffer, conn->count, 0);
    if (ret < 0) {
        // transient error (e.g. EAGAIN on a nonblocking fd): leave the
        // connection registered and wait for the next event
    } else if (ret == 0) {
        // peer closed: reset the slot...
        conn->fd = -1;
        conn->rc = 0;
        conn->wc = 0;
        // ...then unregister from epoll and release the fd
        epoll_ctl(reactor->epfd, EPOLL_CTL_DEL, fd, NULL);
        close(fd);
        return -1;
    } else { //ret > 0
        //conn->rc += ret; // accumulate mode used by the web/ftp build
        conn->rc = ret;
        printf("rbuffer: %s, ret: %d\n", conn->rbuffer, conn->rc);
        // BUGFIX: the original called memset(wbuffer, BUFFER_LENGTH, 0) with
        // the fill byte and size swapped — a zero-length memset — so stale
        // bytes from a previous longer message could leak into replies.
        memset(conn->wbuffer, 0, BUFFER_LENGTH);
        // echo: stage the received bytes for send_cb
        memcpy(conn->wbuffer, conn->rbuffer, conn->rc);
        conn->wc = conn->rc;
        //zv_http_request(conn); // disabled: plain echo server, no http parsing
        conn->cb = send_cb;
        // ask epoll to report when the fd is writable
        struct epoll_event ev;
        ev.events = EPOLLOUT;
        ev.data.fd = fd;
        epoll_ctl(reactor->epfd, EPOLL_CTL_MOD, fd, &ev);
    }
    return 0; // BUGFIX: original fell off the end of a non-void function
}
//设置存储每个连接对应的信息
// Listener callback: accept the pending connection, initialize its
// zv_connect_t slot, and register the new fd with epoll for reading.
// Returns 0 on success, -1 if accept fails.
int accept_cb(int fd, int events, void *arg) {
    struct sockaddr_in clientaddr;
    socklen_t len = sizeof(clientaddr);
    // accept returns a brand-new fd dedicated to this client
    int clientfd = accept(fd, (struct sockaddr*)&clientaddr, &len);
    if (clientfd < 0) {
        printf("accept errno: %d\n", errno);
        return -1;
    }
    printf(" clientfd: %d\n", clientfd);
    zv_reactor_t *reactor = (zv_reactor_t*)arg;
    zv_connect_t *conn = zv_connect_idx(reactor, clientfd);
    conn->fd = clientfd;
    conn->cb = recv_cb; // the first event on a fresh connection is a read
    conn->count = BUFFER_LENGTH; // how many bytes the next recv may pull
    //conn->kvheader = malloc(sizeof(zv_kvstore_t));
    //init_kvpair(conn->kvheader);
    struct epoll_event ev;
    ev.events = EPOLLIN;
    ev.data.fd = clientfd;
    epoll_ctl(reactor->epfd, EPOLL_CTL_ADD, clientfd, &ev);
    return 0; // BUGFIX: original fell off the end of a non-void function
}
//封装当前的端口fd和端口事件
// Register a listening fd with the reactor: record its slot and callback in
// the head connection block, then add it to the epoll set.
// Returns 0 on success, -1 on bad arguments.
// NOTE(review): indexes block[fd] directly, so this assumes fd < EVENT_LENGTH.
int set_listener(zv_reactor_t *reactor, int fd, ZVCALLBACK cb) {
    if (!reactor || !reactor->blockheader) return -1;
    // listening fds are created early (small numbers), so they land in the head block
    reactor->blockheader->block[fd].fd = fd;
    // wire the event -> callback dispatch (accept_cb for listeners)
    reactor->blockheader->block[fd].cb = cb;
    // hand the fd to the epoll management set
    struct epoll_event ev;
    ev.events = EPOLLIN;
    ev.data.fd = fd;
    epoll_ctl(reactor->epfd, EPOLL_CTL_ADD, fd, &ev);
    return 0; // BUGFIX: original fell off the end of a non-void function
}
// Entry point. Usage: ./server <port>
// Builds the reactor, registers the listening socket(s), then drives the
// epoll main loop forever, dispatching events through per-fd callbacks.
int main(int argc, char *argv[]) {
    if (argc < 2) return -1; // the port argument is required
    zv_reactor_t reactor;
    zv_init_reactor(&reactor); // sets up epoll + the first connection block
    int port = atoi(argv[1]);
    int i = 0;
    for (i = 0;i < 1;i ++) { // single listener; raise the bound for multi-port tests
        int sockfd = init_server(port+i);
        set_listener(&reactor, sockfd, accept_cb);
    }
    struct epoll_event events[EVENT_LENGTH] = {0};
    while (1) { // mainloop, event driven
        int nready = epoll_wait(reactor.epfd, events, EVENT_LENGTH, -1);
        if (nready < 0) continue; // defensive: skip the dispatch pass on failure (e.g. EINTR)
        int i = 0;
        for (i = 0;i < nready;i ++) {
            int connfd = events[i].data.fd;
            zv_connect_t *conn = zv_connect_idx(&reactor, connfd);
            // dispatch through the callback stored for this fd;
            // both tests may fire for the same event word
            if (events[i].events & EPOLLIN) {
                conn->cb(connfd, events[i].events, &reactor);
            }
            if (events[i].events & EPOLLOUT) {
                conn->cb(connfd, events[i].events, &reactor);
            }
        }
    }
}
在基础上进行思考并发操作实现
一请求一线程的多线程模型耗费内存：请求量很大时内存不够分配，也没有必要为每个请求建线程
最好是实现线程池 将读写操作放入线程池 进行多数量的并发接收和写的操作
在接收的fd中前5个是固定占用的
1. 0,1,2 分别是 stdin、stdout、stderr
2. listen fd ,epfd
后续连接的fd从5开始
服务器需要具有的特征
1. 并发量 服务器同时承载的客户端数量
2. qps 每秒请求数
3. 最大时延
4. 一秒钟能创建多少连接数
5. 吞吐量
本文实现的是纯连接的多并发
2-4解决内存泄漏的问题
modprobe nf_conntrack 加载nf_conntrack模块 用于跟踪一个连接的状态的
htop 查看内核使用
epoll 本身支持管理100wio
reactor 单线程支持100w io 单纯连接不考虑业务
面试问服务器并发量多少
1. 测试结果 这是可控的因为是自己代码实现
2. 线上实际情况 这个不可控取决于多方面 机器等因素
遇到的问题参考文章