目录
今天使用异步操作来实现请求,这里要用到epoll_wait来监听服务端是否返回数据来实现异步
一:epoll_wait如何判断服务端是否返回
通过fd是否可读
二:流程图

三:伪代码
struct Context
{
epollfd;
}
struct sockItem
{
socketfd;
cb result_cb;
}
Context* asy_init()
{
Context* context = new Context;
epollfd = epoll_create(1);
context.epollfd = epollfd;
std::thread th(handleCallback, context);
return context;
}
void asy_commit(Context* context, cb result_cb)
{
sockfd = socket();
sockaddr_in addr;
connect(sockfd, addr);
Request request;
sendto(sockfd, request, sizeof(request), 0);
sockItem* sockitem = new sockItem;
sockitem.sockfd = sockfd;
sockitem.result_cb = result_cb;
epoll_event event;
event.data.ptr = sockitem;
event.events = EPOLLIN;
epoll_ctl(context.epollfd, EPOLL_CTL_ADD, sockfd, &event);
}
void handleCallback(Context* context)
{
epollfd = context.epollfd;
epoll_event events[1024];
int nums = epoll_wait(epollfd, events, 1024, 0);
for(int i = 0; i < nums; ++i)
{
sockitem = events[i].data.ptr;
sockfd = sockitem.sockfd;
result_cb = sockitem.result_cb;
if(events[i].events & EPOLLIN)
{
int n = recvfrom(sockfd, response, sizeof(response), 0);
}
result_cb(response);
}
}
void result_callback(Response response) //用户定义的, 用于处理收到的数据的回调
{
std::cout << response << std::endl;
}
int main()
{
Context* context = asy_init();
for(int i = 0; i < 5; ++i) //发起若干次请求
{
asy_commit(context, result_callback);
}
return 0;
}
四:注意
- 两个context通过在堆区分配内存来防止被回收,进而实现函数间数据的传输
- 五个函数,两个和callback有关的函数,一个是handleCallback(在这个函数的最后面执行用户回调),一个是userCallback(将被执行的用户回调,让用户来决定怎么处理拿到的数据)
- event.data.ptr可以传输一个数据,这个在很多地方都是用于实现回调函数的注册
- 通过这样实现了commit只管请求,不管数据处理,从而实现了异步(不用等待返回数据处理,才能再次commit)
五:简化代码(可运行)
// C / POSIX headers
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>
// C++ standard library
#include <functional>
#include <iostream>
#include <string>
#include <thread>
// Shared state created by asy_init() and handed to both the worker thread
// (handleCallback) and the committer (asy_commit). Heap-allocated so it
// outlives the function that created it.
struct Context
{
// fd of the epoll instance all requests are registered on
int epollfd;
};
// Mock request/response payloads: plain strings stand in for real wire formats.
using Response = std::string;
// NOTE(review): "Requset" is a typo for "Request". The misspelled alias is
// kept so existing code referring to it still compiles; a correctly spelled
// alias is added below — prefer it in new code.
using Requset = std::string;
using Request = Requset;
// Signature of the user-supplied completion callback: invoked with the
// response once the worker thread sees the fd become readable.
using CallBack = std::function<void(Response response)>;
// Per-request registration record. A pointer to one of these rides in
// epoll_event.data.ptr, which is how the user callback is "registered":
// when the fd fires, the worker recovers this struct and runs result_cb.
struct sockItem
{
// fd of the socket the request was sent on
int sockfd;
// user callback to run when the response is ready
CallBack result_cb;
};
// User-defined completion callback: the user decides how a received
// response is handled — this demo simply echoes it to stdout.
void result_callback(Response response)
{
    std::cout << response << std::endl;
}
// Worker-thread entry: drains one round of epoll events and runs each
// request's user callback.
// NOTE(review): a production reactor would wrap the epoll_wait in a
// while(1) loop — the article's own "2024 patch" notes this. A single pass
// is kept here on purpose so the demo thread terminates and the join in
// ~asyInit() can return.
void handleCallback(Context* context)
{
    int epollfd = context->epollfd;
    epoll_event events[1024];
    // timeout 0 => non-blocking poll: returns immediately even when no fd
    // is ready yet, so this may well observe zero events in the demo.
    int nums = epoll_wait(epollfd, events, 1024, 0);
    for(int i = 0; i < nums; ++i)
    {
        // data.ptr carries the per-request sockItem registered in asy_commit()
        sockItem* sockitem = static_cast<sockItem*>(events[i].data.ptr);
        int sockfd = sockitem->sockfd;
        CallBack result_cb = sockitem->result_cb;
        // Real code would check (events[i].events & EPOLLIN) and
        // recvfrom(sockfd, ...) here; the response is faked because no
        // server exists in this demo.
        std::string response = "response";
        result_cb(response);
        // Fix: unregister the fd and release the state asy_commit() allocated
        // with new — the original leaked both the socket and the sockItem.
        epoll_ctl(epollfd, EPOLL_CTL_DEL, sockfd, nullptr);
        close(sockfd);
        delete sockitem;
    }
}
// Owns the background callback thread; joining it in the destructor keeps
// the program from exiting while handleCallback() is still running.
class asyInit
{
private:
std::thread th;
int epollfd = -1; // kept so the destructor can release the epoll instance
public:
~asyInit()
{
// Fix: join() on a non-joinable thread throws std::system_error, so a
// never-started asyInit (asy_init() not called) used to crash here.
if (th.joinable())
    th.join();
// Fix: the epoll fd was leaked; safe to close after the worker joined.
if (epollfd != -1)
    close(epollfd);
}
// Creates the epoll instance and starts the worker thread.
// The Context is heap-allocated so it outlives this call and can be shared
// by the worker thread and asy_commit().
// NOTE(review): the returned Context is never freed in this demo.
Context* asy_init()
{
Context* context = new Context;
epollfd = epoll_create(1);
context->epollfd = epollfd;
th = std::thread(handleCallback, context);
return context;
}
};
void Send(Response response) {}
// Submits one asynchronous request: creates a socket, (pretend-)sends the
// request, and registers the fd plus the user callback on the epoll
// instance so the worker thread can invoke result_cb_ when a reply arrives.
// Returns immediately — it never waits for the response, which is the point.
void asy_commit(Context* context, CallBack result_cb_)
{
int sockfd = socket(AF_INET, SOCK_STREAM, 0);
// Fix: bail out instead of registering an invalid fd with epoll.
if (sockfd < 0)
    return;
// connect(sockfd, addr, sizeof(addr)) omitted — the demo assumes a
// connection already exists.
Requset request = "request";
Send(request); // pretend to transmit the request
// Heap-allocated so it survives this call; ownership conceptually passes
// to whoever consumes the epoll event (the demo itself never frees it).
sockItem* sockitem = new sockItem;
sockitem->sockfd = sockfd;
sockitem->result_cb = result_cb_;
epoll_event event;
event.data.ptr = sockitem; // data.ptr is how the callback is "registered"
event.events = EPOLLIN;
// Fix: check the registration result; on failure release the fd and the
// item here, because the worker thread will never see them.
if (epoll_ctl(context->epollfd, EPOLL_CTL_ADD, sockfd, &event) == -1)
{
    close(sockfd);
    delete sockitem;
}
}
int main()
{
    asyInit asy;
    Context* context = asy.asy_init();
    // Fire several requests back-to-back: commit never blocks waiting for a
    // reply, which is exactly the asynchrony this demo illustrates.
    const int kRequestCount = 5;
    for (int n = 0; n < kRequestCount; ++n)
        asy_commit(context, result_callback);
    return 0;
}
2024 patch,上述代码有些问题
1.epoll_wait没有在while(1)循环里,压根其实就循环了一次,这是不准确的
2.
区别两种fd的读事件
1):服务器的listen_fd的读事件,当这个读事件发生时,表示服务器收到连接请求,我们需要先accept接收到客户端fd,再把这个fd通过epoll_ctl加入到感兴趣事件当中:上述代码服务器压根就没有socket出一个listen_fd出来,并且listen,bind,setFdNoBlocking;
2):event[i].data.fd,这个是客户端的fd,当这个读事件发生时,表示客户端发了一些消息,我们需要read(fd, buf, BUF_SIZE);,读出数据,然后解析;
代码如下:
注意四个fd:
1)listen_sockfd:这个是服务器的fd,需要挂到epoll树上变成events[n].data.fd
2)epollfd:这个只是epoll树的实例,和读写无关
3)conn_fd:客户端的fd,后续会被挂到epoll树里变成events[n].data.fd
4)events[n].data.fd:epollfd感兴趣的fd
即:

int make_socket_non_blocking(int sfd) {}
int main() {
struct epoll_event event, events[MAX_EVENTS];
// 创建socket
int listen_sockfd = socket(AF_INET, SOCK_STREAM, 0);
// 设置socket为非阻塞
make_socket_non_blocking(listen_sockfd);
bind(listen_sockfd, (struct sockaddr *)&server_addr, sizeof(server_addr));
// 开始监听
listen(listen_sockfd, 10);
// 创建epoll实例
int epollfd = epoll_create1(0);
// 添加监听socket到epoll实例中,监控读事件
event.data.fd = listen_sockfd;
event.events = EPOLLIN;
epoll_ctl(epollfd, EPOLL_CTL_ADD, listen_sockfd, &event);
printf("Server listening on port %d\n", PORT);
for (;;) {
nfds = epoll_wait(epollfd, events, MAX_EVENTS, -1);
for (int n = 0; n < nfds; ++n) {
if (events[n].data.fd == listen_sockfd) {
// 处理新的连接请求
int conn_sockfd = accept(listen_sockfd, (struct sockaddr *)&client_addr, &client_len);
printf("Accepted connection from %s:%d\n", inet_ntoa(client_addr.sin_addr), ntohs(client_addr.sin_port));
// 设置新连接为非阻塞
make_socket_non_blocking(conn_sockfd);
// 将新的连接添加到epoll实例中,监控读事件
event.data.fd = conn_sockfd;
event.events = EPOLLIN;
epoll_ctl(epollfd, EPOLL_CTL_ADD, conn_sockfd, &event);
} else {
// 处理已经存在的连接上的读事件
char buffer[BUF_SIZE] = {0};
ssize_t bytes_read;
bytes_read = recv(events[n].data.fd, buffer, BUF_SIZE - 1, 0);
if (bytes_read == -1) {
// 如果发生错误或者连接关闭,则关闭socket并从epoll中移除
if (errno != EAGAIN && errno != EWOULDBLOCK) {
perror("recv");
close(events[n].data.fd);
epoll_ctl(epollfd, EPOLL_CTL_DEL, events[n].data.fd, NULL);
}
} else if (bytes_read == 0) {
// 客户端正常关闭连接
printf("Client closed connection\n");
close(events[n].data.fd);
epoll_ctl(epollfd, EPOLL_CTL_DEL, events[n].data.fd, NULL);
} else {
// 处理接收到的数据(这里只是简单地打印)
buffer[bytes_read] = '\0'; // 确保字符串以null结尾
printf("Received: %s", buffer);
// (可选)你可以在这里发送响应回客户端
}
}
}

被折叠的 条评论
为什么被折叠?



