Mind Map
Basic Concepts
libevent is an event-notification library; we usually build it as a .so shared library or a .a static library and link it into our project. Official site: libevent
How libevent abstracts events
Network events
Handled with the asynchronous-notification style of the reactor model
Signal events
Signal handling is likewise abstracted into a notification delivered through the event loop
Timer events
Timed events are abstracted as deferred asynchronous tasks (see the sketch after this list)
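All three abstractions go through the same event API. A minimal sketch (not from the original notes, using only public libevent calls):
#include <event2/event.h>
#include <sys/time.h>
#include <signal.h>
#include <stdio.h>
static void on_timer(evutil_socket_t fd, short what, void *arg) {
    printf("timer fired\n"); // a deferred asynchronous task
}
static void on_sigint(evutil_socket_t fd, short what, void *arg) {
    printf("got SIGINT\n"); // a signal delivered as an event notification
    event_base_loopbreak((struct event_base *)arg);
}
int main(void) {
    struct event_base *base = event_base_new();
    struct event *timer = evtimer_new(base, on_timer, NULL); // fd is -1 internally
    struct timeval one_sec = {1, 0};
    event_add(timer, &one_sec); // fire once, one second from now
    struct event *sigev = evsignal_new(base, SIGINT, on_sigint, base);
    event_add(sigev, NULL);
    event_base_dispatch(base);
    event_free(timer);
    event_free(sigev);
    event_base_free(base);
    return 0;
}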
Building and installing libevent
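For a release tarball (e.g. libevent-2.1.12-stable; the version here is a placeholder), the standard autotools flow is usually enough:
./configure
make
sudo make install   # or link against the in-tree ./.libs, as the gcc commands below do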
Usage examples
Handling the IO yourself
That is, user code calls accept, read, and the other IO functions itself
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "event2/event.h"
void socket_read_cb(int fd, short events, void *arg);
void socket_accept_cb(int fd, short events, void* arg)
{
struct sockaddr_in addr;
socklen_t len = sizeof(addr);
evutil_socket_t clientfd = accept(fd, (struct sockaddr*)&addr, &len);
if (clientfd < 0)
return;
evutil_make_socket_nonblocking(clientfd);
printf("accept a client %d\n", clientfd);
struct event_base* base = (struct event_base*)arg;
//allocate first, then assign: the event must carry its own pointer as the callback arg
struct event *ev = event_new(NULL, -1, 0, NULL, NULL);
event_assign(ev, base, clientfd, EV_READ | EV_PERSIST,
socket_read_cb, (void*)ev); // the equivalent of epoll_ctl(ADD)
event_add(ev, NULL);
}
void socket_read_cb(int fd, short events, void *arg)
{
char msg[4096];
struct event *ev = (struct event*)arg;
int len = read(fd, msg, sizeof(msg) - 1); // this callback does the IO itself; the event only signals readiness
if( len <= 0 ) // -1: error (check errno, e.g. EWOULDBLOCK); 0: the peer closed the connection
{
printf("client fd:%d disconnect\n", fd);
event_free(ev);
close(fd);
return;
}
msg[len] = '\0';
printf("recv the client msg: %s", msg);
char reply_msg[4096] = "received msg: ";
strcat(reply_msg, msg);
write(fd, reply_msg, strlen(reply_msg));
}
int socket_listen(int port)
{
evutil_socket_t listenfd = socket(AF_INET, SOCK_STREAM, 0);
if (listenfd == -1)
return -1;
evutil_make_listen_socket_reuseable(listenfd);
struct sockaddr_in sin;
memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = htonl(INADDR_ANY); // listen on all interfaces
sin.sin_port = htons(port);
if (bind(listenfd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
evutil_closesocket(listenfd);
return -1;
}
if (listen(listenfd, 5) < 0) {
evutil_closesocket(listenfd);
return -1;
}
evutil_make_socket_nonblocking(listenfd);
return listenfd;
}
int main(int argc, char** argv)
{
int listenfd = socket_listen(8989);
if (listenfd == -1)
{
printf("socket_listen error\n");
return -1;
}
printf("listen port : %d\n", 8989);
struct event_base* base = event_base_new();
struct event* ev_listen = event_new(base, listenfd, EV_READ | EV_PERSIST,
socket_accept_cb, base);
/*
event_new is equivalent to the older two-step API:
struct event ev_listen;
event_set(&ev_listen, listenfd, EV_READ | EV_PERSIST, socket_accept_cb, base);
event_base_set(base, &ev_listen);
*/
event_add(ev_listen, NULL);
event_base_dispatch(base); // the event loop
event_del(ev_listen);
event_base_free(base);
return 0;
}
/*
gcc evmain1.c -I./libevent/include -o ev1 -L./libevent/.libs -levent
client:
telnet 127.0.0.1 8989
*/
Letting libevent handle the IO
accept, read, and the other IO functions are wrapped inside libevent; user code does not need to deal with them
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <event.h>
#include <time.h>
#include <signal.h>
#include <event2/listener.h>
#include <event2/bufferevent.h>
#include <event2/buffer.h>
void socket_read_callback(struct bufferevent *bev, void *arg) {
struct evbuffer *evbuf = bufferevent_get_input(bev);
// delimit the packet, parse it, and dispatch the business logic
char *msg = evbuffer_readln(evbuf, NULL, EVBUFFER_EOL_LF);
if (!msg) return;
printf("server read the data: %s\n", msg);
char reply[4096] = {0};
sprintf(reply, "received msg: %s\n", msg);
// WARNING: evbuffer_readln allocates the line; we must free it ourselves
free(msg);
bufferevent_write(bev, reply, strlen(reply));
}
void redis_read_callback(struct bufferevent *bev, void *arg) {
struct evbuffer *evbuf = bufferevent_get_input(bev);
char *msg = evbuffer_readln(evbuf, NULL, EVBUFFER_EOL_CRLF_STRICT);
if (!msg) return;
printf("server read the data from redis: %s\n", msg);
free(msg);
}
void redis_event_callback(struct bufferevent *bev, short events, void *arg) {
if (events & BEV_EVENT_CONNECTED) {
printf("connect successed!\n");
bufferevent_write(bev, "*1\r\n$4\r\nPING\r\n", 14);
} else {
printf("connect failed!\n");
bufferevent_free(bev); // close(fd)
}
}
// void stdio_callback(struct bufferevent *bev, void *arg)
// {
// struct evbuffer *evbuf = bufferevent_get_input(bev);
// char *msg = evbuffer_readln(evbuf, NULL, EVBUFFER_EOL_LF);
// if (!msg) return;
// if (strcmp(msg, "quit") == 0) {
// printf("safe exit!!!\n");
// exit(0);
// return;
// }
// printf("stdio read the data: %s\n", msg);
// }
//passive disconnects (the peer closing the connection) are handled here
void socket_event_callback(struct bufferevent *bev, short events, void *arg)
{
if (events & BEV_EVENT_EOF) // read returned 0
printf("connection closed\n");
else if (events & BEV_EVENT_ERROR) // see strerror(errno)
printf("some other error\n");
else if (events & BEV_EVENT_TIMEOUT)
printf("timeout\n");
//Because bufferevent_socket_new was called with BEV_OPT_CLOSE_ON_FREE,
//bufferevent_free also closes the underlying socket.
bufferevent_free(bev); // close(fd)
}
// int clientfd = accept(listenfd, addr, sz);
void listener_callback(struct evconnlistener *listener, evutil_socket_t fd,
struct sockaddr *sock, int socklen, void *arg)
{
char ip[32] = {0};
evutil_inet_ntop(AF_INET, &((struct sockaddr_in *)sock)->sin_addr, ip, sizeof(ip)-1);
printf("accept a client fd:%d ip:%s\n", fd, ip);
struct event_base *base = (struct event_base *)arg;
// clientfd read
struct bufferevent *bev = bufferevent_socket_new(base, fd, BEV_OPT_CLOSE_ON_FREE);
// writecb is NULL: the bufferevent flushes its output buffer itself, so a write callback is rarely needed
bufferevent_setcb(bev, socket_read_callback, NULL, socket_event_callback, NULL);
bufferevent_enable(bev, EV_READ | EV_PERSIST);
}
static void
do_timer(int fd, short events, void* arg) {
struct event * timer = (struct event *)arg;
time_t now = time(NULL);
printf("do_timer %s", (char*)ctime(&now));
event_del(timer);
// struct timeval tv = {1,0};
// event_add(timer, &tv);
}
static void
do_sig_int(int fd, short event, void *arg) {
struct event *si = (struct event*) arg;
event_del(si);
printf("do_sig_int SIGINT\n");
}
int main()
{
struct event_base *base = event_base_new();
struct sockaddr_in sin;
memset(&sin, 0, sizeof(struct sockaddr_in));
sin.sin_family = AF_INET;
sin.sin_port = htons(8989);
/*
evconnlistener_new_bind replaces the usual four steps:
1. create listenfd
2. bind
3. listen
4. register the read event
*/
/*
evconnlistener_new_bind parameters:
base: the reactor object this listener is attached to
listener_callback: callback invoked for each accepted connection
LEV_OPT_REUSEABLE: make the listening address reusable
LEV_OPT_CLOSE_ON_FREE: close listenfd when the listener is freed
10: the backlog passed to listen (length of the full-connection queue)
sin: the address and port to bind
*/
struct evconnlistener *listener =
evconnlistener_new_bind(base, listener_callback, base,
LEV_OPT_REUSEABLE | LEV_OPT_CLOSE_ON_FREE,
10, (struct sockaddr *)&sin,
sizeof(struct sockaddr_in));
printf("listen port : %d\n", 8989);
// struct bufferevent *ioev = bufferevent_socket_new(base, 0, BEV_OPT_CLOSE_ON_FREE);
// bufferevent_setcb(ioev, stdio_callback, NULL, NULL, base);
// bufferevent_enable(ioev, EV_READ | EV_PERSIST);
// memset(&sin, 0, sizeof(sin));
// sin.sin_addr.s_addr = inet_addr("127.0.0.1");
// sin.sin_family = AF_INET;
// sin.sin_port = htons(6379);
// struct bufferevent *rdsev = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
// bufferevent_setcb(rdsev, redis_read_callback, NULL, redis_event_callback, base);
// bufferevent_socket_connect(rdsev, (struct sockaddr*)&sin, sizeof(sin));
// struct event evtimer;
// struct timeval tv = {1,0};
// event_set(&evtimer, -1, 0, do_timer, &evtimer);
// event_base_set(base, &evtimer);
// event_add(&evtimer, &tv);
// struct event evint;
// event_set(&evint, SIGINT, EV_SIGNAL, do_sig_int, &evint);
// event_base_set(base, &evint);
// event_add(&evint, NULL);
event_base_dispatch(base);
evconnlistener_free(listener);
event_base_free(base);
// bufferevent_free(rdsev);
return 0;
}
/*
gcc evmain2.c -I./libevent/include -o ev2 -L./libevent/.libs -levent
client:
telnet 127.0.0.1 8989
*/
Source Code Analysis
The reactor object: struct event_base
struct event_base {
/** Function pointers and other data to describe this event_base's
* backend. */
/*
* evsel and evbase identify the concrete IO-multiplexing backend in use:
* select/poll/epoll/kqueue/evport/devpoll
*/
const struct eventop *evsel;//function-pointer table for the chosen backend
/** Pointer to backend-specific data. */
void *evbase;
/** List of changes to tell backend about at next dispatch. Only used
* by the O(1) backends. */
/*
* changelist: the pending per-fd event changes (reads/writes to add or
* remove) that will be handed to the backend at the next dispatch
*/
struct event_changelist changelist;
/** Function pointers used to describe the backend that this event_base
* uses for signals */
const struct eventop *evsigsel;//backend used for signal events
/** Data to implement the common signal handler code. */
struct evsig_info sig;//signal bookkeeping
/** Number of virtual events */
int virtual_event_count;
/** Maximum number of virtual events active */
int virtual_event_count_max;
/** Number of total events added to this event_base */
int event_count;
/** Maximum number of total events added to this event_base */
int event_count_max;
/** Number of total events active in this event_base */
int event_count_active;
/** Maximum number of total events active in this event_base */
int event_count_active_max;
/** Set if we should terminate the loop once we're done processing
* events. */
int event_gotterm;
/** Set if we should terminate the loop immediately */
int event_break;
/** Set if we should start a new instance of the loop immediately. */
int event_continue;
/** The currently running priority of events */
int event_running_priority;
/** Set if we're running the event_base_loop function, to prevent
* reentrant invocation. */
int running_loop;
/** Set to the number of deferred_cbs we've made 'active' in the
* loop. This is a hack to prevent starvation; it would be smarter
* to just use event_config_set_max_dispatch_interval's max_callbacks
* feature */
int n_deferreds_queued;
/* Active event management. */
/** An array of nactivequeues queues for active event_callbacks (ones
* that have triggered, and whose callbacks need to be called). Low
* priority numbers are more important, and stall higher ones.
*/
/*
* The triggered queues: ready events (e.g. those returned by epoll_wait)
* are pushed onto activequeues; nactivequeues is the number of priority
* queues in the array
*/
struct evcallback_list *activequeues;
/** The length of the activequeues array */
int nactivequeues;
/** A list of event_callbacks that should become active the next time
* we process events, but not this time. */
/*
* Deferred queue: events that should become active on the next loop
* iteration rather than this one
*/
struct evcallback_list active_later_queue;
/* common timeout logic */
/** An array of common_timeout_list* for all of the common timeout
* values we know. */
struct common_timeout_list **common_timeout_queues;
/** The number of entries used in common_timeout_queues */
int n_common_timeouts;
/** The total size of common_timeout_queues. */
int n_common_timeouts_allocated;
/** Mapping from file descriptors to enabled (added) events */
/*
* network (IO) events
*/
struct event_io_map io;
/** Mapping from signal numbers to enabled (added) events. */
/*
* signal events
*/
struct event_signal_map sigmap;
/** Priority queue of events with timeouts. */
/*
* timer events
*/
struct min_heap timeheap;
/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
* too often. */
struct timeval tv_cache;
struct evutil_monotonic_timer monotonic_timer;
/** Difference between internal time (maybe from clock_gettime) and
* gettimeofday. */
struct timeval tv_clock_diff;
/** Second in which we last updated tv_clock_diff, in monotonic time. */
time_t last_updated_clock_diff;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
/* threading support */
/** The thread currently running the event_loop for this base */
unsigned long th_owner_id;
/** A lock to prevent conflicting accesses to this event_base */
void *th_base_lock;
/** A condition that gets signalled when we're done processing an
* event with waiters on it. */
void *current_event_cond;
/** Number of threads blocking on current_event_cond. */
int current_event_waiters;
#endif
/** The event whose callback is executing right now */
struct event_callback *current_event;
#ifdef _WIN32
/** IOCP support structure, if IOCP is enabled. */
struct event_iocp_port *iocp;
#endif
/** Flags that this base was configured with */
enum event_base_config_flag flags;
struct timeval max_dispatch_time;
int max_dispatch_callbacks;
int limit_callbacks_after_prio;
/* Notify main thread to wake up break, etc. */
/** True if the base already has a pending notify, and we don't need
* to add any more. */
int is_notify_pending;
/** A socketpair used by some th_notify functions to wake up the main
* thread. */
evutil_socket_t th_notify_fd[2];
/** An event used by some th_notify functions to wake up the main
* thread. */
struct event th_notify;
/** A function used to wake up the main thread from another thread. */
int (*th_notify_fn)(struct event_base *base);
/** Saved seed for weak random number generator. Some backends use
* this to produce fairness among sockets. Protected by th_base_lock. */
struct evutil_weakrand_state weakrand_seed;
/** List of event_onces that have not yet fired. */
LIST_HEAD(once_event_list, event_once) once_events;
};
struct eventop
This structure wraps the interface to the underlying IO multiplexer.
/** Structure to define the backend of a given event_base. */
struct eventop {
/** The name of this backend. */
const char *name;
/** Function to set up an event_base to use this backend. It should
* create a new structure holding whatever information is needed to
* run the backend, and return it. The returned pointer will get
* stored by event_init into the event_base.evbase field. On failure,
* this function should return NULL. */
//init: set up whichever IO-multiplexing backend was chosen
void *(*init)(struct event_base *);
/** Enable reading/writing on a given fd or signal. 'events' will be
* the events that we're trying to enable: one or more of EV_READ,
* EV_WRITE, EV_SIGNAL, and EV_ET. 'old' will be those events that
* were enabled on this fd previously. 'fdinfo' will be a structure
* associated with the fd by the evmap; its size is defined by the
* fdinfo field below. It will be set to 0 the first time the fd is
* added. The function should return 0 on success and -1 on error.
*/
//add: register an fd with the chosen backend (e.g. EPOLL_CTL_ADD)
int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
/** As "add", except 'events' contains the events we mean to disable. */
//del: remove an fd from the chosen backend (e.g. EPOLL_CTL_DEL)
int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
/** Function to implement the core of an event loop. It must see which
added events are ready, and cause event_active to be called for each
active event (usually via event_io_active or such). It should
return 0 on success and -1 on error.
*/
//dispatch: the backend's wait call (e.g. epoll_wait)
int (*dispatch)(struct event_base *, struct timeval *);
/** Function to clean up and free our data from the event_base. */
void (*dealloc)(struct event_base *);
/** Flag: set if we need to reinitialize the event base after we fork.
*/
int need_reinit;
/** Bit-array of supported event_method_features that this backend can
* provide. */
enum event_method_feature features;
/** Length of the extra information we should record for each fd that
has one or more active events. This information is recorded
as part of the evmap entry for each fd, and passed as an argument
to the add and del functions above.
*/
size_t fdinfo_len;
};
Summary: the reactor object
The reactor object (struct event_base) mainly contains:
- the IO-multiplexing backend
- signal handling
- the triggered (active) queues
- the deferred queue
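Which backend an event_base picks can be inspected or constrained from user code. A small sketch (hedged, but the functions named are all part of the public event2/event.h API):
#include <event2/event.h>
#include <stdio.h>
int main(void) {
    // List every backend compiled into this libevent build.
    const char **methods = event_get_supported_methods();
    for (int i = 0; methods[i] != NULL; i++)
        printf("supported: %s\n", methods[i]);
    // Tell the new event_base to skip a backend, e.g. rule out select.
    struct event_config *cfg = event_config_new();
    event_config_avoid_method(cfg, "select");
    struct event_base *base = event_base_new_with_config(cfg);
    event_config_free(cfg);
    // On Linux this typically prints "epoll".
    printf("chosen backend: %s\n", event_base_get_method(base));
    event_base_free(base);
    return 0;
}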
Event objects
The event object
- If user code handles the IO itself, it usually works with event objects directly
struct event {
/*
* the callback invoked when the event fires
*/
struct event_callback ev_evcallback;
/* for managing timeouts */
union {
TAILQ_ENTRY(event) ev_next_with_common_timeout;
int min_heap_idx;//for timer events: this event's index in the min-heap
} ev_timeout_pos;
evutil_socket_t ev_fd;//the fd for IO events, or the signal number for signal events (-1 for pure timers)
//reactor: "one event loop per thread", each thread runs its own loop
struct event_base *ev_base;//the reactor object this event belongs to
union {
/* used for io (network) events */
struct {
LIST_ENTRY (event) ev_io_next;
struct timeval ev_timeout;
} ev_io;
/* used by signal events */
struct {
LIST_ENTRY (event) ev_signal_next;
short ev_ncalls;
/* Allows deletes in callback */
short *ev_pncalls;
} ev_signal;
} ev_;
short ev_events; //the registered event mask, e.g. EV_READ | EV_WRITE
short ev_res; /* result passed to event callback */
struct timeval ev_timeout;//the event's timeout interval
};
The bufferevent object
- bufferevent wraps an event object and adds a layer of userspace buffers on top
- bufferevent pushes the IO itself (accept, read, write, ...) down into the library, so user code never performs it
/**
Shared implementation of a bufferevent.
This type is exposed only because it was exposed in previous versions,
and some people's code may rely on manipulating it. Otherwise, you
should really not rely on the layout, size, or contents of this structure:
it is fairly volatile, and WILL change in future versions of the code.
**/
struct bufferevent {
/** Event base for which this bufferevent was created. */
/*
* the reactor object (event_base) this bufferevent belongs to
*/
struct event_base *ev_base;
/** Pointer to a table of function pointers to set up how this
bufferevent behaves. */
/*
* the operations table implementing this bufferevent type
*/
const struct bufferevent_ops *be_ops;
/** A read event that triggers when a timeout has happened or a socket
is ready to read data. Only used by some subtypes of
bufferevent. */
struct event ev_read;
/** A write event that triggers when a timeout has happened or a socket
is ready to write data. Only used by some subtypes of
bufferevent. */
struct event ev_write;
/** An input buffer. Only the bufferevent is allowed to add data to
this buffer, though the user is allowed to drain it. */
struct evbuffer *input;//the read buffer
/** An input buffer. Only the bufferevent is allowed to drain data
from this buffer, though the user is allowed to add it. */
struct evbuffer *output;//the write buffer
/*
* Read low watermark: how much data must accumulate in the input buffer
* before the read callback fires. It defaults to 0, so by default every
* read event triggers readcb.
*/
/*
* Read high watermark: once the input buffer reaches this size, the
* bufferevent stops monitoring the read event; no more kernel data is
* copied into the userspace buffer until the application drains it.
*/
struct event_watermark wm_read;
/*
* Write low watermark. Writes only have a low watermark, never a high one.
* It defaults to 0, i.e. writecb fires when the output buffer has been
* drained completely, so a write callback usually is not needed.
*/
/*
* When writecb is useful:
* 1. the business logic wants to send, say, 88KB of data
* 2. bufferevent first copies the 88KB into the userspace output buffer,
*    then writes it out to the socket and drains the buffer
* 3. only once all 88KB have been flushed (the output buffer is empty)
*    does writecb fire
*/
struct event_watermark wm_write;
//the read callback
bufferevent_data_cb readcb;
//fires at the write low watermark; bufferevent flushes the output buffer
//itself, so business code can usually leave this NULL
bufferevent_data_cb writecb;
/* This should be called 'eventcb', but renaming it would break
* backward compatibility */
/*
* All error events (passive disconnects and other exceptions) are routed
* through errorcb, which neatly decouples error handling from the data path
*/
bufferevent_event_cb errorcb;
void *cbarg;
struct timeval timeout_read;
struct timeval timeout_write;
/** Events that are currently enabled: currently EV_READ and EV_WRITE
are supported. */
short enabled;
};
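To make the watermark fields concrete, a hedged sketch using the public bufferevent_setwatermark call (the callback bodies are illustrative, and bev is assumed to come from bufferevent_socket_new elsewhere):
#include <event2/event.h>
#include <event2/bufferevent.h>
#include <stdio.h>
static void on_read(struct bufferevent *bev, void *arg) {
    // Fires only once the input buffer holds at least 16 bytes (read low watermark).
    char buf[4096];
    size_t n = bufferevent_read(bev, buf, sizeof(buf));
    printf("drained %zu bytes\n", n);
}
static void on_write(struct bufferevent *bev, void *arg) {
    // With the default write low watermark of 0, this fires once the
    // output buffer has been flushed completely.
    printf("output buffer drained\n");
}
static void setup(struct bufferevent *bev) {
    bufferevent_setcb(bev, on_read, on_write, NULL, NULL);
    // Read side: wait for at least 16 bytes before calling readcb; stop
    // pulling kernel data once 64KB have piled up unread.
    bufferevent_setwatermark(bev, EV_READ, 16, 64 * 1024);
    bufferevent_enable(bev, EV_READ | EV_WRITE);
}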
The bufferevent operations table (struct bufferevent_ops)
/**
Implementation table for a bufferevent: holds function pointers and other
information to make the various bufferevent types work.
*/
struct bufferevent_ops {
/** The name of the bufferevent's type. */
const char *type;
/** At what offset into the implementation type will we find a
bufferevent structure?
Example: if the type is implemented as
struct bufferevent_x {
int extra_data;
struct bufferevent bev;
}
then mem_offset should be offsetof(struct bufferevent_x, bev)
*/
off_t mem_offset;
/** Enables one or more of EV_READ|EV_WRITE on a bufferevent. Does
not need to adjust the 'enabled' field. Returns 0 on success, -1
on failure.
*/
//roughly the ADD operation of epoll_ctl
int (*enable)(struct bufferevent *, short);
/** Disables one or more of EV_READ|EV_WRITE on a bufferevent. Does
not need to adjust the 'enabled' field. Returns 0 on success, -1
on failure.
*/
//roughly the MOD operation of epoll_ctl
int (*disable)(struct bufferevent *, short);
/** Detaches the bufferevent from related data structures. Called as
* soon as its reference count reaches 0. */
//roughly the DEL operation of epoll_ctl
void (*unlink)(struct bufferevent *);
/** Free any storage and deallocate any extra data or structures used
in this implementation. Called when the bufferevent is
finalized.
*/
void (*destruct)(struct bufferevent *);
/** Called when the timeouts on the bufferevent have changed.*/
int (*adj_timeouts)(struct bufferevent *);
/** Called to flush data. */
int (*flush)(struct bufferevent *, short, enum bufferevent_flush_mode);
/** Called to access miscellaneous fields. */
int (*ctrl)(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
};
struct evbuffer
With a bufferevent we never touch the raw IO functions (accept, read, write, ...). This works chiefly because the bufferevent embeds userspace buffers, struct evbuffer *input/output.
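A standalone sketch of the evbuffer API itself (hedged, using only public event2/buffer.h calls):
#include <event2/buffer.h>
#include <stdio.h>
int main(void) {
    struct evbuffer *buf = evbuffer_new();
    // Append data; the evbuffer grows by adding chain nodes as needed.
    evbuffer_add(buf, "hello ", 6);
    evbuffer_add_printf(buf, "world %d\n", 42);
    printf("buffered: %zu bytes\n", evbuffer_get_length(buf));
    // Drain it back out.
    char out[64];
    int n = evbuffer_remove(buf, out, sizeof(out) - 1);
    out[n] = '\0';
    printf("drained: %s", out);
    evbuffer_free(buf);
    return 0;
}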
The evconnlistener object
- The evconnlistener object exists specifically to manage the listening fd.
- Like bufferevent, it hides the IO at the lower layer (it calls accept internally), so user code never performs it.
The event loop
A hand-rolled reactor model
libevent's reactor loop
int
event_base_loop(struct event_base *base, int flags)
{
//fetch the backend's operation table: add, del, dispatch, and friends
const struct eventop *evsel = base->evsel;
struct timeval tv;
struct timeval *tv_p;
int res, done, retval = 0;
/* Grab the lock. We will release it inside evsel.dispatch, and again
* as we invoke user callbacks. */
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
if (base->running_loop) {
event_warnx("%s: reentrant invocation. Only one event_base_loop"
" can run on each event_base at once.", __func__);
EVBASE_RELEASE_LOCK(base, th_base_lock);
return -1;
}
base->running_loop = 1;
clear_time_cache(base);
if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
evsig_set_base_(base);
done = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
base->th_owner_id = EVTHREAD_GET_ID();
#endif
base->event_gotterm = base->event_break = 0;
while (!done) {
base->event_continue = 0;
base->n_deferreds_queued = 0;
/* Terminate the loop if we have been asked to */
if (base->event_gotterm) {
break;
}
if (base->event_break) {
break;
}
tv_p = &tv;//compute how long the backend may block, bounded by the timers
if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
//find the timer that will fire soonest; its deadline bounds the wait
timeout_next(base, &tv_p);
} else {
/*
* if we have active events, we just poll new events
* without waiting.
*/
evutil_timerclear(&tv);
}
/* If we have no events, we just exit */
if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
event_debug(("%s: no events registered.", __func__));
retval = 1;
goto done;
}
event_queue_make_later_events_active(base);
clear_time_cache(base);
//this calls into the backend (e.g. epoll_wait); tv_p above becomes the
//wait timeout. res is 0 on success and -1 on error (the ready events are
//queued onto activequeues by the backend, not returned here)
res = evsel->dispatch(base, tv_p);
if (res == -1) {
event_debug(("%s: dispatch returned unsuccessfully.",
__func__));
retval = -1;
goto done;
}
update_time_cache(base);
//collect expired timers (they are not run here): timeout_process moves
//each expired timer event onto activequeues
timeout_process(base);
if (N_ACTIVE_CALLBACKS(base)) {
//now run the callbacks queued (per priority) in activequeues
int n = event_process_active(base);
if ((flags & EVLOOP_ONCE)
&& N_ACTIVE_CALLBACKS(base) == 0
&& n != 0)
done = 1;
} else if (flags & EVLOOP_NONBLOCK)
done = 1;
}
event_debug(("%s: asked to terminate loop.", __func__));
done:
clear_time_cache(base);
base->running_loop = 0;
EVBASE_RELEASE_LOCK(base, th_base_lock);
return (retval);
}
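The event_gotterm and event_break flags tested at the top of the loop are set by the public loop-control functions. A short sketch:
#include <event2/event.h>
// Ask the loop to finish after processing the currently active events;
// with a timeval, it exits after that delay instead (sets event_gotterm).
static void stop_gracefully(struct event_base *base) {
    struct timeval half_sec = {0, 500000};
    event_base_loopexit(base, &half_sec);
}
// Abort the loop immediately, even mid-iteration (sets event_break).
static void stop_now(struct event_base *base) {
    event_base_loopbreak(base);
}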
Moving collected timers onto activequeues
The implementation of event_queue_insert_active
Its job is simply to append a ready callback to the activequeues entry for its priority.
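Paraphrased from the libevent source (field names approximate, not the verbatim implementation), its core looks roughly like this:
// Paraphrased sketch, not the verbatim libevent source.
static void
event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
{
    if (evcb->evcb_flags & EVLIST_ACTIVE)
        return; // already queued
    evcb->evcb_flags |= EVLIST_ACTIVE;
    base->event_count_active++;
    // one tail queue per priority; lower priority numbers run first
    TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
        evcb, evcb_active_next);
}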
Combined libevent examples
Experiment: ev as a server, clients connecting in
Code
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <event.h>
#include <event2/listener.h>
// typedef void (*bufferevent_event_cb)(struct bufferevent *bev, short what, void *ctx);
void
connected_cb(struct bufferevent *bev, short what, void *ctx) {
if (what & BEV_EVENT_CONNECTED) {
printf("connect redis-server succeeded\n");
} else {
printf("connect redis-server failed\n");
}
}
// typedef void (*evconnlistener_cb)(struct evconnlistener *, evutil_socket_t, struct sockaddr *, int socklen, void *);
void
accept_cb(struct evconnlistener *listen, evutil_socket_t fd, struct sockaddr *sock, int socklen, void *arg) {
char ip[32] = {0};
evutil_inet_ntop(AF_INET, &((struct sockaddr_in *)sock)->sin_addr, ip, sizeof(ip)-1);
printf("accept a client fd:%d ip:%s\n", fd, ip);
struct event_base *base = (struct event_base*)arg;
}
int main() {
struct event_base * base = event_base_new();
struct sockaddr_in sin = {0};
sin.sin_family = AF_INET;
sin.sin_port = htons(8888);
struct evconnlistener *listen = evconnlistener_new_bind(base, accept_cb, base,
LEV_OPT_REUSEABLE | LEV_OPT_CLOSE_ON_FREE, 512, (struct sockaddr*)&sin, sizeof(sin));
// struct sockaddr_in sin = {0};
// sin.sin_addr.s_addr = inet_addr("127.0.0.1");
// sin.sin_family = AF_INET;
// sin.sin_port = htons(6379);
// struct bufferevent *ev = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
// bufferevent_socket_connect(ev, (struct sockaddr *)&sin, sizeof(sin));
// bufferevent_setcb(ev, NULL, NULL, connected_cb, NULL);
event_base_dispatch(base);//blocks here in a loop, processing ready network events
evconnlistener_free(listen);
event_base_free(base);
return 0;
}
Result
Note that we never call accept ourselves: the evconnlistener object from the libevent library performs the accept for each incoming client internally, so user code does not have to.
Experiment: ev as a client, actively connecting to redis
Code
#include <event2/util.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <event.h>
#include <event2/listener.h>
// typedef void (*bufferevent_event_cb)(struct bufferevent *bev, short what, void *ctx);
void
connected_cb(struct bufferevent *bev, short what, void *ctx) {
if (what & BEV_EVENT_CONNECTED) {
printf("connect redis-server succeeded\n");
} else {
printf("connect redis-server failed\n");
}
}
// typedef void (*evconnlistener_cb)(struct evconnlistener *, evutil_socket_t, struct sockaddr *, int socklen, void *);
void
accept_cb(struct evconnlistener *listen, evutil_socket_t fd, struct sockaddr *sock, int socklen, void *arg) {
char ip[32] = {0};
evutil_inet_ntop(AF_INET, &((struct sockaddr_in *)sock)->sin_addr, ip, sizeof(ip)-1);
printf("accept a client fd:%d ip:%s\n", fd, ip);
struct event_base *base = (struct event_base*)arg;
evutil_closesocket(fd);//once the telnet connection is accepted, close it again
struct sockaddr_in sin = {0};//then establish a connection to redis
sin.sin_addr.s_addr = inet_addr("127.0.0.1");
sin.sin_family = AF_INET;
sin.sin_port = htons(6379);
struct bufferevent *ev = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
bufferevent_socket_connect(ev, (struct sockaddr *)&sin, sizeof(sin));
bufferevent_setcb(ev, NULL, NULL, connected_cb, NULL);
}
int main() {
struct event_base * base = event_base_new();
struct sockaddr_in sin = {0};
sin.sin_family = AF_INET;
sin.sin_port = htons(8888);
struct evconnlistener *listen = evconnlistener_new_bind(base, accept_cb, base,
LEV_OPT_REUSEABLE | LEV_OPT_CLOSE_ON_FREE, 512, (struct sockaddr*)&sin, sizeof(sin));
// struct sockaddr_in sin = {0};
// sin.sin_addr.s_addr = inet_addr("127.0.0.1");
// sin.sin_family = AF_INET;
// sin.sin_port = htons(6379);
// struct bufferevent *ev = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
// bufferevent_socket_connect(ev, (struct sockaddr *)&sin, sizeof(sin));
// bufferevent_setcb(ev, NULL, NULL, connected_cb, NULL);
event_base_dispatch(base);//blocks here in a loop, processing ready network events
evconnlistener_free(listen);
event_base_free(base);
return 0;
}
Result
Start telnet:
The output shows the ev client connecting to the redis server successfully:
libevent's efficient network buffer
- evbuffer uses dynamically growing, piecewise-contiguous space, much like the deque container in the C++ STL; this design makes growing the buffer cheap (a sketch of the node layout follows this list)
- Layout of the evbuffer structure:
- first pointer: the first node of the chain
- last pointer: the last node of the chain
- last_with_datap: points at the last node that actually holds data
- total_len: total bytes across all nodes
- The misalign field of a chain node: it eliminates the frequent shifting of data inside the node's array.
- misalign is the offset of the first valid byte from the start of the array
- The off field of a chain node: the number of valid bytes currently in the node's array.
- off is the length of the valid data
- The buffer_len field: the capacity of the node's whole array.
- A caveat of the chain buffer design:
- the data of one read or write may be split across two (or more) non-contiguous nodes; naively, split data turns one IO into several system calls (each triggering a context switch into the kernel)
- the fix for this split-data problem is the readv/writev system calls, which can read or write non-contiguous memory in a single call, cutting the syscall count back down
- readv: reads the contiguous kernel-side data into non-contiguous userspace blocks
- writev: writes non-contiguous userspace blocks into the contiguous kernel-side buffer
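The sketch promised above: a simplified model of one chain node, based on libevent's struct evbuffer_chain (field set abbreviated, not the verbatim definition):
#include <stddef.h>
/* Simplified model of one evbuffer chain node (not the verbatim source). */
struct chain_node {
    struct chain_node *next; /* singly linked list of nodes */
    size_t buffer_len;       /* capacity of 'buffer' */
    size_t misalign;         /* offset of the first valid byte */
    size_t off;              /* number of valid bytes */
    unsigned char *buffer;   /* the storage itself */
};
/* Valid data lives in buffer[misalign .. misalign + off). Draining n bytes
 * from the front is just 'misalign += n; off -= n;', no memmove needed. */
static unsigned char *node_data(struct chain_node *c) {
    return c->buffer + c->misalign;
}
static size_t node_space(struct chain_node *c) {
    return c->buffer_len - c->misalign - c->off; /* free tail space */
}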
How evbuffer uses readv/writev inside libevent
Using readv
/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
* as howmuch? */
int
evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
{
struct evbuffer_chain **chainp;
int n;
int result;
#ifdef USE_IOVEC_IMPL
int nvecs, i, remaining;
#else
struct evbuffer_chain *chain;
unsigned char *p;
#endif
EVBUFFER_LOCK(buf);
if (buf->freeze_end) {
result = -1;
goto done;
}
n = get_n_bytes_readable_on_socket(fd);
if (n <= 0 || n > EVBUFFER_MAX_READ)
n = EVBUFFER_MAX_READ;
if (howmuch < 0 || howmuch > n)
howmuch = n;
#ifdef USE_IOVEC_IMPL
/* Since we can use iovecs, we're willing to use the last
* NUM_READ_IOVEC chains. */
if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) {
result = -1;
goto done;
} else {
IOV_TYPE vecs[NUM_READ_IOVEC];
#ifdef EVBUFFER_IOVEC_IS_NATIVE_
//nvecs non-contiguous blocks set up to receive the data
nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs,
NUM_READ_IOVEC, &chainp, 1);
#else
/* We aren't using the native struct iovec. Therefore,
we are on win32. */
struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2,
&chainp, 1);
for (i=0; i < nvecs; ++i)
WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
#endif
#ifdef _WIN32
{
DWORD bytesRead;
DWORD flags=0;
if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
/* The read failed. It might be a close,
* or it might be an error. */
if (WSAGetLastError() == WSAECONNABORTED)
n = 0;
else
n = -1;
} else
n = bytesRead;
}
#else
//one readv moves the contiguous kernel data into the non-contiguous userspace blocks
n = readv(fd, vecs, nvecs);
#endif
}
#else /*!USE_IOVEC_IMPL*/
/* If we don't have FIONREAD, we might waste some space here */
/* XXX we _will_ waste some space here if there is any space left
* over on buf->last. */
if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
result = -1;
goto done;
}
/* We can append new data at this point */
p = chain->buffer + chain->misalign + chain->off;
#ifndef _WIN32
n = read(fd, p, howmuch);
#else
n = recv(fd, p, howmuch, 0);
#endif
#endif /* USE_IOVEC_IMPL */
if (n == -1) {
result = -1;
goto done;
}
if (n == 0) {
result = 0;
goto done;
}
#ifdef USE_IOVEC_IMPL
remaining = n;
for (i=0; i < nvecs; ++i) {
/* can't overflow, since only mutable chains have
* huge misaligns. */
size_t space = (size_t) CHAIN_SPACE_LEN(*chainp);
/* XXXX This is a kludge that can waste space in perverse
* situations. */
if (space > EVBUFFER_CHAIN_MAX)
space = EVBUFFER_CHAIN_MAX;
if ((ev_ssize_t)space < remaining) {
(*chainp)->off += space;
remaining -= (int)space;
} else {
(*chainp)->off += remaining;
buf->last_with_datap = chainp;
break;
}
chainp = &(*chainp)->next;
}
#else
chain->off += n;
advance_last_with_data(buf);
#endif
buf->total_len += n;
buf->n_add_for_cb += n;
/* Tell someone about changes in this buffer */
evbuffer_invoke_callbacks_(buf);
result = n;
done:
EVBUFFER_UNLOCK(buf);
return result;
}
Using writev
#ifdef USE_IOVEC_IMPL
static inline int
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
ev_ssize_t howmuch)
{
IOV_TYPE iov[NUM_WRITE_IOVEC];
struct evbuffer_chain *chain = buffer->first;
int n, i = 0;
if (howmuch < 0)
return -1;
ASSERT_EVBUFFER_LOCKED(buffer);
/* XXX make this top out at some maximal data length? if the
* buffer has (say) 1MB in it, split over 128 chains, there's
* no way it all gets written in one go. */
while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
#ifdef USE_SENDFILE
/* we cannot write the file info via writev */
if (chain->flags & EVBUFFER_SENDFILE)
break;
#endif
iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
if ((size_t)howmuch >= chain->off) {
/* XXXcould be problematic when windows supports mmap*/
iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
howmuch -= chain->off;
} else {
/* XXXcould be problematic when windows supports mmap*/
iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
break;
}
chain = chain->next;
}
if (! i)
return 0;
#ifdef _WIN32
{
DWORD bytesSent;
if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
n = -1;
else
n = bytesSent;
}
#else
n = writev(fd, iov, i);
#endif
return (n);
}
#endif
The struct evbuffer definition
struct evbuffer {
/** The first chain in this buffer's linked list of chains. */
struct evbuffer_chain *first;
/** The last chain in this buffer's linked list of chains. */
struct evbuffer_chain *last;
/** Pointer to the next pointer pointing at the 'last_with_data' chain.
*
* To unpack:
*
* The last_with_data chain is the last chain that has any data in it.
* If all chains in the buffer are empty, it is the first chain.
* If the buffer has no chains, it is NULL.
*
* The last_with_datap pointer points at _whatever 'next' pointer_
* pointing at the last_with_data chain. If the last_with_data chain
* is the first chain, or it is NULL, then the last_with_datap pointer
* is &buf->first.
*/
struct evbuffer_chain **last_with_datap;
/** Total amount of bytes stored in all chains.*/
size_t total_len;
/** Number of bytes we have added to the buffer since we last tried to
* invoke callbacks. */
size_t n_add_for_cb;
/** Number of bytes we have removed from the buffer since we last
* tried to invoke callbacks. */
size_t n_del_for_cb;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
/** A lock used to mediate access to this buffer. */
void *lock;
#endif
/** True iff we should free the lock field when we free this
* evbuffer. */
unsigned own_lock : 1;
/** True iff we should not allow changes to the front of the buffer
* (drains or prepends). */
unsigned freeze_start : 1;
/** True iff we should not allow changes to the end of the buffer
* (appends) */
unsigned freeze_end : 1;
/** True iff this evbuffer's callbacks are not invoked immediately
* upon a change in the buffer, but instead are deferred to be invoked
* from the event_base's loop. Useful for preventing enormous stack
* overflows when we have mutually recursive callbacks, and for
* serializing callbacks in a single thread. */
unsigned deferred_cbs : 1;
#ifdef _WIN32
/** True iff this buffer is set up for overlapped IO. */
unsigned is_overlapped : 1;
#endif
/** Zero or more EVBUFFER_FLAG_* bits */
ev_uint32_t flags;
/** Used to implement deferred callbacks. */
struct event_base *cb_queue;
/** A reference count on this evbuffer. When the reference count
* reaches 0, the buffer is destroyed. Manipulated with
* evbuffer_incref and evbuffer_decref_and_unlock and
* evbuffer_free. */
int refcnt;
/** A struct event_callback handle to make all of this buffer's callbacks
* invoked from the event loop. */
struct event_callback deferred;
/** A doubly-linked-list of callback functions */
LIST_HEAD(evbuffer_cb_queue, evbuffer_cb_entry) callbacks;
/** The parent bufferevent object this evbuffer belongs to.
* NULL if the evbuffer stands alone. */
struct bufferevent *parent;
};
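Because the data lives in non-contiguous chains, the public API offers zero-copy inspection and on-demand linearization. A hedged sketch:
#include <event2/buffer.h>
#include <stdio.h>
static void inspect(struct evbuffer *buf) {
    // evbuffer_peek fills iovecs that point straight into the chains: zero copy.
    struct evbuffer_iovec vec[4];
    int n = evbuffer_peek(buf, -1, NULL, vec, 4);
    if (n > 4)
        n = 4; // peek reports how many extents exist, even beyond our array
    for (int i = 0; i < n; i++)
        printf("chain %d: %zu bytes\n", i, vec[i].iov_len);
    // evbuffer_pullup linearizes the requested prefix (here: everything)
    // into the first chain, copying only if it spans several chains.
    unsigned char *flat = evbuffer_pullup(buf, -1);
    (void)flat;
}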
Why a network buffer is needed
Three common buffer designs
- Fixed array, fixed length
- Drawbacks:
- the array's length is allocated once, which fixes the buffer's capacity (its high watermark) up front; there is no ability to grow or shrink
- it also has to shift data around constantly (say the buffer holds 1.5 packets; after one packet is read out, the remaining 0.5 packet must be moved back to the front of the array); a sketch of this shuffle follows the list
- ringbuffer
- Drawback: like the fixed array, it cannot grow or shrink; it does, however, eliminate the fixed array's constant data shifting
- chainbuffer: solves the scalability problem of both the fixed array and the ringbuffer. libevent's efficient network buffer is exactly this chainbuffer design
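The shuffle promised above: a minimal, purely illustrative fixed-array buffer whose consume operation pays the memmove cost that evbuffer's misalign field avoids:
#include <stddef.h>
#include <string.h>
#define BUF_CAP 4096
struct fixed_buf {
    char data[BUF_CAP];
    size_t len; /* valid bytes, always starting at data[0] */
};
/* Consume n bytes from the front: whatever partial packet remains must be
 * shifted back to offset 0, an O(len) memmove on every read. With the
 * chain design, the same operation is just a misalign/off adjustment. */
static void consume(struct fixed_buf *b, size_t n) {
    if (n > b->len) n = b->len;
    memmove(b->data, b->data + n, b->len - n);
    b->len -= n;
}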