event的配置解析相关的代码已经分析完毕。下面分析另一个流程中event模块的实现,即nginx创建worker进程并开始执行进程内代码的时候。入口函数是ngx_worker_process_cycle,它会遍历所有模块并调用各自的init_process钩子:
// Fragment of ngx_worker_process_cycle: give every registered module a
// chance to set up per-process state via its init_process hook.
for (i = 0; ngx_modules[i]; i++) {
if (ngx_modules[i]->init_process) {
// a failing init_process hook is unrecoverable for this worker
if (ngx_modules[i]->init_process(cycle) == NGX_ERROR) {
/* fatal */
exit(2);
}
}
}
event模块的核心子模块ngx_event_core_module实现了该钩子(即ngx_event_process_init)。该函数初始化connection结构体数组,把监听的fd注册到事件驱动模块,并注册可读事件的回调函数等。
// Per-worker initialization (the init_process hook of ngx_event_core_module).
// Steps: optionally arm the accept mutex, init the event timer machinery,
// init the event-driver module chosen by the "use" directive, allocate the
// connection / read-event / write-event arrays, then wire every listening
// socket into a connection and register its read event so ngx_event_accept
// fires when a client connects. Returns NGX_OK, or NGX_ERROR on failure
// (allocation/registration errors); exits the process on fatal driver init.
static ngx_int_t ngx_event_process_init(ngx_cycle_t *cycle)
{
ngx_uint_t m, i;
ngx_socket_t fd;
ngx_event_t *rev, *wev;
ngx_listening_t *s;
ngx_connection_t *c;
ngx_core_conf_t *ccf;
ngx_event_conf_t *ecf;
ngx_event_module_t *module;
#if (WIN32)
ngx_iocp_conf_t *iocpcf;
#endif
ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);
ecf = ngx_event_get_conf(cycle->conf_ctx, ngx_event_core_module);
// enable the accept mutex only with more than one worker and the
// accept_mutex directive on; a single worker has no thundering herd
if (ngx_accept_mutex_ptr && ccf->worker_processes > 1 && ecf->accept_mutex)
{
ngx_accept_mutex = ngx_accept_mutex_ptr;
ngx_accept_mutex_held = 0;
ngx_accept_mutex_delay = ecf->accept_mutex_delay;
}
#if (NGX_THREADS)
if (!(ngx_posted_events_mutex = ngx_mutex_init(cycle->log, 0))) {
return NGX_ERROR;
}
#endif
// initialize the event timer machinery (timer red-black tree)
if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {
return NGX_ERROR;
}
cycle->connection_n = ecf->connections;
for (m = 0; ngx_modules[m]; m++) {
if (ngx_modules[m]->type != NGX_EVENT_MODULE) {
continue;
}
// select the driver chosen by the "use" directive; ecf->use is the
// module's ctx_index, recorded while parsing the configuration
if (ngx_modules[m]->ctx_index == ecf->use) {
module = ngx_modules[m]->ctx;
// initialize the chosen driver, e.g. the epoll module's init
if (module->actions.init(cycle) == NGX_ERROR) {
/* fatal */
exit(2);
}
break;
}
}
// allocate the connection array, sized by the "connections" directive
cycle->connections = ngx_alloc(sizeof(ngx_connection_t) * ecf->connections,
cycle->log);
if (cycle->connections == NULL) {
return NGX_ERROR;
}
c = cycle->connections;
// mark every connection slot as unused (fd == -1)
for (i = 0; i < cycle->connection_n; i++) {
c[i].fd = (ngx_socket_t) -1;
c[i].data = NULL;
#if (NGX_THREADS)
c[i].lock = 0;
#endif
}
// allocate the read-event array, one entry per connection slot
cycle->read_events = ngx_alloc(sizeof(ngx_event_t) * ecf->connections,
cycle->log);
if (cycle->read_events == NULL) {
return NGX_ERROR;
}
rev = cycle->read_events;
for (i = 0; i < cycle->connection_n; i++) {
rev[i].closed = 1;
#if (NGX_THREADS)
rev[i].lock = &c[i].lock;
rev[i].own_lock = &c[i].lock;
#endif
}
// allocate the matching write-event array
cycle->write_events = ngx_alloc(sizeof(ngx_event_t) * ecf->connections,
cycle->log);
if (cycle->write_events == NULL) {
return NGX_ERROR;
}
wev = cycle->write_events;
for (i = 0; i < cycle->connection_n; i++) {
wev[i].closed = 1;
#if (NGX_THREADS)
wev[i].lock = &c[i].lock;
wev[i].own_lock = &c[i].lock;
#endif
}
/* for each listening socket */
// wire each listening fd into a connection slot (indexed by fd) and
// register it with the event driver, e.g. epoll
s = cycle->listening.elts;
for (i = 0; i < cycle->listening.nelts; i++) {
fd = s[i].fd;
#if (WIN32)
/*
 * Winsock assignes a socket number divisible by 4
 * so to find a connection we divide a socket number by 4.
 */
fd /= 4;
#endif
c = &cycle->connections[fd];
rev = &cycle->read_events[fd];
wev = &cycle->write_events[fd];
ngx_memzero(c, sizeof(ngx_connection_t));
ngx_memzero(rev, sizeof(ngx_event_t));
// attach the listening fd and its ngx_listening_t to the connection
c->fd = s[i].fd;
c->listening = &s[i];
c->ctx = s[i].ctx;
c->servers = s[i].servers;
c->log = s[i].log;
c->read = rev;
/* required by iocp in "c->write->active = 1" */
c->write = wev;
/* required by poll */
wev->index = NGX_INVALID_INDEX;
rev->log = c->log;
// back-pointer from the read event to its connection
rev->data = c;
rev->index = NGX_INVALID_INDEX;
rev->available = 0;
rev->accept = 1;
#if (HAVE_DEFERRED_ACCEPT)
rev->deferred_accept = s[i].deferred_accept;
#endif
// non-IOCP only: on reconfiguration, an inherited ("remain") socket
// still has an accept event bound to the OLD cycle's arrays — remove it
if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
if (s[i].remain) {
/*
 * delete the old accept events that were bound to
 * the old cycle read events array
 */
if (ngx_del_event(&cycle->old_cycle->read_events[fd],
NGX_READ_EVENT, NGX_CLOSE_EVENT) == NGX_ERROR)
{
return NGX_ERROR;
}
cycle->old_cycle->connections[fd].fd = (ngx_socket_t) -1;
}
}
// Windows-only IOCP path
#if (WIN32)
if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
rev->event_handler = &ngx_event_acceptex;
if (ngx_add_event(rev, 0, NGX_IOCP_ACCEPT) == NGX_ERROR) {
return NGX_ERROR;
}
iocpcf = ngx_event_get_conf(cycle->conf_ctx, ngx_iocp_module);
if (ngx_event_post_acceptex(&s[i], iocpcf->post_acceptex)
== NGX_ERROR)
{
return NGX_ERROR;
}
} else {
rev->event_handler = &ngx_event_accept;
if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
return NGX_ERROR;
}
}
#else
// register the accept handler as the listening socket's read callback
rev->event_handler = &ngx_event_accept;
// with the accept mutex, the event is added later, only by the worker
// that holds the mutex — skip registration here
if (ngx_accept_mutex) {
continue;
}
// the rtsig driver (ngx_rtsig_module) registers whole connections
if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
if (ngx_add_conn(c) == NGX_ERROR) {
return NGX_ERROR;
}
} else {
// add the read event; ngx_event_accept runs when a client connects
if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
return NGX_ERROR;
}
}
#endif
}
return NGX_OK;
}
// Register interest in `event` (NGX_READ_EVENT or NGX_WRITE_EVENT) for the
// connection that owns `ev`, via epoll_ctl. If the PAIRED event on the same
// connection is already active, the fd is already in epoll, so the existing
// registration is modified (EPOLL_CTL_MOD) and the old interest is kept;
// otherwise the fd is added (EPOLL_CTL_ADD). `flags` are extra epoll flags
// OR-ed into the mask. Returns NGX_OK or NGX_ERROR.
static int ngx_epoll_add_event(ngx_event_t *ev, int event, u_int flags)
{
int op, prev;
ngx_event_t *e;
ngx_connection_t *c;
struct epoll_event ee;
// the connection this event belongs to
c = ev->data;
// adding a read event: look at the paired write event, and vice versa
if (event == NGX_READ_EVENT) {
// the counterpart (write) event of the same connection
e = c->write;
// interest to preserve if the counterpart is already registered
prev = EPOLLOUT;
#if (NGX_READ_EVENT != EPOLLIN)
// map nginx's event constant to the native epoll flag
event = EPOLLIN;
#endif
} else {
e = c->read;
prev = EPOLLIN;
#if (NGX_WRITE_EVENT != EPOLLOUT)
event = EPOLLOUT;
#endif
}
// counterpart already in epoll: modify the fd's registration rather
// than adding it a second time
if (e->active) {
op = EPOLL_CTL_MOD;
// keep the counterpart's interest alongside the new one
event |= prev;
} else {
op = EPOLL_CTL_ADD;
}
ee.events = event | flags;// flags: caller-supplied epoll flags; mask = read|write|flags
// pack ev->instance into the low bit of the connection pointer
// (pointer is aligned, so the bit is free; used to detect stale events)
ee.data.ptr = (void *) ((uintptr_t) c | ev->instance);
ngx_log_debug3(NGX_LOG_DEBUG_EVENT, ev->log, 0,
"epoll add event: fd:%d op:%d ev:%08X",
c->fd, op, ee.events);
if (epoll_ctl(ep, op, c->fd, &ee) == -1) {
ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_errno,
"epoll_ctl(%d, %d) failed", op, c->fd);
return NGX_ERROR;
}
// mark this event as registered in epoll
ev->active = 1;
#if 0
ev->oneshot = (flags & NGX_ONESHOT_EVENT) ? 1 : 0;
#endif
return NGX_OK;
}
至此nginx已把所有需要监听的socket注册到epoll,接下来就是等待事件的到来。下一步将执行ngx_process_events函数,该函数的具体实现取决于选用的事件驱动模块(比如epoll模块对应ngx_epoll_process_events)。下一节再分析。