view plaincopy to clipboardprint?
/*
* Processes an incoming "handle a new connection" item. This is called when
* input arrives on the libevent wakeup pipe.
*/
/*
 * Processes an incoming "handle a new connection" item. This is called when
 * input arrives on the libevent wakeup pipe.
 *
 * fd    - the read end of this worker's notify pipe
 * which - libevent event flags (unused here)
 * arg   - this worker's LIBEVENT_THREAD context
 */
static void thread_libevent_process(int fd, short which, void *arg) {
    LIBEVENT_THREAD *me = arg;
    CQ_ITEM *item;
    char buf[1];

    /* Exactly one byte must be consumed per wakeup: under level-triggered
     * polling the read event would otherwise keep firing until the pipe is
     * drained. (Fixed: format string was "/n" instead of "\n".) */
    if (read(fd, buf, 1) != 1) {
        if (settings.verbose > 0) {
            fprintf(stderr, "Can't read from libevent pipe\n");
        }
    }

    /* Pop one connection item that the dispatcher thread pushed onto this
     * worker's queue. */
    item = cq_pop(me->new_conn_queue);
    if (NULL != item) {
        /* Register the accepted socket with this worker's event base. */
        conn *c = conn_new(item->sfd, item->init_state, item->event_flags,
                           item->read_buffer_size, item->transport, me->base);
        if (c == NULL) {
            if (IS_UDP(item->transport)) {
                /* Losing a UDP listener is unrecoverable. */
                fprintf(stderr, "Can't listen for events on UDP socket\n");
                exit(1);
            } else {
                if (settings.verbose > 0) {
                    fprintf(stderr, "Can't listen for events on fd %d\n",
                            item->sfd);
                }
                close(item->sfd);
            }
        } else {
            /* Connection is now owned by this worker thread. */
            c->thread = me;
        }
        /* Return the item to the cqi_freelist for reuse. */
        cqi_free(item);
    }
}
/*
* Processes an incoming "handle a new connection" item. This is called when
* input arrives on the libevent wakeup pipe.
*/
/*
 * Processes an incoming "handle a new connection" item. This is called when
 * input arrives on the libevent wakeup pipe.
 *
 * fd    - the read end of this worker's notify pipe
 * which - libevent event flags (unused here)
 * arg   - this worker's LIBEVENT_THREAD context
 */
static void thread_libevent_process(int fd, short which, void *arg) {
    LIBEVENT_THREAD *me = arg;
    CQ_ITEM *item;
    char buf[1];

    /* Exactly one byte must be consumed per wakeup: under level-triggered
     * polling the read event would otherwise keep firing until the pipe is
     * drained. (Fixed: format string was "/n" instead of "\n".) */
    if (read(fd, buf, 1) != 1) {
        if (settings.verbose > 0) {
            fprintf(stderr, "Can't read from libevent pipe\n");
        }
    }

    /* Pop one connection item that the dispatcher thread pushed onto this
     * worker's queue. */
    item = cq_pop(me->new_conn_queue);
    if (NULL != item) {
        /* Register the accepted socket with this worker's event base. */
        conn *c = conn_new(item->sfd, item->init_state, item->event_flags,
                           item->read_buffer_size, item->transport, me->base);
        if (c == NULL) {
            if (IS_UDP(item->transport)) {
                /* Losing a UDP listener is unrecoverable. */
                fprintf(stderr, "Can't listen for events on UDP socket\n");
                exit(1);
            } else {
                if (settings.verbose > 0) {
                    fprintf(stderr, "Can't listen for events on fd %d\n",
                            item->sfd);
                }
                close(item->sfd);
            }
        } else {
            /* Connection is now owned by this worker thread. */
            c->thread = me;
        }
        /* Return the item to the cqi_freelist for reuse. */
        cqi_free(item);
    }
}
八,worker_libevent函数的分析
worker_libevent函数就是每个worker_thread启动的执行体,每个worker_thread创建和初始化完毕后,就执行该函数。
view plaincopy to clipboardprint?
/*
* Worker thread: main event loop, workerx thread 的执行体
*/
/*
 * Worker thread entry point: announce initialization, then run this
 * thread's libevent dispatch loop until the loop is broken.
 */
static void *worker_libevent(void *arg) {
    LIBEVENT_THREAD *thread_ctx = arg;

    /* Per-thread setup would go here; thread_init() blocks on init_cond
     * until init_count shows that every worker has reached this point. */
    pthread_mutex_lock(&init_lock);
    ++init_count;
    pthread_cond_signal(&init_cond);
    pthread_mutex_unlock(&init_lock);

    /* Enter the event loop; events registered on this base (e.g. the
     * notify-pipe read event) are dispatched from here. */
    event_base_loop(thread_ctx->base, 0);
    return NULL;
}
/*
* Worker thread: main event loop, workerx thread 的执行体
*/
/*
 * Worker thread entry point: announce initialization, then run this
 * thread's libevent dispatch loop until the loop is broken.
 */
static void *worker_libevent(void *arg) {
    LIBEVENT_THREAD *thread_ctx = arg;

    /* Per-thread setup would go here; thread_init() blocks on init_cond
     * until init_count shows that every worker has reached this point. */
    pthread_mutex_lock(&init_lock);
    ++init_count;
    pthread_cond_signal(&init_cond);
    pthread_mutex_unlock(&init_lock);

    /* Enter the event loop; events registered on this base (e.g. the
     * notify-pipe read event) are dispatched from here. */
    event_base_loop(thread_ctx->base, 0);
    return NULL;
}
九,conn_new函数的分析(worker_thread的重要处理逻辑)
核心的方法: 每个worker 线程收到pipe可读会调用该方法进行处理, 其基本逻辑是:首先从该线程的CQ队列中取队列头的一个CQ_ITEM,这个CQ_ITEM是被主线程丢到这个队列里的,item->sfd是已经建立的连接 的描述符,通过conn_new函数为该描述符注册libevent的读事件,me->base是代表自己的一个线程结构体,就是说对该描述符的事件处理交给当前这个workers线程处理, conn_new方法的最重要的内容如下代码所示:
view plaincopy to clipboardprint?
/*
 * Creates a conn object for an accepted socket and registers its event
 * handler on the given event base, tying the connection to one worker
 * thread. NOTE(review): this is an elided excerpt — the `。。。` and `…`
 * markers stand for code omitted by the article, so it is not compilable
 * as shown.
 */
conn *conn_new(const int sfd,
enum conn_states init_state,
const int event_flags,
const int read_buffer_size,
enum network_transport transport,
struct event_base *base
)
{
conn *c = conn_from_freelist();
。。。// (elided) the connection object's fields are initialized here
// Key step: register the socket's events and their handler, event_handler,
// on the caller-supplied (worker-owned) event base.
event_set(&c->event, sfd, event_flags, event_handler, (void *)c);
event_base_set(base, &c->event);
c->ev_flags = event_flags;
if (event_add(&c->event, 0) == -1) {
// Registration failed: return c to the freelist (or free it outright).
if (conn_add_to_freelist(c)) {
conn_free(c);
}
perror("event_add");
return NULL;
}
…
return c;
}
/*
 * Creates a conn object for an accepted socket and registers its event
 * handler on the given event base, tying the connection to one worker
 * thread. NOTE(review): this is an elided excerpt — the `。。。` and `…`
 * markers stand for code omitted by the article, so it is not compilable
 * as shown.
 */
conn *conn_new(const int sfd,
enum conn_states init_state,
const int event_flags,
const int read_buffer_size,
enum network_transport transport,
struct event_base *base
)
{
conn *c = conn_from_freelist();
。。。// (elided) the connection object's fields are initialized here
// Key step: register the socket's events and their handler, event_handler,
// on the caller-supplied (worker-owned) event base.
event_set(&c->event, sfd, event_flags, event_handler, (void *)c);
event_base_set(base, &c->event);
c->ev_flags = event_flags;
if (event_add(&c->event, 0) == -1) {
// Registration failed: return c to the freelist (or free it outright).
if (conn_add_to_freelist(c)) {
conn_free(c);
}
perror("event_add");
return NULL;
}
…
return c;
}
可以看到新的连接被注册了一个事件(实际是EV_READ|EV_PERSIST),由当前线程处理(因为这里的event_base是该workers线程自己的),当该连接有可读数据时会回调event_handler函数,实际上event_handler里主要是调用memcached的核心方法drive_machine,最后看看memcached网络事件处理的最核心部分- drive_machine 。需要铭记于心的是drive_machine是多线程环境执行的,主线程和workers都会执行drive_machine 。
十,drive_machine函数的分析
drive_machine主要是通过当前连接的state来判断该进行何种处理,因为通过libevent注册了读写事件后回调的都是这个核心函数,所以实际上我们在注册libevent相应事件时,会同时把事件状态写到该conn结构体里,libevent进行回调时会把该conn结构作为参数传递过来,就是该方法的形参 。
view plaincopy to clipboardprint?
//真正处理跟已经连接的socket的逻辑
static void drive_machine(conn *c) {
bool stop = false;
int sfd, flags = 1;
socklen_t addrlen;
struct sockaddr_storage addr;
int nreqs = settings.reqs_per_event;
int res;
assert(c != NULL);
while (!stop) {
switch(c->state) {
case conn_listening:
addrlen = sizeof(addr);
//处理accept出错的情况
if ((sfd = accept(c->sfd, (struct sockaddr *)&addr, &addrlen)) == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
/* these are transient, so don't log anything */
stop = true;
} else if (errno == EMFILE) {
if (settings.verbose > 0)
fprintf(stderr, "Too many open connections/n");
accept_new_conns(false);
stop = true;
} else {
perror("accept()");
stop = true;
}
break;
}
//设置sfd 为 非阻塞方式
if ((flags = fcntl(sfd, F_GETFL, 0)) < 0 ||
fcntl(sfd, F_SETFL, flags | O_NONBLOCK) < 0) {
perror("setting O_NONBLOCK");
close(sfd);
break;
}
//分发到某个指定的线程去
dispatch_conn_new(sfd, conn_new_cmd, EV_READ | EV_PERSIST,
DATA_BUFFER_SIZE, tcp_transport);
stop = true;
break;
// 以下代码略
}
return;
}
//真正处理跟已经连接的socket的逻辑
static void drive_machine(conn *c) {
bool stop = false;
int sfd, flags = 1;
socklen_t addrlen;
struct sockaddr_storage addr;
int nreqs = settings.reqs_per_event;
int res;
assert(c != NULL);
while (!stop) {
switch(c->state) {
case conn_listening:
addrlen = sizeof(addr);
//处理accept出错的情况
if ((sfd = accept(c->sfd, (struct sockaddr *)&addr, &addrlen)) == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
/* these are transient, so don't log anything */
stop = true;
} else if (errno == EMFILE) {
if (settings.verbose > 0)
fprintf(stderr, "Too many open connections/n");
accept_new_conns(false);
stop = true;
} else {
perror("accept()");
stop = true;
}
break;
}
//设置sfd 为 非阻塞方式
if ((flags = fcntl(sfd, F_GETFL, 0)) < 0 ||
fcntl(sfd, F_SETFL, flags | O_NONBLOCK) < 0) {
perror("setting O_NONBLOCK");
close(sfd);
break;
}
//分发到某个指定的线程去
dispatch_conn_new(sfd, conn_new_cmd, EV_READ | EV_PERSIST,
DATA_BUFFER_SIZE, tcp_transport);
stop = true;
break;
// 以下代码略
}
return;
}
十一,主线程是如何进行dispatch的?
view plaincopy to clipboardprint?
/*
* Dispatches a new connection to another thread. This is only ever called
* from the main thread, either during initialization (for UDP) or because
* of an incoming connection.
*/
void dispatch_conn_new(int sfd, enum conn_states init_state, int event_flags, int read_buffer_size,
enum network_transport transport
)
{
//从cqi_freelist 或者malloc 一个CQ_ITEM
CQ_ITEM *item = cqi_new();
//round robin 一个worker 线程
int tid = (last_thread + 1) % settings.num_threads;
LIBEVENT_THREAD *thread = threads + tid;
//该次选中的worker 线程保留起来
last_thread = tid;
//fill 该CQ_ITEM 的内容
item->sfd = sfd;
item->init_state = init_state;
item->event_flags = event_flags;
item->read_buffer_size = read_buffer_size;
item->transport = transport;
//push 到选中的那个线程的连接队列里面
cq_push(thread->new_conn_queue, item);
MEMCACHED_CONN_DISPATCH(sfd, thread->thread_id);
//往对应的线程写pipe写一个空的字节,此时触发该线程的notify
//管道可读,并触发对应的事件处理流程
if (write(thread->notify_send_fd, "", 1) != 1) {
perror("Writing to thread notify pipe");
}
}
/*
* Dispatches a new connection to another thread. This is only ever called
* from the main thread, either during initialization (for UDP) or because
* of an incoming connection.
*/
void dispatch_conn_new(int sfd, enum conn_states init_state, int event_flags, int read_buffer_size,
enum network_transport transport
)
{
//从cqi_freelist 或者malloc 一个CQ_ITEM
CQ_ITEM *item = cqi_new();
//round robin 一个worker 线程
int tid = (last_thread + 1) % settings.num_threads;
LIBEVENT_THREAD *thread = threads + tid;
//该次选中的worker 线程保留起来
last_thread = tid;
//fill 该CQ_ITEM 的内容
item->sfd = sfd;
item->init_state = init_state;
item->event_flags = event_flags;
item->read_buffer_size = read_buffer_size;
item->transport = transport;
//push 到选中的那个线程的连接队列里面
cq_push(thread->new_conn_queue, item);
MEMCACHED_CONN_DISPATCH(sfd, thread->thread_id);
//往对应的线程写pipe写一个空的字节,此时触发该线程的notify
//管道可读,并触发对应的事件处理流程
if (write(thread->notify_send_fd, "", 1) != 1) {
perror("Writing to thread notify pipe");
}
}
可以清楚的看到,主线程首先创建了一个新的CQ_ITEM,然后通过round robin策略选择了一个thread
并通过cq_push将这个CQ_ITEM放入了该线程的CQ队列里,那么对应的workers线程是怎么知道的呢
就是通过这个
write(threads[thread].notify_send_fd, "", 1)
向该线程管道写了1字节数据,则该线程的libevent立即回调了thread_libevent_process方法(上面已经描述过)
然后那个线程取出item,注册读事件,当该条连接上有数据时,最终也会回调drive_machine方法,也就是
drive_machine方法的 case conn_read:等全部是workers处理的,主线程只处理conn_listening 建立连接这个
本文来自CSDN博客,转载请标明出处:http://blog.csdn.net/tenfyguo/archive/2010/01/31/5274435.aspx