一、recv_notify_read
由第一篇文章可知,accept 线程每接受一个连接,就把该连接的 socket 描述符写入某个工作线程的通知管道,libevent 随即在该工作线程中回调这个函数。(注意:工作线程一直在 event_base_loop 中循环等待事件)
/**
 * recv_notify_read: pipe-notification callback for a worker thread.
 *
 * The accept thread dispatches each new connection by writing its socket
 * descriptor into a worker thread's notify pipe; libevent then invokes this
 * callback in that worker (which loops in event_base_loop).  For every
 * descriptor drained from the pipe this function:
 *   - treats a negative value as a shutdown command: it decodes the worker
 *     index and asks that worker's event loop to exit within one second;
 *   - rejects clients not found in g_allow_ip_addrs when the allow list
 *     is active (g_allow_ip_count >= 0);
 *   - makes the socket non-blocking, pops a free fast_task_info from the
 *     free queue (initialized in tracker_service_init), and binds the
 *     read/write events to the owning worker's event base.  Only the read
 *     event is armed here; the write event is added on demand later.
 *
 * sock:  read end of the notify pipe (non-blocking)
 * event: libevent event flags (unused)
 * arg:   unused
 */
void recv_notify_read(int sock, short event, void *arg)
{
	int bytes;
	int incomesock;
	int result;
	struct tracker_thread_data *pThreadData;
	struct fast_task_info *pTask;
	char szClientIp[IP_ADDRESS_SIZE];
	in_addr_t client_addr;

	while (1)
	{
		/* each payload in the pipe is one socket descriptor */
		if ((bytes=read(sock, &incomesock, sizeof(incomesock))) < 0)
		{
			if (!(errno == EAGAIN || errno == EWOULDBLOCK))
			{
				/* FIX: the original left this block empty and
				   silently swallowed real pipe read errors */
				logError("file: "__FILE__", line: %d, " \
					"call read failed, errno: %d", \
					__LINE__, errno);
			}
			break;  /* pipe drained (or failed): wait for next notify */
		}
		else if (bytes == 0)
		{
			break;
		}

		if (incomesock < 0)  /* negative fd encodes a quit command */
		{
			struct timeval tv;
			tv.tv_sec = 1;
			tv.tv_usec = 0;
			/* -incomesock - 1 carries the worker thread index */
			pThreadData = g_thread_data + (-1 * incomesock - 1) % \
					g_work_threads;
			event_base_loopexit(pThreadData->ev_base, &tv);
			return;
		}

		client_addr = getPeerIpaddr(incomesock, \
				szClientIp, IP_ADDRESS_SIZE);
		if (g_allow_ip_count >= 0)
		{
			/* allow list is sorted, so a binary search suffices */
			if (bsearch(&client_addr, g_allow_ip_addrs, \
				g_allow_ip_count, sizeof(in_addr_t), \
				cmp_by_ip_addr_t) == NULL)
			{
				close(incomesock);
				continue;
			}
		}

		if (tcpsetnonblockopt(incomesock) != 0)
		{
			close(incomesock);
			continue;
		}

		/* take a task slot from the free queue; NULL means the
		   server has no capacity for another connection */
		pTask = free_queue_pop();
		if (pTask == NULL)
		{
			close(incomesock);
			continue;
		}

		/* pick the worker thread that will own this connection */
		pThreadData = g_thread_data + incomesock % g_work_threads;
		strcpy(pTask->client_ip, szClientIp);

		event_set(&pTask->ev_read, incomesock, EV_READ, \
				client_sock_read, pTask);
		if (event_base_set(pThreadData->ev_base, &pTask->ev_read) != 0)
		{
			task_finish_clean_up(pTask);
			close(incomesock);
			continue;
		}

		/* write event is only initialized here, not yet added;
		   it is armed later when a response would block */
		event_set(&pTask->ev_write, incomesock, EV_WRITE, \
				client_sock_write, pTask);
		if ((result=event_base_set(pThreadData->ev_base, \
				&pTask->ev_write)) != 0)
		{
			task_finish_clean_up(pTask);
			close(incomesock);
			continue;
		}

		/* arm the read event with the network timeout */
		if (event_add(&pTask->ev_read, &g_network_tv) != 0)
		{
			task_finish_clean_up(pTask);
			close(incomesock);
			continue;
		}
	}
}
二、client_sock_read
上文添加的读事件发生或超时时会回调该函数。
static void client_sock_read(int sock, short event, void *arg)
{
int bytes;
int recv_bytes;
struct fast_task_info *pTask;
pTask = (struct fast_task_info *)arg;
if (event == EV_TIMEOUT)
{
if (pTask->offset == 0 && pTask->req_count > 0) //req_count为request count.(?)
{
if (event_add(&pTask->ev_read, &g_network_tv) != 0) //重新设置等待事件
{
close(pTask->ev_read.ev_fd);
task_finish_clean_up(pTask);
}
}
else
{
close(pTask->ev_read.ev_fd);
task_finish_clean_up(pTask);
}
return;
}
while (1)
{
if (pTask->length == 0) //recv header
{
recv_bytes = sizeof(TrackerHeader) - pTask->offset;
}
else
{
recv_bytes = pTask->length - pTask->offset;
}
bytes = recv(sock, pTask->data + pTask->offset, recv_bytes, 0);
if (bytes < 0)
{
if (errno == EAGAIN || errno == EWOULDBLOCK)
{
if(event_add(&pTask->ev_read, &g_network_tv)!=0)
{
close(pTask->ev_read.ev_fd);
task_finish_clean_up(pTask);
}
}
else
{
close(pTask->ev_read.ev_fd);
task_finish_clean_up(pTask);
}
return;
}
else if (bytes == 0)
{
close(pTask->ev_read.ev_fd);
task_finish_clean_up(pTask);
return;
}
if (pTask->length == 0) //header
{
if (pTask->offset + bytes < sizeof(TrackerHeader))
{
if (event_add(&pTask->ev_read, &g_network_tv)!=0)
{
close(pTask->ev_read.ev_fd);
}
pTask->offset += bytes;
return;
}
pTask->length = buff2long(((TrackerHeader *) \
pTask->data)->pkg_len);
if (pTask->length < 0)
{
close(pTask->ev_read.ev_fd);
task_finish_clean_up(pTask);
return;
}
pTask->length += sizeof(TrackerHeader);
if (pTask->length > TRACKER_MAX_PACKAGE_SIZE)
{
close(pTask->ev_read.ev_fd);
task_finish_clean_up(pTask);
return;
}
}
pTask->offset += bytes;
if (pTask->offset >= pTask->length) //recv done
{
pTask->req_count++; //rec_count++,请求个数+1
tracker_deal_task(pTask);
return;
}
}
return;
}
/**
 * tracker_deal_task: dispatch a complete request package to its handler
 * based on the command byte in the TrackerHeader, then rewrite the header
 * in place as a response (status, RESP cmd, body length) and trigger the
 * send via send_add_event.
 *
 * TRACKER_CHECK_LOGINED guards commands that require the storage server
 * to have joined first.  FDFS_PROTO_CMD_QUIT is the one command that
 * closes the connection instead of replying.
 *
 * Returns 0 always; the per-command result reaches the client through
 * pHeader->status.
 */
int tracker_deal_task(struct fast_task_info *pTask)
{
	TrackerHeader *pHeader;
	int result;

	pHeader = (TrackerHeader *)pTask->data;
	switch(pHeader->cmd)
	{
		case TRACKER_PROTO_CMD_STORAGE_BEAT:
			TRACKER_CHECK_LOGINED(pTask)
			result = tracker_deal_storage_beat(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_SYNC_REPORT:
			TRACKER_CHECK_LOGINED(pTask)
			result = tracker_deal_storage_sync_report(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_REPORT_DISK_USAGE:
			TRACKER_CHECK_LOGINED(pTask)
			result = tracker_deal_storage_df_report(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_JOIN:
			result = tracker_deal_storage_join(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_REPORT_STATUS:
			result = tracker_deal_storage_report_status(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_GET_STATUS:
			result = tracker_deal_server_get_storage_status(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_GET_SERVER_ID:
			result = tracker_deal_get_storage_id(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_FETCH_STORAGE_IDS:
			result = tracker_deal_fetch_storage_ids(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_REPLICA_CHG:
			TRACKER_CHECK_LOGINED(pTask)
			result = tracker_deal_storage_replica_chg(pTask);
			break;
		/* the three fetch/update queries share one handler, which
		   branches on the cmd itself; group the cases instead of
		   repeating the identical call three times */
		case TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE:
		case TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE:
		case TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ALL:
			result = tracker_deal_service_query_fetch_update( \
					pTask, pHeader->cmd);
			break;
		/* likewise for the four store-path query variants */
		case TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE:
		case TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE:
		case TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ALL:
		case TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ALL:
			result = tracker_deal_service_query_storage( \
					pTask, pHeader->cmd);
			break;
		case TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP:
			result = tracker_deal_server_list_one_group(pTask);
			break;
		case TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS:
			result = tracker_deal_server_list_all_groups(pTask);
			break;
		case TRACKER_PROTO_CMD_SERVER_LIST_STORAGE:
			result = tracker_deal_server_list_group_storages(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_SYNC_SRC_REQ:
			result = tracker_deal_storage_sync_src_req(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_REQ:
			TRACKER_CHECK_LOGINED(pTask)
			result = tracker_deal_storage_sync_dest_req(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_SYNC_NOTIFY:
			result = tracker_deal_storage_sync_notify(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_QUERY:
			result = tracker_deal_storage_sync_dest_query(pTask);
			break;
		case TRACKER_PROTO_CMD_SERVER_DELETE_STORAGE:
			result = tracker_deal_server_delete_storage(pTask);
			break;
		case TRACKER_PROTO_CMD_SERVER_SET_TRUNK_SERVER:
			result = tracker_deal_server_set_trunk_server(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_REPORT_IP_CHANGED:
			result = tracker_deal_storage_report_ip_changed(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_CHANGELOG_REQ:
			result = tracker_deal_changelog_req(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_PARAMETER_REQ:
			result = tracker_deal_parameter_req(pTask);
			break;
		case FDFS_PROTO_CMD_QUIT:
			/* QUIT sends no response: tear the connection down */
			close(pTask->ev_read.ev_fd);
			task_finish_clean_up(pTask);
			return 0;
		case FDFS_PROTO_CMD_ACTIVE_TEST:
			result = tracker_deal_active_test(pTask);
			break;
		case TRACKER_PROTO_CMD_TRACKER_GET_STATUS:
			result = tracker_deal_get_tracker_status(pTask);
			break;
		case TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_START:
			result = tracker_deal_get_sys_files_start(pTask);
			break;
		case TRACKER_PROTO_CMD_TRACKER_GET_ONE_SYS_FILE:
			result = tracker_deal_get_one_sys_file(pTask);
			break;
		case TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_END:
			result = tracker_deal_get_sys_files_end(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FID:
			TRACKER_CHECK_LOGINED(pTask)
			result = tracker_deal_report_trunk_fid(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_FETCH_TRUNK_FID:
			TRACKER_CHECK_LOGINED(pTask)
			result = tracker_deal_get_trunk_fid(pTask);
			break;
		case TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FREE:
			TRACKER_CHECK_LOGINED(pTask)
			result = tracker_deal_report_trunk_free_space(pTask);
			break;
		case TRACKER_PROTO_CMD_TRACKER_PING_LEADER:
			result = tracker_deal_ping_leader(pTask);
			break;
		case TRACKER_PROTO_CMD_TRACKER_NOTIFY_NEXT_LEADER:
			result = tracker_deal_notify_next_leader(pTask);
			break;
		case TRACKER_PROTO_CMD_TRACKER_COMMIT_NEXT_LEADER:
			result = tracker_deal_commit_next_leader(pTask);
			break;
		default:
			/* FIX: corrected "unkown" -> "unknown" in the log */
			logError("file: "__FILE__", line: %d, " \
				"client ip: %s, unknown cmd: %d", \
				__LINE__, pTask->client_ip, \
				pHeader->cmd);
			result = EINVAL;
			break;
	}

	/* the handlers may have rewritten pTask->data; reload the header
	   pointer before turning it into the response header */
	pHeader = (TrackerHeader *)pTask->data;
	pHeader->status = result;
	pHeader->cmd = TRACKER_PROTO_CMD_RESP;
	long2buff(pTask->length - sizeof(TrackerHeader), pHeader->pkg_len);

	/* the write event was only initialized in recv_notify_read; this
	   invokes client_sock_write directly and arms the write event only
	   if the send would block */
	send_add_event(pTask);
	return 0;
}
四、client_sock_write
//在send数据之后,调用以下部分:
// client_sock_write (fragment): epilogue executed after send() returns.
if (pTask->offset >= pTask->length)
{
// the whole response has been sent: reset the buffer cursors so the
// next package starts from scratch
pTask->offset = 0;
pTask->length = 0;
if ((result=event_add(&pTask->ev_read, \
&g_network_tv)) != 0) // one send round done; re-arm the read event to wait for the next request
{
close(pTask->ev_read.ev_fd);
task_finish_clean_up(pTask);
return;
}
return;
}