一、tracker主流程
本文使用FastDFS_v4.06版本。先参考一下这两篇文章,看看主流程做了哪些事:
http://blog.chinaunix.net/uid-20498361-id-3328763.html
http://yangbajing.blog.chinaunix.net/uid-26786622-id-3146373.html
二、tracker_service_init()
注意:它产生了四个工作线程,这些线程的启动是通过向它们各自的管道写数据来进行的。它们会检测到管道读事件(通过libevent),从而进一步调用recv_notify_read()。
int tracker_service_init() //excerpt: error-handling code has been stripped (empty braces mark removed error paths)
{
int result;
struct tracker_thread_data *pThreadData;
struct tracker_thread_data *pDataEnd;
pthread_t tid;
pthread_attr_t thread_attr;
if ((result=init_pthread_lock(&tracker_thread_lock)) != 0) //init the mutex that guards g_tracker_thread_count
{
return result;
}
if ((result=init_pthread_lock(&lb_thread_lock)) != 0) //init a second mutex (presumably for the load-balance thread — confirm in source)
{
return result;
}
if ((result=init_pthread_attr(&thread_attr, g_thread_stack_size)) != 0) //set thread attributes (stack size)
{
}
//initialize the g_free_queue structure and allocate memory for g_mpool:
//a linked list of g_max_connections buffers, each (block_size + 8192) bytes;
//g_max_connections is 256 here, i.e. the maximum connection count.
if ((result=free_queue_init(g_max_connections, TRACKER_MAX_PACKAGE_SIZE,\
TRACKER_MAX_PACKAGE_SIZE, sizeof(TrackerClientInfo))) != 0)
{
return result;
}
//allocate one thread-data struct per worker thread; g_work_threads is 4 here
g_thread_data = (struct tracker_thread_data *)malloc(sizeof( \
struct tracker_thread_data) * g_work_threads);
if (g_thread_data == NULL)
{
}
g_tracker_thread_count = 0;
pDataEnd = g_thread_data + g_work_threads;
for (pThreadData=g_thread_data; pThreadData<pDataEnd; pThreadData++)
{
pThreadData->ev_base = event_base_new(); //note: newer libevent no longer uses event_init(), which is not thread safe
if (pThreadData->ev_base == NULL)
{
}
if (pipe(pThreadData->pipe_fds) != 0) //per-thread notification pipe: main thread writes, worker reads
{
}
#if defined(OS_LINUX)
//make the pipe's read end non-blocking (and skip atime updates on Linux)
if ((result=fd_add_flags(pThreadData->pipe_fds[0], \
O_NONBLOCK | O_NOATIME)) != 0)
{
break;
}
#else
#endif
//create the worker thread; its entry work_thread_entrance() registers the pipe's
//readable event with libevent, and that event is handled by recv_notify_read().
if ((result=pthread_create(&tid, &thread_attr, \
work_thread_entrance, pThreadData)) != 0)
{
break;
}
else
{
if ((result=pthread_mutex_lock(&tracker_thread_lock)) != 0)
{
}
g_tracker_thread_count++; //count successfully started workers under the lock
if ((result=pthread_mutex_unlock(&tracker_thread_lock)) != 0)
{
}
}
}
pthread_attr_destroy(&thread_attr);
return 0;
}
三、sched_start
对于result=sched_start(&scheduleArray, &schedule_tid, g_thread_stack_size, (bool * volatile)&g_continue_flag),sched_start 是开启线程sched_thread_entrance (sched_thread.c 260行),定期执行指定的任务;这些任务有log_sync_func、tracker_mem_check_alive、tracker_write_status_to_file。这三个函数以后再看。
static void *sched_thread_entrance(void *args)
{
ScheduleContext *pContext;
ScheduleEntry *pPrevious;
ScheduleEntry *pCurrent;
ScheduleEntry *pSaveNext;
ScheduleEntry *pNode;
ScheduleEntry *pUntil;
int exec_count;
int i;
int sleep_time;
pContext = (ScheduleContext *)args;
if (sched_init_entries(&(pContext->scheduleArray)) != 0) //compute each entry's initial next_call_time
{
free(pContext);
return NULL;
}
sched_make_chain(pContext); //sort entries by next_call_time and link them into a chain
g_schedule_flag = true;
while (*(pContext->pcontinue_flag)) //this thread spends the rest of its life in this loop
{
sched_check_waiting(pContext); //wait for a task deadline to arrive; records how many tasks are due
if (pContext->scheduleArray.count == 0) //no schedule entry
{
sleep(1);
g_current_time = time(NULL);
continue;
}
g_current_time = time(NULL);
sleep_time = pContext->head->next_call_time - g_current_time; //seconds until the earliest task is due
/*
//fprintf(stderr, "count=%d, sleep_time=%d\n", \
pContext->scheduleArray.count, sleep_time);
*/
//sleep in 1-second slices so shutdown and newly-arrived entries are noticed promptly
while (sleep_time > 0 && *(pContext->pcontinue_flag))
{
sleep(1);
g_current_time = time(NULL);
if (sched_check_waiting(pContext) == 0)
{
break;
}
sleep_time--;
}
if (!(*(pContext->pcontinue_flag)))
{
break;
}
exec_count = 0;
pCurrent = pContext->head;
//run every task at the head of the chain whose deadline has passed
while (*(pContext->pcontinue_flag) && (pCurrent != NULL \
&& pCurrent->next_call_time <= g_current_time))
{
//fprintf(stderr, "exec task id=%d\n", pCurrent->id);
pCurrent->task_func(pCurrent->func_args); //invoke the entry's task callback
pCurrent->next_call_time = g_current_time + \
pCurrent->interval;
pCurrent = pCurrent->next;
exec_count++;
}
if (exec_count == 0 || pContext->scheduleArray.count == 1)
{
continue;
}
if (exec_count > pContext->scheduleArray.count / 2) //more than half of the tasks fired: rebuild the whole chain
{
sched_make_chain(pContext);
continue;
}
pNode = pContext->head; //only a few tasks fired: re-insert each fired node into the sorted chain one by one
pContext->head = pCurrent; //new chain head
for (i=0; i<exec_count; i++)
{
//fast path: node now due later than the tail, append it at the end
if (pNode->next_call_time >= pContext->tail->next_call_time)
{
pContext->tail->next = pNode;
pContext->tail = pNode;
pNode = pNode->next;
pContext->tail->next = NULL;
continue;
}
//otherwise walk the chain to find the sorted insertion point
pPrevious = NULL;
pUntil = pContext->head;
while (pUntil != NULL && \
pNode->next_call_time > pUntil->next_call_time)
{
pPrevious = pUntil;
pUntil = pUntil->next;
}
pSaveNext = pNode->next;
if (pPrevious == NULL)
{
pContext->head = pNode; //inserted before the current head
}
else
{
pPrevious->next = pNode;
}
pNode->next = pUntil;
pNode = pSaveNext;
}
}
g_schedule_flag = false;
logDebug("file: "__FILE__", line: %d, " \
"schedule thread exit", __LINE__);
free(pContext);
return NULL;
}
四、tracker_relationship_init-->relationship_thread_entrance
http://www3.xuebuyuan.com/1542033.html
tracker_relationship_init创建线程执行relationship_thread_entrance()。注意首次执行时,g_tracker_servers.servers为NULL
static void *relationship_thread_entrance(void* arg)
{
#define MAX_SLEEP_SECONDS 10
int fail_count;
int sleep_seconds;
fail_count = 0;
while (g_continue_flag) //this thread loops here until shutdown
{
sleep_seconds = 1;
if (g_tracker_servers.servers != NULL) //NULL on first pass, until the server list is populated
{
if (g_tracker_servers.leader_index < 0) //no leader known: try to elect one
{
if (relationship_select_leader() != 0)
{
//election failed: back off for a random 1..(MAX_SLEEP_SECONDS+1) seconds
sleep_seconds = 1 + (int)((double)rand()
* (double)MAX_SLEEP_SECONDS / RAND_MAX);
}
}
else
{
if (relationship_ping_leader() == 0)
{
fail_count = 0; //leader responded: reset the failure streak
}
else
{
fail_count++;
if (fail_count >= 3) //leader unreachable 3 times in a row: force a re-election
{
g_tracker_servers.leader_index = -1;
}
}
}
}
if (g_last_tracker_servers != NULL) //free the stale server list under the file lock
{
tracker_mem_file_lock();
free(g_last_tracker_servers);
g_last_tracker_servers = NULL;
tracker_mem_file_unlock();
}
sleep(sleep_seconds);
}
return NULL;
}
五、log_set_cache(true)
设置g_log_context的log_to_cache = true,意味着 write to buffer firstly, then sync to disk(先写入缓冲区,再同步到磁盘)。
六、tracker_accept_loop(sock)
当有连接到来时,唤醒一个线程(向管道中写入incomesock),对应线程将会调用recv_notify_read(),见tracker_service_init()。
void tracker_accept_loop(int server_sock)
{
int incomesock;
struct sockaddr_in inaddr;
socklen_t sockaddr_len;
struct tracker_thread_data *pThreadData;
while (g_continue_flag)
{
sockaddr_len = sizeof(inaddr);
incomesock = accept(server_sock, (struct sockaddr*)&inaddr, &sockaddr_len);
if (incomesock < 0) //accept failed
{
//EINTR/EAGAIN are benign; other errors were logged here (stripped in this excerpt)
if (!(errno == EINTR || errno == EAGAIN))
{
}
continue;
}
//pick a worker by fd modulo thread count, then hand the socket over by
//writing its fd into that worker's pipe — this wakes recv_notify_read()
pThreadData = g_thread_data + incomesock % g_work_threads;
if (write(pThreadData->pipe_fds[1], &incomesock, \
sizeof(incomesock)) != sizeof(incomesock))
{
close(incomesock); //could not notify the worker: drop the connection
}
}
}
七、后续工作
本文涉及的线程:主线程,4个工作线程,一个调度线程,一个选主线程。本文中尚未展开的函数——recv_notify_read(),三个调度任务函数,以及选主相关的relationship_select_leader()和relationship_ping_leader()——将在后续文章中剖析。与tracker主函数相关的其他一些处理函数可参考源代码。