live555 运行调度机制
在运行live555时,整体的简单结构是这样的:
TaskScheduler* scheduler = BasicTaskScheduler::createNew();
创建server
创建mediasession
创建submediasession
添加submediasession到mediasession
添加mediasession到server
env->taskScheduler().doEventLoop();
运行调度相关的类
TaskScheduler <-----BasicTaskScheduler0 <------BasicTaskScheduler
DelayQueueEntry<----DelayQueue
DelayQueueEntry<----AlarmHandler
超时队列
超时队列的组织
maxSchedulerGranularity默认为10ms(即10000微秒),这里主要关注scheduleDelayedTask()函数,它向DelayQueue fDelayQueue中添加AlarmHandler。
// Factory method: heap-allocates a scheduler with the requested tick
// granularity (in microseconds; 0 disables the periodic tick task).
BasicTaskScheduler* BasicTaskScheduler::createNew(unsigned maxSchedulerGranularity) {
  BasicTaskScheduler* scheduler = new BasicTaskScheduler(maxSchedulerGranularity);
  return scheduler;
}
// Constructor.  Clears the three fd_sets that select() will watch and,
// when a non-zero granularity (in microseconds) is requested, arms the
// self-rearming "tick" task so the event loop wakes up at least that often.
BasicTaskScheduler::BasicTaskScheduler(unsigned maxSchedulerGranularity)
  : fMaxSchedulerGranularity(maxSchedulerGranularity), fMaxNumSockets(0)
#if defined(__WIN32__) || defined(_WIN32)
  , fDummySocketNum(-1) // dummy socket used to work around a Windows select() quirk (see SingleStep)
#endif
{
  FD_ZERO(&fReadSet);
  FD_ZERO(&fWriteSet);
  FD_ZERO(&fExceptionSet);
  if (maxSchedulerGranularity > 0) schedulerTickTask(); // ensures that we handle events frequently
}
void BasicTaskScheduler::schedulerTickTask() {
scheduleDelayedTask(fMaxSchedulerGranularity, schedulerTickTask, this);
}
超时队列的组织利用的是linux 2.4内核定时器队列的管理方式。队列中保存的超时时间是该节点的超时时间与前一个节点的超时时间的差值,该节点的实际的超时时间是从head节点起到该节点的超时值的和。查询到期时,只需要判断第一个节点是否到期,到期就把该节点移除;插入时,将节点的超时值与从head开始的节点超时值的和比较,找到合适的位置插入。
如上图,1号节点的超时时间为2,2号为2+6,3号为2+6+10,4号为2+6+10+20
// Schedules "proc(clientData)" to run "microseconds" from now.
// Returns an opaque token that can later be used to cancel the task.
TaskToken BasicTaskScheduler0::scheduleDelayedTask(int64_t microseconds,
                                                   TaskFunc* proc,
                                                   void* clientData) {
  // Treat a negative delay as "run as soon as possible":
  if (microseconds < 0) microseconds = 0;

  long const secs  = (long)(microseconds/1000000);
  long const usecs = (long)(microseconds%1000000);
  DelayInterval timeToDelay(secs, usecs);

  AlarmHandler* handler = new AlarmHandler(proc, clientData, timeToDelay);
  fDelayQueue.addEntry(handler);

  return (void*)(handler->token());
}
// Inserts "newEntry" into the delta-encoded delay queue: each node stores
// only the time remaining *after* its predecessor fires, so a node's
// absolute expiry is the sum of the deltas from the head up to itself.
void DelayQueue::addEntry(DelayQueueEntry* newEntry) {
  synchronize(); // first account for time already elapsed, so stale deltas don't skew the insert position
  DelayQueueEntry* cur = head();
  // Walk forward, consuming each predecessor's delta, until we reach the
  // first node that expires after "newEntry":
  while (newEntry->fDeltaTimeRemaining >= cur->fDeltaTimeRemaining) {
    newEntry->fDeltaTimeRemaining -= cur->fDeltaTimeRemaining;
    cur = cur->fNext;
  }
  // "cur" fires after "newEntry", so its delta shrinks by the newcomer's:
  cur->fDeltaTimeRemaining -= newEntry->fDeltaTimeRemaining;
  // Add "newEntry" to the queue, just before "cur":
  newEntry->fNext = cur;
  newEntry->fPrev = cur->fPrev;
  cur->fPrev = newEntry->fPrev->fNext = newEntry;
}
在创建TaskScheduler时就向延迟队列中加入一个AlarmHandler,它的超时动作是再添加一个与自己一模一样的handler。
运行
env->taskScheduler().doEventLoop() 函数
// Runs scheduler iterations (readable sockets + timed events) until the
// caller-supplied watch variable, if any, becomes non-zero.
void BasicTaskScheduler0::doEventLoop(char volatile* watchVariable) {
  for (;;) {
    if (watchVariable != NULL && *watchVariable != 0) return;
    SingleStep();
  }
}
在SingleStep() 中在select()后做三件事情:
- 处理加入的HandlerDescriptor,每个HandlerDescriptor跟socket fd相关
- 处理事件触发
- 处理延迟队列
// One iteration of the event loop.  Blocks in select() until a socket is
// ready or the next delayed task is due, then does three things in order:
//   1. calls at most one ready socket handler (HandlerDescriptor),
//   2. handles at most one triggered event,
//   3. fires any expired entries in the delay queue.
// "maxDelayTime" (microseconds; 0 = no cap) further limits how long we block.
void BasicTaskScheduler::SingleStep(unsigned maxDelayTime) {
  fd_set readSet = fReadSet; // make a copy for this select() call
  fd_set writeSet = fWriteSet; // ditto
  fd_set exceptionSet = fExceptionSet; // ditto
  DelayInterval const& timeToDelay = fDelayQueue.timeToNextAlarm();
  struct timeval tv_timeToDelay;
  tv_timeToDelay.tv_sec = timeToDelay.seconds();
  tv_timeToDelay.tv_usec = timeToDelay.useconds();
  // Very large "tv_sec" values cause select() to fail.
  // Don't make it any larger than 1 million seconds (11.5 days)
  const long MAX_TV_SEC = MILLION;
  if (tv_timeToDelay.tv_sec > MAX_TV_SEC) {
    tv_timeToDelay.tv_sec = MAX_TV_SEC;
  }
  // Also check our "maxDelayTime" parameter (if it's > 0):
  if (maxDelayTime > 0 &&
      (tv_timeToDelay.tv_sec > (long)maxDelayTime/MILLION ||
       (tv_timeToDelay.tv_sec == (long)maxDelayTime/MILLION &&
        tv_timeToDelay.tv_usec > (long)maxDelayTime%MILLION))) {
    tv_timeToDelay.tv_sec = maxDelayTime/MILLION;
    tv_timeToDelay.tv_usec = maxDelayTime%MILLION;
  }
  // The select() timeout is set to the delay queue's nearest expiry,
  // so we never sleep past a due task:
  int selectResult = select(fMaxNumSockets, &readSet, &writeSet, &exceptionSet, &tv_timeToDelay);
  if (selectResult < 0) { // select() failed
#if defined(__WIN32__) || defined(_WIN32)
    int err = WSAGetLastError();
    // For some unknown reason, select() in Windoze sometimes fails with WSAEINVAL if
    // it was called with no entries set in "readSet". If this happens, ignore it:
    if (err == WSAEINVAL && readSet.fd_count == 0) {
      err = EINTR;
      // To stop this from happening again, create a dummy socket:
      if (fDummySocketNum >= 0) closeSocket(fDummySocketNum);
      fDummySocketNum = socket(AF_INET, SOCK_DGRAM, 0);
      FD_SET((unsigned)fDummySocketNum, &fReadSet);
    }
    if (err != EINTR) {
#else
    if (errno != EINTR && errno != EAGAIN) {
#endif
      // Unexpected error - treat this as fatal:
#if !defined(_WIN32_WCE)
      perror("BasicTaskScheduler::SingleStep(): select() fails");
      // Because this failure is often "Bad file descriptor" - which is caused by an invalid socket number (i.e., a socket number
      // that had already been closed) being used in "select()" - we print out the sockets that were being used in "select()",
      // to assist in debugging:
      fprintf(stderr, "socket numbers used in the select() call:");
      for (int i = 0; i < 10000; ++i) {
        if (FD_ISSET(i, &fReadSet) || FD_ISSET(i, &fWriteSet) || FD_ISSET(i, &fExceptionSet)) {
          fprintf(stderr, " %d(", i);
          if (FD_ISSET(i, &fReadSet)) fprintf(stderr, "r");
          if (FD_ISSET(i, &fWriteSet)) fprintf(stderr, "w");
          if (FD_ISSET(i, &fExceptionSet)) fprintf(stderr, "e");
          fprintf(stderr, ")");
        }
      }
      fprintf(stderr, "\n");
#endif
      internalError();
    }
  }
  // Call the handler function for one readable socket:
  HandlerIterator iter(*fHandlers);
  HandlerDescriptor* handler;
  // To ensure forward progress through the handlers, begin past the last
  // socket number that we handled:
  if (fLastHandledSocketNum >= 0) {
    while ((handler = iter.next()) != NULL) {
      // Look for the handler that was serviced last time; stop once found:
      if (handler->socketNum == fLastHandledSocketNum) break;
    }
    if (handler == NULL) {
      fLastHandledSocketNum = -1;
      iter.reset(); // start from the beginning instead
    }
  }
  // Scan the handlers *after* the one serviced last time, and run the first
  // one whose socket condition is satisfied:
  while ((handler = iter.next()) != NULL) {
    int sock = handler->socketNum; // alias
    int resultConditionSet = 0;
    if (FD_ISSET(sock, &readSet) && FD_ISSET(sock, &fReadSet)/*sanity check*/) resultConditionSet |= SOCKET_READABLE;
    if (FD_ISSET(sock, &writeSet) && FD_ISSET(sock, &fWriteSet)/*sanity check*/) resultConditionSet |= SOCKET_WRITABLE;
    if (FD_ISSET(sock, &exceptionSet) && FD_ISSET(sock, &fExceptionSet)/*sanity check*/) resultConditionSet |= SOCKET_EXCEPTION;
    if ((resultConditionSet&handler->conditionSet) != 0 && handler->handlerProc != NULL) {
      fLastHandledSocketNum = sock;
      // Note: we set "fLastHandledSocketNum" before calling the handler,
      // in case the handler calls "doEventLoop()" reentrantly.
      (*handler->handlerProc)(handler->clientData, resultConditionSet);
      break;
    }
  }
  // If no handler was called above, wrap around and retry from the start of
  // the list (the entries before "fLastHandledSocketNum" were skipped):
  if (handler == NULL && fLastHandledSocketNum >= 0) {
    // We didn't call a handler, but we didn't get to check all of them,
    // so try again from the beginning:
    iter.reset();
    while ((handler = iter.next()) != NULL) {
      int sock = handler->socketNum; // alias
      int resultConditionSet = 0;
      if (FD_ISSET(sock, &readSet) && FD_ISSET(sock, &fReadSet)/*sanity check*/) resultConditionSet |= SOCKET_READABLE;
      if (FD_ISSET(sock, &writeSet) && FD_ISSET(sock, &fWriteSet)/*sanity check*/) resultConditionSet |= SOCKET_WRITABLE;
      if (FD_ISSET(sock, &exceptionSet) && FD_ISSET(sock, &fExceptionSet)/*sanity check*/) resultConditionSet |= SOCKET_EXCEPTION;
      if ((resultConditionSet&handler->conditionSet) != 0 && handler->handlerProc != NULL) {
        fLastHandledSocketNum = sock;
        // Note: we set "fLastHandledSocketNum" before calling the handler,
        // in case the handler calls "doEventLoop()" reentrantly.
        (*handler->handlerProc)(handler->clientData, resultConditionSet);
        break;
      }
    }
    if (handler == NULL) fLastHandledSocketNum = -1;//because we didn't call a handler
  }
  // Also handle any newly-triggered event (Note that we do this *after* calling a socket handler,
  // in case the triggered event handler modifies the set of readable sockets.)
  if (fTriggersAwaitingHandling != 0) {
    if (fTriggersAwaitingHandling == fLastUsedTriggerMask) {
      // Common-case optimization for a single event trigger:
      fTriggersAwaitingHandling &=~ fLastUsedTriggerMask;
      if (fTriggeredEventHandlers[fLastUsedTriggerNum] != NULL) {
        (*fTriggeredEventHandlers[fLastUsedTriggerNum])(fTriggeredEventClientDatas[fLastUsedTriggerNum]);
      }
    } else {
      // Look for an event trigger that needs handling (making sure that we make forward progress through all possible triggers):
      unsigned i = fLastUsedTriggerNum;
      EventTriggerId mask = fLastUsedTriggerMask;
      do {
        i = (i+1)%MAX_NUM_EVENT_TRIGGERS;
        mask >>= 1;
        if (mask == 0) mask = 0x80000000;
        if ((fTriggersAwaitingHandling&mask) != 0) {
          fTriggersAwaitingHandling &=~ mask;
          if (fTriggeredEventHandlers[i] != NULL) {
            (*fTriggeredEventHandlers[i])(fTriggeredEventClientDatas[i]);
          }
          fLastUsedTriggerMask = mask;
          fLastUsedTriggerNum = i;
          break;
        }
      } while (i != fLastUsedTriggerNum);
    }
  }
  // Also handle any delayed event that may have come due.
  // (Finally, check the delay queue and fire expired entries.)
  fDelayQueue.handleAlarm();
}
在SingleStep()的最后检测延迟队列。在创建TaskScheduler时加入的AlarmHandler保证延迟队列中一直会有一个10ms超时的handler存在,使得select()的超时时间最大是10ms,也就是说至少每10ms检测并执行一次SingleStep的动作。
HandlerSet
在 BasicTaskScheduler0 与handler处理相关的元素:
// To implement background reads:
HandlerSet* fHandlers; 管理HandlerDescriptor链表
int fLastHandledSocketNum;
参照上文调度类关系图,handlerSet中管理handlerdescriptor双向链表,HandlerDescriptor元素有socket号,和此handler需要检测的fd变动条件:读,写,异常。handlerProc是回调函数。
事件触发
在 BasicTaskScheduler0 中与事件触发相关的元素
// To implement event triggers:
EventTriggerId volatile fTriggersAwaitingHandling; // implemented as a 32-bit bitmap
EventTriggerId fLastUsedTriggerMask; // implemented as a 32-bit bitmap
TaskFunc* fTriggeredEventHandlers[MAX_NUM_EVENT_TRIGGERS]; 事件回调函数
void* fTriggeredEventClientDatas[MAX_NUM_EVENT_TRIGGERS]; 响应回调函数的数据
unsigned fLastUsedTriggerNum; // in the range [0,MAX_NUM_EVENT_TRIGGERS)
32个事件,用mask来管理到来的事件。