live555继承关系比较复杂,所以做图记录下h264文件发送过程中关键变量的类型。
上一篇从源码分析了OPTION和DESCRIBE的运行流程。这篇分析剩下的SETUP和PLAY的运行流程。
一、 SETUP
// RTSPServer::RTSPClientConnection::handleCmd_SETUP (excerpt):
// on the first SETUP (no session id in the request) create a new
// per-client RTSPClientSession, then delegate the command to it.
if (!requestIncludedSessionId) {
// No session id was present in the request.
// So create a new "RTSPClientSession" object for this request.
// But first, make sure that we're authenticated to perform this command:
char urlTotalSuffix[2*RTSP_PARAM_STRING_MAX];
// enough space for urlPreSuffix/urlSuffix'\0'
urlTotalSuffix[0] = '\0';
// Rebuild the full "preSuffix/suffix" path that names the stream+track:
if (urlPreSuffix[0] != '\0') {
strcat(urlTotalSuffix, urlPreSuffix);
strcat(urlTotalSuffix, "/");
}
strcat(urlTotalSuffix, urlSuffix);
if (authenticationOK("SETUP", urlTotalSuffix, (char const*)fRequestBuffer)) {
// Authenticated: the server allocates a session object with a fresh id.
clientSession
= (RTSPServer::RTSPClientSession*)fOurRTSPServer.createNewClientSessionWithId();
} else {
areAuthenticated = False;
}
}
// Hand the SETUP off to the (new or previously looked-up) client session;
// it may request immediate streaming ("fStreamAfterSETUP").
if (clientSession != NULL) {
clientSession->handleCmd_SETUP(this, urlPreSuffix, urlSuffix, (char const*)fRequestBuffer);
playAfterSetup = clientSession->fStreamAfterSETUP;
}
执行SETUP指令时,会先创建一个ClientSession,然后让ClientSession去执行SETUP对应的操作。
// Handle a SETUP request for this client session.
// Normally "urlPreSuffix" names the stream (ServerMediaSession) and
// "urlSuffix" names the track (subsession). Aggregate SETUP requests
// (no track name) are also accepted for single-track streams, in which
// case the whole "urlPreSuffix/urlSuffix" path may name the stream.
void RTSPServer::RTSPClientSession
::handleCmd_SETUP(RTSPServer::RTSPClientConnection* ourClientConnection,
		  char const* urlPreSuffix, char const* urlSuffix, char const* fullRequestStr) {
  // Stash the request context in members: the media-session lookup below
  // completes via a callback, which will need all of these.
  fOurClientConnection = ourClientConnection;
  fURLPreSuffix = urlPreSuffix;
  fURLSuffix = urlSuffix;
  fFullRequestStr = fullRequestStr;

  // In the normal case the suffix is the track id and the prefix is the
  // stream name:
  fTrackId = urlSuffix;
  char const* const requestedStreamName = urlPreSuffix;

  // Look up the named stream; "SETUPLookupCompletionFunction1" continues
  // the SETUP once the lookup finishes. The final flag is True only when
  // no ServerMediaSession has been bound to this client session yet.
  fOurServer.lookupServerMediaSession(requestedStreamName, SETUPLookupCompletionFunction1, this,
				      fOurServerMediaSession == NULL);
}
老套路,先找到对应的mediasession再执行SETUPLookupCompletionFunction1。找到后会运行到handleCmd_SETUP_afterLookup2。
// handleCmd_SETUP_afterLookup2 (excerpt): lazily build this session's
// array of per-track stream states the first time SETUP is handled.
if (fStreamStates == NULL) {
// This is the first "SETUP" for this session. Set up our array of states for all of this session's subsessions (tracks):
fNumStreamStates = fOurServerMediaSession->numSubsessions();
fStreamStates = new struct streamState[fNumStreamStates];
// Walk the media session's subsessions and record one state entry each:
ServerMediaSubsessionIterator iter(*fOurServerMediaSession);
ServerMediaSubsession* subsession;
for (unsigned i = 0; i < fNumStreamStates; ++i) {
subsession = iter.next();
fStreamStates[i].subsession = subsession;
fStreamStates[i].tcpSocketNum = -1; // for now; may get set for RTP-over-TCP streaming
fStreamStates[i].streamToken = NULL; // for now; it may be changed by the "getStreamParameters()" call that comes later
}
然后会创建一个fStreamStates,并进行初始化。后面再做一些配置后,执行到
// handleCmd_SETUP (excerpt): ask the subsession for this track's
// transport parameters; the call also creates the stream itself and
// returns its handle in fStreamStates[trackNum].streamToken.
subsession->getStreamParameters(fOurSessionId, fOurClientConnection->fClientAddr,
clientRTPPort, clientRTCPPort,
fStreamStates[trackNum].tcpSocketNum, rtpChannelId, rtcpChannelId,
destinationAddress, destinationTTL, fIsMulticast,
serverRTPPort, serverRTCPPort,
fStreamStates[trackNum].streamToken);
// Inside OnDemandServerMediaSubsession::getStreamParameters (excerpts,
// "..." marks elided code): first create the media source
// (for an H.264 file, the file-reading source chain):
FramedSource* mediaSource
= createNewStreamSource(clientSessionId, streamBitrate);
...
// Create the RTP and RTCP sockets, bound to the server-side ports:
rtpGroupsock = createGroupsock(nullAddress(destinationAddress.ss_family), serverRTPPort);
...
rtcpGroupsock = createGroupsock(nullAddress(destinationAddress.ss_family), serverRTCPPort);
...
// The RTP sink packetizes frames pulled from "mediaSource":
rtpSink = mediaSource == NULL ? NULL
: createNewRTPSink(rtpGroupsock, rtpPayloadType, mediaSource);
...
// Bundle source, sink, ports and sockets into a StreamState; its pointer
// becomes the opaque "streamToken" identifying this track's stream:
streamToken = fLastStreamToken
= new StreamState(*this, serverRTPPort, serverRTCPPort, rtpSink, udpSink,
streamBitrate, mediaSource,
rtpGroupsock, rtcpGroupsock);
在getStreamParameters时,会创建一个mediaSource,创建一个rtpSink,这都跟之前模拟RTPSink时的步骤一样。不同的是,rtpGroupsock和rtcpGroupsock配置的是真实的IP地址和端口。
然后把mediaSource和rtpSink作为参数创建一个streamToken。这个streamToken作为一个唯一标识来标识当前客户端会话对应的子会话,其实就是对应于我们的h264文件流会话。存放在fStreamStates[0]里。并且确定是使用TCP还是UDP来作为RTP的发送协议,并保存客户端的通信信息。
这样就完成了对SETUP的处理。
二、PLAY
SETUP以后就有了clientSession了。那么收到PLAY指令时就会去对应的clientSession执行PLAY指令。
同样的也是根据url找到对应的subsession去处理。
// Dispatch the PLAY command to the subsession resolved from the request URL:
handleCmd_PLAY(ourClientConnection, subsession, fullRequestStr);
handleCmd_PLAY中做了一系列配置后,来到startStream函数。
void OnDemandServerMediaSubsession::startStream(unsigned clientSessionId,
void* streamToken,
TaskFunc* rtcpRRHandler,
void* rtcpRRHandlerClientData,
unsigned short& rtpSeqNum,
unsigned& rtpTimestamp,
ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
void* serverRequestAlternativeByteHandlerClientData) {
StreamState* streamState = (StreamState*)streamToken;
Destinations* destinations
= (Destinations*)(fDestinationsHashTable->Lookup((char const*)clientSessionId));
if (streamState != NULL) {
streamState->startPlaying(destinations, clientSessionId,
rtcpRRHandler, rtcpRRHandlerClientData,
serverRequestAlternativeByteHandler, serverRequestAlternativeByteHandlerClientData);
RTPSink* rtpSink = streamState->rtpSink(); // alias
if (rtpSink != NULL) {
rtpSeqNum = rtpSink->currentSeqNo();
rtpTimestamp = rtpSink->presetNextTimestamp();
}
}
}
然后执行到了streamState->startPlaying
// StreamState::startPlaying (excerpt, "..." marks elided code):
// register the client's address/port as a destination on the RTP socket:
if (fRTPgs != NULL) fRTPgs->addDestination(dests->addr, dests->rtpPort, clientSessionId);
// Register the RTCP destination too, unless it would duplicate the RTP
// one (same socket AND same port number):
if (fRTCPgs != NULL && !(fRTCPgs == fRTPgs && dests->rtcpPort.num() == dests->rtpPort.num())) {
fRTCPgs->addDestination(dests->addr, dests->rtcpPort, clientSessionId);
}
...
...
// Finally kick off the media flow: the RTP sink starts pulling frames
// from the media source (same mechanism as the simulated RTPSink run
// described for DESCRIBE):
if (fRTPSink != NULL) {
fRTPSink->startPlaying(*fMediaSource, afterPlayingStreamState, this);
fAreCurrentlyPlaying = True;
}
在startPlaying函数中添加和更新好客户端的RTP和RTCP地址和端口后,执行fRTPSink->startPlaying
,后面的流程就跟上一篇DESCRIBE中模拟的RTPSink启动流程一样了。至此,live555的RTSP工作流程就大体分析完了。