live555 Study Notes: Establishing an RTSP Connection (RTSP Server Side)

Listening

When the RTSP server is created, the listening socket is created and, in the constructor, added to the scheduler BasicTaskScheduler's HandlerSet* fHandlers; the callback installed for that handler is incomingConnectionHandler.
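
For context, here is a minimal usage sketch (modeled on live555's testOnDemandRTSPServer; the port number, stream name, and file name are placeholders, not taken from this article) showing where the constructor below gets invoked from an application:

#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

int main() {
  // Create the scheduler and environment, then the RTSP server itself;
  // RTSPServer::createNew() sets up the listening socket and runs the constructor shown below:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
  if (rtspServer == NULL) { *env << "Failed to create RTSP server\n"; return 1; }

  // Register a stream, e.g. an H.264 elementary-stream file:
  ServerMediaSession* sms = ServerMediaSession::createNew(*env, "h264ESVideoTest");
  sms->addSubsession(H264VideoFileServerMediaSubsession::createNew(*env, "test.264", False));
  rtspServer->addServerMediaSession(sms);

  env->taskScheduler().doEventLoop(); // the event loop never returns
  return 0;
}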

RTSPServer::RTSPServer(UsageEnvironment& env,
		       int ourSocket, Port ourPort,
		       UserAuthenticationDatabase* authDatabase,
		       unsigned reclamationSeconds)
  : GenericMediaServer(env, ourSocket, ourPort, reclamationSeconds),
    fHTTPServerSocket(-1), fHTTPServerPort(0),
    fClientConnectionsForHTTPTunneling(NULL), // will get created if needed
    fTCPStreamingDatabase(HashTable::create(ONE_WORD_HASH_KEYS)),
    fPendingRegisterOrDeregisterRequests(HashTable::create(ONE_WORD_HASH_KEYS)),
    fRegisterOrDeregisterRequestCounter(0), fAuthDB(authDatabase), fAllowStreamingRTPOverTCP(True) {
}
GenericMediaServer
::GenericMediaServer(UsageEnvironment& env, int ourSocket, Port ourPort,
		     unsigned reclamationSeconds)
  : Medium(env),
    fServerSocket(ourSocket), fServerPort(ourPort), fReclamationSeconds(reclamationSeconds),
    fServerMediaSessions(HashTable::create(STRING_HASH_KEYS)),
    fClientConnections(HashTable::create(ONE_WORD_HASH_KEYS)),
    fClientSessions(HashTable::create(STRING_HASH_KEYS)) {
  ignoreSigPipeOnSocket(fServerSocket); // so that clients on the same host that are killed don't also kill us
  
  // Arrange to handle connections from others:
  env.taskScheduler().turnOnBackgroundReadHandling(fServerSocket, incomingConnectionHandler, this);
  env << "GenericMediaServer constructor" << "add listenning socket" << "\n";
}

void GenericMediaServer::incomingConnectionHandler(void* instance, int /*mask*/) {
  GenericMediaServer* server = (GenericMediaServer*)instance;
  server->incomingConnectionHandler();
}
void GenericMediaServer::incomingConnectionHandler() {
  incomingConnectionHandlerOnSocket(fServerSocket);
}

ClientConnection

In incomingConnectionHandler, a ClientConnection is created when a new connection arrives.

void GenericMediaServer::incomingConnectionHandlerOnSocket(int serverSocket) {
  struct sockaddr_in clientAddr;
  SOCKLEN_T clientAddrLen = sizeof clientAddr;
  int clientSocket = accept(serverSocket, (struct sockaddr*)&clientAddr, &clientAddrLen);
  if (clientSocket < 0) {
    int err = envir().getErrno();
    if (err != EWOULDBLOCK) {
      envir().setResultErrMsg("accept() failed: ");
    }
    return;
  }
  ignoreSigPipeOnSocket(clientSocket); // so that clients on the same host that are killed don't also kill us
  makeSocketNonBlocking(clientSocket);
  increaseSendBufferTo(envir(), clientSocket, 50*1024);
  
#ifdef DEBUG
  envir() << "accept()ed connection from " << AddressString(clientAddr).val() << "\n";
#endif
  envir() << "incommingConnectionHandlerOnSocket" << "\n";
  // Create a new object for handling this connection:
  (void)createNewClientConnection(clientSocket, clientAddr);
}

GenericMediaServer::ClientConnection*
RTSPServer::createNewClientConnection(int clientSocket, struct sockaddr_in clientAddr) {
  envir() << "new RTSPClinetConnection" << "\n";
  return new RTSPClientConnection(*this, clientSocket, clientAddr);
}

RTSPServer::RTSPClientConnection
::RTSPClientConnection(RTSPServer& ourServer, int clientSocket, struct sockaddr_in clientAddr)
  : GenericMediaServer::ClientConnection(ourServer, clientSocket, clientAddr),
    fOurRTSPServer(ourServer), fClientInputSocket(fOurSocket), fClientOutputSocket(fOurSocket),
    fIsActive(True), fRecursionCount(0), fOurSessionCookie(NULL) {
  resetRequestBuffer();
}

GenericMediaServer::ClientConnection
::ClientConnection(GenericMediaServer& ourServer, int clientSocket, struct sockaddr_in clientAddr)
  : fOurServer(ourServer), fOurSocket(clientSocket), fClientAddr(clientAddr) {
  // Add ourself to our 'client connections' table:
  fOurServer.fClientConnections->Add((char const*)this, this);
  
  // Arrange to handle incoming requests:
  resetRequestBuffer();
  envir().taskScheduler()
    .setBackgroundHandling(fOurSocket, SOCKET_READABLE|SOCKET_EXCEPTION, incomingRequestHandler, this);
}

Handling the request

The incomingRequestHandler callback is responsible for handling the requests sent by the client.
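
Before looking at handleRequestBytes(), it helps to see how it gets called. Roughly (paraphrased from GenericMediaServer.cpp of the live555 version quoted here; check your own copy for the exact text), the per-connection read handler just reads whatever is available into fRequestBuffer and forwards the byte count:

void GenericMediaServer::ClientConnection::incomingRequestHandler(void* instance, int /*mask*/) {
  ClientConnection* connection = (ClientConnection*)instance;
  connection->incomingRequestHandler();
}

void GenericMediaServer::ClientConnection::incomingRequestHandler() {
  struct sockaddr_in dummy; // 'from' address, meaningless for a TCP connection
  int bytesRead = readSocket(envir(), fOurSocket,
                             &fRequestBuffer[fRequestBytesAlreadySeen], fRequestBufferBytesLeft, dummy);
  handleRequestBytes(bytesRead);
}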

void RTSPServer::RTSPClientConnection::handleRequestBytes(int newBytesRead) {  
  int numBytesRemaining = 0;
  ++fRecursionCount;
  envir() << "rtsp server handle request bytes" << "\n";
  do {
    RTSPServer::RTSPClientSession* clientSession = NULL;

    if (newBytesRead < 0 || (unsigned)newBytesRead >= fRequestBufferBytesLeft) {
      // Either the client socket has died, or the request was too big for us.
      // Terminate this connection:
#ifdef DEBUG
      fprintf(stderr, "RTSPClientConnection[%p]::handleRequestBytes() read %d new bytes (of %d); terminating connection!\n", this, newBytesRead, fRequestBufferBytesLeft);
#endif
      fIsActive = False;
      break;
    }
    
    Boolean endOfMsg = False;
    unsigned char* ptr = &fRequestBuffer[fRequestBytesAlreadySeen];
#ifdef DEBUG
    ptr[newBytesRead] = '\0';
    fprintf(stderr, "RTSPClientConnection[%p]::handleRequestBytes() %s %d new bytes:%s\n",
	    this, numBytesRemaining > 0 ? "processing" : "read", newBytesRead, ptr);
#endif
    
    if (fClientOutputSocket != fClientInputSocket && numBytesRemaining == 0) {
      // We're doing RTSP-over-HTTP tunneling, and input commands are assumed to have been Base64-encoded.
      // We therefore Base64-decode as much of this new data as we can (i.e., up to a multiple of 4 bytes).
      
      // But first, we remove any whitespace that may be in the input data:
      unsigned toIndex = 0;
      for (int fromIndex = 0; fromIndex < newBytesRead; ++fromIndex) {
	char c = ptr[fromIndex];
	if (!(c == ' ' || c == '\t' || c == '\r' || c == '\n')) { // not 'whitespace': space,tab,CR,NL
	  ptr[toIndex++] = c;
	}
      }
      newBytesRead = toIndex;
      
      unsigned numBytesToDecode = fBase64RemainderCount + newBytesRead;
      unsigned newBase64RemainderCount = numBytesToDecode%4;
      numBytesToDecode -= newBase64RemainderCount;
      if (numBytesToDecode > 0) {
	ptr[newBytesRead] = '\0';
	unsigned decodedSize;
	unsigned char* decodedBytes = base64Decode((char const*)(ptr-fBase64RemainderCount), numBytesToDecode, decodedSize);
#ifdef DEBUG
	fprintf(stderr, "Base64-decoded %d input bytes into %d new bytes:", numBytesToDecode, decodedSize);
	for (unsigned k = 0; k < decodedSize; ++k) fprintf(stderr, "%c", decodedBytes[k]);
	fprintf(stderr, "\n");
#endif
	
	// Copy the new decoded bytes in place of the old ones (we can do this because there are fewer decoded bytes than original):
	unsigned char* to = ptr-fBase64RemainderCount;
	for (unsigned i = 0; i < decodedSize; ++i) *to++ = decodedBytes[i];
	
	// Then copy any remaining (undecoded) bytes to the end:
	for (unsigned j = 0; j < newBase64RemainderCount; ++j) *to++ = (ptr-fBase64RemainderCount+numBytesToDecode)[j];
	
	newBytesRead = decodedSize - fBase64RemainderCount + newBase64RemainderCount;
	  // adjust to allow for the size of the new decoded data (+ remainder)
	delete[] decodedBytes;
      }
      fBase64RemainderCount = newBase64RemainderCount;
    }
    
    unsigned char* tmpPtr = fLastCRLF + 2;
    if (fBase64RemainderCount == 0) { // no more Base-64 bytes remain to be read/decoded
      // Look for the end of the message: <CR><LF><CR><LF>
      if (tmpPtr < fRequestBuffer) tmpPtr = fRequestBuffer;
      while (tmpPtr < &ptr[newBytesRead-1]) {
	if (*tmpPtr == '\r' && *(tmpPtr+1) == '\n') {
	  if (tmpPtr - fLastCRLF == 2) { // This is it:
	    endOfMsg = True;
	    break;
	  }
	  fLastCRLF = tmpPtr;
	}
	++tmpPtr;
      }
    }
    
    fRequestBufferBytesLeft -= newBytesRead;
    fRequestBytesAlreadySeen += newBytesRead;
    
    if (!endOfMsg) break; // subsequent reads will be needed to complete the request
    
    // Parse the request string into command name and 'CSeq', then handle the command:
    fRequestBuffer[fRequestBytesAlreadySeen] = '\0';
    char cmdName[RTSP_PARAM_STRING_MAX];
    char urlPreSuffix[RTSP_PARAM_STRING_MAX];
    char urlSuffix[RTSP_PARAM_STRING_MAX];
    char cseq[RTSP_PARAM_STRING_MAX];
    char sessionIdStr[RTSP_PARAM_STRING_MAX];
    unsigned contentLength = 0;
    fLastCRLF[2] = '\0'; // temporarily, for parsing
    Boolean parseSucceeded = parseRTSPRequestString((char*)fRequestBuffer, fLastCRLF+2 - fRequestBuffer,
						    cmdName, sizeof cmdName,
						    urlPreSuffix, sizeof urlPreSuffix,
						    urlSuffix, sizeof urlSuffix,
						    cseq, sizeof cseq,
						    sessionIdStr, sizeof sessionIdStr,
						    contentLength);
    fLastCRLF[2] = '\r'; // restore its value
    Boolean playAfterSetup = False;
    if (parseSucceeded) {
#ifdef DEBUG
      fprintf(stderr, "parseRTSPRequestString() succeeded, returning cmdName \"%s\", urlPreSuffix \"%s\", urlSuffix \"%s\", CSeq \"%s\", Content-Length %u, with %d bytes following the message.\n", cmdName, urlPreSuffix, urlSuffix, cseq, contentLength, ptr + newBytesRead - (tmpPtr + 2));
#endif
      // If there was a "Content-Length:" header, then make sure we've received all of the data that it specified:
      if (ptr + newBytesRead < tmpPtr + 2 + contentLength) break; // we still need more data; subsequent reads will give it to us 
      
      // If the request included a "Session:" id, and it refers to a client session that's
      // current ongoing, then use this command to indicate 'liveness' on that client session:
      Boolean const requestIncludedSessionId = sessionIdStr[0] != '\0';
      if (requestIncludedSessionId) {
	clientSession
	  = (RTSPServer::RTSPClientSession*)(fOurRTSPServer.lookupClientSession(sessionIdStr));
	if (clientSession != NULL) clientSession->noteLiveness();
      }
    
      // We now have a complete RTSP request.
      // Handle the specified command (beginning with commands that are session-independent):
      fCurrentCSeq = cseq;
      if (strcmp(cmdName, "OPTIONS") == 0) {
	// If the "OPTIONS" command included a "Session:" id for a session that doesn't exist,
	// then treat this as an error:
	if (requestIncludedSessionId && clientSession == NULL) {
	  handleCmd_sessionNotFound();
	} else {
	  // Normal case:
	  handleCmd_OPTIONS();
	}
      } else if (urlPreSuffix[0] == '\0' && urlSuffix[0] == '*' && urlSuffix[1] == '\0') {
	// The special "*" URL means: an operation on the entire server.  This works only for GET_PARAMETER and SET_PARAMETER:
	if (strcmp(cmdName, "GET_PARAMETER") == 0) {
	  handleCmd_GET_PARAMETER((char const*)fRequestBuffer);
	} else if (strcmp(cmdName, "SET_PARAMETER") == 0) {
	  handleCmd_SET_PARAMETER((char const*)fRequestBuffer);
	} else {
	  handleCmd_notSupported();
	}
      } else if (strcmp(cmdName, "DESCRIBE") == 0) {
	handleCmd_DESCRIBE(urlPreSuffix, urlSuffix, (char const*)fRequestBuffer);
      } else if (strcmp(cmdName, "SETUP") == 0) {
	Boolean areAuthenticated = True;

	if (!requestIncludedSessionId) {
	  // No session id was present in the request.
	  // So create a new "RTSPClientSession" object for this request.

	  // But first, make sure that we're authenticated to perform this command:
	  char urlTotalSuffix[2*RTSP_PARAM_STRING_MAX];
	      // enough space for urlPreSuffix/urlSuffix'\0'
	  urlTotalSuffix[0] = '\0';
	  if (urlPreSuffix[0] != '\0') {
	    strcat(urlTotalSuffix, urlPreSuffix);
	    strcat(urlTotalSuffix, "/");
	  }
	  strcat(urlTotalSuffix, urlSuffix);
	  if (authenticationOK("SETUP", urlTotalSuffix, (char const*)fRequestBuffer)) {
	    clientSession
	      = (RTSPServer::RTSPClientSession*)fOurRTSPServer.createNewClientSessionWithId();
	  } else {
	    areAuthenticated = False;
	  }
	}
	if (clientSession != NULL) {
	  clientSession->handleCmd_SETUP(this, urlPreSuffix, urlSuffix, (char const*)fRequestBuffer);
	  playAfterSetup = clientSession->fStreamAfterSETUP;
	} else if (areAuthenticated) {
	  handleCmd_sessionNotFound();
	}
      } else if (strcmp(cmdName, "TEARDOWN") == 0
		 || strcmp(cmdName, "PLAY") == 0
		 || strcmp(cmdName, "PAUSE") == 0
		 || strcmp(cmdName, "GET_PARAMETER") == 0
		 || strcmp(cmdName, "SET_PARAMETER") == 0) {
	if (clientSession != NULL) {
	  clientSession->handleCmd_withinSession(this, cmdName, urlPreSuffix, urlSuffix, (char const*)fRequestBuffer);
	} else {
	  handleCmd_sessionNotFound();
	}
      } else if (strcmp(cmdName, "REGISTER") == 0 || strcmp(cmdName, "DEREGISTER") == 0) {
	// Because - unlike other commands - an implementation of this command needs
	// the entire URL, we re-parse the command to get it:
	char* url = strDupSize((char*)fRequestBuffer);
	if (sscanf((char*)fRequestBuffer, "%*s %s", url) == 1) {
	  // Check for special command-specific parameters in a "Transport:" header:
	  Boolean reuseConnection, deliverViaTCP;
	  char* proxyURLSuffix;
	  parseTransportHeaderForREGISTER((const char*)fRequestBuffer, reuseConnection, deliverViaTCP, proxyURLSuffix);

	  handleCmd_REGISTER(cmdName, url, urlSuffix, (char const*)fRequestBuffer, reuseConnection, deliverViaTCP, proxyURLSuffix);
	  delete[] proxyURLSuffix;
	} else {
	  handleCmd_bad();
	}
	delete[] url;
      } else {
	// The command is one that we don't handle:
	handleCmd_notSupported();
      }
    } else {
#ifdef DEBUG
      fprintf(stderr, "parseRTSPRequestString() failed; checking now for HTTP commands (for RTSP-over-HTTP tunneling)...\n");
#endif
      // The request was not (valid) RTSP, but check for a special case: HTTP commands (for setting up RTSP-over-HTTP tunneling):
      char sessionCookie[RTSP_PARAM_STRING_MAX];
      char acceptStr[RTSP_PARAM_STRING_MAX];
      *fLastCRLF = '\0'; // temporarily, for parsing
      parseSucceeded = parseHTTPRequestString(cmdName, sizeof cmdName,
					      urlSuffix, sizeof urlPreSuffix,
					      sessionCookie, sizeof sessionCookie,
					      acceptStr, sizeof acceptStr);
      *fLastCRLF = '\r';
      if (parseSucceeded) {
#ifdef DEBUG
	fprintf(stderr, "parseHTTPRequestString() succeeded, returning cmdName \"%s\", urlSuffix \"%s\", sessionCookie \"%s\", acceptStr \"%s\"\n", cmdName, urlSuffix, sessionCookie, acceptStr);
#endif
	// Check that the HTTP command is valid for RTSP-over-HTTP tunneling: There must be a 'session cookie'.
	Boolean isValidHTTPCmd = True;
	if (strcmp(cmdName, "OPTIONS") == 0) {
	  handleHTTPCmd_OPTIONS();
	} else if (sessionCookie[0] == '\0') {
	  // There was no "x-sessioncookie:" header.  If there was an "Accept: application/x-rtsp-tunnelled" header,
	  // then this is a bad tunneling request.  Otherwise, assume that it's an attempt to access the stream via HTTP.
	  if (strcmp(acceptStr, "application/x-rtsp-tunnelled") == 0) {
	    isValidHTTPCmd = False;
	  } else {
	    handleHTTPCmd_StreamingGET(urlSuffix, (char const*)fRequestBuffer);
	  }
	} else if (strcmp(cmdName, "GET") == 0) {
	  handleHTTPCmd_TunnelingGET(sessionCookie);
	} else if (strcmp(cmdName, "POST") == 0) {
	  // We might have received additional data following the HTTP "POST" command - i.e., the first Base64-encoded RTSP command.
	  // Check for this, and handle it if it exists:
	  unsigned char const* extraData = fLastCRLF+4;
	  unsigned extraDataSize = &fRequestBuffer[fRequestBytesAlreadySeen] - extraData;
	  if (handleHTTPCmd_TunnelingPOST(sessionCookie, extraData, extraDataSize)) {
	    // We don't respond to the "POST" command, and we go away:
	    fIsActive = False;
	    break;
	  }
	} else {
	  isValidHTTPCmd = False;
	}
	if (!isValidHTTPCmd) {
	  handleHTTPCmd_notSupported();
	}
      } else {
#ifdef DEBUG
	fprintf(stderr, "parseHTTPRequestString() failed!\n");
#endif
	handleCmd_bad();
      }
    }
    
#ifdef DEBUG
    fprintf(stderr, "sending response: %s", fResponseBuffer);
#endif
    send(fClientOutputSocket, (char const*)fResponseBuffer, strlen((char*)fResponseBuffer), 0);
    
    if (playAfterSetup) {
      // The client has asked for streaming to commence now, rather than after a
      // subsequent "PLAY" command.  So, simulate the effect of a "PLAY" command:
      clientSession->handleCmd_withinSession(this, "PLAY", urlPreSuffix, urlSuffix, (char const*)fRequestBuffer);
    }
    
    // Check whether there are extra bytes remaining in the buffer, after the end of the request (a rare case).
    // If so, move them to the front of our buffer, and keep processing it, because it might be a following, pipelined request.
    unsigned requestSize = (fLastCRLF+4-fRequestBuffer) + contentLength;
    numBytesRemaining = fRequestBytesAlreadySeen - requestSize;
    resetRequestBuffer(); // to prepare for any subsequent request
    
    if (numBytesRemaining > 0) {
      memmove(fRequestBuffer, &fRequestBuffer[requestSize], numBytesRemaining);
      newBytesRead = numBytesRemaining;
    }
  } while (numBytesRemaining > 0);
  
  --fRecursionCount;
  if (!fIsActive) {
    if (fRecursionCount > 0) closeSockets(); else delete this;
    // Note: The "fRecursionCount" test is for a pathological situation where we reenter the event loop and get called recursively
    // while handling a command (e.g., while handling a "DESCRIBE", to get a SDP description).
    // In such a case we don't want to actually delete ourself until we leave the outermost call.
  }
}

Command interaction

DESCRIBE

Client DESCRIBE request (screenshot not included here)
Server DESCRIBE response (screenshot not included here)
The SDP carried in the response:

v=0 
o=- 1592096473074819 1 IN IP4 10.0.2.103 
s=Session streamed by "testOnDemandRTSPServer" 
i=h264ESVideoTest 
t=0 0 
a=tool:LIVE555 Streaming Media v2017.10.28 
a=type:broadcast 
a=control:* 
a=range:npt=0- 
a=x-qt-text-nam:Session streamed by "testOnDemandRTSPServer" 
a=x-qt-text-inf:h264ESVideoTest 
m=video 0 RTP/AVP 96 
c=IN IP4 0.0.0.0 
b=AS:500 
a=rtpmap:96 H264/90000 
a=fmtp:96 packetization-mode=1;profile-level-id=640028;sprop-parameter-sets=J2QAKKwa1AoAt03AQEBQAAA+gAAHUwBA,KO4E8sA= 
a=control:track1 
SDP

SDP is the Session Description Protocol. One returned SDP document describes one session: it begins with v=; within the session there are one or more media descriptions, each beginning with m=; a= lines carry attributes of the session or of a particular medium.
a=control:

The a=control attribute may appear at the session level or at the media level.
If it appears at the media level, its value is the URL used to control that particular stream; this is the URL used at SETUP time.
If it appears at the session level, its value is the URL used for aggregate control; it is used for PLAY and TEARDOWN.
The value of a=control may be a relative URL or an absolute URL. How to tell the two apart is described in RFC 1808 (roughly: whether the string contains "://").
If the value of a=control is relative, the base path is obtained from the Content-Base header, the Content-Location header, or the request URL. If the value of a=control is "*", the URL used is simply the value of Content-Base, Content-Location, or the request URL itself.
Here "request URL" means the URL of the DESCRIBE request, since a=control is carried in the SDP returned by DESCRIBE. For example, with the SDP above, the media-level a=control:track1 resolves against the DESCRIBE URL to rtsp://<server>:8554/h264ESVideoTest/track1, which is exactly the URL used in the SETUP request shown later.
DESCRIBE response

Details of the DESCRIBE response
The following adds detail that the description above left out.
H264VideoFileServerMediaSubsession::sdpLines() is called to generate the media-level SDP information; this function is actually implemented in the parent class OnDemandServerMediaSubsession. OnDemandServerMediaSubsession::sdpLines() calls two important virtual functions, createNewStreamSource() and createNewRTPSink(), whose implementations live in the H264VideoFileServerMediaSubsession class.
H264VideoFileServerMediaSubsession::createNewStreamSource() returns an H264VideoStreamFramer object.
H264VideoFileServerMediaSubsession::createNewRTPSink() returns an H264VideoRTPSink object.

H264VideoFileServerMediaSubsession::createNewRTPSink()

The important initialization in createNewRTPSink() is creating the OutPacketBuffer. Two of its parameters matter most here: fPreferred, the preferred payload size, defaults to RTP_PAYLOAD_PREFERRED_SIZE = 1000, while fMax, the maximum packet length, is RTP_PAYLOAD_MAX_SIZE = 1456.

#ifndef RTP_PAYLOAD_MAX_SIZE
#define RTP_PAYLOAD_MAX_SIZE 1456
      // Default max packet size (1500, minus allowance for IP, UDP, UMTP headers)
      // (Also, make it a multiple of 4 bytes, just in case that matters.)
#endif
#ifndef RTP_PAYLOAD_PREFERRED_SIZE
#define RTP_PAYLOAD_PREFERRED_SIZE ((RTP_PAYLOAD_MAX_SIZE) < 1000 ? (RTP_PAYLOAD_MAX_SIZE) : 1000)
#endif

MultiFramedRTPSink::MultiFramedRTPSink(UsageEnvironment& env,
				       Groupsock* rtpGS,
				       unsigned char rtpPayloadType,
				       unsigned rtpTimestampFrequency,
				       char const* rtpPayloadFormatName,
				       unsigned numChannels)
  : RTPSink(env, rtpGS, rtpPayloadType, rtpTimestampFrequency,
	    rtpPayloadFormatName, numChannels),
    fOutBuf(NULL), fCurFragmentationOffset(0), fPreviousFrameEndedFragmentation(False),
    fOnSendErrorFunc(NULL), fOnSendErrorData(NULL) {
  setPacketSizes((RTP_PAYLOAD_PREFERRED_SIZE), (RTP_PAYLOAD_MAX_SIZE));
}
void MultiFramedRTPSink::setPacketSizes(unsigned preferredPacketSize,
					unsigned maxPacketSize) {
  if (preferredPacketSize > maxPacketSize || preferredPacketSize == 0) return;
      // sanity check

  delete fOutBuf;
  fOutBuf = new OutPacketBuffer(preferredPacketSize, maxPacketSize);
  fOurMaxPacketSize = maxPacketSize; // save value, in case subclasses need it
}

// OutPacketBuffer //

unsigned OutPacketBuffer::maxSize = 600000; // by default

OutPacketBuffer
::OutPacketBuffer(unsigned preferredPacketSize, unsigned maxPacketSize, unsigned maxBufferSize)
  : fPreferred(preferredPacketSize), fMax(maxPacketSize),
    fOverflowDataSize(0) {
  if (maxBufferSize == 0) maxBufferSize = maxSize;
  unsigned maxNumPackets = (maxBufferSize + (maxPacketSize-1))/maxPacketSize;
  fLimit = maxNumPackets*maxPacketSize;
  fBuf = new unsigned char[fLimit];
  resetPacketStart();
  resetOffset();
  resetOverflowData();
}
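
As an aside (a common usage pattern, not something this article's code path exercises): OutPacketBuffer::maxSize is a global default, and applications whose encoded frames can exceed 600000 bytes typically raise it before any RTPSink (and hence any OutPacketBuffer) is created, for example:

  // e.g. near the top of main(), before the RTSP server and its subsessions are created:
  OutPacketBuffer::maxSize = 2000000; // default is 600000; larger frames would otherwise be truncated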

Obtaining the SDP lines

char const*
OnDemandServerMediaSubsession::sdpLines() {
  if (fSDPLines == NULL) {
    // We need to construct a set of SDP lines that describe this
    // subsession (as a unicast stream).  To do so, we first create
    // dummy (unused) source and "RTPSink" objects,
    // whose parameters we use for the SDP lines:
    unsigned estBitrate;
    // Create the H264VideoStreamFramer object
    FramedSource* inputSource = createNewStreamSource(0, estBitrate);
    if (inputSource == NULL) return NULL; // file not found

    struct in_addr dummyAddr;
    dummyAddr.s_addr = 0;
    Groupsock* dummyGroupsock = createGroupsock(dummyAddr, 0);
    unsigned char rtpPayloadType = 96 + trackNumber()-1; // if dynamic
    RTPSink* dummyRTPSink = createNewRTPSink(dummyGroupsock, rtpPayloadType, inputSource);
    if (dummyRTPSink != NULL && dummyRTPSink->estimatedBitrate() > 0) estBitrate = dummyRTPSink->estimatedBitrate();

    setSDPLinesFromRTPSink(dummyRTPSink, inputSource, estBitrate);
    Medium::close(dummyRTPSink);
    delete dummyGroupsock;
    closeStreamSource(inputSource);
  }

  return fSDPLines;
}

Creating the H264VideoStreamFramer object also creates the associated H264or5VideoStreamParser object; the H264VideoStreamFramer is additionally tied to a ByteStreamFileSource object.
OnDemandServerMediaSubsession::setSDPLinesFromRTPSink() then uses the FramedSource and RTPSink objects just created to play a short stretch of the file in order to produce the SDP information. A quick digression on the overall flow with which the live555 RTSPServer streams a media resource: the server uses an RTPSink to build and hold RTP packets; the RTPSink keeps requesting frame data from the FramedSource; once the FramedSource has obtained a frame it invokes a callback that hands the data to the RTPSink for processing; and inside that callback the RTPSink sends the data to the client.
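
The pull model just described can be condensed into a small hypothetical sink. This is only a sketch: MySink is not a live555 class, but getNextFrame(), onSourceClosure() and the continuePlaying() override are the real MediaSink/FramedSource API.

#include "liveMedia.hh"

class MySink : public MediaSink {
public:
  static MySink* createNew(UsageEnvironment& env) { return new MySink(env); }

protected:
  MySink(UsageEnvironment& env) : MediaSink(env) {}

  // Called by startPlaying(), and again after each frame, to pull more data from fSource:
  virtual Boolean continuePlaying() {
    if (fSource == NULL) return False;
    fSource->getNextFrame(fBuffer, sizeof fBuffer,
                          afterGettingFrame, this,
                          onSourceClosure, this);
    return True;
  }

private:
  static void afterGettingFrame(void* clientData, unsigned frameSize,
                                unsigned /*numTruncatedBytes*/,
                                struct timeval /*presentationTime*/,
                                unsigned /*durationInMicroseconds*/) {
    MySink* sink = (MySink*)clientData;
    // A real RTPSink would packetize and send 'frameSize' bytes of sink->fBuffer here.
    sink->continuePlaying(); // then keep pulling frames from the source
  }

  unsigned char fBuffer[100000];
};

This is essentially the loop that MultiFramedRTPSink runs internally, and it is also what keeps the file playing while the auxiliary SDP line is being waited for (see checkForAuxSDPLine() later in this article).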

void OnDemandServerMediaSubsession
::setSDPLinesFromRTPSink(RTPSink* rtpSink, FramedSource* inputSource, unsigned estBitrate) {
  if (rtpSink == NULL) return;

  char const* mediaType = rtpSink->sdpMediaType();
  unsigned char rtpPayloadType = rtpSink->rtpPayloadType();
  AddressString ipAddressStr(fServerAddressForSDP);
  char* rtpmapLine = rtpSink->rtpmapLine();
  char const* rtcpmuxLine = fMultiplexRTCPWithRTP ? "a=rtcp-mux\r\n" : "";
  char const* rangeLine = rangeSDPLine();
  char const* auxSDPLine = getAuxSDPLine(rtpSink, inputSource);
  if (auxSDPLine == NULL) auxSDPLine = "";

  char const* const sdpFmt =
    "m=%s %u RTP/AVP %d\r\n"
    "c=IN IP4 %s\r\n"
    "b=AS:%u\r\n"
    "%s"
    "%s"
    "%s"
    "%s"
    "a=control:%s\r\n";
  unsigned sdpFmtSize = strlen(sdpFmt)
    + strlen(mediaType) + 5 /* max short len */ + 3 /* max char len */
    + strlen(ipAddressStr.val())
    + 20 /* max int len */
    + strlen(rtpmapLine)
    + strlen(rtcpmuxLine)
    + strlen(rangeLine)
    + strlen(auxSDPLine)
    + strlen(trackId());
  char* sdpLines = new char[sdpFmtSize];
  sprintf(sdpLines, sdpFmt,
	  mediaType, // m= <media>
	  fPortNumForSDP, // m= <port>
	  rtpPayloadType, // m= <fmt list>
	  ipAddressStr.val(), // c= address
	  estBitrate, // b=AS:<bandwidth>
	  rtpmapLine, // a=rtpmap:... (if present)
	  rtcpmuxLine, // a=rtcp-mux:... (if present)
	  rangeLine, // a=range:... (if present)
	  auxSDPLine, // optional extra SDP line
	  trackId()); // a=control:<track-id>
  delete[] (char*)rangeLine; delete[] rtpmapLine;

  fSDPLines = strDup(sdpLines);
  delete[] sdpLines;
}
char const* H264VideoFileServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) {
  if (fAuxSDPLine != NULL) return fAuxSDPLine; // it's already been set up (for a previous client)

  if (fDummyRTPSink == NULL) { // we're not already setting it up for another, concurrent stream
    // Note: For H264 video files, the 'config' information ("profile-level-id" and "sprop-parameter-sets") isn't known
    // until we start reading the file.  This means that "rtpSink"s "auxSDPLine()" will be NULL initially,
    // and we need to start reading data from our file until this changes.
    fDummyRTPSink = rtpSink;

    // Start reading the file:
    fDummyRTPSink->startPlaying(*inputSource, afterPlayingDummy, this);

    // Check whether the sink's 'auxSDPLine()' is ready:
    checkForAuxSDPLine(this);
  }

  envir().taskScheduler().doEventLoop(&fDoneFlag);

  return fAuxSDPLine;
}

For the details of how playback itself works, see the write-up of the playback process.
checkForAuxSDPLine() checks whether the auxiliary SDP line has been obtained yet. If it has not, execution stays here, inside the event-scheduling loop, repeatedly playing and re-checking; once the line is available, the loop is exited and getAuxSDPLine() returns.

static void checkForAuxSDPLine(void* clientData) {
  H264VideoFileServerMediaSubsession* subsess = (H264VideoFileServerMediaSubsession*)clientData;
  subsess->checkForAuxSDPLine1();
}

void H264VideoFileServerMediaSubsession::checkForAuxSDPLine1() {
  nextTask() = NULL;

  char const* dasl;
  if (fAuxSDPLine != NULL) {
    // Signal the event loop that we're done:
    setDoneFlag();
  } else if (fDummyRTPSink != NULL && (dasl = fDummyRTPSink->auxSDPLine()) != NULL) {
    fAuxSDPLine = strDup(dasl);
    fDummyRTPSink = NULL;

    // Signal the event loop that we're done:
    setDoneFlag();
  } else if (!fDoneFlag) {
    // try again after a brief delay:
    int uSecsToDelay = 100000; // 100 ms
    nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsToDelay,
			      (TaskFunc*)checkForAuxSDPLine, this);
  }
}
char const* H264VideoRTPSink::auxSDPLine() {
  // Generate a new "a=fmtp:" line each time, using our SPS and PPS (if we have them),
  // otherwise parameters from our framer source (in case they've changed since the last time that
  // we were called):
  H264or5VideoStreamFramer* framerSource = NULL;
  u_int8_t* vpsDummy = NULL; unsigned vpsDummySize = 0;
  u_int8_t* sps = fSPS; unsigned spsSize = fSPSSize;
  u_int8_t* pps = fPPS; unsigned ppsSize = fPPSSize;
  if (sps == NULL || pps == NULL) {
    // We need to get SPS and PPS from our framer source:
    if (fOurFragmenter == NULL) return NULL; // we don't yet have a fragmenter (and therefore not a source)
    framerSource = (H264or5VideoStreamFramer*)(fOurFragmenter->inputSource());
    if (framerSource == NULL) return NULL; // we don't yet have a source

    framerSource->getVPSandSPSandPPS(vpsDummy, vpsDummySize, sps, spsSize, pps, ppsSize);
    if (sps == NULL || pps == NULL) return NULL; // our source isn't ready
  }

  // Set up the "a=fmtp:" SDP line for this stream:
  u_int8_t* spsWEB = new u_int8_t[spsSize]; // "WEB" means "Without Emulation Bytes"
  unsigned spsWEBSize = removeH264or5EmulationBytes(spsWEB, spsSize, sps, spsSize);
  if (spsWEBSize < 4) { // Bad SPS size => assume our source isn't ready
    delete[] spsWEB;
    return NULL;
  }
  u_int32_t profileLevelId = (spsWEB[1]<<16) | (spsWEB[2]<<8) | spsWEB[3];
  delete[] spsWEB;

  char* sps_base64 = base64Encode((char*)sps, spsSize);
  char* pps_base64 = base64Encode((char*)pps, ppsSize);

  char const* fmtpFmt =
    "a=fmtp:%d packetization-mode=1"
    ";profile-level-id=%06X"
    ";sprop-parameter-sets=%s,%s\r\n";
  unsigned fmtpFmtSize = strlen(fmtpFmt)
    + 3 /* max char len */
    + 6 /* 3 bytes in hex */
    + strlen(sps_base64) + strlen(pps_base64);
  char* fmtp = new char[fmtpFmtSize];
  sprintf(fmtp, fmtpFmt,
          rtpPayloadType(),
	  profileLevelId,
          sps_base64, pps_base64);

  delete[] sps_base64;
  delete[] pps_base64;

  delete[] fFmtpSDPLine; fFmtpSDPLine = fmtp;
  return fFmtpSDPLine;
}
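
Relating this back to the SDP shown earlier: profile-level-id=640028 is exactly the three SPS bytes printed in hex by the code above, i.e. profile_idc=0x64 (High profile), constraint/compatibility flags 0x00, and level_idc=0x28 (level 4.0); and the two comma-separated Base64 strings in sprop-parameter-sets are the SPS and PPS produced by base64Encode().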

SETUP

RTSP client —> RTSP server: SETUP command — asks the server to set up a connection for a particular media resource.
RTSP server —> RTSP client: SETUP response — reports the result of setting up that connection.
SETUP asks the server to establish the media session.
request

SETUP rtsp://172.29.64.90:8554/h264ESVideoTest/track1 RTSP/1.0
CSeq: 4
User-Agent: LibVLC/3.0.8 (LIVE555 Streaming Media v2016.11.28)
Transport: RTP/AVP;unicast;client_port=57020-57021

response

RTSP/1.0 200 OK
CSeq: 4
Date: Mon, Jun 22 2020 06:00:30 GMT
Transport: RTP/AVP;unicast;destination=172.29.65.253;source=172.29.64.90;client_port=57020-57021;server_port=6970-6971
Session: B1BFFAF6;timeout=65

The value of the Transport header has the following format (as defined in RFC 2326):

Transport = "Transport" ":"
1\#transport-spec
transport-spec = transport-protocol/profile[/lower-transport]
*parameter
transport-protocol = "RTP"
profile = "AVP"
lower-transport = "TCP" | "UDP"
parameter = ( "unicast" | "multicast" )
| ";" "destination" [ "=" address ]
| ";" "interleaved" "=" channel [ "-" channel ]
| ";" "append"
| ";" "ttl" "=" ttl
| ";" "layers" "=" 1*DIGIT
| ";" "port" "=" port [ "-" port ]
| ";" "client_port" "=" port [ "-" port ]
| ";" "server_port" "=" port [ "-" port ]
| ";" "ssrc" "=" ssrc
| ";" "mode" = <"> 1\#mode <">
ttl = 1*3(DIGIT)
port = 1*5(DIGIT)
ssrc = 8*8(HEX)
channel = 1*3(DIGIT)
address = host
mode = <"> *Method <"> | Method

One Transport header describes exactly one RTP stream. The Session header carries the session id and the timeout value.

      } else if (strcmp(cmdName, "SETUP") == 0) {
	Boolean areAuthenticated = True;

	if (!requestIncludedSessionId) {
	  // No session id was present in the request.
	  // So create a new "RTSPClientSession" object for this request.

	  // But first, make sure that we're authenticated to perform this command:
	  char urlTotalSuffix[2*RTSP_PARAM_STRING_MAX];
	      // enough space for urlPreSuffix/urlSuffix'\0'
	  urlTotalSuffix[0] = '\0';
	  if (urlPreSuffix[0] != '\0') {
	    strcat(urlTotalSuffix, urlPreSuffix);
	    strcat(urlTotalSuffix, "/");
	  }
	  strcat(urlTotalSuffix, urlSuffix);
	  if (authenticationOK("SETUP", urlTotalSuffix, (char const*)fRequestBuffer)) {
	    clientSession
	      = (RTSPServer::RTSPClientSession*)fOurRTSPServer.createNewClientSessionWithId();
	  } else {
	    areAuthenticated = False;
	  }
	}
	if (clientSession != NULL) {
	  clientSession->handleCmd_SETUP(this, urlPreSuffix, urlSuffix, (char const*)fRequestBuffer);
	  playAfterSetup = clientSession->fStreamAfterSETUP;
	} else if (areAuthenticated) {
	  handleCmd_sessionNotFound();
	}
      }

When the SETUP command is received, a ClientSession is created and handleCmd_SETUP starts processing the SETUP request.
The remaining main work happens in getStreamParameters:

  • createNewStreamSource()
  • createGroupsock(), called twice, to create the server-side UDP sockets (RTP and RTCP)
  • createNewRTPSink()

void OnDemandServerMediaSubsession
::getStreamParameters(unsigned clientSessionId,
		      netAddressBits clientAddress,
		      Port const& clientRTPPort,
		      Port const& clientRTCPPort,
		      int tcpSocketNum,
		      unsigned char rtpChannelId,
		      unsigned char rtcpChannelId,
		      netAddressBits& destinationAddress,
		      u_int8_t& /*destinationTTL*/,
		      Boolean& isMulticast,
		      Port& serverRTPPort,
		      Port& serverRTCPPort,
		      void*& streamToken) {
  if (destinationAddress == 0) destinationAddress = clientAddress;
  struct in_addr destinationAddr; destinationAddr.s_addr = destinationAddress;
  isMulticast = False;

  if (fLastStreamToken != NULL && fReuseFirstSource) {
    // Special case: Rather than creating a new 'StreamState',
    // we reuse the one that we've already created:
    serverRTPPort = ((StreamState*)fLastStreamToken)->serverRTPPort();
    serverRTCPPort = ((StreamState*)fLastStreamToken)->serverRTCPPort();
    ++((StreamState*)fLastStreamToken)->referenceCount();
    streamToken = fLastStreamToken;
  } else {
    // Normal case: Create a new media source:
    unsigned streamBitrate;
    FramedSource* mediaSource
      = createNewStreamSource(clientSessionId, streamBitrate);

    // Create 'groupsock' and 'sink' objects for the destination,
    // using previously unused server port numbers:
    RTPSink* rtpSink = NULL;
    BasicUDPSink* udpSink = NULL;
    Groupsock* rtpGroupsock = NULL;
    Groupsock* rtcpGroupsock = NULL;

    if (clientRTPPort.num() != 0 || tcpSocketNum >= 0) { // Normal case: Create destinations
      portNumBits serverPortNum;
      if (clientRTCPPort.num() == 0) {
	// We're streaming raw UDP (not RTP). Create a single groupsock:
	NoReuse dummy(envir()); // ensures that we skip over ports that are already in use
	for (serverPortNum = fInitialPortNum; ; ++serverPortNum) {
	  struct in_addr dummyAddr; dummyAddr.s_addr = 0;
	  
	  serverRTPPort = serverPortNum;
	  rtpGroupsock = createGroupsock(dummyAddr, serverRTPPort);
	  if (rtpGroupsock->socketNum() >= 0) break; // success
	}

	udpSink = BasicUDPSink::createNew(envir(), rtpGroupsock);
      } else {
	// Normal case: We're streaming RTP (over UDP or TCP).  Create a pair of
	// groupsocks (RTP and RTCP), with adjacent port numbers (RTP port number even).
	// (If we're multiplexing RTCP and RTP over the same port number, it can be odd or even.)
	NoReuse dummy(envir()); // ensures that we skip over ports that are already in use
	for (portNumBits serverPortNum = fInitialPortNum; ; ++serverPortNum) {
	  struct in_addr dummyAddr; dummyAddr.s_addr = 0;

	  serverRTPPort = serverPortNum;
	  rtpGroupsock = createGroupsock(dummyAddr, serverRTPPort);
	  if (rtpGroupsock->socketNum() < 0) {
	    delete rtpGroupsock;
	    continue; // try again
	  }

	  if (fMultiplexRTCPWithRTP) {
	    // Use the RTP 'groupsock' object for RTCP as well:
	    serverRTCPPort = serverRTPPort;
	    rtcpGroupsock = rtpGroupsock;
	  } else {
	    // Create a separate 'groupsock' object (with the next (odd) port number) for RTCP:
	    serverRTCPPort = ++serverPortNum;
	    rtcpGroupsock = createGroupsock(dummyAddr, serverRTCPPort);
	    if (rtcpGroupsock->socketNum() < 0) {
	      delete rtpGroupsock;
	      delete rtcpGroupsock;
	      continue; // try again
	    }
	  }

	  break; // success
	}

	unsigned char rtpPayloadType = 96 + trackNumber()-1; // if dynamic
	rtpSink = createNewRTPSink(rtpGroupsock, rtpPayloadType, mediaSource);
	if (rtpSink != NULL && rtpSink->estimatedBitrate() > 0) streamBitrate = rtpSink->estimatedBitrate();
      }

      // Turn off the destinations for each groupsock.  They'll get set later
      // (unless TCP is used instead):
      if (rtpGroupsock != NULL) rtpGroupsock->removeAllDestinations();
      if (rtcpGroupsock != NULL) rtcpGroupsock->removeAllDestinations();

      if (rtpGroupsock != NULL) {
	// Try to use a big send buffer for RTP -  at least 0.1 second of
	// specified bandwidth and at least 50 KB
	unsigned rtpBufSize = streamBitrate * 25 / 2; // 1 kbps * 0.1 s = 12.5 bytes
	if (rtpBufSize < 50 * 1024) rtpBufSize = 50 * 1024;
	increaseSendBufferTo(envir(), rtpGroupsock->socketNum(), rtpBufSize);
      }
    }

    // Set up the state of the stream.  The stream will get started later:
    streamToken = fLastStreamToken
      = new StreamState(*this, serverRTPPort, serverRTCPPort, rtpSink, udpSink,
			streamBitrate, mediaSource,
			rtpGroupsock, rtcpGroupsock);
  }

  // Record these destinations as being for this client session id:
  // key: clientSessionId; value: the client's destination address, client RTP port, and client RTCP port
  Destinations* destinations;
  if (tcpSocketNum < 0) { // UDP
    destinations = new Destinations(destinationAddr, clientRTPPort, clientRTCPPort);
  } else { // TCP
    destinations = new Destinations(tcpSocketNum, rtpChannelId, rtcpChannelId);
  }
  fDestinationsHashTable->Add((char const*)clientSessionId, destinations);
}

PLAY

request

PLAY rtsp://10.0.15.50:8554/h264ESVideoTest/ RTSP/1.0
CSeq: 5
User-Agent: LibVLC/3.0.8 (LIVE555 Streaming Media v2016.11.28)
Session: B6A584EF
Range: npt=0.000-

response

RTSP/1.0 200 OK
CSeq: 5
Date: Tue, Jun 30 2020 08:46:58 GMT
Range: npt=0.000-
Session: B6A584EF
RTP-Info: url=rtsp://10.0.15.50:8554/h264ESVideoTest/track1;seq=38987;rtptime=380618412

After the steps above, the server is ready to send RTP and RTCP packets to the client; it waits for the client's PLAY command before starting transmission. When the server receives PLAY, RTSPClientSession::handleCmd_PLAY is called to handle it. handleCmd_PLAY first extracts Scale, which expresses the playback speed the client wants (normal, fast-forward, or rewind), and then extracts Range, which expresses the playback start/stop range the client wants. Based on these two parameters it calls ServerMediaSubsession::setStreamScale and ServerMediaSubsession::seekStream respectively, and finally calls ServerMediaSubsession::startStream to begin sending data. The call actually lands in OnDemandServerMediaSubsession::startStream; here is that function:


void OnDemandServerMediaSubsession::startStream(unsigned clientSessionId,
						void* streamToken,
						TaskFunc* rtcpRRHandler,
						void* rtcpRRHandlerClientData,
						unsigned short& rtpSeqNum,
						unsigned& rtpTimestamp,
						ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
						void* serverRequestAlternativeByteHandlerClientData) {
  StreamState* streamState = (StreamState*)streamToken;
  Destinations* destinations
    = (Destinations*)(fDestinationsHashTable->Lookup((char const*)clientSessionId));
  if (streamState != NULL) {
    streamState->startPlaying(destinations, clientSessionId,
			      rtcpRRHandler, rtcpRRHandlerClientData,
			      serverRequestAlternativeByteHandler, serverRequestAlternativeByteHandlerClientData);
    RTPSink* rtpSink = streamState->rtpSink(); // alias
    if (rtpSink != NULL) {
      rtpSeqNum = rtpSink->currentSeqNo();
      rtpTimestamp = rtpSink->presetNextTimestamp();
    }
  }
}

OnDemandServerMediaSubsession::startStream mainly calls StreamState::startPlaying; let's look at that function:

void StreamState
::startPlaying(Destinations* dests, unsigned clientSessionId,
	       TaskFunc* rtcpRRHandler, void* rtcpRRHandlerClientData,
	       ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
	       void* serverRequestAlternativeByteHandlerClientData) {
  if (dests == NULL) return;

  if (fRTCPInstance == NULL && fRTPSink != NULL) {
    // Create (and start) a 'RTCP instance' for this RTP sink:
    fRTCPInstance = fMaster.createRTCP(fRTCPgs, fTotalBW, (unsigned char*)fMaster.fCNAME, fRTPSink);
        // Note: This starts RTCP running automatically
    fRTCPInstance->setAppHandler(fMaster.fAppHandlerTask, fMaster.fAppHandlerClientData);
  }

  if (dests->isTCP) {
    // Change RTP and RTCP to use the TCP socket instead of UDP:
    if (fRTPSink != NULL) {
      fRTPSink->addStreamSocket(dests->tcpSocketNum, dests->rtpChannelId);
      RTPInterface
	::setServerRequestAlternativeByteHandler(fRTPSink->envir(), dests->tcpSocketNum,
						 serverRequestAlternativeByteHandler, serverRequestAlternativeByteHandlerClientData);
        // So that we continue to handle RTSP commands from the client
    }
    if (fRTCPInstance != NULL) {
      fRTCPInstance->addStreamSocket(dests->tcpSocketNum, dests->rtcpChannelId);
      fRTCPInstance->setSpecificRRHandler(dests->tcpSocketNum, dests->rtcpChannelId,
					  rtcpRRHandler, rtcpRRHandlerClientData);
    }
  } else {
    // Tell the RTP and RTCP 'groupsocks' about this destination
    // (in case they don't already have it):
    if (fRTPgs != NULL) fRTPgs->addDestination(dests->addr, dests->rtpPort, clientSessionId);
    if (fRTCPgs != NULL && !(fRTCPgs == fRTPgs && dests->rtcpPort.num() == dests->rtpPort.num())) {
      fRTCPgs->addDestination(dests->addr, dests->rtcpPort, clientSessionId);
    }
    if (fRTCPInstance != NULL) {
      fRTCPInstance->setSpecificRRHandler(dests->addr.s_addr, dests->rtcpPort,
					  rtcpRRHandler, rtcpRRHandlerClientData);
    }
  }

  if (fRTCPInstance != NULL) {
    // Hack: Send an initial RTCP "SR" packet, before the initial RTP packet, so that receivers will (likely) be able to
    // get RTCP-synchronized presentation times immediately:
    fRTCPInstance->sendReport();
  }

  if (!fAreCurrentlyPlaying && fMediaSource != NULL) {
    if (fRTPSink != NULL) {
      fRTPSink->startPlaying(*fMediaSource, afterPlayingStreamState, this);
      fAreCurrentlyPlaying = True;
    } else if (fUDPSink != NULL) {
      fUDPSink->startPlaying(*fMediaSource, afterPlayingStreamState, this);
      fAreCurrentlyPlaying = True;
    }
  }
}

In StreamState::startPlaying, the client is added to the set of destinations, and then RTPSink::startPlaying is called; what actually runs is MediaSink::startPlaying.
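
For reference, MediaSink::startPlaying() is short; paraphrased (check your live555 version for the exact text), it records the source and the "after playing" callback and then kicks off the pull loop via the virtual continuePlaying(), which for MultiFramedRTPSink builds and sends the first RTP packet:

Boolean MediaSink::startPlaying(MediaSource& source,
                                afterPlayingFunc* afterFunc,
                                void* afterClientData) {
  // Make sure we're not already being played:
  if (fSource != NULL) {
    envir().setResultMsg("This sink is already being played");
    return False;
  }

  // Make sure our source is compatible:
  if (!source.isFramedSource()) {
    envir().setResultMsg("MediaSink::startPlaying(): source is not a FramedSource");
    return False;
  }
  fSource = (FramedSource*)&source;

  fAfterFunc = afterFunc;
  fAfterClientData = afterClientData;
  return continuePlaying();
}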
