Using live555 and ffmpeg to decode an H.264 RTSP stream

Reposted from: http://blog.csdn.net/huguohu2006/article/details/7743119
 
I needed to build a web client that decodes an H.264 RTSP stream. My first thought was live555 + ffmpeg: live555 receives the RTSP stream, and ffmpeg decodes the H.264 for display. Looking through live555's examples, only openRTSP comes close to what I want, but it merely receives the RTSP stream and writes it to a file. I first wrote an ffmpeg program that decodes an H.264 file and got it working; now all that remains is to modify the openRTSP example and join the two programs together. The key is to find the place where openRTSP writes data to the file: grab the data there and decode and display it instead.
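For reference, here is a minimal sketch of what that file-decoding test program can look like against a current FFmpeg (using the avcodec_send_packet/avcodec_receive_frame API; the FFmpeg of 2012 used avcodec_decode_video2 instead). It illustrates the approach and is not the exact program from the post:

extern "C" {
#include <libavcodec/avcodec.h>
}
#include <cstdio>
#include <cstdint>

// Decode an Annex-B H.264 elementary stream from a file and report
// each decoded frame.
static void decode_file(const char* path) {
  const AVCodec* codec = avcodec_find_decoder(AV_CODEC_ID_H264);
  AVCodecContext* ctx = avcodec_alloc_context3(codec);
  AVCodecParserContext* parser = av_parser_init(AV_CODEC_ID_H264);
  avcodec_open2(ctx, codec, NULL);

  AVPacket* pkt = av_packet_alloc();
  AVFrame* frame = av_frame_alloc();
  FILE* f = fopen(path, "rb");

  uint8_t buf[4096];
  size_t n;
  while (f && (n = fread(buf, 1, sizeof buf, f)) > 0) {
    uint8_t* data = buf;
    while (n > 0) {
      // The parser splits the raw byte stream into complete packets:
      int used = av_parser_parse2(parser, ctx, &pkt->data, &pkt->size,
                                  data, (int)n,
                                  AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
      if (used < 0) break;
      data += used; n -= (size_t)used;
      if (pkt->size == 0) continue;
      if (avcodec_send_packet(ctx, pkt) == 0) {
        while (avcodec_receive_frame(ctx, frame) == 0)
          printf("decoded frame %dx%d\n", frame->width, frame->height);
      }
    }
  }

  if (f) fclose(f);
  av_frame_free(&frame);
  av_packet_free(&pkt);
  av_parser_close(parser);
  avcodec_free_context(&ctx);
}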

The project keeps me busy, so I can only spare a little time to write this down.

The main() function is in playCommon.cpp. Its flow is fairly simple and not much different from the server side: create the task scheduler object -> create the usage environment object -> process the user's arguments (the RTSP URL) -> create an RTSPClient instance -> send the first RTSP request (either OPTIONS or DESCRIBE) -> enter the event loop.
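Sketched against the current asynchronous RTSPClient interface, that flow looks roughly like this; continueAfterDESCRIBE is a stub here, whereas the real callback in playCommon.cpp parses the SDP and proceeds to SETUP:

#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

static char eventLoopWatchVariable = 0;

// Stub; playCommon.cpp's real callback parses the SDP into a
// MediaSession and eventually reaches setupStreams():
static void continueAfterDESCRIBE(RTSPClient* /*client*/, int /*resultCode*/,
                                  char* resultString) {
  delete[] resultString;
}

int main(int argc, char** argv) {
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

  RTSPClient* client = RTSPClient::createNew(*env, argv[1] /* rtsp:// URL */,
                                             1 /* verbosity */, "openRTSP");
  client->sendDescribeCommand(continueAfterDESCRIBE);

  env->taskScheduler().doEventLoop(&eventLoopWatchVariable); // the Loop
  return 0;
}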


The part we care about most is the creation of the RTPSource, which happens in createSourceObjects(). Let's take a look:

Boolean MediaSubsession::createSourceObjects(int useSpecialRTPoffset) {
  do {
    // First, check "fProtocolName"
    if (strcmp(fProtocolName, "UDP") == 0) {
      // A UDP-packetized stream (*not* a RTP stream)
      fReadSource = BasicUDPSource::createNew(env(), fRTPSocket);
      fRTPSource = NULL; // Note!
      
      if (strcmp(fCodecName, "MP2T") == 0) { // MPEG-2 Transport Stream
	fReadSource = MPEG2TransportStreamFramer::createNew(env(), fReadSource);
	// this sets "durationInMicroseconds" correctly, based on the PCR values
      }
    } else {
      // Check "fCodecName" against the set of codecs that we support,
      // and create our RTP source accordingly
      // (Later make this code more efficient, as this set grows #####)
      // (Also, add more fmts that can be implemented by SimpleRTPSource#####)
      Boolean createSimpleRTPSource = False; // by default; can be changed below
      Boolean doNormalMBitRule = False; // default behavior if "createSimpleRTPSource" is True
      if (strcmp(fCodecName, "QCELP") == 0) { // QCELP audio
	fReadSource =
	  QCELPAudioRTPSource::createNew(env(), fRTPSocket, fRTPSource,
					 fRTPPayloadFormat,
					 fRTPTimestampFrequency);
	// Note that fReadSource will differ from fRTPSource in this case
      } else if (strcmp(fCodecName, "AMR") == 0) { // AMR audio (narrowband)
	fReadSource =
	  AMRAudioRTPSource::createNew(env(), fRTPSocket, fRTPSource,
				       fRTPPayloadFormat, 0 /*isWideband*/,
				       fNumChannels, fOctetalign, fInterleaving,
				       fRobustsorting, fCRC);
	// Note that fReadSource will differ from fRTPSource in this case
      } else if (strcmp(fCodecName, "AMR-WB") == 0) { // AMR audio (wideband)
	fReadSource =
	  AMRAudioRTPSource::createNew(env(), fRTPSocket, fRTPSource,
				       fRTPPayloadFormat, 1 /*isWideband*/,
				       fNumChannels, fOctetalign, fInterleaving,
				       fRobustsorting, fCRC);
	// Note that fReadSource will differ from fRTPSource in this case
      } else if (strcmp(fCodecName, "MPA") == 0) { // MPEG-1 or 2 audio
	fReadSource = fRTPSource
	  = MPEG1or2AudioRTPSource::createNew(env(), fRTPSocket,
					      fRTPPayloadFormat,
					      fRTPTimestampFrequency);
      } else if (strcmp(fCodecName, "MPA-ROBUST") == 0) { // robust MP3 audio
	fReadSource = fRTPSource
	  = MP3ADURTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
				       fRTPTimestampFrequency);
	if (fRTPSource == NULL) break;
	
	if (!fReceiveRawMP3ADUs) {
	  // Add a filter that deinterleaves the ADUs after depacketizing them:
	  MP3ADUdeinterleaver* deinterleaver
	    = MP3ADUdeinterleaver::createNew(env(), fRTPSource);
	  if (deinterleaver == NULL) break;
	
	  // Add another filter that converts these ADUs to MP3 frames:
	  fReadSource = MP3FromADUSource::createNew(env(), deinterleaver);
	}
      } else if (strcmp(fCodecName, "X-MP3-DRAFT-00") == 0) {
	// a non-standard variant of "MPA-ROBUST" used by RealNetworks
	// (one 'ADU'ized MP3 frame per packet; no headers)
	fRTPSource
	  = SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
				       fRTPTimestampFrequency,
				       "audio/MPA-ROBUST" /*hack*/);
	if (fRTPSource == NULL) break;
	
	// Add a filter that converts these ADUs to MP3 frames:
	fReadSource = MP3FromADUSource::createNew(env(), fRTPSource,
						  False /*no ADU header*/);
      } else if (strcmp(fCodecName, "MP4A-LATM") == 0) { // MPEG-4 LATM audio
	fReadSource = fRTPSource
	  = MPEG4LATMAudioRTPSource::createNew(env(), fRTPSocket,
					       fRTPPayloadFormat,
					       fRTPTimestampFrequency);
      } else if (strcmp(fCodecName, "VORBIS") == 0) { // Vorbis audio
	fReadSource = fRTPSource
	  = VorbisAudioRTPSource::createNew(env(), fRTPSocket,
					    fRTPPayloadFormat,
					    fRTPTimestampFrequency);
      } else if (strcmp(fCodecName, "VP8") == 0) { // VP8 video
	fReadSource = fRTPSource
	  = VP8VideoRTPSource::createNew(env(), fRTPSocket,
					 fRTPPayloadFormat,
					 fRTPTimestampFrequency);
      } else if (strcmp(fCodecName, "AC3") == 0 || strcmp(fCodecName, "EAC3") == 0) { // AC3 audio
	fReadSource = fRTPSource
	  = AC3AudioRTPSource::createNew(env(), fRTPSocket,
					 fRTPPayloadFormat,
					 fRTPTimestampFrequency);
      } else if (strcmp(fCodecName, "MP4V-ES") == 0) { // MPEG-4 Elementary Stream video
	fReadSource = fRTPSource
	  = MPEG4ESVideoRTPSource::createNew(env(), fRTPSocket,
					     fRTPPayloadFormat,
					     fRTPTimestampFrequency);
      } else if (strcmp(fCodecName, "MPEG4-GENERIC") == 0) {
	fReadSource = fRTPSource
	  = MPEG4GenericRTPSource::createNew(env(), fRTPSocket,
					     fRTPPayloadFormat,
					     fRTPTimestampFrequency,
					     fMediumName, fMode,
					     fSizelength, fIndexlength,
					     fIndexdeltalength);
      } else if (strcmp(fCodecName, "MPV") == 0) { // MPEG-1 or 2 video
	fReadSource = fRTPSource
	  = MPEG1or2VideoRTPSource::createNew(env(), fRTPSocket,
					      fRTPPayloadFormat,
					      fRTPTimestampFrequency);
      } else if (strcmp(fCodecName, "MP2T") == 0) { // MPEG-2 Transport Stream
	fRTPSource = SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
						fRTPTimestampFrequency, "video/MP2T",
						0, False);
	fReadSource = MPEG2TransportStreamFramer::createNew(env(), fRTPSource);
	// this sets "durationInMicroseconds" correctly, based on the PCR values
      } else if (strcmp(fCodecName, "H261") == 0) { // H.261
	fReadSource = fRTPSource
	  = H261VideoRTPSource::createNew(env(), fRTPSocket,
					  fRTPPayloadFormat,
					  fRTPTimestampFrequency);
      } else if (strcmp(fCodecName, "H263-1998") == 0 ||
		 strcmp(fCodecName, "H263-2000") == 0) { // H.263+
	fReadSource = fRTPSource
	  = H263plusVideoRTPSource::createNew(env(), fRTPSocket,
					      fRTPPayloadFormat,
					      fRTPTimestampFrequency);
      } else if (strcmp(fCodecName, "H264") == 0) {
	fReadSource = fRTPSource
	  = H264VideoRTPSource::createNew(env(), fRTPSocket,
					  fRTPPayloadFormat,
					  fRTPTimestampFrequency);
      } else if (strcmp(fCodecName, "DV") == 0) {
	fReadSource = fRTPSource
	  = DVVideoRTPSource::createNew(env(), fRTPSocket,
					fRTPPayloadFormat,
					fRTPTimestampFrequency);
      } else if (strcmp(fCodecName, "JPEG") == 0) { // motion JPEG
	fReadSource = fRTPSource
	  = JPEGVideoRTPSource::createNew(env(), fRTPSocket,
					  fRTPPayloadFormat,
					  fRTPTimestampFrequency,
					  videoWidth(),
					  videoHeight());
      } else if (strcmp(fCodecName, "X-QT") == 0
		 || strcmp(fCodecName, "X-QUICKTIME") == 0) {
	// Generic QuickTime streams, as defined in
	// <http://developer.apple.com/quicktime/icefloe/dispatch026.html>
	char* mimeType
	  = new char[strlen(mediumName()) + strlen(codecName()) + 2] ;
	sprintf(mimeType, "%s/%s", mediumName(), codecName());
	fReadSource = fRTPSource
	  = QuickTimeGenericRTPSource::createNew(env(), fRTPSocket,
						 fRTPPayloadFormat,
						 fRTPTimestampFrequency,
						 mimeType);
	delete[] mimeType;
      } else if (  strcmp(fCodecName, "PCMU") == 0 // PCM u-law audio
		   || strcmp(fCodecName, "GSM") == 0 // GSM audio
		   || strcmp(fCodecName, "DVI4") == 0 // DVI4 (IMA ADPCM) audio
		   || strcmp(fCodecName, "PCMA") == 0 // PCM a-law audio
		   || strcmp(fCodecName, "MP1S") == 0 // MPEG-1 System Stream
		   || strcmp(fCodecName, "MP2P") == 0 // MPEG-2 Program Stream
		   || strcmp(fCodecName, "L8") == 0 // 8-bit linear audio
		   || strcmp(fCodecName, "L16") == 0 // 16-bit linear audio
		   || strcmp(fCodecName, "L20") == 0 // 20-bit linear audio (RFC 3190)
		   || strcmp(fCodecName, "L24") == 0 // 24-bit linear audio (RFC 3190)
		   || strcmp(fCodecName, "G726-16") == 0 // G.726, 16 kbps
		   || strcmp(fCodecName, "G726-24") == 0 // G.726, 24 kbps
		   || strcmp(fCodecName, "G726-32") == 0 // G.726, 32 kbps
		   || strcmp(fCodecName, "G726-40") == 0 // G.726, 40 kbps
		   || strcmp(fCodecName, "SPEEX") == 0 // SPEEX audio
		   || strcmp(fCodecName, "T140") == 0 // T.140 text (RFC 4103)
		   || strcmp(fCodecName, "DAT12") == 0 // 12-bit nonlinear audio (RFC 3190)
		   ) {
	createSimpleRTPSource = True;
	useSpecialRTPoffset = 0;
      } else if (useSpecialRTPoffset >= 0) {
	// We don't know this RTP payload format, but try to receive
	// it using a 'SimpleRTPSource' with the specified header offset:
	createSimpleRTPSource = True;
      } else {
	env().setResultMsg("RTP payload format unknown or not supported");
	break;
      }
      
      if (createSimpleRTPSource) {
	char* mimeType
	  = new char[strlen(mediumName()) + strlen(codecName()) + 2] ;
	sprintf(mimeType, "%s/%s", mediumName(), codecName());
	fReadSource = fRTPSource
	  = SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
				       fRTPTimestampFrequency, mimeType,
				       (unsigned)useSpecialRTPoffset,
				       doNormalMBitRule);
	delete[] mimeType;
      }
    }

    return True;
  } while (0);

  return False; // an error occurred
}
You can see that for H264 the branch is:

fReadSource = fRTPSource
	  = H264VideoRTPSource::createNew(env(), fRTPSocket,
					  fRTPPayloadFormat,
					  fRTPTimestampFrequency);

This initializes fReadSource and fRTPSource in the MediaSubsession.
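One point worth noting for the decoding goal: with H.264 over RTP, the SPS/PPS parameter sets usually travel in the SDP's sprop-parameter-sets attribute rather than in the RTP payload, so a decoder fed from this source needs them injected by hand. A sketch using live555's parseSPropParameterSets() helper; feed_to_decoder() is a hypothetical stand-in for whatever pushes bytes into the decoder:

#include "liveMedia.hh" // declares SPropRecord and parseSPropParameterSets()

// Hypothetical hookup into the decoder (see the FFmpeg sketch above):
void feed_to_decoder(const unsigned char* data, unsigned len);

void pushParameterSets(MediaSubsession* subsession) {
  unsigned numRecords = 0;
  SPropRecord* records =
    parseSPropParameterSets(subsession->fmtp_spropparametersets(), numRecords);
  static const unsigned char startCode[4] = { 0, 0, 0, 1 };
  for (unsigned i = 0; i < numRecords; ++i) {
    feed_to_decoder(startCode, 4);                                  // Annex-B start code
    feed_to_decoder(records[i].sPropBytes, records[i].sPropLength); // SPS or PPS NAL
  }
  delete[] records;
}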

The socket is established and the Source created; the next step should be to connect a Sink and form a complete stream. So far there has been no sign of a Sink, so it must be created during the following SETUP step. Since continueAfterDESCRIBE() calls setupStreams() at the end, let's explore setupStreams():

void setupStreams() {
  static MediaSubsessionIterator* setupIter = NULL;
  if (setupIter == NULL) setupIter = new MediaSubsessionIterator(*session);
  while ((subsession = setupIter->next()) != NULL) {
    // We have another subsession left to set up:
    if (subsession->clientPortNum() == 0) continue; // port # was not set

    setupSubsession(subsession, streamUsingTCP, continueAfterSETUP);
    return;
  }

  // We're done setting up subsessions.
  delete setupIter;
  if (!madeProgress) shutdown();

  // Create output files:
  if (createReceivers) {
    if (outputQuickTimeFile) {
      // Create a "QuickTimeFileSink", to write to 'stdout':
      qtOut = QuickTimeFileSink::createNew(*env, *session, "stdout",
					   fileSinkBufferSize,
					   movieWidth, movieHeight,
					   movieFPS,
					   packetLossCompensate,
					   syncStreams,
					   generateHintTracks,
					   generateMP4Format);
      if (qtOut == NULL) {
	*env << "Failed to create QuickTime file sink for stdout: " << env->getResultMsg();
	shutdown();
      }

      qtOut->startPlaying(sessionAfterPlaying, NULL);
    } else if (outputAVIFile) {
      // Create an "AVIFileSink", to write to 'stdout':
      aviOut = AVIFileSink::createNew(*env, *session, "stdout",
				      fileSinkBufferSize,
				      movieWidth, movieHeight,
				      movieFPS,
				      packetLossCompensate);
      if (aviOut == NULL) {
	*env << "Failed to create AVI file sink for stdout: " << env->getResultMsg();
	shutdown();
      }

      aviOut->startPlaying(sessionAfterPlaying, NULL);
    } else {
      // Create and start "FileSink"s for each subsession:
      madeProgress = False;
      MediaSubsessionIterator iter(*session);
      while ((subsession = iter.next()) != NULL) {
	if (subsession->readSource() == NULL) continue; // was not initiated

	// Create an output file for each desired stream:
	char outFileName[1000];
	if (singleMedium == NULL) {
	  // Output file name is
	  //     "<filename-prefix><medium_name>-<codec_name>-<counter>"
	  static unsigned streamCounter = 0;
	  snprintf(outFileName, sizeof outFileName, "%s%s-%s-%d",
		   fileNamePrefix, subsession->mediumName(),
		   subsession->codecName(), ++streamCounter);
	} else {
	  sprintf(outFileName, "stdout");
	}
	FileSink* fileSink;
	if (strcmp(subsession->mediumName(), "audio") == 0 &&
	    (strcmp(subsession->codecName(), "AMR") == 0 ||
	     strcmp(subsession->codecName(), "AMR-WB") == 0)) {
	  // For AMR audio streams, we use a special sink that inserts AMR frame hdrs:
	  fileSink = AMRAudioFileSink::createNew(*env, outFileName,
						 fileSinkBufferSize, oneFilePerFrame);
	} else if (strcmp(subsession->mediumName(), "video") == 0 &&
	    (strcmp(subsession->codecName(), "H264") == 0)) {
	  // For H.264 video stream, we use a special sink that insert start_codes:
	  fileSink = H264VideoFileSink::createNew(*env, outFileName,
						  subsession->fmtp_spropparametersets(),
						  fileSinkBufferSize, oneFilePerFrame);
	} else {
	  // Normal case:
	  fileSink = FileSink::createNew(*env, outFileName,
					 fileSinkBufferSize, oneFilePerFrame);
	}
	subsession->sink = fileSink;
	if (subsession->sink == NULL) {
	  *env << "Failed to create FileSink for \"" << outFileName
		  << "\": " << env->getResultMsg() << "\n";
	} else {
	  if (singleMedium == NULL) {
	    *env << "Created output file: \"" << outFileName << "\"\n";
	  } else {
	    *env << "Outputting data from the \"" << subsession->mediumName()
			<< "/" << subsession->codecName()
			<< "\" subsession to 'stdout'\n";
	  }

	  if (strcmp(subsession->mediumName(), "video") == 0 &&
	      strcmp(subsession->codecName(), "MP4V-ES") == 0 &&
	      subsession->fmtp_config() != NULL) {
	    // For MPEG-4 video RTP streams, the 'config' information
	    // from the SDP description contains useful VOL etc. headers.
	    // Insert this data at the front of the output file:
	    unsigned configLen;
	    unsigned char* configData
	      = parseGeneralConfigStr(subsession->fmtp_config(), configLen);
	    struct timeval timeNow;
	    gettimeofday(&timeNow, NULL);
	    fileSink->addData(configData, configLen, timeNow);
	    delete[] configData;
	  }

	  subsession->sink->startPlaying(*(subsession->readSource()),
					 subsessionAfterPlaying,
					 subsession);

	  // Also set a handler to be called if a RTCP "BYE" arrives
	  // for this subsession:
	  if (subsession->rtcpInstance() != NULL) {
	    subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, subsession);
	  }

	  madeProgress = True;
	}
      }
      if (!madeProgress) shutdown();
    }
  }

  // Finally, start playing each subsession, to start the data flow:
  if (duration == 0) {
    if (scale > 0) duration = session->playEndTime() - initialSeekTime; // use SDP end time
    else if (scale < 0) duration = initialSeekTime;
  }
  if (duration < 0) duration = 0.0;

  endTime = initialSeekTime;
  if (scale > 0) {
    if (duration <= 0) endTime = -1.0f;
    else endTime = initialSeekTime + duration;
  } else {
    endTime = initialSeekTime - duration;
    if (endTime < 0) endTime = 0.0f;
  }

  startPlayingSession(session, initialSeekTime, endTime, scale, continueAfterPLAY);
}

For H.264 the interesting branch is the special sink that inserts start codes:
fileSink = H264VideoFileSink::createNew(*env, outFileName,
						  subsession->fmtp_spropparametersets(),
						  fileSinkBufferSize, oneFilePerFrame);

Then comes subsession->sink = fileSink;

The crucial call after that is:

subsession->sink->startPlaying(*(subsession->readSource()),
					 subsessionAfterPlaying,
					 subsession);

Let's look at this startPlaying:

Boolean MediaSink::startPlaying(MediaSource& source,
				afterPlayingFunc* afterFunc,
				void* afterClientData) {
  // Make sure we're not already being played:
  if (fSource != NULL) {
    envir().setResultMsg("This sink is already being played");
    return False;
  }

  // Make sure our source is compatible:
  if (!sourceIsCompatibleWithUs(source)) {
    envir().setResultMsg("MediaSink::startPlaying(): source is not compatible!");
    return False;
  }
  fSource = (FramedSource*)&source;

  fAfterFunc = afterFunc;
  fAfterClientData = afterClientData;
  return continuePlaying();
}

Above, subsession->readSource() returns fReadSource, the very source built in createSourceObjects(). Here we see it assigned to fSource.

continuePlaying() is a pure virtual function in MediaSink; FileSink provides the definition.

Boolean FileSink::continuePlaying() {
  if (fSource == NULL) return False;

  fSource->getNextFrame(fBuffer, fBufferSize,
			afterGettingFrame, this,
			onSourceClosure, this);

  return True;
}

It is really quite simple: it just calls getNextFrame on fSource. This fSource is the one in MediaSink, i.e. the H264VideoRTPSource.

All getNextFrame calls are the same one, FramedSource::getNextFrame(): fBuffer is passed as fTo, and fBufferSize becomes fMaxSize.
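For reference, the declaration in FramedSource.hh (abbreviated) shows how FileSink's arguments map onto the parameters:

// FramedSource.hh (abbreviated). FileSink::continuePlaying() passes
// fBuffer as "to", fBufferSize as "maxSize", its afterGettingFrame()
// as the after-getting callback, and onSourceClosure as the close handler.
void getNextFrame(unsigned char* to, unsigned maxSize,
                  afterGettingFunc* afterGettingFunc,
                  void* afterGettingClientData,
                  onCloseFunc* onCloseFunc,
                  void* onCloseClientData);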

Let's look at this fBuffer:

fBuffer = new unsigned char[bufferSize];
where bufferSize is the fileSinkBufferSize passed in when the sink was created:
fileSink = H264VideoFileSink::createNew(*env, outFileName,
						  subsession->fmtp_spropparametersets(),
						  fileSinkBufferSize, oneFilePerFrame);
fileSinkBufferSize here is 100000 by default (openRTSP's -b option can change it); large HD H.264 frames may need a bigger buffer.

After getNextFrame comes doGetNextFrame(), normally implemented in the subclass. H264VideoRTPSource does not implement it, but its parent class MultiFramedRTPSource does:

void MultiFramedRTPSource::doGetNextFrame() {
  if (!fAreDoingNetworkReads) {
    // Turn on background read handling of incoming packets:
    fAreDoingNetworkReads = True;
    TaskScheduler::BackgroundHandlerProc* handler
      = (TaskScheduler::BackgroundHandlerProc*)&networkReadHandler;
    fRTPInterface.startNetworkReading(handler);
  }

  fSavedTo = fTo;
  fSavedMaxSize = fMaxSize;
  fFrameSize = 0; // for now
  fNeedDelivery = True;
  doGetNextFrame1();
}
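This is exactly the hook point the article set out to find: FileSink::afterGettingFrame() is where each received frame lands in fBuffer, so replacing FileSink with a sink that hands fBuffer to the decoder joins the two programs. A minimal sketch of such a sink, following the FileSink pattern rather than any official API (H264DecodeSink and feed_to_decoder() are my names):

#include "liveMedia.hh"

// Hypothetical hookup into the decoder; see the earlier FFmpeg sketch.
void feed_to_decoder(const unsigned char* data, unsigned len);

class H264DecodeSink : public MediaSink {
public:
  static H264DecodeSink* createNew(UsageEnvironment& env,
                                   unsigned bufferSize = 100000) {
    return new H264DecodeSink(env, bufferSize);
  }

protected:
  H264DecodeSink(UsageEnvironment& env, unsigned bufferSize)
    : MediaSink(env), fBufferSize(bufferSize) {
    fBuffer = new unsigned char[bufferSize];
  }
  virtual ~H264DecodeSink() { delete[] fBuffer; }

  // Same pattern as FileSink::continuePlaying(), but afterGettingFrame()
  // hands the frame to the decoder instead of writing it to a file:
  virtual Boolean continuePlaying() {
    if (fSource == NULL) return False;
    fSource->getNextFrame(fBuffer, fBufferSize,
                          afterGettingFrame, this,
                          onSourceClosure, this);
    return True;
  }

private:
  static void afterGettingFrame(void* clientData, unsigned frameSize,
                                unsigned /*numTruncatedBytes*/,
                                struct timeval /*presentationTime*/,
                                unsigned /*durationInMicroseconds*/) {
    H264DecodeSink* sink = (H264DecodeSink*)clientData;
    // The RTP source delivers bare NAL units, so restore the Annex-B
    // start code the decoder expects before each one:
    static const unsigned char startCode[4] = { 0, 0, 0, 1 };
    feed_to_decoder(startCode, 4);
    feed_to_decoder(sink->fBuffer, frameSize);
    sink->continuePlaying(); // request the next frame
  }

  unsigned char* fBuffer;
  unsigned fBufferSize;
};

Hooking it in would then amount to subsession->sink = H264DecodeSink::createNew(*env); followed by the same startPlaying() call as before, remembering to push the SPS/PPS from sprop-parameter-sets into the decoder first.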