live555: adding a live H.264/AAC stream to the LIVE555 Media Server

1. DynamicRTSPServer.cpp

ServerMediaSession* DynamicRTSPServer
::lookupServerMediaSession(char const* streamName, Boolean isFirstLookupInSession) {
  // First, check whether we already have a "ServerMediaSession" for this stream:
  ServerMediaSession* sms = RTSPServer::lookupServerMediaSession(streamName);
  Boolean smsExists = sms != NULL;

  // Next, check whether the specified "streamName" exists as a local file:
  FILE* fid = fopen(streamName, "rb");
  Boolean fileExists = fid != NULL;
  
  if(strcmp(streamName,"live") == 0)
  {
		if (smsExists) {
		  // "sms" was created for a file that no longer exists. Remove it:
		  removeServerMediaSession(sms);
		  sms = NULL;
		}

		if (sms == NULL) {
		  sms = createNewSMS(envir(), streamName, fid); 
		  addServerMediaSession(sms);
		}

		return sms;		   
  }
  
  // Handle the four possibilities for "fileExists" and "smsExists":
  if (!fileExists) {
    if (smsExists) {
      // "sms" was created for a file that no longer exists. Remove it:
      removeServerMediaSession(sms);
      sms = NULL;
    }

    return NULL;
  } else {
    if (smsExists && isFirstLookupInSession) { 
      // Remove the existing "ServerMediaSession" and create a new one, in case the underlying
      // file has changed in some way:
      removeServerMediaSession(sms); 
      sms = NULL;
    } 

    if (sms == NULL) {
      sms = createNewSMS(envir(), streamName, fid); 
      addServerMediaSession(sms);
    }

    fclose(fid);
    return sms;
  }
}
static ServerMediaSession* createNewSMS(UsageEnvironment& env,
					char const* fileName, FILE* /*fid*/) {

  ServerMediaSession* sms = NULL;
  Boolean const reuseSource = False;
  if (strcmp(fileName, "live") == 0) {
    NEW_SMS("live");
    //env << "live detected. addsubsession:\n";
    //sms->addSubsession(LiveADTSAudioServerMediaSubsession::createNew(env, fileName, reuseSource));
    OutPacketBuffer::maxSize = 300000; 
    sms->addSubsession(LiveVideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
    return sms;
  }   

  // Use the file name extension to determine the type of "ServerMediaSession":
  char const* extension = strrchr(fileName, '.');
  if (extension == NULL) return NULL;

  if (strcmp(extension, ".aac") == 0) {
    // Assumed to be an AAC Audio (ADTS format) file:
    NEW_SMS("AAC Audio");
    sms->addSubsession(ADTSAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".amr") == 0) {
    // Assumed to be an AMR Audio file:
    NEW_SMS("AMR Audio");
    sms->addSubsession(AMRAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".ac3") == 0) {
    // Assumed to be an AC-3 Audio file:
    NEW_SMS("AC-3 Audio");
    sms->addSubsession(AC3AudioFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".m4e") == 0) {
    // Assumed to be a MPEG-4 Video Elementary Stream file:
    NEW_SMS("MPEG-4 Video");
    sms->addSubsession(MPEG4VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".264") == 0) {
    // Assumed to be a H.264 Video Elementary Stream file:
    NEW_SMS("H.264 Video");
    OutPacketBuffer::maxSize = 300000; // allow for some possibly large H.264 frames
    sms->addSubsession(H264VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".265") == 0) {
    // Assumed to be a H.265 Video Elementary Stream file:
    NEW_SMS("H.265 Video");
    OutPacketBuffer::maxSize = 300000; // allow for some possibly large H.265 frames
    sms->addSubsession(H265VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".mp3") == 0) {
    // Assumed to be a MPEG-1 or 2 Audio file:
    NEW_SMS("MPEG-1 or 2 Audio");
    // To stream using 'ADUs' rather than raw MP3 frames, uncomment the following:
//#define STREAM_USING_ADUS 1
    // To also reorder ADUs before streaming, uncomment the following:
//#define INTERLEAVE_ADUS 1
    // (For more information about ADUs and interleaving,
    //  see <http://www.live555.com/rtp-mp3/>)
    Boolean useADUs = False;
    Interleaving* interleaving = NULL;
#ifdef STREAM_USING_ADUS
    useADUs = True;
#ifdef INTERLEAVE_ADUS
    unsigned char interleaveCycle[] = {0,2,1,3}; // or choose your own...
    unsigned const interleaveCycleSize
      = (sizeof interleaveCycle)/(sizeof (unsigned char));
    interleaving = new Interleaving(interleaveCycleSize, interleaveCycle);
#endif
#endif
    sms->addSubsession(MP3AudioFileServerMediaSubsession::createNew(env, fileName, reuseSource, useADUs, interleaving));
  } else if (strcmp(extension, ".mpg") == 0) {
    // Assumed to be a MPEG-1 or 2 Program Stream (audio+video) file:
    NEW_SMS("MPEG-1 or 2 Program Stream");
    MPEG1or2FileServerDemux* demux
      = MPEG1or2FileServerDemux::createNew(env, fileName, reuseSource);
    sms->addSubsession(demux->newVideoServerMediaSubsession());
    sms->addSubsession(demux->newAudioServerMediaSubsession());
  } else if (strcmp(extension, ".vob") == 0) {
    // Assumed to be a VOB (MPEG-2 Program Stream, with AC-3 audio) file:
    NEW_SMS("VOB (MPEG-2 video with AC-3 audio)");
    MPEG1or2FileServerDemux* demux
      = MPEG1or2FileServerDemux::createNew(env, fileName, reuseSource);
    sms->addSubsession(demux->newVideoServerMediaSubsession());
    sms->addSubsession(demux->newAC3AudioServerMediaSubsession());
  } else if (strcmp(extension, ".ts") == 0) {
    // Assumed to be a MPEG Transport Stream file:
    // Use an index file name that's the same as the TS file name, except with ".tsx":
    unsigned indexFileNameLen = strlen(fileName) + 2; // allow for trailing "x\0"
    char* indexFileName = new char[indexFileNameLen];
    sprintf(indexFileName, "%sx", fileName);
    NEW_SMS("MPEG Transport Stream");
    sms->addSubsession(MPEG2TransportFileServerMediaSubsession::createNew(env, fileName, indexFileName, reuseSource));
    delete[] indexFileName;
  } else if (strcmp(extension, ".wav") == 0) {
    // Assumed to be a WAV Audio file:
    NEW_SMS("WAV Audio Stream");
    // To convert 16-bit PCM data to 8-bit u-law, prior to streaming,
    // change the following to True:
    Boolean convertToULaw = False;
    sms->addSubsession(WAVAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource, convertToULaw));
  } else if (strcmp(extension, ".dv") == 0) {
    // Assumed to be a DV Video file
    // First, make sure that the RTPSinks' buffers will be large enough to handle the huge size of DV frames (as big as 288000).
    OutPacketBuffer::maxSize = 300000;

    NEW_SMS("DV Video");
    sms->addSubsession(DVVideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".mkv") == 0 || strcmp(extension, ".webm") == 0) {
    // Assumed to be a Matroska file (note that WebM ('.webm') files are also Matroska files)
    OutPacketBuffer::maxSize = 100000; // allow for some possibly large VP8 or VP9 frames
    NEW_SMS("Matroska video+audio+(optional)subtitles");

    // Create a Matroska file server demultiplexor for the specified file.
    // (We enter the event loop to wait for this to complete.)
    MatroskaDemuxCreationState creationState;
    creationState.watchVariable = 0;
    MatroskaFileServerDemux::createNew(env, fileName, onMatroskaDemuxCreation, &creationState);
    env.taskScheduler().doEventLoop(&creationState.watchVariable);

    ServerMediaSubsession* smss;
    while ((smss = creationState.demux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
    }
  } else if (strcmp(extension, ".ogg") == 0 || strcmp(extension, ".ogv") == 0 || strcmp(extension, ".opus") == 0) {
    // Assumed to be an Ogg file
    NEW_SMS("Ogg video and/or audio");

    // Create a Ogg file server demultiplexor for the specified file.
    // (We enter the event loop to wait for this to complete.)
    OggDemuxCreationState creationState;
    creationState.watchVariable = 0;
    OggFileServerDemux::createNew(env, fileName, onOggDemuxCreation, &creationState);
    env.taskScheduler().doEventLoop(&creationState.watchVariable);

    ServerMediaSubsession* smss;
    while ((smss = creationState.demux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
    }
  }

  return sms;
}
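With this change, a client requesting rtsp://<server-ip>/live always gets a freshly built session backed by the live H.264 source added in the sections below. For reference, the NEW_SMS macro used throughout createNewSMS() is defined near the top of the stock DynamicRTSPServer.cpp; modulo version differences it looks like this:

#define NEW_SMS(description) do {\
char const* descStr = description\
    ", streamed by the LIVE555 Media Server";\
sms = ServerMediaSession::createNew(env, fileName, fileName, descStr);\
} while(0)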

2. Add the LiveADTSAudioServerMediaSubsession class
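The matching header, as a minimal sketch inferred from the implementation below (it simply overrides FileServerMediaSubsession's source and sink factories):

#ifndef _LIVE_ADTS_AUDIO_SERVER_MEDIA_SUBSESSION_HH
#define _LIVE_ADTS_AUDIO_SERVER_MEDIA_SUBSESSION_HH

#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
#include "FileServerMediaSubsession.hh"
#endif

class LiveADTSAudioServerMediaSubsession: public FileServerMediaSubsession {
public:
  static LiveADTSAudioServerMediaSubsession*
  createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);

protected:
  LiveADTSAudioServerMediaSubsession(UsageEnvironment& env,
				     char const* fileName, Boolean reuseFirstSource);
      // called only by createNew()
  virtual ~LiveADTSAudioServerMediaSubsession();

protected: // redefined virtual functions
  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
					      unsigned& estBitrate);
  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
				    unsigned char rtpPayloadTypeIfDynamic,
				    FramedSource* inputSource);
};

#endif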


#include "LiveADTSAudioServerMediaSubsession.hh"
#include "LiveADTSAudioSource.hh"
#include "MPEG4GenericRTPSink.hh"

LiveADTSAudioServerMediaSubsession*
LiveADTSAudioServerMediaSubsession::createNew(UsageEnvironment& env,
					     char const* fileName,
					     Boolean reuseFirstSource) {
  return new LiveADTSAudioServerMediaSubsession(env, fileName, reuseFirstSource);
}

LiveADTSAudioServerMediaSubsession
::LiveADTSAudioServerMediaSubsession(UsageEnvironment& env,
				    char const* fileName, Boolean reuseFirstSource)
  : FileServerMediaSubsession(env, fileName, reuseFirstSource) {
}

LiveADTSAudioServerMediaSubsession
::~LiveADTSAudioServerMediaSubsession() {
}

FramedSource* LiveADTSAudioServerMediaSubsession
::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
  estBitrate = 96; // kbps, estimate

  return LiveADTSAudioSource::createNew(envir());
}

RTPSink* LiveADTSAudioServerMediaSubsession
::createNewRTPSink(Groupsock* rtpGroupsock,
		   unsigned char rtpPayloadTypeIfDynamic,
		   FramedSource* inputSource) {
  LiveADTSAudioSource* adtsSource = (LiveADTSAudioSource*)inputSource;
  return MPEG4GenericRTPSink::createNew(envir(), rtpGroupsock,
					rtpPayloadTypeIfDynamic,
					adtsSource->samplingFrequency(),
					"audio", "AAC-hbr", adtsSource->configStr(),
					adtsSource->numChannels());
}

3. Add the LiveADTSAudioSource class

#ifndef _LiveADTSAudioSource_HH
#define _LiveADTSAudioSource_HH

#ifndef _FRAMED_FILE_SOURCE_HH
#include "FramedFileSource.hh"
#endif

#include <pthread.h>
#include <semaphore.h>

#define AAC_BUF_SIZE 10000
#define AAC_BUF_COUNT 20

extern void aac_buf_init();
extern Boolean aac_buf_full();
extern Boolean aac_buf_empty();
extern int aac_buf_put(unsigned char* buf,int len);
extern unsigned char* aac_buf_get();
extern void aac_buf_destroy();

extern sem_t aac_f;
extern sem_t aac_e;
extern int bIsInit;

class LiveADTSAudioSource: public FramedSource {
public:
  static LiveADTSAudioSource* createNew(UsageEnvironment& env);

  unsigned samplingFrequency() const { return fSamplingFrequency; }
  unsigned numChannels() const { return fNumChannels; }
  char const* configStr() const { return fConfigStr; }
      // returns the 'AudioSpecificConfig' for this stream (in ASCII form)

private:
  LiveADTSAudioSource(UsageEnvironment& env, u_int8_t profile,
		      u_int8_t samplingFrequencyIndex, u_int8_t channelConfiguration);
	// called only by createNew()

  virtual ~LiveADTSAudioSource();

private:
  // redefined virtual functions:
  virtual void doGetNextFrame();

private:
  unsigned fSamplingFrequency;
  unsigned fNumChannels;
  unsigned fuSecsPerFrame;
  char fConfigStr[5];
};

#endif

#include "LiveADTSAudioSource.hh"
#include "InputFile.hh"
#include <GroupsockHelper.hh>

// ADTSAudioFileSource //

static unsigned const samplingFrequencyTable[16] = {
  96000, 88200, 64000, 48000,
  44100, 32000, 24000, 22050,
  16000, 12000, 11025, 8000,
  7350, 0, 0, 0
};

unsigned char aac_framebuf[AAC_BUF_COUNT][AAC_BUF_SIZE];
int aac_frame_len[AAC_BUF_COUNT];
int aac_buf_head;
int aac_buf_tail;
int aac_buf_size;

void aac_buf_init();
void aac_buf_destroy();
Boolean aac_buf_full();
Boolean aac_buf_empty();
int aac_buf_put(unsigned char* buf,int len);
unsigned char* aac_buf_get();

sem_t aac_f;
sem_t aac_e;
sem_t aac_m;
int bIsInit = 0;
void aac_buf_init()
{
	if(bIsInit == 0)
	{
		sem_init(&aac_f, 0, 0);             // counts filled slots
		sem_init(&aac_e, 0, AAC_BUF_COUNT); // counts empty slots
		sem_init(&aac_m, 0, 1);             // mutex protecting the ring indexes
		aac_buf_head = 0;
		aac_buf_tail = 0;
		aac_buf_size = 0;
	}
}
Boolean aac_buf_full()
{
	if(aac_buf_size == AAC_BUF_COUNT)
		return True;
	return False;
}
Boolean aac_buf_empty()
{
	if(aac_buf_size == 0)
		return True;
	return False;
}
int aac_buf_put(unsigned char* buf, int len)
{
	// Caller must ensure len <= AAC_BUF_SIZE (one ADTS frame per slot).
	sem_wait(&aac_e); // wait for an empty slot
	sem_wait(&aac_m);
	bzero(aac_framebuf[aac_buf_tail], AAC_BUF_SIZE);
	memcpy(aac_framebuf[aac_buf_tail], buf, len);
	aac_frame_len[aac_buf_tail] = len;
	aac_buf_tail = (aac_buf_tail + 1) % AAC_BUF_COUNT;
	aac_buf_size++;
	sem_post(&aac_m);
	sem_post(&aac_f); // signal one more filled slot
	return 0;
}
unsigned char* aac_buf_get()
{
	// Caller must sem_wait(&aac_f) before calling, and sem_post(&aac_e)
	// only after it has finished copying out of the returned slot.
	sem_wait(&aac_m);
	unsigned char* rt = aac_framebuf[aac_buf_head];
	aac_buf_head = (aac_buf_head + 1) % AAC_BUF_COUNT;
	aac_buf_size--;
	sem_post(&aac_m);

	return rt;
}

void aac_buf_destroy()
{
	sem_destroy(&aac_f);
	sem_destroy(&aac_e);
	sem_destroy(&aac_m);
}

LiveADTSAudioSource*
LiveADTSAudioSource::createNew(UsageEnvironment& env) {

    aac_buf_init();
    bIsInit = 1;

    // profile 1 (AAC-LC), samplingFrequencyIndex 4 (44100 Hz), 2 channels;
    // adjust these to match the live encoder's actual output:
    return new LiveADTSAudioSource(env, 1, 4, 2);
}

LiveADTSAudioSource
::LiveADTSAudioSource(UsageEnvironment& env, u_int8_t profile,
		      u_int8_t samplingFrequencyIndex, u_int8_t channelConfiguration)
  : FramedSource(env) { 
  fSamplingFrequency = samplingFrequencyTable[samplingFrequencyIndex];
  fNumChannels = channelConfiguration == 0 ? 2 : channelConfiguration;
  fuSecsPerFrame
    = (1024/*samples-per-frame*/*1000000) / fSamplingFrequency/*samples-per-second*/;

  // Construct the 'AudioSpecificConfig', and from it, the corresponding ASCII string:
  unsigned char audioSpecificConfig[2];
  u_int8_t const audioObjectType = profile + 1;
  audioSpecificConfig[0] = (audioObjectType<<3) | (samplingFrequencyIndex>>1);
  audioSpecificConfig[1] = (samplingFrequencyIndex<<7) | (channelConfiguration<<3);
  sprintf(fConfigStr, "%02X%02x", audioSpecificConfig[0], audioSpecificConfig[1]);
  //env << "liveADTSAudioSource : construct\n";
}
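For the defaults passed by createNew() above (profile 1 = AAC-LC object type 2, samplingFrequencyIndex 4 = 44100 Hz, channelConfiguration 2 = stereo), this works out to audioSpecificConfig[0] = (2<<3)|(4>>1) = 0x12 and audioSpecificConfig[1] = ((4&1)<<7)|(2<<3) = 0x10, so fConfigStr becomes "1210", the familiar AudioSpecificConfig for 44.1 kHz stereo AAC-LC.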

LiveADTSAudioSource::~LiveADTSAudioSource() {

}

// Note: We should change the following to use asynchronous file reading, #####
// as we now do with ByteStreamFileSource. #####
void LiveADTSAudioSource::doGetNextFrame() {
  // Begin by reading the 7-byte fixed_variable headers:
  sem_wait(&aac_f);

  unsigned char* cur = aac_buf_get();
  int pos = 0;
  unsigned char* headers;
  headers = cur;

  // Extract important fields from the headers:
  Boolean protection_absent = headers[1]&0x01;

  u_int16_t frame_length
    = ((headers[3]&0x03)<<11) | (headers[4]<<3) | ((headers[5]&0xE0)>>5);
#ifdef DEBUG
  {
    u_int16_t syncword = (headers[0]<<4) | (headers[1]>>4);
    fprintf(stderr, "Read frame: syncword 0x%x, protection_absent %d, frame_length %d\n", syncword, protection_absent, frame_length);
    if (syncword != 0xFFF) fprintf(stderr, "WARNING: Bad syncword!\n");
  }
#endif
  unsigned numBytesToRead
    = frame_length > 7 ? frame_length - 7 : 0;

  pos = 7;
  // If there's a 'crc_check' field, skip it:
  if (!protection_absent) {
    pos += 2;
    numBytesToRead = numBytesToRead > 2 ? numBytesToRead - 2 : 0;
  }

  // Next, read the raw frame data into the buffer provided:
  if (numBytesToRead > fMaxSize) {
    fNumTruncatedBytes = numBytesToRead - fMaxSize;
    numBytesToRead = fMaxSize;
  }

  memcpy(fTo,cur + pos,numBytesToRead);
  sem_post(&aac_e);

  fFrameSize = numBytesToRead;

  // Set the 'presentation time':
  if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
    // This is the first frame, so use the current time:
    gettimeofday(&fPresentationTime, NULL);
  } else {
    // Increment by the play time of the previous frame:
    unsigned uSeconds = fPresentationTime.tv_usec + fuSecsPerFrame;
    fPresentationTime.tv_sec += uSeconds/1000000;
    fPresentationTime.tv_usec = uSeconds%1000000;
  }

  fDurationInMicroseconds = fuSecsPerFrame;

  // Switch to another task, and inform the reader that he has data:
  nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
				(TaskFunc*)FramedSource::afterGetting, this);
}
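A note on the parsing above: the 13-bit frame_length field (the low 2 bits of header byte 3, all of byte 4, and the high 3 bits of byte 5) counts the entire ADTS frame, including the 7-byte fixed header and the optional 2-byte CRC. That is why the payload size is frame_length - 7, minus 2 more when protection_absent is 0.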


4. Add the LiveVideoFileServerMediaSubsession class

#ifndef _LIVE_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
#define _LIVE_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH

#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
#include "FileServerMediaSubsession.hh"
#endif

class LiveVideoFileServerMediaSubsession: public FileServerMediaSubsession {
public:
  static LiveVideoFileServerMediaSubsession*
  createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);

  // Used to implement "getAuxSDPLine()":
  void checkForAuxSDPLine1();
  void afterPlayingDummy1();

protected:
  LiveVideoFileServerMediaSubsession(UsageEnvironment& env,
				      char const* fileName, Boolean reuseFirstSource);
      // called only by createNew();
  virtual ~LiveVideoFileServerMediaSubsession();

  void setDoneFlag() { fDoneFlag = ~0; }

protected: // redefined virtual functions
  virtual char const* getAuxSDPLine(RTPSink* rtpSink,
				    FramedSource* inputSource);
  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
					      unsigned& estBitrate);
  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
                                    unsigned char rtpPayloadTypeIfDynamic,
				    FramedSource* inputSource);

private:
  char* fAuxSDPLine;
  char fDoneFlag; // used when setting up "fAuxSDPLine"
  RTPSink* fDummyRTPSink; // ditto
};

#endif


#include "LiveVideoFileServerMediaSubsession.hh"
#include "H264VideoRTPSink.hh"
#include "LiveH264StreamSource.hh"
#include "H264VideoStreamFramer.hh"

LiveVideoFileServerMediaSubsession*
LiveVideoFileServerMediaSubsession::createNew(UsageEnvironment& env,
					      char const* fileName,
					      Boolean reuseFirstSource) {
  return new LiveVideoFileServerMediaSubsession(env, fileName, reuseFirstSource);
}

LiveVideoFileServerMediaSubsession::LiveVideoFileServerMediaSubsession(UsageEnvironment& env,
								       char const* fileName, Boolean reuseFirstSource)
  : FileServerMediaSubsession(env, fileName, reuseFirstSource),
    fAuxSDPLine(NULL), fDoneFlag(0), fDummyRTPSink(NULL) {
}

LiveVideoFileServerMediaSubsession::~LiveVideoFileServerMediaSubsession() {
  delete[] fAuxSDPLine;
}

static void afterPlayingDummy(void* clientData) {
  LiveVideoFileServerMediaSubsession* subsess = (LiveVideoFileServerMediaSubsession*)clientData;
  subsess->afterPlayingDummy1();
}

void LiveVideoFileServerMediaSubsession::afterPlayingDummy1() {
  // Unschedule any pending 'checking' task:
  envir().taskScheduler().unscheduleDelayedTask(nextTask());
  // Signal the event loop that we're done:
  setDoneFlag();
}

static void checkForAuxSDPLine(void* clientData) {
  LiveVideoFileServerMediaSubsession* subsess = (LiveVideoFileServerMediaSubsession*)clientData;
  subsess->checkForAuxSDPLine1();
}

void LiveVideoFileServerMediaSubsession::checkForAuxSDPLine1() {
  char const* dasl;

  if (fAuxSDPLine != NULL) {
    // Signal the event loop that we're done:
    setDoneFlag();
  } else if (fDummyRTPSink != NULL && (dasl = fDummyRTPSink->auxSDPLine()) != NULL) {
    fAuxSDPLine = strDup(dasl);
    fDummyRTPSink = NULL;

    // Signal the event loop that we're done:
    setDoneFlag();
  } else if (!fDoneFlag) {
    // try again after a brief delay:
    int uSecsToDelay = 100000; // 100 ms
    nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsToDelay,
			      (TaskFunc*)checkForAuxSDPLine, this);
  }
}

char const* LiveVideoFileServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) {
  if (fAuxSDPLine != NULL) return fAuxSDPLine; // it's already been set up (for a previous client)

  if (fDummyRTPSink == NULL) { // we're not already setting it up for another, concurrent stream
    // Note: For H264 video files, the 'config' information ("profile-level-id" and "sprop-parameter-sets") isn't known
    // until we start reading the file.  This means that "rtpSink"s "auxSDPLine()" will be NULL initially,
    // and we need to start reading data from our file until this changes.
    fDummyRTPSink = rtpSink;

    // Start reading the file:
    fDummyRTPSink->startPlaying(*inputSource, afterPlayingDummy, this);

    // Check whether the sink's 'auxSDPLine()' is ready:
    checkForAuxSDPLine(this);
  }

  envir().taskScheduler().doEventLoop(&fDoneFlag);

  return fAuxSDPLine;
}

FramedSource* LiveVideoFileServerMediaSubsession::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
  estBitrate = 500; // kbps, estimate

  // Create the video source:
  LiveH264StreamSource* fileSource = LiveH264StreamSource::createNew(envir(), fFileName);
  if (fileSource == NULL) return NULL;
  fFileSize = fileSource->fileSize();

  // Create a framer for the Video Elementary Stream:
  return H264VideoStreamFramer::createNew(envir(), fileSource);
}

RTPSink* LiveVideoFileServerMediaSubsession
::createNewRTPSink(Groupsock* rtpGroupsock,
		   unsigned char rtpPayloadTypeIfDynamic,
		   FramedSource* /*inputSource*/) {
  return H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
}
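One practical caveat for live input: getAuxSDPLine()'s "dummy sink" loop only completes once the H264VideoStreamFramer has actually seen SPS and PPS NAL units, so the feeding side should emit SPS/PPS ahead of the first IDR frame (most hardware encoders can be configured to repeat them before every IDR). Otherwise the DESCRIBE handling stalls in doEventLoop() waiting for fAuxSDPLine.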

5. Add the LiveH264StreamSource class

#ifndef _H264_STREAM_FILE_SOURCE_HH
#define _H264_STREAM_FILE_SOURCE_HH

#ifndef _FRAMED_FILE_SOURCE_HH
#include "FramedFileSource.hh"
#endif

#include <pthread.h>
#include <semaphore.h>

#define H264_BUF_SIZE 150000
#define H264_BUF_COUNT 10

typedef void (*CB_FUN)(void);
extern void h264_buf_init();
extern void h264_buf_destroy();
extern Boolean h264_buf_full();
extern Boolean h264_buf_empty();
extern int h264_buf_put(unsigned char* buf,int len);
extern unsigned char* h264_buf_get(int* len);

extern sem_t h264_f;
extern sem_t h264_e;
extern int b264IsInit;
extern CB_FUN startfun;
extern CB_FUN endfun;

class LiveH264StreamSource: public FramedSource {
public:
  static LiveH264StreamSource* createNew(UsageEnvironment& env,
					 char const* fileName,
					 unsigned preferredFrameSize = 0,
					 unsigned playTimePerFrame = 0);
  // "preferredFrameSize" == 0 means 'no preference'
  // "playTimePerFrame" is in microseconds
/*
  static LiveH264StreamSource* createNew(UsageEnvironment& env,
					 unsigned preferredFrameSize = 0,
					 unsigned playTimePerFrame = 0);
      // an alternative version of "createNew()" that's used if you already have
      // an open file.
*/
  u_int64_t fileSize() const { return fFileSize; }
      // 0 means zero-length, unbounded, or unknown

  void seekToByteAbsolute(u_int64_t byteNumber, u_int64_t numBytesToStream = 0);
    // if "numBytesToStream" is >0, then we limit the stream to that number of bytes, before treating it as EOF
  void seekToByteRelative(int64_t offset, u_int64_t numBytesToStream = 0);
  void seekToEnd(); // to force EOF handling on the next read

protected:
  LiveH264StreamSource(UsageEnvironment& env,
		       unsigned preferredFrameSize,
		       unsigned playTimePerFrame);
	// called only by createNew()

  virtual ~LiveH264StreamSource();

  static void fileReadableHandler(LiveH264StreamSource* source, int mask);
  void doReadFromFile();

private:
  // redefined virtual functions:
  virtual void doGetNextFrame();
  virtual void doStopGettingFrames();

protected:
  u_int64_t fFileSize;

private:
  unsigned fPreferredFrameSize;
  unsigned fPlayTimePerFrame;
  Boolean fFidIsSeekable;
  unsigned fLastPlayTime;
  Boolean fHaveStartedReading;
  Boolean fLimitNumBytesToStream;
  u_int64_t fNumBytesToStream; // used iff "fLimitNumBytesToStream" is True
};

#endif


#include "LiveH264StreamSource.hh"
#include "InputFile.hh"
#include "GroupsockHelper.hh"

// LiveH264StreamSource //
unsigned char h264_framebuf[H264_BUF_COUNT][H264_BUF_SIZE];
int h264_frame_len[H264_BUF_COUNT];
int h264_buf_head;
int h264_buf_tail;
int h264_buf_size;

void h264_buf_init();
void h264_buf_destroy();
Boolean h264_buf_full();
Boolean h264_buf_empty();
int h264_buf_put(unsigned char* buf,int len);
unsigned char* h264_buf_get(int* len);

sem_t h264_f;
sem_t h264_e;
sem_t h264_m;
int b264IsInit = 0;
CB_FUN startfun = NULL;
CB_FUN endfun = NULL;

unsigned char obuf[H264_BUF_SIZE];
unsigned olen = 0;

void h264_buf_init()
{
	if(b264IsInit == 0)
	{
		// pshared = 0: these semaphores are only shared between threads
		// of this process (matching the AAC ring above):
		sem_init(&h264_f, 0, 0);              // counts filled slots
		sem_init(&h264_e, 0, H264_BUF_COUNT); // counts empty slots
		sem_init(&h264_m, 0, 1);              // mutex protecting the ring indexes
		h264_buf_head = 0;
		h264_buf_tail = 0;
		h264_buf_size = 0;
	}
}
Boolean h264_buf_full()
{
	if(h264_buf_size == H264_BUF_COUNT)
		return True;
	return False;
}
Boolean h264_buf_empty()
{
	if(h264_buf_size == 0)
		return True;
	return False;
}
int h264_buf_put(unsigned char* buf,int len)
{
	// Caller must ensure len <= H264_BUF_SIZE (Rtsp_H264_Frame splits larger frames).
	sem_wait(&h264_e); // wait for an empty slot
	sem_wait(&h264_m);
	bzero(h264_framebuf[h264_buf_tail], H264_BUF_SIZE);
	memcpy(h264_framebuf[h264_buf_tail], buf, len);
	h264_frame_len[h264_buf_tail] = len;

	h264_buf_tail = (h264_buf_tail + 1) % H264_BUF_COUNT;
	h264_buf_size++;
	sem_post(&h264_f); // signal one more filled slot
	sem_post(&h264_m);
	return 0;
}
unsigned char* h264_buf_get(int* len)
{
	// Caller must sem_wait(&h264_f) before calling, and sem_post(&h264_e)
	// only after it has finished copying out of the returned slot.
	sem_wait(&h264_m);
	unsigned char* rt = h264_framebuf[h264_buf_head];
	*len = h264_frame_len[h264_buf_head];

	h264_buf_head = (h264_buf_head + 1) % H264_BUF_COUNT;
	h264_buf_size--;
	sem_post(&h264_m);

	return rt;
}

void h264_buf_destroy()
{
	sem_destroy(&h264_f);
	sem_destroy(&h264_e);
	sem_destroy(&h264_m);
}
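Both rings follow the classic bounded-buffer pattern: the "e" semaphores count empty slots (the producer blocks on them when the ring is full), the "f" semaphores count filled slots (the consumer blocks on them in doGetNextFrame()/doReadFromFile()), and the "m" semaphores are mutexes over the head/tail indexes. Returning a raw pointer from h264_buf_get()/aac_buf_get() is safe only because the consumer posts the "empty" semaphore after it has finished copying out of the slot, so the producer cannot overwrite it in the meantime.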


LiveH264StreamSource*
LiveH264StreamSource::createNew(UsageEnvironment& env, char const* /*fileName*/,
				unsigned preferredFrameSize,
				unsigned playTimePerFrame) {
  h264_buf_init();
  b264IsInit = 1;

  LiveH264StreamSource* newSource
    = new LiveH264StreamSource(env, preferredFrameSize, playTimePerFrame);

  if (startfun != NULL) {
    startfun(); // notify the application that a client has started streaming
  }

  return newSource;
}
/*
LiveH264StreamSource*
LiveH264StreamSource::createNew(UsageEnvironment& env, 
				unsigned preferredFrameSize,
				unsigned playTimePerFrame) {

  LiveH264StreamSource* newSource = new LiveH264StreamSource(env, preferredFrameSize, playTimePerFrame);

  return newSource;
}
*/

// Seeking is not meaningful for a live source; these stubs only record the
// byte limit so that existing callers keep working:
void LiveH264StreamSource::seekToByteAbsolute(u_int64_t /*byteNumber*/, u_int64_t numBytesToStream) {
  fNumBytesToStream = numBytesToStream;
  fLimitNumBytesToStream = fNumBytesToStream > 0;
}

void LiveH264StreamSource::seekToByteRelative(int64_t /*offset*/, u_int64_t numBytesToStream) {
  fNumBytesToStream = numBytesToStream;
  fLimitNumBytesToStream = fNumBytesToStream > 0;
}

void LiveH264StreamSource::seekToEnd() {
}

LiveH264StreamSource::LiveH264StreamSource(UsageEnvironment& env, 
					   unsigned preferredFrameSize,
					   unsigned playTimePerFrame)
  : FramedSource(env), fFileSize(0), fPreferredFrameSize(preferredFrameSize),
    fPlayTimePerFrame(playTimePerFrame), fLastPlayTime(0),
    fHaveStartedReading(False), fLimitNumBytesToStream(False), fNumBytesToStream(0) {

  // Test whether the file is seekable
  fFidIsSeekable = False;
}

LiveH264StreamSource::~LiveH264StreamSource() {
  if (endfun != NULL) {
    endfun(); // notify the application that the client has gone away
  }
}

void LiveH264StreamSource::doGetNextFrame() {
  doReadFromFile();
}

void LiveH264StreamSource::doStopGettingFrames() {
  //envir().taskScheduler().unscheduleDelayedTask(nextTask());
}

void LiveH264StreamSource::fileReadableHandler(LiveH264StreamSource* source, int /*mask*/) {
  if (!source->isCurrentlyAwaitingData()) {
    source->doStopGettingFrames(); // we're not ready for the data yet
    return;
  }
  source->doReadFromFile();
}

void LiveH264StreamSource::doReadFromFile() {
  // Try to read as many bytes as will fit in the buffer provided (or "fPreferredFrameSize" if less)
  if (fLimitNumBytesToStream && fNumBytesToStream < (u_int64_t)fMaxSize) {
    fMaxSize = (unsigned)fNumBytesToStream;
  }
  if (fPreferredFrameSize > 0 && fPreferredFrameSize < fMaxSize) {
    fMaxSize = fPreferredFrameSize;
  }

  sem_wait(&h264_f); // block until the feeder has queued a chunk
  int len = 0;
  unsigned char* frame = h264_buf_get(&len);

  // First flush any bytes carried over from a chunk that didn't fit last time:
  if (olen > 0) {
    memcpy(fTo, obuf, olen);
  }

  if (len + olen >= fMaxSize) {
    // The chunk doesn't fit: deliver fMaxSize bytes now and carry the
    // remainder in "obuf" until the next doGetNextFrame() call:
    unsigned need = fMaxSize - olen;
    memcpy(&fTo[olen], frame, need);
    fFrameSize = fMaxSize;
    olen = len - need;
    memcpy(obuf, &frame[need], olen);
  } else {
    memcpy(&fTo[olen], frame, len); // append the whole chunk after the carried-over bytes
    fFrameSize = olen + len;
    olen = 0;
  }
  sem_post(&h264_e); // the ring slot may now be reused

  fNumBytesToStream -= fFrameSize;

  // Set the 'presentation time':
  if (fPlayTimePerFrame > 0 && fPreferredFrameSize > 0) {
    if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
      // This is the first frame, so use the current time:
      gettimeofday(&fPresentationTime, NULL);
    } else {
      // Increment by the play time of the previous data:
      unsigned uSeconds	= fPresentationTime.tv_usec + fLastPlayTime;
      fPresentationTime.tv_sec += uSeconds/1000000;
      fPresentationTime.tv_usec = uSeconds%1000000;
    }

    // Remember the play time of this data:
    fLastPlayTime = (fPlayTimePerFrame*fFrameSize)/fPreferredFrameSize;
    fDurationInMicroseconds = fLastPlayTime;
  } else {
    // We don't know a specific play time duration for this data,
    // so just record the current time as being the 'presentation time':
    gettimeofday(&fPresentationTime, NULL);
  }

  // To avoid possible infinite recursion, we need to return to the event loop to do this:
  nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
				(TaskFunc*)FramedSource::afterGetting, this);
}

6. Modify liveMedia.hh by adding the following includes:

#include "LiveADTSAudioSource.hh"
#include "LiveADTSAudioServerMediaSubsession.hh"
#include "LiveVideoFileServerMediaSubsession.hh"
#include "LiveH264StreamSource.hh"

7. Add the wrapper interface code

#include <BasicUsageEnvironment.hh>
#include "DynamicRTSPServer.hh"
#include <liveMedia.hh>
#include "version.hh"

typedef void (*CB_FUN)(void);

int Rtsp_Server_Start();
int Rtsp_Server_Stop();
int Rtsp_AAC_Frame(unsigned char* frame,int framelen);
int Rtsp_H264_Frame(unsigned char* frame,int framelen);
int Rtsp_Regist_Start_Routine(CB_FUN fun);
int Rtsp_Regist_Stop_Routine(CB_FUN fun);
int Rtsp_Port();

#include "librtspserv.hh"
#include <pthread.h>

pthread_t id;
static portNumBits g_rtspServerPortNum = 0; // recorded for Rtsp_Port()

void* server_start(void* arg) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

  UserAuthenticationDatabase* authDB = NULL;
#ifdef ACCESS_CONTROL
  // To implement client access control to the RTSP server, do the following:
  authDB = new UserAuthenticationDatabase;
  authDB->addUserRecord("username1", "password1"); // replace these with real strings
  // Repeat the above with each <username>, <password> that you wish to allow
  // access to the server.
#endif

  // Create the RTSP server.  Try first with the default port number (554),
  // and then with the alternative port number (8554):
  RTSPServer* rtspServer;
  portNumBits rtspServerPortNum = 554;
  rtspServer = DynamicRTSPServer::createNew(*env, rtspServerPortNum, authDB);
  if (rtspServer == NULL) {
    rtspServerPortNum = 8554;
    rtspServer = DynamicRTSPServer::createNew(*env, rtspServerPortNum, authDB);
  }
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  g_rtspServerPortNum = rtspServerPortNum; // remember which port we actually bound

  *env << "LIVE555 Media Server\n";
  *env << "\tversion " << MEDIA_SERVER_VERSION_STRING
       << " (LIVE555 Streaming Media library version "
       << LIVEMEDIA_LIBRARY_VERSION_STRING << ").\n";

  char* urlPrefix = rtspServer->rtspURLPrefix();
  *env << "Play streams from this server using the URL\n\t"
       << urlPrefix << "<filename>\nwhere <filename> is a file present in the current directory.\n";
  *env << "Each file's type is inferred from its name suffix:\n";
  *env << "\t\".264\" => a H.264 Video Elementary Stream file\n";
  *env << "\t\".265\" => a H.265 Video Elementary Stream file\n";
  *env << "\t\".aac\" => an AAC Audio (ADTS format) file\n";
  *env << "\t\".ac3\" => an AC-3 Audio file\n";
  *env << "\t\".amr\" => an AMR Audio file\n";
  *env << "\t\".dv\" => a DV Video file\n";
  *env << "\t\".m4e\" => a MPEG-4 Video Elementary Stream file\n";
  *env << "\t\".mkv\" => a Matroska audio+video+(optional)subtitles file\n";
  *env << "\t\".mp3\" => a MPEG-1 or 2 Audio file\n";
  *env << "\t\".mpg\" => a MPEG-1 or 2 Program Stream (audio+video) file\n";
  *env << "\t\".ogg\" or \".ogv\" or \".opus\" => an Ogg audio and/or video file\n";
  *env << "\t\".ts\" => a MPEG Transport Stream file\n";
  *env << "\t\t(a \".tsx\" index file - if present - provides server 'trick play' support)\n";
  *env << "\t\".vob\" => a VOB (MPEG-2 video with AC-3 audio) file\n";
  *env << "\t\".wav\" => a WAV Audio file\n";
  *env << "\t\".webm\" => a WebM audio(Vorbis)+video(VP8) file\n";
  *env << "See http://www.live555.com/mediaServer/ for additional documentation.\n";

  // Also, attempt to create a HTTP server for RTSP-over-HTTP tunneling.
  // Try first with the default HTTP port (80), and then with the alternative HTTP
  // port numbers (8000 and 8080).

  if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) {
    *env << "(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling, or for HTTP live streaming (for indexed Transport Stream files only).)\n";
  } else {
    *env << "(RTSP-over-HTTP tunneling is not available.)\n";
  }

  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}

int Rtsp_Server_Start()
{
	pthread_create(&id, NULL, server_start, NULL);
	return 0;
}

int Rtsp_AAC_Frame(unsigned char* frame,int framelen)
{
	// Block until the live555 side has created the AAC source (and its ring buffer):
	while(bIsInit == 0)
	{
		usleep(1000*100);
	}

	aac_buf_put(frame,framelen);

	usleep(0); // yield
	return 0;
}

int Rtsp_Server_Stop()
{
	h264_buf_destroy();
	aac_buf_destroy();
	return 0;
}

int Rtsp_H264_Frame(unsigned char* frame,int framelen)
{
	// Block until the live555 side has created the H.264 source (and its ring buffer):
	while(b264IsInit == 0)
	{
		usleep(1000*100);
	}

	// Frames larger than one ring slot are split into H264_BUF_SIZE chunks;
	// this is safe because the downstream H264VideoStreamFramer treats the
	// delivered data as one continuous byte stream:
	int i = 0;
	for(i = 0; i < framelen/H264_BUF_SIZE; i++)
	{
		h264_buf_put(&frame[i*H264_BUF_SIZE], H264_BUF_SIZE);
	}

	int lastsize = framelen % H264_BUF_SIZE;
	if(lastsize > 0)
	{
		h264_buf_put(&frame[i*H264_BUF_SIZE], lastsize);
	}

	usleep(0); // yield
	return 0;
}

int Rtsp_Regist_Start_Routine(CB_FUN fun)
{
	startfun = fun;
	return 0;
}

int Rtsp_Regist_Stop_Routine(CB_FUN fun)
{
	endfun = fun;
	return 0;
}

int Rtsp_Port()
{
	// Returns 0 until the server thread has bound a port (554, else 8554):
	return g_rtspServerPortNum;
}
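Putting it together, a minimal usage sketch. (my_get_h264_frame is a hypothetical stand-in for your encoder's output, and the declarations above are assumed to live in librtspserv.hh; clients then play rtsp://<server-ip>/live.)

#include <unistd.h>
#include "librtspserv.hh"

static void on_first_client() { /* e.g. start the hardware encoder */ }
static void on_last_client()  { /* e.g. stop the hardware encoder */ }

int main()
{
	Rtsp_Regist_Start_Routine(on_first_client);
	Rtsp_Regist_Stop_Routine(on_last_client);
	Rtsp_Server_Start(); // spawns the live555 event-loop thread

	unsigned char frame[150000]; // one H.264 access unit per call
	for (;;) {
		int len = 0;
		// len = my_get_h264_frame(frame, sizeof frame); // emit SPS/PPS before each IDR
		if (len > 0) Rtsp_H264_Frame(frame, len); // blocks until the first client connects
		// ... likewise Rtsp_AAC_Frame() with one ADTS frame at a time ...
		usleep(10 * 1000);
	}

	return 0; // not reached; a real app would call Rtsp_Server_Stop() on shutdown
}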

