Rewriting several live555 classes

API_H264Decode.hpp
#ifndef			_API_H264_DECODE_HPP_
#define			_API_H264_DECODE_HPP_

typedef void(*LPGetFramePtr)(u_int8_t* pImage ,int& frame,const char* CameraIp);
extern "C"
{
	#include <libavutil/opt.h>
	#include <libavcodec/avcodec.h>
	#include <libavutil/channel_layout.h>
	#include <libavutil/common.h>
	#include <libavutil/imgutils.h>
	#include <libavutil/mathematics.h>
	#include <libavutil/samplefmt.h>
	#include <libswscale/swscale.h>
	#include <libavformat/avformat.h> 
};

namespace API
{
	class H264DecodeDev{
	public:
		H264DecodeDev();
		virtual ~H264DecodeDev();
	private:
		AVCodec*              m_pcodec;
		AVCodecContext*       m_pCodeCtx;
		AVFormatContext*      m_pFormat;
		AVCodecParserContext* m_pCodecParserCtx;
		AVFrame*              m_pFrame;
		AVPacket              m_Packet;
	private:
		LPGetFramePtr  		    m_pCallBack;
	private:
		int 			    m_nWidth;
		int 			    m_nHeight;	
	public: // FFmpeg decode interface
		virtual bool 		InitializeDev(LPGetFramePtr pCallBack, int width, int height);
		int 			FrameChangeRGB24(AVFrame* pFrame, uint8_t** pRGB);
		void 			RedChangeBlue(uint8_t* pRGB, int nWidth, int nHeight);
		virtual bool 		ProcessDecode(uint8_t* pBuffer, int nSize, const char* fStreamId);
	public:
		int  						m_nFrameNumber;
	};
}

#endif // _API_H264_DECODE_HPP_
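
Before the implementation, a quick usage sketch of the class declared above (a hypothetical caller; it assumes an Annex-B H.264 buffer and a 1920x1080 stream, so adjust to your source):

// Frame callback matching LPGetFramePtr: pImage is an RGB24 buffer of
// width*height*3 bytes. The decoder frees it after the callback returns,
// so copy it if you need to keep the pixels.
static void OnFrame(u_int8_t* pImage, int& frame, const char* CameraIp)
{
    // consume pImage here
}

void DecodeExample(uint8_t* pH264, int nSize)
{
    API::H264DecodeDev dev;
    if (dev.InitializeDev(OnFrame, 1920, 1080))
        dev.ProcessDecode(pH264, nSize, "rtsp://192.168.1.10:554/ch1");
}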

API_H264Decode.cpp
#include "API_H264Decode.hpp"
//---------------------------- FFmpeg H.264 decoder class
API::H264DecodeDev::H264DecodeDev():
m_pcodec(nullptr),
m_pCodeCtx(nullptr),
m_pCodecParserCtx(nullptr),
m_pFrame(nullptr),
m_nFrameNumber(0),
m_nWidth(0),
m_nHeight(0){
}
API::H264DecodeDev::~H264DecodeDev(){
  if(m_pCodecParserCtx != nullptr)
  {
    av_parser_close(m_pCodecParserCtx);
    m_pCodecParserCtx = nullptr;
  }
  if (m_pFrame != nullptr)
  {
    av_frame_free(&m_pFrame);
  }
  if (m_pCodeCtx != nullptr)
  {
    // avcodec_free_context() also releases extradata; the codec returned by
    // avcodec_find_decoder() is static and must not be freed.
    avcodec_close(m_pCodeCtx);
    avcodec_free_context(&m_pCodeCtx);
  }
  m_pcodec = nullptr;
}
bool API::H264DecodeDev::InitializeDev(LPGetFramePtr  pCallBack,int width,int height){
   if (nullptr == pCallBack){
      LLERROR("H264  CallBack_Ptr is nullptr");
      return false;
    }
    if (0 == width || 0 == height){
      LLERROR("H264  width or height is zero");
      return false;
    }

    m_pCallBack = pCallBack;

    m_nWidth    = width;
    m_nHeight   = height;

    avcodec_register_all(); // When several decoders are created (one per RTSP stream),
    av_register_all();      // these two registration calls should run only once
                            // (see the std::call_once sketch after this function).

    
    m_pcodec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (nullptr == m_pcodec){
       LLERROR("avcodec_find_decoder failed");
       return false;
    }
    m_pCodeCtx = avcodec_alloc_context3(m_pcodec);
    if (nullptr == m_pCodeCtx){
       LLERROR("avcodec_alloc_context3 failed");
       return false;
    }
    m_pCodecParserCtx = av_parser_init(AV_CODEC_ID_H264);
    if (nullptr == m_pCodecParserCtx){
       LLERROR("av_parser_init failed");
       return false;
    }
    if (avcodec_open2(m_pCodeCtx, m_pcodec, nullptr) < 0){
       LLERROR("avcodec_open2 failed");
       return false;
    }
    m_pFrame = av_frame_alloc();
    av_init_packet(&m_Packet);
    return true;
}
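
The registration note above matters once several decoders run in one process (one per RTSP stream). A minimal sketch of enforcing the one-time call, assuming C++11 is available:

#include <mutex>

// Sketch: make the global FFmpeg registration idempotent and thread-safe,
// so every H264DecodeDev instance can call it without double registration.
static void RegisterFFmpegOnce()
{
    static std::once_flag s_once;
    std::call_once(s_once, []{
        avcodec_register_all();
        av_register_all();
    });
}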
bool API::H264DecodeDev::ProcessDecode(uint8_t* pBuffer,int nSize,const char* fStreamId) // feeds raw H.264 obtained over RTSP into the decoder
{
    uint8_t*    CurPtr = pBuffer;
    bool        bReturn = true;
    av_init_packet(&m_Packet);
    m_Packet.data = pBuffer;
    m_Packet.size = nSize;
    while(nSize > 0)
    {
        //LLDEBUG("av_init_packet ...");
        //av_init_packet(&m_Packet);
        int len = av_parser_parse2(m_pCodecParserCtx, m_pCodeCtx, &(m_Packet.data), &(m_Packet.size), CurPtr, nSize, AV_NOPTS_VALUE, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
            
        CurPtr += len;
        nSize  -= len;
        //LLDEBUG("Packet.size..: %d",m_Packet.size);
        if(m_Packet.size)
        {
            int res = avcodec_send_packet(m_pCodeCtx, &m_Packet);
            if (res < 0)
            {
              bReturn = false;
              continue;
            }
            res = avcodec_receive_frame(m_pCodeCtx, m_pFrame);
            if (res < 0)
            {
              // AVERROR(EAGAIN) just means the decoder needs more input
              // before it can emit a frame; it is not a hard failure.
              bReturn = false;
              continue;
            }
            // decode succeeded: bump the frame counter
            ++m_nFrameNumber;
            //LLDEBUG("FrameNum:%d",m_nFrameNumber);
            int   nDataSize = FrameChangeRGB24(m_pFrame, nullptr); // query the RGB24 buffer size
            if (nDataSize){
              unsigned char*    pImage = new unsigned char[nDataSize];
              if(pImage){
                memset(pImage,0,nDataSize);
                FrameChangeRGB24(m_pFrame,&pImage);
                RedChangeBlue(pImage,m_nWidth,m_nHeight);
                if(m_pCallBack != nullptr){
                    m_pCallBack(pImage,m_nFrameNumber,fStreamId);
                }
              }
              delete[]    pImage;
              pImage = nullptr;
            }
        }
  }
  return bReturn;
}
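
One caveat in ProcessDecode(): avcodec_send_packet() / avcodec_receive_frame() are not strictly one-to-one, so a single packet may yield zero or several frames, and AVERROR(EAGAIN) from the receive side is not a real error. A drain-style variant (a sketch, not the code above) looks like this:

// Sketch: pull every frame the decoder can produce for one sent packet.
static bool DrainFrames(AVCodecContext* ctx, AVFrame* frame)
{
    for (;;) {
        int res = avcodec_receive_frame(ctx, frame);
        if (res == AVERROR(EAGAIN) || res == AVERROR_EOF)
            return true;   // no more frames for this packet
        if (res < 0)
            return false;  // hard decode error
        // ... convert and deliver *frame here ...
    }
}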
// swap the red and blue channels of an RGB24 buffer in place
void API::H264DecodeDev::RedChangeBlue(uint8_t* pRGB, int nWidth, int nHeight)
{
  for (int x = 0; x < nHeight; ++x)
  {
    for (int y = 0; y < nWidth; ++y)
    {
      int   nSize = (x * nWidth + y) * 3;
      uint8_t uTemp = pRGB[nSize + 0];
      pRGB[nSize + 0] = pRGB[nSize + 2];
      pRGB[nSize + 2] = uTemp;
    }
  }
}
int API::H264DecodeDev::FrameChangeRGB24(AVFrame* pFrame, uint8_t** pRGB)
{
    int   iSize = m_nWidth * m_nHeight * 3;
    if(!pRGB){
        return iSize;
    }
    struct SwsContext* img_convert_ctx = nullptr;
    // destination stride: 3 bytes per pixel at the *target* width
    int linesize[4] = { 3 * m_nWidth, 0, 0, 0 };
    img_convert_ctx = sws_getContext(pFrame->width, pFrame->height, AV_PIX_FMT_YUV420P,
                                     m_nWidth, m_nHeight, AV_PIX_FMT_RGB24,
                                     SWS_POINT, nullptr, nullptr, nullptr);
    if (img_convert_ctx != nullptr){
      sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pFrame->height, pRGB, linesize);
      sws_freeContext(img_convert_ctx);
    }
    return iSize;
}
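
A side note on the pair of functions above: since the consumer apparently wants the channels swapped (hence RedChangeBlue), libswscale could emit BGR24 directly and the per-pixel swap could be dropped entirely; a sketch of the alternative context:

// Sketch: target AV_PIX_FMT_BGR24 instead of RGB24 and skip RedChangeBlue().
SwsContext* ctx = sws_getContext(pFrame->width, pFrame->height, AV_PIX_FMT_YUV420P,
                                 m_nWidth, m_nHeight, AV_PIX_FMT_BGR24,
                                 SWS_POINT, nullptr, nullptr, nullptr);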

API_RTSPClient.hpp

#ifndef		_API_RTSP_CLIENT_HPP_
#define		_API_RTSP_CLIENT_HPP_
#include 	"UsageEnvironment.hh"
#include 	"liveMedia.hh"
#include 	"BasicUsageEnvironment.hh"
#include	"Base64.hh"
#include   	"./MenethilAPI/API_Socket.hpp"
#include 	"./VideoDecode/API_H264Decode.hpp"
#include  	<queue>
#include 	<pthread.h>
#include    <iostream>
#include 	<string>
#include    <vector>
#include  	<memory>
#include    <opencv2/opencv.hpp>
#define 	INBUF_SIZE 4096
#define 	AUDIO_INBUF_SIZE 20480
#define 	AUDIO_REFILL_THRESH 4096

typedef struct _live555task
{
	int 	time;
	void* 	task;
	void* 	arg;
	_live555task()
	{
		time = 0;
		task = NULL;
		arg  = NULL;
	};
}live555task;
typedef struct _live555videoParam
{
	UsageEnvironment 				*env;

	int 							nIndex;
	void 							*local;
	_live555videoParam()
	{
		env = NULL;
		local = NULL;
		nIndex = -1;
	}
}live555videoParam;

void GetCurrTime(char* strTime);
void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString);
void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString);
void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString);

void subsessionAfterPlaying(void* clientData); 
void subsessionByeHandler(void* clientData); 
void streamTimerHandler(void* clientData);
// The main streaming routine (for each "rtsp://" URL):
//void openURL(UsageEnvironment& env, char const* progName, char const* rtspURL);
// Used to iterate through each stream's 'subsessions', setting up each one:
void setupNextSubsession(RTSPClient* rtspClient);
// Used to shut down and close a stream (including its "RTSPClient" object):
void shutdownStream(RTSPClient* rtspClient, int exitCode = 1);

// Define a class to hold per-stream state that we maintain throughout each stream's lifetime:
class StreamClientState 
{
	public:
  		StreamClientState();
  	virtual ~StreamClientState();
	public:
	  MediaSubsessionIterator* iter;
	  MediaSession* session;
	  MediaSubsession* subsession;
	  TaskToken streamTimerTask;
	  double duration;
};
class ourRTSPClient: public RTSPClient
{
	public:
	  static ourRTSPClient* createNew(UsageEnvironment& env, char const* rtspURL,
					  int verbosityLevel = 0,
					  char const* applicationName = NULL,
					  portNumBits tunnelOverHTTPPortNum = 0);
	protected:
  		ourRTSPClient(UsageEnvironment& env, char const* rtspURL,
		int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum);
    // called only by createNew();
  	virtual ~ourRTSPClient();
	public:
  		StreamClientState scs;
};
class H264MediaSink: public MediaSink {
public:
  static H264MediaSink* createNew(UsageEnvironment& env,
			      MediaSubsession& subsession,
			      char const* sPropParameterSetsStr, // the SDP "sprop-parameter-sets" string (SPS/PPS)
			      char const* streamId = NULL); // identifies the stream itself (optional)
protected:
  H264MediaSink(UsageEnvironment& env, MediaSubsession& subsession,char const*sPropParameterSetsStr, char const* streamId);
  virtual ~H264MediaSink();
  static void afterGettingFrame(void* clientData, unsigned frameSize,
                                unsigned numTruncatedBytes,
				struct timeval presentationTime,
                                unsigned durationInMicroseconds);
  void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
			 struct timeval presentationTime);
  int  SetParameterStr();
private:
  // redefined virtual functions:
  	virtual Boolean continuePlaying();
private:
	u_int8_t* 			fReceiveBuffer;
	MediaSubsession& 	fSubsession;
	char* 				fStreamId;
	u_int8_t* 				sps;
	u_int8_t* 				pps;
	unsigned 			spsSize;
	unsigned 			ppsSize; 
	Boolean             bFirstFrame;
	int 				extraDataSize;
	char const*  		fSPropParameterSetsStr[3];
private://ffmpeg
	API::H264DecodeDev	*m_pDecode;
};
class API_RTSPSession 
{
	public:
 		API_RTSPSession();
 	virtual ~API_RTSPSession();
 	public:
 		virtual int 		Initialize(std::string& rtspUrl,int& nIndex);
 		virtual int  		startRTSPStream(LPGetFramePtr pCallBack);
 		virtual int 		Initlive555();
 		virtual void 		live555_loop();	
 		virtual int 		stopRTSP();
 		virtual int 		openURL(UsageEnvironment& env, char const* progName, char const* rtspURL);
 	public:
		static  void        live555_task(void* clientData);
		static  void        Play_video(void* clientData);
		static  void*       live555_Thread(void* arg);
	 	static  void 		SubStreamThread(void* arg);
	 	static  void        Set_live555task(int Time,void* Task,void* arg);
	public:
		pthread_t 					nSessionPid;
		int 						nSessionId;
	public:
		API::API_Mutex              m_SetMutex;
 		std::string 				m_progName;
 		std::string 				m_rtspUrl;
 		int 						m_debugLevel;
 		volatile int 				m_nStatus;
 		volatile int 				m_ndataflag;
 		ourRTSPClient*				m_rtspClient;
		char     					eventLoopWatchVariable;
		TaskScheduler*        		m_scheduler;
		UsageEnvironment*           m_env;
};
#endif
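
A sketch of how the pieces above fit together for one stream (hypothetical driver; MyFrameCallback is any function matching LPGetFramePtr):

// Open one RTSP URL: Initialize() stores the URL, startRTSPStream() creates
// the live555 scheduler/environment and spawns the event-loop thread.
API_RTSPSession session;
std::string url("rtsp://192.168.200.52:554/channel1/2");
int index = 0;
if (session.Initialize(url, index) == 0)
    session.startRTSPStream(MyFrameCallback);
// ... later: session.stopRTSP();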

API_RTSPClient.cpp

#include "API_RTSPClient.hpp"
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <arpa/inet.h>
#include <signal.h>
#include <fstream>
#define RTSP_CLIENT_VERBOSITY_LEVEL 1
#define REQUEST_STREAMING_OVER_TCP False
#define DUMMY_SINK_RECEIVE_BUFFER_SIZE (921700)

//#define DEBUG_PRINT_EACH_RECEIVED_FRAME 1
//#define DEBUG_PRINT_RTSP_ClIENT_INTO 1
static unsigned  int rtspClientCount = 0;
static LPGetFramePtr   n_pRtspCall = nullptr;
void GetCurrTime(char* strTime){
    struct timeval    tv;
    struct timezone   tz;
    struct tm*    p;
    gettimeofday(&tv, &tz);
    p = localtime(&tv.tv_sec);
    sprintf(strTime, "%02d:%02d:%02d", p->tm_hour, p->tm_min, p->tm_sec);
    return;
}
// A function that outputs a string that identifies each stream (for debugging output). Modify this if you wish:
UsageEnvironment& operator<<(UsageEnvironment& env, const RTSPClient& rtspClient){
    return env << "[URL:\"" << rtspClient.url() << "\"]: ";
}
// A function that outputs a string that identifies each subsession (for debugging output). Modify this if you wish:
UsageEnvironment& operator<<(UsageEnvironment& env, const MediaSubsession& subsession) {
    return env << subsession.mediumName() << "/" << subsession.codecName();
}
void usage(UsageEnvironment& env, char const* progName)
{
	 env << "Usage: " << progName << "  ... \n";
	 env << "\t(where each  is a \"rtsp://\" URL)\n";
}

void setupNextSubsession(RTSPClient* rtspClient) {
    UsageEnvironment& env = rtspClient->envir(); // alias
    StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
  
    scs.subsession = scs.iter->next();
    if (scs.subsession != NULL)
    {
      if (!scs.subsession->initiate())
      {
        env << *rtspClient << "Failed to initiate the \"" << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n";
        setupNextSubsession(rtspClient);
      } else {
#ifdef DEBUG_PRINT_RTSP_ClIENT_INTO
      env << *rtspClient << "Initiated the \"" << *scs.subsession << "\" subsession (";
#endif
      if (scs.subsession->rtcpIsMuxed()) {
#ifdef DEBUG_PRINT_RTSP_ClIENT_INTO
	     env << "client port " << scs.subsession->clientPortNum();
#endif
      } else {
#ifdef DEBUG_PRINT_RTSP_ClIENT_INTO
	     env << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
#endif
      }
#ifdef DEBUG_PRINT_RTSP_ClIENT_INTO
      env << ")\n";
#endif
      // Continue setting up this subsession, by sending a RTSP "SETUP" command:
      rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False, REQUEST_STREAMING_OVER_TCP);
    }
    return;
  }

  // We've finished setting up all of the subsessions.  Now, send a RTSP "PLAY" command to start the streaming:
  if (scs.session->absStartTime() != NULL) {
    // Special case: The stream is indexed by 'absolute' time, so send an appropriate "PLAY" command:
    rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY, scs.session->absStartTime(), scs.session->absEndTime());
  } else {
    scs.duration = scs.session->playEndTime() - scs.session->playStartTime();
    rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY);
  }
}

void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString){
  do
   {
      UsageEnvironment& env = rtspClient->envir(); // alias
      StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs;

    if (resultCode != 0)
    {
      env << *rtspClient << "Failed to get a SDP description: " << resultString << "\n";// @leger date:2017-0726 
      env << *rtspClient << "Network is unreachable" << "\n";
      delete[] resultString;
      break;
    }
    char* const sdpDescription = resultString;
#ifdef DEBUG_PRINT_RTSP_ClIENT_INTO
    env << *rtspClient << "Got a SDP description:\n" << sdpDescription << "\n";
#endif
    // Create a media session object from this SDP description:
    scs.session = MediaSession::createNew(env, sdpDescription);
    delete[] sdpDescription; // because we don't need it anymore
    
    if (scs.session == NULL) 
    {
      env << *rtspClient << "Failed to create a MediaSession object from the SDP description: " << env.getResultMsg() << "\n";
      break;
    } 
    else if (!scs.session->hasSubsessions())
    {
      env << *rtspClient << "This session has no media subsessions (i.e., no \"m=\" lines)\n";
      break;
    }

    // Then, create and set up our data source objects for the session.  We do this by iterating over the session's 'subsessions',
    // calling "MediaSubsession::initiate()", and then sending a RTSP "SETUP" command, on each one.
    // (Each 'subsession' will have its own data source.)
    scs.iter = new MediaSubsessionIterator(*scs.session);
    setupNextSubsession(rtspClient);
    return;
  }while (0);

      // An unrecoverable error occurred with this stream.
      shutdownStream(rtspClient);
}

void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString) {
  do {
    UsageEnvironment& env = rtspClient->envir(); // alias
    StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias

    if (resultCode != 0) {
      env << *rtspClient << "Failed to set up the \"" << *scs.subsession << "\" subsession: " << resultString << "\n";
      break;
    }
#ifdef DEBUG_PRINT_RTSP_ClIENT_INTO
    env << *rtspClient << "Set up the \"" << *scs.subsession << "\" subsession (";
#endif
    if (scs.subsession->rtcpIsMuxed()) {
#ifdef DEBUG_PRINT_RTSP_ClIENT_INTO    
      env << "client port " << scs.subsession->clientPortNum();
#endif
    } else {
#ifdef DEBUG_PRINT_RTSP_ClIENT_INTO  
      env << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
#endif    
    }
#ifdef DEBUG_PRINT_RTSP_ClIENT_INTO    
    env << ")\n";
#endif
    // Having successfully setup the subsession, create a data sink for it, and call "startPlaying()" on it.
    // (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later,
    // after we've sent a RTSP "PLAY" command.)
    if (0 == strncmp(scs.subsession->mediumName(),"video",5))
    {
       do
       {
          if (0 ==  strcmp(scs.subsession->codecName(),"H264"))
          {
              scs.subsession->sink = H264MediaSink::createNew(env,*scs.subsession,scs.subsession->fmtp_spropparametersets(),rtspClient->url());
          }
       }
       while(0);
    }
    // perhaps use your own custom "MediaSink" subclass instead
    if (scs.subsession->sink == NULL)
     {
      //env << *rtspClient << "Failed to create a data sink for the \"" << *scs.subsession
	  //<< "\" subsession: " << env.getResultMsg() << "\n";
      break;
    }
#ifdef DEBUG_PRINT_RTSP_ClIENT_INTO 
    env << *rtspClient << "Created a data sink for the \"" << *scs.subsession << "\" subsession\n";
#endif
    scs.subsession->miscPtr = rtspClient; // a hack to let subsession handler functions get the "RTSPClient" from the subsession 
    
    scs.subsession->sink->startPlaying(*(scs.subsession->readSource()),
				       subsessionAfterPlaying, scs.subsession);
    // Also set a handler to be called if a RTCP "BYE" arrives for this subsession:
    if (scs.subsession->rtcpInstance() != NULL) {
      scs.subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, scs.subsession);
    }
  } while (0);
  delete[] resultString;

  // Set up the next subsession, if any:
  setupNextSubsession(rtspClient);
}

void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString) {
  Boolean success = False;
  do {
    UsageEnvironment& env = rtspClient->envir(); // alias
    StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs;

    if (resultCode != 0) {
      env << *rtspClient << "Failed to start playing session: " << resultString << "\n";
      break;
    }

    // Set a timer to be handled at the end of the stream's expected duration (if the stream does not already signal its end
    // using a RTCP "BYE").  This is optional.  If, instead, you want to keep the stream active - e.g., so you can later
    // 'seek' back within it and do another RTSP "PLAY" - then you can omit this code.
    // (Alternatively, if you don't want to receive the entire stream, you could set this timer for some shorter value.)
    if (scs.duration > 0) {
      unsigned const delaySlop = 2; // number of seconds extra to delay, after the stream's expected duration.  (This is optional.)
      scs.duration += delaySlop;
      unsigned uSecsToDelay = (unsigned)(scs.duration*1000000);
      scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)streamTimerHandler, rtspClient);
    }
#ifdef DEBUG_PRINT_RTSP_ClIENT_INTO
    env << *rtspClient << "Started playing session";
#endif
    if (scs.duration > 0) {
      env << " (for up to " << scs.duration << " seconds)";
    }
#ifdef DEBUG_PRINT_RTSP_ClIENT_INTO
    env << "...\n";
#endif
    success = True;
  } while (0);
  delete[] resultString;

  if (!success) {
    // An unrecoverable error occurred with this stream.
    shutdownStream(rtspClient);
  }
}

void subsessionAfterPlaying(void* clientData) {
  MediaSubsession* subsession = (MediaSubsession*)clientData;
  RTSPClient* rtspClient = (RTSPClient*)(subsession->miscPtr);

  // Begin by closing this subsession's stream:
  Medium::close(subsession->sink);
  subsession->sink = NULL;

  // Next, check whether *all* subsessions' streams have now been closed:
  MediaSession& session = subsession->parentSession();
  MediaSubsessionIterator iter(session);
  while ((subsession = iter.next()) != NULL) {
    if (subsession->sink != NULL) return; // this subsession is still active
  }

  // All subsessions' streams have now been closed, so shutdown the client:
  shutdownStream(rtspClient);
}
void subsessionByeHandler(void* clientData) {
  MediaSubsession* subsession = (MediaSubsession*)clientData;
  RTSPClient* rtspClient = (RTSPClient*)subsession->miscPtr;
  UsageEnvironment& env = rtspClient->envir(); // alias

  env << *rtspClient << "Received RTCP \"BYE\" on \"" << *subsession << "\" subsession\n";

  // Now act as if the subsession had closed:
  subsessionAfterPlaying(subsession);
}
void streamTimerHandler(void* clientData) {
  ourRTSPClient* rtspClient = (ourRTSPClient*)clientData;
  StreamClientState& scs = rtspClient->scs; // alias

  scs.streamTimerTask = NULL;

  // Shut down the stream:
  shutdownStream(rtspClient);
}
void shutdownStream(RTSPClient* rtspClient, int exitCode)
{
    UsageEnvironment& env = rtspClient->envir(); // alias
    StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
 
    // First, check whether any subsessions have still to be closed:
    if (scs.session != NULL)
    { 
        Boolean someSubsessionsWereActive = False;
        MediaSubsessionIterator iter(*scs.session);
        MediaSubsession* subsession;
  
        while ((subsession = iter.next()) != NULL) 
        {
            if (subsession->sink != NULL)
            {
               	Medium::close(subsession->sink);
              	subsession->sink = NULL;
                
        	      if (subsession->rtcpInstance() != NULL)
                {
        	         subsession->rtcpInstance()->setByeHandler(NULL, NULL); // in case the server sends a RTCP "BYE" while handling "TEARDOWN"
        	      }

      	       someSubsessionsWereActive = True;
               //LLDEBUG("someSubsessionsWereActive  is true");
            }
        }

        if (someSubsessionsWereActive)
         {
          // Send a RTSP "TEARDOWN" command, to tell the server to shutdown the stream.
          // Don't bother handling the response to the "TEARDOWN".
          rtspClient->sendTeardownCommand(*scs.session, NULL);
          LLDEBUG("sendTeardownCommand   is true");
        }
    }

      env << *rtspClient << "Closing the stream.\n"; // @leger date:2017-0726
      Medium::close(rtspClient);
      // Note that this will also cause this stream's "StreamClientState" structure to get reclaimed.

      if (--rtspClientCount == 0)
      {
          // The final stream has ended, so exit the application now.
          // (Of course, if you're embedding this code into your own application, you might want to comment this out,
          // and replace it with "eventLoopWatchVariable = 1;", so that we leave the LIVE555 event loop, and continue running "main()".)
          //LLDEBUG(",,,  -rtspClientCount ");
          exit(exitCode);
     }
}
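
As the comment inside shutdownStream() says, an embedded application would not call exit(); a sketch of that variant:

// Sketch: when the last client closes, set the watch variable instead of
// calling exit(). The blocking doEventLoop(&eventLoopWatchVariable) call
// then returns and live555_loop() goes on to reclaim the environment.
void OnLastClientClosed(char& eventLoopWatchVariable)
{
    eventLoopWatchVariable = 1;
}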

// Implementation of "ourRTSPClient":
ourRTSPClient* ourRTSPClient::createNew(UsageEnvironment& env, char const* rtspURL,
					int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum) {
  return new ourRTSPClient(env, rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum);
}

ourRTSPClient::ourRTSPClient(UsageEnvironment& env, char const* rtspURL,
			     int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum)
  : RTSPClient(env,rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum, -1) {
}

ourRTSPClient::~ourRTSPClient() {
}
// Implementation of "StreamClientState":

StreamClientState::StreamClientState()
  : iter(NULL), session(NULL), subsession(NULL), streamTimerTask(NULL), duration(0.0) {
}

StreamClientState::~StreamClientState() {
  delete iter;
  if (session != NULL) {
    // We also need to delete "session", and unschedule "streamTimerTask" (if set)
    UsageEnvironment& env = session->envir(); // alias

    env.taskScheduler().unscheduleDelayedTask(streamTimerTask);
    Medium::close(session);
  }
}
//---------------------------------------------------
H264MediaSink* H264MediaSink::createNew(UsageEnvironment& env, MediaSubsession& subsession, char const* sPropParameterSetsStr,char const* streamId) {
  return new H264MediaSink(env, subsession, sPropParameterSetsStr,streamId);
}

H264MediaSink::H264MediaSink(UsageEnvironment& env, MediaSubsession& subsession,char const* sPropParameterSetsStr, char const* streamId)
  :MediaSink(env), 
  fSubsession(subsession),
  sps(nullptr),
  pps(nullptr),
  spsSize(0),
  ppsSize(0),
  bFirstFrame(False),
  extraDataSize(0),
  m_pDecode(nullptr)
{
  fStreamId = strDup(streamId);
  // +256 bytes of headroom so SetParameterStr()/afterGettingFrame() can
  // prepend start codes and SPS/PPS without overflowing (assumes the
  // parameter sets never exceed 256 bytes).
  fReceiveBuffer = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE + 256];
  fSPropParameterSetsStr[0] = sPropParameterSetsStr;
  m_pDecode = new API::H264DecodeDev;
  if (m_pDecode != nullptr){
    m_pDecode->InitializeDev(n_pRtspCall,IMAGE_WIDTH,IMAGE_HEIGHT);
    sleep(2);
  }
}
H264MediaSink::~H264MediaSink(){
  delete[] fReceiveBuffer;
  delete[] fStreamId;
  if (m_pDecode != nullptr){
    delete   m_pDecode;
    m_pDecode = nullptr;  
  }
}
int  H264MediaSink::SetParameterStr()
{
    unsigned char const start_code[4] = {0x00, 0x00, 0x00, 0x01};
    if (nullptr == fReceiveBuffer){
       LLERROR("fReceiveBuffer  is nullptr");
       return -1;
    }
    if (nullptr == fSPropParameterSetsStr[0]){
       LLERROR("SPropParameter  is nullptr");
       return -2;
    }
    extraDataSize = 0;
    if (!bFirstFrame){
      // Before the first frame, prepend the SPS/PPS parameter sets parsed from
      // the SDP "sprop-parameter-sets" attribute, each behind an Annex-B start
      // code, so the decoder can configure itself.
      unsigned numSPropRecords;
      SPropRecord* sPropRecords = parseSPropParameterSets(fSPropParameterSetsStr[0], numSPropRecords);
      for (unsigned i = 0; i < numSPropRecords; i++)
      {
          if (sPropRecords[i].sPropLength == 0) continue;
          u_int8_t nal_unit_type = (sPropRecords[i].sPropBytes[0]) & 0x1F;
          if (nal_unit_type == 7 || nal_unit_type == 8) // SPS or PPS
          {
              memmove(fReceiveBuffer + extraDataSize, start_code, 4);
              extraDataSize += 4;
              memmove(fReceiveBuffer + extraDataSize, sPropRecords[i].sPropBytes, sPropRecords[i].sPropLength);
              extraDataSize += sPropRecords[i].sPropLength;
          }
      }
      bFirstFrame = True;
      delete[] sPropRecords;
    }
    // Every frame gets a start code in front of its NAL payload.
    memmove(fReceiveBuffer + extraDataSize, start_code, 4);
    return extraDataSize + 4;
}
void H264MediaSink::afterGettingFrame(void* clientData, unsigned frameSize, 
  unsigned numTruncatedBytes,
  struct timeval presentationTime,
  unsigned durationInMicroseconds)
{
  H264MediaSink* sink = (H264MediaSink*)clientData;
  sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime);
}

void H264MediaSink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
          struct timeval presentationTime) {
  // We've just received a frame of data.  (Optionally) print out information about it:
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
  if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
  envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
  if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
  char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
  sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
  envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
  if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
    envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
  }
#ifdef DEBUG_PRINT_NPT
  envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
#endif
  envir() << "\n";
#endif
  if (0 ==  strcmp(fSubsession.codecName(),"H264"))
  {
    int SetSize = SetParameterStr();
    memmove(fReceiveBuffer + SetSize,fReceiveBuffer,frameSize);
    m_pDecode->ProcessDecode(fReceiveBuffer,SetSize + frameSize,fStreamId);
  }
  //Then continue, to request the next frame of data:
  continuePlaying();
}
Boolean H264MediaSink::continuePlaying()
{
  if (fSource == NULL) return False; // sanity check (should not happen)
  // Request the next frame of data from our input source.  "afterGettingFrame()" will get called later, when it arrives:
  fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE,
                        afterGettingFrame, this,
                        onSourceClosure, this);
  return True;
}
//----2017-0913-----------------------------------
API_RTSPSession::API_RTSPSession():m_progName("TestRtsp"),
m_rtspUrl(""),
m_debugLevel(0),
m_nStatus(0),
m_ndataflag(0),
m_rtspClient(nullptr),
eventLoopWatchVariable(0),
m_scheduler(nullptr),
m_env(nullptr)
{
}
API_RTSPSession::~API_RTSPSession(){
}
int API_RTSPSession::Initialize(std::string& rtspUrl,int& nIndex){
    if (rtspUrl != ""){
       m_rtspUrl = rtspUrl;
    }
    else{
      return -1;
    }
    if (nIndex < 0){
      return -2;
    }
    nSessionId = nIndex;
    //LLDEBUG("Init avcodec end ..");
    return 0;
}
void API_RTSPSession::SubStreamThread(void* arg){
   API_RTSPSession* pThis = (API_RTSPSession*)arg;
   while(true)
   {
      sleep(1); // poll once per second
      if (pThis->m_ndataflag)
      {
          pThis->m_nStatus = 1; // data has been seen: mark the stream healthy
      }
   }
   return ;
}
int  API_RTSPSession::startRTSPStream(LPGetFramePtr pCallBack)
{
    if(0 != Initlive555()){
      LLERROR("Initlive555 failed! Please try again");
      return -1;
    }
    if (rtspClientCount < 1){
      n_pRtspCall = pCallBack;
    }
    
    eventLoopWatchVariable = 0; 
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_DETACHED);
    pthread_create(&nSessionPid,&attr,live555_Thread,this);
    return 0; 
}
int  API_RTSPSession::stopRTSP(){
    eventLoopWatchVariable = 1;
    pthread_cancel(nSessionPid); // stop the session thread
    return 0;
}
void API_RTSPSession::Set_live555task(int Time,void* Task,void* arg)
{
    API_RTSPSession* This = (API_RTSPSession*)arg;
/*    This->m_SetMutex.Lock();
    This->liveTaskParam.time =  Time;
    This->liveTaskParam.task =  Task;
    This->liveTaskParam.arg  =   arg;
    This->m_SetMutex.UnLock();*/
    return ;
}
int API_RTSPSession::Initlive555(){
    m_scheduler  = BasicTaskScheduler::createNew();
    if (nullptr == m_scheduler){
       return -1;
    }
    m_env        = BasicUsageEnvironment::createNew(*m_scheduler);
    if (nullptr == m_env){
      return -2;
    }
    m_scheduler->scheduleDelayedTask(3000, Play_video, (void*)this); // delay is in microseconds (3000 µs ≈ 3 ms)
    //LLDEBUG("live555 pthread_exit");
    //pthread_exit(nullptr);
    return 0;
}
void* API_RTSPSession::live555_Thread(void* arg)
{
    API_RTSPSession*   pThis = (API_RTSPSession*)arg;
    pThis->live555_loop();
    pthread_exit(nullptr);
}
void API_RTSPSession::live555_loop()
{
    //LLDEBUG("live555_loop");
    m_env->taskScheduler().doEventLoop(&eventLoopWatchVariable);
    if (m_env != nullptr){
      Boolean bEnv = m_env->reclaim();
      /* The environment may only be released via reclaim(); it must not be
         deleted directly (virtual destructor). When reclaim() returns True,
         the object has already been deleted. */
      (void)bEnv;
      m_env = nullptr;
    }
    if (m_scheduler != nullptr){
      delete  m_scheduler; 
      m_scheduler = nullptr;
    }
    return ;
}
// static functions -----------------------------
void API_RTSPSession::live555_task(void* clientData)
{
    live555videoParam* para = (live555videoParam*)clientData;
/*  API_RTSPSession*   This = (API_RTSPSession*)(para->local);
    if (This->liveTaskParam.time == 0)
    {
       return;
    }
    if (This->liveTaskParam.task == nullptr)
    {
       return;
    }
    (para->env)->taskScheduler().scheduleDelayedTask(This->liveTaskParam.time, (TaskFunc*)(This->liveTaskParam.task), clientData);*/
    //LLDEBUG("set live555 task"); 
    return ;
}
//void API_RTSPSession::taskInterrupKeepAlive(void *clientData)
//{
     //UsageEnvironment* env = (UsageEnvironment*)clientData;
     //LLDEBUG(": Keep ");
//}
void API_RTSPSession::Play_video(void* clientData)
{
    //LLDEBUG("Play_video address");
    //live555videoParam* para = (live555videoParam*)clientData;
    API_RTSPSession*  pThis = (API_RTSPSession*)clientData;
    pThis->openURL(*(pThis->m_env),pThis->m_progName.c_str(), pThis->m_rtspUrl.c_str());
    return ;
}
//---------------------------------------------
int API_RTSPSession::openURL(UsageEnvironment& env, char const* progName, char const* rtspURL)
{
    m_rtspClient  = ourRTSPClient::createNew(env, rtspURL, m_debugLevel, progName);
    if(m_rtspClient == nullptr){
        env << "Failed to create a RTSP  client for URL \"" << rtspURL << "\": " << env.getResultMsg() << "\n";
        return -1;
    }  
    m_rtspClient->sendDescribeCommand(continueAfterDESCRIBE); // begin: establish the RTSP session
    rtspClientCount++;
    return 0;
}//--------------------------
API_RtspImage.hpp

#ifndef			_API_RTSP_IMAGE_HPP_
#define			_API_RTSP_IMAGE_HPP_

#include   		"../MenethilAPI/API_Mutex.hpp"
#include 		"../API_RTSPClient.hpp"
namespace API
{
typedef struct _SP_CAMERA_STATE{
	int       	m_nInterval;    // current frame interval
   	int       	m_nNormal;    	// default frame interval
   	bool      	m_bGuard;  		// whether the camera is in guard (watch) mode
}SP_CAMERA_STATE,*PSP_CAMERA_STATE;
	class API_RTSP_IMAGE
	{
	public:
		API_RTSP_IMAGE();
		virtual ~API_RTSP_IMAGE();
	private:
		PSP_CAMERA_STATE 						m_pCameraState;
		double									m_dbRisk;		// similarity / risk threshold
		std::vector<std::string> 				m_Url_v;		// container of RTSP URLs
		int 									m_nCurMax;		// current number of cameras
		API::API_Mutex*							m_pMutexImage;		// per-camera image mutex
		std::queue<PSP_NEURALNETWORK_IMAGE>*	m_pQueueImage;		// per-camera image queue
	public:	
		virtual int 				Initialize(const char* StrRtspIp);
		virtual bool				RtspConnect(int nIndex);
		virtual int 				ConnectClosed(int nIndex);

 		virtual void 				SetInterval(int nInterval);
 		virtual void 				SelectRisk(int nIndex,double dbRisk);
 		virtual void 				SetCameraInterval(int nIndex, bool bInterval);
 		virtual bool 				SelectCameraState(int nIndex);
 		virtual void 				SetCameraState(int nIndex, bool bGuard);
 	public:
 		int       					SelectRtspCameraIndex(const char* CameraIp);	
		char*     					RegexRtspCameraIp(const char* CameraIp);
 		int 						GetCameraMaxNum();
 		bool 						ReadRtspConfig(const char* strConfig);
 		PSP_NEURALNETWORK_IMAGE 	MallocImageMemory(u_int8_t *pImage, int nNumber);
 	    void 						DelImage(PSP_NEURALNETWORK_IMAGE* ppImage);
 	    bool 						GetRTSPImage(int nIndex, PSP_NEURALNETWORK_IMAGE* ppImage);
 	    void 						GetCurrTime(char* strTime);
 	public:
 		API::API_Mutex  			m_DataMutex;
 	public:
 		API_RTSPSession*			m_nRtspRession;	
 	public:
 		static void 				CallBackdata(u_int8_t* pImage ,int& frame,const char* CameraIp);
 	public:
 		void 						IplImg2unchar(const char* filePath, unsigned char *pImg, int nW, int nH);
	};
};

#endif
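
A usage sketch for the class above (hypothetical; PSP_NEURALNETWORK_IMAGE and IMAGE_SIZE come from the project's other headers):

// Read the URL list, connect every camera, then poll camera 0 for frames.
API::API_RTSP_IMAGE grabber;
if (grabber.Initialize("rtspconfig.ini") == 0) {
    for (int i = 0; i < grabber.GetCameraMaxNum(); ++i)
        grabber.RtspConnect(i);
    PSP_NEURALNETWORK_IMAGE pImg = nullptr;
    for (;;) {
        if (grabber.GetRTSPImage(0, &pImg)) {
            // ... consume pImg->pImage (RGB24, pImg->nLength bytes) ...
            grabber.DelImage(&pImg);
        }
    }
}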


API_RtspImage.cpp

#include "API_RtspImage.hpp"
#include <regex>
static API::API_RTSP_IMAGE *g_This_Rtsp = nullptr;

API::API_RTSP_IMAGE::API_RTSP_IMAGE():
m_pCameraState(nullptr),
m_pMutexImage(nullptr),
m_pQueueImage(nullptr),
m_nRtspRession(nullptr),
m_dbRisk(0.01),
m_nCurMax(0)
{
      g_This_Rtsp = this;
}
API::API_RTSP_IMAGE::~API_RTSP_IMAGE(){
  if (m_pCameraState != nullptr){
      delete[] m_pCameraState;
      m_pCameraState = nullptr;
  }
  if (m_pMutexImage != nullptr){
    delete[] m_pMutexImage;
    m_pMutexImage = nullptr;
  }
  if (m_pQueueImage != nullptr){
    delete[] m_pQueueImage;
    m_pQueueImage = nullptr;
  }
  if (m_nRtspRession != nullptr){
    delete[] m_nRtspRession;
    m_nRtspRession = nullptr;
  }
}
void API::API_RTSP_IMAGE::GetCurrTime(char* strTime){
    struct timeval    tv;
    struct timezone   tz;
    struct tm*    p;
    gettimeofday(&tv, &tz);
    p = localtime(&tv.tv_sec);
    sprintf(strTime, "%02d:%02d:%02d", p->tm_hour, p->tm_min, p->tm_sec);
}
int API::API_RTSP_IMAGE::SelectRtspCameraIndex(const char* CameraIp){
    int nIndex = -1;
    for(int nI = 0; nI < m_Url_v.size(); ++nI)
    {
      char* strCamera = RegexRtspCameraIp(CameraIp);
      char* strCamera1 = RegexRtspCameraIp(m_Url_v[nI].c_str());

       //std::cout<< "Select Camera1 : " << strCamera << std::endl;
       //std::cout<< "Select Camera2 : " << strCamera1 << std::endl;

      if(!strcmp(strCamera, strCamera1)){
             nIndex = nI;
            delete[] strCamera;
            strCamera = nullptr;
            delete[] strCamera1;
            strCamera1 = nullptr;
            break;
      }
      if (strCamera){  
        delete[] strCamera;
        strCamera = nullptr;
      }
      if (strCamera1){
        delete[] strCamera1;
        strCamera1 = nullptr;
      }

    }
    return nIndex; 
}

// Extract the IP address (192.168.xx.xx) from an RTSP URL
char* API::API_RTSP_IMAGE::RegexRtspCameraIp(const char* CameraIp){
    char   *strLine = nullptr;
    int    MatchState = -1;
    std::string pattern{"(\\d{1,3})[.](\\d{1,3}[.](\\d{1,3})[.])(\\d{1,3})"}; // @leger 2017-0811: revised the pattern

    //^(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|[1-9])\\.
    //(\\d{1,3}):(\\d{1,3}):(\\d{1,3}):(\\d{1,3})

    //[0-9].[0-9].[0-9].[0-9][^/,:]{1,}
    std::regex re(pattern);

    std::string m_str(CameraIp);
    // the raw string before matching still contains the port, channel and
    // sub-stream, e.g. rtsp://192.168.200.52:554/channel1/2
    std::smatch results;
      
    if(std::regex_search(m_str,results,re)){
      MatchState = 0;

      strLine = new char [strlen(CameraIp) + 1];
      std::string str1;
      str1 = results.str();
      strcpy(strLine,str1.c_str());   
    }
    else{
        LLERROR("[%d] regex_search failed", MatchState);
    }
    return strLine; // e.g. "192.168.200.xx", or nullptr when no match
}
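
A usage sketch for the extractor above, called on an API_RTSP_IMAGE instance (here `img`); the caller owns the returned buffer:

char* ip = img.RegexRtspCameraIp("rtsp://192.168.200.52:554/channel1/2");
if (ip) {
    // ip now holds "192.168.200.52"
    delete[] ip;
    ip = nullptr;
}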

bool API::API_RTSP_IMAGE::ReadRtspConfig(const char* strConfig){
  //int     nIndex = 0;
  char    strTime[64]={0};
  FILE*   pFile = fopen(strConfig, "r");  // open the camera config file

  if(!pFile)
  {
      GetCurrTime(strTime);
      LLERROR("[%s] Read Camera Config File False", strTime);
      return false;
  }
  char    strLine[1024]={0};
  int     nLength = 1024;

  m_Url_v.clear();
  
  while (!feof(pFile)) // read the URL list line by line
  {
    usleep(3*100);
    if(!fgets(strLine, nLength, pFile)) // read one line
    {
        break;
    }

    for (size_t i = 0; i < strlen(strLine); ++i)  // strip the newline
    {
      if(strLine[i] == '\n')
      {
        strLine[i] = '\0';
      }
    }
     std::string m_str(strLine); // the raw line still contains port and channel, e.g. rtsp://192.168.200.52:554/channel1/2

     m_Url_v.push_back(m_str); // stored as-is, for openURL() later
  }
  fclose(pFile);    // close the file
  return true;
}
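
For reference, the config file this expects is simply one RTSP URL per line, e.g.:

rtsp://192.168.200.52:554/channel1/2
rtsp://192.168.200.53:554/channel1/2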

void API::API_RTSP_IMAGE::SetInterval(int nInterval)
{
    for (int nIndex = 0; nIndex < m_nCurMax ; ++nIndex)
    {
        m_pCameraState[nIndex].m_nInterval = nInterval;
        m_pCameraState[nIndex].m_nNormal = nInterval;
        m_pCameraState[nIndex].m_bGuard  = true;
    }
    return;
}
void API::API_RTSP_IMAGE::SelectRisk(int nIndex,double dbRisk){
    m_dbRisk = dbRisk;
    return ;
}
// set a camera's frame interval
void API::API_RTSP_IMAGE::SetCameraInterval(int nIndex, bool bInterval) {
   m_pCameraState[nIndex].m_nInterval = bInterval ? 1 : m_pCameraState[nIndex].m_nNormal;
   return;
}
// query whether a camera is in guard mode
bool API::API_RTSP_IMAGE::SelectCameraState(int nIndex){
    return m_pCameraState[nIndex].m_bGuard;
}
// set whether a camera is in guard (watch) mode
void API::API_RTSP_IMAGE::SetCameraState(int nIndex, bool bGuard) {
    if(bGuard){
      m_pCameraState[nIndex].m_nInterval = m_pCameraState[nIndex].m_nNormal;
    }
    else{
      m_pCameraState[nIndex].m_nInterval = 1;
    }
    m_pCameraState[nIndex].m_bGuard = bGuard;
    return;
}
/*void  API::API_RTSP_IMAGE::SetTimer()
{
    API::API_RTSP_IMAGE* pThis = g_This_Rtsp;

    for (int nIndex = 0; nIndex < pThis->m_nCurMax; ++nIndex)
    {
        if (pThis->m_nRtspRession[nIndex].m_ndataflag) // data seen: connection is healthy
        {
            pThis->m_nRtspRession[nIndex].m_ndataflag = 0;
        }
        else
        {
            flag += 1;
            // connection lost: stop the client and retry after 8 ticks
            pThis->m_nRtspRession[nIndex].stopRTSPClient();
            pThis->m_nRtspRession[nIndex].m_nStatus = -1;
            if (flag >= 8)
            {
               pThis->m_nRtspRession[nIndex].startRTSPClient();
               flag = 0;
            }
        }
    }
    return;
}*/

// decoder frame callback
void API::API_RTSP_IMAGE::CallBackdata(u_int8_t* pImage ,int& frame,const char* CameraIp) {
    API::API_RTSP_IMAGE* pThis = g_This_Rtsp;

    char            strTime[64]={0}; 
    pThis->GetCurrTime(strTime);
    int nIndex = pThis->SelectRtspCameraIndex(CameraIp);
    if (nIndex < 0) return;

    //pThis->m_DataMutex.Lock();
    //pThis->m_nRtspRession[nIndex].m_ndataflag = 1; // data seen
    //pThis->m_DataMutex.UnLock();
    
    if (!(frame % pThis->m_pCameraState[nIndex].m_nInterval)) // forward only every m_nInterval-th frame
    {
        if(pThis->m_pQueueImage[nIndex].empty()){
            pThis->m_pMutexImage[nIndex].Lock();
            pThis->m_pQueueImage[nIndex].push(pThis->MallocImageMemory(pImage, nIndex));
            pThis->m_pMutexImage[nIndex].UnLock();
        }
    }
    //LLDEBUG("data callback : %s",strTime);
    return;
}
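A worked example of the gating above: with m_nInterval == 5, only every fifth frame (frame % 5 == 0) is queued; SetCameraInterval(nIndex, true) drops the interval to 1 so every frame is kept while the camera is under close watch.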
// allocate an image record for the processing queue
PSP_NEURALNETWORK_IMAGE API::API_RTSP_IMAGE::MallocImageMemory(u_int8_t *pImage, int nNumber) {
    PSP_NEURALNETWORK_IMAGE   pStruct = new SP_NEURALNETWORK_IMAGE;
    if(pStruct){
      memset(pStruct, 0, sizeof(SP_NEURALNETWORK_IMAGE));
      char *RtspIP = RegexRtspCameraIp(m_Url_v[nNumber].c_str());
      strcpy(pStruct->strCamera, RtspIP);
      pStruct->pImage = new unsigned char[IMAGE_SIZE];
      memmove(pStruct->pImage, pImage, IMAGE_SIZE); // memmove tolerates overlapping regions
     
      pStruct->nNumber      = nNumber;
      pStruct->nFrameSpeed  = m_pCameraState[nNumber].m_nInterval - 1;

      pStruct->nLength     = IMAGE_SIZE;
      pStruct->dbRisk      = m_dbRisk;
      delete[] RtspIP;
      RtspIP = nullptr;
    }
    else{
       return nullptr;
    }
    return pStruct;
}
// release an image record
void API::API_RTSP_IMAGE::DelImage(PSP_NEURALNETWORK_IMAGE* ppImage) {
    PSP_NEURALNETWORK_IMAGE   pImage = *ppImage;
    if(pImage){  
      //LLDEBUG("Begin delete ...   .. ");
      delete[] pImage->pImage;
      delete pImage;
      *ppImage = nullptr;     
    }
    return ;
}

int API::API_RTSP_IMAGE::GetCameraMaxNum(){
   return m_nCurMax;
}

bool API::API_RTSP_IMAGE::GetRTSPImage(int nIndex, PSP_NEURALNETWORK_IMAGE* ppImage){
  // check whether an image is queued
  if(m_pQueueImage[nIndex].empty())
  {
      return false;
  }
  m_pMutexImage[nIndex].Lock();
  *ppImage = m_pQueueImage[nIndex].front(); // take the oldest image
  m_pQueueImage[nIndex].pop();
  m_pMutexImage[nIndex].UnLock();
  return true;
}
void API::API_RTSP_IMAGE::IplImg2unchar(const char* filePath, unsigned char *pImg, int nW, int nH)//@leger 2017-07-22
{
    if(nullptr == filePath)return;
    IplImage* image = cvLoadImage(filePath);
    if (nullptr == image) return;
  
    for (int j = 0; j< nH; j++) 
    {
       memcpy(&pImg[j*nW*3],&image->imageData[(nH-j-1)*nW*3],nW*3);
    }
    cvReleaseImage(&image);
    return ;
}
int API::API_RTSP_IMAGE::Initialize(const char* StrRtspIp)
{
    if (!ReadRtspConfig(StrRtspIp)) // e.g. "rtspconfig.ini"
    {
        LLERROR("Read RtspConfig failed!");
        return -1;
    }
    m_nCurMax = m_Url_v.size();
    //LLDEBUG(".......CurMax:%d",m_nCurMax);
    if (m_nCurMax){
        m_pCameraState  = new SP_CAMERA_STATE[m_nCurMax];
        m_pMutexImage   = new API::API_Mutex[m_nCurMax];       // per-camera image mutex
        m_pQueueImage   = new std::queue<PSP_NEURALNETWORK_IMAGE>[m_nCurMax];  // per-camera image queue
        m_nRtspRession  = new API_RTSPSession[m_nCurMax];
    }else{
      LLERROR("URL read failed !!!");
      return -2;
    }
    if((m_pCameraState == nullptr) || (m_nRtspRession == nullptr))return -3;
    if((m_pMutexImage== nullptr) || (m_pQueueImage == nullptr))return -4;

    for (int nIndex = 0; nIndex < m_Url_v.size(); ++nIndex)
    {
        m_nRtspRession[nIndex].Initialize(m_Url_v[nIndex],nIndex);
    }
    return 0;
}
bool  API::API_RTSP_IMAGE::RtspConnect(int nIndex){
    if(nIndex < 0){
      LLERROR("Connect nIndex is failed");
      return false;
    }
    //LLDEBUG(".......Connect:%d",nIndex);
    m_nRtspRession[nIndex].startRTSPStream(CallBackdata);
    return true;
}
int API::API_RTSP_IMAGE::ConnectClosed(int nIndex){
    m_nRtspRession[nIndex].stopRTSP();
    return 0;
}




