live555 RTSP Client Demo with Translated Comments



// Copyright (c) 1996-2018, Live Networks, Inc.  All rights reserved
// A demo application, showing how to create and run a RTSP client (that can potentially receive multiple streams concurrently).
//
// NOTE: This code - although it builds a running application - is intended only to illustrate how to develop your own RTSP
// client application.  For a full-featured RTSP client application - with much more functionality, and many options - see
// "openRTSP": http://www.live555.com/openRTSP/


/// Forward function definitions: ///

// RTSP 'response handlers': //

void continueAfterDESCRIBE(RTSPClient *rtspClient, int resultCode, char *resultString);

void continueAfterSETUP(RTSPClient *rtspClient, int resultCode, char *resultString);

void continueAfterPLAY(RTSPClient *rtspClient, int resultCode, char *resultString);

// Other event handler functions: //

// called when a stream's subsession (e.g., audio or video substream) ends
void subsessionAfterPlaying(void *clientData);

// called when a RTCP "BYE" is received for a subsession
void subsessionByeHandler(void *clientData);

// called at the end of a stream's expected duration (if the stream has not already signaled its end using a RTCP "BYE")
void streamTimerHandler(void *clientData);

// The main streaming routine (for each "rtsp://" URL):
void openURL(UsageEnvironment &env, char const *progName, char const *rtspURL);

// Used to iterate through each stream's 'subsessions', setting up each one:
void setupNextSubsession(RTSPClient *rtspClient);

// Used to shut down and close a stream (including its "RTSPClient" object):
void shutdownStream(RTSPClient *rtspClient, int exitCode = 1);

// A function that outputs a string that identifies each stream (for debugging output).  Modify this if you wish:
UsageEnvironment &operator<<(UsageEnvironment &env, const RTSPClient &rtspClient) {
    return env << "[URL:\"" << rtspClient.url() << "\"]: ";
}

// A function that outputs a string that identifies each subsession (for debugging output).  Modify this if you wish:
UsageEnvironment &operator<<(UsageEnvironment &env, const MediaSubsession &subsession) {
    return env << subsession.mediumName() << "/" << subsession.codecName();
}

void usage(UsageEnvironment &env, char const *progName) {
    env << "Usage: " << progName << " <rtsp-url-1> ... <rtsp-url-N>\n";
    env << "\t(where each <rtsp-url-i> is a \"rtsp://\" URL)\n";
}

char eventLoopWatchVariable = 0;

int live555_main(int argc, char **argv) {

    // Begin by setting up our usage environment:
    TaskScheduler *scheduler = BasicTaskScheduler::createNew();
    UsageEnvironment *env = BasicUsageEnvironment::createNew(*scheduler);

    // We need at least one "rtsp://" URL argument:
    if (argc < 2) {
        usage(*env, argv[0]);
        return 1;
    }

    // There are argc-1 URLs: argv[1] through argv[argc-1].  Open and start streaming each one:
    for (int i = 1; i <= argc - 1; ++i) {
        openURL(*env, argv[0], argv[i]);
    }

    // All subsequent activity takes place within the event loop:
    // This function call does not return, unless, at some point in time, "eventLoopWatchVariable" gets set to something non-zero.
    env->taskScheduler().doEventLoop(&eventLoopWatchVariable);

    return 0;

    // If you choose to continue the application past this point (i.e., if you comment out the "return 0;" statement above),
    // and if you don't intend to do anything more with the "TaskScheduler" and "UsageEnvironment" objects,
    // then you can also reclaim the (small) memory used by these objects by uncommenting the following code:
    /*
      env->reclaim(); env = NULL;
      delete scheduler; scheduler = NULL;
    */
}
// Define a class to hold per-stream state that we maintain throughout each stream's lifetime:

class StreamClientState {

public:
    StreamClientState();

    virtual ~StreamClientState();

public:
    MediaSubsessionIterator *iter;
    MediaSession *session;
    MediaSubsession *subsession;
    TaskToken streamTimerTask;
    double duration;
};

// If you're streaming just a single stream (i.e., just from a single URL, once), then you can define and use just a single
// "StreamClientState" structure, as a global variable in your application.  However, because - in this demo application - we're
// showing how to play multiple streams, concurrently, we can't do that.  Instead, we have to have a separate "StreamClientState"
// structure for each "RTSPClient".  To do this, we subclass "RTSPClient", and add a "StreamClientState" field to the subclass:

class ourRTSPClient : public RTSPClient {
public:
    static ourRTSPClient *createNew(UsageEnvironment &env, char const *rtspURL,
                                    int verbosityLevel = 0,
                                    char const *applicationName = NULL,
                                    portNumBits tunnelOverHTTPPortNum = 0);

protected:
    ourRTSPClient(UsageEnvironment &env, char const *rtspURL,
                  int verbosityLevel, char const *applicationName,
                  portNumBits tunnelOverHTTPPortNum);

    // called only by createNew();
    virtual ~ourRTSPClient();

public:
    StreamClientState scs;
    void *param; // opaque user pointer (not used in the code shown in this post)
};

// Define a data sink (a subclass of "MediaSink") to receive the data for each subsession (i.e., each audio or video 'substream').
// In practice, this might be a class (or a chain of classes) that decodes and then renders the incoming audio or video.
// Or it might be a "FileSink", for outputting the received data into a file (as is done by the "openRTSP" application).
// In this example code, however, we define a simple 'dummy' sink that receives incoming data, but does nothing with it.

class DummySink : public MediaSink {

public:

    static DummySink *createNew(UsageEnvironment &env,
                                MediaSubsession &subsession, // identifies the kind of data that's being received
                                char const *streamId = NULL); // identifies the stream itself (optional)

private:
    DummySink(UsageEnvironment &env, MediaSubsession &subsession, char const *streamId);

    // called only by "createNew()"
    virtual ~DummySink();

    static void afterGettingFrame(void *clientData, unsigned frameSize,
                                  unsigned numTruncatedBytes,
                                  struct timeval presentationTime,
                                  unsigned durationInMicroseconds);

    void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
                           struct timeval presentationTime, unsigned durationInMicroseconds);

private:
    // redefined virtual functions:
    virtual Boolean continuePlaying();

private:
    u_int8_t *fReceiveBuffer;
    MediaSubsession &fSubsession;
    char *fStreamId;
public:
    void *param; // opaque user pointer; set to point back at the owning RTSPClient in continueAfterSETUP()
};

#define RTSP_CLIENT_VERBOSITY_LEVEL 1 // by default, print verbose output from each "RTSPClient"

static unsigned rtspClientCount = 0; // Counts how many streams (i.e., "RTSPClient"s) are currently in use.

void openURL(UsageEnvironment &env, char const *progName, char const *rtspURL) {

    // Begin by creating a "RTSPClient" object.  Note that there is a separate "RTSPClient" object for each stream that we wish
    // to receive (even if more than one stream uses the same "rtsp://" URL).
    RTSPClient *rtspClient = ourRTSPClient::createNew(env, rtspURL, RTSP_CLIENT_VERBOSITY_LEVEL,
                                                      progName);
    if (rtspClient == NULL) {
        env << "Failed to create a RTSP client for URL \"" << rtspURL << "\": "
            << env.getResultMsg() << "\n";
        return;
    }

    ++rtspClientCount;

    // SDP (Session Description Protocol) describes a streaming session; it is mainly used for media negotiation between the two session endpoints.

    // Next, send a RTSP "DESCRIBE" command, to get a SDP description for the stream.
    // Note that this command - like all RTSP commands - is sent asynchronously; we do not block, waiting for a response.
    // Instead, the following function call returns immediately, and we handle the RTSP response later, from within the event loop:
    rtspClient->sendDescribeCommand(continueAfterDESCRIBE);
}
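
// For reference (illustrative only, not taken from any particular stream), a SDP description returned by
// "DESCRIBE" looks roughly like this; each "m=" line becomes one MediaSubsession below:
//
//   v=0
//   s=Session streamed by a live555 server
//   t=0 0
//   m=video 0 RTP/AVP 96
//   a=rtpmap:96 H264/90000
//   a=control:track1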


void *live555_recv_task(void *param) {

    LOGD("live555_recv_task called!");

    //    pthread_detach(pthread_self());

    //    char url[] = "rtsp://192.168.1.122:8554/xiongmao.264";
    char url[] = "rtsp://192.168.1.72/hdmi21";

    TaskScheduler *scheduler = BasicTaskScheduler::createNew();
    UsageEnvironment *env = AndroidUsageEnvironment::createNew(*scheduler); // presumably the author's own UsageEnvironment subclass (live555's stock one is BasicUsageEnvironment)

    /// Open the stream (more than one URL could be opened here) ///

    openURL(*env, "live555", url);

    /// Done opening the stream ///


    env->taskScheduler().doEventLoop(&eventLoopWatchVariable);



    // The RTSP client is exiting: reclaim the environment and delete the scheduler
    env->reclaim();
    env = NULL;
    delete scheduler;
    scheduler = NULL;

    return NULL;
}
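
// A minimal sketch (not part of the original post) of how another thread might stop the receive task above:
// "doEventLoop()" only returns once "eventLoopWatchVariable" becomes non-zero (see the comments in
// live555_main() and shutdownStream()).  The function name "live555_stop_task" is a hypothetical addition.
void live555_stop_task() {
    eventLoopWatchVariable = 1; // makes doEventLoop() return, so live555_recv_task() can reclaim its environment
}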


// Handle the response to the "DESCRIBE" command, which carries the stream's SDP description.

// Implementation of the RTSP 'response handlers':
void continueAfterDESCRIBE(RTSPClient *rtspClient, int resultCode, char *resultString) {

    do {

        UsageEnvironment &env = rtspClient->envir(); // alias
        StreamClientState &scs = ((ourRTSPClient *) rtspClient)->scs; // alias

        if (resultCode != 0) {
            env << *rtspClient << "Failed to get a SDP description: " << resultString << "\n";
            delete[] resultString;
            break;
        }

        char *const sdpDescription = resultString;
        env << *rtspClient << "Got a SDP description:\n" << sdpDescription << "\n";

        // Create a media session object from this SDP description:
        scs.session = MediaSession::createNew(env, sdpDescription);

        delete[] sdpDescription; // because we don't need it anymore

        if (scs.session == NULL) {
            env << *rtspClient
                << "Failed to create a MediaSession object from the SDP description: "
                << env.getResultMsg() << "\n";
            break;
        } else if (!scs.session->hasSubsessions()) {
            env << *rtspClient << "This session has no media subsessions (i.e., no \"m=\" lines)\n";
            break;
        }

        // Then, create and set up our data source objects for the session.  We do this by iterating over the session's 'subsessions',
        // calling "MediaSubsession::initiate()", and then sending a RTSP "SETUP" command, on each one.
        // (Each 'subsession' will have its own data source.)
        scs.iter = new MediaSubsessionIterator(*scs.session);
        setupNextSubsession(rtspClient);
        return;
    } while (0);

    // An unrecoverable error occurred with this stream.
    shutdownStream(rtspClient);
}

// By default, we request that the server stream its data using RTP/UDP.
// If, instead, you want to request that the server stream via RTP-over-TCP, change the following to True:
#define REQUEST_STREAMING_OVER_TCP False

// Set up the next subsession:
void setupNextSubsession(RTSPClient *rtspClient) {
    UsageEnvironment &env = rtspClient->envir(); // alias
    StreamClientState &scs = ((ourRTSPClient *) rtspClient)->scs; // alias

    scs.subsession = scs.iter->next();
    if (scs.subsession != NULL) {

        if (!scs.subsession->initiate()) {
            env << *rtspClient << "Failed to initiate the \"" << *scs.subsession
                << "\" subsession: " << env.getResultMsg() << "\n";

            setupNextSubsession(rtspClient); // give up on this subsession; go to the next one
        } else {

            env << *rtspClient << "Initiated the \"" << *scs.subsession << "\" subsession (";
            if (scs.subsession->rtcpIsMuxed()) {
                env << "client port " << scs.subsession->clientPortNum();
            } else {
                env << "client ports " << scs.subsession->clientPortNum() << "-"
                    << scs.subsession->clientPortNum() + 1;
            }
            env << ")\n";

            // Continue setting up this subsession, by sending a RTSP "SETUP" command:
            rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False,
                                         REQUEST_STREAMING_OVER_TCP);
        }
        return;
    }

    // We've finished setting up all of the subsessions.  Now, send a RTSP "PLAY" command to start the streaming:
    if (scs.session->absStartTime() != NULL) {
        // Special case: The stream is indexed by 'absolute' time, so send an appropriate "PLAY" command:
        rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY, scs.session->absStartTime(),
                                    scs.session->absEndTime());
    } else {
        scs.duration = scs.session->playEndTime() - scs.session->playStartTime();
        rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY);
    }
}

void continueAfterSETUP(RTSPClient *rtspClient, int resultCode, char *resultString) {
    LOGD("continueAfterSETUP");
    do {
        UsageEnvironment &env = rtspClient->envir(); // alias
        StreamClientState &scs = ((ourRTSPClient *) rtspClient)->scs; // alias

        if (resultCode != 0) {
            env << *rtspClient << "Failed to set up the \"" << *scs.subsession << "\" subsession: "
                << resultString << "\n";
            break;
        }

        env << *rtspClient << "Set up the \"" << *scs.subsession << "\" subsession (";
        if (scs.subsession->rtcpIsMuxed()) {
            env << "client port " << scs.subsession->clientPortNum();
        } else {
            env << "client ports " << scs.subsession->clientPortNum() << "-"
                << scs.subsession->clientPortNum() + 1;
        }
        env << ")\n";

        // Having successfully setup the subsession, create a data sink for it, and call "startPlaying()" on it.
        // (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later,
        // after we've sent a RTSP "PLAY" command.)

        scs.subsession->sink = DummySink::createNew(env, *scs.subsession, rtspClient->url());

        // perhaps use your own custom "MediaSink" subclass instead
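        // A possible alternative (an assumption, not in the original): to dump the raw stream to disk the way
        // "openRTSP" does, the DummySink above could be swapped for live555's "FileSink", e.g.
        //   scs.subsession->sink = FileSink::createNew(env, "received_stream.bin"); // file name is illustrative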
        if (scs.subsession->sink == NULL) {
            env << *rtspClient << "Failed to create a data sink for the \"" << *scs.subsession
                << "\" subsession: " << env.getResultMsg() << "\n";
            break;
        }
        ((DummySink *) scs.subsession->sink)->param = rtspClient;
        LOGD("## 02");
        env << *rtspClient << "Created a data sink for the \"" << *scs.subsession
            << "\" subsession\n";

        scs.subsession->miscPtr = rtspClient; // a hack to let subsession handler functions get the "RTSPClient" from the subsession
        scs.subsession->sink->startPlaying(*(scs.subsession->readSource()),
                                           subsessionAfterPlaying, scs.subsession);

        // Also set a handler to be called if a RTCP "BYE" arrives for this subsession:
        if (scs.subsession->rtcpInstance() != NULL) {
            scs.subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, scs.subsession);
        }
    } while (0);
    delete[] resultString;

    // Set up the next subsession, if any:
    setupNextSubsession(rtspClient);
}

// Handle the response to the "PLAY" command we sent:
void continueAfterPLAY(RTSPClient *rtspClient, int resultCode, char *resultString) {

    Boolean success = False;

    do {

        UsageEnvironment &env = rtspClient->envir(); // alias
        StreamClientState &scs = ((ourRTSPClient *) rtspClient)->scs; // alias

        if (resultCode != 0) {
            env << *rtspClient << "Failed to start playing session: " << resultString << "\n";
            break;
        }


        // Set a timer to be handled at the end of the stream's expected duration (if the stream does not already signal its end
        // using a RTCP "BYE").  This is optional.  If, instead, you want to keep the stream active - e.g., so you can later
        // 'seek' back within it and do another RTSP "PLAY" - then you can omit this code.
        // (Alternatively, if you don't want to receive the entire stream, you could set this timer for some shorter value.)
        if (scs.duration > 0) {
            unsigned const delaySlop = 2; // number of seconds extra to delay, after the stream's expected duration.  (This is optional.)
            scs.duration += delaySlop;
            unsigned uSecsToDelay = (unsigned) (scs.duration * 1000000);
            scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask(uSecsToDelay,
                                                                          (TaskFunc *) streamTimerHandler,
                                                                          rtspClient);
        }

        env << *rtspClient << "Started playing session";
        if (scs.duration > 0) {
            env << " (for up to " << scs.duration << " seconds)";
        }
        env << "...\n";

        success = True;

    } while (0);

    delete[] resultString;

    if (!success) {
        // An unrecoverable error occurred with this stream.
        shutdownStream(rtspClient);
    }
}


// Implementation of the other event handlers:
// Handle the end of playback for a subsession: close its sink; once all subsessions have finished, shut down the whole stream.
void subsessionAfterPlaying(void *clientData) {

    MediaSubsession *subsession = (MediaSubsession *) clientData;
    RTSPClient *rtspClient = (RTSPClient *) (subsession->miscPtr);

    // Begin by closing this subsession's stream:
    Medium::close(subsession->sink);
    subsession->sink = NULL;

    // Next, check whether *all* subsessions' streams have now been closed:
    MediaSession &session = subsession->parentSession();
    MediaSubsessionIterator iter(session);
    while ((subsession = iter.next()) != NULL) {
        if (subsession->sink != NULL) return; // this subsession is still active
    }

    // All subsessions' streams have now been closed, so shutdown the client:
    shutdownStream(rtspClient);
}

void subsessionByeHandler(void *clientData) {

    MediaSubsession *subsession = (MediaSubsession *) clientData;
    RTSPClient *rtspClient = (RTSPClient *) subsession->miscPtr;
    UsageEnvironment &env = rtspClient->envir(); // alias

    env << *rtspClient << "Received RTCP \"BYE\" on \"" << *subsession << "\" subsession\n";

    // Now act as if the subsession had closed:
    subsessionAfterPlaying(subsession);
}

void streamTimerHandler(void *clientData) {
    ourRTSPClient *rtspClient = (ourRTSPClient *) clientData;
    StreamClientState &scs = rtspClient->scs; // alias

    scs.streamTimerTask = NULL;

    // Shut down the stream:
    shutdownStream(rtspClient);
}

// Shut down and close a stream.
// When the very last stream has been shut down, this is also where exiting the application would be handled.
void shutdownStream(RTSPClient *rtspClient, int exitCode) {

    UsageEnvironment &env = rtspClient->envir(); // alias
    StreamClientState &scs = ((ourRTSPClient *) rtspClient)->scs; // alias

    // First, check whether any subsessions have still to be closed:
    if (scs.session != NULL) {

        Boolean someSubsessionsWereActive = False;
        MediaSubsessionIterator iter(*scs.session);
        MediaSubsession *subsession;

        while ((subsession = iter.next()) != NULL) {
            if (subsession->sink != NULL) {
                Medium::close(subsession->sink);
                subsession->sink = NULL;

                if (subsession->rtcpInstance() != NULL) {
                    // in case the server sends a RTCP "BYE" while handling "TEARDOWN"
                    subsession->rtcpInstance()->setByeHandler(NULL, NULL);
                }

                someSubsessionsWereActive = True;
            }
        }

        if (someSubsessionsWereActive) {

            // Send a RTSP "TEARDOWN" command, to tell the server to shutdown the stream.
            // Don't bother handling the response to the "TEARDOWN".
            rtspClient->sendTeardownCommand(*scs.session, NULL);
        }
    }

    env << *rtspClient << "Closing the stream.\n";

    // Note that this will also cause this stream's "StreamClientState" structure to get reclaimed.
    Medium::close(rtspClient);

    if (--rtspClientCount == 0) {

        // The final stream has ended, so exit the application now.
        // (Of course, if you're embedding this code into your own application, you might want to comment this out,
        // and replace it with "eventLoopWatchVariable = 1;", so that we leave the LIVE555 event loop, and continue running "main()".)

        // exit(exitCode);
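
        // (Per the note above, when embedding this code you would instead signal the event loop to exit;
        //  shown here only as an illustration:)
        // eventLoopWatchVariable = 1;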
    }
}


// Implementation of "ourRTSPClient":

ourRTSPClient *ourRTSPClient::createNew(UsageEnvironment &env, char const *rtspURL,
                                        int verbosityLevel, char const *applicationName,
                                        portNumBits tunnelOverHTTPPortNum) {
    return new ourRTSPClient(env, rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum);
}

ourRTSPClient::ourRTSPClient(UsageEnvironment &env, char const *rtspURL,
                             int verbosityLevel, char const *applicationName,
                             portNumBits tunnelOverHTTPPortNum)
        : RTSPClient(env, rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum, -1) {
}

ourRTSPClient::~ourRTSPClient() {
}


// Implementation of "StreamClientState":

StreamClientState::StreamClientState()
        : iter(NULL), session(NULL), subsession(NULL), streamTimerTask(NULL), duration(0.0) {
}

StreamClientState::~StreamClientState() {
    delete iter;
    if (session != NULL) {

        // We also need to delete "session", and unschedule "streamTimerTask" (if set)
        UsageEnvironment &env = session->envir(); // alias

        env.taskScheduler().unscheduleDelayedTask(streamTimerTask);
        Medium::close(session);
    }
}


// Implementation of "DummySink":

// Even though we're not going to be doing anything with the incoming data, we still need to receive it.
// Define the size of the buffer that we'll use:
#define DUMMY_SINK_RECEIVE_BUFFER_SIZE 500000

DummySink *
DummySink::createNew(UsageEnvironment &env, MediaSubsession &subsession, char const *streamId) {
    return new DummySink(env, subsession, streamId);
}

DummySink::DummySink(UsageEnvironment &env, MediaSubsession &subsession, char const *streamId)
        : MediaSink(env),
          fSubsession(subsession) {
    fStreamId = strDup(streamId);
    fReceiveBuffer = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE];
}

DummySink::~DummySink() {
    delete[] fReceiveBuffer;
    delete[] fStreamId;
}

void DummySink::afterGettingFrame(void *clientData, unsigned frameSize, unsigned numTruncatedBytes,
                                  struct timeval presentationTime,
                                  unsigned durationInMicroseconds) {
    DummySink *sink = (DummySink *) clientData;
    sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
}

// If you don't want to see debugging output for each received frame, then comment out the following line:
#define DEBUG_PRINT_EACH_RECEIVED_FRAME 1


static unsigned char tmpHeader[] = {0x00, 0x00, 0x00, 0x01}; // the H.264 Annex-B start code, for prefixing received NAL units

static int isSave = 1; // non-zero => consume/save received frames (used in the sketch inside afterGettingFrame() below)



void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
                                  struct timeval presentationTime,
                                  unsigned /*durationInMicroseconds*/) {

    // We've just received a frame of data.  (Optionally) print out information about it:
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
    if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
    envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived "
            << frameSize << " bytes";
    if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
    char uSecsStr[6 + 1]; // used to output the 'microseconds' part of the presentation time
    sprintf(uSecsStr, "%06u", (unsigned) presentationTime.tv_usec);
    envir() << ".\tPresentation time: " << (int) presentationTime.tv_sec << "." << uSecsStr;
    if (fSubsession.rtpSource() != NULL &&
        !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
        envir()
                << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
    }
#ifdef DEBUG_PRINT_NPT
    envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
#endif
    envir() << "\n";
#endif

    LOGD("recv %d numTruncatedBytes %d NAL: %d", frameSize, numTruncatedBytes,
         fReceiveBuffer[0] & 0x1F);

    /// Consume the received data: begin ///
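    // A minimal sketch (an assumption, not part of the original) of consuming the frame, using the
    // "tmpHeader"/"isSave" globals defined above: append each H.264 NAL unit, prefixed with the Annex-B
    // start code, to an elementary-stream file.  The output path is illustrative; requires <cstdio>/<cstring>.
    if (isSave && strcmp(fSubsession.codecName(), "H264") == 0) {
        static FILE *fp = fopen("/sdcard/received.h264", "wb"); // hypothetical output path
        if (fp != NULL) {
            fwrite(tmpHeader, 1, sizeof(tmpHeader), fp); // 00 00 00 01
            fwrite(fReceiveBuffer, 1, frameSize, fp);    // the NAL unit just received
            fflush(fp);
        }
    }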

    /// Consume the received data: end ///

    // Then continue, to request the next frame of data:
    continuePlaying();
}

Boolean DummySink::continuePlaying() {
    if (fSource == NULL) return False; // sanity check (should not happen)

    // Request the next frame of data from our input source.  "afterGettingFrame()" will get called later, when it arrives:
    fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE,
                          afterGettingFrame, this,
                          onSourceClosure, this);
    return True;
}
