OWT Server video_agent Source Code Analysis

I find that code I've read is easy to forget, so I'm writing this document to reinforce my understanding and memory.

1. VideoMixer Creation Process

  • The configuration caps mixing at 16 video inputs; each input creates a SoftInput, which in turn creates a FrameConverter to convert frames.
  • Two generators are created based on fps: one covers 6 to 48 fps and the other 15 to 60 fps, and each starts a timer with a different interval (see the timer sketch after the constructor code below).
void VideoMixer::New(const v8::FunctionCallbackInfo<v8::Value>& args) {
......
VideoMixer* obj = new VideoMixer();
obj->me = new mcu::VideoMixer(config);

VideoMixer::VideoMixer(const VideoMixerConfig& config)
: m_nextOutputIndex(0)
, m_maxInputCount(16)
{
......
m_frameMixer.reset(new VideoFrameMixerImpl(m_maxInputCount, rootSize, bgColor, true, config.crop));
}

// Depending on the build, create the hardware compositor MsdkVideoCompositor or the software compositor SoftVideoCompositor

VideoFrameMixerImpl::VideoFrameMixerImpl(uint32_t maxInput, owt_base::VideoSize rootSize, owt_base::YUVColor bgColor, bool useSimulcast, bool crop)
: m_useSimulcast(useSimulcast)
{
#ifdef ENABLE_MSDK
if (!m_compositor)
m_compositor.reset(new MsdkVideoCompositor(maxInput, rootSize, bgColor, crop));
#endif
if (!m_compositor)
m_compositor.reset(new SoftVideoCompositor(maxInput, rootSize, bgColor, crop));
}

// Creates the SoftInput instances, the AvatarManager, and the SoftFrameGenerators

SoftVideoCompositor::SoftVideoCompositor(uint32_t maxInput, VideoSize rootSize, YUVColor bgColor, bool crop)
: m_maxInput(maxInput)
{
m_inputs.resize(m_maxInput);

for (auto& input : m_inputs) {
input.reset(new SoftInput());
}

m_avatarManager.reset(new AvatarManager(maxInput));
m_generators.resize(2);
m_generators[0].reset(new SoftFrameGenerator(this, rootSize, bgColor, crop, 60, 15));
m_generators[1].reset(new SoftFrameGenerator(this, rootSize, bgColor, crop, 48, 6));
}
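
The constructor above doesn't show the timers themselves. Below is a minimal sketch of the dispatch scheme the two generators imply (my assumption, not the exact OWT implementation): a generator ticks at its maximum fps, and an output registered at a lower fps only consumes every (maxFps / fps)-th tick, which is the same divider math m_outputs[index] relies on later in this article.

// Sketch (assumption): a generator ticking at maxFps serves outputs at
// integer dividers of that rate, e.g. 60 -> 60/30/15, 48 -> 48/24/12/6.
#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t maxFps = 60;
    const uint32_t outputFps[] = {60, 30, 15};

    for (uint32_t tick = 0; tick < 8; ++tick) {   // timer fires every 1000/maxFps ms
        for (uint32_t fps : outputFps) {
            uint32_t step = maxFps / fps;         // deliver on every step-th tick
            if (tick % step == 0)
                printf("tick %u -> frame to %u fps output\n", tick, fps);
        }
    }
    return 0;
}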

CALL STACK:

Let's debug the mixer's workflow. MCU is the default mode, which uses the mixer to composite streams. First, look at the "in" path: reading packets from the webrtc-agent and mixing them.

videoMixer-sw.node!mcu::SoftVideoCompositor::SoftVideoCompositor(mcu::SoftVideoCompositor * const this, uint32_t maxInput, owt_base::VideoSize rootSize, owt_base::YUVColor bgColor, bool crop) (/home/oem/git/owt-server/source/agent/video/videoMixer/SoftVideoCompositor.cpp:609)

videoMixer-sw.node!mcu::VideoFrameMixerImpl::VideoFrameMixerImpl(mcu::VideoFrameMixerImpl * const this, uint32_t maxInput, owt_base::VideoSize rootSize, owt_base::YUVColor bgColor, bool useSimulcast, bool crop) (/home/oem/git/owt-server/source/agent/video/videoMixer/VideoFrameMixerImpl.h:114)

videoMixer-sw.node!mcu::VideoMixer::VideoMixer(mcu::VideoMixer * const this, const mcu::VideoMixerConfig & config) (/home/oem/git/owt-server/source/agent/video/videoMixer/VideoMixer.cpp:68)

videoMixer-sw.node!VideoMixer::New(const v8::FunctionCallbackInfo<v8::Value> & args) (/home/oem/git/owt-server/source/agent/video/videoMixer/VideoMixerWrapper.cc:90)

These constructors are entered when the first user joins the room.

At this point only the objects have been initialized; stream pulling has not started yet. Next, the InternalServer and InternalClient objects are created, which wrap the C++ objects from the internalIO addon (internalIO.node).

On the master branch after 5.0.x, the InternalIO module was rewritten.

/home/oem/git/owt-server/dist-debug/video_agent/video/internalConnectionRouter.js
const internalIO = require('../internalIO/build/Release/internalIO');

const {InternalServer, InternalClient} = internalIO;

  constructor({protocol, minport, maxport}) {
    setSecurePromise.then(() => {
      this.internalServer = new InternalServer(


/home/oem/git/owt-server/source/agent/addons/internalIO/InternalServerWrapper.cc
NAN_METHOD(InternalServer::New) {

    InternalServer* obj = new InternalServer();
    obj->me = new owt_base::InternalServer(


/home/oem/git/owt-server/source/core/owt_base/internal/InternalServer.cpp

/home/oem/git/owt-server/dist-debug/video_agent/video/vmixer.js
    that.publish = function (stream_id, stream_type, options, callback) {
            addInput(stream_id, options.video.codec, options, options.avatar, function () {


    var addInput = function (stream_id, codec, options, avatar, on_ok, on_error) {

        if (engine) {
            var conn = router.getOrCreateRemoteSource({
                id: stream_id,
                ip: options.ip,
                port: options.port
            }


/home/oem/git/owt-server/dist-debug/video_agent/video/internalConnectionRouter.js
  getOrCreateRemoteSource({id, ip, port}, onStat) {
    if (!this.remoteStreams.has(id) && ip && port) {
      log.debug('RemoteSource created:', id, ip, port);
      let conn = new InternalClient(id, this.protocol, ip, port, onStat);

/home/oem/git/owt-server/source/agent/addons/internalIO/InternalClientWrapper.cc
NAN_METHOD(InternalClient::New) {

    InternalClient* obj = new InternalClient();
    obj->me = new owt_base::InternalClient(

InternalClient::InternalClient(
    const std::string& streamId,
    const std::string& protocol,
    const std::string& ip,
    unsigned int port,
    Listener* listener)
    : InternalClient(streamId, protocol, listener)
{
    if (!TransportSecret::getPassphrase().empty()) {
        m_client->enableSecure();
    }
    m_client->createConnection(ip, port);
}

/home/oem/git/owt-server/source/core/owt_base/internal/TransportClient.cpp
void TransportClient::createConnection(const std::string& ip, uint32_t port)
{
        m_sslSocket->lowest_layer().async_connect(*iterator,
            boost::bind(&TransportClient::connectHandler, this,
                boost::asio::placeholders::error));

void TransportClient::connectHandler(const boost::system::error_code& ec)
{
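
The connect side uses the standard boost::asio asynchronous pattern: async_connect returns immediately, and connectHandler fires on the io_service thread once the TCP handshake finishes. A self-contained sketch of that pattern (plain TCP with a fixed endpoint here; the real code goes through an SSL socket and a resolver iterator):

#include <boost/asio.hpp>
#include <boost/bind/bind.hpp>
#include <cstdio>

static void connectHandler(const boost::system::error_code& ec) {
    printf("connect: %s\n", ec ? ec.message().c_str() : "ok");
}

int main() {
    boost::asio::io_service srv;
    boost::asio::ip::tcp::socket sock(srv);
    boost::asio::ip::tcp::endpoint ep(
        boost::asio::ip::address::from_string("127.0.0.1"), 44166);

    // Returns immediately; the handler runs when the handshake completes or fails.
    sock.async_connect(ep, boost::bind(&connectHandler,
                                       boost::asio::placeholders::error));
    srv.run();  // dispatches connectHandler
    return 0;
}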

2. VideoMixer: In Code Flow

The InternalIO data-receiving flow:

// Uses boost.asio to register TransportSession::readHandler() as the data-receive callback
void TransportSession::receiveData()
{
m_socket.async_read_some(boost::asio::buffer(m_receivedBuffer.get(), bytesToRead),
            boost::bind(&TransportSession::readHandler, self,
                boost::asio::placeholders::error,
                boost::asio::placeholders::bytes_transferred));

void TransportSession::readHandler(
    const boost::system::error_code& ec, std::size_t bytes)
{
    if (m_receivedMessage.isComplete()) {
        TransportData data{m_receivedMessage.payloadData(),
                           m_receivedMessage.payloadLength()};
        m_listener->onData(m_id, data);
        m_receivedMessage.clear();
    }

    receiveData();  // re-arm the next asynchronous read
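
The isComplete()/payloadData() calls above suggest length-prefixed framing: bytes accumulate until a full message (header plus payload) has arrived, then the payload is handed to the listener. A minimal sketch of such framing (my assumption about TransportMessage's internals; the real class lives under source/core/owt_base/internal/):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

class Message {
public:
    // Append raw bytes from the socket; returns true once the full
    // message (4-byte big-endian length header + body) has arrived.
    bool append(const uint8_t* data, std::size_t len) {
        m_buf.insert(m_buf.end(), data, data + len);
        return isComplete();
    }
    bool isComplete() const {
        if (m_buf.size() < 4) return false;
        return m_buf.size() >= 4 + payloadLength();
    }
    uint32_t payloadLength() const {
        return (m_buf[0] << 24) | (m_buf[1] << 16) | (m_buf[2] << 8) | m_buf[3];
    }
    const uint8_t* payloadData() const { return m_buf.data() + 4; }
    void clear() { m_buf.clear(); }
private:
    std::vector<uint8_t> m_buf;
};

int main() {
    Message m;
    const uint8_t pkt[] = {0, 0, 0, 2, 'h', 'i'};   // length header + 2-byte payload
    if (m.append(pkt, sizeof(pkt)))
        printf("payload of %u bytes ready\n", m.payloadLength());
    return 0;
}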


void TransportClient::onData(uint32_t id, TransportData data)
{
    if (m_listener) {
        m_listener->onData(data.buffer.get(), data.length);
    }
}

void InternalClient::onData(uint8_t* buf, uint32_t len)
{
    deliverFrame(*frame);

void FrameSource::deliverFrame(const Frame& frame)
{
        for (auto it = m_video_dests.begin(); it != m_video_dests.end(); ++it) {
            (*it)->onFrame(frame);
        }

Remark: Note that InternalClient inherits from FrameSource. The object created in JS is an InternalClient, but in VideoMixer the argument is cast to FrameSource, i.e., the base class.
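
This base-class pattern is the backbone of owt_base's MediaFramePipeline: a FrameSource fans frames out to registered FrameDestinations, and concrete classes such as InternalClient derive from it. A simplified sketch with toy types (the real interfaces also carry audio destinations, locks, and feedback channels):

#include <cstdio>
#include <list>

struct Frame { int seq; };   // stand-in for owt_base::Frame

class FrameDestination {
public:
    virtual ~FrameDestination() {}
    virtual void onFrame(const Frame& frame) = 0;
};

class FrameSource {
public:
    virtual ~FrameSource() {}
    void addVideoDestination(FrameDestination* dst) { m_video_dests.push_back(dst); }
    void deliverFrame(const Frame& frame) {
        // fan out, as in MediaFramePipeline.cpp
        for (auto it = m_video_dests.begin(); it != m_video_dests.end(); ++it)
            (*it)->onFrame(frame);
    }
private:
    std::list<FrameDestination*> m_video_dests;
};

// InternalClient is-a FrameSource: JS constructs an InternalClient, while C++
// consumers like VideoMixer::addInput only need the base pointer.
class ToyInternalClient : public FrameSource {
public:
    void onData() { deliverFrame(Frame{1}); }   // network-thread entry point
};

class PrintDest : public FrameDestination {
public:
    void onFrame(const Frame& f) override { printf("got frame %d\n", f.seq); }
};

int main() {
    ToyInternalClient client;
    PrintDest dest;
    client.addVideoDestination(&dest);
    client.onData();
    return 0;
}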

At this point the channel to the webrtc-agent is established; from the video agent's perspective this is the "in" (input) side:

oem@svr1804:~/git/owt-server/dist-debug$ ps aux|grep video|grep workingNode
oem      27202 28.4  0.4 2075488 78384 ?       Ssl  18:22   4:22 node ./workingNode video-100f0c9d1e12e143e20e@192.168.1.106_1 video-100f0c9d1e12e143e20e@192.168.1.106 {"agent":{"maxProcesses":1,"prerunProcesses":1},"cluster":{"name":"owt-cluster","join_retry":60,"report_load_interval":1000,"max_load":0.85,"worker":{"ip":"192.168.1.106","join_retry":60,"load":{"max":0.85,"period":1000,"item":{"name":"cpu"}}}},"capacity":{"isps":[],"regions":[],"video":{"decode":["vp8","vp9","h264","h265","av1x"],"encode":["vp8","vp9","av1x","h264_CB","h264_B","h265"]}},"rabbit":{"host":"localhost","port":5672,"hostname":"localhost"},"internal":{"ip_address":"192.168.1.106","maxport":0,"minport":0},"video":{"hardwareAccelerated":false,"enableBetterHEVCQuality":false,"MFE_timeout":0,"codecs":{"decode":["vp8","vp9","h264","h265","av1x"],"encode":["vp8","vp9","av1x","h264_CB","h264_B","h265"]}},"avatar":{"location":"avatars/avatar_blue.180x180.yuv"},"purpose":"video"}
oem@svr1804:~/git/owt-server/dist-debug$ lsof -p 27202 |grep TCP
svr1804:43609->svr1804:44166 (ESTABLISHED)
node    27202  oem   49u     IPv4             152519      0t0      TCP svr1804:56902->svr1804:41285 (ESTABLISHED)

// The 43609 <---> 44166 pair is the video -- webrtc connection

oem@svr1804:~/git/owt-server/dist-debug$ netstat -anp|grep 44166
(Not all processes could be identified, non-owned process info
 will not be shown, you would have to be root to see it all.)
tcp        0      0 192.168.1.106:44166     192.168.1.106:43609     ESTABLISHED 26833/node          
tcp        0      0 192.168.1.106:43609     192.168.1.106:44166     ESTABLISHED 27202/node          
oem@svr1804:~/git/owt-server/dist-debug$ ps aux | grep 26833
oem       2937  0.0  0.0  17740  1044 pts/0    S+   18:41   0:00 grep --color=auto 26833
oem      26833 10.4  0.3 4259404 57852 ?       Ssl  18:22   2:02 node ./workingNode webrtc-632490e78d30ccd9b0a4@192.168.1.106_4 webrtc-632490e78d30ccd9b0a4@192.168.1.106 {"agent":{"maxProcesses":2,"prerunProcesses":1},"cluster":{"name":"owt-cluster","join_retry":60,"report_load_interval":1000,"max_load":0.85,"network_max_scale":1000,"worker":{"ip":"192.168.1.106","join_retry":60,"load":{"max":0.85,"period":1000,"item":{"name":"network","interf":"lo","max_scale":1000}}}},"capacity":{"isps":[],"regions":[]},"rabbit":{"host":"localhost","port":5672,"hostname":"localhost"},"internal":{"ip_address":"192.168.1.106","maxport":0,"minport":0},"webrtc":{"network_interfaces":[],"keystorePath":"./cert/certificate.pfx","maxport":0,"minport":0,"stunport":0,"stunserver":"","num_workers":24,"use_nicer":false,"io_workers":8},"purpose":"webrtc"}

Set a breakpoint in addInput. Note again that the JS-side object is an InternalClient, while here it is cast to its parent class FrameSource.

/home/oem/git/owt-server/dist-debug/video_agent/video/vmixer.js
that.publish = function (stream_id, stream_type, options, callback) {
addInput(stream_id, options.video.codec, options, options.avatar, function () {

/home/oem/git/owt-server/source/agent/video/videoMixer/VideoMixerWrapper.cc
void VideoMixer::addInput(const v8::FunctionCallbackInfo<v8::Value>& args) {

  int inputIndex = Nan::To<int32_t>(args[0]).FromJust();
  std::string codec = getString(args[1]);
  FrameSource* param2 = ObjectWrap::Unwrap<FrameSource>(
    Nan::To<v8::Object>(args[2]).ToLocalChecked());
  owt_base::FrameSource* src = param2->src;

  int r = me->addInput(inputIndex, codec, src, avatarData);

/home/oem/git/owt-server/source/agent/video/videoMixer/VideoMixer.cpp
bool VideoMixer::addInput(const int inputIndex, const std::string& codec, owt_base::FrameSource* source, const std::string& avatar)
{
  if (m_frameMixer->addInput(inputIndex, format, source, avatar)) {

/home/oem/git/owt-server/source/agent/video/videoMixer/VideoFrameMixerImpl.h
inline bool VideoFrameMixerImpl::addInput(int input, owt_base::FrameFormat format, owt_base::FrameSource* source, const std::string& avatar)
{
    if (decoder->init(format)) {
        boost::shared_ptr<CompositeIn> compositorIn(new CompositeIn(input, avatar, m_compositor));
        //Wire up the chain:  FrameSource(webrtc) --> VCMFrameDecoder --> CompositeIn
        source->addVideoDestination(decoder.get());
        decoder->addVideoDestination(compositorIn.get());

/home/oem/git/owt-server/source/core/owt_base/VCMFrameDecoder.cpp
bool VCMFrameDecoder::init(FrameFormat format)
{
//Create and initialize the decoder
switch (format) {
    case FRAME_FORMAT_VP8:
        codecType = VideoCodecType::kVideoCodecVP8;
        m_decoder.reset(VP8Decoder::Create());
        break;
if (m_decoder->InitDecode(&codecSettings, webrtc::CpuInfo::DetectNumberOfCores()) != 0) {
        ELOG_ERROR_T("Video decoder init faild.");
        return false;
    }

    m_decoder->RegisterDecodeCompleteCallback(this);



int32_t VCMFrameDecoder::Decoded(VideoFrame& decodedImage)
{
    deliverFrame(frame);
    return 0;
}


void VCMFrameDecoder::onFrame(const Frame& frame)
{
    int ret = m_decoder->Decode(image, false, nullptr, &m_codecInfo);
    if (ret != 0) {
        m_needKeyFrame = true;
        FeedbackMsg msg {.type = VIDEO_FEEDBACK, .cmd = REQUEST_KEY_FRAME};
        deliverFeedbackMsg(msg);
    }
}

Note: the mixer uses one decoder per input; VP8/VP9/H.264 use VCMFrameDecoder, while other formats use FFmpegFrameDecoder.

Note: source (a FrameSource*) is the packet stream received from webrtc; its video destination is set to decoder (a VCMFrameDecoder*), i.e., the decoder.

Note: decoder (a VCMFrameDecoder*) produces the decoded images; its destination is set to compositorIn (a CompositeIn*), which in turn calls SoftVideoCompositor to composite.

Note: the decoder implements webrtc::DecodedImageCallback; this interface is a callback, so once a frame is decoded, VCMFrameDecoder::Decoded fires and delivers the decoded frame to the downstream modules.

Note: when an encoded Frame arrives from the webrtc-agent, VCMFrameDecoder::onFrame is called and feeds it to the decoder; if decoding fails, a key frame is requested.

Note: VCM stands for Video Coding Module.
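
The decode loop in these notes can be condensed into a toy sketch (simplified stand-ins; the real code uses webrtc::VideoDecoder and webrtc::DecodedImageCallback): onFrame() feeds encoded data in, Decoded() brings the picture back out, and a failed Decode() triggers a key-frame request.

#include <cstdio>

// Stand-in for webrtc::DecodedImageCallback.
struct DecodedImageCallback {
    virtual ~DecodedImageCallback() {}
    virtual int Decoded(int image) = 0;
};

// Stand-in for a webrtc decoder (VP8Decoder etc.).
struct ToyDecoder {
    DecodedImageCallback* cb = nullptr;
    void RegisterDecodeCompleteCallback(DecodedImageCallback* c) { cb = c; }
    int Decode(int encoded) {               // 0 on success
        if (encoded < 0) return -1;         // corrupt input
        if (cb) cb->Decoded(encoded * 10);  // decoded picture returns via callback
        return 0;
    }
};

struct ToyFrameDecoder : DecodedImageCallback {   // plays VCMFrameDecoder's role
    ToyDecoder decoder;
    ToyFrameDecoder() { decoder.RegisterDecodeCompleteCallback(this); }
    void onFrame(int encoded) {                   // encoded frame from webrtc-agent
        if (decoder.Decode(encoded) != 0)
            printf("decode failed -> REQUEST_KEY_FRAME\n");
    }
    int Decoded(int image) override {             // re-enters the pipeline
        printf("deliverFrame(%d)\n", image);
        return 0;
    }
};

int main() {
    ToyFrameDecoder d;
    d.onFrame(7);    // deliverFrame(70)
    d.onFrame(-1);   // decode failed -> REQUEST_KEY_FRAME
    return 0;
}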

3. VideoMixer: Out Code Flow

How VideoMixer wires up an output, i.e., delivers the composited picture to the webrtc-agent module:

//Wire up the output toward internalIO inter-process communication.
//(The snippet below is from VCMFrameEncoder::encode: the encoder re-configures itself whenever the composited frame's resolution changes.)

    if (m_width != frame->width() || m_height != frame->height()) {
        ELOG_DEBUG_T("Update encoder resolution %dx%d->%dx%d", m_width, m_height, frame->width(), frame->height());

        ret = m_encoder->SetResolution(frame->width(), frame->height());

// Initialize the encoder and set up its output.
int32_t VCMFrameEncoder::generateStream(uint32_t width, uint32_t height, uint32_t frameRate, uint32_t bitrateKbps, uint32_t keyFrameIntervalSeconds, owt_base::FrameDestination* dest)
{
//Create the encoder
      case FRAME_FORMAT_VP8:
            m_encoder.reset(VP8Encoder::Create());
//Initialize the encoder
    ret = m_encoder->InitEncode(&codecSettings, webrtc::CpuInfo::DetectNumberOfCores(), 0);

//Register the encode-complete callback (this)
    m_encoder->RegisterEncodeCompleteCallback(this);

//Create the EncodeOut output object
encodeOut.reset(new EncodeOut(m_streamId, this, dest));

// Depending on build options and format, one of these encoders is created: MsdkFrameEncoder / SVTHEVCEncoder / VCMFrameEncoder
/home/oem/git/owt-server/source/agent/video/videoMixer/VideoFrameMixerImpl.h

inline bool VideoFrameMixerImpl::addOutput(int output,

#ifdef ENABLE_MSDK
        if (!encoder && owt_base::MsdkFrameEncoder::supportFormat(format))
            encoder.reset(new owt_base::MsdkFrameEncoder(format, profile, m_useSimulcast));
#endif

#if ENABLE_SVT_HEVC_ENCODER
        if (!encoder && format == owt_base::FRAME_FORMAT_H265)
            encoder.reset(new owt_base::SVTHEVCEncoder(format, profile, m_useSimulcast));
#endif

        if (!encoder && owt_base::VCMFrameEncoder::supportFormat(format))
            encoder.reset(new owt_base::VCMFrameEncoder(format, profile, m_useSimulcast));

//Select the generator matching the requested width/height/fps video parameters; the output data flow is chained as follows:

// boost::shared_ptr<VideoFrameCompositor> m_compositor;   ===>

// boost::shared_ptr<owt_base::VideoFrameEncoder> encoder;  ===>

// webrtc-agent

A MediaFrameMulticaster is created and wired in via engine.addOutput:

/home/oem/git/owt-server/dist-debug/video_agent/video/index.js
module.exports = function (rpcClient, selfRpcId, parentRpcId, clusterWorkerIP) {
    const that = {
      agentID: parentRpcId,
      clusterIP: clusterWorkerIP
    };
    let processor = undefined;

    that.init = function (service, config, belongTo, controller, mixView, callback) {
        if (processor === undefined) {
            if (service === 'mixing') {
                processor = new VMixer(rpcClient, clusterWorkerIP, VideoMixer, router);

/home/oem/git/owt-server/dist-debug/video_agent/video/vmixer.js
that.generate = function (codec, resolution, framerate, bitrate, keyFrameInterval, callback) {
addOutput(codec, resolution, framerate, bitrate, keyFrameInterval, function (stream_id) {


/home/oem/git/owt-server/dist-debug/video_agent/video/vmixer.js

   var addOutput = function (codec, resolution, framerate, bitrate, keyFrameInterval, on_ok, on_error) {
        log.debug('addOutput: codec', codec, 'resolution:', resolution, 'framerate:', framerate, 'bitrate:', bitrate, 'keyFrameInterval:', keyFrameInterval);
        if (engine) {
            var stream_id = Math.random() * 1000000000000000000 + '';
            var dispatcher = new MediaFrameMulticaster();
            if (engine.addOutput(stream_id,
                                 codec,
                                 resolution2String(resolution),
                                 framerate,
                                 bitrate,
                                 keyFrameInterval,
                                 dispatcher))
videoMixer-sw.node!mcu::SoftFrameGenerator::addOutput(mcu::SoftFrameGenerator * const this, const uint32_t width, const uint32_t height, const uint32_t fps, owt_base::FrameDestination * dst) (/home/oem/git/owt-server/source/agent/video/videoMixer/SoftVideoCompositor.cpp:366)

videoMixer-sw.node!mcu::SoftVideoCompositor::addOutput(mcu::SoftVideoCompositor * const this, const uint32_t width, const uint32_t height, const uint32_t framerateFPS, owt_base::FrameDestination * dst) (/home/oem/git/owt-server/source/agent/video/videoMixer/SoftVideoCompositor.cpp:681)

videoMixer-sw.node!mcu::VideoFrameMixerImpl::addOutput(mcu::VideoFrameMixerImpl * const this, int output, owt_base::FrameFormat format, const owt_base::VideoCodecProfile profile, const owt_base::VideoSize & outputSize, const unsigned int framerateFPS, const unsigned int bitrateKbps, const unsigned int keyFrameIntervalSeconds, owt_base::FrameDestination * dest) (/home/oem/git/owt-server/source/agent/video/videoMixer/VideoFrameMixerImpl.h:274)

videoMixer-sw.node!mcu::VideoMixer::addOutput(mcu::VideoMixer * const this, const std::__cxx11::string & outStreamID, const std::__cxx11::string & codec, const owt_base::VideoCodecProfile profile, const std::__cxx11::string & resolution, const unsigned int framerateFPS, const unsigned int bitrateKbps, const unsigned int keyFrameIntervalSeconds, owt_base::FrameDestination * dest) (/home/oem/git/owt-server/source/agent/video/videoMixer/VideoMixer.cpp:132)

videoMixer-sw.node!VideoMixer::addOutput(const v8::FunctionCallbackInfo<v8::Value> & args) (/home/oem/git/owt-server/source/agent/video/videoMixer/VideoMixerWrapper.cc:184)
// As noted earlier, two generators were created: one for 6 to 48 fps, the other for 15 to 60 fps.
// isSupported() doubles from the base fps (6, 12, 24, 48 or 15, 30, 60) to see which generator matches the requested fps.
# vi source/agent/video/videoMixer/SoftVideoCompositor.cpp +679
bool SoftVideoCompositor::addOutput(const uint32_t width, const uint32_t height, const uint32_t framerateFPS, owt_base::FrameDestination *dst)
    for (auto& generator : m_generators) {
        if (generator->isSupported(width, height, framerateFPS)) {
            return generator->addOutput(width, height, framerateFPS, dst);

// m_outputs is a vector<list>; each fps slot can hold multiple outputs.
# vi source/agent/video/videoMixer/SoftVideoCompositor.cpp +361
bool SoftFrameGenerator::addOutput(const uint32_t width, const uint32_t height, const uint32_t fps, owt_base::FrameDestination *dst) {
    int index = m_maxSupportedFps / fps - 1;
    Output_t output{.width = width, .height = height, .fps = fps, .dest = dst}; // MediaFrameMulticaster
    m_outputs[index].push_back(output);
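
Putting the two comments above together, here is a compilable sketch of the matching and slot math (my reconstruction of isSupported(); the index line is taken from addOutput):

#include <cstdint>
#include <cstdio>

// Reconstruction (assumption): double from the generator's base fps up to its
// max fps, e.g. 6,12,24,48 or 15,30,60, and see whether the requested fps hits.
bool isSupportedFps(uint32_t maxFps, uint32_t minFps, uint32_t fps) {
    for (uint32_t f = minFps; f <= maxFps; f *= 2) {
        if (f == fps)
            return true;
    }
    return false;
}

int main() {
    printf("%d\n", isSupportedFps(60, 15, 30));  // 1: 15 -> 30
    printf("%d\n", isSupportedFps(48, 6, 24));   // 1: 6 -> 12 -> 24
    printf("%d\n", isSupportedFps(60, 15, 24));  // 0: only the 48-max generator has 24

    const uint32_t m_maxSupportedFps = 60, fps = 30;
    printf("index=%u\n", m_maxSupportedFps / fps - 1);  // m_outputs slot 1
    return 0;
}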

With that, the encoder chain is in place. Next, let's see how encoded Frames are delivered to the webrtc-agent. When a client subscribes to the MCU's stream, the video agent listens on a port (InternalOut) and the webrtc-agent actively connects to it to pull the stream.

Note: the video module's decoder starts working as soon as a stream is published, while the encoder only starts when someone subscribes to the MCU's stream; so in MCU mode, even accessing a stream with forward=true still incurs a decode.

Output flow after a video frame is encoded:

CALL STACK:

mediaFrameMulticaster.node!owt_base::FrameSource::deliverFrame(owt_base::FrameSource * const this, const owt_base::Frame & frame) (/home/oem/git/owt-server/source/core/owt_base/MediaFramePipeline.cpp:82)

mediaFrameMulticaster.node!owt_base::MediaFrameMulticaster::onFrame(owt_base::MediaFrameMulticaster * const this, const owt_base::Frame & frame) (/home/oem/git/owt-server/source/core/owt_base/MediaFrameMulticaster.cpp:36)

videoMixer-sw.node!owt_base::FrameSource::deliverFrame(owt_base::FrameSource * const this, const owt_base::Frame & frame) (/home/oem/git/owt-server/source/core/owt_base/MediaFramePipeline.cpp:90)

videoMixer-sw.node!owt_base::EncodeOut::onEncoded(owt_base::EncodeOut * const this, const owt_base::Frame & frame) (/home/oem/git/owt-server/source/core/owt_base/VCMFrameEncoder.h:52)

videoMixer-sw.node!owt_base::VCMFrameEncoder::OnEncodedImage(owt_base::VCMFrameEncoder * const this, const webrtc::EncodedImage & encoded_frame, const webrtc::CodecSpecificInfo * codec_specific_info, const webrtc::RTPFragmentationHeader * fragmentation) (/home/oem/git/owt-server/source/core/owt_base/VCMFrameEncoder.cpp:458)

videoMixer-sw.node!webrtc::VP8EncoderImpl::GetEncodedPartitions(webrtc::VideoFrame const&) (Unknown Source:0)

videoMixer-sw.node!webrtc::VP8EncoderImpl::Encode(webrtc::VideoFrame const&, webrtc::CodecSpecificInfo const*, std::vector<webrtc::FrameType, std::allocator<webrtc::FrameType> > const*) (Unknown Source:0)

videoMixer-sw.node!owt_base::VCMFrameEncoder::encode(owt_base::VCMFrameEncoder * const this, boost::shared_ptr<webrtc::VideoFrame> frame) (/home/oem/git/owt-server/source/core/owt_base/VCMFrameEncoder.cpp:421)

videoMixer-sw.node!owt_base::VCMFrameEncoder::Encode(owt_base::VCMFrameEncoder * This, boost::shared_ptr<webrtc::VideoFrame> videoFrame) (/home/oem/git/owt-server/source/core/owt_base/VCMFrameEncoder.h:94)


// Fetch an image (Frame) from the buffer; it is already the composited picture.
/home/oem/git/owt-server/source/agent/video/videoMixer/SoftVideoCompositor.cpp
void SoftFrameGenerator::onTimeout()
    rtc::scoped_refptr<webrtc::VideoFrameBuffer> compositeBuffer = generateFrame();

    // Get the layout buffer, i.e., the buffer holding the composited picture.
/home/oem/git/owt-server/source/agent/video/videoMixer/SoftVideoCompositor.cpp
rtc::scoped_refptr<webrtc::VideoFrameBuffer> SoftFrameGenerator::generateFrame()
{
    reconfigureIfNeeded();
    return layout();
}

    // Merge the inputs into one picture; with many participants there is parallel composition logic.
    # vi source/agent/video/videoMixer/SoftVideoCompositor.cpp +525
rtc::scoped_refptr<webrtc::VideoFrameBuffer> SoftFrameGenerator::layout()
{
    
if (isParallelFrameComposition) {
        int nParallelRegions = (m_layout.size() + m_parallelNum - 1) / m_parallelNum;
        int nRegions = m_layout.size();

        LayoutSolution::iterator regions_begin = m_layout.begin();
        LayoutSolution::iterator regions_end = m_layout.begin();

        std::vector<boost::shared_ptr<boost::packaged_task<void>>> tasks;
        ..............

            nRegions -= nParallelRegions;
        }

        for (auto& task : tasks)
            task->get_future().wait();
    } else {
        layout_regions(this, compositeBuffer, m_layout);
    }
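
The elided part splits m_layout into slices of nParallelRegions and runs layout_regions on each slice as a packaged_task, then waits on all futures. A self-contained sketch of that fork/join pattern (using std:: instead of boost:: so it compiles stand-alone; OWT posts the tasks to its own thread pool):

#include <algorithm>
#include <cstdio>
#include <future>
#include <memory>
#include <thread>
#include <vector>

static void composeRegions(int first, int count) {
    printf("composing regions [%d, %d)\n", first, first + count);
}

int main() {
    const int nRegions = 10, slice = 4;   // 10 regions, 4 per parallel task
    std::vector<std::shared_ptr<std::packaged_task<void()>>> tasks;

    for (int first = 0; first < nRegions; first += slice) {
        int count = std::min(slice, nRegions - first);
        auto task = std::make_shared<std::packaged_task<void()>>(
            [first, count] { composeRegions(first, count); });
        tasks.push_back(task);
        std::thread([task] { (*task)(); }).detach();  // OWT posts to a pool instead
    }

    for (auto& task : tasks)
        task->get_future().wait();  // join: every slice is drawn before encoding
    return 0;
}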


    // Draw each input into compositeBuffer.
    # vi source/agent/video/videoMixer/SoftVideoCompositor.cpp +525
   void SoftFrameGenerator::layout_regions(SoftFrameGenerator *t, rtc::scoped_refptr<webrtc::I420Buffer> compositeBuffer, const LayoutSolution &regions)
{
    for (LayoutSolution::const_iterator it = regions.begin(); it != regions.end(); ++it) {
        boost::shared_ptr<webrtc::VideoFrame> inputFrame = t->m_owner->getInputFrame(it->input);

rtc::scoped_refptr<webrtc::VideoFrameBuffer> inputBuffer = inputFrame->video_frame_buffer();

int ret = libyuv::I420Scale(
                inputBuffer->DataY() + src_y * inputBuffer->StrideY() + src_x, inputBuffer->StrideY(),
                inputBuffer->DataU() + (src_y * inputBuffer->StrideU() + src_x) / 2, inputBuffer->StrideU(),
                inputBuffer->DataV() + (src_y * inputBuffer->StrideV() + src_x) / 2, inputBuffer->StrideV(),
                src_width, src_height,
                compositeBuffer->MutableDataY() + dst_y * compositeBuffer->StrideY() + dst_x, compositeBuffer->StrideY(),
                compositeBuffer->MutableDataU() + (dst_y * compositeBuffer->StrideU() + dst_x) / 2, compositeBuffer->StrideU(),
                compositeBuffer->MutableDataV() + (dst_y * compositeBuffer->StrideV() + dst_x) / 2, compositeBuffer->StrideV(),
                cropped_dst_width, cropped_dst_height,
                libyuv::kFilterBox);
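
The /2 on the U and V offsets follows from I420's 4:2:0 layout: chroma is subsampled 2x2, so luma pixel (x, y) maps to chroma sample (x/2, y/2), and the chroma strides are half the luma stride. With even-aligned region origins, the two divisions fold into one, as this little check shows (hypothetical numbers for illustration):

#include <cstdio>

int main() {
    const int strideY = 1280, strideU = 640;  // chroma stride = luma stride / 2
    const int src_x = 320, src_y = 180;       // region origin, even-aligned

    int offY = src_y * strideY + src_x;               // full-res luma offset
    int offU = (src_y / 2) * strideU + (src_x / 2);   // half-res chroma offset
    int folded = (src_y * strideU + src_x) / 2;       // the form used above

    printf("Y=%d U/V=%d folded=%d\n", offY, offU, folded);  // offU == folded
    return 0;
}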



// Send the composited image to the encoder.
# vi source/agent/video/videoMixer/SoftVideoCompositor.cpp +404
void SoftFrameGenerator::onTimeout()
    rtc::scoped_refptr<webrtc::VideoFrameBuffer> compositeBuffer = generateFrame();
    webrtc::VideoFrame compositeFrame(
            compositeBuffer,
            webrtc::kVideoRotation_0,
            m_clock->TimeInMilliseconds()
            );
    // Convert it into an owt_base::Frame in I420 format.
            owt_base::Frame frame;
            memset(&frame, 0, sizeof(frame));
            frame.format = owt_base::FRAME_FORMAT_I420;
            frame.payload = reinterpret_cast<uint8_t*>(&compositeFrame);

            m_textDrawer->drawFrame(frame);


    // Deliver to the encoders: m_outputs holds, for each fps, encoders of different sizes.
                for (uint32_t i = 0; i <  m_outputs.size(); i++) {
                    for (auto it = m_outputs[i].begin(); it != m_outputs[i].end(); ++it) {
                        it->dest->onFrame(frame);
                    }
                }

Encoding output flow:

// Convert the Frame and start encoding it.
    # vi source/core/owt_base/VCMFrameEncoder.cpp +311
void VCMFrameEncoder::onFrame(const Frame& frame)
{
    boost::shared_lock<boost::shared_mutex> lock(m_mutex);

    if (m_streams.size() == 0) {
        return;
    }

    boost::shared_ptr<webrtc::VideoFrame> videoFrame = frameConvert(frame);
    if (videoFrame == NULL) {
        return;
    }

    m_srv->post(boost::bind(&VCMFrameEncoder::Encode, this, videoFrame));
}

// The bind above makes an asynchronous call that ultimately invokes VCMFrameEncoder::encode
static void Encode(VCMFrameEncoder *This, boost::shared_ptr<webrtc::VideoFrame> videoFrame) {This->encode(videoFrame);};
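
m_srv here is a per-encoder boost::asio service with its own worker thread, so onFrame (running on the generator's timer thread) returns immediately and the heavy encode runs on the encoder thread. The pattern in miniature (single-threaded run() instead of a dedicated worker, which is enough to show post + bind):

#include <boost/asio.hpp>
#include <boost/bind/bind.hpp>
#include <cstdio>

static void encodeFrame(int frameId) { printf("encoding frame %d\n", frameId); }

int main() {
    boost::asio::io_service srv;

    // post() queues the handler and returns immediately, like m_srv->post(...).
    srv.post(boost::bind(&encodeFrame, 1));
    srv.post(boost::bind(&encodeFrame, 2));

    srv.run();  // the worker loop: executes queued handlers in order
    return 0;
}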

void VCMFrameEncoder::encode(boost::shared_ptr<webrtc::VideoFrame> frame)
{

    if (m_width != frame->width() || m_height != frame->height()) {
        ret = m_encoder->SetResolution(frame->width(), frame->height());
    }

    if (m_updateBitrateKbps) {
            ret = m_encoder->SetRateAllocation(bitrate, m_frameRate);
    }
    if (m_requestKeyFrame) {
        types.push_back(kVideoFrameKey);
        m_requestKeyFrame = false;
    }

    ret = m_encoder->Encode(*frame.get(), NULL, types.size() ? &types : NULL);

Reference:

https://github.com/winlinvip/owt-docker/blob/master/CodeVideo.md#gdb-debug
