Enabling the watermark feature in owt-server

Please credit the source when reposting: https://blog.csdn.net/impingo
Project repository: https://github.com/im-pingo/pingos
Project website: https://pingos.io

Problem description

owt-server ships with the watermark feature disabled by default, and judging from the code it was only ever meant to support text watermarks.
Modeled on the existing FFmpegDrawText class, I wrote a video filter class that supports both text watermarks and image watermarks. As for the calling interface, you will have to modify the Node.js layer yourself to hook the class up.
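For reference, here is a minimal usage sketch. The wiring into the media pipeline, the frame rate value, and the font/logo paths are placeholders of my own, not part of owt-server; the filter strings themselves are standard FFmpeg filter syntax:

owt_base::FFmpegVideoFilter watermark;

// Text watermark via drawtext (the fontfile path is a placeholder).
watermark.setDesc(30, "drawtext=fontfile=/path/to/font.ttf:text='pingos':x=20:y=20:fontsize=28:fontcolor=white");

// Image watermark: load a picture with the movie source and overlay it
// 20 pixels from the top-right corner (the logo path is a placeholder).
// watermark.setDesc(30, "movie=/path/to/logo.png[wm];[in][wm]overlay=W-w-20:20");

watermark.enable(true);

// Then, for every I420 frame flowing through the pipeline:
// watermark.drawFrame(frame);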

FFmpegVideoFilter.h:

#ifndef FFmpegVideoFilter_h
#define FFmpegVideoFilter_h

#include <boost/scoped_ptr.hpp>
#include <boost/shared_ptr.hpp>
#include <logger.h>

#include <webrtc/api/video/video_frame.h>
#include <webrtc/api/video/i420_buffer.h>

#include "MediaFramePipeline.h"

extern "C" {
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/mathematics.h>
#include <libavutil/time.h>
#include <libavutil/opt.h>
}

namespace owt_base {

class FFmpegVideoFilter {
    DECLARE_LOGGER();

public:
    FFmpegVideoFilter();
    ~FFmpegVideoFilter();

    int drawFrame(Frame&);
    int setDesc(int frame_rate, std::string arg);
    void enable(bool enabled) {m_enabled = enabled;}

protected:
    bool init(int width, int height, int frame_rate, const char desc[]);
    int configure(std::string arg);
    void deinit();

    int copyFrame(AVFrame *dstAVFrame, Frame &srcFrame);
    int copyFrame(Frame &dstFrame, AVFrame *srcAVFrame);

private:
    AVFilterGraph *m_filter_graph;
    AVFilterContext *m_buffersink_ctx;
    AVFilterContext *m_buffersrc_ctx;

    AVFrame *m_input_frame;
    AVFrame *m_filt_frame;

    int m_width;
    int m_height;

    std::string m_filter_desc;
    bool m_reconfigured;
    bool m_validConfig;

    bool m_enabled;

    char m_errbuff[500];
    char *ff_err2str(int errRet);
    int m_frame_rate;
};

} /* namespace owt_base */

#endif /* FFmpegVideoFilter_h */

FFmpegVideoFilter.cpp:

#include "FFmpegVideoFilter.h"

#include <libyuv/convert.h>
#include <libyuv/planar_functions.h>
#include <libyuv/scale.h>

using namespace webrtc;

namespace owt_base {

DEFINE_LOGGER(FFmpegVideoFilter, "owt.FFmpegVideoFilter");

FFmpegVideoFilter::FFmpegVideoFilter()
    : m_filter_graph(NULL)
    , m_buffersink_ctx(NULL)
    , m_buffersrc_ctx(NULL)
    , m_input_frame(NULL)
    , m_filt_frame(NULL)
    , m_width(0)
    , m_height(0)
    , m_reconfigured(false)
    , m_validConfig(false)
    , m_enabled(false)
    , m_frame_rate(0)
{
}

FFmpegVideoFilter::~FFmpegVideoFilter()
{
    deinit();
}

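// Build a linear filter graph of the form:
//   buffer source ("in") -> <desc> -> buffersink ("out")
// where desc is any FFmpeg filter description, e.g. drawtext for a text
// watermark or movie+overlay for an image watermark.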
bool FFmpegVideoFilter::init(int width, int height, int frame_rate, const char desc[])
{
    int ret = -1;
    const AVFilter *buffersrc  = avfilter_get_by_name("buffer");
    const AVFilter *buffersink = avfilter_get_by_name("buffersink");
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
    char src_args[512];

    ELOG_TRACE_T("init: %s", desc);

    auto filter_outputs = avfilter_inout_alloc();
    auto filter_inputs  = avfilter_inout_alloc();

    m_filter_graph = avfilter_graph_alloc();
    if (!filter_outputs || !filter_inputs || !m_filter_graph) {
        ELOG_ERROR_T("Cannot alloc filter resource");
        goto end;
    }

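    // The buffer source must be told the geometry, pixel format and
    // time base of the frames it will be fed.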
    snprintf(src_args, sizeof(src_args),
            "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d",
            width, height, AV_PIX_FMT_YUV420P, 1, frame_rate);
    ret = avfilter_graph_create_filter(&m_buffersrc_ctx, buffersrc, "in",
            src_args, NULL, m_filter_graph);
    if (ret < 0) {
        ELOG_ERROR_T("Cannot create buffer source");
        goto end;
    }

    ret = avfilter_graph_create_filter(&m_buffersink_ctx, buffersink, "out",
            NULL, NULL, m_filter_graph);
    if (ret < 0) {
        ELOG_ERROR_T("Cannot create buffer sink");
        goto end;
    }

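    // Constrain the sink to I420 so the filtered frame can be copied
    // straight back into the WebRTC I420 buffer.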
    ret = av_opt_set_int_list(m_buffersink_ctx, "pix_fmts", pix_fmts,
            AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        ELOG_ERROR_T("Cannot set output pixel format");
        goto end;
    }

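    // Label the endpoints for avfilter_graph_parse_ptr(): the source's
    // output pad is "[in]" and the sink's input pad is "[out]" from the
    // point of view of the filter description.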
    filter_outputs->name       = av_strdup("in");
    filter_outputs->filter_ctx = m_buffersrc_ctx;
    filter_outputs->pad_idx    = 0;
    filter_outputs->next       = NULL;

    filter_inputs->name       = av_strdup("out");
    filter_inputs->filter_ctx = m_buffersink_ctx;
    filter_inputs->pad_idx    = 0;
    filter_inputs->next       = NULL;

    ret = avfilter_graph_parse_ptr(m_filter_graph, desc,
            &filter_inputs, &filter_outputs, NULL);
    if (ret < 0) {
        ELOG_ERROR_T("Cannot parse graph config: %s", desc);
        goto end;
    }

    ret = avfilter_graph_config(m_filter_graph, NULL);
    if (ret < 0) {
        ELOG_ERROR_T("Cannot set graph config");
        goto end;
    }

    m_input_frame = av_frame_alloc();
    m_filt_frame = av_frame_alloc();
    if (!m_input_frame || !m_filt_frame) {
        ELOG_ERROR_T("Could not allocate av frames");
        goto end;
    }

    m_height = height;
    m_width = width;

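    // Pre-allocate a reusable input frame with 32-byte-aligned buffers.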
    m_input_frame->format = AV_PIX_FMT_YUV420P;
    m_input_frame->width  = m_width;
    m_input_frame->height = m_height;
    ret = av_frame_get_buffer(m_input_frame, 32);
    if (ret < 0) {
        ELOG_ERROR_T("Could not get  av frame buffer");
        goto end;
    }

    if (filter_inputs) {
        avfilter_inout_free(&filter_inputs);
        filter_inputs = NULL;
    }

    if (filter_outputs) {
        avfilter_inout_free(&filter_outputs);
        filter_outputs = NULL;
    }

    return true;

end:
    if (m_input_frame) {
        av_frame_free(&m_input_frame);
        m_input_frame = NULL;
    }

    if (m_filt_frame) {
        av_frame_free(&m_filt_frame);
        m_filt_frame = NULL;
    }

    if (filter_inputs) {
        avfilter_inout_free(&filter_inputs);
        filter_inputs = NULL;
    }

    if (filter_outputs) {
        avfilter_inout_free(&filter_outputs);
        filter_outputs = NULL;
    }

    if (m_filter_graph) {
        avfilter_graph_free(&m_filter_graph);
        m_filter_graph = NULL;
    }

    m_buffersrc_ctx = NULL;
    m_buffersink_ctx = NULL;

    return false;
}

void FFmpegVideoFilter::deinit()
{
    if (m_input_frame) {
        av_frame_free(&m_input_frame);
        m_input_frame = NULL;
    }

    if (m_filt_frame) {
        av_frame_free(&m_filt_frame);
        m_filt_frame = NULL;
    }

    if (m_filter_graph) {
        avfilter_graph_free(&m_filter_graph);
        m_filter_graph = NULL;
    }

    m_buffersrc_ctx = NULL;
    m_buffersink_ctx = NULL;
}

int FFmpegVideoFilter::configure(std::string arg)
{
    int ret;

    ELOG_INFO_T("config: %s", arg.c_str());

    if (!m_filter_graph) {
        ELOG_TRACE_T("Invalid filter graph");
        return 0;
    }

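    // Push the new argument string to the running drawtext instance;
    // drawtext supports the "reinit" command, so the text can be changed
    // without tearing down the whole graph.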
    ret = avfilter_graph_send_command(m_filter_graph, "drawtext", "reinit", arg.c_str(), NULL, 0, 0);
    if (ret < 0) {
        ELOG_ERROR_T("Cannot configure filter: %s", arg.c_str());
        return 0;
    }

    return 1;
}

int FFmpegVideoFilter::setDesc(int frame_rate, std::string arg)
{
    m_frame_rate = frame_rate;
    m_filter_desc = arg;
    m_reconfigured = true;

    return 1;
}

int FFmpegVideoFilter::drawFrame(Frame& frame)
{
    int ret;
    bool changed = false;

    switch (frame.format) {
        case FRAME_FORMAT_I420:
            break;

        default:
            ELOG_TRACE_T("Unspported video frame format: %s", getFormatStr(frame.format));
            return 0;
    }

    if (frame.additionalInfo.video.width == 0
        || frame.additionalInfo.video.height == 0) {
        ELOG_ERROR_T("Invalid size: %dx%d",
                frame.additionalInfo.video.width,
                frame.additionalInfo.video.height
                );
        return 0;
    }

    if (m_width != frame.additionalInfo.video.width
            || m_height != frame.additionalInfo.video.height) {
        ELOG_DEBUG_T("re-config size: %dx%d -> %dx%d",
                m_width, m_height,
                frame.additionalInfo.video.width,
                frame.additionalInfo.video.height);

        m_width = frame.additionalInfo.video.width;
        m_height = frame.additionalInfo.video.height;
        changed = true;
    }

    if (!m_enabled) {
        return 1;
    }

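    // Rebuild the graph whenever the filter description or the frame
    // size has changed since the last frame.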
    if (m_reconfigured || changed) {
        m_validConfig = false;

        deinit();
        if (init(m_width, m_height, m_frame_rate, m_filter_desc.c_str()) == true) {
            m_validConfig = true;
        }

        m_reconfigured = false;
        changed = false;
    }

    if (!m_validConfig) {
        return 1;
    }

    if (!m_filter_graph) {
        ELOG_TRACE_T("filter graph not ready!");
        return 0;
    }

    ELOG_TRACE_T("do drawFrame");

    if (!copyFrame(m_input_frame, frame)) {
        return 0;
    }

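    // Frames are filtered one at a time, so no real timestamps are
    // propagated through the graph.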
    m_input_frame->pts = 0;
    if (av_buffersrc_add_frame_flags(m_buffersrc_ctx, m_input_frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
        ELOG_ERROR_T("Error while feeding the filtergraph");
        return 0;
    }
    /* pull filtered frames from the filtergraph */
    while (1) {
        ret = av_buffersink_get_frame(m_buffersink_ctx, m_filt_frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            break;
        }
        if (ret < 0) {
            ELOG_ERROR_T("av_buffersink_get_frame error");
            return 0;
        }

        if (!copyFrame(frame, m_filt_frame)) {
            return 0;
        }

        av_frame_unref(m_filt_frame);
    }

    return 1;
}

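// Copy (or scale, if the sizes differ) the WebRTC I420 buffer into the
// pre-allocated AVFrame that feeds the filter graph.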
int FFmpegVideoFilter::copyFrame(AVFrame *dstAVFrame, Frame &srcFrame)
{
    int ret;
    VideoFrame *videoFrame = reinterpret_cast<VideoFrame*>(srcFrame.payload);
    rtc::scoped_refptr<webrtc::VideoFrameBuffer> i420Buffer = videoFrame->video_frame_buffer();

    av_frame_make_writable(dstAVFrame);

    if (i420Buffer->width() == dstAVFrame->width && i420Buffer->height() == dstAVFrame->height) {
        ret = libyuv::I420Copy(
                i420Buffer->DataY(), i420Buffer->StrideY(),
                i420Buffer->DataU(), i420Buffer->StrideU(),
                i420Buffer->DataV(), i420Buffer->StrideV(),
                dstAVFrame->data[0], dstAVFrame->linesize[0],
                dstAVFrame->data[1], dstAVFrame->linesize[1],
                dstAVFrame->data[2], dstAVFrame->linesize[2],
                i420Buffer->width(), i420Buffer->height());
        if (ret != 0) {
            ELOG_ERROR_T("libyuv::I420Copy failed(%d)", ret);
            return false;
        }
    } else {
        ret = libyuv::I420Scale(
                i420Buffer->DataY(), i420Buffer->StrideY(),
                i420Buffer->DataU(), i420Buffer->StrideU(),
                i420Buffer->DataV(), i420Buffer->StrideV(),
                i420Buffer->width(), i420Buffer->height(),
                dstAVFrame->data[0], dstAVFrame->linesize[0],
                dstAVFrame->data[1], dstAVFrame->linesize[1],
                dstAVFrame->data[2], dstAVFrame->linesize[2],
                dstAVFrame->width,   dstAVFrame->height,
                libyuv::kFilterBox);
        if (ret != 0) {
            ELOG_ERROR_T("libyuv::I420Scale failed(%d)", ret);
            return false;
        }
    }

    return true;
}

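// The reverse direction: copy or scale the filtered AVFrame back into
// the WebRTC frame buffer, overwriting the original picture.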
int FFmpegVideoFilter::copyFrame(Frame &dstFrame, AVFrame *srcAVFrame)
{
    int ret;
    VideoFrame *videoFrame = reinterpret_cast<VideoFrame*>(dstFrame.payload);
    rtc::scoped_refptr<webrtc::VideoFrameBuffer> i420Buffer = videoFrame->video_frame_buffer();

    if (i420Buffer->width() == srcAVFrame->width && i420Buffer->height() == srcAVFrame->height) {
        ret = libyuv::I420Copy(
                srcAVFrame->data[0], srcAVFrame->linesize[0],
                srcAVFrame->data[1], srcAVFrame->linesize[1],
                srcAVFrame->data[2], srcAVFrame->linesize[2],
                const_cast< uint8_t*>(i420Buffer->DataY()), i420Buffer->StrideY(),
                const_cast< uint8_t*>(i420Buffer->DataU()), i420Buffer->StrideU(),
                const_cast< uint8_t*>(i420Buffer->DataV()), i420Buffer->StrideV(),
                i420Buffer->width(), i420Buffer->height());
        if (ret != 0) {
            ELOG_ERROR_T("libyuv::I420Copy failed(%d)", ret);
            return false;
        }
    } else {
        ret = libyuv::I420Scale(
                srcAVFrame->data[0], srcAVFrame->linesize[0],
                srcAVFrame->data[1], srcAVFrame->linesize[1],
                srcAVFrame->data[2], srcAVFrame->linesize[2],
                srcAVFrame->width, srcAVFrame->height,
                const_cast< uint8_t*>(i420Buffer->DataY()), i420Buffer->StrideY(),
                const_cast< uint8_t*>(i420Buffer->DataU()), i420Buffer->StrideU(),
                const_cast< uint8_t*>(i420Buffer->DataV()), i420Buffer->StrideV(),
                i420Buffer->width(), i420Buffer->height(),
                libyuv::kFilterBox);
        if (ret != 0) {
            ELOG_ERROR_T("libyuv::I420Scale failed(%d)", ret);
            return false;
        }
    }

    return true;
}

char *FFmpegVideoFilter::ff_err2str(int errRet)
{
    av_strerror(errRet, m_errbuff, sizeof(m_errbuff));
    return m_errbuff;
}

} /* namespace owt_base */

QQ group: 697773082

WeChat: cczjp1989

