1 Command Line
FFmpeg command lines:
// Overlay a watermark.
// overlay=5:15 places the image 5 pixels from the left edge and 15 pixels from the top edge.
ffmpeg -i test.flv -vf "movie=my_logo.png[wm];[in][wm]overlay=5:15[out]" out.flv
// Remove a watermark with the delogo filter: x/y is the top-left corner of the region, w/h its size; show=1 draws a rectangle around it, which helps when tuning the coordinates.
ffmpeg -i test.flv -vf "delogo=x=5:y=15:w=180:h=60:show=0" out.flv
-vf: for simple filter chains that process a single input.
-filter_complex: for complex filter graphs with multiple inputs or outputs.
ffmpeg -i input.mp4 -i watermark.png -filter_complex "overlay=W-w-10:H-h-10" output.mp4
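In the command above, W and H are the width and height of the main video and w and h those of the watermark, so W-w-10:H-h-10 pins the image 10 pixels from the bottom-right corner. A variant I find handy but have not verified against this exact build, scaling the watermark to 120 pixels wide (aspect ratio preserved by -1) and centering it; input file names are placeholders:
ffmpeg -i input.mp4 -i watermark.png -filter_complex "[1:v]scale=120:-1[wm];[0:v][wm]overlay=(W-w)/2:(H-h)/2" output.mp4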
2 Code
FFmpeg version 5.1.2; the DLLs come from the ffmpeg-5.1.2-full_build-shared package (x64).
The code targets Windows and is built with Visual Studio.
How to use it: create a new Win32 project, copy in the three source files below, and point the project at the FFmpeg include and lib directories.
The essential background: the input has to be decoded, the image is overlaid onto the decoded YUV frames by a filter graph, and the result is then re-encoded. Per frame the data flows av_read_frame → avcodec_send_packet/avcodec_receive_frame → av_buffersrc_add_frame_flags → av_buffersink_get_frame → write (or encode).
The command line is more convenient.
This builds on the code from [FFmpeg series 03] Decoding a file or stream URL (video) to YUV; the filtered frames are written straight to a .yuv file to verify that the watermark takes effect. Normally the frames would be re-encoded.
/*
author: ashleycoder
CSDN blog: https://blog.csdn.net/chenquangobeijing
*/
#pragma once
#include <string>
extern "C"
{
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"
#include "libavutil/avutil.h"
#include "libavutil/mathematics.h"
#include "libavutil/time.h"
#include "libavutil/pixdesc.h"
#include "libavutil/display.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
};
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "avfilter.lib")
class CAddWatermark
{
public:
CAddWatermark();
~CAddWatermark();
public:
int DecodeH264();
int Start();
int Close();
int DecodeH264File_Init();
int ReleaseDecode();
void H264Decode_Thread_Fun();
std::string dup_wchar_to_utf8(const wchar_t* wstr);
double get_rotation(AVStream *st);
public:
AVFormatContext* m_pInputFormatCtx = nullptr;
AVCodecContext* m_pVideoDecodeCodecCtx = nullptr;
const AVCodec* m_pCodec = nullptr;
SwsContext* m_pSwsContext = nullptr;
AVFrame* m_pFrameScale = nullptr;
AVFrame* m_pFrameYUV = nullptr;
AVPacket* m_pAVPacket = nullptr;
enum AVMediaType m_CodecType;
int m_output_pix_fmt = AV_PIX_FMT_YUV420P; // never assigned elsewhere; default it to the sink's format
int m_nVideoStream = -1;
int m_nFrameHeight = 0;
int m_nFrameWidth = 0;
int m_nFPS = 0;
int m_nVideoSeconds = 0;
public:
AVFilterContext* m_BufferSinkCtx = nullptr;
AVFilterContext* m_BufferSrcCtx = nullptr; // "src" means source: the graph's input
AVFilterGraph* m_FilterGraph = nullptr;
int Filters_Init(const char *FiltersDescr);
//const char* filter_descr = "movie=my_logo.png[wm];[in][wm]overlay=5:5[out]";
const char* filter_descr = "movie=xiaohongshu.png[wm];[in][wm]overlay=40:40[out]";
FILE* m_pfOutYUV = nullptr;
FILE* m_pfOutYUV2 = nullptr;
char av_error[AV_ERROR_MAX_STRING_SIZE] = { 0 };
#undef av_err2str // the stock macro uses a C compound literal, which MSVC C++ rejects
#define av_err2str(errnum) av_make_error_string(av_error, AV_ERROR_MAX_STRING_SIZE, errnum)
};
/*
author: ashleycoder
CSDN blog: https://blog.csdn.net/chenquangobeijing
*/
#include "AddWatermark.h"
#include <thread>
#include <functional>
#include <codecvt>
#include <locale>
CAddWatermark::CAddWatermark()
{
}
CAddWatermark::~CAddWatermark()
{
ReleaseDecode();
}
std::string CAddWatermark::dup_wchar_to_utf8(const wchar_t* wstr)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;
return converter.to_bytes(wstr);
}
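// std::wstring_convert is deprecated since C++17. A sketch of an alternative
// (a hypothetical helper, not part of the original class) using the Win32 API
// directly; it assumes <Windows.h> is included, as it is in main.cpp.
static std::string wchar_to_utf8_win32(const wchar_t* wstr)
{
    // First call computes the required size in bytes, including the terminator.
    int len = WideCharToMultiByte(CP_UTF8, 0, wstr, -1, nullptr, 0, nullptr, nullptr);
    std::string result(len > 1 ? len - 1 : 0, '\0');
    if (len > 1)
        WideCharToMultiByte(CP_UTF8, 0, wstr, -1, &result[0], len, nullptr, nullptr);
    return result;
}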
int CAddWatermark::DecodeH264File_Init()
{
m_pInputFormatCtx = avformat_alloc_context();
//std::string strFilename = dup_wchar_to_utf8(L"cuc_ieschool.flv");
std::string strFilename = dup_wchar_to_utf8(L"flower.h264");
int ret = avformat_open_input(&m_pInputFormatCtx, strFilename.c_str(), nullptr, nullptr);
if (ret != 0) {
char* err_str = av_err2str(ret);
printf("fail to open filename: %s, return value: %d, %s\n", strFilename.c_str(), ret, err_str);
return -1;
}
ret = avformat_find_stream_info(m_pInputFormatCtx, nullptr);
if (ret < 0) {
char* err_str = av_err2str(ret);
printf("fail to get stream information: %d, %s\n", ret, err_str);
return -1;
}
for (unsigned int i = 0; i < m_pInputFormatCtx->nb_streams; ++i) {
const AVStream* stream = m_pInputFormatCtx->streams[i];
if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
m_nVideoStream = i;
printf("type of the encoded data: %d, dimensions of the video frame in pixels: width: %d, height: %d, pixel format: %d\n",
stream->codecpar->codec_id, stream->codecpar->width, stream->codecpar->height, stream->codecpar->format);
}
}
if (m_nVideoStream == -1) {
printf("no video stream\n");
return -1;
}
printf("m_nVideoStream=%d\n", m_nVideoStream);
m_pVideoDecodeCodecCtx = avcodec_alloc_context3(nullptr);
avcodec_parameters_to_context(m_pVideoDecodeCodecCtx, m_pInputFormatCtx->streams[m_nVideoStream]->codecpar);
// A raw .h264 file carries no container timing, so fill in defaults after
// copying the parameters (avcodec_parameters_to_context would otherwise
// overwrite the sample aspect ratio); the filter graph args depend on these.
m_pVideoDecodeCodecCtx->time_base.num = 1;
m_pVideoDecodeCodecCtx->time_base.den = 25;
if (m_pVideoDecodeCodecCtx->sample_aspect_ratio.num == 0)
{
    m_pVideoDecodeCodecCtx->sample_aspect_ratio = { 1, 1 };
}
m_pCodec = avcodec_find_decoder(m_pVideoDecodeCodecCtx->codec_id);
if (m_pCodec == nullptr)
{
return -1;
}
m_nFrameHeight = m_pVideoDecodeCodecCtx->height;
m_nFrameWidth = m_pVideoDecodeCodecCtx->width;
printf("w=%d h=%d\n", m_pVideoDecodeCodecCtx->width, m_pVideoDecodeCodecCtx->height);
printf("pix_fmt=%d\n", m_pVideoDecodeCodecCtx->pix_fmt);
if (avcodec_open2(m_pVideoDecodeCodecCtx, m_pCodec, nullptr) < 0)
{
return -1;
}
int nRet = Filters_Init(filter_descr);
if (nRet<0)
{
return -1;
}
m_pSwsContext = sws_getContext(m_pVideoDecodeCodecCtx->width, m_pVideoDecodeCodecCtx->height,
m_pVideoDecodeCodecCtx->pix_fmt, m_pVideoDecodeCodecCtx->width, m_pVideoDecodeCodecCtx->height,
(AVPixelFormat)m_output_pix_fmt, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);
m_pFrameScale = av_frame_alloc();
m_pFrameScale->format = m_output_pix_fmt;
m_pFrameYUV = av_frame_alloc();
m_pFrameYUV->format = m_output_pix_fmt;
m_pFrameYUV->width = m_pVideoDecodeCodecCtx->width;
m_pFrameYUV->height = m_pVideoDecodeCodecCtx->height;
printf("m_pFrameYUV pix_fmt=%d\n", m_pVideoDecodeCodecCtx->pix_fmt);
av_frame_get_buffer(m_pFrameYUV, 64); // 64-byte alignment; passing 0 lets FFmpeg choose a suitable default
char cYUVName[256];
sprintf_s(cYUVName, "%d_%d_%s.yuv", m_nFrameWidth, m_nFrameHeight, av_get_pix_fmt_name(m_pVideoDecodeCodecCtx->pix_fmt));
fopen_s(&m_pfOutYUV, cYUVName, "wb");
char cYUVName2[256];
sprintf_s(cYUVName2, "%d_%d_%s_2.yuv", m_nFrameWidth, m_nFrameHeight, av_get_pix_fmt_name(m_pVideoDecodeCodecCtx->pix_fmt));
fopen_s(&m_pfOutYUV2, cYUVName2, "wb");
printf("leave init\n");
return 0;
}
int CAddWatermark::Filters_Init(const char *FiltersDescr)
{
char args[512];
int ret;
const AVFilter *buffersrc = avfilter_get_by_name("buffer");
const AVFilter *buffersink = avfilter_get_by_name("buffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, m_pVideoDecodeCodecCtx->pix_fmt, AV_PIX_FMT_NONE };
// AVBufferSinkParams was removed in FFmpeg 5.0; the accepted pixel formats
// are set below with av_opt_set_int_list instead.
m_FilterGraph = avfilter_graph_alloc();
/* buffer video source: the decoded frames from the decoder will be inserted here. */
sprintf_s(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
m_pVideoDecodeCodecCtx->width, m_pVideoDecodeCodecCtx->height, m_pVideoDecodeCodecCtx->pix_fmt,
m_pVideoDecodeCodecCtx->time_base.num, m_pVideoDecodeCodecCtx->time_base.den,
m_pVideoDecodeCodecCtx->sample_aspect_ratio.num, m_pVideoDecodeCodecCtx->sample_aspect_ratio.den);
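// For example, with an assumed 640x480 yuv420p input and the defaults set in
// DecodeH264File_Init, the string built above would read:
//   video_size=640x480:pix_fmt=0:time_base=1/25:pixel_aspect=1/1
// where pix_fmt=0 is AV_PIX_FMT_YUV420P.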
ret = avfilter_graph_create_filter(&m_BufferSrcCtx, buffersrc, "in",
args, nullptr, m_FilterGraph);
if (ret < 0)
{
char* err_str = av_err2str(ret);
return -1;
}
/* buffer video sink: terminates the filter chain. */
ret = avfilter_graph_create_filter(&m_BufferSinkCtx, buffersink, "out",
    nullptr, nullptr, m_FilterGraph);
if (ret < 0)
{
char* err_str = av_err2str(ret);
return ret;
}
// The list terminator must be AV_PIX_FMT_NONE; with 0 the macro stops at the
// first element (AV_PIX_FMT_YUV420P == 0) and sets an empty list, leaving the
// sink unconstrained.
ret = av_opt_set_int_list(m_BufferSinkCtx, "pix_fmts", pix_fmts,
    AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
    return ret;
/* Endpoints for the filter graph. */
outputs->name = av_strdup("in");
outputs->filter_ctx = m_BufferSrcCtx;
outputs->pad_idx = 0;
outputs->next = nullptr;
inputs->name = av_strdup("out");
inputs->filter_ctx = m_BufferSinkCtx;
inputs->pad_idx = 0;
inputs->next = nullptr;
// A negative return here usually means the PNG named in FiltersDescr cannot
// be found: the video and the image must be in the same directory (the
// process's working directory).
ret = avfilter_graph_parse_ptr(m_FilterGraph, FiltersDescr, &inputs, &outputs, nullptr);
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
if (ret < 0)
    return ret;
ret = avfilter_graph_config(m_FilterGraph, nullptr);
if (ret < 0)
return ret;
return 0;
}
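// Sketch, not part of the original class: after avfilter_graph_config
// succeeds, the sink can report what it actually negotiated, which helps
// when debugging the pixel-format problem described in section 3.
static void PrintSinkFormat(AVFilterContext* sink)
{
    printf("sink negotiated: %dx%d, pix_fmt=%s\n",
        av_buffersink_get_w(sink),
        av_buffersink_get_h(sink),
        av_get_pix_fmt_name((AVPixelFormat)av_buffersink_get_format(sink)));
}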
void CAddWatermark::H264Decode_Thread_Fun()
{
int count = 0;
int ret;
m_pAVPacket = av_packet_alloc();
while (true) {
ret = av_read_frame(m_pInputFormatCtx, m_pAVPacket);
if (ret < 0) { // AVERROR_EOF at end of input, or a read error
av_packet_unref(m_pAVPacket);
printf("read_frame break\n");
break;
}
if (m_pAVPacket->stream_index == m_nVideoStream)
{
int send_packet_ret = avcodec_send_packet(m_pVideoDecodeCodecCtx, m_pAVPacket);
printf("encode video send_packet_ret %d\n", send_packet_ret);
// Note: a fully robust loop would call avcodec_receive_frame repeatedly
// until it returns AVERROR(EAGAIN); one call per packet is enough here.
int receive_frame_ret = avcodec_receive_frame(m_pVideoDecodeCodecCtx, m_pFrameScale);
char* err_str = av_err2str(receive_frame_ret);
printf("frame w=%d, h=%d, linesize[0]=%d, linesize[1]=%d\n", m_pFrameScale->width, m_pFrameScale->height, m_pFrameScale->linesize[0], m_pFrameScale->linesize[1]);
if (receive_frame_ret == 0)
{
++count;
m_pFrameScale->pts = m_pFrameScale->best_effort_timestamp; // give the filter graph a usable, increasing pts
ret = av_buffersrc_add_frame_flags(m_BufferSrcCtx, m_pFrameScale, AV_BUFFERSRC_FLAG_KEEP_REF);
printf("av_buffersrc_add_frame_flags ret=%d", ret);
AVFrame* pFilterFrame = av_frame_alloc(); // allocate once; the old per-iteration alloc leaked a frame each pass
while (1) {
    ret = av_buffersink_get_frame(m_BufferSinkCtx, pFilterFrame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break; // no more filtered frames for this input frame
    if (ret < 0)
        break;
    // The sink is constrained to yuv420p, so the three planes can be written
    // directly. This assumes linesize[0] == width, which holds for common sizes.
    fwrite(pFilterFrame->data[0], 1, m_nFrameWidth * m_nFrameHeight, m_pfOutYUV);
    fwrite(pFilterFrame->data[1], 1, m_nFrameWidth * m_nFrameHeight / 4, m_pfOutYUV);
    fwrite(pFilterFrame->data[2], 1, m_nFrameWidth * m_nFrameHeight / 4, m_pfOutYUV);
    // Writing row by row with linesize copes better with unusual widths:
    /*
    for (int i = 0; i < m_nFrameHeight; ++i)
        fwrite(pFilterFrame->data[0] + i * pFilterFrame->linesize[0], 1, m_nFrameWidth, m_pfOutYUV2);
    for (int i = 0; i < m_nFrameHeight / 2; ++i)
        fwrite(pFilterFrame->data[1] + i * pFilterFrame->linesize[1], 1, m_nFrameWidth / 2, m_pfOutYUV2);
    for (int i = 0; i < m_nFrameHeight / 2; ++i)
        fwrite(pFilterFrame->data[2] + i * pFilterFrame->linesize[2], 1, m_nFrameWidth / 2, m_pfOutYUV2);
    */
    // If the sink were allowed other pixel formats, m_pSwsContext/sws_scale
    // could convert to the desired output format here.
    av_frame_unref(pFilterFrame);
}
av_frame_free(&pFilterFrame);
}
}
av_packet_unref(m_pAVPacket);
}
}
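// Sketch, not in the original code: the read loop above stops at end-of-file
// without draining, so the last few buffered frames are dropped. A complete
// implementation would flush the decoder and the graph after the loop:
static void FlushDecoderAndGraph(AVCodecContext* dec, AVFilterContext* bufferSrc)
{
    avcodec_send_packet(dec, nullptr); // a null packet signals EOF to the decoder
    AVFrame* frame = av_frame_alloc();
    while (avcodec_receive_frame(dec, frame) == 0)
    {
        frame->pts = frame->best_effort_timestamp;
        av_buffersrc_add_frame_flags(bufferSrc, frame, AV_BUFFERSRC_FLAG_KEEP_REF);
        // ...then pull from the buffersink and write, exactly as in the main loop...
        av_frame_unref(frame);
    }
    av_buffersrc_add_frame_flags(bufferSrc, nullptr, 0); // EOF for the filter graph
    av_frame_free(&frame);
}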
int CAddWatermark::DecodeH264()
{
if (DecodeH264File_Init() != 0)
{
return -1;
}
// The thread is joined immediately, so decoding runs synchronously here.
auto video_func = std::bind(&CAddWatermark::H264Decode_Thread_Fun, this);
std::thread video_thread(video_func);
video_thread.join();
return 0;
}
int CAddWatermark::Start()
{
DecodeH264();
return 1;
}
int CAddWatermark::Close()
{
return 0;
}
int CAddWatermark::ReleaseDecode()
{
if (m_pSwsContext)
{
sws_freeContext(m_pSwsContext);
m_pSwsContext = nullptr;
}
if (m_pFrameScale)
{
av_frame_free(&m_pFrameScale);
}
if (m_pFrameYUV)
{
av_frame_free(&m_pFrameYUV);
}
if (m_FilterGraph)
{
    avfilter_graph_free(&m_FilterGraph); // also frees the src/sink filter contexts
}
avcodec_free_context(&m_pVideoDecodeCodecCtx); // avcodec_close alone would leak the context
avformat_close_input(&m_pInputFormatCtx);
if (m_pfOutYUV)
{
    fclose(m_pfOutYUV);
    m_pfOutYUV = nullptr;
}
if (m_pfOutYUV2)
{
    fclose(m_pfOutYUV2);
    m_pfOutYUV2 = nullptr;
}
return 0;
}
#include <iostream>
#include <Windows.h>
#include "6__AddWatermark/AddWatermark.h"
int main()
{
    CAddWatermark* m_pAddWatermark = new CAddWatermark();
    m_pAddWatermark->Start();
    delete m_pAddWatermark; // the destructor calls ReleaseDecode
    return 0;
}
3 Known Issues
For some videos the watermark overlay fails in this code, even though the same overlay works from the command line.
Possibly because the pixel format ahead of the (eventual) encode is restricted to yuv420p? The raw plane writes assume yuv420p, so inputs that decode to another format (e.g. yuvj420p or yuv422p) can fail.
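One possible fix, sketched here but not verified against every input: let the graph itself do the conversion by appending the format filter to the description, so whatever the decoder produces is converted to yuv420p before it reaches the sink:
const char* filter_descr = "movie=xiaohongshu.png[wm];[in][wm]overlay=40:40,format=yuv420p[out]";
With that in place, the pix_fmts array passed to the buffersink can be reduced to { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }.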