一、音频滤波建立
描述:
啥都不干,就建立滤波模块,然后输入输出。
总体过程参看 Filter::init 函数就行
环境:
QT5.7.1
#include <QDebug>
extern "C" {
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/dict.h>
}
#define LOG qDebug()
static void log_errstr(int ret)
{
char err[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, ret);
LOG << "error:" << err;
}
// Allocate a filter instance named `name` inside `graph_ctx` and initialize
// it from the option string `args`. On success *filt_ctx owns the new
// context; on failure *filt_ctx is NULL and a negative AVERROR is returned.
// `opaque` is accepted for signature compatibility but not used.
int filter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt,
                               const char *name, const char *args, void *opaque,
                               AVFilterGraph *graph_ctx)
{
    *filt_ctx = avfilter_graph_alloc_filter(graph_ctx, filt, name);
    if (!*filt_ctx)
        return AVERROR(ENOMEM);

    int ret = avfilter_init_str(*filt_ctx, args);
    if (ret >= 0)
        return 0;

    // Init failed: report, release the half-built context, and clear the
    // caller's pointer so it never sees a freed context.
    log_errstr(ret);
    avfilter_free(*filt_ctx);
    *filt_ctx = NULL;
    return ret;
}
// Bundle of everything needed to decode one audio stream.
// Designed for zero-initialization: `Stream s = {0};`.
struct Stream
{
    AVCodec* c;          // decoder found by av_find_best_stream()
    AVCodecContext* cc;  // codec context, released in free()
    AVStream* st;        // stream entry inside the AVFormatContext
    AVFrame* frame;      // scratch frame the decoder writes into
    // Release the members this struct owns; both helpers null the pointers
    // and tolerate NULL, so free() is safe to call more than once.
    void free(){
        av_frame_free(&frame);
        avcodec_free_context(&cc);
    }
};
// Minimal audio filter graph: abuffer (src) linked straight to abuffersink
// (sink), with the sink constrained to s16 / mono / 8000 Hz.
// Zero-initialize before use: `Filter f = {0};`.
struct Filter
{
    AVFilterContext* filt;   // unused in this example, kept for layout
    AVFilterContext* src;    // "abuffer" input filter
    AVFilterContext* sink;   // "abuffersink" output filter
    AVFilterGraph* graph;

    // Build and configure the graph from the decoder context `cc`.
    // `descr` is accepted for interface compatibility but not used here
    // (this variant links src->sink directly instead of parsing a string).
    // Returns 0 on success or a negative AVERROR code.
    int init(const char* descr, AVCodecContext* cc)
    {
        int ret = 0;
        const AVFilter* abuffer    = avfilter_get_by_name("abuffer");
        const AVFilter* buffersink = avfilter_get_by_name("abuffersink");
        AVFilterInOut* inputs  = avfilter_inout_alloc();
        AVFilterInOut* outputs = avfilter_inout_alloc();
        char args[512] = {0}; // declared up-front: goto may not jump over an initialization in C++
        static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, (AVSampleFormat)-1 };
        static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
        static const int out_sample_rates[] = { 8000, -1 };

        //! init graph
        graph = avfilter_graph_alloc();
        if (!graph || !inputs || !outputs) {
            // BUGFIX: the original returned here directly, leaking whichever
            // of inputs/outputs had been allocated.
            ret = AVERROR(ENOMEM);
            goto end;
        }

        //! init src filter
        if (!cc->channel_layout)
            cc->channel_layout = av_get_default_channel_layout(cc->channels);

        // BUGFIX: _sprintf_p() is MSVC-only — use portable snprintf().
        // BUGFIX: time_base was printed as "%d%d" (no '/' separator), which
        // produces an invalid rational. Spaces around ':' are also removed —
        // option keys are matched verbatim by the option parser.
        // %llx + cast avoids needing the PRIX64 macro from <inttypes.h>.
        snprintf(args, sizeof(args),
                 "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%llx",
                 cc->time_base.num, cc->time_base.den,
                 cc->sample_rate,
                 av_get_sample_fmt_name(cc->sample_fmt),
                 (unsigned long long)cc->channel_layout);

        ret = filter_graph_create_filter(&src, abuffer, "in", args, NULL, graph);
        if (ret < 0) goto end;

        //! init sink filter
        sink = avfilter_graph_alloc_filter(graph, buffersink, "out");
        if (!sink) {
            // BUGFIX: the original dereferenced sink without a NULL check.
            ret = AVERROR(ENOMEM);
            goto end;
        }
        ret = av_opt_set_int_list(sink, "sample_fmts", out_sample_fmts, -1,
                                  AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }
        ret = av_opt_set_int_list(sink, "channel_layouts", out_channel_layouts, -1,
                                  AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }
        ret = av_opt_set_int_list(sink, "sample_rates", out_sample_rates, -1,
                                  AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
        ret = avfilter_init_str(sink, NULL);
        if (ret < 0) goto end;

        // link src pad 0 directly to sink pad 0
        ret = avfilter_link(src, 0, sink, 0);
        if (ret < 0) goto end;

        // config — validates links and negotiates formats
        ret = avfilter_graph_config(graph, NULL);
    end:
        avfilter_inout_free(&inputs);
        avfilter_inout_free(&outputs);
        return ret;
    }

    // Freeing the graph also frees every filter context it contains.
    void free(){
        avfilter_graph_free(&graph);
    }
};
// Decode one packet with the (deprecated) avcodec_decode_audio4 API.
// Returns stream.frame when a full frame was produced, NULL when the
// decoder needs more input. Exits the process on a hard decode error.
static AVFrame* decode(Stream& stream, AVPacket* pkt)
{
    int got = 0;
    int err = avcodec_decode_audio4(stream.cc, stream.frame, &got, pkt);
    if (err < 0) {
        log_errstr(err);
        exit(1);
    }
    return got ? stream.frame : NULL;
}
int main()
{
//
Filter filter= {0};
// open file
int ret;
AVFormatContext* ic= NULL;
AVPacket pkt= {0};
AVFrame* frame= av_frame_alloc();
av_init_packet(&pkt);
avformat_open_input(&ic, "D:/dst.flac", NULL, NULL);
ret= avformat_find_stream_info(ic, NULL);
if(ret<0)
goto end;
Stream stream= {0};
stream.st= ic->streams[av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO, -1, -1, &stream.c, 0)];
stream.cc= avcodec_alloc_context3(stream.c);
stream.cc->refcounted_frames= true;
ret= avcodec_parameters_to_context(stream.cc, stream.st->codecpar);
if(ret<0)
goto end;
stream.frame= av_frame_alloc();
if(avcodec_open2(stream.cc, stream.c, NULL)<0){
av_log(NULL, AV_LOG_ERROR, "Can't open codec.");
exit(1);
}
// init filter
char *par= "aformat=sample_fmts=s16:channel_layouts=mono:sample_rates=8000";
ret= filter.init(par, stream.cc);
if(ret<0){
log_errstr(ret);
exit(1);
}
AVFrame *tmp,
*dst= av_frame_alloc();
while(av_read_frame(ic, &pkt)>=0)
{
tmp =decode(stream, &pkt);
if(!tmp)
continue;
else
av_frame_move_ref(frame, tmp);/*移动至滤波用frame*/
// 对此frame进行滤波操作
ret= av_buffersrc_add_frame_flags(filter.src, frame, 0);
if(ret<0){
log_errstr(ret);
goto end;
}
while(1)
{
ret= av_buffersink_get_frame(filter.sink, dst);
if(AVERROR_EOF == ret || AVERROR(EAGAIN) == ret)
break;
else if(ret<0){
log_errstr(ret);
goto end;
}
LOG << "filting ok!" << dst->sample_rate << av_get_sample_fmt_name((AVSampleFormat)dst->format);
av_frame_unref(dst);
}
av_frame_unref(frame);
}
av_packet_unref(&pkt);
/*此处应该刷新流,但仅为测试就不写了*/
end:
avformat_close_input(&ic);
av_frame_free(&frame);
av_frame_free(&dst);
stream.free();
filter.free();
return 0;
}
二、两种添加滤波操作的方法
1. 最简单也最直接的方法:用 avfilter_graph_parse_ptr 直接解析滤波器描述字符串
程序目的:
输入然后用aformat改变一下格式并输出。
注意:
上一个例子中仅仅改变了帧内的format,实际输出的采样率还是44100。
必须使用aformat滤波器滤波才能保证正确输出
// 自食其力
// 从标准的音频解码添加filter
#include <QDebug>
extern "C" {
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/dict.h>
}
#define LOG qDebug()
static void log_errstr(int ret)
{
char err[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, ret);
LOG << "error:" << err;
}
// Allocate a filter instance named `name` inside `graph_ctx` and initialize
// it from the option string `args`. On success *filt_ctx owns the new
// context; on failure *filt_ctx is NULL and a negative AVERROR is returned.
// `opaque` is accepted for signature compatibility but not used.
int filter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt,
                               const char *name, const char *args, void *opaque,
                               AVFilterGraph *graph_ctx)
{
    *filt_ctx = avfilter_graph_alloc_filter(graph_ctx, filt, name);
    if (!*filt_ctx)
        return AVERROR(ENOMEM);

    int ret = avfilter_init_str(*filt_ctx, args);
    if (ret >= 0)
        return 0;

    // Init failed: report, release the half-built context, and clear the
    // caller's pointer so it never sees a freed context.
    log_errstr(ret);
    avfilter_free(*filt_ctx);
    *filt_ctx = NULL;
    return ret;
}
// Bundle of everything needed to decode one audio stream.
// Designed for zero-initialization: `Stream s = {0};`.
struct Stream
{
    AVCodec* c;          // decoder found by av_find_best_stream()
    AVCodecContext* cc;  // codec context, released in free()
    AVStream* st;        // stream entry inside the AVFormatContext
    AVFrame* frame;      // scratch frame the decoder writes into
    // Release the members this struct owns; both helpers null the pointers
    // and tolerate NULL, so free() is safe to call more than once.
    void free(){
        av_frame_free(&frame);
        avcodec_free_context(&cc);
    }
};
// Audio filter graph built with avfilter_graph_parse_ptr():
//   abuffer (src) -> "aformat=sample_rates=8000:sample_fmts=s16" -> abuffersink (sink)
// Zero-initialize before use: `Filter f = {0};`.
struct Filter
{
    AVFilterContext* filt;   // unused in this example, kept for layout
    AVFilterContext* src;    // "abuffer" input filter
    AVFilterContext* sink;   // "abuffersink" output filter
    AVFilterGraph* graph;

    // Build and configure the graph from the decoder context `cc`.
    // `descr` is accepted for interface compatibility; the parsed chain is
    // currently hard-coded below. Returns 0 or a negative AVERROR code.
    int init(const char* descr, AVCodecContext* cc)
    {
        int ret = 0;
        const AVFilter* abuffer    = avfilter_get_by_name("abuffer");
        const AVFilter* buffersink = avfilter_get_by_name("abuffersink");
        AVFilterInOut* inputs  = avfilter_inout_alloc();
        AVFilterInOut* outputs = avfilter_inout_alloc();
        char args[512] = {0}; // declared up-front: goto may not jump over an initialization in C++
        static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, (AVSampleFormat)-1 };
        static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
        static const int out_sample_rates[] = { 8000, -1 };

        //! init graph
        graph = avfilter_graph_alloc();
        if (!graph || !inputs || !outputs) {
            // BUGFIX: the original returned here directly, leaking whichever
            // of inputs/outputs had been allocated.
            ret = AVERROR(ENOMEM);
            goto end;
        }

        //! init src filter
        if (!cc->channel_layout)
            cc->channel_layout = av_get_default_channel_layout(cc->channels);

        // BUGFIX: _sprintf_p() is MSVC-only — use portable snprintf().
        // BUGFIX: time_base was printed as "%d%d" (no '/' separator), which
        // produces an invalid rational. Spaces around ':' are also removed —
        // option keys are matched verbatim by the option parser.
        // %llx + cast avoids needing the PRIX64 macro from <inttypes.h>.
        snprintf(args, sizeof(args),
                 "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%llx",
                 cc->time_base.num, cc->time_base.den,
                 cc->sample_rate,
                 av_get_sample_fmt_name(cc->sample_fmt),
                 (unsigned long long)cc->channel_layout);

        ret = filter_graph_create_filter(&src, abuffer, "in", args, NULL, graph);
        if (ret < 0) goto end;

        //! init sink filter
        sink = avfilter_graph_alloc_filter(graph, buffersink, "out");
        if (!sink) {
            // BUGFIX: the original dereferenced sink without a NULL check.
            ret = AVERROR(ENOMEM);
            goto end;
        }
        ret = av_opt_set_int_list(sink, "sample_fmts", out_sample_fmts, -1,
                                  AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }
        ret = av_opt_set_int_list(sink, "channel_layouts", out_channel_layouts, -1,
                                  AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }
        ret = av_opt_set_int_list(sink, "sample_rates", out_sample_rates, -1,
                                  AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
        ret = avfilter_init_str(sink, NULL);
        if (ret < 0) goto end;

        // Endpoints for the parsed description: the chain's output ("out")
        // feeds `sink`; its input ("in") is fed by `src`.
        // BUGFIX: the names MUST be heap copies — avfilter_inout_free()
        // av_free()s them, and the original assigned string literals,
        // which is undefined behavior when freed.
        inputs->name       = av_strdup("out");
        inputs->filter_ctx = sink;
        inputs->pad_idx    = 0;
        inputs->next       = NULL;
        //
        outputs->name       = av_strdup("in");
        outputs->filter_ctx = src;
        outputs->pad_idx    = 0;
        outputs->next       = NULL;
        if (!inputs->name || !outputs->name) {
            ret = AVERROR(ENOMEM);
            goto end;
        }

        ret = avfilter_graph_parse_ptr(graph, "aformat=sample_rates=8000:sample_fmts=s16",
                                       &inputs, &outputs, NULL);
        if (ret < 0) goto end;

        // config — validates links and negotiates formats
        ret = avfilter_graph_config(graph, NULL);
    end:
        avfilter_inout_free(&inputs);
        avfilter_inout_free(&outputs);
        return ret;
    }

    // Freeing the graph also frees every filter context it contains.
    void free(){
        avfilter_graph_free(&graph);
    }
};
// Decode one packet with the (deprecated) avcodec_decode_audio4 API.
// Returns stream.frame when a full frame was produced, NULL when the
// decoder needs more input. Exits the process on a hard decode error.
static AVFrame* decode(Stream& stream, AVPacket* pkt)
{
    int got = 0;
    int err = avcodec_decode_audio4(stream.cc, stream.frame, &got, pkt);
    if (err < 0) {
        log_errstr(err);
        exit(1);
    }
    return got ? stream.frame : NULL;
}
int main()
{
//
Filter filter= {0};
// open file
int ret;
AVFormatContext* ic= NULL;
AVPacket pkt= {0};
AVFrame* frame= av_frame_alloc();
av_init_packet(&pkt);
avformat_open_input(&ic, "D:/dst.flac", NULL, NULL);
ret= avformat_find_stream_info(ic, NULL);
if(ret<0)
goto end;
Stream stream= {0};
stream.st= ic->streams[av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO, -1, -1, &stream.c, 0)];
stream.cc= avcodec_alloc_context3(stream.c);
stream.cc->refcounted_frames= true;
ret= avcodec_parameters_to_context(stream.cc, stream.st->codecpar);
if(ret<0)
goto end;
stream.frame= av_frame_alloc();
if(avcodec_open2(stream.cc, stream.c, NULL)<0){
av_log(NULL, AV_LOG_ERROR, "Can't open codec.");
exit(1);
}
// init filter
char *par= "aformat=sample_fmts=s16:channel_layouts=mono:sample_rates=8000";
ret= filter.init(par, stream.cc);
if(ret<0){
log_errstr(ret);
exit(1);
}
AVFrame *tmp,
*dst= av_frame_alloc();
while(av_read_frame(ic, &pkt)>=0)
{
tmp =decode(stream, &pkt);
if(!tmp)
continue;
else
av_frame_move_ref(frame, tmp);/*移动至滤波用frame*/
// 对此frame进行滤波操作
ret= av_buffersrc_add_frame_flags(filter.src, frame, 0);
if(ret<0){
log_errstr(ret);
goto end;
}
while(1)
{
ret= av_buffersink_get_frame(filter.sink, dst);
if(AVERROR_EOF == ret || AVERROR(EAGAIN) == ret)
break;
else if(ret<0){
log_errstr(ret);
goto end;
}
LOG << "filting ok!" << dst->sample_rate << av_get_sample_fmt_name((AVSampleFormat)dst->format);
av_frame_unref(dst);
}
av_frame_unref(frame);
}
av_packet_unref(&pkt);
/*此处应该刷新流,但仅为测试就不写了*/
end:
avformat_close_input(&ic);
av_frame_free(&frame);
av_frame_free(&dst);
stream.free();
filter.free();
return 0;
}
2.用link来链接多个滤波器
程序目的:
先把一段音频拆成两部分,分别加不同的回音特效,最后合并输出。
ffmpeg -i fmt.flac -filter_complex "asplit[main][tmp]; [tmp]aecho[echo0]; [main][echo0]amix" dst.flac