#include <jni.h>
#include <string>
#include <android/log.h>
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,"xplay",__VA_ARGS__);
#include "decode_audio.h"
#include "play_pcm.h"
#include "scaling_video.h"
#include "resampling_audio.h"
#include "remuxing.h"
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <android/native_window.h>
#include <android/native_window_jni.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavcodec/jni.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
}
extern "C"
JNIEXPORT
jint JNI_OnLoad(JavaVM *vm, void *res) {
    /*
     * Called by the runtime when the native library is loaded.
     * Hand the JavaVM to FFmpeg so JNI-backed components (e.g. the
     * mediacodec decoder) can attach to the VM later.
     * av_jni_set_java_vm() is declared in <libavcodec/jni.h>; the file
     * previously called it without that include (implicit declaration).
     */
    av_jni_set_java_vm(vm, NULL);
    return JNI_VERSION_1_4;
}
// Cached context for pixel-format conversion (filtered frame -> RGBA for the window).
SwsContext *vctx = NULL;
// Fixed output geometry for the native window and the RGBA staging buffer.
int outWidth = 1920;
int outHeight = 1080;
// Staging buffer for one converted RGBA frame (outWidth * outHeight * 4 bytes).
// NOTE(review): allocated once for process lifetime, never freed.
char *rgb = new char[1080 * 1920 * 4];
ANativeWindow *nwin;        // native window obtained from the Java Surface
ANativeWindow_Buffer wbuf;  // per-frame lock info (bits, stride, width, height)
// Filter-graph description applied to each decoded frame.
// Alternatives tried during development are kept below, commented out.
//const char *filter_descr = "scale=78:24,transpose=cclock";
//const char *filter_descr = "drawtext=fontsize=100:fontfile-FreeSerif.ttf:text='hello word':x=20:y=20";
//const char *filter_descr = "drawtext=fontfile=/data/user/0/com.ffmpeg/files/zhongsong.ttf:fontcolor=green:fontsize=30:text='keyi'";
const char *filter_descr = "drawbox=x=100:y=100:w=100:h=100:color=pink@0.5";
//const char *filter_descr = "lutyuv='u=128:v=128'";
//const char *filter_descr = "hflip";
//const char *filter_descr = "hue='h=60:s=-3'";
//const char *filter_descr = "boxblur";
//const char *filter_descr = "";
/* other way:
scale=78:24 [scl]; [scl] transpose=cclock // assumes "[in]" and "[out]" to be input output pads respectively
*/
static AVFormatContext *fmt_ctx;        // demuxer context for the input file
static AVCodecContext *dec_ctx;         // video decoder context
AVFilterContext *buffersink_ctx;        // graph sink: filtered frames come out here
AVFilterContext *buffersrc_ctx;         // graph source: decoded frames go in here
AVFilterGraph *filter_graph;
static int video_stream_index = -1;     // index of the selected video stream
/*
 * Open the input file, pick the best video stream, and create + open a
 * decoder for it.  On success fmt_ctx/dec_ctx/video_stream_index are set.
 * Returns 0 on success, a negative AVERROR on failure (caller cleans up
 * fmt_ctx/dec_ctx via its own end: path).
 */
static int open_input_file(const char *filename)
{
    int ret;
    AVCodec *dec;

    if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }
    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    /* select the video stream (also returns the matching decoder in dec) */
    ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
        return ret;
    }
    video_stream_index = ret;

    /* create the decoding context */
    dec_ctx = avcodec_alloc_context3(dec);
    if (!dec_ctx)
        return AVERROR(ENOMEM);
    /* BUG FIX: this return value was previously ignored; on failure the
     * decoder would have been opened with uninitialized parameters. */
    if ((ret = avcodec_parameters_to_context(dec_ctx,
                fmt_ctx->streams[video_stream_index]->codecpar)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot copy stream parameters to decoder\n");
        return ret;
    }

    /* init the video decoder */
    if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
        return ret;
    }
    return 0;
}
/*
 * Build the filter graph:  buffer (src) -> [filters_descr] -> buffersink.
 * Requires open_input_file() to have run (uses dec_ctx and the stream's
 * time_base).  Returns 0 on success, negative AVERROR on failure; the
 * goto-end path frees the AVFilterInOut lists in every case.
 */
static int init_filters(const char *filters_descr)
{
    char args[512];
    int ret = 0;
    const AVFilter *buffersrc = avfilter_get_by_name("buffer");
    const AVFilter *buffersink = avfilter_get_by_name("buffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
    /* Sink is constrained to GRAY8, so filtered frames come out grayscale;
     * sws_scale later converts GRAY8 -> RGBA for display.  NOTE(review):
     * presumably intentional (grayscale rendering) — confirm. */
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
    filter_graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    /* Buffer video source: decoded frames from the decoder are inserted here.
     * args describes the frames the source will receive. */
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
             time_base.num, time_base.den,
             dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                       args, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
        goto end;
    }
    /* Buffer video sink: terminates the filter chain. */
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       NULL, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
        goto end;
    }
    /* av_opt_set_int_list is a macro (libavutil/opt.h); restricts the sink's
     * accepted output pixel formats to the pix_fmts list above. */
    ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
                              AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
        goto end;
    }
    /*
     * Set the endpoints of the filter graph.  filter_graph will be linked
     * to the graph described by filters_descr.
     */
    /*
     * The buffer source output must be connected to the input pad of the
     * first filter described by filters_descr; since the first filter's
     * input label is not specified, it defaults to "in".
     */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;
    /*
     * The buffer sink input must be connected to the output pad of the
     * last filter described by filters_descr; since the last filter's
     * output label is not specified, it defaults to "out".
     */
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;
    if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                                        &inputs, &outputs, NULL)) < 0)
        goto end;
    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;
end:
    /* avfilter_inout_free(NULL-safe): releases whatever parse_ptr left over */
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}
extern "C"
JNIEXPORT void JNICALL
Java_com_ffmpeg_MainActivity_setSurface(JNIEnv *env, jobject thiz, jobject surface) {
    /*
     * Demux + decode /sdcard/zhou.mp4, push every video frame through the
     * AVFilter graph (filter_descr), convert the filtered frame to RGBA via
     * sws_scale, and blit it into the given Java Surface.  Blocks until the
     * file is exhausted or an error occurs.
     */
    const char *path = "/sdcard/zhou.mp4";

    // Window init: the window is asked to deliver outWidth x outHeight
    // RGBA_8888 buffers (the compositor scales to the actual view size).
    nwin = ANativeWindow_fromSurface(env, surface);
    ANativeWindow_setBuffersGeometry(nwin, outWidth, outHeight, WINDOW_FORMAT_RGBA_8888);

    // Register demuxers/decoders and init networking (register_* are no-ops
    // on FFmpeg >= 4.0 but harmless; kept for the version this file targets).
    av_register_all();
    avformat_network_init();
    avcodec_register_all();

    int ret;
    AVPacket packet;
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();
    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if ((ret = open_input_file(path)) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    while ((ret = av_read_frame(fmt_ctx, &packet)) >= 0) {
        if (packet.stream_index == video_stream_index) {
            ret = avcodec_send_packet(dec_ctx, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
                break;
            }
            while (ret >= 0) {
                ret = avcodec_receive_frame(dec_ctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    break;
                } else if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n");
                    goto end;
                }
                frame->pts = frame->best_effort_timestamp;

                /* push the decoded frame into the filtergraph */
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                    break;
                }

                /* pull filtered frames from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;

                    // (Re)build the converter if the filtered frame's
                    // geometry/format changed; converts to RGBA at the
                    // fixed window size.
                    vctx = sws_getCachedContext(vctx,
                                                filt_frame->width,
                                                filt_frame->height,
                                                (AVPixelFormat) filt_frame->format,
                                                outWidth,
                                                outHeight,
                                                AV_PIX_FMT_RGBA,
                                                SWS_FAST_BILINEAR,
                                                0, 0, 0);
                    if (vctx) {
                        uint8_t *data[AV_NUM_DATA_POINTERS] = {0};
                        data[0] = (uint8_t *) rgb;
                        int lines[AV_NUM_DATA_POINTERS] = {0};
                        lines[0] = outWidth * 4;  // tightly packed RGBA rows
                        int h = sws_scale(vctx,
                                          (const uint8_t **) filt_frame->data,
                                          filt_frame->linesize,
                                          0,
                                          filt_frame->height,
                                          data, lines);
                        LOGE("sws_scale = %d", h);
                        if (h > 0) {
                            /* BUG FIX: the previous single memcpy assumed the
                             * window buffer stride equals outWidth; when the
                             * gralloc stride is wider this corrupted the image
                             * and wrote past each row.  Copy row by row using
                             * wbuf.stride (in pixels), clamped to the locked
                             * buffer's dimensions, and only draw when the lock
                             * succeeds (previously unchecked). */
                            if (ANativeWindow_lock(nwin, &wbuf, 0) == 0) {
                                uint8_t *dst = (uint8_t *) wbuf.bits;
                                const uint8_t *src = (const uint8_t *) rgb;
                                int rows = outHeight < wbuf.height ? outHeight : wbuf.height;
                                int rowBytes = (outWidth < wbuf.width ? outWidth : wbuf.width) * 4;
                                for (int y = 0; y < rows; y++) {
                                    memcpy(dst + (size_t) y * wbuf.stride * 4,
                                           src + (size_t) y * outWidth * 4,
                                           rowBytes);
                                }
                                ANativeWindow_unlockAndPost(nwin);
                            }
                        }
                    }
                    av_frame_unref(filt_frame);
                }
                av_frame_unref(frame);
            }
        }
        av_packet_unref(&packet);
    }
end:
    avfilter_graph_free(&filter_graph);
    avcodec_free_context(&dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);
    if (nwin) {
        // FIX: ANativeWindow_fromSurface acquires a reference that was
        // previously leaked on every call.
        ANativeWindow_release(nwin);
        nwin = NULL;
    }
    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        exit(1);
    }
}
// Source article: "FFmpeg AVFilter simple filter application" (blog footer,
// originally posted 2024-05-15 11:55:22) — commented out so the file compiles.