一、背景
因为工作需要制作视频预览功能,使用网上开源代码时发现加载第一个视频比较耗时,差不多三秒左右,所以对其进行了部分修改,并重新编译使用了新版 FFmpeg 4.1.3。
https://github.com/wseemann/FFmpegMediaMetadataRetriever
二、编译FFmpeg-n4.1.3
1.环境
ubuntu 16
ndk:android_ndk_17c(编ffmpeg) 和android_ndk_14b(编FFmpegMediaMetadataRetriever)
编译ffmpeg遇到的问题如何修改参考:https://blog.csdn.net/qq_34902522/article/details/87879145 主要用:
1.1 ndk_r17c不会报libavformat/udp.c:290:28: error: request for member ‘s_addr’ in something not a structure or union
1.2 替换B0为b0 其他的报错大多都需要替换这个B0,xB0,yB0等等
libavcodec/aaccoder.c: In function ‘search_for_ms’:
libavcodec/aaccoder.c:803:25: error: expected identifier or ‘(’ before numeric constant
int B0 = 0, B1 = 0;
1.3 FFmpegMediaMetadataRetriever大神写的脚本基本没改,如下(如果遇到C compiler test failed.
可以到ffmpeg-4.1.3/ffbuild/config.log末尾查看日志,根据日志提示解决问题):
#!/bin/bash
# Build FFmpeg 4.1.3 shared libraries for Android.
# Usage: ./build_ffmpeg.sh armv7-a | arm64-v8a
set -e

# Set your own NDK here
NDK=/home/tdz/SDK/android-ndk-r17c

export TARGET=$1

ARM_PLATFORM=$NDK/platforms/android-14/arch-arm/
ARM_PREBUILT=$NDK/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64

ARM64_PLATFORM=$NDK/platforms/android-21/arch-arm64/
ARM64_PREBUILT=$NDK/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64

BUILD_DIR=`pwd`/ffmpeg-android
FFMPEG_VERSION="4.1.3"

# Configure, build and install one ABI. Expects ARCH, OPTIMIZE_CFLAGS,
# PREFIX and ADDITIONAL_CONFIGURE_FLAG to be set by the caller.
function build_one
{
    if [ "$ARCH" == "arm" ]
    then
        PLATFORM=$ARM_PLATFORM
        PREBUILT=$ARM_PREBUILT
        HOST=arm-linux-androideabi
    elif [ "$ARCH" == "arm64" ]
    then
        PLATFORM=$ARM64_PLATFORM
        PREBUILT=$ARM64_PREBUILT
        HOST=aarch64-linux-android
    fi

    pushd ffmpeg-$FFMPEG_VERSION

    # fix: --disable-avdevice and --disable-avfilter were each passed twice
    # in the original invocation; every flag now appears exactly once.
    ./configure --target-os=linux \
        --incdir=$BUILD_DIR/$TARGET/include \
        --libdir=$BUILD_DIR/$TARGET/lib \
        --enable-cross-compile \
        --extra-libs="-lgcc" \
        --arch=$ARCH \
        --cc=$PREBUILT/bin/$HOST-gcc \
        --cross-prefix=$PREBUILT/bin/$HOST- \
        --nm=$PREBUILT/bin/$HOST-nm \
        --sysroot=$PLATFORM \
        --extra-cflags="$OPTIMIZE_CFLAGS " \
        --enable-shared \
        --enable-small \
        --extra-ldflags="-Wl,-rpath-link=$PLATFORM/usr/lib -L$PLATFORM/usr/lib -nostdlib -lc -lm -ldl -llog" \
        --disable-ffplay \
        --disable-ffmpeg \
        --disable-ffprobe \
        --disable-avfilter \
        --disable-avdevice \
        --disable-doc \
        --disable-swresample \
        --disable-postproc \
        --disable-gpl \
        --disable-encoders \
        --disable-hwaccels \
        --disable-muxers \
        --disable-bsfs \
        --disable-protocols \
        --disable-indevs \
        --disable-outdevs \
        --disable-devices \
        --disable-filters \
        --enable-encoder=png \
        --enable-protocol=file,http,https,mmsh,mmst,pipe,rtmp,rtmps,rtmpt,rtmpts,rtp \
        --enable-debug=3 \
        --disable-asm \
        $ADDITIONAL_CONFIGURE_FLAG

    make clean
    make -j8 install V=1

    # Strip the (unused) inverse.o member; under `set -e` this aborts the
    # script if the member is absent — NOTE(review): confirm it exists in 4.1.3.
    $PREBUILT/bin/$HOST-ar d libavcodec/libavcodec.a inverse.o

    popd

    # copy the binaries
    mkdir -p $PREFIX
    cp -r $BUILD_DIR/$TARGET/* $PREFIX
}

if [ "$TARGET" == 'arm64-v8a' ]; then
    #arm64-v8a
    CPU=arm64-v8a
    ARCH=arm64
    OPTIMIZE_CFLAGS="-I$NDK/sysroot/usr/include/aarch64-linux-android -isysroot $NDK/sysroot -march=armv8-a"
    PREFIX=`pwd`/../jni/ffmpeg/ffmpeg/arm64-v8a
    ADDITIONAL_CONFIGURE_FLAG=
    build_one
fi

if [ "$TARGET" == 'armv7-a' ]; then
    #arm armv7-a
    CPU=armv7-a
    ARCH=arm
    OPTIMIZE_CFLAGS="-D _FILE_OFFSET_BITS=32 -D__ANDROID_API__=14 -I$NDK/sysroot/usr/include/arm-linux-androideabi -isysroot $NDK/sysroot -fPIC -DANDROID -D__thumb__ -mthumb -Wfatal-errors -Wno-deprecated -mfloat-abi=softfp -marm -march=armv7-a"
    PREFIX=`pwd`/../jni/ffmpeg/ffmpeg/armeabi-v7a
    ADDITIONAL_CONFIGURE_FLAG=
    build_one
fi
直接运行sudo ./build_ffmpeg.sh armv7-a或arm64-v8a即可。
三、修改FFmpegMediaMetadataRetriever使用最新ffmpeg库
https://github.com/wseemann/FFmpegMediaMetadataRetriever
其他源码文件没必要修改了。只改这个文件,问题就可以解决了。
1.修改ffmpeg_mediametadataretriever.c
修改set_data_source_l
/*
 * Open the media at `path` and prepare the first audio/video streams.
 *
 * Forces the MPEG-TS demuxer and deliberately skips
 * avformat_find_stream_info(), which cost roughly three seconds per source;
 * the frame itself is obtained later in decode_frame().
 *
 * On success *ps keeps the populated State and SUCCESS is returned; on
 * failure *ps is set to NULL and FAILURE is returned.
 */
int set_data_source_l(State **ps, const char* path) {
    av_log_set_level(AV_LOG_INFO);
    printf("set_data_source\n");

    int audio_index = -1;
    int video_index = -1;
    int i;
    State *state = *ps;

    printf("Path: %s\n", path);

    AVDictionary *options = NULL;
    av_dict_set(&options, "user_agent", "FFmpegMediaMetadataRetriever", 0);
    /* network timeout for protocols that honor the "timeout" option */
    av_dict_set(&options, "timeout", "6000", 0);

    if (state->headers) {
        av_dict_set(&options, "headers", state->headers, 0);
    }

    if (state->offset > 0) {
        state->pFormatCtx = avformat_alloc_context();
        state->pFormatCtx->skip_initial_bytes = state->offset;
    }

    /* NOTE(review): forcing the "ts" demuxer skips probing but will fail for
     * any non-MPEG-TS input — confirm every data source really is TS. */
    AVInputFormat *inputFormat = av_find_input_format("ts");

    double startTime = currentTimeMillis();
    LOGV("avformat_open_input start time=%f", startTime);

    if (avformat_open_input(&state->pFormatCtx, path, inputFormat, &options) != 0) {
        printf("Metadata could not be retrieved\n");
        av_dict_free(&options); /* fix: options leaked on the failure path */
        *ps = NULL;
        return FAILURE;
    }

    /* fix: entries not consumed by avformat_open_input were never freed */
    av_dict_free(&options);

    /* avformat_find_stream_info() is intentionally not called here; stream
     * parameters are filled in lazily while decoding (see decode_frame). */

    set_duration(state->pFormatCtx);
    set_shoutcast_metadata(state->pFormatCtx);

    // Find the first audio and video stream
    for (i = 0; i < state->pFormatCtx->nb_streams; i++) {
        if (state->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0) {
            video_index = i;
        }

        if (state->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0) {
            audio_index = i;
        }

        set_codec(state->pFormatCtx, i);
    }

    if (audio_index >= 0) {
        stream_component_open(state, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(state, video_index);
    }

    set_rotation(state->pFormatCtx, state->audio_st, state->video_st);

    *ps = state;
    return SUCCESS;
}
为了不必要的麻烦,贴上完整文件供参考
/*
* FFmpegMediaMetadataRetriever: A unified interface for retrieving frame
* and meta data from an input media file.
*
* Copyright 2016 William Seemann
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
#include <ffmpeg_mediametadataretriever.h>
#include <ffmpeg_utils.h>
#include <libavutil/log.h>
#include <stdio.h>
#include <unistd.h>
#include <android/log.h>
#include <sys/time.h>
#define LOG_TAG "##FFmpegMetaRetriever##"
#define LOGV(...) __android_log_print(ANDROID_LOG_VERBOSE,LOG_TAG,__VA_ARGS__)
/* Pixel format for extracted frames (matches WINDOW_FORMAT_RGBA_8888 blits). */
const int TARGET_IMAGE_FORMAT = AV_PIX_FMT_RGBA; //AV_PIX_FMT_RGB24;
/* Codec used to re-encode extracted frames into an image packet. */
const int TARGET_IMAGE_CODEC = AV_CODEC_ID_PNG;
/* Wall-clock time in milliseconds (sub-millisecond precision) since the epoch. */
double currentTimeMillis()
{
    struct timeval now;
    gettimeofday(&now, (struct timezone *) NULL);
    return (double) now.tv_sec * 1000.0 + (double) now.tv_usec / 1000.0;
}
/* Forward declarations for helpers defined later in this file. */
void convert_image(State *state, AVCodecContext *pCodecCtx, AVFrame *pFrame, AVPacket *avpkt, int *got_packet_ptr, int width, int height);
/* Wrappers around the FFmpeg 4.x send/receive decode and encode APIs. */
int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt);
int encode(AVCodecContext *avctx, AVPacket *pkt, int *got_packet, AVFrame *frame);
/*
 * A codec/pixel-format pair is "supported" when its packets can be handed
 * back to the caller directly, with no decode + re-encode round trip:
 * PNG, MJPEG or BMP data that is already RGBA.
 */
int is_supported_format(int codec_id, int pix_fmt) {
    if (pix_fmt != AV_PIX_FMT_RGBA) {
        return 0;
    }

    switch (codec_id) {
    case AV_CODEC_ID_PNG:
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_BMP:
        return 1;
    default:
        return 0;
    }
}
/*
 * Lazily build the encoder context and sws scaler used for scaled
 * (width x height) frame extraction; results are stored in
 * s->scaled_codecCtx and s->scaled_sws_ctx. Returns SUCCESS or FAILURE.
 * NOTE(review): on avcodec_open2 failure the freshly allocated context is
 * left in s->scaled_codecCtx unopened — release() will still free it, but
 * callers only see FAILURE; confirm this is intended.
 * Uses the deprecated AVStream.codec accessor (pre-FFmpeg-4 API).
 */
int get_scaled_context(State *s, AVCodecContext *pCodecCtx, int width, int height) {
/* encoder that turns decoded frames into image packets (PNG) */
AVCodec *targetCodec = avcodec_find_encoder(TARGET_IMAGE_CODEC);
if (!targetCodec) {
printf("avcodec_find_decoder() failed to find encoder\n");
return FAILURE;
}
s->scaled_codecCtx = avcodec_alloc_context3(targetCodec);
if (!s->scaled_codecCtx) {
printf("avcodec_alloc_context3 failed\n");
return FAILURE;
}
/* mirror the source stream's encoding parameters at the requested size */
s->scaled_codecCtx->bit_rate = s->video_st->codec->bit_rate;
s->scaled_codecCtx->width = width;
s->scaled_codecCtx->height = height;
s->scaled_codecCtx->pix_fmt = TARGET_IMAGE_FORMAT;
s->scaled_codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
s->scaled_codecCtx->time_base.num = s->video_st->codec->time_base.num;
s->scaled_codecCtx->time_base.den = s->video_st->codec->time_base.den;
if (!targetCodec || avcodec_open2(s->scaled_codecCtx, targetCodec, NULL) < 0) {
printf("avcodec_open2() failed\n");
return FAILURE;
}
/* scaler: source dimensions/pixfmt -> width x height RGBA */
s->scaled_sws_ctx = sws_getContext(s->video_st->codec->width,
s->video_st->codec->height,
s->video_st->codec->pix_fmt,
width,
height,
TARGET_IMAGE_FORMAT,
SWS_BILINEAR,
NULL,
NULL,
NULL);
return SUCCESS;
}
/*
 * Open the decoder for stream `stream_index` and record it on the State.
 * For video streams this also prepares the PNG encoder context (s->codecCtx)
 * and an sws scaler (s->sws_ctx) at the source dimensions, both used later
 * by convert_image(). Returns SUCCESS or FAILURE.
 * NOTE(review): relies on the deprecated AVStream.codec context; since
 * avformat_find_stream_info() is skipped upstream, codec parameters here may
 * be incomplete until the first packets are decoded — confirm for TS input.
 */
int stream_component_open(State *s, int stream_index) {
AVFormatContext *pFormatCtx = s->pFormatCtx;
AVCodecContext *codecCtx;
AVCodec *codec;
/* reject out-of-range stream indices */
if (stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
return FAILURE;
}
// Get a pointer to the codec context for the stream
codecCtx = pFormatCtx->streams[stream_index]->codec;
const AVCodecDescriptor *codesc = avcodec_descriptor_get(codecCtx->codec_id);
if (codesc) {
printf("avcodec_find_decoder %s\n", codesc->name);
}
// Find the decoder for the audio stream
codec = avcodec_find_decoder(codecCtx->codec_id);
if (codec == NULL) {
printf("avcodec_find_decoder() failed to find audio decoder\n");
return FAILURE;
}
// Open the codec
if (!codec || (avcodec_open2(codecCtx, codec, NULL) < 0)) {
printf("avcodec_open2() failed\n");
return FAILURE;
}
switch(codecCtx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
s->audio_stream = stream_index;
s->audio_st = pFormatCtx->streams[stream_index];
break;
case AVMEDIA_TYPE_VIDEO:
s->video_stream = stream_index;
s->video_st = pFormatCtx->streams[stream_index];
/* encoder used to re-encode decoded frames into PNG packets */
AVCodec *targetCodec = avcodec_find_encoder(TARGET_IMAGE_CODEC);
if (!targetCodec) {
printf("avcodec_find_decoder() failed to find encoder\n");
return FAILURE;
}
s->codecCtx = avcodec_alloc_context3(targetCodec);
if (!s->codecCtx) {
printf("avcodec_alloc_context3 failed\n");
return FAILURE;
}
/* copy encoding parameters from the source stream, output in RGBA */
s->codecCtx->bit_rate = s->video_st->codec->bit_rate;
s->codecCtx->width = s->video_st->codec->width;
s->codecCtx->height = s->video_st->codec->height;
s->codecCtx->pix_fmt = TARGET_IMAGE_FORMAT;
s->codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
s->codecCtx->time_base.num = s->video_st->codec->time_base.num;
s->codecCtx->time_base.den = s->video_st->codec->time_base.den;
if (!targetCodec || avcodec_open2(s->codecCtx, targetCodec, NULL) < 0) {
printf("avcodec_open2() failed\n");
return FAILURE;
}
/* scaler: same dimensions, pixel-format conversion to RGBA only */
s->sws_ctx = sws_getContext(s->video_st->codec->width,
s->video_st->codec->height,
s->video_st->codec->pix_fmt,
s->video_st->codec->width,
s->video_st->codec->height,
TARGET_IMAGE_FORMAT,
SWS_BILINEAR,
NULL,
NULL,
NULL);
break;
default:
break;
}
return SUCCESS;
}
/*
 * Open the media at `path` and prepare the first audio/video streams.
 *
 * Forces the MPEG-TS demuxer and deliberately skips
 * avformat_find_stream_info(), which cost roughly three seconds per source;
 * the frame itself is obtained later in decode_frame().
 *
 * On success *ps keeps the populated State and SUCCESS is returned; on
 * failure *ps is set to NULL and FAILURE is returned.
 */
int set_data_source_l(State **ps, const char* path) {
    av_log_set_level(AV_LOG_INFO);
    printf("set_data_source\n");

    int audio_index = -1;
    int video_index = -1;
    int i;
    State *state = *ps;

    printf("Path: %s\n", path);

    AVDictionary *options = NULL;
    av_dict_set(&options, "user_agent", "FFmpegMediaMetadataRetriever", 0);
    /* network timeout for protocols that honor the "timeout" option */
    av_dict_set(&options, "timeout", "6000", 0);

    if (state->headers) {
        av_dict_set(&options, "headers", state->headers, 0);
    }

    if (state->offset > 0) {
        state->pFormatCtx = avformat_alloc_context();
        state->pFormatCtx->skip_initial_bytes = state->offset;
    }

    /* NOTE(review): forcing the "ts" demuxer skips probing but will fail for
     * any non-MPEG-TS input — confirm every data source really is TS. */
    AVInputFormat *inputFormat = av_find_input_format("ts");

    double startTime = currentTimeMillis();
    LOGV("avformat_open_input start time=%f", startTime);

    if (avformat_open_input(&state->pFormatCtx, path, inputFormat, &options) != 0) {
        printf("Metadata could not be retrieved\n");
        av_dict_free(&options); /* fix: options leaked on the failure path */
        *ps = NULL;
        return FAILURE;
    }

    /* fix: entries not consumed by avformat_open_input were never freed */
    av_dict_free(&options);

    /* avformat_find_stream_info() is intentionally not called here; stream
     * parameters are filled in lazily while decoding (see decode_frame). */

    set_duration(state->pFormatCtx);
    set_shoutcast_metadata(state->pFormatCtx);

    // Find the first audio and video stream
    for (i = 0; i < state->pFormatCtx->nb_streams; i++) {
        if (state->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0) {
            video_index = i;
        }

        if (state->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0) {
            audio_index = i;
        }

        set_codec(state->pFormatCtx, i);
    }

    if (audio_index >= 0) {
        stream_component_open(state, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(state, video_index);
    }

    set_rotation(state->pFormatCtx, state->audio_st, state->video_st);

    *ps = state;
    return SUCCESS;
}
/*
 * (Re)initialize *ps: close any previously opened input and file descriptor,
 * allocate the State on first use, and reset every field to its idle value.
 * On allocation failure *ps is set to NULL.
 */
void init(State **ps) {
    State *state = *ps;

    if (state && state->pFormatCtx) {
        avformat_close_input(&state->pFormatCtx);
    }

    if (state && state->fd != -1) {
        close(state->fd);
    }

    if (!state) {
        state = av_mallocz(sizeof(State));
        if (!state) { /* fix: av_mallocz failure led to a NULL dereference */
            *ps = NULL;
            return;
        }
    }

    state->pFormatCtx = NULL;
    state->audio_stream = -1;
    state->video_stream = -1;
    state->audio_st = NULL;
    state->video_st = NULL;
    state->fd = -1;
    state->offset = 0;
    state->headers = NULL;

    *ps = state;
}
/*
 * Open a URI (file/http/...) as the data source. Optional request `headers`
 * are stored on the State and applied during open. An already-attached
 * native window survives re-initialization.
 */
int set_data_source_uri(State **ps, const char* path, const char* headers) {
    State *state = *ps;

    /* preserve the window across init(), which resets everything else */
    ANativeWindow *window = (state && state->native_window) ? state->native_window : NULL;

    init(&state);

    state->native_window = window;
    state->headers = headers;

    *ps = state;

    return set_data_source_l(ps, path);
}
/*
 * Use a duplicated file descriptor as the data source, via FFmpeg's
 * "pipe:<fd>" protocol. `length` is currently unused. Returns the result of
 * set_data_source_l(); on failure *ps is set to NULL.
 */
int set_data_source_fd(State **ps, int fd, int64_t offset, int64_t length) {
    char path[256];
    State *state = *ps;

    /* preserve the window across init(), which resets everything else */
    ANativeWindow *native_window = NULL;
    if (state && state->native_window) {
        native_window = state->native_window;
    }

    init(&state);
    state->native_window = native_window;

    int myfd = dup(fd);
    if (myfd < 0) { /* fix: dup() failure was silently formatted as "pipe:-1" */
        *ps = NULL;
        return FAILURE;
    }

    /* fix: build the URL with one bounded snprintf instead of sprintf+strcat
     * into two fixed buffers */
    snprintf(path, sizeof(path), "pipe:%d", myfd);

    state->fd = myfd;
    state->offset = offset;

    *ps = state;

    return set_data_source_l(ps, path);
}
/* Return the metadata value for `key`, or NULL when no input is open. */
const char* extract_metadata(State **ps, const char* key) {
    printf("extract_metadata\n");

    State *state = *ps;
    if (state == NULL || state->pFormatCtx == NULL) {
        return NULL;
    }

    return extract_metadata_internal(state->pFormatCtx, state->audio_st, state->video_st, key);
}
/*
 * Return the metadata value for `key` in chapter `chapter`, or NULL when no
 * input is open, there are no chapters, or the index is out of range.
 */
const char* extract_metadata_from_chapter(State **ps, const char* key, int chapter) {
    printf("extract_metadata_from_chapter\n");

    State *state = *ps;
    if (state == NULL || state->pFormatCtx == NULL) {
        return NULL;
    }
    if (state->pFormatCtx->nb_chapters <= 0) {
        return NULL;
    }
    /* chapter < 0 is tested first so the unsigned comparison is safe */
    if (chapter < 0 || chapter >= state->pFormatCtx->nb_chapters) {
        return NULL;
    }

    return extract_metadata_from_chapter_internal(state->pFormatCtx, state->audio_st, state->video_st, key, chapter);
}
/* Copy all container-level metadata into *metadata; SUCCESS/FAILURE. */
int get_metadata(State **ps, AVDictionary **metadata) {
    printf("get_metadata\n");

    State *state = *ps;
    if (state == NULL || state->pFormatCtx == NULL) {
        return FAILURE;
    }

    get_metadata_internal(state->pFormatCtx, metadata);
    return SUCCESS;
}
/*
 * Look for an attached picture (album art) among the streams and copy it
 * into `pkt`. If the picture is not already a supported RGBA image
 * (see is_supported_format) it is decoded and re-encoded via convert_image().
 * Returns SUCCESS when `pkt` holds an image packet, FAILURE otherwise.
 * NOTE(review): uses the deprecated AVStream.codec context and
 * av_copy_packet(); the stream_index comparison assumes the attached-pic
 * stream is also the selected video stream — verify for multi-stream files.
 */
int get_embedded_picture(State **ps, AVPacket *pkt) {
printf("get_embedded_picture\n");
int i = 0;
int got_packet = 0;
AVFrame *frame = NULL;
State *state = *ps;
if (!state || !state->pFormatCtx) {
return FAILURE;
}
// TODO commented out 5/31/16, do we actully need this since the context
// has been initialized
// read the format headers
/*if (state->pFormatCtx->iformat->read_header(state->pFormatCtx) < 0) {
printf("Could not read the format header\n");
return FAILURE;
}*/
// find the first attached picture, if available
for (i = 0; i < state->pFormatCtx->nb_streams; i++) {
if (state->pFormatCtx->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC) {
printf("Found album art\n");
/* reset the caller's packet before copying the attached picture in */
if (pkt) {
av_packet_unref(pkt);
av_init_packet(pkt);
}
av_copy_packet(pkt, &state->pFormatCtx->streams[i]->attached_pic);
// TODO is this right
got_packet = 1;
// Is this a packet from the video stream?
if (pkt->stream_index == state->video_stream) {
int codec_id = state->video_st->codec->codec_id;
int pix_fmt = state->video_st->codec->pix_fmt;
// If the image isn't already in a supported format convert it to one
if (!is_supported_format(codec_id, pix_fmt)) {
int got_frame = 0;
frame = av_frame_alloc();
if (!frame) {
break;
}
// if (avcodec_decode_video2(state->video_st->codec, frame, &got_frame, pkt) <= 0) {
// break;
// }
if (decode(state->video_st->codec, frame, &got_frame, pkt) < 0) {
got_frame = 0;
break;
}
// Did we get a video frame?
if (got_frame) {
/* re-encode the decoded picture as RGBA/PNG, then swap it into pkt */
AVPacket convertedPkt;
av_init_packet(&convertedPkt);
convertedPkt.size = 0;
convertedPkt.data = NULL;
convert_image(state, state->video_st->codec, frame, &convertedPkt, &got_packet, -1, -1);
av_packet_unref(pkt);
av_init_packet(pkt);
av_copy_packet(pkt, &convertedPkt);
av_packet_unref(&convertedPkt);
break;
}
} else {
/* already a supported image: hand the raw attached picture back */
av_packet_unref(pkt);
av_init_packet(pkt);
av_copy_packet(pkt, &state->pFormatCtx->streams[i]->attached_pic);
got_packet = 1;
break;
}
}
}
}
/* safe when frame is still NULL (no conversion happened) */
av_frame_free(&frame);
if (got_packet) {
return SUCCESS;
} else {
return FAILURE;
}
}
/*
 * Wrapper around the FFmpeg 4.x send/receive encode API: feed `frame` to the
 * encoder and try to fetch one packet into `pkt`. *got_packet is set when a
 * packet was produced. Returns 0 on success or when the encoder merely needs
 * more input (EAGAIN); a negative AVERROR otherwise.
 */
int encode(AVCodecContext *avctx, AVPacket *pkt, int *got_packet, AVFrame *frame) {
    *got_packet = 0;

    int ret = avcodec_send_frame(avctx, frame);
    if (ret < 0) {
        return ret;
    }

    ret = avcodec_receive_packet(avctx, pkt);
    if (ret == 0) {
        *got_packet = 1;
        return 0;
    }
    if (ret == AVERROR(EAGAIN)) {
        return 0;
    }
    return ret;
}
/*
 * Scale/convert the decoded `pFrame` into TARGET_IMAGE_FORMAT (RGBA) and
 * encode it into `avpkt` (PNG). When width/height are -1 the source
 * dimensions and the pre-built contexts (s->codecCtx / s->sws_ctx) are used;
 * otherwise the lazily-created scaled contexts are used. When a native
 * window is attached the RGBA pixels are also blitted to it.
 * *got_packet_ptr reports whether `avpkt` holds a valid encoded image.
 */
void convert_image(State *state, AVCodecContext *pCodecCtx, AVFrame *pFrame, AVPacket *avpkt, int *got_packet_ptr, int width, int height) {
    AVCodecContext *codecCtx;
    struct SwsContext *scalerCtx;

    *got_packet_ptr = 0;

    if (width != -1 && height != -1) {
        if (state->scaled_codecCtx == NULL || state->scaled_sws_ctx == NULL) {
            get_scaled_context(state, pCodecCtx, width, height);
        }
        codecCtx = state->scaled_codecCtx;
        scalerCtx = state->scaled_sws_ctx;
    } else {
        codecCtx = state->codecCtx;
        scalerCtx = state->sws_ctx;
    }

    /* fix: codecCtx->width was read below before the original's NULL check */
    if (codecCtx == NULL) {
        return;
    }

    if (width == -1) {
        width = pCodecCtx->width;
    }
    if (height == -1) {
        height = pCodecCtx->height;
    }

    AVFrame *frame = av_frame_alloc();
    if (!frame) { /* fix: a NULL frame was dereferenced unconditionally */
        return;
    }

    // Determine required buffer size and allocate buffer
    int numBytes = avpicture_get_size(TARGET_IMAGE_FORMAT, codecCtx->width, codecCtx->height);
    uint8_t *buffer = av_malloc(numBytes * sizeof(uint8_t));

    // set the frame parameters
    frame->format = TARGET_IMAGE_FORMAT;
    frame->width = codecCtx->width;
    frame->height = codecCtx->height;

    avpicture_fill((AVPicture *) frame,
            buffer,
            TARGET_IMAGE_FORMAT,
            codecCtx->width,
            codecCtx->height);

    if (scalerCtx != NULL) {
        sws_scale(scalerCtx,
                (const uint8_t *const *) pFrame->data,
                pFrame->linesize,
                0,
                pFrame->height,
                frame->data,
                frame->linesize);
    }

    int ret = encode(codecCtx, avpkt, got_packet_ptr, frame);

    if (ret >= 0 && state->native_window) {
        ANativeWindow_setBuffersGeometry(state->native_window, width, height, WINDOW_FORMAT_RGBA_8888);

        ANativeWindow_Buffer windowBuffer;
        if (ANativeWindow_lock(state->native_window, &windowBuffer, NULL) == 0) {
            /* copy row by row: window stride and frame linesize may differ */
            int h;
            for (h = 0; h < height; h++) {
                memcpy(windowBuffer.bits + h * windowBuffer.stride * 4,
                        buffer + h * frame->linesize[0],
                        width * 4);
            }
            ANativeWindow_unlockAndPost(state->native_window);
        }
    }

    if (ret < 0) {
        *got_packet_ptr = 0;
    }

    av_frame_free(&frame);
    /* fix: was free(buffer) — memory from av_malloc must go through av_free */
    av_free(buffer);

    if (ret < 0 || !*got_packet_ptr) {
        av_packet_unref(avpkt);
    }
}
/*
 * Wrapper around the FFmpeg 4.x send/receive decode API: submit `pkt`
 * (NULL means "no new input") and try to pull one frame. *got_frame is set
 * when a frame came out. Returns 0 on success, EAGAIN or EOF; a negative
 * AVERROR on real errors.
 */
int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt) {
    *got_frame = 0;

    if (pkt != NULL) {
        int sent = avcodec_send_packet(avctx, pkt);
        /* EAGAIN is not expected here because every decoded frame is drained
         * via avcodec_receive_frame() before more input is sent. */
        if (sent < 0) {
            return sent == AVERROR_EOF ? 0 : sent;
        }
    }

    int ret = avcodec_receive_frame(avctx, frame);
    if (ret >= 0) {
        *got_frame = 1;
        return 0;
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}
/*
 * Read packets until one video frame at/after `desired_frame_number`
 * (stream time_base units; -1 means "first decodable frame") has been
 * decoded and converted into `pkt` via convert_image(). When the video
 * stream's codec already emits a supported RGBA image, the raw packet is
 * returned untouched with *got_frame = 1.
 */
void decode_frame(State *state, AVPacket *pkt, int *got_frame, int64_t desired_frame_number, int width, int height) {
    // Allocate video frame
    AVFrame *frame = av_frame_alloc();

    *got_frame = 0;
    if (!frame) {
        return;
    }

    // Read frames and return the first one found
    while (av_read_frame(state->pFormatCtx, pkt) >= 0) {
        if (pkt->stream_index != state->video_stream) {
            av_packet_unref(pkt); /* fix: non-video packets were leaked */
            continue;
        }

        int codec_id = state->video_st->codec->codec_id;
        int pix_fmt = state->video_st->codec->pix_fmt;

        // Already a supported still-image packet: hand it back untouched.
        if (is_supported_format(codec_id, pix_fmt)) {
            *got_frame = 1;
            break;
        }

        *got_frame = 0;

        // Decode video frame
        if (decode(state->video_st->codec, frame, got_frame, pkt) < 0) {
            *got_frame = 0;
            av_packet_unref(pkt); /* fix: packet leaked on decode error */
            continue;
        }

        // Did we get a video frame at (or past) the desired timestamp?
        if (*got_frame &&
                (desired_frame_number == -1 || frame->pkt_pts >= desired_frame_number)) {
            if (pkt->data) {
                av_packet_unref(pkt);
            }
            av_init_packet(pkt);
            convert_image(state, state->video_st->codec, frame, pkt, got_frame, width, height);
            break;
        }

        /* fix: consumed / too-early packets were leaked on every iteration */
        av_packet_unref(pkt);
    }

    // Free the frame
    av_frame_free(&frame);
}
/* Convenience wrapper: extract the frame at `timeUs` using the source's own
 * dimensions (width/height -1 means "no scaling"). */
int get_frame_at_time(State **ps, int64_t timeUs, int option, AVPacket *pkt) {
return get_scaled_frame_at_time(ps, timeUs, option, pkt, -1, -1);
}
/*
 * Seek to `timeUs` (microseconds, AV_TIME_BASE_Q; -1 skips seeking) in the
 * video stream and decode/encode one frame into `pkt`, scaled to
 * width x height (-1 keeps the source dimension). `option` selects the seek
 * mode (Options enum). Returns SUCCESS when a frame packet was produced.
 */
int get_scaled_frame_at_time(State **ps, int64_t timeUs, int option, AVPacket *pkt, int width, int height) {
    printf("get_frame_at_time\n");

    int got_packet = 0;
    int64_t desired_frame_number = -1;

    State *state = *ps;
    Options opt = option;

    if (!state || !state->pFormatCtx || state->video_stream < 0) {
        return FAILURE;
    }

    if (timeUs > -1) {
        int stream_index = state->video_stream;
        int64_t seek_time = av_rescale_q(timeUs, AV_TIME_BASE_Q, state->pFormatCtx->streams[stream_index]->time_base);
        int64_t seek_stream_duration = state->pFormatCtx->streams[stream_index]->duration;

        int flags = 0;
        int ret = -1;

        // For some reason the seek_stream_duration is sometimes a negative
        // value, make sure to check that it is greater than 0 before
        // adjusting the seek_time
        if (seek_stream_duration > 0 && seek_time > seek_stream_duration) {
            seek_time = seek_stream_duration;
        }

        if (seek_time < 0) {
            return FAILURE;
        }

        if (opt == OPTION_CLOSEST) {
            // land on the previous keyframe, then decode forward until
            // pkt_pts reaches the requested time (see decode_frame)
            desired_frame_number = seek_time;
            flags = AVSEEK_FLAG_BACKWARD;
        } else if (opt == OPTION_CLOSEST_SYNC) {
            flags = 0;
        } else if (opt == OPTION_NEXT_SYNC) {
            flags = 0;
        } else if (opt == OPTION_PREVIOUS_SYNC) {
            flags = AVSEEK_FLAG_BACKWARD;
        }

        ret = av_seek_frame(state->pFormatCtx, stream_index, seek_time, flags);
        if (ret < 0) {
            return FAILURE;
        }

        // drop any buffered frames from before the seek point
        if (state->audio_stream >= 0) {
            avcodec_flush_buffers(state->audio_st->codec);
        }
        if (state->video_stream >= 0) {
            avcodec_flush_buffers(state->video_st->codec);
        }
    }

    decode_frame(state, pkt, &got_packet, desired_frame_number, width, height);

    /* fix: removed an empty `if (got_packet)` block that contained only
     * commented-out desktop debug code, and the duplicated check after it */
    return got_packet ? SUCCESS : FAILURE;
}
/*
 * Attach an ANativeWindow used by convert_image() for preview blits.
 * Allocates the State on demand; rejects a NULL window with FAILURE.
 */
int set_native_window(State **ps, ANativeWindow* native_window) {
    printf("set_native_window\n");

    if (native_window == NULL) {
        return FAILURE;
    }

    State *state = *ps;
    if (state == NULL) {
        init(&state);
    }

    state->native_window = native_window;
    *ps = state;

    return SUCCESS;
}
/*
 * Tear down everything owned by *ps — decoders, format context, file
 * descriptor, scaler/encoder contexts and the native window reference —
 * then free the State and clear the caller's pointer.
 */
void release(State **ps) {
    printf("release\n");

    State *state = *ps;
    if (state == NULL) {
        return;
    }

    if (state->audio_st && state->audio_st->codec) {
        avcodec_close(state->audio_st->codec);
    }
    if (state->video_st && state->video_st->codec) {
        avcodec_close(state->video_st->codec);
    }

    if (state->pFormatCtx) {
        avformat_close_input(&state->pFormatCtx);
    }

    if (state->fd != -1) {
        close(state->fd);
    }

    if (state->sws_ctx) {
        /* fix: sws_ctx was freed in two separate blocks; free it once */
        sws_freeContext(state->sws_ctx);
        state->sws_ctx = NULL;
    }

    if (state->codecCtx) {
        avcodec_close(state->codecCtx);
        av_free(state->codecCtx);
        state->codecCtx = NULL;
    }

    if (state->scaled_codecCtx) {
        avcodec_close(state->scaled_codecCtx);
        av_free(state->scaled_codecCtx);
        state->scaled_codecCtx = NULL;
    }

    if (state->scaled_sws_ctx) {
        sws_freeContext(state->scaled_sws_ctx);
        state->scaled_sws_ctx = NULL;
    }

    // make sure we don't leak native windows
    if (state->native_window != NULL) {
        ANativeWindow_release(state->native_window);
        state->native_window = NULL;
    }

    av_freep(&state);
    /* fix: `ps = NULL` only cleared the local copy, leaving the caller with
     * a dangling *ps after the State was freed */
    *ps = NULL;
}
2.其他mk文件配置
…/jni/根目录下的Android.mk修改API 12改成14
Application.mk改APP_ABI
…/jni/ffmpeg/根目录下的Android.mk修改如下
# Prebuilt FFmpeg 4.1.3 shared libraries, one module per library.
# The soname suffixes (-58 / -56 / -5) match the major versions produced by
# the FFmpeg 4.1.x build (avcodec/avformat 58, avutil 56, swscale 5).
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := libavcodec
LOCAL_SRC_FILES := ffmpeg/$(TARGET_ARCH_ABI)/lib/$(LOCAL_MODULE)-58.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/ffmpeg/$(TARGET_ARCH_ABI)/include
include $(PREBUILT_SHARED_LIBRARY)
include $(CLEAR_VARS)
LOCAL_MODULE := libavformat
LOCAL_SRC_FILES := ffmpeg/$(TARGET_ARCH_ABI)/lib/$(LOCAL_MODULE)-58.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/ffmpeg/$(TARGET_ARCH_ABI)/include
include $(PREBUILT_SHARED_LIBRARY)
include $(CLEAR_VARS)
LOCAL_MODULE := libavutil
LOCAL_SRC_FILES := ffmpeg/$(TARGET_ARCH_ABI)/lib/$(LOCAL_MODULE)-56.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/ffmpeg/$(TARGET_ARCH_ABI)/include
include $(PREBUILT_SHARED_LIBRARY)
include $(CLEAR_VARS)
LOCAL_MODULE := libswscale
LOCAL_SRC_FILES := ffmpeg/$(TARGET_ARCH_ABI)/lib/$(LOCAL_MODULE)-5.so
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/ffmpeg/$(TARGET_ARCH_ABI)/include
include $(PREBUILT_SHARED_LIBRARY)
# NOTE(review): this trailing reassignment duplicates the first line and has
# no effect here — likely a copy/paste leftover.
LOCAL_PATH:= $(call my-dir)
然后用 android-ndk-r14b/ndk-build 来编译,因为其他版本(如 17c 等)编译时会报错,所以我用了 14b。