Compiling FFmpeg
Please refer to: compiling the FFmpeg source code.
Using ffmpeg.so
After a successful build, an include directory (the FFmpeg headers) and libffmpeg.so are generated under android/arm in the FFmpeg source tree. Copy both into the project's jni directory.
Then write the C decoder implementation decode.c, the cross-compilation build script Android.mk, and the platform configuration file Application.mk (a minimal sketch of Application.mk is given after Android.mk below). The jni directory then contains include/, libffmpeg.so, decode.c, Android.mk, and Application.mk.
The decode.c file is listed below (adapted from the blog of the late Dr. Lei Xiaohua). When running it, I hit an error where avformat_open_input could not open the video file, so I added code that logs the concrete error message, which makes troubleshooting much easier; see "Fixing the avformat_open_input file-open error" for details:
/**
* 雷霄骅 Lei Xiaohua
* leixiaohua1020@126.com
* Communication University of China / Digital TV Technology
* This is the simplest FFmpeg-based video decoder for the Android platform. It decodes the input video into YUV pixel data.
*/
#include <stdio.h>
#include <string.h>
#include <time.h>
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h" //needed for av_image_get_buffer_size() and av_image_fill_arrays()
#include "libavutil/log.h"
#ifdef ANDROID
#include <jni.h>
#include <android/log.h>
#define LOGE(format, ...) __android_log_print(ANDROID_LOG_ERROR, "(>_<)", format, ##__VA_ARGS__)
#define LOGI(format, ...) __android_log_print(ANDROID_LOG_INFO, "(^_^)", format, ##__VA_ARGS__)
#else
#define LOGE(format, ...) printf("(>_<) " format "\n", ##__VA_ARGS__)
#define LOGI(format, ...) printf("(^_^) " format "\n", ##__VA_ARGS__)
#endif
//Output FFmpeg's av_log()
void custom_log(void *ptr, int level, const char* fmt, va_list vl) {
FILE *fp = fopen("/storage/emulated/0/av_log.txt", "a+");
if (fp) {
vfprintf(fp, fmt, vl);
fflush(fp);
fclose(fp);
}
}
JNIEXPORT jint JNICALL Java_com_test_jt_MainActivity_decode(JNIEnv *env,
jobject obj, jstring input_jstr, jstring output_jstr) {
AVFormatContext *pFormatCtx;
int i, videoindex;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame, *pFrameYUV;
uint8_t *out_buffer;
AVPacket *packet;
int y_size;
int ret, got_picture;
struct SwsContext *img_convert_ctx;
FILE *fp_yuv;
int frame_cnt;
clock_t time_start, time_finish;
double time_duration = 0.0;
char input_str[500] = { 0 };
char output_str[500] = { 0 };
char info[1000] = { 0 };
const char *in_cstr = (*env)->GetStringUTFChars(env, input_jstr, NULL);
const char *out_cstr = (*env)->GetStringUTFChars(env, output_jstr, NULL);
sprintf(input_str, "%s", in_cstr);
sprintf(output_str, "%s", out_cstr);
//release the JNI strings once they have been copied into the local buffers
(*env)->ReleaseStringUTFChars(env, input_jstr, in_cstr);
(*env)->ReleaseStringUTFChars(env, output_jstr, out_cstr);
//FFmpeg av_log() callback
av_log_set_callback(custom_log);
av_register_all();
avformat_network_init();
pFormatCtx = avformat_alloc_context();
int err_code;
char buf[1024];
if ((err_code = avformat_open_input(&pFormatCtx, input_str, NULL, NULL))
!= 0) {
av_strerror(err_code, buf, 1024);
LOGE("Couldn't open file %s: %d(%s)", input_str, err_code, buf);
return -1;
}
if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
LOGE("Couldn't find stream information.\n");
return -1;
}
videoindex = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++)
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videoindex = i;
break;
}
if (videoindex == -1) {
LOGE("Couldn't find a video stream.\n");
return -1;
}
pCodecCtx = pFormatCtx->streams[videoindex]->codec;
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
LOGE("Couldn't find Codec.\n");
return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
LOGE("Couldn't open codec.\n");
return -1;
}
pFrame = av_frame_alloc();
pFrameYUV = av_frame_alloc();
out_buffer = (unsigned char *) av_malloc(
av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width,
pCodecCtx->height, 1));
av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer,
AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
packet = (AVPacket *) av_malloc(sizeof(AVPacket));
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
sprintf(info, "[Input ]%s\n", input_str);
sprintf(info, "%s[Output ]%s\n", info, output_str);
sprintf(info, "%s[Format ]%s\n", info, pFormatCtx->iformat->name);
sprintf(info, "%s[Codec ]%s\n", info, pCodecCtx->codec->name);
sprintf(info, "%s[Resolution]%dx%d\n", info, pCodecCtx->width,
pCodecCtx->height);
fp_yuv = fopen(output_str, "wb+");
if (fp_yuv == NULL) {
printf("Cannot open output file.\n");
return -1;
}
frame_cnt = 0;
time_start = clock();
while (av_read_frame(pFormatCtx, packet) >= 0) {
if (packet->stream_index == videoindex) {
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture,
packet);
if (ret < 0) {
LOGE("Decode Error.\n");
return -1;
}
if (got_picture) {
sws_scale(img_convert_ctx,
(const uint8_t* const *) pFrame->data, pFrame->linesize,
0, pCodecCtx->height, pFrameYUV->data,
pFrameYUV->linesize);
y_size = pCodecCtx->width * pCodecCtx->height;
fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
//Output info
char pictype_str[10] = { 0 };
switch (pFrame->pict_type) {
case AV_PICTURE_TYPE_I:
sprintf(pictype_str, "I");
break;
case AV_PICTURE_TYPE_P:
sprintf(pictype_str, "P");
break;
case AV_PICTURE_TYPE_B:
sprintf(pictype_str, "B");
break;
default:
sprintf(pictype_str, "Other");
break;
}
LOGI("Frame Index: %5d. Type:%s", frame_cnt, pictype_str);
frame_cnt++;
}
}
av_free_packet(packet);
}
//flush decoder
//FIX: Flush Frames remained in Codec
while (1) {
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
if (ret < 0)
break;
if (!got_picture)
break;
sws_scale(img_convert_ctx, (const uint8_t* const *) pFrame->data,
pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data,
pFrameYUV->linesize);
int y_size = pCodecCtx->width * pCodecCtx->height;
fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
//Output info
char pictype_str[10] = { 0 };
switch (pFrame->pict_type) {
case AV_PICTURE_TYPE_I:
sprintf(pictype_str, "I");
break;
case AV_PICTURE_TYPE_P:
sprintf(pictype_str, "P");
break;
case AV_PICTURE_TYPE_B:
sprintf(pictype_str, "B");
break;
default:
sprintf(pictype_str, "Other");
break;
}
LOGI("Frame Index: %5d. Type:%s", frame_cnt, pictype_str);
frame_cnt++;
}
time_finish = clock();
time_duration = (double) (time_finish - time_start) / CLOCKS_PER_SEC * 1000; //convert clock ticks to milliseconds
sprintf(info + strlen(info), "[Time ]%fms\n", time_duration);
sprintf(info + strlen(info), "[Count ]%d\n", frame_cnt);
sws_freeContext(img_convert_ctx);
fclose(fp_yuv);
av_frame_free(&pFrameYUV);
av_frame_free(&pFrame);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
return 0;
}
The contents of Android.mk are as follows:
LOCAL_PATH := $(call my-dir)
# FFmpeg library
include $(CLEAR_VARS)
LOCAL_MODULE := ffmpeg
LOCAL_SRC_FILES := libffmpeg.so
include $(PREBUILT_SHARED_LIBRARY)
# Program
include $(CLEAR_VARS)
LOCAL_MODULE := decode
LOCAL_SRC_FILES := decode.c
LOCAL_C_INCLUDES += $(LOCAL_PATH)/include
LOCAL_LDLIBS := -llog -lz
LOCAL_SHARED_LIBRARIES := ffmpeg
include $(BUILD_SHARED_LIBRARY)
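A minimal sketch of the accompanying Application.mk, assuming a single 32-bit ARM target and an old platform level (the exact values are assumptions; adjust APP_ABI and APP_PLATFORM to your NDK and target devices):
# Application.mk (sketch; the values below are assumptions)
APP_ABI := armeabi          # build only for 32-bit ARM; add armeabi-v7a etc. as needed
APP_PLATFORM := android-9   # minimum Android platform level to build against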
- Building the .so libraries used on Android
Add the NDK root directory to the PATH environment variable, then open a terminal, change into the jni directory, and run ndk-build.cmd (ndk-build on Linux/macOS). A libs folder is created next to jni, containing the .so files for each target ABI (libdecode.so and libffmpeg.so). Note that the libffmpeg.so produced here is a good ten-odd percent smaller than the one built from source, most likely because ndk-build strips debug symbols from the libraries it copies into libs/.
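For reference, the build step from a Windows terminal looks roughly like this (the project path is a placeholder):
cd <path-to-project>\jni
ndk-build.cmd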
This step and the next could be done together with Android Studio's integrated NDK tooling, but that integration tends to run into all sorts of problems, so I do them separately. I still recommend trying the integrated NDK workflow; in the long run it improves development efficiency.
- Decoding with the .so libraries
Create an Android Studio project and copy the per-ABI .so files generated in the previous step into a jniLibs directory at the same level as the java directory (create it yourself).
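Assuming the default Android Studio project layout and an armeabi build, the result looks roughly like this (the ABI folder name must match what you built for):
app/src/main/java/com/test/jt/MainActivity.java
app/src/main/jniLibs/armeabi/libffmpeg.so
app/src/main/jniLibs/armeabi/libdecode.so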
The code in MainActivity that calls the decoder is shown below.
import android.os.Bundle;
import android.os.Environment;
import android.support.v7.app.AppCompatActivity; //use androidx.appcompat.app.AppCompatActivity on newer projects
import android.view.View;
import android.widget.Button;

public class MainActivity extends AppCompatActivity {
static {
System.loadLibrary("ffmpeg");
System.loadLibrary("decode");
}
public native int decode(String inFile, String outFile);
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
Button btGetString = (Button) findViewById(R.id.bt_getString);
btGetString.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
String folderurl= Environment.getExternalStorageDirectory().getPath();
String in =folderurl+"/"+ "joy/pingfanzhilu.mkv";
String out =folderurl+"/"+ "joy/pingfanzhilu.yuv" ;
System.out.println("in:" + in + "out" + out);
}
});
}
}
Adjust the paths of the source video to decode and of the output YUV file to your own device. If you only need to decode one or a few formats, you can trim the unneeded decoders and demuxers in the configure script when building the FFmpeg source, which makes libffmpeg.so smaller (see the sketch below). The decoded YUV file can be played with a YUV player; set the player to the original video's width and height, otherwise the picture will be scrambled.
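As an illustration of such trimming, the flags below keep only the H.264 decoder and the Matroska/MP4 demuxers; this is a sketch meant to be merged into the configure command from the FFmpeg build article, not a complete invocation:
./configure \
    --disable-encoders --disable-muxers \
    --disable-decoders --enable-decoder=h264 \
    --disable-demuxers --enable-demuxer=matroska --enable-demuxer=mov \
    ...(plus the cross-compile options used when building libffmpeg.so)...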