The following notes are based on FFmpeg 2.6.9:
Key structs:
- AVFormatContext — container/stream information for the opened input
- AVCodecContext — decoder context (codec parameters and state)
- AVCodec — the decoder itself
- SwsContext — pixel-format conversion (scaling) context
- AVPacket — one compressed (encoded) unit of data, typically one video frame
- AVFrame — one decoded (raw) frame
Workflow overview:
- av_register_all()
- avformat_open_input()
- avformat_find_stream_info() (see the probe sketch after this list)
- find the video stream
- get the decoder context AVCodecContext from the video stream
- look up the decoder with the context's codec_id
- avcodec_open2() to open the decoder against that context
- loop while av_read_frame() >= 0, reading one compressed packet per call
- decode each packet with avcodec_decode_video2()
- convert the decoded frame's pixel format with sws_scale()
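Before the full JNI function below, the open-and-inspect half of this flow can be sanity-checked on its own with av_dump_format(). A minimal sketch (probe() is an illustrative name; it assumes the same extern "C" FFmpeg includes used below):

int probe(const char *path) {
    av_register_all();                                  //FFmpeg 2.6.9 still requires registration
    AVFormatContext *fmt = avformat_alloc_context();
    if (avformat_open_input(&fmt, path, NULL, NULL) < 0) return -1;
    if (avformat_find_stream_info(fmt, NULL) < 0) {
        avformat_close_input(&fmt);
        return -1;
    }
    //logs container, stream, and codec details via av_log; on Android, route
    //av_log to logcat with av_log_set_callback() to actually see the output
    av_dump_format(fmt, 0, path, 0);
    avformat_close_input(&fmt);
    return 0;
}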
The code (demux, decode, and dump YUV420P to a file):
#include <jni.h>
#include <string>
#include <android/log.h>
extern "C" {
//codecs
#include "libavcodec/avcodec.h"
//container format handling (demuxing/muxing)
#include "libavformat/avformat.h"
//pixel format conversion and scaling
#include "libswscale/swscale.h"
}
#define LOGE(FORMAT, ...) __android_log_print(ANDROID_LOG_ERROR,"Guo", FORMAT, ##__VA_ARGS__)
#define LOGI(FORMAT, ...) __android_log_print(ANDROID_LOG_INFO, "Guo", FORMAT, ##__VA_ARGS__)
extern "C"
JNIEXPORT void JNICALL
Java_com_hyhl_learning_ffmpegdemo_MainActivity_open(
JNIEnv *env, jobject obj,
jstring inputStr, jobject surface, jstring outputStr) {
const char *input = env->GetStringUTFChars(inputStr, NULL);
const char *output = env->GetStringUTFChars(outputStr, NULL);
//register all demuxers/decoders (required in FFmpeg 2.6.9 before other calls)
av_register_all();
AVFormatContext *avFormatContext = avformat_alloc_context();
if (avformat_open_input(&avFormatContext, input, NULL, NULL) < 0) {
LOGE("failed to open input file");
return;
}
if (avformat_find_stream_info(avFormatContext, NULL) < 0) {
LOGE("failed to find stream info");
return;
}
int videos_index = -1;
for (int i = 0; i < (int) avFormatContext->nb_streams; ++i) {
if (avFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videos_index = i;
break;
}
}
if (videos_index == -1) {
LOGE("no video stream found");
return;
}
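//Note: av_find_best_stream() (also available in 2.6.9) can replace the manual
//search above and optionally return the decoder in the same call, e.g.:
//videos_index = av_find_best_stream(avFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);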
AVCodecContext *avCodecContext = avFormatContext->streams[videos_index]->codec;
AVCodec *pCodec = avcodec_find_decoder(avCodecContext->codec_id);
if (pCodec == NULL) {
LOGE("decoder not found");
return;
}
if (avcodec_open2(avCodecContext, pCodec, NULL) < 0) {
LOGE("failed to open decoder");
return;
}
SwsContext *swsContext = sws_getContext(avCodecContext->width, avCodecContext->height, avCodecContext->pix_fmt,
avCodecContext->width, avCodecContext->height, AV_PIX_FMT_YUV420P,
SWS_BILINEAR, NULL, NULL, NULL);
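//SWS_BILINEAR favors speed over quality (SWS_BICUBIC is sharper but slower);
//source and destination sizes are identical here, so only the pixel format changes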
AVPacket * packet = (AVPacket*)av_malloc(sizeof(AVPacket));
av_init_packet(packet);
AVFrame* frame = av_frame_alloc();
AVFrame* yuvFrame = av_frame_alloc();
uint8_t *yuvBuffer = (uint8_t *) av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, avCodecContext->width, avCodecContext->height));
avpicture_fill((AVPicture *) yuvFrame, yuvBuffer, AV_PIX_FMT_YUV420P, avCodecContext->width, avCodecContext->height);
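//Note: later FFmpeg releases deprecate avpicture_get_size()/avpicture_fill()
//in favor of av_image_get_buffer_size()/av_image_fill_arrays()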
int got_picture;
FILE *yuvFile = fopen(output, "wb");
if (yuvFile == NULL) {
LOGE("failed to open output file");
return;
}
while (av_read_frame(avFormatContext,packet) >= 0) {
avcodec_decode_video2(avCodecContext, frame, &got_picture, packet); //got_picture is nonzero once a full frame is available
if (got_picture > 0) {
//convert the decoded frame to planar YUV420P
sws_scale(swsContext,
(const uint8_t *const *) frame->data,
frame->linesize, 0, frame->height,
yuvFrame->data, yuvFrame->linesize);
int y_size = avCodecContext->width * avCodecContext->height;
//YUV420P layout: the Y plane is width*height bytes; U and V are a quarter of that each
fwrite(yuvFrame->data[0], 1, y_size, yuvFile);
fwrite(yuvFrame->data[1], 1, y_size / 4, yuvFile);
fwrite(yuvFrame->data[2], 1, y_size / 4, yuvFile);
}
av_free_packet(packet);
}
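//(Optional) drain delayed frames: decoders with CODEC_CAP_DELAY (e.g. H.264
//with B-frames) may still buffer pictures after the last packet. A sketch of
//the standard flush for the avcodec_decode_video2 API:
packet->data = NULL;
packet->size = 0;
while (avcodec_decode_video2(avCodecContext, frame, &got_picture, packet) >= 0 && got_picture > 0) {
sws_scale(swsContext, (const uint8_t *const *) frame->data,
frame->linesize, 0, frame->height,
yuvFrame->data, yuvFrame->linesize);
int y_size = avCodecContext->width * avCodecContext->height;
fwrite(yuvFrame->data[0], 1, y_size, yuvFile);
fwrite(yuvFrame->data[1], 1, y_size / 4, yuvFile);
fwrite(yuvFrame->data[2], 1, y_size / 4, yuvFile);
}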
/**
* release resources
*/
fclose(yuvFile);
av_free(yuvBuffer);
av_free(packet);
av_frame_free(&yuvFrame);
av_frame_free(&frame);
sws_freeContext(swsContext);
avcodec_close(avCodecContext);
avformat_close_input(&avFormatContext);
env->ReleaseStringUTFChars(inputStr, input);
env->ReleaseStringUTFChars(outputStr, output);
}
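Note that the surface argument is received but never used above; only a YUV file is produced. For on-screen rendering, the usual approach on Android is to convert each decoded frame to RGBA instead (create the SwsContext with AV_PIX_FMT_RGBA as the destination format) and copy it into the Surface through the NDK's ANativeWindow API. A minimal, hypothetical sketch of that copy step (renderFrame and its parameters are illustrative, not part of the original code):

#include <cstring>
#include <android/native_window.h>
#include <android/native_window_jni.h>

//Copy one RGBA frame into the Surface. The window's stride is measured in
//pixels and may be wider than the frame, so rows are copied one at a time.
void renderFrame(JNIEnv *env, jobject surface, AVFrame *rgbaFrame, int width, int height) {
    ANativeWindow *window = ANativeWindow_fromSurface(env, surface);
    ANativeWindow_setBuffersGeometry(window, width, height, WINDOW_FORMAT_RGBA_8888);
    ANativeWindow_Buffer buffer;
    if (ANativeWindow_lock(window, &buffer, NULL) == 0) {
        uint8_t *dst = (uint8_t *) buffer.bits;
        for (int y = 0; y < height; ++y) {
            memcpy(dst + y * buffer.stride * 4,                     //4 bytes per RGBA pixel
                   rgbaFrame->data[0] + y * rgbaFrame->linesize[0],
                   width * 4);
        }
        ANativeWindow_unlockAndPost(window);
    }
    ANativeWindow_release(window);
}

The ANativeWindow symbols live in the NDK's android library, so the native target must also link against -landroid.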