Source code notes (FFmpeg software-decode + ANativeWindow render path):
const char *path = env->GetStringUTFChars(url, 0);
// Demuxing context: container-level state for the whole media file.
AVFormatContext *pFormatCtx = avformat_alloc_context();
av_register_all(); // register demuxers/decoders (deprecated no-op since FFmpeg 4.0)
LOGI("%s", path);
// Open the media file and read the container header.
int result = avformat_open_input(&pFormatCtx, path, NULL, NULL);
if (result != 0) {
    LOGI("文件打开失败");
    return -1;
}
// Probe the streams to fill in codec parameters (may read a few packets).
result = avformat_find_stream_info(pFormatCtx, NULL);
if (result < 0) {
    LOGI("获取文件信息失败");
    return -1;
}
int videoStreams = -1;
// Walk every stream (video / audio / subtitle) and remember the video stream index.
// nb_streams is unsigned, so use an unsigned loop counter.
for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++) {
    if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        LOGI("视频流");
        videoStreams = i;
    } else if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        LOGI("音频流");
    }
}
if (videoStreams == -1) {
    LOGI("找不到视频流");
    return -1;
}
// Decoder context of the selected video stream.
// NOTE(review): AVStream::codec is deprecated; modern FFmpeg uses
// codecpar + avcodec_alloc_context3/avcodec_parameters_to_context.
AVCodecContext *codecContext = pFormatCtx->streams[videoStreams]->codec;
// Look up the decoder matching the stream's codec id.
AVCodec *avCodec = avcodec_find_decoder(codecContext->codec_id);
if (avCodec == NULL) {
    LOGI("找不到解码器");
    return -1;
}
result = avcodec_open2(codecContext, avCodec, NULL);
if (result < 0) {
    LOGI("打开解码器失败");
    return -1;
}
// Basic media info.
LOGI("视频格式:%s", pFormatCtx->iformat->name);
// BUG FIX: duration is int64_t (AV_TIME_BASE units); passing it to %d is
// undefined behavior — use a 64-bit specifier with an explicit cast.
LOGI("视频时长:%lld", (long long) (pFormatCtx->duration / 1000000));
LOGI("视频的宽高:%d,%d", codecContext->width, codecContext->height);
LOGI("解码器的名称:%s", avCodec->name);
// BUG FIX: a stray early `return 0;` here made the entire rendering path
// below unreachable; removed so execution falls through to the render loop.
if (surface==NULL){
LOGI("surface==null");
return -1;
}
// 获取native window
ANativeWindow *nativeWindow = ANativeWindow_fromSurface(env, surface);
if (nativeWindow==NULL){
LOGI("nativeWindow==null");
return -1;
}
// 获取视频宽高
int videoWidth = codecContext->width;
int videoHeight = codecContext->height;
// 设置native window的buffer大小,可自动拉伸
ANativeWindow_setBuffersGeometry(nativeWindow, videoWidth, videoHeight,
WINDOW_FORMAT_RGBA_8888);
ANativeWindow_Buffer windowBuffer;
//准备读取
//AVPacket用于存储一帧一帧的压缩数据(H264)
//缓冲区,开辟空间
AVPacket *packet = (AVPacket*)av_malloc(sizeof(AVPacket));
//AVFrame用于存储解码后的像素数据(YUV)
//内存分配
AVFrame *pFrame = av_frame_alloc();
//YUV420
AVFrame *pFrameRGB = av_frame_alloc();
if (pFrame == NULL || pFrameRGB == NULL) {
LOGE("Could not allocate video frame.");
return -1;
}
int numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGBA, codecContext->width, codecContext->height,
1);
uint8_t *buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
//初始化缓冲区
av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, buffer, AV_PIX_FMT_RGBA,
codecContext->width, codecContext->height, 1);
//用于转码(缩放)的参数,转之前的宽高,转之后的宽高,格式等
struct SwsContext *sws_ctx = sws_getContext(codecContext->width,
codecContext->height,
codecContext->pix_fmt,
codecContext->width,
codecContext->height, AV_PIX_FMT_RGBA,
SWS_BICUBIC, NULL, NULL, NULL);
int res;
int got_picture;
//一帧一帧的读取压缩数据
while (av_read_frame(pFormatCtx,packet)>=0){
//只要视频压缩数据(根据流的索引位置判断)
if (packet->stream_index==videoStreams){
//解码一帧视频压缩数据,得到视频像素数据
res = avcodec_decode_video2(codecContext, pFrame, &got_picture, packet);
if (res<0){
LOGI("解码错误");
}
if (got_picture){
LOGI("解码成功");
// lock native window buffer
ANativeWindow_lock(nativeWindow, &windowBuffer, 0);
sws_scale(sws_ctx,(uint8_t const *const *)pFrame->data, pFrame->linesize, 0, codecContext->height,
pFrameRGB->data, pFrameRGB->linesize);
// 获取stride
uint8_t *dst = (uint8_t *) windowBuffer.bits;
int dstStride = windowBuffer.stride * 4;
uint8_t *src = (uint8_t * )(pFrameRGB->data[0]);
int srcStride = pFrameRGB->linesize[0];
// 由于window的stride和帧的stride不同,因此需要逐行复制
int h;
for (h = 0; h < videoHeight; h++) {
memcpy(dst + h * dstStride, src + h * srcStride, srcStride);
}
ANativeWindow_unlockAndPost(nativeWindow);
//env->SetByteArrayRegion(data, 0, srcStride, (jbyte*) buffer);
// env->CallVoidMethod(jobject1,jid2,data,srcStride);
}
}
av_packet_unref(packet);
}
av_free(buffer);
av_free(pFrameRGB);
// Free the YUV frame
av_free(pFrame);
// Close the codecs
avcodec_close(codecContext);
// Close the video file
avformat_close_input(&pFormatCtx);
return 0;