ijkplayer iOS Audio/Video Decoding
// Create the audio decoding thread
if ((ret = decoder_start(&is->auddec, audio_thread, ffp, "ff_audio_dec")) < 0)
// Create the video decoding thread
if ((ret = decoder_start(&is->viddec, video_thread, ffp, "ff_video_dec")) < 0)
Whether it is video or audio, decoding follows the same basic flow: take a packet from the pre-decode buffer, decode it, and put the result into the corresponding decoded-frame buffer, as shown below:
receive data —————> pre-decode data (packets) -> decoder -> decoded data (frames) —————> render (playback)
videoq/audioq are created to hold the video/audio packets before decoding.
pictq/sampq are created to hold the video/audio frames after decoding.
/* start video display */
if (frame_queue_init(&is->pictq, &is->videoq, ffp->pictq_size, 1) < 0)
goto fail;
if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
goto fail;
if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
goto fail;
FrameQueue: pictq/sampq, the decoded video/audio frames
PacketQueue *pktq: videoq/audioq, the video/audio packets awaiting decode
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
// Note: both the packet queues and the frame queues above live in the VideoState struct
typedef struct VideoState {
……
// decoded frame queues
FrameQueue pictq;
FrameQueue subpq;
FrameQueue sampq;
……
// decoders
Decoder auddec;
Decoder viddec;
Decoder subdec;
……
int audio_stream;
……
// packet queues (pre-decode)
PacketQueue audioq;
PacketQueue subtitleq;
PacketQueue videoq;
……
PacketQueue *buffer_indicator_queue;
……
} VideoState;
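The FrameQueue is a bounded ring buffer shared by the decode thread (writer) and the render thread (reader). Here is a minimal sketch of the writer-side handshake, using the ffplay-style frame_queue API that ijkplayer inherits (error handling and most field setup trimmed):

// Writer side of the FrameQueue ring buffer (sketch):
Frame *vp = frame_queue_peek_writable(&is->pictq); // blocks while the ring is full
if (!vp)
    return -1;                  // the queue was aborted (e.g. on stop/seek)
vp->pts      = pts;
vp->duration = duration;
vp->serial   = serial;          // ties the frame to the packet queue's current epoch
frame_queue_push(&is->pictq);   // publish; the render thread can now peek it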
static IJKFF_Pipenode *func_open_video_decoder(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
IJKFF_Pipenode* node = NULL;
IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
if (ffp->videotoolbox) {
// If hardware decoding is requested, try VideoToolbox first
node = ffpipenode_create_video_decoder_from_ios_videotoolbox(ffp);
if (!node)
ALOGE("vtb fail!!! switch to ffmpeg decode!!!! \n");
}
if (node == NULL) {
// Hardware decoding not requested (or VideoToolbox failed): fall back to FFmpeg software decoding
node = ffpipenode_create_video_decoder_from_ffplay(ffp);
ffp->stat.vdec_type = FFP_PROPV_DECODER_AVCODEC;
opaque->is_videotoolbox_open = false;
} else {
ffp->stat.vdec_type = FFP_PROPV_DECODER_VIDEOTOOLBOX;
opaque->is_videotoolbox_open = true;
}
ffp_notify_msg2(ffp, FFP_MSG_VIDEO_DECODER_OPEN, opaque->is_videotoolbox_open);
return node;
}
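The ffp->videotoolbox flag checked above comes from a player option. From the app layer it is typically enabled like this (standard IJKFFOptions usage; url here stands for whatever media NSURL you are playing):

IJKFFOptions *options = [IJKFFOptions optionsByDefault];
[options setPlayerOptionIntValue:1 forKey:@"videotoolbox"];   // sets ffp->videotoolbox
IJKFFMoviePlayerController *player =
    [[IJKFFMoviePlayerController alloc] initWithContentURL:url withOptions:options];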
// Creates the software video decoder
ffpipenode_create_video_decoder_from_ffplay
// Creates the hardware video decoder
IJKFF_Pipenode *ffpipenode_create_video_decoder_from_ios_videotoolbox(FFPlayer *ffp)
// Software decoding
IJKFF_Pipenode *ffpipenode_create_video_decoder_from_ffplay(FFPlayer *ffp)
{
IJKFF_Pipenode *node = ffpipenode_alloc(sizeof(IJKFF_Pipenode_Opaque));
if (!node)
return node;
IJKFF_Pipenode_Opaque *opaque = node->opaque;
opaque->ffp = ffp;
node->func_destroy = func_destroy;
node->func_run_sync = func_run_sync;
ffp_set_video_codec_info(ffp, AVCODEC_MODULE_NAME, avcodec_get_name(ffp->is->viddec.avctx->codec_id));
ffp->stat.vdec_type = FFP_PROPV_DECODER_AVCODEC;
return node;
}
// Hardware decoding
IJKFF_Pipenode *ffpipenode_create_video_decoder_from_ios_videotoolbox(FFPlayer *ffp)
{
if (!ffp || !ffp->is)
return NULL;
if ([[[UIDevice currentDevice] systemVersion] floatValue] < 8.0){
return NULL;
}
IJKFF_Pipenode *node = ffpipenode_alloc(sizeof(IJKFF_Pipenode_Opaque));
if (!node)
return node;
memset(node, sizeof(IJKFF_Pipenode), 0); // NOTE: the arguments are swapped (memset takes (ptr, value, size)), so as written this clears zero bytes
VideoState *is = ffp->is;
IJKFF_Pipenode_Opaque *opaque = node->opaque;
node->func_destroy = func_destroy;
node->func_run_sync = func_run_sync;
opaque->ffp = ffp;
opaque->decoder = &is->viddec;
opaque->avctx = opaque->decoder->avctx;
switch (opaque->avctx->codec_id) {
case AV_CODEC_ID_H264:
if (ffp->vtb_async)
opaque->context = Ijk_VideoToolbox_Async_Create(ffp, opaque->avctx);
else
opaque->context = Ijk_VideoToolbox_Sync_Create(ffp, opaque->avctx);
break;
default:
ALOGI("Videotoolbox-pipeline:open_video_decoder: not H264\n");
goto fail;
}
if (opaque->context == NULL) {
ALOGE("could not init video tool box decoder !!!");
goto fail;
}
return node;
fail:
ffpipenode_free_p(&node);
return NULL;
}
"AVPacket"是一个结构体,里面装的是h.264
"AVFream"里面装的是yuv数据
AVPacket —> Decode —> AVFream
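For reference, here is a minimal, self-contained decode step using FFmpeg's send/receive API (a sketch; ijkplayer's bundled FFmpeg of this era used the older avcodec_decode_video2 inside decoder_decode_frame instead):

#include <libavcodec/avcodec.h>

// Feed one AVPacket in, try to pull one AVFrame out.
// Returns 1 if a frame was produced, 0 if the decoder needs more input, <0 on error.
static int decode_one(AVCodecContext *avctx, AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt);       // compressed H.264 in
    if (ret < 0)
        return ret;
    ret = avcodec_receive_frame(avctx, frame);       // decoded YUV out
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;
    return ret < 0 ? ret : 1;
}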
The video decoding thread: video_thread
static int video_thread(void *arg)
{
FFPlayer *ffp = (FFPlayer *)arg;
int ret = 0;
if (ffp->node_vdec) {
ret = ffpipenode_run_sync(ffp->node_vdec);
}
return ret;
}
int ffpipenode_run_sync(IJKFF_Pipenode *node)
{
return node->func_run_sync(node);
}
// Both the software and the hardware creation paths assign the same entry point:
node->func_run_sync = func_run_sync;
For software decoding, node->func_run_sync points to the func_run_sync function in ffpipenode_ffplay_vdec.c.
For hardware decoding, node->func_run_sync points to the func_run_sync function in ffpipenode_ios_videotoolbox_vdec.m.
This illustrates how important function pointers are in C; the Linux kernel uses them everywhere, and this usage is a simple case by comparison.
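A toy, self-contained illustration of this dispatch pattern (hypothetical names, not ijkplayer code):

typedef struct Pipenode {
    int (*func_run_sync)(struct Pipenode *node);   // chosen once, at creation time
} Pipenode;

static int run_sw(struct Pipenode *n) { /* ffplay path */       return 0; }
static int run_hw(struct Pipenode *n) { /* VideoToolbox path */ return 0; }

// Callers never branch on the decoder type; they just invoke the pointer:
static int pipenode_run_sync(Pipenode *node) { return node->func_run_sync(node); }
// At creation: node->func_run_sync = use_videotoolbox ? run_hw : run_sw;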
Let's look at software decoding first.
static int func_run_sync(IJKFF_Pipenode *node)
{
IJKFF_Pipenode_Opaque *opaque = node->opaque;
return ffp_video_thread(opaque->ffp);
}
int ffp_video_thread(FFPlayer *ffp)
{
return ffplay_video_thread(ffp);
}
static int ffplay_video_thread(void *arg)
{
……
for (;;) {
// get_video_frame calls decoder_decode_frame, which does the actual decoding
ret = get_video_frame(ffp, frame);
……
ret = av_buffersrc_add_frame(filt_in, frame);
……
// push the decoded frame into pictq
ret = queue_picture(ffp, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
……
}
}
// Pushes the decoded frame into pictq
static int queue_picture(FFPlayer *ffp, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
{
......
// FIXME: set swscale options
if (SDL_VoutFillFrameYUVOverlay(vp->bmp, src_frame) < 0) {
av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
exit(1);
}
......
frame_queue_push(&is->pictq);
......
}
// Fill the decoded frame into the output overlay
int SDL_VoutFillFrameYUVOverlay(SDL_VoutOverlay *overlay, const AVFrame *frame)
{
if (!overlay || !overlay->func_fill_frame)
return -1;
return overlay->func_fill_frame(overlay, frame);
}
// Fill the image into the overlay
static int func_fill_frame(SDL_VoutOverlay *overlay, const AVFrame *frame)
{
......
// Scale the image and convert its pixel format; sws_scale supports many different scaling algorithms
sws_scale(opaque->img_convert_ctx, (const uint8_t**) frame->data, frame->linesize,
0, frame->height, swscale_dst_pic.data, swscale_dst_pic.linesize);
......
}
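For reference, a minimal, self-contained use of libswscale (a sketch, not ijkplayer's exact setup) that converts a decoded frame to I420 at a target size:

#include <libswscale/swscale.h>
#include <libavutil/frame.h>

// Convert src to YUV420P at dst_w x dst_h. dst_data/dst_linesize describe a
// caller-allocated destination picture (e.g. allocated with av_image_alloc).
static int convert_to_i420(const AVFrame *src, uint8_t *dst_data[4],
                           int dst_linesize[4], int dst_w, int dst_h)
{
    struct SwsContext *sws = sws_getContext(
        src->width, src->height, (enum AVPixelFormat)src->format,
        dst_w, dst_h, AV_PIX_FMT_YUV420P,
        SWS_BILINEAR, NULL, NULL, NULL);  // SWS_POINT, SWS_BICUBIC, ... also work
    if (!sws)
        return -1;
    sws_scale(sws, (const uint8_t * const *)src->data, src->linesize,
              0, src->height, dst_data, dst_linesize);
    sws_freeContext(sws);
    return 0;
}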
Now let's turn to hardware decoding. The hardware decoder is created by ffpipenode_create_video_decoder_from_ios_videotoolbox, listed in full above; for H.264 the key step is creating the VideoToolbox context:
// A synchronous decoder is created here (with ffp->vtb_async set, the async variant would be used instead)
opaque->context = Ijk_VideoToolbox_Sync_Create(ffp, opaque->avctx);
Ijk_VideoToolBox *Ijk_VideoToolbox_Sync_Create(FFPlayer* ffp, AVCodecContext* ic) {
return Ijk_VideoToolbox_CreateInternal(0, ffp, ic);
}
inline static Ijk_VideoToolBox *Ijk_VideoToolbox_CreateInternal(int async, FFPlayer* ffp, AVCodecContext* ic)
{
Ijk_VideoToolBox *vtb = (Ijk_VideoToolBox*) mallocz(sizeof(Ijk_VideoToolBox));
if (!vtb)
return NULL;
if (async) {
// Create the asynchronous decoder
vtb->opaque = videotoolbox_async_create(ffp, ic);
vtb->decode_frame = videotoolbox_async_decode_frame;
vtb->free = videotoolbox_async_free;
} else {
// Create the synchronous decoder
vtb->opaque = videotoolbox_sync_create(ffp, ic);
vtb->decode_frame = videotoolbox_sync_decode_frame;
vtb->free = videotoolbox_sync_free;
}
if (!vtb->opaque) {
freep((void **)&vtb);
return NULL;
}
return vtb;
}
// Create the synchronous hardware decoder
Ijk_VideoToolBox_Opaque* videotoolbox_sync_create(FFPlayer* ffp, AVCodecContext* avctx)
{
......
context_vtb->vt_session = vtbsession_create(context_vtb);
......
}
// vtbsession_create creates the hardware decode session; note that it registers
// the callback that receives decoded frames:
// outputCallback.decompressionOutputCallback = VTDecoderCallback;
static VTDecompressionSessionRef vtbsession_create(Ijk_VideoToolBox_Opaque* context)
{
FFPlayer *ffp = context->ffp;
int ret = 0;
int width = context->codecpar->width;
int height = context->codecpar->height;
VTDecompressionSessionRef vt_session = NULL;
CFMutableDictionaryRef destinationPixelBufferAttributes;
VTDecompressionOutputCallbackRecord outputCallback;
OSStatus status;
ret = vtbformat_init(&context->fmt_desc, context->codecpar);
if (ffp->vtb_max_frame_width > 0 && width > ffp->vtb_max_frame_width) {
double w_scaler = (float)ffp->vtb_max_frame_width / width;
width = ffp->vtb_max_frame_width;
height = height * w_scaler;
}
ALOGI("after scale width %d height %d \n", width, height);
destinationPixelBufferAttributes = CFDictionaryCreateMutable(
NULL,
0,
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks);
CFDictionarySetSInt32(destinationPixelBufferAttributes,
kCVPixelBufferPixelFormatTypeKey, kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange);
CFDictionarySetSInt32(destinationPixelBufferAttributes,
kCVPixelBufferWidthKey, width);
CFDictionarySetSInt32(destinationPixelBufferAttributes,
kCVPixelBufferHeightKey, height);
CFDictionarySetBoolean(destinationPixelBufferAttributes,
kCVPixelBufferOpenGLESCompatibilityKey, YES);
// Register the callback that will receive decoded frames
outputCallback.decompressionOutputCallback = VTDecoderCallback;
outputCallback.decompressionOutputRefCon = context ;
status = VTDecompressionSessionCreate(
kCFAllocatorDefault,
context->fmt_desc.fmt_desc,
NULL,
destinationPixelBufferAttributes,
&outputCallback,
&vt_session);
if (status != noErr) {
NSError* error = [NSError errorWithDomain:NSOSStatusErrorDomain code:status userInfo:nil];
NSLog(@"Error %@", [error description]);
ALOGI("%s - failed with status = (%d)", __FUNCTION__, (int)status);
}
CFRelease(destinationPixelBufferAttributes);
memset(&context->sample_info, 0, sizeof(struct sample_info));
return vt_session;
}
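The fmt_desc passed to VTDecompressionSessionCreate is built by vtbformat_init from the stream's SPS/PPS. Conceptually (a sketch, not ijkplayer's exact code) that boils down to:

#include <CoreMedia/CoreMedia.h>

// Build a CMVideoFormatDescription from H.264 parameter sets (SPS/PPS).
static CMFormatDescriptionRef make_h264_format_desc(
        const uint8_t *sps, size_t sps_size,
        const uint8_t *pps, size_t pps_size)
{
    const uint8_t *param_sets[2]  = { sps, pps };
    const size_t   param_sizes[2] = { sps_size, pps_size };
    CMFormatDescriptionRef fmt = NULL;
    OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
        kCFAllocatorDefault, 2, param_sets, param_sizes,
        4 /* bytes in each NALU length prefix (AVCC) */, &fmt);
    return (status == noErr) ? fmt : NULL;
}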
// As seen above, for the sync path the decode_frame function pointer is set to videotoolbox_sync_decode_frame:
vtb->decode_frame = videotoolbox_sync_decode_frame;
For hardware decoding, func_run_sync looks like this:
static int func_run_sync(IJKFF_Pipenode *node)
{
IJKFF_Pipenode_Opaque *opaque = node->opaque;
int ret = videotoolbox_video_thread(opaque);
if (opaque->context) {
opaque->context->free(opaque->context->opaque);
free(opaque->context);
opaque->context = NULL;
}
return ret;
}
int videotoolbox_video_thread(void *arg)
{
IJKFF_Pipenode_Opaque* opaque = (IJKFF_Pipenode_Opaque*) arg;
FFPlayer *ffp = opaque->ffp;
VideoState *is = ffp->is;
Decoder *d = &is->viddec;
int ret = 0;
for (;;) {
if (is->abort_request || d->queue->abort_request) {
return -1;
}
@autoreleasepool {
// decode_frame points to videotoolbox_sync_decode_frame here
ret = opaque->context->decode_frame(opaque->context->opaque);
}
if (ret < 0)
goto the_end;
if (!ret)
continue;
}
the_end:
return 0;
}
// Synchronous hardware video decoding
int videotoolbox_sync_decode_frame(Ijk_VideoToolBox_Opaque* context)
{
......
// Fetch a pre-decode AVPacket from the queue
if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
AVPacket pkt;
do {
if (d->queue->nb_packets == 0)
SDL_CondSignal(d->empty_queue_cond);
ffp_video_statistic_l(ffp);
if (ffp_packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0)
return -1;
if (ffp_is_flush_packet(&pkt)) {
avcodec_flush_buffers(d->avctx);
context->refresh_request = true;
context->serial += 1;
d->finished = 0;
ALOGI("flushed last keyframe pts %lld \n",d->pkt.pts);
d->next_pts = d->start_pts;
d->next_pts_tb = d->start_pts_tb;
}
} while (ffp_is_flush_packet(&pkt) || d->queue->serial != d->pkt_serial);
/// Note the following call.
/**
side_data in FFmpeg (the FLV format is unaffected by this issue):
When demuxing the TS format with FFmpeg, after av_read_frame returns a video packet
the data has to be converted to AVC1 (length-prefixed) form. If AVFMT_FLAG_KEEP_SIDE_DATA
was not set on the AVFormatContext's flags, the AVPacket's data buffer holds not only the
raw bitstream but also trailing side_data (actually the stream ID defined by the MPEG-2
standard). The computed NALU length then comes out longer than the real one, which can
make VideoToolbox fail to decode.
There are two ways to avoid the decode failure; either one works:
1. set AVFMT_FLAG_KEEP_SIDE_DATA on the AVFormatContext;
2. call av_packet_split_side_data to split the side_data out of data.
*/
av_packet_split_side_data(&pkt);
av_packet_unref(&d->pkt);
d->pkt_temp = d->pkt = pkt;
d->packet_pending = 1;
}
// Decode
ret = decode_video(context, d->avctx, &d->pkt_temp, &got_frame);
......
}
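The two workarounds described in the side_data comment above look roughly like this (a sketch; newer FFmpeg releases have deprecated both AVFMT_FLAG_KEEP_SIDE_DATA and av_packet_split_side_data, but they exist in the FFmpeg versions ijkplayer bundles):

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

// Hypothetical helper showing both fixes; in practice you pick one.
static void apply_side_data_workarounds(AVFormatContext *ic, AVPacket *pkt)
{
    // Fix 1: set before avformat_open_input, so the demuxer keeps
    // side_data out of pkt.data in the first place:
    ic->flags |= AVFMT_FLAG_KEEP_SIDE_DATA;

    // Fix 2: or split side_data out of a packet that was already read:
    av_packet_split_side_data(pkt);
}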
static int decode_video(Ijk_VideoToolBox_Opaque* context, AVCodecContext *avctx, AVPacket *avpkt, int* got_picture_ptr)
{
......
// On iOS the VTDecompressionSession becomes invalid when the app enters the background,
// so the decoder must be rebuilt; context->refresh_session = true requests that rebuild
if (context->refresh_session) {
ret = 0;
vtbsession_destroy(context);
memset(&context->sample_info, 0, sizeof(struct sample_info));
// Re-create the hardware decode session
context->vt_session = vtbsession_create(context);
if (!context->vt_session)
return -1;
if ((context->m_buffer_deep > 0) &&
ff_avpacket_i_or_idr(&context->m_buffer_packet[0], context->idr_based_identified) == true ) {
for (int i = 0; i < context->m_buffer_deep; i++) {
AVPacket* pkt = &context->m_buffer_packet[i];
ret = decode_video_internal(context, avctx, pkt, got_picture_ptr);
}
} else {
context->recovery_drop_packet = true;
ret = -1;
ALOGE("recovery error!!!!\n");
}
context->refresh_session = false;
return ret;
}
// decode_video_internal does the actual decoding
return decode_video_internal(context, avctx, avpkt, got_picture_ptr);
}
static int decode_video_internal(Ijk_VideoToolBox_Opaque* context, AVCodecContext *avctx, const AVPacket *avpkt, int* got_picture_ptr)
{
......
// Decode. vtbsession_create registered the output callback earlier:
// outputCallback.decompressionOutputCallback = VTDecoderCallback;
status = VTDecompressionSessionDecodeFrame(context->vt_session, sample_buff, decoder_flags, (void*)sample_info, 0);
if (status == noErr) {
if (ffp->is->videoq.abort_request)
goto failed;
}
if (status != 0) {
ALOGE("decodeFrame %d %s\n", (int)status, vtb_get_error_string(status));
// The VTDecompressionSession becomes invalid after the app enters the background;
// setting context->refresh_session = true triggers re-creation of the decoder
if (status == kVTInvalidSessionErr) {
context->refresh_session = true;
}
if (status == kVTVideoDecoderMalfunctionErr) {
context->recovery_drop_packet = true;
context->refresh_session = true;
}
goto failed;
}
......
}
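Before the VTDecompressionSessionDecodeFrame call above, the packet's AVCC data has to be wrapped in a CMSampleBuffer. A condensed sketch of that wrapping (error handling trimmed; not ijkplayer's exact code):

#include <CoreMedia/CoreMedia.h>

// Wrap one length-prefixed (AVCC) H.264 access unit in a CMSampleBuffer.
static CMSampleBufferRef wrap_access_unit(CMFormatDescriptionRef fmt,
                                          void *data, size_t size)
{
    CMBlockBufferRef block = NULL;
    CMSampleBufferRef sample = NULL;
    CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, data, size,
                                       kCFAllocatorNull, NULL, 0, size,
                                       0, &block);
    CMSampleBufferCreate(kCFAllocatorDefault, block, TRUE, NULL, NULL,
                         fmt, 1, 0, NULL, 0, NULL, &sample);
    CFRelease(block);
    return sample;
}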
static void VTDecoderCallback(void *decompressionOutputRefCon,
void *sourceFrameRefCon,
OSStatus status,
VTDecodeInfoFlags infoFlags,
CVImageBufferRef imageBuffer,
CMTime presentationTimeStamp,
CMTime presentationDuration)
{
......
if (ctx->new_seg_flag) {
ALOGI("new seg process!!!!");
while (ctx->m_queue_depth > 0) {
QueuePicture(ctx);
}
ctx->new_seg_flag = false;
}
......
if ((ctx->m_queue_depth > ctx->fmt_desc.max_ref_frames)) {
QueuePicture(ctx);
}
......
}
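VideoToolbox hands frames back in decode order, not presentation order. That is why VTDecoderCallback does not push every picture straight to the player: it keeps a small pts-sorted buffer (m_queue_depth entries) and only emits a picture via QueuePicture once the buffer is deeper than the stream's max_ref_frames, so frames reach pictq in presentation order.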
static void QueuePicture(Ijk_VideoToolBox_Opaque* ctx) {
......
ffp_queue_picture(ctx->ffp, &picture, pts, duration, 0, ctx->ffp->is->viddec.pkt_serial);
......
}
int ffp_queue_picture(FFPlayer *ffp, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
{
return queue_picture(ffp, src_frame, pts, duration, pos, serial);
}
// frame_queue_push(&is->pictq); puts the decoded frame into the pictq queue
static int queue_picture(FFPlayer *ffp, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
{
......
/* if the frame is not skipped, then display it */
if (vp->bmp) {
......
frame_queue_push(&is->pictq);
......
}
return 0;
}
Video rendering (the display side) takes decoded frames from pictq and displays them.
SDL_VoutIos_CreateForGLES2 // the iOS video renderer
SDL_VoutDummy_Create // a dummy renderer, not used on iOS
IjkMediaPlayer *ijkmp_ios_create(int (*msg_loop)(void*))
{
IjkMediaPlayer *mp = ijkmp_create(msg_loop);
if (!mp)
goto fail;
// Create the iOS video output (vout)
mp->ffplayer->vout = SDL_VoutIos_CreateForGLES2();
if (!mp->ffplayer->vout)
goto fail;
mp->ffplayer->pipeline = ffpipeline_create_from_ios(mp->ffplayer);
if (!mp->ffplayer->pipeline)
goto fail;
return mp;
fail:
ijkmp_dec_ref_p(&mp);
return NULL;
}
SDL_Vout *SDL_VoutIos_CreateForGLES2()
{
SDL_Vout *vout = SDL_Vout_CreateInternal(sizeof(SDL_Vout_Opaque));
if (!vout)
return NULL;
SDL_Vout_Opaque *opaque = vout->opaque;
opaque->gl_view = nil;
vout->create_overlay = vout_create_overlay;
vout->free_l = vout_free_l;
vout->display_overlay = vout_display_overlay;
return vout;
}
Here the vout->display_overlay function pointer points to the static vout_display_overlay(SDL_Vout *vout, SDL_VoutOverlay *overlay) function in ijksdl_vout_ios_gles2.m:
static int vout_display_overlay(SDL_Vout *vout, SDL_VoutOverlay *overlay)
{
@autoreleasepool {
SDL_LockMutex(vout->mutex);
int retval = vout_display_overlay_l(vout, overlay);
SDL_UnlockMutex(vout->mutex);
return retval;
}
}
// Decoded video is ultimately delivered to this function for display
static int vout_display_overlay_l(SDL_Vout *vout, SDL_VoutOverlay *overlay)
{
SDL_Vout_Opaque *opaque = vout->opaque;
IJKSDLGLView *gl_view = opaque->gl_view;
if (!gl_view) {
ALOGE("vout_display_overlay_l: NULL gl_view\n");
return -1;
}
if (!overlay) {
ALOGE("vout_display_overlay_l: NULL overlay\n");
return -1;
}
if (overlay->w <= 0 || overlay->h <= 0) {
ALOGE("vout_display_overlay_l: invalid overlay dimensions(%d, %d)\n", overlay->w, overlay->h);
return -1;
}
[gl_view display:overlay];
return 0;
}
From the analysis above, ijkplayer's video pipeline is clear:
On iOS, decoded YUV images are rendered with OpenGL ES; the render thread is video_refresh_thread, and the final drawing happens in video_image_display2.
In decoder_decode_frame, a packet is taken from the pre-decode video queue and fed to the decoder; ffplay_video_thread then pushes the decoded frame into pictq.
Once the decoded frames reach pictq, look at the render thread's static void video_image_display2(FFPlayer *ffp) function:
vp = frame_queue_peek_last(&is->pictq);
......
SDL_VoutDisplayYUVOverlay(ffp->vout, vp->bmp);
video_image_display2 fetches the most recent decoded frame from pictq
and hands it to SDL, which renders it with OpenGL ES.
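Putting the render side together, a condensed sketch of video_image_display2 (the real function in ff_ffplay.c does additional work, e.g. subtitles):

static void video_image_display2(FFPlayer *ffp)
{
    VideoState *is = ffp->is;
    Frame *vp = frame_queue_peek_last(&is->pictq);     // newest displayable frame
    if (vp->bmp)
        SDL_VoutDisplayYUVOverlay(ffp->vout, vp->bmp); // -> vout_display_overlay -> [gl_view display:]
}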