接着上一篇文章 Ijkplayer 初始化和prepare源码分析 我们知道ijkplayer在初始化和准备过程中做了哪些事,这篇文章分析一下ijkplayer是如何播放音频的。
回顾一下prepare的流程:
Java层: prepareAsync()->
JNI层: [ijkplayer_jni.c] IjkMediaPlayer_prepareAsync()->
C源码: [ijkplayer.c] ijkmp_prepare_async()->
[ff_ffplay.c] stream_open()-> read_thread()
下面我们从read_thread开始阅读。
一、read_thread
从方法名和方法注释我们可以知道read_thread主要作用就是从文件或者网络中读取音视频流
/* this thread gets the stream from the disk or the network */
static int read_thread(void *arg)
{
// The real player object
FFPlayer *ffp = arg;
// Encapsulates all state of the playback session
VideoState *is = ffp->is;
// Format context describing the container: video width/height, audio sample rate, etc.
AVFormatContext *ic = NULL;
AVPacket pkt1, *pkt = &pkt1;
...
// 1. Allocate the AVFormatContext
ic = avformat_alloc_context();
// Install an interrupt callback so blocking I/O can bail out immediately on error or exit
ic->interrupt_callback.callback = decode_interrupt_cb;
ic->interrupt_callback.opaque = is;
// Protocol-specific option handling
if (av_stristart(is->filename, "rtmp", NULL) ||
av_stristart(is->filename, "rtsp", NULL)) {
// There is total different meaning for 'timeout' option in rtmp
av_log(ffp, AV_LOG_WARNING, "remove 'timeout' option for rtmp.\n");
av_dict_set(&ffp->format_opts, "timeout", NULL, 0);
}
...
// 2. Open the input and read the file header; decoders are not opened yet.
//    Probes the stream protocol (http, rtmp, ...); for network sources this creates the connection.
err = avformat_open_input(&ic, is->filename, is->iformat, &ffp->format_opts);
...
AVDictionary **opts = setup_find_stream_info_opts(ic, ffp->codec_opts);
...
// 3. Probe the streams: yields the container format, audio/video codec parameters, etc.
err = avformat_find_stream_info(ic, opts);
...
// Walk nb_streams and classify each stream
int video_stream_count = 0;
int h264_stream_count = 0;
int first_h264_stream = -1;
for (i = 0; i < ic->nb_streams; i++) {
AVStream *st = ic->streams[i];
enum AVMediaType type = st->codecpar->codec_type;
st->discard = AVDISCARD_ALL;
if (type >= 0 && ffp->wanted_stream_spec[type] && st_index[type] == -1)
// Match the user's stream specifier to pick audio/video streams by type
if (avformat_match_stream_specifier(ic, st, ffp->wanted_stream_spec[type]) > 0)
st_index[type] = i;
// choose first h264
if (type == AVMEDIA_TYPE_VIDEO) {
enum AVCodecID codec_id = st->codecpar->codec_id;
video_stream_count++;
if (codec_id == AV_CODEC_ID_H264) {
h264_stream_count++;
if (first_h264_stream < 0)
first_h264_stream = i;
}
}
}
// 4. Use av_find_best_stream to select the best stream of each type
if (video_stream_count > 1 && st_index[AVMEDIA_TYPE_VIDEO] < 0) {
st_index[AVMEDIA_TYPE_VIDEO] = first_h264_stream;
av_log(NULL, AV_LOG_WARNING, "multiple video stream found, prefer first h264 stream: %d\n", first_h264_stream);
}
if (!ffp->video_disable)
st_index[AVMEDIA_TYPE_VIDEO] =
av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
if (!ffp->audio_disable)
st_index[AVMEDIA_TYPE_AUDIO] =
av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
st_index[AVMEDIA_TYPE_AUDIO],
st_index[AVMEDIA_TYPE_VIDEO],
NULL, 0);
if (!ffp->video_disable && !ffp->subtitle_disable)
st_index[AVMEDIA_TYPE_SUBTITLE] =
av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
st_index[AVMEDIA_TYPE_SUBTITLE],
(st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
st_index[AVMEDIA_TYPE_AUDIO] :
st_index[AVMEDIA_TYPE_VIDEO]),
NULL, 0);
...
/* open the streams */
// 5. stream_component_open: for each selected stream find its decoder and start its decode thread.
// The audio/video decoder threads are audio_thread and video_thread respectively.
if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
stream_component_open(ffp, st_index[AVMEDIA_TYPE_AUDIO]);
} else {
ffp->av_sync_type = AV_SYNC_VIDEO_MASTER;
is->av_sync_type = ffp->av_sync_type;
}
ret = -1;
if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
ret = stream_component_open(ffp, st_index[AVMEDIA_TYPE_VIDEO]);
}
if (is->show_mode == SHOW_MODE_NONE)
is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
stream_component_open(ffp, st_index[AVMEDIA_TYPE_SUBTITLE]);
}
...
// The prepare phase is complete
ffp->prepared = true;
ffp_notify_msg1(ffp, FFP_MSG_PREPARED);
// 6. Wait in a loop until start() is called
if (!ffp->render_wait_start && !ffp->start_on_prepared) {
while (is->pause_req && !is->abort_request) {
SDL_Delay(20);
}
}
...
// 7. Enter the endless read loop; packets read here are consumed by the decode threads,
//    and video_refresh_thread performs clock sync to drive playback
for (;;) {
if (is->abort_request)
break;// handle the abort request
#ifdef FFP_MERGE
if (is->paused != is->last_paused) {
is->last_paused = is->paused;// handle pause/resume
if (is->paused)
is->read_pause_return = av_read_pause(ic);
else
av_read_play(ic);
}
#endif
...
/* if the queue are full, no need to read more */
// Bound the buffered data: when the queues are full there is no need to read more
//#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
if (ffp->infinite_buffer<1 && !is->seek_req &&
#ifdef FFP_MERGE
(is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
#else
(is->audioq.size + is->videoq.size + is->subtitleq.size > ffp->dcc.max_buffer_size
#endif
|| ( stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq, MIN_FRAMES)
&& stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq, MIN_FRAMES)
&& stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq, MIN_FRAMES)))) {
if (!is->eof) {
ffp_toggle_buffering(ffp, 0);
}
/* wait 10 ms */
SDL_LockMutex(wait_mutex);
SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
SDL_UnlockMutex(wait_mutex);
continue;
}
...
// 8. Read one compressed packet (pre-decode AVPacket data) into pkt
ret = av_read_frame(ic, pkt);
// ret == 0 means a packet was read successfully; < 0 means error or end of file
...
/* check if packet is in play range specified by user, then queue, otherwise discard */
// Check whether the packet lies in the user-specified play range; queue it if so, otherwise discard it
stream_start_time = ic->streams[pkt->stream_index]->start_time;
pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
pkt_in_play_range = ffp->duration == AV_NOPTS_VALUE ||
(pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
av_q2d(ic->streams[pkt->stream_index]->time_base) -
(double)(ffp->start_time != AV_NOPTS_VALUE ? ffp->start_time : 0) / 1000000
<= ((double)ffp->duration / 1000000);
// 9. Route the packet into the matching audio/video/subtitle queue
if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
// audio
packet_queue_put(&is->audioq, pkt);
} else if (pkt->stream_index == is->video_stream && pkt_in_play_range
&& !(is->video_st && (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))) {
// video
packet_queue_put(&is->videoq, pkt);
} else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
// subtitle
packet_queue_put(&is->subtitleq, pkt);
} else {
av_packet_unref(pkt);
}
// 10. Buffering control
// Each time a packet has been read in this loop, check whether buffering needs to run
if (ffp->packet_buffering) {
io_tick_counter = SDL_GetTickHR();
if ((!ffp->first_video_frame_rendered && is->video_st) || (!ffp->first_audio_frame_rendered && is->audio_st)) {
// Before the first frame is rendered, check every 50 ms: #define FAST_BUFFERING_CHECK_PER_MILLISECONDS (50)
if (abs((int)(io_tick_counter - prev_io_tick_counter)) > FAST_BUFFERING_CHECK_PER_MILLISECONDS) {
prev_io_tick_counter = io_tick_counter;
ffp->dcc.current_high_water_mark_in_ms = ffp->dcc.first_high_water_mark_in_ms;
ffp_check_buffering_l(ffp);
}
} else {
// After the first frame is rendered, check every 500 ms: #define BUFFERING_CHECK_PER_MILLISECONDS (500)
if (abs((int)(io_tick_counter - prev_io_tick_counter)) > BUFFERING_CHECK_PER_MILLISECONDS) {
prev_io_tick_counter = io_tick_counter;
ffp_check_buffering_l(ffp);
}
}
}
}
...
return 0;
}
read_thread() 方法主要做了:
- 打开音视频文件或者建立URL链接
- 解析音视频流信息
- 循环等待start调用,进入播放状态
- 在stream_component_open中:打开音频播放器,创建音频解码线程audio_thread;创建视频解码器,创建视频解码线程video_thread。
- 循环读取音视频流AVPacket,存入PacketQueue
值得一提的是,在此方法中创建了用于封装音视频格式信息的上下文:AVFormatContext ,因为后面还会有其它上下文,所以特意留意了一下。
二、stream_component_open
同样从方法名和方法注释,可以知道此方法的作用是打开给定的音视频流。
/* open a given stream. Return 0 if OK */
static int stream_component_open(FFPlayer *ffp, int stream_index)
{
// Holds all audio/video playback state
VideoState *is = ffp->is;
// Demuxing (input format) context
AVFormatContext *ic = is->ic;
// Decoder context
AVCodecContext *avctx;
// Decoder
AVCodec *codec = NULL;
...
// Allocate the decoder context
avctx = avcodec_alloc_context3(NULL);
if (!avctx)
return AVERROR(ENOMEM);
// Copy the stream parameters (AVStream.codecpar) straight into the decoder context
ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
if (ret < 0)
goto fail;
av_codec_set_pkt_timebase(avctx, ic->streams[stream_index]->time_base);
// Look up the decoder by codec_id
codec = avcodec_find_decoder(avctx->codec_id);
...
opts = filter_codec_opts(ffp->codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
// Open the decoder
if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
goto fail;
}
switch (avctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
...
/* prepare audio output */
// Open the audio output device
if ((ret = audio_open(ffp, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
...
// Initialize the audio decoder
decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
if ((ret = decoder_start(&is->auddec, audio_thread, ffp, "ff_audio_dec")) < 0)
goto out;
SDL_AoutPauseAudio(ffp->aout, 0);
break;
case AVMEDIA_TYPE_VIDEO:
...
decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
ffp->node_vdec = ffpipeline_open_video_decoder(ffp->pipeline, ffp);
if ((ret = decoder_start(&is->viddec, video_thread, ffp, "ff_video_dec")) < 0)
goto out;
break;
case AVMEDIA_TYPE_SUBTITLE:
...
}
可以看到,stream_component_open主要作用有:
- 打开音频设备audio_open
- 创建音频解码器,启动音频解码线程 audio_thread
- 创建视频解码器,启动视频解码线程 video_thread
…等等
这里有个解码器的上下文:AVCodecContext。
我们先查看audio_thread,之后再查看audio_open是如何打开音频设备的
三、audio_thread
// Audio decode thread: pulls AVPackets through decoder_decode_frame and pushes the
// resulting AVFrames into the sample FrameQueue (is->sampq) for the output path.
static int audio_thread(void *arg)
{
FFPlayer *ffp = arg;
VideoState *is = ffp->is;
AVFrame *frame = av_frame_alloc();
Frame *af;
...
do {
// Decode one AVPacket into an AVFrame; < 0 means abort
if ((got_frame = decoder_decode_frame(ffp, &is->auddec, frame, NULL)) < 0)
goto the_end;
// Get a writable node from the sample queue.
// FIX: the original excerpt dropped this `goto the_end;`, which inverted the
// check — upstream ffplay bails out when no writable node is available.
if (!(af = frame_queue_peek_writable(&is->sampq)))
goto the_end;
// Transfer ownership of frame's data into af->frame
av_frame_move_ref(af->frame, frame);
// Publish the node to the queue
frame_queue_push(&is->sampq);
} while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
}
我们知道解码的主要作用就是将AVPacket数据解码成对应的AVFrame数据,看一下decoder_decode_frame是如何解码的:
// Generic decode step shared by audio/video: drain decoded frames with
// avcodec_receive_frame, and feed new packets with avcodec_send_packet.
// Returns 1 when a frame was produced, 0 on EOF, -1 on abort.
static int decoder_decode_frame(FFPlayer *ffp, Decoder *d, AVFrame *frame, AVSubtitle *sub) {
int ret = AVERROR(EAGAIN);
for (;;) {
AVPacket pkt;
// While the stream is continuous (queue serial matches the packet serial),
// keep calling avcodec_receive_frame to drain decoded frames
if (d->queue->serial == d->pkt_serial) {
do {
if (d->queue->abort_request)
return -1;
switch (d->avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
// Receive a decoded frame from the decoder; 0 means success
ret = avcodec_receive_frame(d->avctx, frame);
...
break;
case AVMEDIA_TYPE_AUDIO:
// Receive a decoded frame from the decoder; 0 means success
ret = avcodec_receive_frame(d->avctx, frame);
...
break;
default:
break;
}
if (ret == AVERROR_EOF) {
d->finished = d->pkt_serial;
avcodec_flush_buffers(d->avctx);
return 0;
}
if (ret >= 0)
return 1;
} while (ret != AVERROR(EAGAIN));
}
do {
if (d->queue->nb_packets == 0)
SDL_CondSignal(d->empty_queue_cond);
if (d->packet_pending) {
av_packet_move_ref(&pkt, &d->pkt);
d->packet_pending = 0;
} else {
// Take a pkt from the packet queue; blocks when the queue is empty.
// FIX: this line was plain text in the excerpt (missing the '//' prefix),
// which would not compile.
if (packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0)
return -1;
}
} while (d->queue->serial != d->pkt_serial);
...
// Send the pkt to the decoder for decoding
if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
d->packet_pending = 1;
av_packet_move_ref(&d->pkt, &pkt);
}
}
}
四、audio_open
4.1、audio_open
// Opens the audio output device and installs sdl_audio_callback as the data source.
static int audio_open(FFPlayer *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
{
FFPlayer *ffp = opaque;
VideoState *is = ffp->is;
SDL_AudioSpec wanted_spec, spec;
...
// Point SDL_AudioSpec.callback at sdl_audio_callback; in section 4.4 the output thread
// invokes wanted_spec.callback so sdl_audio_callback performs the audio resampling
// (the original comment had typos: "callbackf", "sdl_audio_callbackj", section "3.4")
wanted_spec.callback = sdl_audio_callback;
wanted_spec.userdata = opaque;
// ffp->aout == SDL_Aout *aout;
while (SDL_AoutOpenAudio(ffp->aout, &wanted_spec, &spec) < 0) {
/* avoid infinity loop on exit. --by bbcallen */
...
}
...
return spec.size;
}
调用SDL_AoutOpenAudio打开音频输出设备,查看SDL_AoutOpenAudio方法之前,我们先来看一下ffp->aout的创建过程。
在上一篇文章 ijkplayer初始化 中我们已经知道在初始化过程native_setup方法中会创建音频输出设备,创建代码如下:
// Create the audio output device for the pipeline: OpenSL ES when the
// "opensles" option is set, Android AudioTrack otherwise. Returns NULL on failure.
static SDL_Aout *func_open_audio_output(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    SDL_Aout *aout = ffp->opensles ? SDL_AoutAndroid_CreateForOpenSLES()
                                   : SDL_AoutAndroid_CreateForAudioTrack();
    if (!aout)
        return NULL;
    // Carry the pipeline's configured volume over to the new device.
    SDL_AoutSetStereoVolume(aout, pipeline->opaque->left_volume, pipeline->opaque->right_volume);
    return aout;
}
在prepare过程中会将创建的音频输出设备赋值给ffp->aout指针,赋值代码如下:
// Prepare entry point (excerpt): lazily binds the audio output device to ffp->aout.
int ffp_prepare_async_l(FFPlayer *ffp, const char *file_name)
{
if (!ffp->aout) {
// The SDL_Aout device was created during initialization;
// store the opened audio output device in ffp->aout here
ffp->aout = ffpipeline_open_audio_output(ffp->pipeline, ffp);
if (!ffp->aout)
return -1;
}
}
在ijkplayer中音频输出支持:OpenslES和AudioTrack两种方式,默认使用AudioTrack,如果使用openslES需要在options中配置:
// Select the audio backend: opensles=1 enables OpenSL ES output, 0 keeps the default AudioTrack.
if (mSettings.getUsingOpenSLES()) {
ijkMediaPlayer.setOption(IjkMediaPlayer.OPT_CATEGORY_PLAYER, "opensles", 1);
} else {
ijkMediaPlayer.setOption(IjkMediaPlayer.OPT_CATEGORY_PLAYER, "opensles", 0);
}
因为AudioTrack是Android音频设备,因此我们看一下ijkplayer是如何通过OpenslES实现音频播放的吧。至于原因,姑且算OpenslES播放效率相对高一点,毕竟都在C层
4.2、SDL_AoutOpenAudio
方法所在文件: ijksdl_aout.c
// Dispatch to the backend's open_audio implementation (OpenSL ES or AudioTrack).
// Returns -1 when the device, the request, or the implementation is missing.
int SDL_AoutOpenAudio(SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained)
{
    if (!aout || !desired || !aout->open_audio)
        return -1;
    return aout->open_audio(aout, desired, obtained);
}
该方法会调用SDL_Aout的open_audio方法,而SDL_Aout有两个实现,这里查看OpenSL ES的实现。
在查看OpenSLES的open_audio实现之前,看一下如何创建OpenSLES的SDL_Aout:
// Builds the OpenSL ES SDL_Aout: wires every aout operation to its OpenSL ES implementation.
SDL_Aout *SDL_AoutAndroid_CreateForOpenSLES()
{
SDL_Aout *aout = SDL_Aout_CreateInternal(sizeof(SDL_Aout_Opaque));
if (!aout)
return NULL;
...
aout->free_l = aout_free_l;
aout->opaque_class = &g_opensles_class;
// open_audio is implemented by aout_open_audio
aout->open_audio = aout_open_audio;
aout->pause_audio = aout_pause_audio;
aout->flush_audio = aout_flush_audio;
aout->close_audio = aout_close_audio;
aout->set_volume = aout_set_volume;
aout->func_get_latency_seconds = aout_get_latency_seconds;
return aout;
fail:
aout_free_l(aout);
return NULL;
}
可以看到open_audio指向aout_open_audio
4.3、aout_open_audio
方法所在文件: ijksdl_aout_android_opensles.c
// OpenSL ES implementation of open_audio: creates and realizes the SL audio
// player, grabs its play/volume/buffer-queue interfaces, then starts aout_thread.
static int aout_open_audio(SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained)
{
SDL_Aout_Opaque *opaque = aout->opaque;
SLEngineItf slEngine = opaque->slEngine;// the OpenSL engine
SLDataFormat_PCM *format_pcm = &opaque->format_pcm;
int ret = 0;
...
// Audio source configuration (buffer queue locator + PCM format)
SLDataSource audio_source = {&loc_bufq, format_pcm};
// config audio sink
SLDataLocator_OutputMix loc_outmix = {
SL_DATALOCATOR_OUTPUTMIX,
opaque->slOutputMixObject
};
// Output mix (the sink the player renders into)
SLDataSink audio_sink = {&loc_outmix, NULL};
// The OpenSL player object
SLObjectItf slPlayerObject = NULL;
const SLInterfaceID ids2[] = { SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_VOLUME, SL_IID_PLAY };
static const SLboolean req2[] = { SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
// Create the OpenSL audio player.
// Args: 1. engine  2. player object (out)  3. audio source: sample rate, channels, bit depth, ...
ret = (*slEngine)->CreateAudioPlayer(slEngine, &slPlayerObject, &audio_source,
// Args: 4. audio sink (output mix)  5. interface count (size of the two arrays below)
&audio_sink, sizeof(ids2) / sizeof(*ids2),
// Arg 5: SLInterfaceID array — which interfaces the created AudioPlayerObject must expose
// Arg 6: SLboolean array — whether creation must fail if the matching interface is unsupported
ids2, req2);
opaque->slPlayerObject = slPlayerObject;
// Realize the player object: SLObjectItf slPlayerObject (SL_BOOLEAN_FALSE = synchronous)
ret = (*slPlayerObject)->Realize(slPlayerObject, SL_BOOLEAN_FALSE);
// Get the play interface (opaque->slPlayItf) used to control playback state
ret = (*slPlayerObject)->GetInterface(slPlayerObject, SL_IID_PLAY, &opaque->slPlayItf);
ret = (*slPlayerObject)->GetInterface(slPlayerObject, SL_IID_VOLUME, &opaque->slVolumeItf);
// Get the buffer-queue interface (opaque->slBufferQueueItf): the queue to be played
ret = (*slPlayerObject)->GetInterface(slPlayerObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &opaque->slBufferQueueItf);
// Register the buffer-queue callback
ret = (*opaque->slBufferQueueItf)->RegisterCallback(opaque->slBufferQueueItf, aout_opensles_callback, (void*)aout);
// Start the aout_thread playback thread
opaque->audio_tid = SDL_CreateThreadEx(&opaque->_audio_tid, aout_thread, aout, "ff_aout_opensles");
return opaque->buffer_capacity;
fail:
aout_close_audio(aout);
return -1;
}
在此方法中主要是对OpenSLES播放音频的一些操作,然后执行aout_thread线程播放音频
4.4、aout_thread
方法所在文件: ijksdl_aout_android_opensles.c
static int aout_thread(void *arg)
{
return aout_thread_n(arg);
}
// Playback worker loop: repeatedly asks the callback for resampled audio and
// enqueues each filled buffer into the OpenSL ES buffer queue until aborted.
static int aout_thread_n(SDL_Aout *aout)
{
SDL_Aout_Opaque *opaque = aout->opaque;
SLPlayItf slPlayItf = opaque->slPlayItf;
SLAndroidSimpleBufferQueueItf slBufferQueueItf = opaque->slBufferQueueItf;
SLVolumeItf slVolumeItf = opaque->slVolumeItf;
// The resample callback (sdl_audio_callback, installed in audio_open)
SDL_AudioCallback audio_cblk = opaque->spec.callback;
void *userdata = opaque->spec.userdata;
// Buffer about to be filled and played
uint8_t *next_buffer = NULL;
int next_buffer_index = 0;
size_t bytes_per_buffer = opaque->bytes_per_buffer;
// Put the player into the PLAYING state
if (!opaque->abort_request && !opaque->pause_on)
(*slPlayItf)->SetPlayState(slPlayItf, SL_PLAYSTATE_PLAYING);
// Playback loop
while (!opaque->abort_request) {
...
next_buffer = opaque->buffer + next_buffer_index * bytes_per_buffer;
next_buffer_index = (next_buffer_index + 1) % OPENSLES_BUFFERS;
// Invoke sdl_audio_callback to fill next_buffer with resampled audio
audio_cblk(userdata, next_buffer, bytes_per_buffer);
if (opaque->need_flush) {
ALOGE("flush");
opaque->need_flush = 0;
(*slBufferQueueItf)->Clear(slBufferQueueItf);
} else {
// Enqueue the resampled audio into the OpenSL buffer queue for playback
slRet = (*slBufferQueueItf)->Enqueue(slBufferQueueItf, next_buffer, bytes_per_buffer);
if (slRet == SL_RESULT_SUCCESS) {
// do nothing
} else if (slRet == SL_RESULT_BUFFER_INSUFFICIENT) {
// don't retry, just pass through
ALOGE("SL_RESULT_BUFFER_INSUFFICIENT\n");
} else {
ALOGE("slBufferQueueItf->Enqueue() = %d\n", (int)slRet);
break;
}
}
}
return 0;
}
此方法的作用有:
- 调用sdl_audio_callback方法对音频进行重采样
- 将重采样的音频加入OpenSLES缓冲池等待播放
为什么在这里调用audio_cblk会触发sdl_audio_callback进行音频重采样?可以看《4.1、audio_open》方法的实现
4.5、sdl_audio_callback
此方法位于:ff_ffplay.c 中
/* prepare a new audio buffer */
// Fills `stream` with `len` bytes of resampled PCM; called by the audio output
// thread (aout_thread_n) each time it needs a buffer to enqueue.
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
FFPlayer *ffp = opaque;
VideoState *is = ffp->is;
int audio_size, len1;
...
while (len > 0) {
// When the current buffer is fully consumed, decode and resample the next audio frame
if (is->audio_buf_index >= is->audio_buf_size) {
// Resample one audio frame; result is stored in is->audio_buf
audio_size = audio_decode_frame(ffp);
if (audio_size < 0) {
/* if error, just output silence */
is->audio_buf = NULL;
is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
} else {
if (is->show_mode != SHOW_MODE_VIDEO)
update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
is->audio_buf_size = audio_size;
}
is->audio_buf_index = 0;
}
// FIX: the excerpt dropped the closing brace above, nesting the copy logic
// inside the refill branch; upstream ffplay runs the copy on EVERY iteration.
len1 = is->audio_buf_size - is->audio_buf_index;
if (len1 > len)
len1 = len;
if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME){
// Copy the resampled data into the output stream
memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
}else{
memset(stream, 0, len1);
if (!is->muted && is->audio_buf)
SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume);
}
// Advance the output pointer and the read position within audio_buf
len -= len1;
stream += len1;
is->audio_buf_index += len1;
}
...
}
该方法会调用audio_decode_frame对音频进行重采样,然后把重采样的音频拷贝到输入数组stream中。下面来看一下重采样过程,如果需要获取播放的PCM数据,就可以在这里拿到并返回或者保存。
4.6、audio_decode_frame
此方法位于:ff_ffplay.c 中
/**
 * Decode one audio frame and return its uncompressed size.
 *
 * The processed audio frame is decoded, converted if required, and
 * stored in is->audio_buf, with size in bytes given by the return
 * value.
 */
static int audio_decode_frame(FFPlayer *ffp)
{
VideoState *is = ffp->is;
int data_size, resampled_data_size;
int64_t dec_channel_layout;
av_unused double audio_clock0;
int wanted_nb_samples;
Frame *af;
do {
...
// Take one decoded frame from the sample queue; blocks until one is readable
if (!(af = frame_queue_peek_readable(&is->sampq)))
return -1;
frame_queue_next(&is->sampq);
} while (af->serial != is->audioq.serial);
...
// (Re)create the resampler whenever the source format no longer matches
if (af->frame->format != is->audio_src.fmt ||
dec_channel_layout != is->audio_src.channel_layout ||
af->frame->sample_rate != is->audio_src.freq ||
(wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
AVDictionary *swr_opts = NULL;
swr_free(&is->swr_ctx);
// Allocate the resample context and set its parameters: channel layout, sample format, sample rate
is->swr_ctx = swr_alloc_set_opts(NULL,
is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
dec_channel_layout, af->frame->format, af->frame->sample_rate,
0, NULL);
// Initialize the resample context
if (swr_init(is->swr_ctx) < 0) {
swr_free(&is->swr_ctx);
return -1;
}
}
if (is->swr_ctx) {
// Input: audio data before resampling
const uint8_t **in = (const uint8_t **)af->frame->extended_data;
// Output: audio data after resampling
uint8_t **out = &is->audio_buf1;
// Per-channel output sample count after resampling (with headroom)
int out_count = (int)((int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256);
int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
int len2;
av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
if (!is->audio_buf1)
return AVERROR(ENOMEM);
// Resample
len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
// Keep the resampled data in audio_buf
is->audio_buf = is->audio_buf1;
// Size in bytes of the resampled PCM data
// NOTE(review): bytes_per_sample is not defined in this excerpt; in the full
// source it comes from av_get_bytes_per_sample(is->audio_tgt.fmt) — confirm.
resampled_data_size = len2 * is->audio_tgt.channels * bytes_per_sample;
}
return resampled_data_size;
}
该方法主要是对音频进行重采样操作。采样完成后将数据返回给4.4中的buffer数组,放入OpenSLES缓冲池中等待播放。
五、总结
以上就是ijkplayer音频播放流程,播放流程图如下:
参考:
https://its401.com/article/andylao62/109853894#3%E3%80%81stream_component_open%28%29
https://zhuanlan.zhihu.com/p/43672062