Decoding Thread
The entry function of the audio decoding thread is audio_thread():
static int audio_thread(void *arg)
{
    //...
    do {
        ffp_audio_statistic_l(ffp);
        if ((got_frame = decoder_decode_frame(ffp, &is->auddec, frame, NULL)) < 0)
            goto the_end;
        //...
        while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
            //...
            if (!(af = frame_queue_peek_writable(&is->sampq)))
                goto the_end;
            //...
            av_frame_move_ref(af->frame, frame);
            frame_queue_push(&is->sampq);
            //...
        }
    } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
the_end:
    //...
    av_frame_free(&frame);
    return ret;
}
The thread enters the loop right away and calls decoder_decode_frame() to decode; the decoded data is stored in frame. It then calls frame_queue_peek_writable() to check whether the freshly decoded frame can be written into is->sampq (i.e., whether the sampq queue is full). If there is no free slot, the call blocks on pthread_cond_wait(). is->sampq is the queue of decoded audio frames; the playback thread reads from it directly and plays the data out. Finally, av_frame_move_ref(af->frame, frame) moves the frame into its slot in sampq: af = frame_queue_peek_writable(&is->sampq) already returned a pointer to the slot this frame belongs in, so moving the data into af->frame is all that is needed.
frame_queue_push(&is->sampq) then performs a wake-up: if the audio playback thread is blocked because sampq is empty, this call wakes it.
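To make the blocking and wake-up concrete, here is a minimal sketch of the writable-side handshake, assuming a simplified FrameQueue with mutex, cond, size, max_size and windex fields (the real ijkplayer/ffplay FrameQueue carries more bookkeeping such as rindex and serials, and SDL_CondWait() is ijkplayer's wrapper around pthread_cond_wait()):
/* Minimal sketch of the producer side, NOT the exact ijkplayer implementation. */
static Frame *frame_queue_peek_writable(FrameQueue *f)
{
    SDL_LockMutex(f->mutex);
    while (f->size >= f->max_size && !f->pktq->abort_request) {
        SDL_CondWait(f->cond, f->mutex);   /* queue full: wait until a frame is consumed */
    }
    SDL_UnlockMutex(f->mutex);

    if (f->pktq->abort_request)
        return NULL;
    return &f->queue[f->windex];           /* slot the decoder should fill */
}

static void frame_queue_push(FrameQueue *f)
{
    if (++f->windex == f->max_size)
        f->windex = 0;
    SDL_LockMutex(f->mutex);
    f->size++;
    SDL_CondSignal(f->cond);               /* wake a reader blocked on an empty queue */
    SDL_UnlockMutex(f->mutex);
}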
Inside decoder_decode_frame(), the codec that was passed in performs the actual decoding.
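As a rough sketch of what decoding one audio frame looks like, here is the modern FFmpeg send/receive pattern; the actual decoder_decode_frame() in ff_ffplay.c is more involved (packet queue, serials, EOF handling), so this is only an illustration:
/* Simplified illustration of decoding one audio frame with the send/receive API. */
static int decode_one_audio_frame(AVCodecContext *avctx, AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt);      /* feed one compressed packet */
    if (ret < 0 && ret != AVERROR(EAGAIN))
        return ret;
    ret = avcodec_receive_frame(avctx, frame);      /* fetch one decoded frame */
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;                                   /* no frame available yet */
    return ret < 0 ? ret : 1;                       /* 1 means got_frame */
}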
Playback Flow
When IjkMediaPlayer is initialized, ijkmp_android_create() is called:
IjkMediaPlayer *ijkmp_android_create(int(*msg_loop)(void*))
{
    IjkMediaPlayer *mp = ijkmp_create(msg_loop);
    //...
    mp->ffplayer->vout = SDL_VoutAndroid_CreateForAndroidSurface();
    if (!mp->ffplayer->vout)
        goto fail;

    mp->ffplayer->pipeline = ffpipeline_create_from_android(mp->ffplayer);
    ffpipeline_set_vout(mp->ffplayer->pipeline, mp->ffplayer->vout);
    return mp;

fail:
    ijkmp_dec_ref_p(&mp);
    return NULL;
}
Inside ffpipeline_create_from_android() there is this line:
pipeline->func_open_audio_output = func_open_audio_output;
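When ff_ffplay later needs an audio output, it goes through the pipeline and ends up calling this function pointer. A minimal sketch of that dispatch (the real ffpipeline_open_audio_output() in ff_ffpipeline.c may include extra checks):
/* Sketch of the dispatch through the pipeline's function pointer. */
SDL_Aout *ffpipeline_open_audio_output(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    return pipeline->func_open_audio_output(pipeline, ffp);
}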
func_open_audio_output()
static SDL_Aout *func_open_audio_output(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    SDL_Aout *aout = NULL;
    if (ffp->opensles) {
        aout = SDL_AoutAndroid_CreateForOpenSLES();
    } else {
        aout = SDL_AoutAndroid_CreateForAudioTrack();
    }
    if (aout)
        SDL_AoutSetStereoVolume(aout, pipeline->opaque->left_volume, pipeline->opaque->right_volume);
    return aout;
}
As you can see, audio output has two back ends: OpenSL ES and AudioTrack. Let's look at the AudioTrack path:
SDL_Aout *SDL_AoutAndroid_CreateForAudioTrack()
{
    SDL_Aout *aout = SDL_Aout_CreateInternal(sizeof(SDL_Aout_Opaque));
    if (!aout)
        return NULL;

    SDL_Aout_Opaque *opaque = aout->opaque;
    opaque->wakeup_cond = SDL_CreateCond();
    opaque->wakeup_mutex = SDL_CreateMutex();
    opaque->speed = 1.0f;

    aout->opaque_class = &g_audiotrack_class;
    aout->free_l = aout_free_l;
    //
    aout->open_audio = aout_open_audio;
    aout->pause_audio = aout_pause_audio;
    aout->flush_audio = aout_flush_audio;
    aout->set_volume = aout_set_volume;
    aout->close_audio = aout_close_audio;
    aout->func_get_audio_session_id = aout_get_audio_session_id;
    aout->func_set_playback_rate = func_set_playback_rate;

    return aout;
}
As we saw earlier, stream_component_open() in effect calls aout_open_audio() directly, which then calls aout_open_audio_n(), and finally:
SDL_CreateThreadEx(&opaque->_audio_tid, aout_thread, aout, "ff_aout_android");
The thread created here is the playback thread. Inside aout_thread, the work is done by aout_thread_n():
static int aout_thread_n(JNIEnv *env, SDL_Aout *aout)
{
    SDL_Aout_Opaque *opaque = aout->opaque;
    SDL_Android_AudioTrack *atrack = opaque->atrack;
    SDL_AudioCallback audio_cblk = opaque->spec.callback;
    void *userdata = opaque->spec.userdata;
    uint8_t *buffer = opaque->buffer;
    //...
    if (!opaque->abort_request && !opaque->pause_on)
        SDL_Android_AudioTrack_play(env, atrack);

    while (!opaque->abort_request) {
        SDL_LockMutex(opaque->wakeup_mutex);
        if (!opaque->abort_request && opaque->pause_on) {
            SDL_Android_AudioTrack_pause(env, atrack);
            while (!opaque->abort_request && opaque->pause_on) {
                SDL_CondWaitTimeout(opaque->wakeup_cond, opaque->wakeup_mutex, 1000);
            }
            if (!opaque->abort_request && !opaque->pause_on)
                SDL_Android_AudioTrack_play(env, atrack);
        }
        if (opaque->need_flush) {
            opaque->need_flush = 0;
            SDL_Android_AudioTrack_flush(env, atrack);
        }
        if (opaque->need_set_volume) {
            opaque->need_set_volume = 0;
            SDL_Android_AudioTrack_set_volume(env, atrack, opaque->left_volume, opaque->right_volume);
        }
        if (opaque->speed_changed) {
            opaque->speed_changed = 0;
            if (J4A_GetSystemAndroidApiLevel(env) >= 23) {
                SDL_Android_AudioTrack_setSpeed(env, atrack, opaque->speed);
            }
        }
        SDL_UnlockMutex(opaque->wakeup_mutex);

        audio_cblk(userdata, buffer, copy_size);
        if (opaque->need_flush) {
            SDL_Android_AudioTrack_flush(env, atrack);
            opaque->need_flush = false;
        }

        if (opaque->need_flush) {
            opaque->need_flush = 0;
            SDL_Android_AudioTrack_flush(env, atrack);
        } else {
            int written = SDL_Android_AudioTrack_write(env, atrack, buffer, copy_size);
            if (written != copy_size) {
                ALOGW("AudioTrack: not all data copied %d/%d", (int)written, (int)copy_size);
            }
        }

        // TODO: 1 if callback return -1 or 0
    }
}
The SDL_Android_AudioTrack_set_xxx() calls apply playback-related settings such as playback speed and volume.
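Note that these settings are not applied on the caller's thread: the caller only records the request and wakes the playback thread, which applies it in the loop above. A minimal sketch of that pattern, modeled on aout_set_volume() (field names follow the opaque struct shown earlier; the actual implementation may differ slightly):
/* Sketch of the deferred-setting pattern used by the AudioTrack aout:
 * record the request under the mutex, then wake aout_thread_n(). */
static void aout_set_volume(SDL_Aout *aout, float left_volume, float right_volume)
{
    SDL_Aout_Opaque *opaque = aout->opaque;
    SDL_LockMutex(opaque->wakeup_mutex);
    opaque->left_volume     = left_volume;
    opaque->right_volume    = right_volume;
    opaque->need_set_volume = 1;             /* picked up by the playback loop */
    SDL_CondSignal(opaque->wakeup_cond);     /* wake the thread if it is waiting */
    SDL_UnlockMutex(opaque->wakeup_mutex);
}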
Now let's look at audio_cblk(). In audio_open(), which is called from stream_component_open(), there is this line:
wanted_spec.callback = sdl_audio_callback;
The audio_cblk() invoked in aout_thread_n() above is really opaque->spec.callback, so it ends up calling sdl_audio_callback().
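In other words, audio_open() builds an SDL_AudioSpec whose callback field is sdl_audio_callback and hands it to the aout, which keeps a copy that becomes aout->opaque->spec. A rough sketch of that wiring (assumed and simplified; SDL_AoutOpenAudio() is ijkplayer's own wrapper, and the variable names follow audio_open()):
/* Sketch (assumed) of how the callback reaches the aout thread. */
wanted_spec.callback = sdl_audio_callback;
wanted_spec.userdata = opaque;                       /* handed back as audio_cblk()'s first argument */
SDL_AoutOpenAudio(ffp->aout, &wanted_spec, &spec);   /* the aout stores the spec in aout->opaque->spec */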
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    //...
    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(ffp);
            if (audio_size < 0) {
                /* if an error occurred, output silence */
                //...
            } else {
                if (is->show_mode != SHOW_MODE_VIDEO)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
            memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        else {
            //...
        }
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
    is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
    /* Let's assume the audio driver that is used by SDL has two periods. */
    //...
}
The key lines are:
audio_size = audio_decode_frame(ffp);
memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
audio_decode_frame()
/**
* Decode one audio frame and return its uncompressed size.
*
* The processed audio frame is decoded, converted if required, and
* stored in is->audio_buf, with size in bytes given by the return
* value.
*/
static int audio_decode_frame(FFPlayer *ffp)
{
    //...
    if (!(af = frame_queue_peek_readable(&is->sampq)))
        return -1;
    //...
    is->audio_buf = af->frame->data[0];
    //...
}
This mainly checks whether the decoded-frame queue is->sampq is empty (mirroring the fullness check on the decoding side): if it is empty, the call blocks (each time the decoder pushes a frame into is->sampq, it wakes this thread); otherwise it returns the first frame in the queue, whose data is assigned to ffp->is->audio_buf.
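The readable side mirrors the writable side sketched earlier; a minimal sketch, again assuming the simplified FrameQueue fields (the real frame_queue_peek_readable() also accounts for rindex_shown and serials):
/* Minimal sketch of the consumer side, NOT the exact ijkplayer implementation. */
static Frame *frame_queue_peek_readable(FrameQueue *f)
{
    SDL_LockMutex(f->mutex);
    while (f->size <= 0 && !f->pktq->abort_request) {
        SDL_CondWait(f->cond, f->mutex);   /* queue empty: wait for frame_queue_push() */
    }
    SDL_UnlockMutex(f->mutex);

    if (f->pktq->abort_request)
        return NULL;
    return &f->queue[f->rindex];           /* oldest decoded frame */
}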
Then the ffp->is->audio_buf we just assigned is copied into stream. Where does the other end of this stream lead?
Back in aout_thread_n():
SDL_Android_AudioTrack_write(env, atrack, buffer, copy_size);
The buffer here is exactly the stream from above. This function in turn calls:
(*env)->SetByteArrayRegion(env, atrack->byte_buffer, 0, (int)size_in_byte, (jbyte*) data);
J4AC_AudioTrack__write(env, atrack->thiz, atrack->byte_buffer, 0, (int)size_in_byte);
It first copies buffer (data) into a Java byte array, and then passes that array, i.e. the audio frame, down to the Java layer.
Following J4AC_AudioTrack__write() further, we find:
jint J4AC_android_media_AudioTrack__write(JNIEnv *env, jobject thiz, jbyteArray audioData, jint offsetInBytes, jint sizeInBytes)
{
    return (*env)->CallIntMethod(env, thiz, class_J4AC_android_media_AudioTrack.method_write, audioData, offsetInBytes, sizeInBytes);
}
This calls back into Java: it invokes the write() method of AudioTrack.java at the Java layer.