The call flow from AT calling play() at the application layer down to AF opening the output channel is shown in the figure below; I won't belabor the code analysis here.
AT: AudioTrack  AF: AudioFlinger  AS: AudioSystem  APII: AudioPolicyInterfaceImpl  APS: AudioPolicyService  APM: AudioPolicyManager
After openOutput, and after a long trek through the layers, we finally arrive back in AF. Here the type of the port matters: as defined below, ports are divided into three kinds, session, mix, and physical device.
/* Audio port type indicates if it is a session (e.g AudioTrack),
 * a mix (e.g PlaybackThread output) or a physical device
 * (e.g AUDIO_DEVICE_OUT_SPEAKER) */
typedef enum {
    AUDIO_PORT_TYPE_NONE,
    AUDIO_PORT_TYPE_DEVICE,
    AUDIO_PORT_TYPE_MIX,
    AUDIO_PORT_TYPE_SESSION,
} audio_port_type_t;
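To make the three types concrete, here is a small sketch of my own (field names follow system/audio.h as I understand it; treat them as assumptions to verify on your platform) showing how each port type selects a different member of audio_port_config's ext union:

#include <system/audio.h>

/* Sketch: the three port types each use a different member of the ext union.
 * fill_example_ports is a hypothetical helper, not framework code. */
static void fill_example_ports(struct audio_port_config *dev_port,
                               struct audio_port_config *mix_port,
                               struct audio_port_config *session_port,
                               audio_io_handle_t mix_handle,
                               audio_session_t session_id)
{
    /* Physical device port, e.g. the built-in speaker */
    dev_port->type = AUDIO_PORT_TYPE_DEVICE;
    dev_port->role = AUDIO_PORT_ROLE_SINK;
    dev_port->ext.device.type = AUDIO_DEVICE_OUT_SPEAKER;

    /* Mix port, e.g. the output mixed by a PlaybackThread */
    mix_port->type = AUDIO_PORT_TYPE_MIX;
    mix_port->role = AUDIO_PORT_ROLE_SOURCE;
    mix_port->ext.mix.handle = mix_handle;

    /* Session port, e.g. the session an AudioTrack belongs to */
    session_port->type = AUDIO_PORT_TYPE_SESSION;
    session_port->ext.session.session = session_id;
}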
Depending on the types of the source and the sink port, different strategies are used to open different output channels. When the source is of type AUDIO_PORT_TYPE_DEVICE and the number of sources is 2, the code below opens one channel for the playback thread mPlaybackThread and another for the MIC input thread mRecordThread.
status_t AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *patch,
                                                    audio_patch_handle_t *handle)
{
    …
    switch (patch->sources[0].type) {
        case AUDIO_PORT_TYPE_DEVICE: {
            …
            if ((patch->num_sources == 2) ||
                ((patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
                 ((patch->sinks[0].ext.device.hw_module != srcModule) ||
                  (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0)))) {
                …
                newPatch->mPlaybackThread = audioflinger->openOutput_l(
                        patch->sinks[0].ext.device.hw_module,
                        &output, &config, device, address,
                        AUDIO_OUTPUT_FLAG_NONE);
                …
                audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
                newPatch->mRecordThread = audioflinger->openInput_l(srcModule,
                        &input, &config, device, address,
                        AUDIO_SOURCE_MIC, AUDIO_INPUT_FLAG_NONE);
            } else {
                if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
                    sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
                            patch->sinks[0].ext.mix.handle);
                    if (thread == 0) {
                        ALOGW("createAudioPatch() bad capture I/O handle %d",
                              patch->sinks[0].ext.mix.handle);
                        status = BAD_VALUE;
                        goto exit;
                    }
                    status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
                } else {
                    if (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) {
                        status = INVALID_OPERATION;
                        goto exit;
                    }
                    audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
                    status = hwDevice->create_audio_patch(hwDevice,
                            patch->num_sources, patch->sources,
                            patch->num_sinks, patch->sinks, &halHandle);
                }
            }
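To get a feel for what reaches this switch, here is a hypothetical sketch of a client describing a MIC-to-speaker patch; buildLoopbackPatch and the port choices are my own invention, but AudioSystem::createAudioPatch is the real entry point that ends up in the PatchPanel code above. With a device source and a device sink like this, the software-bridge branch opens the playback and record threads:

#include <media/AudioSystem.h>
#include <system/audio.h>

/* Hypothetical sketch: one device source, one device sink. */
android::status_t buildLoopbackPatch(audio_patch_handle_t *handle)
{
    struct audio_patch patch = {};
    patch.id = AUDIO_PATCH_HANDLE_NONE;
    patch.num_sources = 1;
    patch.num_sinks = 1;

    patch.sources[0].type = AUDIO_PORT_TYPE_DEVICE;
    patch.sources[0].role = AUDIO_PORT_ROLE_SOURCE;
    patch.sources[0].ext.device.type = AUDIO_DEVICE_IN_BUILTIN_MIC;

    patch.sinks[0].type = AUDIO_PORT_TYPE_DEVICE;
    patch.sinks[0].role = AUDIO_PORT_ROLE_SINK;
    patch.sinks[0].ext.device.type = AUDIO_DEVICE_OUT_SPEAKER;

    /* Binder call into AF; on success *handle identifies the patch */
    return android::AudioSystem::createAudioPatch(&patch, handle);
}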
create_audio_patch is not defined in our HAL, so this branch should never be taken.
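That makes sense once you notice that create_audio_patch is just an optional function pointer in audio_hw_device_t (hardware/audio.h); a pre-3.0 HAL never fills it in. A defensive sketch of the call site (my own illustration, not the actual AF code, which relies on the version gate instead) would be:

#include <errno.h>
#include <hardware/audio.h>

/* Sketch: guard the optional HAL hook before calling it.
 * halCreatePatch is a hypothetical wrapper. */
static int halCreatePatch(audio_hw_device_t *hwDevice,
                          const struct audio_patch *patch,
                          audio_patch_handle_t *halHandle)
{
    if (hwDevice->create_audio_patch == NULL) {
        return -ENOSYS;  /* HAL does not implement audio patches */
    }
    return hwDevice->create_audio_patch(hwDevice,
                                        patch->num_sources, patch->sources,
                                        patch->num_sinks, patch->sinks,
                                        halHandle);
}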
Let's focus our analysis on audioflinger->openOutput_l.
sp<AudioFlinger::PlaybackThread> AudioFlinger::openOutput_l(audio_module_handle_t module,
                                                            audio_io_handle_t *output,
                                                            audio_config_t *config,
                                                            audio_devices_t devices,
                                                            const String8& address,
                                                            audio_output_flags_t flags)
{
    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);
    …
    AudioStreamOut *outputStream = NULL;
    status_t status = outHwDev->openOutputStream(
            &outputStream,
            *output,
            devices,
            flags,
            config,
            address.string());

    mHardwareStatus = AUDIO_HW_IDLE;

    if (status == NO_ERROR) {
        PlaybackThread *thread;
        if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
            thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
            ALOGV("openOutput_l() created offload output: ID %d thread %p", *output, thread);
        } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
                || !isValidPcmSinkFormat(config->format)
                || !isValidPcmSinkChannelMask(config->channel_mask)) {
            thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
            ALOGV("openOutput_l() created direct output: ID %d thread %p", *output, thread);
            // Check if this is DirectPCM, if so
            if (flags & AUDIO_OUTPUT_FLAG_DIRECT_PCM) {
                thread->mIsDirectPcm = true;
            }
        } else {
            thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
            ALOGV("openOutput_l() created mixer output: ID %d thread %p", *output, thread);
        }
        mPlaybackThreads.add(*output, thread);
        return thread;
    }

    return 0;
}
The main work here is to find, or load, one of the three classes of input/output devices mentioned earlier. Put bluntly, that means loading the corresponding .so library so that its APIs can be called later. Next comes the device's openOutputStream, literally "open an output stream"; put bluntly again, this hooks the .so library's APIs up to the framework's middle layer, forming a top-down call chain. Finally, the flags decide which kind of thread will handle the audio data; that part is analyzed separately in the discussion of AF's threads.
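As for "loading the .so": that goes through libhardware. A minimal sketch of the standard route (load_audio_hal is my own wrapper; hw_get_module_by_class and audio_hw_device_open are the real libhardware entry points this flow is built on):

#include <hardware/hardware.h>
#include <hardware/audio.h>

/* Sketch: locate audio.<name>.<board>.so, dlopen it, and open the device
 * whose open_output_stream we saw above. */
static int load_audio_hal(const char *name /* e.g. "primary" */,
                          audio_hw_device_t **dev)
{
    const hw_module_t *module;
    int rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, name, &module);
    if (rc != 0) {
        return rc;  /* no matching HAL library found */
    }
    /* Invokes the module's open() method to get the audio_hw_device_t */
    return audio_hw_device_open(module, dev);
}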
AudioHwDevice::openOutputStream calls down layer by layer and eventually reaches the platform vendor's HAL-layer audio_hw.c:
status_t AudioHwDevice::openOutputStream(
        AudioStreamOut **ppStreamOut,
        audio_io_handle_t handle,
        audio_devices_t devices,
        audio_output_flags_t flags,
        struct audio_config *config,
        const char *address)
{
    struct audio_config originalConfig = *config;
    AudioStreamOut *outputStream = new AudioStreamOut(this, flags);

    status_t status = outputStream->open(handle, devices, config, address);
    …
    *ppStreamOut = outputStream;
    return status;
}

status_t AudioStreamOut::open(
        audio_io_handle_t handle,
        audio_devices_t devices,
        struct audio_config *config,
        const char *address)
{
    audio_stream_out_t *outStream;
    int status = hwDev()->open_output_stream(
            hwDev(),
            handle,
            devices,
            customFlags,
            config,
            &outStream,
            address);
    …
    return status;
}
adev_open_output_stream in audio_hw.c looks complicated, but all it really does is configure the members of stream_out (defined in audio_hw.h) according to the flags and the device type, and assign the audio_stream_out pointer that will later serve as the interface for driving the HAL layer.
static int adev_open_output_stream(struct audio_hw_device *dev,
                                   audio_io_handle_t handle,
                                   audio_devices_t devices,
                                   audio_output_flags_t flags,
                                   struct audio_config *config,
                                   struct audio_stream_out **stream_out,
                                   const char *address __unused)
{
    struct audio_device *adev = (struct audio_device *)dev;
    struct stream_out *out;
    int ret = 0;
    audio_format_t format;

    *stream_out = NULL;
    …
    out->channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    out->supported_channel_masks[0] = AUDIO_CHANNEL_OUT_STEREO;
    out->handle = handle;
    out->bit_width = CODEC_BACKEND_DEFAULT_BIT_WIDTH;
    out->non_blocking = 0;
    …
    /* Init use case and pcm_config */
    if ((out->flags & AUDIO_OUTPUT_FLAG_DIRECT) &&
        !(out->flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD ||
          (out->flags & AUDIO_OUTPUT_FLAG_DIRECT_PCM)) &&
        (out->devices & AUDIO_DEVICE_OUT_AUX_DIGITAL ||
         out->devices & AUDIO_DEVICE_OUT_PROXY)) {
        …
    }
    out->stream.common.get_sample_rate = out_get_sample_rate;
    out->stream.common.set_sample_rate = out_set_sample_rate;
    out->stream.common.get_buffer_size = out_get_buffer_size;
    out->stream.common.get_channels = out_get_channels;
    out->stream.common.get_format = out_get_format;
    out->stream.common.set_format = out_set_format;
    …
    *stream_out = &out->stream;
}
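Once adev_open_output_stream returns, everything the framework does with the stream goes through the function pointers assigned above. A rough consumer-side sketch (drive_stream is hypothetical; the real caller is AF's playback thread):

#include <sys/types.h>
#include <hardware/audio.h>

/* Sketch: the framework only talks to the HAL stream via its vtable. */
static void drive_stream(struct audio_stream_out *out,
                         const void *pcm, size_t bytes)
{
    /* Query the negotiated configuration through the common audio_stream */
    uint32_t rate = out->common.get_sample_rate(&out->common);
    size_t period = out->common.get_buffer_size(&out->common);
    (void)rate; (void)period;

    /* Push PCM; returns the number of bytes actually consumed */
    ssize_t written = out->write(out, pcm, bytes);
    (void)written;
}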