AudioFlinger::openOutput

Continuing from the previous post.

Building on the referenced article and the current code, this post walks through the openOutput call flow.

1 What openOutput does and its main call chain

AudioFlinger::openOutput is where the output device (module) is actually opened. It involves three steps:

  1. Load the audio hardware module (audio.primary.XXXX.so) and initialize the
     device's output methods (i.e. select the appropriate output functions)
  2. Open the output audio stream (outputStream)
  3. Create the MixerThread

The end goal is to create the playback thread, start its thread loop (i.e. run the thread), and return the thread's ID (an audio_io_handle_t).
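To make the three steps concrete, here is a heavily simplified skeleton of AudioFlinger::openOutput_l, pieced together from the snippets discussed below (error handling and the non-mixer thread types are omitted):

// Simplified sketch of AudioFlinger::openOutput_l (not the full function)
sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
                                                        audio_io_handle_t *output,
                                                        audio_config_t *config,
                                                        audio_devices_t devices,
                                                        const String8& address,
                                                        audio_output_flags_t flags)
{
    // Step 1: find (and, if needed, load) the audio hardware module
    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);

    // allocate a unique I/O handle for this output
    *output = nextUniqueId(AUDIO_UNIQUE_ID_USE_OUTPUT);

    // Step 2: open the HAL output stream
    AudioStreamOut *outputStream = NULL;
    status_t status = outHwDev->openOutputStream(
            &outputStream, *output, devices, flags, config, address.string());

    // Step 3: create the playback thread (MixerThread shown here) and register it
    sp<PlaybackThread> thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
    mPlaybackThreads.add(*output, thread);
    return thread;
}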

2 Loading the audio hardware device: findSuitableHwDev_l

Call flow

openOutput_l
->	|- findSuitableHwDev_l
		|- loadHwModule_l
			|- strncmp
			|- openDevice
				|- load_audio_interface
					|- hw_get_module_by_class
						|- load
					|- audio_hw_device_open
						|- open
					|- audio_hw_device_close
			|- nextUniqueId
			|- add
		|- getSupportedDevices
	|- openOutputStream

findSuitableHwDev_l is called from AudioFlinger::openOutput_l.
Its return value is a pointer of type AudioHwDevice*.

sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
                                                            audio_io_handle_t *output,
                                                            audio_config_t *config,
                                                            audio_devices_t devices,
                                                            const String8& address,
                                                            audio_output_flags_t flags)
{
    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);
}

findSuitableHwDev_l takes module and devices as parameters.
loadHwModule_l() loads the module(s); getSupportedDevices() is then used to check each loaded AudioHwDevice.

The matching audioHwDevice is what findSuitableHwDev_l returns.

//@Audio.h
typedef int audio_module_handle_t;
typedef uint32_t audio_devices_t;



//@AudioFlinger.cpp
AudioHwDevice* AudioFlinger::findSuitableHwDev_l(
        audio_module_handle_t module,
        audio_devices_t devices)
{
    // if module is 0, the request comes from an old policy manager and we should load
    // well known modules
    if (module == 0) {
        ALOGW("findSuitableHwDev_l() loading well know audio hw modules");
        
        //load all the well-known modules
        for (size_t i = 0; i < arraysize(audio_interfaces); i++) {
            loadHwModule_l(audio_interfaces[i]);
        }
        
        // then try to find a module supporting the requested device.
        //check each loaded module against the requested devices
        for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
            AudioHwDevice *audioHwDevice = mAudioHwDevs.valueAt(i);
            sp<DeviceHalInterface> dev = audioHwDevice->hwDevice();
            uint32_t supportedDevices;

            // Note: dev is a strong pointer to the module's HAL interface; its supported-device mask must fully cover the requested devices for this module to be returned
            if (dev->getSupportedDevices(&supportedDevices) == OK &&
                    (supportedDevices & devices) == devices) {
                // found a module that supports the requested devices, return it
                return audioHwDevice;
            }
        }
    } else {
        // check a match for the requested module handle
        AudioHwDevice *audioHwDevice = mAudioHwDevs.valueFor(module);
        if (audioHwDevice != NULL) {
            return audioHwDevice;
        }
    }

    return NULL;
}
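A quick worked example of the (supportedDevices & devices) == devices test above. audio_devices_t is a bit mask, so the test only passes when the HAL's supported-device mask covers every requested bit (the masks below are purely illustrative):

// Illustrative only: device constants come from system/audio.h
uint32_t supportedDevices = AUDIO_DEVICE_OUT_SPEAKER | AUDIO_DEVICE_OUT_WIRED_HEADPHONE;

audio_devices_t wantSpeaker        = AUDIO_DEVICE_OUT_SPEAKER;
audio_devices_t wantSpeakerAndHdmi = AUDIO_DEVICE_OUT_SPEAKER | AUDIO_DEVICE_OUT_AUX_DIGITAL;

bool ok1 = (supportedDevices & wantSpeaker) == wantSpeaker;                 // true: speaker bit is covered
bool ok2 = (supportedDevices & wantSpeakerAndHdmi) == wantSpeakerAndHdmi;   // false: AUX_DIGITAL bit is missing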

Input parameters:
audio_module_handle_t module
audio_devices_t devices

The audio_interfaces[] array defines the modules to try to load.
Of course, not all of them will load successfully; that depends on which device modules the platform vendor has implemented.

The audio_interfaces array is hard-coded; how it relates to the modules declared in the XML configuration file is something to sort out later.

//macros defining the module IDs
#define AUDIO_HARDWARE_MODULE_ID_PRIMARY "primary"
#define AUDIO_HARDWARE_MODULE_ID_A2DP "a2dp"
#define AUDIO_HARDWARE_MODULE_ID_USB "usb"
#define AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX "r_submix"
#define AUDIO_HARDWARE_MODULE_ID_CODEC_OFFLOAD "codec_offload"
#define AUDIO_HARDWARE_MODULE_ID_STUB "stub"

static const char * const audio_interfaces[] = {
    AUDIO_HARDWARE_MODULE_ID_PRIMARY,
    AUDIO_HARDWARE_MODULE_ID_A2DP,
    AUDIO_HARDWARE_MODULE_ID_USB,
};

AudioFlinger::loadHwModule_l (loading the module)

@AudioFlinger.cpp

openOutput_l
	|- findSuitableHwDev_l
	->	|- loadHwModule_l
			|- strncmp
			|- openDevice
				|- load_audio_interface
					|- hw_get_module_by_class
						|- load
					|- audio_hw_device_open
						|- open
					|- audio_hw_device_close
			|- nextUniqueId
			|- add
		|- getSupportedDevices
	|- openOutputStream

Function overview
loadHwModule_l
|- strncmp //check whether this module is already in mAudioHwDevs
|- openDevice //look up the hardware module by name and open it
|- nextUniqueId //generate a unique handle for the new AudioHwDevice
|- add //store the (handle, AudioHwDevice) key/value pair in the table

// loadHwModule_l() must be called with AudioFlinger::mLock held
audio_module_handle_t AudioFlinger::loadHwModule_l(const char *name)
{
	//iterate over mAudioHwDevs to check whether this module is already loaded
    for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
        if (strncmp(mAudioHwDevs.valueAt(i)->moduleName(), name, strlen(name)) == 0) {
            ALOGW("loadHwModule() module %s already loaded", name);
            return mAudioHwDevs.keyAt(i);
        }
    }

	//strong pointer that will receive the opened HAL device interface
    sp<DeviceHalInterface> dev;

	//load and open the device by name; openDevice() stores the resulting HAL interface in dev
    int rc = mDevicesFactoryHal->openDevice(name, &dev);
    if (rc) {
        ALOGE("loadHwModule() error %d loading module %s", rc, name);
        return AUDIO_MODULE_HANDLE_NONE;
    }

    mHardwareStatus = AUDIO_HW_INIT;
    rc = dev->initCheck();
    mHardwareStatus = AUDIO_HW_IDLE;
    if (rc) {
        ALOGE("loadHwModule() init check error %d for module %s", rc, name);
        return AUDIO_MODULE_HANDLE_NONE;
    }

    // Check and cache this HAL's level of support for master mute and master
    // volume.  If this is the first HAL opened, and it supports the get
    // methods, use the initial values provided by the HAL as the current
    // master mute and volume settings.

    AudioHwDevice::Flags flags = static_cast<AudioHwDevice::Flags>(0);
    {  // scope for auto-lock pattern
        AutoMutex lock(mHardwareLock);

        if (0 == mAudioHwDevs.size()) {
            mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
            float mv;
            if (OK == dev->getMasterVolume(&mv)) {
                mMasterVolume = mv;
            }

            mHardwareStatus = AUDIO_HW_GET_MASTER_MUTE;
            bool mm;
            if (OK == dev->getMasterMute(&mm)) {
                mMasterMute = mm;
            }
        }

        mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
        if (OK == dev->setMasterVolume(mMasterVolume)) {
            flags = static_cast<AudioHwDevice::Flags>(flags |
                    AudioHwDevice::AHWD_CAN_SET_MASTER_VOLUME);
        }

        mHardwareStatus = AUDIO_HW_SET_MASTER_MUTE;
        if (OK == dev->setMasterMute(mMasterMute)) {
            flags = static_cast<AudioHwDevice::Flags>(flags |
                    AudioHwDevice::AHWD_CAN_SET_MASTER_MUTE);
        }

        mHardwareStatus = AUDIO_HW_IDLE;
    }

    audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);

	//handle and the new AudioHwDevice form a key/value pair in mAudioHwDevs:
	//given the audio_module_handle_t, the hardware device can be looked up later
    mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));

    ALOGI("loadHwModule() Loaded %s audio interface, handle %d", name, handle);

    return handle;
}

The parameter is audio_interfaces[i], i.e. the module name, which is first checked against the modules that have already been loaded.
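Before moving on to openDevice, here is a hedged sketch of the handle/device bookkeeping described in the comments above; the member declaration matches AudioFlinger, the calls are simplified:

// Sketch: mAudioHwDevs maps module handles to AudioHwDevice objects
DefaultKeyedVector<audio_module_handle_t, AudioHwDevice*> mAudioHwDevs;

audio_module_handle_t handle = loadHwModule_l("primary");    // loads audio.primary.<variant>.so
AudioHwDevice *primaryHwDev = mAudioHwDevs.valueFor(handle); // later lookup by handle

// the module != 0 branch of findSuitableHwDev_l is exactly this valueFor() lookup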

openDevice

@DevicesFactoryHalLocal.cpp

openOutput_l
	|- findSuitableHwDev_l
		|- loadHwModule_l
			|- strncmp
		->	|- openDevice
		->		|- load_audio_interface
					|- hw_get_module_by_class
						|- load
					|- audio_hw_device_open
						|- open
					|- audio_hw_device_close
			|- nextUniqueId
			|- add
		|- getSupportedDevices
	|- openOutputStream

status_t DevicesFactoryHalLocal::openDevice(const char *name, sp<DeviceHalInterface> *device) {
    audio_hw_device_t *dev;
    status_t rc = load_audio_interface(name, &dev);
    if (rc == OK) {
        *device = new DeviceHalLocal(dev);
    }
    return rc;
}

load_audio_interface

@DevicesFactoryHalLocal.cpp

static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
{
    const hw_module_t *mod;
    int rc;

    rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
    if (rc) {
        ALOGE("%s couldn't load audio hw module %s.%s (%s)", __func__,
                AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
        goto out;
    }
    rc = audio_hw_device_open(mod, dev);
    if (rc) {
        ALOGE("%s couldn't open audio hw device in %s.%s (%s)", __func__,
                AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
        goto out;
    }
    if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
        ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
        rc = BAD_VALUE;
        audio_hw_device_close(*dev);
        goto out;
    }
    return OK;

out:
    *dev = NULL;
    return rc;
}

hw_get_module_by_class & audio_hw_device_open

@hardware.c

openOutput_l
	|- findSuitableHwDev_l
		|- loadHwModule_l
			|- strncmp
			|- openDevice
				|- load_audio_interface
				->	|- hw_get_module_by_class	//get the module
				->		|- load
				->	|- audio_hw_device_open		//open the device
						|- open
					|- audio_hw_device_close
			|- nextUniqueId
			|- add
		|- getSupportedDevices
	|- openOutputStream

After the module is obtained, the device is opened.

//@hardware.c
//load the device's shared library and resolve the needed symbols inside it
int hw_get_module_by_class(const char *class_id, const char *inst,
                           const struct hw_module_t **module)
{	
	//······
	
	//load the module
    return load(class_id, path, module);
}
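The elided part of hw_get_module_by_class builds the library path from the class id plus a variant read from system properties (ro.hardware and friends, falling back to "default"). A simplified, illustrative sketch of that name construction, not the exact hardware.c code:

//illustrative: how ("audio", "primary") ends up as audio.primary.<variant>.so
char name[64];
char path[256];

snprintf(name, sizeof(name), "%s.%s", class_id, inst);          // "audio.primary"

const char *variant = "default";                                 // normally from ro.hardware, ro.board.platform, ...
snprintf(path, sizeof(path), "/vendor/lib/hw/%s.%s.so", name, variant);
// e.g. /vendor/lib/hw/audio.primary.default.so, which is then handed to load()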


/**
 * Load the file defined by the variant and if successful
 * return the dlopen handle and the hmi.
 * @return 0 = success, !0 = failure.
 */
//@hardware.c
static int load(const char *id,
       			const char *path,
       			const struct hw_module_t **pHmi)
{
    // (variable declarations and error handling are elided in this excerpt)
    if (strncmp(path, "/system/", 8) == 0) {
        /* The library is in the system partition, so there is no need to check the
           sphal namespace; open it with dlopen directly. */
        
        //open the library
        handle = dlopen(path, RTLD_NOW);
    } else {
        handle = android_load_sphal_library(path, RTLD_NOW);
    }

	//look up the hw_module_t symbol exported by the library
    hmi = (struct hw_module_t *)dlsym(handle, sym);

	//store the dlopen handle in the module struct
    hmi->dso = handle;
}



//open the device through the module's open() method
static inline int audio_hw_device_open(const struct hw_module_t* module,
                                       struct audio_hw_device** device)
{
    return module->methods->open(module, AUDIO_HARDWARE_INTERFACE,
                                 TO_HW_DEVICE_T_OPEN(device));
}


//@audio_hal.c
static struct hw_module_methods_t hal_module_methods = {
    .open = adev_open,
};
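For context: the symbol that load() resolves with dlsym() is the module's HAL_MODULE_INFO_SYM table, which is what connects hal_module_methods (and therefore adev_open) to the library. Roughly what the stub HAL declares; the field values here are representative rather than copied verbatim:

struct audio_module HAL_MODULE_INFO_SYM = {
    .common = {
        .tag = HARDWARE_MODULE_TAG,
        .module_api_version = AUDIO_MODULE_API_VERSION_0_1,
        .hal_api_version = HARDWARE_HAL_API_VERSION,
        .id = AUDIO_HARDWARE_MODULE_ID,      // "audio"
        .name = "Stub audio HW HAL",
        .author = "The Android Open Source Project",
        .methods = &hal_module_methods,      // .open = adev_open
    },
};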



//initialize the module's operation functions
//@audio_hw.c (note the path: this is the stub HAL under modules/audio/; other HAL implementations also define an adev_open, so be careful which one you are reading)
static int adev_open(const hw_module_t* module, const char* name,
                     hw_device_t** device)
{
    ALOGV("adev_open: %s", name);

    struct stub_audio_device *adev;

    if (strcmp(name, AUDIO_HARDWARE_INTERFACE) != 0)
        return -EINVAL;

    adev = calloc(1, sizeof(struct stub_audio_device));
    if (!adev)
        return -ENOMEM;

    adev->device.common.tag = HARDWARE_DEVICE_TAG;
    adev->device.common.version = AUDIO_DEVICE_API_VERSION_2_0;
    adev->device.common.module = (struct hw_module_t *) module;
    adev->device.common.close = adev_close;

    adev->device.init_check = adev_init_check;
    adev->device.set_voice_volume = adev_set_voice_volume;
    adev->device.set_master_volume = adev_set_master_volume;
    adev->device.get_master_volume = adev_get_master_volume;
    adev->device.set_master_mute = adev_set_master_mute;
    adev->device.get_master_mute = adev_get_master_mute;
    adev->device.set_mode = adev_set_mode;
    adev->device.set_mic_mute = adev_set_mic_mute;
    adev->device.get_mic_mute = adev_get_mic_mute;
    adev->device.set_parameters = adev_set_parameters;
    adev->device.get_parameters = adev_get_parameters;
    adev->device.get_input_buffer_size = adev_get_input_buffer_size;
    adev->device.open_output_stream = adev_open_output_stream;
    adev->device.close_output_stream = adev_close_output_stream;
    adev->device.open_input_stream = adev_open_input_stream;
    adev->device.close_input_stream = adev_close_input_stream;
    adev->device.dump = adev_dump;

    *device = &adev->device.common;

    return 0;
}

audio_hw_device_open calls adev_open from the library that was just loaded. adev_open's job is to fill a device struct with the function pointers needed later, so that subsequent calls go through that struct.
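In other words, after adev_open the framework only ever talks to this HAL through those function pointers. A hedged sketch of what a caller such as DeviceHalLocal effectively does (simplified; mod, handle, devices, flags and config stand for values obtained earlier by the framework):

// Sketch: using the audio_hw_device_t filled in by adev_open
audio_hw_device_t *dev = NULL;
audio_hw_device_open(mod, &dev);                 // ends up in adev_open()

int rc = dev->init_check(dev);                   // -> adev_init_check()
dev->set_master_volume(dev, 1.0f);               // -> adev_set_master_volume()

struct audio_stream_out *out = NULL;
dev->open_output_stream(dev, handle, devices, flags,
                        &config, &out, /*address*/ "");   // -> adev_open_output_stream()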

To summarize: findSuitableHwDev_l() loads the module and initializes its operation functions.

Loading a module involves quite a bit, but roughly:
1. Search the existing mAudioHwDevs key/value table for the module; if it is already there, return that AudioHwDevice directly.
2. Otherwise open the device via openDevice, which in short:
   a. dlopen()s the HAL library and locates the hal module info symbol;
   b. calls the module's open function to initialize its operation functions.
3. Add the new AudioHwDevice to the table.

How the module is actually operated on comes later.

getSupportedDevices

openOutput_l
	|- findSuitableHwDev_l
		|- loadHwModule_l
			|- strncmp
			|- openDevice
				|- load_audio_interface
					|- hw_get_module_by_class
						|- load
					|- audio_hw_device_open
					|- audio_hw_device_close
			|- nextUniqueId
			|- add
	->	|- getSupportedDevices
	|- openOutputStream

To find a suitable device, the table is traversed: each audioHwDevice's DeviceHalInterface (device HAL interface) is asked which devices it supports. There are a lot of device types involved here, which can be a bit overwhelming.

//@DeviceHalLocal.cpp
status_t DeviceHalLocal::getSupportedDevices(uint32_t *devices) {
    if (mDev->get_supported_devices == NULL) return INVALID_OPERATION;
    *devices = mDev->get_supported_devices(mDev);
    return OK;
}
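Note the NULL check above: get_supported_devices is optional, and many HALs leave it unset (their supported devices are declared in the audio policy configuration instead), in which case findSuitableHwDev_l simply skips that module. For a HAL that does implement the hook, it looks roughly like this (illustrative, not taken from the stub HAL):

//illustrative HAL-side hook
static uint32_t adev_get_supported_devices(const struct audio_hw_device *dev)
{
    return AUDIO_DEVICE_OUT_SPEAKER |
           AUDIO_DEVICE_OUT_WIRED_HEADPHONE |
           AUDIO_DEVICE_IN_BUILTIN_MIC;
}

// wired up in adev_open():
//     adev->device.get_supported_devices = adev_get_supported_devices;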

3 Opening the output audio stream: outputStream

openOutput_l
	|-findSuitableHwDev_l
	|	|- loadHwModule_l
	|	|	|- strncmp
	|	|	|- openDevice
	|	|	|	|- load_audio_interface
	|	|	|		|- hw_get_module_by_class
	|	|	|			|- load
	|	|	|		|- audio_hw_device_open
	|	|	|		|- audio_hw_device_close
	|	|	|- nextUniqueId
	|	|	|- add
	|	|- getSupportedDevices
->	|- openOutputStream
		|- new AudioStreamOut
	->	|- open
		->	|- openOutputStream
				|- adev_open_output_stream

Opening the output stream

//@AudioHwDevice.cpp
//open the output stream
status_t AudioHwDevice::openOutputStream(
        AudioStreamOut **ppStreamOut,
        audio_io_handle_t handle,
        audio_devices_t devices,
        audio_output_flags_t flags,
        struct audio_config *config,
        const char *address)
{

    struct audio_config originalConfig = *config;

	//create the AudioStreamOut wrapper for the output stream
    AudioStreamOut *outputStream = new AudioStreamOut(this, flags);

    // Try to open the HAL first using the current format.
    ALOGV("openOutputStream(), try "
            " sampleRate %d, Format %#x, "
            "channelMask %#x",
            config->sample_rate,
            config->format,
            config->channel_mask);
    status_t status = outputStream->open(handle, devices, config, address);

    if (status != NO_ERROR) {
        delete outputStream;
        outputStream = NULL;

        // FIXME Look at any modification to the config.
        // The HAL might modify the config to suggest a wrapped format.
        // Log this so we can see what the HALs are doing.
        ALOGI("openOutputStream(), HAL returned"
            " sampleRate %d, Format %#x, "
            "channelMask %#x, status %d",
            config->sample_rate,
            config->format,
            config->channel_mask,
            status);

        // If the data is encoded then try again using wrapped PCM.
        bool wrapperNeeded = !audio_has_proportional_frames(originalConfig.format)
                && ((flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0)
                && ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0);

        if (wrapperNeeded) {
            if (SPDIFEncoder::isFormatSupported(originalConfig.format)) {
                outputStream = new SpdifStreamOut(this, flags, originalConfig.format);
                status = outputStream->open(handle, devices, &originalConfig, address);
                if (status != NO_ERROR) {
                    ALOGE("ERROR - openOutputStream(), SPDIF open returned %d",
                        status);
                    delete outputStream;
                    outputStream = NULL;
                }
            } else {
                ALOGE("ERROR - openOutputStream(), SPDIFEncoder does not support format 0x%08x",
                    originalConfig.format);
            }
        }
    }

	//The newly created outputStream is assigned to the caller's ppStreamOut parameter. Stepping back out, that is the outputStream declared in AudioFlinger::openOutput_l; remember this variable, it is needed when the playback thread is created.
    *ppStreamOut = outputStream;
    return status;
}



//@AudioStreamOut.cpp
status_t AudioStreamOut::open(
        audio_io_handle_t handle,
        audio_devices_t devices,
        struct audio_config *config,
        const char *address)
{
    sp<StreamOutHalInterface> outStream;

    audio_output_flags_t customFlags = (config->format == AUDIO_FORMAT_IEC61937)
                ? (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO)
                : flags;

	//this calls through to the DeviceHalInterface object (a DeviceHalLocal here)
    int status = hwDev()->openOutputStream(
            handle,
            devices,
            customFlags,
            config,
            address,
            &outStream);
    ALOGV("AudioStreamOut::open(), HAL returned "
            " stream %p, sampleRate %d, Format %#x, "
            "channelMask %#x, status %d",
            outStream.get(),
            config->sample_rate,
            config->format,
            config->channel_mask,
            status);

    // Some HALs may not recognize AUDIO_FORMAT_IEC61937. But if we declare
    // it as PCM then it will probably work.
    if (status != NO_ERROR && config->format == AUDIO_FORMAT_IEC61937) {
        struct audio_config customConfig = *config;
        customConfig.format = AUDIO_FORMAT_PCM_16_BIT;

        status = hwDev()->openOutputStream(
                handle,
                devices,
                customFlags,
                &customConfig,
                address,
                &outStream);
        ALOGV("AudioStreamOut::open(), treat IEC61937 as PCM, status = %d", status);
    }

    if (status == NO_ERROR) {
        stream = outStream;
        status = stream->getFrameSize(&mHalFrameSize);
    }

    return status;
}




//@audio.h
typedef struct audio_hw_device audio_hw_device_t;

//@DeviceHalLocal.h
audio_hw_device_t *mDev;

//@DeviceHalLocal.cpp
status_t DeviceHalLocal::openOutputStream(
        audio_io_handle_t handle,
        audio_devices_t devices,
        audio_output_flags_t flags,
        struct audio_config *config,
        const char *address,
        sp<StreamOutHalInterface> *outStream) {
    audio_stream_out_t *halStream;
    ALOGV("open_output_stream handle: %d devices: %x flags: %#x"
            "srate: %d format %#x channels %x address %s",
            handle, devices, flags,
            config->sample_rate, config->format, config->channel_mask,
            address);

	//this is where execution reaches the concrete HAL device
    int openResut = mDev->open_output_stream(
            mDev, handle, devices, flags, config, &halStream, address);
    if (openResut == OK) {
		//the StreamOutHalLocal wrapper is created here and used later
        *outStream = new StreamOutHalLocal(halStream, this);
    }
    ALOGV("open_output_stream status %d stream %p", openResut, halStream);
    return openResut;
}

The audio_hw_device struct is defined in audio.h and contains the open_output_stream operation.
Going back to adev_open above, that is where it was initialized:

//@audio_hw.c
static int adev_open(const hw_module_t* module, const char* name,
                     hw_device_t** device)
{
    adev->device.open_output_stream = adev_open_output_stream;
}

The open_output_stream operation of the just-loaded library is adev_open_output_stream; its purpose is to select the appropriate audio output functions.

openOutputStream
	|- new AudioStreamOut
	|- open
		|- openOutputStream
		->	|- adev_open_output_stream

//@audio_hw.c
//initialize the output stream's operation functions
static int adev_open_output_stream(struct audio_hw_device *dev,
                                   audio_io_handle_t handle,
                                   audio_devices_t devices,
                                   audio_output_flags_t flags,
                                   struct audio_config *config,
                                   struct audio_stream_out **stream_out,
                                   const char *address __unused)
{
    ALOGV("adev_open_output_stream...");

    *stream_out = NULL;
    struct stub_stream_out *out =
            (struct stub_stream_out *)calloc(1, sizeof(struct stub_stream_out));
    if (!out)
        return -ENOMEM;

    out->stream.common.get_sample_rate = out_get_sample_rate;
    out->stream.common.set_sample_rate = out_set_sample_rate;
    out->stream.common.get_buffer_size = out_get_buffer_size;
    out->stream.common.get_channels = out_get_channels;
    out->stream.common.get_format = out_get_format;
    out->stream.common.set_format = out_set_format;
    out->stream.common.standby = out_standby;
    out->stream.common.dump = out_dump;
    out->stream.common.set_parameters = out_set_parameters;
    out->stream.common.get_parameters = out_get_parameters;
    out->stream.common.add_audio_effect = out_add_audio_effect;
    out->stream.common.remove_audio_effect = out_remove_audio_effect;
    out->stream.get_latency = out_get_latency;
    out->stream.set_volume = out_set_volume;
    out->stream.write = out_write;
    out->stream.get_render_position = out_get_render_position;
    out->stream.get_next_write_timestamp = out_get_next_write_timestamp;

    *stream_out = &out->stream;
    return 0;
}

Note that this function fills in the stream's output functions, so a later call to stream.write is a call to out_write(); this is what threadLoop ends up using.
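A simplified sketch of that dispatch. On the framework side, StreamOutHalLocal::write essentially does mStream->write(mStream, buffer, bytes), where mStream is the audio_stream_out_t returned by adev_open_output_stream; on the HAL side that function pointer was set to out_write, which in the stub HAL does little more than this (roughly):

//stub-HAL flavour of out_write (roughly): pretend the data was consumed;
//a real HAL hands the buffer to the audio driver, e.g. via tinyalsa's pcm_write
static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
                         size_t bytes)
{
    (void)stream;
    (void)buffer;
    return bytes;   // report everything as written
}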

At this point the output stream (outputStream) is fully initialized. Next comes creating the playback thread, which also takes the output stream; let's look at that flow.

4 Creating the thread: new MixerThread

openOutput_l
	|- findSuitableHwDev_l
	|- openOutputStream
->	|- new MixerThread
	|- mPlaybackThreads.add(*output, thread)

MixerThread is used as the example here.

sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
                                                            audio_io_handle_t *output,
                                                            audio_config_t *config,
                                                            audio_devices_t devices,
                                                            const String8& address,
                                                            audio_output_flags_t flags)

{
	thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
}

A look at the parameters:
this: the AudioFlinger instance itself.
outputStream: the stream that was just created and passed back out.
output: passed down from the caller; for now just note that its type is audio_io_handle_t, the handle that identifies this thread (more on this below).
devices: also passed in from the caller.
mSystemReady: a bool member of AudioFlinger, presumably indicating whether the system was ready when the thread was created.

MixerThread initialization

Let's look at how MixerThread is constructed, in threads.cpp.

//@threads.cpp
AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
        audio_io_handle_t id, audio_devices_t device, bool systemReady, type_t type)
    :   PlaybackThread(audioFlinger, output, id, device, type, systemReady),
        // mAudioMixer below
        // mFastMixer below
        mFastMixerFutex(0),
        mMasterMono(false)
        // mOutputSink below
        // mPipeSink below
        // mNormalSink below
{
	//create the AudioMixer
    mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);

    // create an NBAIO sink for the HAL output stream, and negotiate
	mOutputSink = new AudioStreamOutSink(output->stream);

	//create the FastMixer
	mFastMixer = new FastMixer();

}

Note that output here is an AudioStreamOut pointer.
output->stream is the StreamOutHalLocal created earlier in DeviceHalLocal::openOutputStream (quite a chain to follow).
From it, an AudioStreamOutSink object is created as the NBAIO sink for the HAL output stream.

Further down comes the FastMixer part, which involves a lot of additional framework and concepts; we will set it aside for now.

Summary

To wrap up: the job of openOutput is to produce an output.
Producing that output involves three main pieces:

  1. Find the hardware device (AudioHwDevice) via module and devices, open it, and initialize its operations
  2. Create the output stream
  3. Create the playback thread (one of four thread types; see the sketch right after this list)
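A rough sketch of how openOutput_l chooses the thread type from the output flags (simplified, not the exact conditions; the fourth type, DuplicatingThread, is created separately by openDuplicateOutput):

// Simplified thread-type selection in openOutput_l (sketch)
sp<PlaybackThread> thread;
if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
    thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
} else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
        || !audio_is_linear_pcm(config->format)) {
    thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
} else {
    thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
}
mPlaybackThreads.add(*output, thread);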

Once the playback thread is created, it is added to the thread table keyed by a system-generated unique ID, and its thread loop starts running as soon as construction completes.

The thread loop is where the actual data processing happens; that will be covered in a later post.

Questions and answers

Three questions so far:
Q1: Where does device come from?
A1: getDeviceForStrategy() returns the device.
Q2: Where does output come from?
A2: nextUniqueId() generates it as a unique handle.
Q3: Where does module come from?
A3: getModuleHandle() provides the module handle, apparently taken from the parsed configuration file.

Q1: how device is obtained

device

//@AudioPolicyManager.cpp
status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
                                              audio_io_handle_t *output,
                                              audio_session_t session,
                                              audio_stream_type_t *stream,
                                              uid_t uid,
                                              const audio_config_t *config,
                                              audio_output_flags_t flags,
                                              audio_port_handle_t *selectedDeviceId,
                                              audio_port_handle_t *portId)
{
	//getDeviceForStrategy() returns the device
    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);

	//the device is then passed into getOutputForDevice()
    *output = getOutputForDevice(device, session, *stream,
                                 config->sample_rate, config->format, config->channel_mask,
                                 flags, &config->offload_info);
}

//@AudioPolicyManager.cpp
audio_io_handle_t AudioPolicyManager::getOutputForDevice(
        audio_devices_t device,
        audio_session_t session,
        audio_stream_type_t stream,
        uint32_t samplingRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        audio_output_flags_t flags,
        const audio_offload_info_t *offloadInfo)
{
        outputDesc->mDevice = device;
        
        status = mpClientInterface->openOutput(profile->getModuleHandle(),
                                               &output,
                                               &config,
                                               &outputDesc->mDevice,
                                               address,
                                               &outputDesc->mLatency,
                                               outputDesc->mFlags);
}


//@AudioPolicyClientImpl.cpp
status_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
                                                           audio_io_handle_t *output,
                                                           audio_config_t *config,
                                                           audio_devices_t *devices,
                                                           const String8& address,
                                                           uint32_t *latencyMs,
                                                           audio_output_flags_t flags)
{
    return af->openOutput(module, output, config, devices, address, latencyMs, flags);

}


//@AudioFlinger.cpp
status_t AudioFlinger::openOutput(audio_module_handle_t module,
                                  audio_io_handle_t *output,
                                  audio_config_t *config,
                                  audio_devices_t *devices,
                                  const String8& address,
                                  uint32_t *latencyMs,
                                  audio_output_flags_t flags)
{
    sp<ThreadBase> thread = openOutput_l(module, output, config, *devices, address, flags);
}


sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
                                                            audio_io_handle_t *output,
                                                            audio_config_t *config,
                                                            audio_devices_t devices,
                                                            const String8& address,
                                                            audio_output_flags_t flags)
{
	//devices is passed in here to find the matching AudioHwDevice
    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);

	status_t status = outHwDev->openOutputStream(
            &outputStream,
            *output,
            devices,
            flags,
            config,
            address.string());

	thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);

}

To summarize: device is obtained in getOutputForAttr, where getDeviceForStrategy returns it. The device comes from the routing strategy, and the strategies are parsed from the XML configuration files (to be verified).
Note that the devices passed into findSuitableHwDev_l and the dev created inside it are not the same thing: the devices argument is the requested device mask used for matching, while dev is the opened HAL device whose supported-device mask must cover that request.

Strategy ->  device -> output

Q2: how output is obtained

output

//@AudioPolicyManager.cpp
audio_io_handle_t AudioPolicyManager::getOutputForDevice(
        audio_devices_t device,
        audio_session_t session,
        audio_stream_type_t stream,
        uint32_t samplingRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        audio_output_flags_t flags,
        const audio_offload_info_t *offloadInfo)
{
	//initialize output to AUDIO_IO_HANDLE_NONE
    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
	
	//the address of output is passed in so that openOutput can fill in the handle
	status = mpClientInterface->openOutput(profile->getModuleHandle(),
                                               &output,
                                               &config,
                                               &outputDesc->mDevice,
                                               address,
                                               &outputDesc->mLatency,
                                               outputDesc->mFlags);
}

//@AudioFlinger.cpp
status_t AudioFlinger::openOutput(audio_module_handle_t module,
                                  audio_io_handle_t *output,
                                  audio_config_t *config,
                                  audio_devices_t *devices,
                                  const String8& address,
                                  uint32_t *latencyMs,
                                  audio_output_flags_t flags)
{
    sp<ThreadBase> thread = openOutput_l(module, output, config, *devices, address, flags);
}



sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
                                                            audio_io_handle_t *output,
                                                            audio_config_t *config,
                                                            audio_devices_t devices,
                                                            const String8& address,
                                                            audio_output_flags_t flags)
{
	//devices is passed in here to find the matching AudioHwDevice
    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);

	//a system-generated unique ID is assigned to output here
    if (*output == AUDIO_IO_HANDLE_NONE) {
        *output = nextUniqueId(AUDIO_UNIQUE_ID_USE_OUTPUT);
    } else {
        // Audio Policy does not currently request a specific output handle.
        // If this is ever needed, see openInput_l() for example code.
        ALOGE("openOutput_l requested output handle %d is not AUDIO_IO_HANDLE_NONE", *output);
        return 0;
    }


	status_t status = outHwDev->openOutputStream(
            &outputStream,
            *output,
            devices,
            flags,
            config,
            address.string());

	thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);

	//output is the thread's unique I/O handle and thread is the newly created thread
	mPlaybackThreads.add(*output, thread);
}

To summarize: the audio_io_handle_t output is a unique ID generated by nextUniqueId(). It forms a key/value pair with the thread, so once you have the output handle you can reach that thread.
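That lookup is essentially what AudioFlinger::checkPlaybackThread_l does later on (a simplified sketch):

// Sketch: resolving an audio_io_handle_t back to its playback thread
AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(audio_io_handle_t output) const
{
    return mPlaybackThreads.valueFor(output).get();
}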

Q3: how module is obtained

module

//@AudioPolicyManager.cpp
audio_io_handle_t AudioPolicyManager::getOutputForDevice(
        audio_devices_t device,
        audio_session_t session,
        audio_stream_type_t stream,
        uint32_t samplingRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        audio_output_flags_t flags,
        const audio_offload_info_t *offloadInfo)
{
        outputDesc->mDevice = device;
        
        status = mpClientInterface->openOutput(profile->getModuleHandle(),
                                               &output,
                                               &config,
                                               &outputDesc->mDevice,
                                               address,
                                               &outputDesc->mLatency,
                                               outputDesc->mFlags);
}


//@AudioPolicyClientImpl.cpp
status_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
                                                           audio_io_handle_t *output,
                                                           audio_config_t *config,
                                                           audio_devices_t *devices,
                                                           const String8& address,
                                                           uint32_t *latencyMs,
                                                           audio_output_flags_t flags)
{
    return af->openOutput(module, output, config, devices, address, latencyMs, flags);
}


//@AudioFlinger.cpp
status_t AudioFlinger::openOutput(audio_module_handle_t module,
                                  audio_io_handle_t *output,
                                  audio_config_t *config,
                                  audio_devices_t *devices,
                                  const String8& address,
                                  uint32_t *latencyMs,
                                  audio_output_flags_t flags)
{
    sp<ThreadBase> thread = openOutput_l(module, output, config, *devices, address, flags);
}


sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
                                                            audio_io_handle_t *output,
                                                            audio_config_t *config,
                                                            audio_devices_t devices,
                                                            const String8& address,
                                                            audio_output_flags_t flags)
{
	//devices is passed in here to find the matching AudioHwDevice
    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);

	status_t status = outHwDev->openOutputStream(
            &outputStream,
            *output,
            devices,
            flags,
            config,
            address.string());

	thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
}

Summary:
module is assigned in AudioPolicyManager::getOutputForDevice via profile->getModuleHandle(); judging by the name, the handle comes from the parsed configuration file.
It is then used in AudioFlinger::openOutput_l: if module is 0, the request comes from an old policy manager and the well-known modules are loaded and matched against the requested device; if it is non-zero, the AudioHwDevice is looked up directly by the module handle.
Either way the goal is an AudioHwDevice; one path matches an existing entry directly, the other has to load the modules first.
