Android Audio Framework, Part 2: A Source Walkthrough of the User Recording Startup Flow

Preface

This post continues 《Android音频框架之一 详解audioPolicy流程及HAL驱动加载》; the series documents implementing internal (system-audio) recording on Android 7.1 and later.
When a user records through the AudioRecord API, the app-side setup looks like this:

private void init() {
        int minBufferSize = AudioRecord.getMinBufferSize(kSampleRate, kChannelMode,
                kEncodeFormat);
        mRecord = new AudioRecord(MediaRecorder.AudioSource.REMOTE_SUBMIX,
                kSampleRate, kChannelMode, kEncodeFormat, minBufferSize * 2);  //>REMOTE_SUBMIX
        Log.d(TAG,"Create AudiRecord ...");
    }
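
On the native side, those Java calls boil down to roughly the sequence below. This is a hedged sketch rather than platform code: the package name, rate, and buffer handling are illustrative stand-ins, but the class and methods (libmedia's AudioRecord with set(), start(), read(), stop()) are exactly what the rest of this post traces.

// Sketch only: the C++ AudioRecord in frameworks/av/media/libmedia that the
// JNI layer ends up driving; error handling elided.
sp<AudioRecord> record = new AudioRecord(String16("com.example.app"));  // opPackageName (illustrative)
status_t status = record->set(AUDIO_SOURCE_REMOTE_SUBMIX,   // inputSource
                              48000,                        // sampleRate
                              AUDIO_FORMAT_PCM_16_BIT,      // format
                              AUDIO_CHANNEL_IN_STEREO);     // channelMask; remaining args defaulted
if (status == NO_ERROR && record->start() == NO_ERROR) {
    char buffer[4096];
    ssize_t n = record->read(buffer, sizeof(buffer));       // blocking read of captured PCM
    record->stop();
}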

So what exactly does the Android system do to get there?
This post reconstructs that flow by walking the source.

Environment

Source: Android 7.1; hardware platform: RK3288 box.

step 1:

When an app calls the new AudioRecord() API, the first stop is the JNI layer registered by the LOCAL_MODULE := libandroid_runtime module.
The JNI methods registered for AudioRecord are:

@ frameworks/base/core/jni/android_media_AudioRecord.cpp
static const JNINativeMethod gMethods[] = {
    // name,               signature,  funcPtr
    {"native_start",         "(II)I",    (void *)android_media_AudioRecord_start},
    {"native_stop",          "()V",    (void *)android_media_AudioRecord_stop},
    {"native_setup",         "(Ljava/lang/Object;Ljava/lang/Object;[IIIII[ILjava/lang/String;J)I",
                                      (void *)android_media_AudioRecord_setup},      //> configures the AudioRecord parameters
    {"native_finalize",      "()V",    (void *)android_media_AudioRecord_finalize},
    {"native_release",       "()V",    (void *)android_media_AudioRecord_release},
    {"native_read_in_byte_array",
                             "([BIIZ)I",
                                     (void *)android_media_AudioRecord_readInArray<jbyteArray>},
    {"native_read_in_short_array",
                             "([SIIZ)I",
                                     (void *)android_media_AudioRecord_readInArray<jshortArray>},
    {"native_read_in_float_array",
                             "([FIIZ)I",
                                     (void *)android_media_AudioRecord_readInArray<jfloatArray>},
    {"native_read_in_direct_buffer","(Ljava/lang/Object;IZ)I",
                                       (void *)android_media_AudioRecord_readInDirectBuffer},
    {"native_get_buffer_size_in_frames",
                             "()I", (void *)android_media_AudioRecord_get_buffer_size_in_frames},
    {"native_set_marker_pos","(I)I",   (void *)android_media_AudioRecord_set_marker_pos},
    {"native_get_marker_pos","()I",    (void *)android_media_AudioRecord_get_marker_pos},
    {"native_set_pos_update_period",
                             "(I)I",   (void *)android_media_AudioRecord_set_pos_update_period},
    {"native_get_pos_update_period",
                             "()I",    (void *)android_media_AudioRecord_get_pos_update_period},
    {"native_get_min_buff_size",
                             "(III)I",   (void *)android_media_AudioRecord_get_min_buff_size},
    {"native_setInputDevice", "(I)Z", (void *)android_media_AudioRecord_setInputDevice},
    {"native_getRoutedDeviceId", "()I", (void *)android_media_AudioRecord_getRoutedDeviceId},
    {"native_enableDeviceCallback", "()V", (void *)android_media_AudioRecord_enableDeviceCallback},
    {"native_disableDeviceCallback", "()V",
                                        (void *)android_media_AudioRecord_disableDeviceCallback},
    {"native_get_timestamp", "(Landroid/media/AudioTimestamp;I)I",
                                       (void *)android_media_AudioRecord_get_timestamp},
};
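
How does this table get bound to the Java class? A registration function in the same file wires it up when the runtime starts. A simplified sketch (close to the real function, which additionally resolves and caches the Java field and method IDs used later):

static const char* const kClassPathName = "android/media/AudioRecord";

// Invoked during the runtime's JNI registration pass at startup: binds each
// {name, signature, funcPtr} row above to android.media.AudioRecord.
int register_android_media_AudioRecord(JNIEnv *env)
{
    return RegisterMethodsOrDie(env, kClassPathName, gMethods, NELEM(gMethods));
}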

The setup function itself (abridged; local declarations and error paths are trimmed):

@ frameworks/base/core/jni/android_media_AudioRecord.cpp
static jint
android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        jobject jaa, jintArray jSampleRate, jint channelMask, jint channelIndexMask,
        jint audioFormat, jint buffSizeInBytes, jintArray jSession, jstring opPackageName,
        jlong nativeRecordInJavaObj)
{
	jclass clazz = env->GetObjectClass(thiz);

	if (nativeRecordInJavaObj == 0) { //> create the AudioRecord instance and set its parameters
		audio_format_t format = audioFormatToNative(audioFormat);

		lpRecorder = new AudioRecord(String16(opPackageNameStr.c_str()));     //> new AudioRecord(), see step 2

		const status_t status = lpRecorder->set(paa->source,
					    sampleRateInHertz,
					    format,        // word length, PCM
					    localChanMask,
					    frameCount,
					    recorderCallback,// callback_t
					    lpCallbackData,// void* user
					    0,             // notificationFrames,
					    true,          // threadCanCallJava
					    sessionId,
					    AudioRecord::TRANSFER_DEFAULT,
					    flags,
					    -1, -1,        // default uid, pid
					    paa);
	} else {
		lpRecorder = (AudioRecord*)nativeRecordInJavaObj;
		lpCallbackData = new audiorecord_callback_cookie;
		lpCallbackData->audioRecord_class = (jclass)env->NewGlobalRef(clazz);
		// we use a weak reference so the AudioRecord object can be garbage collected.
		lpCallbackData->audioRecord_ref = env->NewGlobalRef(weak_this);
		lpCallbackData->busy = false;
	}


	setAudioRecord(env, thiz, lpRecorder);                   //> stash the native object into the Java AudioRecord (sketch below)

	env->SetLongField(thiz, javaAudioRecordFields.nativeCallbackCookie, (jlong)lpCallbackData);

	return (jint) AUDIO_JAVA_SUCCESS;
}
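
setAudioRecord() stashes the strong pointer into the Java object's native field so that later native_xxx calls can retrieve it. Roughly (an abridged sketch; the reference-count bookkeeping is simplified):

// Stores lpRecorder into the Java AudioRecord's long field, bumping the
// refcount so the native object outlives this JNI frame, and dropping the
// previously stored instance, if any.
static sp<AudioRecord> setAudioRecord(JNIEnv* env, jobject thiz, const sp<AudioRecord>& ar)
{
    Mutex::Autolock l(sLock);
    sp<AudioRecord> old =
            (AudioRecord*)env->GetLongField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
    if (ar.get()) {
        ar->incStrong((void*)setAudioRecord);
    }
    if (old != 0) {
        old->decStrong((void*)setAudioRecord);
    }
    env->SetLongField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj, (jlong)ar.get());
    return old;
}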

step 2:

From here on, all of the code is native C++ library code.

@ frameworks/av/media/libmedia/AudioRecord.cpp
AudioRecord::AudioRecord(...)  // abridged: the real constructor takes the full
{                              // parameter list and simply forwards it to set()
	 mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
            notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
            uid, pid, pAttributes);
}

step 3:

set() stores the inputSource and the other parameters; when a callback is supplied and no AudioRecordThread exists yet, it creates the thread.

status_t AudioRecord::set(...)  // abridged
{
    if (cbf != NULL) {
        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);  // create the worker thread
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
        // thread begins in paused state, and will not reference us until start()
    }

    status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);  // open the record, see step 4
    ALOGD("Debug_dump_info: %s,%d set() runStatus:%d ", __func__, __LINE__, status);
    if (status != NO_ERROR) {
        if (mAudioRecordThread != 0) {
            mAudioRecordThread->requestExit();   // see comment in AudioRecord.h
            mAudioRecordThread->requestExitAndWait();
            mAudioRecordThread.clear();
        }
        return status;
    }

    AudioSystem::acquireAudioSessionId(mSessionId, -1);

    return NO_ERROR;
}
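
What does that worker thread do? A heavily abridged sketch (pause/exit handling and the sleep-time math are elided): threadLoop() simply keeps asking the owning AudioRecord to pump data and fire the callback that the JNI layer registered.

bool AudioRecord::AudioRecordThread::threadLoop()
{
    // ... paused / exit-requested handling elided ...
    nsecs_t ns = mReceiver.processAudioBuffer();  // mReceiver is the owning AudioRecord:
                                                  // drains the shared-memory buffer and
                                                  // invokes the registered callback
    // ... translate ns into "sleep, then loop again" or "exit", elided ...
    return true;
}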

step 4:

With the input-source attributes and sampling rate resolved, openRecord_l() calls audioFlinger->openRecord(), which creates the server-side record track and returns an IAudioRecord binder.

status_t AudioRecord::openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName)
{
	const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();

	status = AudioSystem::getInputForAttr(&mAttributes, &input,              //> 获取输入源 参数
                                        mSessionId,
                                        // FIXME compare to AudioTrack
                                        mClientPid,
                                        mClientUid,
                                        mSampleRate, mFormat, mChannelMask,
                                        mFlags, mSelectedDeviceId);
    //> this is where the failure first surfaced
    status = AudioSystem::getSamplingRate(input, &afSampleRate);

    sp<IAudioRecord> record = audioFlinger->openRecord(input,
                                                       mSampleRate,
                                                       mFormat,
                                                       mChannelMask,
                                                       opPackageName,
                                                       &temp,
                                                       &flags,
                                                       mClientPid,
                                                       tid,
                                                       mClientUid,
                                                       &mSessionId,
                                                       &notificationFrames,
                                                       iMem,
                                                       bufferMem,
                                                       &status);
    
    // update proxy
    mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mFrameSize);
    mProxy->setEpoch(epoch);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    IInterface::asBinder(mAudioRecord)->linkToDeath(mDeathNotifier, this);

    return NO_ERROR;
}
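
The cblk and buffers fed into the proxy above come out of the shared memory that openRecord() returned: iMem carries the control block, and the capture buffers either follow it in the same region or arrive in a second region (bufferMem). A sketch abridged from the same function:

// The control block sits at the start of the shared memory both sides map.
audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
void* buffers;
if (bufferMem == 0) {
    buffers = cblk + 1;              // data buffers packed right after the cblk
} else {
    buffers = bufferMem->pointer();  // separate shared-memory region
}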

step 5:

getInputForAttr() takes the parameters the app passed into new AudioRecord() and resolves them against the policy objects built from audio_policy_configuration.xml:

@ frameworks/av/media/libmedia/AudioSystem.cpp

status_t AudioSystem::getInputForAttr(const audio_attributes_t *attr,
                                audio_io_handle_t *input,
                                audio_session_t session,
                                pid_t pid,
                                uid_t uid,
                                uint32_t samplingRate,
                                audio_format_t format,
                                audio_channel_mask_t channelMask,
                                audio_input_flags_t flags,
                                audio_port_handle_t selectedDeviceId)
{
    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
    if (aps == 0) return NO_INIT;
    return aps->getInputForAttr(
            attr, input, session, pid, uid,
            samplingRate, format, channelMask, flags, selectedDeviceId);         //> see step 6
}

// establish binder interface to AudioPolicy service
const sp<IAudioPolicyService> AudioSystem::get_audio_policy_service()
{
    sp<IAudioPolicyService> ap;
    sp<AudioPolicyServiceClient> apc;
    {
        Mutex::Autolock _l(gLockAPS);
        if (gAudioPolicyService == 0) {
            sp<IServiceManager> sm = defaultServiceManager();
            sp<IBinder> binder;
            do {
                binder = sm->getService(String16("media.audio_policy"));
                if (binder != 0)
                    break;
                ALOGW("AudioPolicyService not published, waiting...");
                usleep(500000); // 0.5 s
            } while (true);
            if (gAudioPolicyServiceClient == NULL) {
                gAudioPolicyServiceClient = new AudioPolicyServiceClient();
            }
            binder->linkToDeath(gAudioPolicyServiceClient);
            gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
            LOG_ALWAYS_FATAL_IF(gAudioPolicyService == 0);
            apc = gAudioPolicyServiceClient;
        }
        ap = gAudioPolicyService;
    }
    if (apc != 0) {
        ap->registerClient(apc);
    }

    return ap;
}

A note on AudioPolicyServiceClient: it is the client side of the audio policy service that the Android system starts at boot. One service instance serves many apps;
an app driving AudioRecord goes through this per-process client, and client and server communicate through an event queue, which is why event-posting code shows up throughout these files.
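
Concretely, aps->getInputForAttr() is a binder call: the proxy marshals the arguments and transacts with audioserver, where the real AudioPolicyService::getInputForAttr() (step 6) runs. A hedged sketch of the proxy side in IAudioPolicyService.cpp, with the argument marshalling abridged:

virtual status_t getInputForAttr(const audio_attributes_t *attr, audio_io_handle_t *input,
                                 audio_session_t session, pid_t pid, uid_t uid,
                                 uint32_t samplingRate, audio_format_t format,
                                 audio_channel_mask_t channelMask, audio_input_flags_t flags,
                                 audio_port_handle_t selectedDeviceId)
{
    Parcel data, reply;
    data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
    data.write(attr, sizeof(audio_attributes_t));
    // ... session, pid, uid, samplingRate, format, channelMask, flags and
    //     selectedDeviceId written as int32s, in call order ...
    status_t status = remote()->transact(GET_INPUT_FOR_ATTR, data, &reply);
    if (status != NO_ERROR) return status;
    status = (status_t)reply.readInt32();               // server-side result
    if (status == NO_ERROR) {
        *input = (audio_io_handle_t)reply.readInt32();  // the handle we asked for
    }
    return status;
}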

step 6:

@ frameworks/av/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp (abridged)

status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
                                             audio_io_handle_t *input,
                                             audio_session_t session,
                                             pid_t pid,
                                             uid_t uid,
                                             uint32_t samplingRate,
                                             audio_format_t format,
                                             audio_channel_mask_t channelMask,
                                             audio_input_flags_t flags,
                                             audio_port_handle_t selectedDeviceId)
{

    if ((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
        return BAD_VALUE;
    }

    {
        Mutex::Autolock _l(mLock);
        // the audio_in_acoustics_t parameter is ignored by get_input()
        status = mAudioPolicyManager->getInputForAttr(attr, input, session, uid,        //> the manager's method, see step 7
                                                     samplingRate, format, channelMask,
                                                     flags, selectedDeviceId,
                                                     &inputType);
        audioPolicyEffects = mAudioPolicyEffects;
        
        if (status == NO_ERROR) {
            // enforce permission (if any) required for each type of input
            switch (inputType) {
            case AudioPolicyInterface::API_INPUT_LEGACY:
                break;
            case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
                // FIXME: use the same permission as for remote submix for now.
            case AudioPolicyInterface::API_INPUT_MIX_CAPTURE:                          //> the app asked for AudioSource.REMOTE_SUBMIX
                if (!captureAudioOutputAllowed(pid, uid)) {                            //> stock Android denies capture here; check commented out
                    //ALOGE("getInputForAttr() permission denied: capture not allowed");
                    //status = PERMISSION_DENIED;
                }
                break;
            case AudioPolicyInterface::API_INPUT_MIX_EXT_POLICY_REROUTE:
                if (!modifyAudioRoutingAllowed()) {
                    ALOGE("getInputForAttr() permission denied: modify audio routing not allowed");
                    status = PERMISSION_DENIED;
                }
                break;
            case AudioPolicyInterface::API_INPUT_INVALID:
            default:
                LOG_ALWAYS_FATAL("getInputForAttr() encountered an invalid input type %d",
                        (int)inputType);
            }
        }
        
        if (status != NO_ERROR) {
            if (status == PERMISSION_DENIED) {
                mAudioPolicyManager->releaseInput(*input, session);
            }
            return status;
        }
    }

}

Adding log lines to the source traced the failure to this permission check; the log read:

 230 E AudioPolicyIntefaceImpl: Debug_dump_info: getInputForAttr() permission denied: capture not allowed

Only this one failure line survives: I had already debugged the feature to success by the time of writing, so the remaining logs are all from successful runs and the failure-analysis logs are lost, sorry.
After commenting out the permission code above, the new AudioRecord() step succeeds; many posts online describe this and present it as all that internal recording needs.
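
For context, the check being bypassed lives in the audio services' ServiceUtilities; it looks roughly like the sketch below (quoted from memory and abridged, so details may differ). The point is that android.permission.CAPTURE_AUDIO_OUTPUT is a signature|privileged permission, so an ordinary app can never pass this check, which is why people patch it out.

bool captureAudioOutputAllowed(pid_t pid, uid_t uid)
{
    if (getpid_cached == pid) return true;  // calls from inside audioserver always pass
    static const String16 sCaptureAudioOutput("android.permission.CAPTURE_AUDIO_OUTPUT");
    // not a runtime permission, so the permission cache may be consulted
    bool ok = PermissionCache::checkPermission(sCaptureAudioOutput, pid, uid);
    if (!ok) ALOGE("Request requires android.permission.CAPTURE_AUDIO_OUTPUT");
    return ok;
}
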
In my own tests, recording works after the patch, but it still captures none of the system audio.
I have not pinned down the exact cause; it may be a per-platform difference in the audio_policy_configuration.xml contents. Mine is as follows:


<?xml version="1.0" encoding="UTF-8" standalone="yes"?>

<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
    <!-- version section contains a “version” tag in the form “major.minor” e.g version=1.0-->
    <!-- Global configuration Declaration -->
    <globalConfiguration speaker_drc_enabled="true"/>

    <modules>
        <!-- Primary Audio HAL -->
        <module name="primary" halVersion="2.5">
            <attachedDevices>
                <item>Speaker</item>
                <item>Remote Submix Out</item>
                <item>Built-In Mic</item>
                <item>Remote Submix In</item>
            </attachedDevices>
            <defaultOutputDevice>Speaker</defaultOutputDevice>

            <!-- “mixPorts”: listing all output and input streams exposed by the audio HAL -->
            <mixPorts>
                <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="48000" 
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </mixPort>
                <mixPort name="primary input" role="sink">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
                             channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
                </mixPort>
                <!-- r_submix -->
                <mixPort name="r_submix output" role="source">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="48000" 
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                </mixPort>
                <mixPort name="r_submix input" role="sink">
                   <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                            samplingRates="44100,48000" 
                            channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
                </mixPort>
            </mixPorts>

            <!-- “devicePorts”: a list of device descriptors for all input and output devices 
            accessible via this module. -->
            <devicePorts>
                <!-- Output devices declaration, i.e. Sink DEVICE PORT -->
                <devicePort tagName="Speaker" role="sink" type="AUDIO_DEVICE_OUT_SPEAKER" address="">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="48000" 
                             channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
                    <gains>
                        <gain name="gain_1" mode="AUDIO_GAIN_MODE_JOINT"
                              minValueMB="-8400"
                              maxValueMB="4000"
                              defaultValueMB="0"
                              stepValueMB="100"/>
                    </gains>
                </devicePort>
                <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                             samplingRates="44100,48000"
                             channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
                </devicePort>
                <!-- add remote deviceport by ljb -->
                <devicePort tagName="Remote Submix Out" role="sink" type="AUDIO_DEVICE_OUT_REMOTE_SUBMIX">
                   <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                            samplingRates="48000" 
                            channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
               </devicePort>
               <devicePort tagName="Remote Submix In" type="AUDIO_DEVICE_IN_REMOTE_SUBMIX" role="source">
                   <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                            samplingRates="44100,48000" 
                            channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
                </devicePort>
            </devicePorts>
            <!-- route declaration, i.e. list all available sources for a given sink -->
            <routes>
                <route type="mix" sink="Speaker"
                       sources="primary output"/>          
                <route type="mix" sink="primary input"
                       sources="Built-In Mic"/>
                <!-- r_submix route -->
                <route type="mix" sink="Remote Submix Out"
                       sources="r_submix output"/>
                <route type="mix" sink="r_submix input"
                       sources="Remote Submix In"/>
            </routes>
        </module>
    </modules>
    <!-- End of Modules section -->


    <!-- Volume section -->

    <xi:include href="audio_policy_volumes.xml"/>
    <xi:include href="default_volume_tables.xml"/>

    <!-- End of Volume section -->

</audioPolicyConfiguration>

If anyone knows where exactly the difference lies, please leave a comment, and thanks in advance.
The configuration above is what I tested against, and it still cannot capture system audio. In the next post, 《Android音频框架之三 用户录音启动流程源码走读 startRecord》,
I will show concretely how to modify the TinyAlsa HAL source so that Android system audio can actually be recorded.

step 7:

Next, the manager resolves the inputSource into an actual device based on the caller's parameters; a quick pass through the code:

@ frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr,
                                             audio_io_handle_t *input,
                                             audio_session_t session,
                                             uid_t uid,
                                             uint32_t samplingRate,
                                             audio_format_t format,
                                             audio_channel_mask_t channelMask,
                                             audio_input_flags_t flags,
                                             audio_port_handle_t selectedDeviceId,
                                             input_type_t *inputType)
{
    ALOGV("getInputForAttr() source %d, samplingRate %d, format %d, channelMask %x,"
            "session %d, flags %#x",
          attr->source, samplingRate, format, channelMask, session, flags);

    *input = AUDIO_IO_HANDLE_NONE;
    *inputType = API_INPUT_INVALID;
    audio_devices_t device;
    // handle legacy remote submix case where the address was not always specified
    String8 address = String8("");
    audio_source_t inputSource = attr->source;
    audio_source_t halInputSource;
    AudioMix *policyMix = NULL;

    if (inputSource == AUDIO_SOURCE_DEFAULT) {
        inputSource = AUDIO_SOURCE_MIC;
    }
    halInputSource = inputSource;

    // Explicit routing?
    sp<DeviceDescriptor> deviceDesc;
    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
        if (mAvailableInputDevices[i]->getId() == selectedDeviceId) {
            deviceDesc = mAvailableInputDevices[i];
            break;
        }
    }
    mInputRoutes.addRoute(session, SessionRoute::STREAM_TYPE_NA, inputSource, deviceDesc, uid);

    if (inputSource == AUDIO_SOURCE_REMOTE_SUBMIX &&
            strncmp(attr->tags, "addr=", strlen("addr=")) == 0) {
        status_t ret = mPolicyMixes.getInputMixForAttr(*attr, &policyMix);
        if (ret != NO_ERROR) {
            return ret;
        }
        *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
        device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
        address = String8(attr->tags + strlen("addr="));
    } else {
        device = getDeviceAndMixForInputSource(inputSource, &policyMix);                            //> pick the device and mix for this source, see step 7.1
        if (device == AUDIO_DEVICE_NONE) {
            ALOGW("getInputForAttr() could not find device for source %d", inputSource);
            return BAD_VALUE;
        }
        if (policyMix != NULL) {
            address = policyMix->mDeviceAddress;
            if (policyMix->mMixType == MIX_TYPE_RECORDERS) {
                // there is an external policy, but this input is attached to a mix of recorders,
                // meaning it receives audio injected into the framework, so the recorder doesn't
                // know about it and is therefore considered "legacy"
                *inputType = API_INPUT_LEGACY;
            } else {
                // recording a mix of players defined by an external policy, we're rerouting for
                // an external policy
                *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
            }
        } else if (audio_is_remote_submix_device(device)) {
            address = String8("0");
            *inputType = API_INPUT_MIX_CAPTURE;
        } else if (device == AUDIO_DEVICE_IN_TELEPHONY_RX) {
            *inputType = API_INPUT_TELEPHONY_RX;
        } else {
            *inputType = API_INPUT_LEGACY;
        }

    }

    *input = getInputForDevice(device, address, session, uid, inputSource,
                               samplingRate, format, channelMask, flags,
                               policyMix);                                    //> open an input on that device, see step 7.2
    if (*input == AUDIO_IO_HANDLE_NONE) {
        mInputRoutes.removeRoute(session);
        return INVALID_OPERATION;
    }
    ALOGV("getInputForAttr() returns input type = %d", *inputType);
    return NO_ERROR;
}

step 7.1:

Because my configuration declares no dynamic policy mixes, the mix lookup below finds nothing and the code falls through to the policy engine.

audio_devices_t AudioPolicyManager::getDeviceAndMixForInputSource(audio_source_t inputSource,
                                                                  AudioMix **policyMix)
{
    ALOGD("Debug_dump_info: %s,%d ", __func__, __LINE__);
    audio_devices_t availableDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
    audio_devices_t selectedDeviceFromMix =
           mPolicyMixes.getDeviceAndMixForInputSource(inputSource, availableDeviceTypes, policyMix);
    ALOGD("Debug_dump_info: %s,%d ", __func__, __LINE__);
    if (selectedDeviceFromMix != AUDIO_DEVICE_NONE) {
        return selectedDeviceFromMix;
    }
    return getDeviceForInputSource(inputSource);
}

audio_devices_t AudioPolicyManager::getDeviceForInputSource(audio_source_t inputSource)
{
    ALOGD("Debug_dump_info: %s , %d mInputRoutes.size:%d ", __func__, __LINE__,mInputRoutes.size());
    size_t routeIndex = 0;
    for (/*size_t routeIndex = 0*/; routeIndex < mInputRoutes.size(); routeIndex++) {
         sp<SessionRoute> route = mInputRoutes.valueAt(routeIndex);
         if (inputSource == route->mSource && route->isActive()) {
             return route->mDeviceDescriptor->type();
         }
     }
     ALOGD("Debug_dump_info: %s , %d routeIndex:%d ", __func__, __LINE__, routeIndex);
     return mEngine->getDeviceForInputSource(inputSource);                              //> ask the policy engine for the device, see step 7.3
}
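
Step 7.3 below walks the configurable engine used on this platform; for comparison, on builds with the default policy engine (enginedefault) the same decision is an explicit switch. An abridged sketch of how source 8 maps to the remote-submix input device, provided the "Remote Submix In" devicePort from the XML above is attached:

audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) const
{
    const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices();
    audio_devices_t availableDeviceTypes = availableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
    audio_devices_t device = AUDIO_DEVICE_NONE;

    switch (inputSource) {
    case AUDIO_SOURCE_REMOTE_SUBMIX:
        if (availableDeviceTypes & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
            device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;  // needs the attached r_submix device
        }
        break;
    // ... the other input sources elided ...
    }
    return device;
}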

step 7.2:

With inputSource = 8 (AUDIO_SOURCE_REMOTE_SUBMIX), this step resolves which sound card and device number to open.

audio_io_handle_t AudioPolicyManager::getInputForDevice(audio_devices_t device,
                                                        String8 address,
                                                        audio_session_t session,
                                                        uid_t uid,
                                                        audio_source_t inputSource,
                                                        uint32_t samplingRate,
                                                        audio_format_t format,
                                                        audio_channel_mask_t channelMask,
                                                        audio_input_flags_t flags,
                                                        AudioMix *policyMix)
{
    audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
    audio_source_t halInputSource = inputSource;
    bool isSoundTrigger = false;

    ALOGD("Debug_dump_info: %s : %d  ", __func__, __LINE__);
    // find a compatible input profile (not necessarily identical in parameters)
    sp<IOProfile> profile;
    // samplingRate and flags may be updated by getInputProfile
    uint32_t profileSamplingRate = (samplingRate == 0) ? SAMPLE_RATE_HZ_DEFAULT : samplingRate;
    audio_format_t profileFormat = format;
    audio_channel_mask_t profileChannelMask = channelMask;
    audio_input_flags_t profileFlags = flags;
    for (;;) {
        profile = getInputProfile(device, address,
                                  profileSamplingRate, profileFormat, profileChannelMask,
                                  profileFlags);
        if (profile != 0) {
            break; // success
        } else if (profileFlags & AUDIO_INPUT_FLAG_RAW) {
            profileFlags = (audio_input_flags_t) (profileFlags & ~AUDIO_INPUT_FLAG_RAW); // retry
        } else if (profileFlags != AUDIO_INPUT_FLAG_NONE) {
            profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
        } else { // fail
            ALOGW("Debug_dump_info: getInputForDevice() could not find profile for device 0x%X,"
                  "samplingRate %u, format %#x, channelMask 0x%X, flags %#x",
                    device, samplingRate, format, channelMask, flags);
            return input;
        }
    }
    // Pick input sampling rate if not specified by client
    if (samplingRate == 0) {
        samplingRate = profileSamplingRate;
    }

    if (profile->getModuleHandle() == 0) {
        ALOGE("Debug_dump_info: getInputForAttr(): HW module %s not opened", profile->getModuleName());
        return input;
    }

    sp<AudioSession> audioSession = new AudioSession(session,
                                                              inputSource,
                                                              format,
                                                              samplingRate,
                                                              channelMask,
                                                              flags,
                                                              uid,
                                                              isSoundTrigger,
                                                              policyMix, mpClientInterface);

    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    config.sample_rate = profileSamplingRate;
    config.channel_mask = profileChannelMask;
    config.format = profileFormat;
    ALOGD("Debug_dump_info: %s : %d  ", __func__, __LINE__);
    status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
                                                   &input,
                                                   &config,
                                                   &device,
                                                   address,
                                                   halInputSource,
                                                   profileFlags);
    ALOGD("Debug_dump_info: %s : %d  ", __func__, __LINE__);
    // only accept input with the exact requested set of parameters
    if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE ||
        (profileSamplingRate != config.sample_rate) ||
        !audio_formats_match(profileFormat, config.format) ||
        (profileChannelMask != config.channel_mask)) {
        ALOGW("Debug_dump_info: getInputForAttr() failed opening input: samplingRate %d"
              ", format %d, channelMask %x",
                samplingRate, format, channelMask);
        if (input != AUDIO_IO_HANDLE_NONE) {
            mpClientInterface->closeInput(input);
        }
        return AUDIO_IO_HANDLE_NONE;
    }
    ALOGD("Debug_dump_info: %s : %d  ", __func__, __LINE__);
    sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile);
    inputDesc->mSamplingRate = profileSamplingRate;
    inputDesc->mFormat = profileFormat;
    inputDesc->mChannelMask = profileChannelMask;
    inputDesc->mDevice = device;
    inputDesc->mPolicyMix = policyMix;
    inputDesc->addAudioSession(session, audioSession);

    addInput(input, inputDesc);
    mpClientInterface->onAudioPortListUpdate();
    ALOGD("Debug_dump_info: %s : %d  ", __func__, __LINE__);
    return input;
}
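
mpClientInterface->openInput() hops back into AudioFlinger, which asks the HAL to open the actual input stream (for AUDIO_DEVICE_IN_REMOTE_SUBMIX that is the r_submix HAL). The client shim is thin; an abridged sketch from AudioPolicyClientImpl.cpp:

status_t AudioPolicyService::AudioPolicyClient::openInput(audio_module_handle_t module,
                                                          audio_io_handle_t *input,
                                                          audio_config_t *config,
                                                          audio_devices_t *device,
                                                          const String8& address,
                                                          audio_source_t source,
                                                          audio_input_flags_t flags)
{
    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
    if (af == 0) {
        ALOGW("%s: could not get AudioFlinger", __func__);
        return PERMISSION_DENIED;
    }
    // AudioFlinger::openInput() opens the HAL stream and wires up a RecordThread
    return af->openInput(module, input, config, device, address, source, flags);
}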

step 7.3:

@ frameworks/av/services/audiopolicy/engineconfigurable/src/Engine.h

class ManagerInterfaceImpl : public AudioPolicyManagerInterface
{
public:
    ManagerInterfaceImpl(Engine *policyEngine)
        : mPolicyEngine(policyEngine) {}

    virtual android::status_t initCheck()
    {
        return mPolicyEngine->initCheck();
    }
    virtual void setObserver(AudioPolicyManagerObserver *observer)
    {
        mPolicyEngine->setObserver(observer);
    }
    virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const     //> the call we arrive through
    {
        return mPolicyEngine->getPropertyForKey<audio_devices_t, audio_source_t>(inputSource);
    }
    virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy) const;
    virtual routing_strategy getStrategyForStream(audio_stream_type_t stream)
    {
        return mPolicyEngine->getPropertyForKey<routing_strategy, audio_stream_type_t>(stream);
    }
    // ... remaining overrides elided ...

private:
    Engine *mPolicyEngine;
};

// Further down, in the enclosing Engine class (same header):
private:
    /* Copy facilities are put private to disable copy. */
    Engine(const Engine &object);
    Engine &operator=(const Engine &object);

    template <typename Property, typename Key>
    Property getPropertyForKey(Key key) const;

    template <typename Property, typename Key>
    bool setPropertyForKey(const Property &property, const Key &key);

@ frameworks/av/services/audiopolicy/engineconfigurable/src/Engine.cpp
template <typename Property, typename Key>
Property Engine::getPropertyForKey(Key key) const               //> looks up the device mapped to this input source
{
    Element<Key> *element = getFromCollection<Key>(key);
    if (element == NULL) {
        ALOGE("%s: Element not found within collection", __FUNCTION__);
        return static_cast<Property>(0);
    }
    return element->template get<Property>();
}

That is the path the Android system takes when an app creates an AudioRecord instance. In brief:
1> At boot, the system loads the libandroid_runtime library, the core framework runtime, which registers the JNI table we started from;
2> init.rc starts the audioserver process, so AudioFlinger and AudioPolicyService, the services behind all the binder calls above, are already running in the background;
3> new AudioRecord() in the app enters through the JNI bindings and runs the native AudioRecord creation path;
4> For the source type REMOTE_SUBMIX, the policy manager looks up the device attributes in the objects built from audio_policy_configuration.xml.
Obtaining the device parameters does not yet put the device to work; that happens during the startRecording() flow,
which is exactly what the next post in this series covers.
