Android13 AudioFlinger createRecord流程分析

AudioFlinger在进行RecordTrack相关操作时需要先创建RecordTrack,在JAVA层AudioRecord的构造函数中会通过JNI调用C++层AudioRecord的set函数,在AudioRecord的set函数中会通过AudioSystem获取IAudioFlinger,然后调用AudioFlinger的createRecord函数,从AudioRecord到IAudioFlinger的部分在AudioRecord已经分析过了,下面我们继续分析AudioFlinger的部分:

RecordTrack在AudioFlinger的createRecord函数中调用:

//frameworks/av/services/audioflinger/AudioFlinger.cpp
// Creates a RecordTrack for a client capture request.
// Flow: validate the request, resolve an input stream via
// AudioSystem::getInputForAttr(), create the RecordTrack on the owning
// RecordThread, then wrap it in a RecordHandle returned to the client.
status_t AudioFlinger::createRecord(const media::CreateRecordRequest& _input,
                                    media::CreateRecordResponse& _output)
{
    // Convert the AIDL request into the legacy in-process representation.
    CreateRecordInput input = VALUE_OR_RETURN_STATUS(CreateRecordInput::fromAidl(_input));
    CreateRecordOutput output;


    sp<RecordThread::RecordTrack> recordTrack;
    sp<RecordHandle> recordHandle;
    sp<Client> client;
    status_t lStatus;
    audio_session_t sessionId = input.sessionId;
    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;


    // Clear the output shared-memory handles so a failure path never
    // returns stale values to the client.
    output.cblk.clear();
    output.buffers.clear();
    output.inputId = AUDIO_IO_HANDLE_NONE;


    // TODO b/182392553: refactor or clean up
    // Sanitize the caller-supplied attribution: only audioserver/mediaserver
    // may act on behalf of another uid/pid; everyone else is forced back to
    // its real binder calling identity.
    AttributionSourceState adjAttributionSource = input.clientInfo.attributionSource;
    bool updatePid = (adjAttributionSource.pid == -1);
    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
    const uid_t currentUid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(
           adjAttributionSource.uid));
    if (!isAudioServerOrMediaServerUid(callingUid)) {
        ALOGW_IF(currentUid != callingUid,
                "%s uid %d tried to pass itself off as %d",
                __FUNCTION__, callingUid, currentUid);
        adjAttributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
        updatePid = true;
    }
    const pid_t callingPid = IPCThreadState::self()->getCallingPid();
    const pid_t currentPid = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(
            adjAttributionSource.pid));
    if (updatePid) {
        ALOGW_IF(currentPid != (pid_t)-1 && currentPid != callingPid,
                 "%s uid %d pid %d tried to pass itself off as pid %d",
                 __func__, callingUid, callingPid, currentPid);
        adjAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
    }


    // we don't yet support anything other than linear PCM
    if (!audio_is_valid_format(input.config.format) || !audio_is_linear_pcm(input.config.format)) {
        ALOGE("createRecord() invalid format %#x", input.config.format);
        lStatus = BAD_VALUE;
        goto Exit;
    }


    // further channel mask checks are performed by createRecordTrack_l()
    if (!audio_is_input_channel(input.config.channel_mask)) {
        ALOGE("createRecord() invalid channel mask %#x", input.config.channel_mask);
        lStatus = BAD_VALUE;
        goto Exit;
    }


    // Allocate a fresh session id, or validate the one supplied by the caller.
    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    } else if (audio_unique_id_get_use(sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
        lStatus = BAD_VALUE;
        goto Exit;
    }


    output.sessionId = sessionId;
    output.selectedDeviceId = input.selectedDeviceId;
    output.flags = input.flags;


    // Register this client pid so its shared-memory allocations are tracked.
    client = registerPid(VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(adjAttributionSource.pid)));


    // Not a conventional loop, but a retry loop for at most two iterations total.
    // Try first maybe with FAST flag then try again without FAST flag if that fails.
    // Exits the loop via break on success, or via goto Exit on error.
    // The sp<> references will be dropped when re-entering scope.
    // The lack of indentation is deliberate, to reduce code churn and ease merges.
    for (;;) {
    // release previously opened input if retrying.
    if (output.inputId != AUDIO_IO_HANDLE_NONE) {
        recordTrack.clear();
        AudioSystem::releaseInput(portId);
        output.inputId = AUDIO_IO_HANDLE_NONE;
        output.selectedDeviceId = input.selectedDeviceId;
        portId = AUDIO_PORT_HANDLE_NONE;
    }
    // Ask audio policy for a matching input stream/device for these attributes.
    lStatus = AudioSystem::getInputForAttr(&input.attr, &output.inputId,
                                      input.riid,
                                      sessionId,
                                    // FIXME compare to AudioTrack
                                      adjAttributionSource,
                                      &input.config,
                                      output.flags, &output.selectedDeviceId, &portId);
    if (lStatus != NO_ERROR) {
        ALOGE("createRecord() getInputForAttr return error %d", lStatus);
        goto Exit;
    }


    {
        Mutex::Autolock _l(mLock);
        RecordThread *thread = checkRecordThread_l(output.inputId); // map input handle to its RecordThread
        if (thread == NULL) {
            ALOGW("createRecord() checkRecordThread_l failed, input handle %d", output.inputId);
            lStatus = FAILED_TRANSACTION;
            goto Exit;
        }


        ALOGV("createRecord() lSessionId: %d input %d", sessionId, output.inputId);


        output.sampleRate = input.config.sample_rate;
        output.frameCount = input.frameCount;
        output.notificationFrameCount = input.notificationFrameCount;


        recordTrack = thread->createRecordTrack_l(client, input.attr, &output.sampleRate, // create the RecordTrack on the record thread
                                                  input.config.format, input.config.channel_mask,
                                                  &output.frameCount, sessionId,
                                                  &output.notificationFrameCount,
                                                  callingPid, adjAttributionSource, &output.flags,
                                                  input.clientInfo.clientTid,
                                                  &lStatus, portId, input.maxSharedAudioHistoryMs);
        LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));


        // lStatus == BAD_TYPE means FAST flag was rejected: request a new input from
        // audio policy manager without FAST constraint
        if (lStatus == BAD_TYPE) {
            continue;
        }


        if (lStatus != NO_ERROR) {
            goto Exit;
        }


        // Report the server-side stream parameters back to the client:
        // a fast track shares the thread's format, a normal track has its own.
        if (recordTrack->isFastTrack()) {
            output.serverConfig = {
                    thread->sampleRate(),
                    thread->channelMask(),
                    thread->format()
            };
        } else {
            output.serverConfig = {
                    recordTrack->sampleRate(),
                    recordTrack->channelMask(),
                    recordTrack->format()
            };
        }


        // Check if one effect chain was awaiting for an AudioRecord to be created on this
        // session and move it to this thread.
        sp<EffectChain> chain = getOrphanEffectChain_l(sessionId);
        if (chain != 0) {
            Mutex::Autolock _l(thread->mLock);
            thread->addEffectChain_l(chain);
        }
        break;
    }
    // End of retry loop.
    // The lack of indentation is deliberate, to reduce code churn and ease merges.
    }


    // Hand the track's control block and data buffers back to the client.
    output.cblk = recordTrack->getCblk();
    output.buffers = recordTrack->getBuffers();
    output.portId = portId;


    output.audioRecord = new RecordHandle(recordTrack); // wrap the track in a binder-facing RecordHandle
    _output = VALUE_OR_FATAL(output.toAidl());


Exit:
    if (lStatus != NO_ERROR) {
        // remove local strong reference to Client before deleting the RecordTrack so that the
        // Client destructor is called by the TrackBase destructor with mClientLock held
        // Don't hold mClientLock when releasing the reference on the track as the
        // destructor will acquire it.
        {
            Mutex::Autolock _cl(mClientLock);
            client.clear();
        }
        recordTrack.clear();
        if (output.inputId != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::releaseInput(portId);
        }
    }


    return lStatus;
}

以上函数有三个关键步骤:

  • 调用AudioSystem::getInputForAttr函数获取到匹配的输入设备。

  • 调用recordTrack = thread->createRecordTrack_l函数创建RecordTrack。

  • 调用new RecordHandle创建RecordHandle,作为返回给客户端的录音句柄。

下面分别进行分析:

AudioSystem getInputForAttr

调用AudioSystem::getInputForAttr函数获取到匹配的输入设备:

//frameworks/av/media/libaudioclient/AudioSystem.cpp
// Resolves an audio input stream for the given attributes by forwarding the
// request to the AudioPolicyService over binder. On success fills in the
// input io handle, the selected device id and the port id.
status_t AudioSystem::getInputForAttr(const audio_attributes_t* attr,
                                      audio_io_handle_t* input,
                                      audio_unique_id_t riid,
                                      audio_session_t session,
                                      const AttributionSourceState &attributionSource,
                                      const audio_config_base_t* config,
                                      audio_input_flags_t flags,
                                      audio_port_handle_t* selectedDeviceId,
                                      audio_port_handle_t* portId) {
    // Every pointer is mandatory: attr/config are inputs, the rest are
    // output slots for the policy decision.
    if (attr == nullptr) {
        ALOGE("getInputForAttr NULL attr - shouldn't happen");
        return BAD_VALUE;
    }
    if (input == nullptr) {
        ALOGE("getInputForAttr NULL input - shouldn't happen");
        return BAD_VALUE;
    }
    if (selectedDeviceId == nullptr) {
        ALOGE("getInputForAttr NULL selectedDeviceId - shouldn't happen");
        return BAD_VALUE;
    }
    if (portId == nullptr) {
        ALOGE("getInputForAttr NULL portId - shouldn't happen");
        return BAD_VALUE;
    }


    // Obtain the IAudioPolicyService binder interface.
    const sp<IAudioPolicyService>& policyService = AudioSystem::get_audio_policy_service();
    if (policyService == nullptr) {
        return NO_INIT;
    }


    // Translate each legacy argument into its AIDL wire representation.
    media::AudioAttributesInternal aidlAttr = VALUE_OR_RETURN_STATUS(
            legacy2aidl_audio_attributes_t_AudioAttributesInternal(*attr));
    int32_t aidlInput = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(*input));
    int32_t aidlRiid = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_unique_id_t_int32_t(riid));
    int32_t aidlSession = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
    AudioConfigBase aidlConfig = VALUE_OR_RETURN_STATUS(
            legacy2aidl_audio_config_base_t_AudioConfigBase(*config, true /*isInput*/));
    int32_t aidlFlags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
    int32_t aidlSelectedDeviceId = VALUE_OR_RETURN_STATUS(
            legacy2aidl_audio_port_handle_t_int32_t(*selectedDeviceId));


    media::GetInputForAttrResponse reply;


    // Delegate the actual input selection to IAudioPolicyService.
    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
            policyService->getInputForAttr(aidlAttr, aidlInput, aidlRiid, aidlSession,
                attributionSource, aidlConfig, aidlFlags, aidlSelectedDeviceId, &reply)));


    // Translate the AIDL response back into the caller's legacy out-params.
    *input = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_io_handle_t(reply.input));
    *selectedDeviceId = VALUE_OR_RETURN_STATUS(
            aidl2legacy_int32_t_audio_port_handle_t(reply.selectedDeviceId));
    *portId = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_port_handle_t(reply.portId));


    return OK;
}

AudioPolicyService getInputForAttr

AudioSystem的getInputForAttr函数会调用到AudioPolicyService的getInputForAttr函数:

//frameworks/av/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
Status AudioPolicyService::getInputForAttr(const media::AudioAttributesInternal& attrAidl,
                                           int32_t inputAidl,
                                           int32_t riidAidl,
                                           int32_t sessionAidl,
                                           const AttributionSourceState& attributionSource,
                                           const AudioConfigBase& configAidl,
                                           int32_t flagsAidl,
                                           int32_t selectedDeviceIdAidl,
                                           media::GetInputForAttrResponse* _aidl_return) {
    audio_attributes_t attr = VALUE_OR_RETURN_BINDER_STATUS(
            aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
    audio_io_handle_t input = VALUE_OR_RETURN_BINDER_STATUS(
            aidl2legacy_int32_t_audio_io_handle_t(inputAidl));
    audio_unique_id_t riid = VALUE_OR_RETURN_BINDER_STATUS(
            aidl2legacy_int32_t_audio_unique_id_t(riidAidl));
    audio_session_t session = VALUE_OR_RETURN_BINDER_STATUS(
            aidl2legacy_int32_t_audio_session_t(sessionAidl));
    audio_config_base_t config = VALUE_OR_RETURN_BINDER_STATUS(
            aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl, true /*isInput*/));
    audio_input_flags_t flags = VALUE_OR_RETURN_BINDER_STATUS(
      
<think>我们正在讨论AudioRecord与AudioFlinger之间的音频数据交互流程。根据Android音频子系统的设计,AudioRecord负责从音频输入设备(如麦克风)采集音频数据,而AudioFlinger作为音频系统的核心服务,负责管理音频流和硬件设备。下面将详细分析两者之间的数据交互过程。 ### 1. **AudioRecord初始化与AudioFlinger建立连接** 当创建AudioRecord对象时,系统会通过一系列调用与AudioFlinger建立连接: - **绑定音频输入源**:根据指定的音频源(如麦克风)找到对应的输入设备。 - **创建Track**:在AudioFlinger中创建一个RecordTrack(或RecordThread)用于管理该音频输入流。 - **共享内存分配**:AudioFlinger会分配一块共享内存(通过内存映射方式)作为音频数据的环形缓冲区(FIFO)。 ```mermaid sequenceDiagram participant AR as AudioRecord participant AF as AudioFlinger AR->>AF: 请求分配输入流 (createRecord) AF-->>AR: 返回共享内存控制块 (audio_record_cblk_t) AF->>AF: 创建RecordThread AR->>AR: 初始化环形缓冲区 ``` ### 2. **音频数据流传输机制** 音频数据通过环形缓冲区进行传输: - **生产者-消费者模型**: - **生产者**:AudioRecord将采集到的音频数据写入环形缓冲区。 - **消费者**:AudioFlinger的RecordThread从缓冲区读取数据并处理(如重采样、混音等)。 - **同步控制**:通过`audio_record_cblk_t`控制块实现同步(如读写位置指针、锁机制)[^3][^4]。 ### 3. **关键数据结构** - **环形缓冲区 (FIFO)**: - 由共享内存实现,通过`audio_record_cblk_t`管理[^3]。 - 包含读写指针、数据帧计数等元数据。 - **控制块 (audio_record_cblk_t)**: - 原子变量:读写位置索引(避免锁竞争)。 - 条件变量:通知数据可用性(如缓冲区满/空时的等待/唤醒)[^4]。 ### 4. **数据读写流程** #### **AudioRecord写入数据** ```java // 伪代码:AudioRecord.read() 内部流程 1. 检查缓冲区可写空间 2. 将麦克风采集的数据拷贝到环形缓冲区 3. 更新写指针 4. 通知AudioFlinger有新的数据可用 ``` #### **AudioFlinger读取数据** ```cpp // 伪代码:RecordThread::threadLoop() while (true) { 1. 等待缓冲区有足够数据(条件变量触发) 2. 从环形缓冲区读取数据 3. 执行音频处理(如重采样、格式转换) 4. 将数据传递给上层应用或保存到文件 5. 更新读指针 } ``` ### 5. **异常处理机制** - **缓冲区欠载**:当AudioFlinger读取速度超过写入速度时,返回错误码`ERROR_INVALID_OPERATION`。 - **权限问题**:未获取`RECORD_AUDIO`权限会导致初始化失败。 - **资源竞争**:多路录音时,硬件资源不足会触发`ERROR_DEAD_OBJECT`。 ### 6. **完整交互流程图** ```mermaid graph TD A[AudioRecord初始化] --> B[绑定AudioFlinger] B --> C[创建RecordTrack] C --> D[分配共享内存缓冲区] D --> E[启动录音线程] E --> F[持续写入音频数据] F --> G[AudioFlinger读取数据] G --> H[音频处理] H --> I[输出到目标] ``` ### 7. 
**性能优化点** - **缓冲区大小**:根据`getMinBufferSize()`动态调整,避免频繁线程切换。 - **内存对齐**:共享内存按缓存行对齐(64字节)减少竞争。 - **低延迟模式**:Android 10+支持`AUDIO_PERFORMANCE_MODE_LOW_LATENCY`[^2]。 > 💡 **设计要点**: > AudioRecord与AudioFlinger的交互基于**零拷贝原则**——数据直接在共享内存中传递,无需跨进程复制[^4]。这显著降低了音频采集的延迟。 --- §§相关问题§§ 1. AudioRecord的环形缓冲区如何避免读写冲突? 2. Android低延迟音频采集的实现原理是什么? 3. 多路录音时AudioFlinger如何管理多个AudioRecord? 4. AudioRecord采集的音频数据如何传递给MediaCodec编码?</think>### Android AudioRecord与AudioFlinger音频数据交互流程详解 #### 1. **初始化阶段建立连接** 当创建`AudioRecord`实例时,会通过Binder机制与`AudioFlinger`服务建立连接: ```mermaid sequenceDiagram participant AR as AudioRecord participant AF as AudioFlinger AR->>AF: createRecord() 请求 AF->>AF: 根据音频源选择输入设备 AF->>AF: 创建RecordThread线程 AF-->>AR: 返回IAudioRecord接口 AR->>AR: 通过IAudioTrack获取共享内存控制块 ``` - **关键步骤**: 1. `AudioRecord`调用`createRecord()`请求连接 2. `AudioFlinger`根据音频源(如麦克风)选择硬件设备 3. 创建专用的`RecordThread`处理音频流 4. 返回`IAudioRecord`接口用于后续控制[^2][^4] #### 2. **共享内存缓冲区设计** 音频数据通过**环形缓冲区(FIFO)** 实现零拷贝传输: ```mermaid graph LR A[麦克风硬件] --> B(AudioRecord) B --> C[环形缓冲区] C --> D(RecordThread) D --> E[AudioFlinger] ``` - **核心组件**: - `audio_record_cblk_t`:共享内存控制块 - 包含读写指针、帧计数等元数据 - 使用原子操作保证线程安全 - 数据缓冲区:PCM音频数据存储区 - 同步机制:条件变量实现读写同步[^3][^4] #### 3. **数据流传输过程 ##### AudioRecord侧(生产者): ```java // 伪代码:数据写入流程 void AudioRecord::read() { // 1. 等待缓冲区可写空间 waitForFreeSpace(); // 2. 从音频驱动读取数据 size_t framesRead = audioHwDev->read(); // 3. 数据写入环形缓冲区 memcpy(sharedBuffer + writePos, pcmData, dataSize); // 4. 更新写指针 atomic_add(&cblk->framesWritten, framesRead); // 5. 唤醒消费者 signalDataAvailable(); } ``` ##### AudioFlinger侧(消费者): ```cpp // RecordThread线程循环 void RecordThread::threadLoop() { while(!exitPending()) { // 1. 等待数据可用 waitForDataAvailable(); // 2. 计算可读帧数 framesToRead = cblk->framesAvailable(); // 3. 从缓冲区读取数据 memcpy(processBuffer, sharedBuffer + readPos, dataSize); // 4. 更新读指针 atomic_add(&cblk->framesRead, framesToRead); // 5. 音频处理(重采样/格式转换) processAudio(processBuffer); // 6. 分发到目标应用 deliverToClient(); } } ``` #### 4. 
**同步控制机制** - **缓冲区状态管理**: - 写阻塞:当`framesWritten - framesRead == bufferSize`时,生产者等待 - 读阻塞:当`framesWritten == framesRead`时,消费者等待 - **优先级控制**: - `RecordThread`运行在`PRIORITY_URGENT_AUDIO`优先级 - 确保实时性要求[^1][^4] #### 5. **异常处理场景** | 错误场景 | 触发条件 | 处理机制 | |---------|---------|---------| | 缓冲区溢出 | 写入速度 > 读取速度 | 丢弃新数据,返回`WOULD_BLOCK` | | 设备丢失 | 耳机拔出/麦克风禁用 | 回调`onError(ERROR_DEAD_OBJECT)` | | 权限失效 | RECORD_AUDIO权限撤销 | 终止流,抛出`SecurityException` | #### 6. **性能优化设计** 1. **内存对齐**: ```cpp // 缓冲区按cache line对齐(通常64字节) posix_memalign(&buffer, 64, bufferSize); ``` 2. **批处理优化**: - 每次读写至少处理`minFrameCount`帧(约10ms数据) 3. **直接传输路径**: - 支持`AudioRecord.obtainDirectBuffer()`避免拷贝[^5] > 💡 **关键设计思想**: > 通过**共享内存+控制块**的架构,实现跨进程音频传输的零拷贝,满足实时性要求: > $$ \text{延迟} \propto \frac{\text{缓冲区大小}}{\text{采样率}} $$ > 典型配置:160帧缓冲区 @ 16kHz采样率 → 10ms延迟 ---
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值