Android AudioTrack创建流程

Android AudioTrack创建流程

AudioTrack是应用播放音频数据入口,通过该类应用可将解码后的音频数据播放出来。本篇主要简单介绍AudioTrack的创建流程。

一、AudioTrack设置

当构造一个 AudioTrack 实例时,APP侧会调用AudioTrack::set()函数,其中transferType代表数据传输方式,有主动write和callback两种方式。

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        int frameCountInt,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid)
{
    // Record the transfer type, stream type and data format.
    ...
    mVolume[LEFT] = 1.0f;
    mVolume[RIGHT] = 1.0f;
    mCbf = cbf;
    // If a data-supply callback was provided, spawn the AudioTrackThread that
    // will pull audio data from the application through the callback.
    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
    }

    status_t status = createTrack_l();
    ...
}

在set函数中调用了createTrack_l()。

status_t AudioTrack::createTrack_l()
{
    // Fill |input| with the values initialised in AudioTrack::set().
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    IAudioFlinger::CreateTrackInput input;
    ...
    media::CreateTrackResponse response;
    status = audioFlinger->createTrack(input.toAidl(), response);

    // Copy the parameters returned in |response| back into this AudioTrack.
    IAudioFlinger::CreateTrackOutput output;
    output = IAudioFlinger::CreateTrackOutput::fromAidl(response);
    ...
    mNotificationFramesAct = output.notificationFrameCount;
    mAfFrameCount = output.afFrameCount; // frame count (plus sample rate / channels) used inside AudioFlinger
    ...
    mAfLatency = output.afLatency;
    mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;

    // Shared buffer between AudioTrack and AudioFlinger; the client-side view
    // of it is managed by an AudioTrackClientProxy.
    std::optional<media::SharedFileRegion> sfr;
    output.audioTrack->getCblk(&sfr);
    sp<IMemory> iMem = aidl2legacy_NullableSharedFileRegion_IMemory(sfr);
    mCblkMemory = iMem;
    void* iMemPointer = iMem->unsecurePointer();
    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t *>(iMemPointer);
    mCblk = cblk;
    void* buffers = cblk + 1; // the data FIFO starts right after the control block
    mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
}

二、AudioFlinger创建Track

status_t AudioFlinger::createTrack(const media::CreateTrackRequest& _input,
                                    media::CreateTrackResponse& _output)
{   // Through a chain of calls this reaches AudioPolicyManager::getOutputForDevice().
    // If the output flags contain AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD or
    // AUDIO_OUTPUT_FLAG_DIRECT, AudioFlinger::openOutput() is eventually invoked to
    // open the matching output stream device and create the corresponding
    // PlaybackThread; that thread's audio_io_handle_t is handed back to the AudioTrack.
    // For any other flags, an output stream device and PlaybackThread are chosen by
    // policy, and again the thread's audio_io_handle_t is returned to the AudioTrack.
    lStatus = AudioSystem::getOutputForAttr(&localAttr, &output.outputId, sessionId, &streamType, 
                                 &adjAttributionSource, &input.config, input.flags,
                                 &output.selectedDeviceId, &portId, &secondaryOutputs,
                                 &isSpatialized, &isBitperfect);

    // Locate the PlaybackThread owning the returned audio_io_handle_t.
    PlaybackThread *thread = checkPlaybackThread_l(output.outputId);

    // Create on the PlaybackThread the Track object that manages this audio stream.
    track = thread->createTrack_l(client, &streamType, localAttr, &output.sampleRate, input.config.format,
              input.config.channel_mask, &output.frameCount, &output.notificationFrameCount,
              input.notificationsPerBuffer, input.speed, input.sharedBuffer, sessionId, &output.flags,
              callingPid, adjAttributionSource, input.clientInfo.clientTid, &lStatus, portId,
              input.audioTrackCallback, isSpatialized, isBitperfect);

    output.afFrameCount = thread->frameCount();
    ...
    // Wrap the Track in its binder proxy (TrackHandle) and return it.
    output.audioTrack = new TrackHandle(track);
}

getOutputForAttr 返回 output 句柄后,AudioFlinger 在对应的 PlaybackThread 上调用 createTrack_l 去创建 Track。

sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(...)
{
    ...
    // Compute the minimum frame count the client buffer must hold for this
    // thread's latency, then enforce it.
    size_t minFrameCount = 0;
    uint32_t latencyMs = latency_l();
    minFrameCount = AudioSystem::calculateMinFrameCount(latencyMs, mNormalFrameCount,
                        mSampleRate, sampleRate, speed /*, 0 mNotificationPerBufferReq */);
    if (frameCount < minFrameCount) {
        frameCount = minFrameCount;
    }

    // notificationFrameCount: when this many frames remain in the shared
    // buffer, the client is asked to write more data.
    const int nBuffering = (frameCount * mSampleRate) / (mNormalFrameCount * sampleRate) == 3 ? 3 : 2;
    maxNotificationFrames = frameCount / nBuffering;
    notificationFrameCount = maxNotificationFrames;

    // The shared buffer is allocated inside the Track constructor.
    track = new Track(...);
    mTracks.add(track);
}

看看 Track 的构造过程,主要分析数据 FIFO 及它的控制块是如何分配的:

AudioFlinger::PlaybackThread::Track::Track()
    :   TrackBase(), ...
{
    if (sharedBuffer == 0) {
        // Transfer mode MODE_STREAM: create an AudioTrackServerProxy.
        // The PlaybackThread keeps using it to locate readable data in the FIFO.
        mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
                mFrameSize, !isExternalTrack(), sampleRate);
    } else {
        // Transfer mode MODE_STATIC: create a StaticAudioTrackServerProxy instead.
        mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
                mFrameSize);
    }
    mServerProxy = mAudioTrackServerProxy;

    // Assign a name to the Track; the AudioMixer finds the Track by this name.
    mName = thread->getTrackName_l(channelMask, format, sessionId);
    ...
}

AudioFlinger::ThreadBase::TrackBase::TrackBase(...)
{
    // ......

    // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
    size_t size = sizeof(audio_track_cblk_t);
    size_t bufferSize = (buffer == NULL ? roundup(frameCount) : frameCount) * mFrameSize;
    if (buffer == NULL && alloc == ALLOC_CBLK) {
        // |size| is the total anonymous shared memory to allocate: the control
        // block (sizeof(audio_track_cblk_t)) plus the data FIFO (bufferSize).
        // The layout diagram further down shows why they live in one region.
        size += bufferSize;
    }

    if (client != 0) {
        // Allocate one block of anonymous shared memory.
        mCblkMemory = client->heap()->allocate(size);
        if (mCblkMemory == 0 ||
                (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) {
            // %zu: size is a size_t; %u would be a varargs type mismatch on LP64.
            ALOGE("not enough memory for AudioTrack size=%zu", size);
            client->heap()->dump("AudioTrack");
            mCblkMemory.clear();
            return;
        }
    } else {
        // this syntax avoids calling the audio_track_cblk_t constructor twice
        mCblk = (audio_track_cblk_t *) new uint8_t[size];
        // assume mCblk != NULL
    }

    // construct the shared structure in-place.
    if (mCblk != NULL) {
        // C++ placement new: new(@BUFFER) @CLASS();
        // it constructs an object at a specific memory location.
        // Here an audio_track_cblk_t is constructed at the start of the
        // anonymous shared memory, so both AudioTrack and AudioFlinger can
        // access the same audio_track_cblk_t object.
        new(mCblk) audio_track_cblk_t();

        // Set up the data FIFO used for data exchange between AudioTrack and AudioFlinger.
        switch (alloc) {
        // ......
        case ALLOC_CBLK:
            // clear all buffers
            if (buffer == NULL) {
                // Transfer mode MODE_STREAM/TRANSFER_SYNC: the data FIFO
                // starts immediately after the control block (audio_track_cblk_t).
                //   |                                                         |
                //   | -------------------> mCblkMemory <--------------------- |
                //   |                                                         |
                //   +--------------------+------------------------------------+
                //   | audio_track_cblk_t |             Buffer                 |
                //   +--------------------+------------------------------------+
                //   ^                    ^
                //   |                    |
                //   mCblk               mBuffer
                mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
                memset(mBuffer, 0, bufferSize);
            } else {
                // Transfer mode MODE_STATIC/TRANSFER_SHARED: point directly at
                // sharedBuffer, the anonymous shared memory allocated by the app
                // process. The app has already written all of its data there, so
                // AudioFlinger can read from it directly.
                //   +--------------------+    +-----------------------------------+
                //   | audio_track_cblk_t |    |            sharedBuffer           |
                //   +--------------------+    +-----------------------------------+
                //   ^                         ^
                //   |                         |
                //   mCblk                    mBuffer
                mBuffer = buffer;
            }
            break;
        // ......
        }

        // ......
    }
}

三、AudioPolicyManager获得output

status AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
                                              audio_io_handle_t *output,
                                              audio_session_t session,
                                              audio_stream_type_t *stream,
                                              const AttributionSourceState& attributionSource,
                                              const audio_config_t *config,
                                              audio_output_flags_t flags,
                                              audio_port_handle_t *selectedDeviceId,
                                              audio_port_handle_t *portId,
                                              std::vector<audio_io_handle_t> *secondaryOutputs,
                                              output_type_t *outputType,
                                              bool *isSpatialized,
                                              bool *isBitperfect)
{   // 根据Attr获取对应的output
    status_t status = getOutputForAttrInt(&resultAttr, output, session, attr, stream, 
                  uid, config, flags, selectedDeviceId, &isRequestedDeviceForExclusiveUse, 
                  secondaryOutputs != nullptr? &secondaryMixes : nullptr, outputType,
                  isSpatialized, isBitperfect);

    // 根据output值获取outputDesc
    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output);
    sp<TrackClientDescriptor> clientDesc = new TrackClientDescriptor(...);
    outputDesc->addClient(clientDesc);
}

接着看getOutputForAttrInt函数:

status AudioPolicyManager::getOutputForAttrInt(...)
{   // 根据Attribute返回stream类型
    status_t status = getAudioAttributes(resultAttr, attr, stream);
    *stream = mEngine->getStreamTypeForAttributes(*resultAttr);

    // 如果支持动态路由,根据mPolicyMixes直接返回
    // 在汽车应用场景一般跟据USAGE返回SwAudioOutputDescriptor
    bool usePrimaryOutputFromPolicyMixes = false;
    sp<AudioPolicyMix> primaryMix;
    status = mPolicyMixes.getOutputForAttr(*resultAttr, clientConfig, uid, session, *flags,
                         mAvailableOutputDevices, requestedDevice, primaryMix, secondaryMixes,
                         usePrimaryOutputFromPolicyMixes)
    if(usePrimaryOutputFromPolicyMixes) {
        sp<DeviceDescriptor> deviceDesc = mAvailableOutputDevices.getDevice(primaryMix->mDeviceType,
                                            primaryMix->mDeviceAddress, AUDIO_FORMAT_DEFAUT);
        sp<SwAudioOutputDescriptor> policyDesc = primaryMix-getOutput();
        policyDesc->mPolicyMix = primaryMix;
        *output = policyDesc->mIoHandle;
        *selectedDeviceId = deviceDesc != 0? deviceDesc->getId() : AUDIO_PORT_HANDLE_NONE;
        *outputType = API_OUTPUT_LEGACY;
        return NO_ERROR;
    }

    // 根据Attribute获取当前的输出设备
    outputDevices = mEngine->getOutputDevicesForAttributes(*resultAttr, requestedDevice, false);
    *output = AUDIO_IO_HANDLE_NONE;
    // 根据音频设备,获取当前的输出路径
    *output = getOutputForDevice(outputDevices, session, resultAttr, config, flags, isSpatialized,
                            info, resultAttr->flags & AUDIO_FLAG_MUTE_HAPTIC);
    *selectedDeviceId = getFirstDeviceId(outputDevices);
}

mPolicyMixes 是由 CarAudioService 设置的。接着看 AudioPolicyMixCollection 中的 getOutputForAttr 函数:

status AudioPolicyMixCollection::getOutputForAttr(const audio_attributes_t& *attributes,
                    const audio_config_base_t& config, const uid_t uid,
                    const audio_session_t session, audio_output_flags_t flags,
                    const DeviceVector &avaiableOutputDevices,
                    const sp<DeviceDescriptor> requestedDevice,
                    sp<AudioPolicyMix> &primaryMix,
                    std::vector<sp<AudioPolicyMix>> *secondaryMixes,
                    bool& usePrimaryOutputFromPolicyMixes)
{
    bool mixesDisallowsRequestedDevice = false;
    for(size_t i = 0; i < size(); i++) {
        sp<AudioPolicyMix> policyMix = itemAt(i);
        const bool primaryOutputMix = !is_mix_loopback_render(policyMix->mRouteFlags);
        sp<DeviceDescriptor> mixDevice = getOutputDeviceForMix(policyMix->get());
        if(mixesDisallowsRequestedDevice(policyMix->get(), requestedDevice, uid, session);
            mixesDisallowsRequestedDevice = true;

        if(!mixMatch(policyMix->get(), i, attributes, config, uid, session))
            continue;

        if(mixDevice != nullptr && mixDevice->equals(requestedDevice))
            mixesDisallowsRequestedDevice = false;

        if(primaryOutputMix)
            primaryMix = policyMix;
    }
    usePrimaryOutputFromPolicyMixes = (mixesDisallowsRequestedDevice ||
                requestedDevice == nullptr) && primaryMix != nullptr;
    return NO_ERROR;
}

getOutputForDevices 函数实现:

audio_io_handle_t AudioPolicyManager::getOutputForDevices(
                const DeviceVector &devices,
                audio_session_t session,
                const audio_attributes_t *attr,
                audio_output_flags_t *flags,
                bool *isSpatialized,
                sp<PreferredMixerAttributesInfo> prefMixerAttributesInfo,
                bool forceMutingHaptic)
{
    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
    audio_stream_type_t stream = mEngine->getStreamTypeForAttributes(*attr);
    // Adjust the flags according to the stream type.
    if (stream != AUDIO_STREAM_MUSIC) {
        *flags = *flags & ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
    } ...

    // If the content can be spatialized, return the spatializer output directly.
    *isSpatialized = false;
    if (mSpatializerOutput != nullptr
            && canBeSpatializedInt(attr, config, devices.toTypeAddrVector())) {
        *isSpatialized = true;
        return mSpatializerOutput->mIoHandle;
    }

    *flags = *flags & ~AUDIO_OUTPUT_FLAG_DIRECT;
    // Select the most suitable output from the candidates based on flags etc.
    output = selectOutput(outputs, *flags, config->format, channelMask, config->sample_rate, session);
    return output;
}
  • 5
    点赞
  • 8
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值