Android13 AAudioStreamBuilder_openStream流程分析

AAudioAudio.cpp的AAudioStreamBuilder_openStream方法:

//frameworks/av/media/libaaudio/src/core/AAudioAudio.cpp
AAUDIO_API aaudio_result_t  AAudioStreamBuilder_openStream(AAudioStreamBuilder* builder,
                                                     AAudioStream** streamPtr)
{
    // Public NDK entry point: turn the configured builder into an open stream.
    // On success *streamPtr receives the new AAudioStream; on failure it is
    // set to nullptr and the aaudio_result_t error is returned.
    AudioStream *audioStream = nullptr;
    aaudio_stream_id_t id = 0;
    // Please leave these logs because they are very helpful when debugging.
    ALOGI("%s() called ----------------------------------------", __func__);
    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(streamPtr);
    aaudio_result_t result = streamBuilder->build(&audioStream); // delegate to AudioStreamBuilder::build()
    if (result == AAUDIO_OK) {
        *streamPtr = (AAudioStream*) audioStream;
        id = audioStream->getId(); // stream id, used only for the log line below
    } else {
        *streamPtr = nullptr;
    }
    ALOGI("%s() returns %d = %s for s#%u ----------------",
        __func__, result, AAudio_convertResultToText(result), id);
    return result;
}

调用AudioStreamBuilder的build方法:

//frameworks/av/media/libaaudio/src/core/AudioStreamBuilder.cpp
aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
    // Create and open an AudioStream from this builder's parameters:
    //   1. validate the parameters,
    //   2. resolve the MMAP policies (API setting > system property > default),
    //   3. create the stream object (MMAP or legacy) and open it, falling back
    //      to the legacy path if an MMAP stream fails to open.

    if (streamPtr == nullptr) {
        ALOGE("%s() streamPtr is null", __func__);
        return AAUDIO_ERROR_NULL;
    }
    *streamPtr = nullptr;


    logParameters();


    aaudio_result_t result = validate();
    if (result != AAUDIO_OK) {
        return result;
    }


    std::vector<AudioMMapPolicyInfo> policyInfos;
    // The API setting is the highest priority.
    aaudio_policy_t mmapPolicy = AudioGlobal_getMMapPolicy();
    // If not specified then get from a system property.
    if (mmapPolicy == AAUDIO_UNSPECIFIED && android::AudioSystem::getMmapPolicyInfo(
                AudioMMapPolicyType::DEFAULT, &policyInfos) == NO_ERROR) {
        mmapPolicy = getAAudioPolicy(policyInfos);
    }
    // If still not specified then use the default.
    if (mmapPolicy == AAUDIO_UNSPECIFIED) {
        mmapPolicy = AAUDIO_MMAP_POLICY_DEFAULT;
    }


    // Resolve the EXCLUSIVE-mode MMAP policy with the same fallback scheme.
    policyInfos.clear();
    aaudio_policy_t mmapExclusivePolicy = AAUDIO_UNSPECIFIED;
    if (android::AudioSystem::getMmapPolicyInfo(
            AudioMMapPolicyType::EXCLUSIVE, &policyInfos) == NO_ERROR) {
        mmapExclusivePolicy = getAAudioPolicy(policyInfos);
    }
    if (mmapExclusivePolicy == AAUDIO_UNSPECIFIED) {
        mmapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
    }


    // Silently downgrade EXCLUSIVE to SHARED when the policy forbids it.
    aaudio_sharing_mode_t sharingMode = getSharingMode();
    if ((sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE)
        && (mmapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
        ALOGD("%s() EXCLUSIVE sharing mode not supported. Use SHARED.", __func__);
        sharingMode = AAUDIO_SHARING_MODE_SHARED;
        setSharingMode(sharingMode);
    }


    bool allowMMap = mmapPolicy != AAUDIO_POLICY_NEVER;
    bool allowLegacy = mmapPolicy != AAUDIO_POLICY_ALWAYS;


    // TODO Support other performance settings in MMAP mode.
    // Disable MMAP if low latency not requested.
    if (getPerformanceMode() != AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) {
        ALOGD("%s() MMAP not used because AAUDIO_PERFORMANCE_MODE_LOW_LATENCY not requested.",
              __func__);
        allowMMap = false;
    }


    // SessionID and Effects are only supported in Legacy mode.
    if (getSessionId() != AAUDIO_SESSION_ID_NONE) {
        ALOGD("%s() MMAP not used because sessionId specified.", __func__);
        allowMMap = false;
    }


    // Both paths ruled out (e.g. policy ALWAYS but MMAP disabled above).
    if (!allowMMap && !allowLegacy) {
        ALOGE("%s() no backend available: neither MMAP nor legacy path are allowed", __func__);
        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
    }


    setPrivacySensitive(false);
    if (mPrivacySensitiveReq == PRIVACY_SENSITIVE_DEFAULT) {
        // When not explicitly requested, set privacy sensitive mode according to input preset:
        // communication and camcorder captures are considered privacy sensitive by default.
        aaudio_input_preset_t preset = getInputPreset();
        if (preset == AAUDIO_INPUT_PRESET_CAMCORDER
                || preset == AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION) {
            setPrivacySensitive(true);
        }
    } else if (mPrivacySensitiveReq == PRIVACY_SENSITIVE_ENABLED) {
        setPrivacySensitive(true);
    }


    android::sp<AudioStream> audioStream;
    result = builder_createStream(getDirection(), sharingMode, allowMMap, audioStream);
    if (result == AAUDIO_OK) {
        // Open the stream using the parameters from the builder.
        result = audioStream->open(*this);
        if (result != AAUDIO_OK) {
            bool isMMap = audioStream->isMMap();
            if (isMMap && allowLegacy) {
                ALOGV("%s() MMAP stream did not open so try Legacy path", __func__);
                // If MMAP stream failed to open then TRY using a legacy stream.
                result = builder_createStream(getDirection(), sharingMode,
                                              false, audioStream);
                if (result == AAUDIO_OK) {
                    result = audioStream->open(*this);
                }
            }
        }
        if (result == AAUDIO_OK) {
            audioStream->registerPlayerBase();
            audioStream->logOpenActual();
            // Transfer ownership to the C caller via an extra strong reference.
            *streamPtr = startUsingStream(audioStream);
        } // else audioStream will go out of scope and be deleted
    }


    return result;
}

上面方法主要处理如下:

调用builder_createStream方法创建数据流。

调用AudioStream的open方法打开数据流。

调用AudioStream的registerPlayerBase方法。

调用startUsingStream方法,开始使用流。

下面我们分别进行分析:

builder_createStream

builder_createStream方法:

//frameworks/av/media/libaaudio/src/core/AudioStreamBuilder.cpp
static aaudio_result_t builder_createStream(aaudio_direction_t direction,
                                            aaudio_sharing_mode_t /*sharingMode*/,
                                            bool tryMMap,
                                            android::sp<AudioStream> &stream) {
    // Factory helper: instantiate the concrete AudioStream subclass for the
    // given direction. When tryMMap is true, use the MMAP implementation
    // backed by the AAudioBinderClient; otherwise use the legacy classes.
    aaudio_result_t result = AAUDIO_OK;


    switch (direction) {


        case AAUDIO_DIRECTION_INPUT: // capture (input) stream
            if (tryMMap) {
                stream = new AudioStreamInternalCapture(AAudioBinderClient::getInstance(),
                                                                 false); // MMAP capture; false = not running in the service
            } else {
                stream = new AudioStreamRecord(); // legacy capture path
            }
            break;


        case AAUDIO_DIRECTION_OUTPUT: // playback (output) stream
            if (tryMMap) {
                stream = new AudioStreamInternalPlay(AAudioBinderClient::getInstance(),
                                                              false); // MMAP playback; false = not running in the service
            } else {
                stream = new AudioStreamTrack(); // legacy playback path
            }
            break;


        default:
            ALOGE("%s() bad direction = %d", __func__, direction);
            result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
    }
    return result;
}

builder_createStream方法中根据流的方向创建Capture流和Play流:

AudioStreamInternalCapture

AudioStreamInternalCapture的构造方法如下:

//frameworks/av/media/libaaudio/src/core/AudioStreamInternalCapture.cpp
AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface  &serviceInterface,
                                                 bool inService)
    : AudioStreamInternal(serviceInterface, inService) {
    // MMAP capture stream: all initialization is done by the
    // AudioStreamInternal base-class constructor.

}

调用AudioStreamInternal方法:

//frameworks/av/media/libaaudio/src/core/AudioStreamInternal.cpp
// Common base for the MMAP stream classes. Stores the interface used to talk
// to the AAudio service and timing parameters read from system properties.
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface  &serviceInterface, bool inService)
        : AudioStream()
        , mClockModel()
        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)  // no valid service stream handle yet
        , mInService(inService)  // true when constructed inside the AAudioService process
        , mServiceInterface(serviceInterface)
        , mAtomicInternalTimestamp()
        // Timing tunables come from system properties, converted micros -> nanos.
        , mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        , mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        {
}

调用AudioStream方法:

//frameworks/av/media/libaaudio/src/core/AudioStream.cpp
// Root base class constructor: creates the MyPlayerBase helper (used later to
// register output streams with the AudioManager) and assigns a unique id.
AudioStream::AudioStream()
        : mPlayerBase(new MyPlayerBase())
        , mStreamId(AAudio_getNextStreamId())  // unique id, shown as s#N in logs
        {
    setPeriodNanoseconds(0); // set the period (in nanoseconds) to zero
}

AudioStreamInternalPlay

AudioStreamInternalPlay的构造方法:

//frameworks/av/media/libaaudio/src/core/AudioStreamInternalPlay.cpp
AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface  &serviceInterface,
                                                       bool inService)
        : AudioStreamInternal(serviceInterface, inService) {
    // MMAP playback stream: all initialization is done by the
    // AudioStreamInternal base-class constructor.

}

AudioStreamRecord

AudioStreamRecord构造方法:

//frameworks/av/media/libaaudio/src/core/AudioStreamRecord.cpp
// Legacy capture stream constructor.
AudioStreamRecord::AudioStreamRecord()
    : AudioStreamLegacy()
    , mFixedBlockWriter(*this)  // fixed-block adapter bound to this stream
{
}

调用AudioStreamLegacy方法:

//frameworks/av/media/libaaudio/src/core/AudioStreamLegacy.cpp
// Common base for the legacy stream classes; just forwards to AudioStream().
AudioStreamLegacy::AudioStreamLegacy()
        : AudioStream() {
}

调用AudioStream方法:

//frameworks/av/media/libaaudio/src/core/AudioStream.cpp
// Root base class constructor: creates the MyPlayerBase helper and assigns a
// unique stream id (this is the same code reached from the legacy path).
AudioStream::AudioStream()
        : mPlayerBase(new MyPlayerBase())
        , mStreamId(AAudio_getNextStreamId())  // unique id, shown as s#N in logs
        {
    setPeriodNanoseconds(0); // set the period (in nanoseconds) to zero
}

AudioStreamTrack

AudioStreamTrack构造方法:

//frameworks/av/media/libaaudio/src/core/AudioStreamTrack.cpp
// Legacy playback stream constructor.
AudioStreamTrack::AudioStreamTrack()
    : AudioStreamLegacy()
    , mFixedBlockReader(*this)  // fixed-block adapter bound to this stream
{
}

AudioStream::open

AudioStream的open方法:

//frameworks/av/media/libaaudio/src/core/AudioStream.cpp
aaudio_result_t AudioStream::open(const AudioStreamBuilder& builder)
{
    // Base-class open: validate the builder, then snapshot every parameter
    // into this stream, substituting defaults for values left AAUDIO_UNSPECIFIED.
    // Subclasses do the actual device/stream setup on top of this.

    // Call here as well because the AAudioService will call this without calling build().
    aaudio_result_t result = builder.validate();
    if (result != AAUDIO_OK) {
        return result;
    }


    // Copy parameters from the Builder because the Builder may be deleted after this call.
    // TODO AudioStream should be a subclass of AudioStreamParameters
    mSamplesPerFrame = builder.getSamplesPerFrame();
    mChannelMask = builder.getChannelMask();
    mSampleRate = builder.getSampleRate();
    mDeviceId = builder.getDeviceId();
    mFormat = builder.getFormat();
    mSharingMode = builder.getSharingMode();
    mSharingModeMatchRequired = builder.isSharingModeMatchRequired();
    mPerformanceMode = builder.getPerformanceMode();


    // Attribute defaults: MEDIA usage and MUSIC content for unspecified output.
    mUsage = builder.getUsage();
    if (mUsage == AAUDIO_UNSPECIFIED) {
        mUsage = AAUDIO_USAGE_MEDIA;
    }
    mContentType = builder.getContentType();
    if (mContentType == AAUDIO_UNSPECIFIED) {
        mContentType = AAUDIO_CONTENT_TYPE_MUSIC;
    }
    mSpatializationBehavior = builder.getSpatializationBehavior();
    // for consistency with other properties, note UNSPECIFIED is the same as AUTO
    if (mSpatializationBehavior == AAUDIO_UNSPECIFIED) {
        mSpatializationBehavior = AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO;
    }
    mIsContentSpatialized = builder.isContentSpatialized();
    // Input defaults: VOICE_RECOGNITION preset, capture allowed by all.
    mInputPreset = builder.getInputPreset();
    if (mInputPreset == AAUDIO_UNSPECIFIED) {
        mInputPreset = AAUDIO_INPUT_PRESET_VOICE_RECOGNITION;
    }
    mAllowedCapturePolicy = builder.getAllowedCapturePolicy();
    if (mAllowedCapturePolicy == AAUDIO_UNSPECIFIED) {
        mAllowedCapturePolicy = AAUDIO_ALLOW_CAPTURE_BY_ALL;
    }
    mIsPrivacySensitive = builder.isPrivacySensitive();


    // callbacks
    mFramesPerDataCallback = builder.getFramesPerDataCallback();
    mDataCallbackProc = builder.getDataCallbackProc();
    mErrorCallbackProc = builder.getErrorCallbackProc();
    mDataCallbackUserData = builder.getDataCallbackUserData();
    mErrorCallbackUserData = builder.getErrorCallbackUserData();


    return AAUDIO_OK;
}

AudioStream::registerPlayerBase

registerPlayerBase方法:

//frameworks/av/media/libaaudio/src/core/AudioStream.h
// Abbreviated excerpt of the AudioStream class declaration, showing only the
// members relevant to player registration.
class AudioStream : public android::AudioSystem::AudioDeviceCallback {
    const android::sp<MyPlayerBase>   mPlayerBase;
    // Register this stream with the system AudioManager via MyPlayerBase.
    // Only output (playback) streams are registered; input streams are skipped.
    void registerPlayerBase() {
        if (getDirection() == AAUDIO_DIRECTION_OUTPUT) {
            mPlayerBase->registerWithAudioManager(this);
        }
    }
};  // note: the class declaration must end with a semicolon

调用MyPlayerBase的registerWithAudioManager方法:

//frameworks/av/media/libaaudio/src/core/AudioStream.cpp
void AudioStream::MyPlayerBase::registerWithAudioManager(const android::sp<AudioStream>& parent) {
    // Register the owning stream as a PLAYER_TYPE_AAUDIO player with the
    // system AudioManager. Idempotent: init() runs only on the first call;
    // later calls just refresh the mParent back-pointer under the lock.
    std::lock_guard<std::mutex> lock(mParentLock);
    mParent = parent;
    if (!mRegistered) {
        init(android::PLAYER_TYPE_AAUDIO, AAudioConvert_usageToInternal(parent->getUsage()),
            (audio_session_t)parent->getSessionId());
        mRegistered = true;
    }
}

startUsingStream

startUsingStream方法:

//frameworks/av/media/libaaudio/src/core/AudioStreamBuilder.cpp
// Hand the stream to the C API caller as a raw pointer it now owns.
AudioStream *AudioStreamBuilder::startUsingStream(android::sp<AudioStream> &audioStream) {
    // Increment the smart pointer so it will not get deleted when
    // we pass it to the C caller and it goes out of scope.
    // The C code cannot hold a smart pointer so we increment the reference
    // count to indicate that the C app owns a reference.
    audioStream->incStrong(nullptr);
    return audioStream.get();
}
点赞 4 · 收藏 3 · 评论 0
`guide_usb_openstream`是一个使用QT开发的用于打开USB摄像头设备流的函数。它的声明如下: ```cpp int guide_usb_openstream(const DeviceInfo* pInfo, OnFrameDataReceivedCB frameCallBack, OnDeviceConnectStatusCB connectStatusCallBack); ``` 该函数有三个参数: 1. `pInfo`:指向`DeviceInfo`结构的指针,包含要打开的设备的信息。 2. `frameCallBack`:指向帧数据接收回调函数的指针。 3. `connectStatusCallBack`:指向设备连接状态回调函数的指针。 其中,`DeviceInfo`结构包含了如下信息: ```cpp struct DeviceInfo { int vendorId; // USB设备的供应商ID int productId; // USB设备的产品ID int interfaceNum; // USB设备的接口编号 int format; // 视频流格式 int width; // 视频流宽度 int height; // 视频流高度 int fps; // 视频流帧率 }; ``` `OnFrameDataReceivedCB`和`OnDeviceConnectStatusCB`分别是帧数据接收回调函数和设备连接状态回调函数的函数指针类型,它们的声明如下: ```cpp typedef void (*OnFrameDataReceivedCB)(const char* pData, int size, void* pUser); typedef void (*OnDeviceConnectStatusCB)(int status, void* pUser); ``` `OnFrameDataReceivedCB`函数在每次收到帧数据时被调用,它有三个参数: 1. `pData`:指向帧数据的指针。 2. `size`:帧数据的大小。 3. `pUser`:用户数据指针,用于传递额外的用户数据给回调函数。 `OnDeviceConnectStatusCB`函数在设备连接状态发生变化时被调用,它有两个参数: 1. `status`:设备连接状态,0表示设备已连接,1表示设备已断开。 2. `pUser`:用户数据指针,用于传递额外的用户数据给回调函数。 该函数的返回值是一个整数类型的错误码,如果返回0表示函数执行成功,否则表示函数执行失败。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值