AAudioAudio.cpp的AAudioStreamBuilder_openStream方法:
//frameworks/av/media/libaaudio/src/core/AAudioAudio.cpp
// C entry point: builds and opens a stream from the builder's parameters,
// returning the opened stream through streamPtr (nullptr on failure).
AAUDIO_API aaudio_result_t AAudioStreamBuilder_openStream(AAudioStreamBuilder* builder,
                                                          AAudioStream** streamPtr)
{
    // Please leave these logs because they are very helpful when debugging.
    ALOGI("%s() called ----------------------------------------", __func__);
    AudioStream *stream = nullptr;
    aaudio_stream_id_t streamId = 0;
    // Macro validates the C handles and returns early on bad input.
    AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(streamPtr);
    const aaudio_result_t result = streamBuilder->build(&stream);  // delegate to AudioStreamBuilder::build()
    if (result != AAUDIO_OK) {
        *streamPtr = nullptr;
    } else {
        *streamPtr = (AAudioStream*) stream;
        streamId = stream->getId();
    }
    ALOGI("%s() returns %d = %s for s#%u ----------------",
          __func__, result, AAudio_convertResultToText(result), streamId);
    return result;
}
调用AudioStreamBuilder的build方法:
//frameworks/av/media/libaaudio/src/core/AudioStreamBuilder.cpp
// Builds a concrete AudioStream from the builder's parameters.
// Resolves the MMAP policies, decides between the MMAP (AAudioService) path
// and the legacy path, opens the stream, and — when an MMAP open fails and
// legacy is allowed — falls back to creating and opening a legacy stream.
aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
    if (streamPtr == nullptr) {
        ALOGE("%s() streamPtr is null", __func__);
        return AAUDIO_ERROR_NULL;
    }
    *streamPtr = nullptr;

    logParameters();

    // Reject parameter combinations the builder can already detect as invalid.
    aaudio_result_t result = validate();
    if (result != AAUDIO_OK) {
        return result;
    }

    std::vector<AudioMMapPolicyInfo> policyInfos;
    // The API setting is the highest priority.
    aaudio_policy_t mmapPolicy = AudioGlobal_getMMapPolicy();
    // If not specified then get from a system property.
    if (mmapPolicy == AAUDIO_UNSPECIFIED && android::AudioSystem::getMmapPolicyInfo(
            AudioMMapPolicyType::DEFAULT, &policyInfos) == NO_ERROR) {
        mmapPolicy = getAAudioPolicy(policyInfos);
    }
    // If still not specified then use the default.
    if (mmapPolicy == AAUDIO_UNSPECIFIED) {
        mmapPolicy = AAUDIO_MMAP_POLICY_DEFAULT;
    }

    // Resolve the EXCLUSIVE-mode policy the same way: query, then default.
    policyInfos.clear();
    aaudio_policy_t mmapExclusivePolicy = AAUDIO_UNSPECIFIED;
    if (android::AudioSystem::getMmapPolicyInfo(
            AudioMMapPolicyType::EXCLUSIVE, &policyInfos) == NO_ERROR) {
        mmapExclusivePolicy = getAAudioPolicy(policyInfos);
    }
    if (mmapExclusivePolicy == AAUDIO_UNSPECIFIED) {
        mmapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
    }

    // Downgrade EXCLUSIVE sharing to SHARED when policy forbids exclusive MMAP.
    aaudio_sharing_mode_t sharingMode = getSharingMode();
    if ((sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE)
            && (mmapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
        ALOGD("%s() EXCLUSIVE sharing mode not supported. Use SHARED.", __func__);
        sharingMode = AAUDIO_SHARING_MODE_SHARED;
        setSharingMode(sharingMode);
    }

    // NEVER forbids MMAP; ALWAYS forbids the legacy path.
    bool allowMMap = mmapPolicy != AAUDIO_POLICY_NEVER;
    bool allowLegacy = mmapPolicy != AAUDIO_POLICY_ALWAYS;

    // TODO Support other performance settings in MMAP mode.
    // Disable MMAP if low latency not requested.
    if (getPerformanceMode() != AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) {
        ALOGD("%s() MMAP not used because AAUDIO_PERFORMANCE_MODE_LOW_LATENCY not requested.",
              __func__);
        allowMMap = false;
    }

    // SessionID and Effects are only supported in Legacy mode.
    if (getSessionId() != AAUDIO_SESSION_ID_NONE) {
        ALOGD("%s() MMAP not used because sessionId specified.", __func__);
        allowMMap = false;
    }

    // If the restrictions above ruled out both paths there is nothing to open.
    if (!allowMMap && !allowLegacy) {
        ALOGE("%s() no backend available: neither MMAP nor legacy path are allowed", __func__);
        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
    }

    // Resolve the privacy-sensitive flag: an explicit request wins; otherwise
    // it is derived from the input preset.
    setPrivacySensitive(false);
    if (mPrivacySensitiveReq == PRIVACY_SENSITIVE_DEFAULT) {
        // When not explicitly requested, set privacy sensitive mode according to input preset:
        // communication and camcorder captures are considered privacy sensitive by default.
        aaudio_input_preset_t preset = getInputPreset();
        if (preset == AAUDIO_INPUT_PRESET_CAMCORDER
                || preset == AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION) {
            setPrivacySensitive(true);
        }
    } else if (mPrivacySensitiveReq == PRIVACY_SENSITIVE_ENABLED) {
        setPrivacySensitive(true);
    }

    android::sp<AudioStream> audioStream;
    result = builder_createStream(getDirection(), sharingMode, allowMMap, audioStream);
    if (result == AAUDIO_OK) {
        // Open the stream using the parameters from the builder.
        result = audioStream->open(*this);
        if (result != AAUDIO_OK) {
            bool isMMap = audioStream->isMMap();
            if (isMMap && allowLegacy) {
                ALOGV("%s() MMAP stream did not open so try Legacy path", __func__);
                // If MMAP stream failed to open then TRY using a legacy stream.
                result = builder_createStream(getDirection(), sharingMode,
                                              false, audioStream);
                if (result == AAUDIO_OK) {
                    result = audioStream->open(*this);
                }
            }
        }
        if (result == AAUDIO_OK) {
            audioStream->registerPlayerBase();
            audioStream->logOpenActual();
            // Hand the C caller a raw pointer carrying an extra strong reference.
            *streamPtr = startUsingStream(audioStream);
        } // else audioStream will go out of scope and be deleted
    }
    return result;
}
上面的build方法主要处理如下:
1. 调用builder_createStream方法,根据MMAP策略和流方向创建数据流;
2. 调用AudioStream的open方法打开数据流(若MMAP流打开失败且允许Legacy路径,则回退为创建并打开Legacy流);
3. 打开成功后调用AudioStream的registerPlayerBase方法,向AudioManager注册播放器;
4. 调用startUsingStream方法,增加一个强引用并把裸指针交给C调用者。
下面我们分别进行分析:
builder_createStream
builder_createStream方法:
//frameworks/av/media/libaaudio/src/core/AudioStreamBuilder.cpp
// Instantiate the concrete stream implementation for the given direction:
// MMAP streams talk to the AAudio service through the binder client,
// non-MMAP streams use the legacy AudioStreamRecord / AudioStreamTrack classes.
static aaudio_result_t builder_createStream(aaudio_direction_t direction,
                                            aaudio_sharing_mode_t /*sharingMode*/,
                                            bool tryMMap,
                                            android::sp<AudioStream> &stream) {
    if (direction == AAUDIO_DIRECTION_INPUT) {        // capture stream
        if (tryMMap) {
            stream = new AudioStreamInternalCapture(AAudioBinderClient::getInstance(),
                                                    false);
        } else {
            stream = new AudioStreamRecord();
        }
        return AAUDIO_OK;
    }
    if (direction == AAUDIO_DIRECTION_OUTPUT) {       // playback stream
        if (tryMMap) {
            stream = new AudioStreamInternalPlay(AAudioBinderClient::getInstance(),
                                                 false);
        } else {
            stream = new AudioStreamTrack();
        }
        return AAUDIO_OK;
    }
    ALOGE("%s() bad direction = %d", __func__, direction);
    return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
}
builder_createStream方法中根据流的方向创建Capture流和Play流:
AudioStreamInternalCapture
AudioStreamInternalCapture的构造方法如下:
//frameworks/av/media/libaaudio/src/core/AudioStreamInternalCapture.cpp
// Capture-direction MMAP stream; construction is fully delegated to the
// shared AudioStreamInternal base class.
AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface,
                                                       bool inService)
        : AudioStreamInternal(serviceInterface, inService) {
}
调用AudioStreamInternal方法:
//frameworks/av/media/libaaudio/src/core/AudioStreamInternal.cpp
// Common base for MMAP streams. Stores the interface used to talk to the
// AAudio service and reads the wakeup-delay / minimum-sleep tuning values
// from system properties, converting microseconds to nanoseconds.
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
        : AudioStream()
        , mClockModel()
        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)  // no service-side stream opened yet
        , mInService(inService)  // presumably true when running inside the service process — TODO confirm
        , mServiceInterface(serviceInterface)
        , mAtomicInternalTimestamp()
        , mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
        , mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
{
}
调用AudioStream方法:
//frameworks/av/media/libaaudio/src/core/AudioStream.cpp
// Base constructor: creates the MyPlayerBase used for AudioManager
// registration and assigns the next stream id.
AudioStream::AudioStream()
        : mPlayerBase(new MyPlayerBase())
        , mStreamId(AAudio_getNextStreamId())
{
    setPeriodNanoseconds(0);  // initialize the period in nanoseconds to zero
}
AudioStreamInternalPlay
AudioStreamInternalPlay的构造方法:
//frameworks/av/media/libaaudio/src/core/AudioStreamInternalPlay.cpp
// Playback-direction MMAP stream; construction is fully delegated to the
// shared AudioStreamInternal base class.
AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
                                                 bool inService)
        : AudioStreamInternal(serviceInterface, inService) {
}
AudioStreamRecord
AudioStreamRecord构造方法:
//frameworks/av/media/libaaudio/src/core/AudioStreamRecord.cpp
// Legacy (non-MMAP) capture stream. The fixed-block writer is bound to this
// stream instance — presumably to adapt variable-size callbacks to fixed
// block sizes; verify in AudioStreamRecord.cpp.
AudioStreamRecord::AudioStreamRecord()
        : AudioStreamLegacy()
        , mFixedBlockWriter(*this)
{
}
调用AudioStreamLegacy方法:
//frameworks/av/media/libaaudio/src/core/AudioStreamLegacy.cpp
// Shared base class for the legacy (non-MMAP) stream implementations;
// only runs the AudioStream base constructor.
AudioStreamLegacy::AudioStreamLegacy()
        : AudioStream() {
}
调用AudioStream方法:
//frameworks/av/media/libaaudio/src/core/AudioStream.cpp
// Base constructor: creates the MyPlayerBase used for AudioManager
// registration and assigns the next stream id.
AudioStream::AudioStream()
        : mPlayerBase(new MyPlayerBase())
        , mStreamId(AAudio_getNextStreamId())
{
    setPeriodNanoseconds(0);  // initialize the period in nanoseconds to zero
}
AudioStreamTrack
AudioStreamTrack构造方法:
//frameworks/av/media/libaaudio/src/core/AudioStreamTrack.cpp
// Legacy (non-MMAP) playback stream. The fixed-block reader is bound to this
// stream instance — presumably to adapt variable-size callbacks to fixed
// block sizes; verify in AudioStreamTrack.cpp.
AudioStreamTrack::AudioStreamTrack()
        : AudioStreamLegacy()
        , mFixedBlockReader(*this)
{
}
AudioStream::open
AudioStream的open方法:
//frameworks/av/media/libaaudio/src/core/AudioStream.cpp
// Copy the stream configuration out of the builder into this stream,
// substituting defaults for any values left UNSPECIFIED.
aaudio_result_t AudioStream::open(const AudioStreamBuilder& builder)
{
    // Validate here as well because the AAudioService calls open() directly
    // without going through build().
    aaudio_result_t result = builder.validate();
    if (result != AAUDIO_OK) {
        return result;
    }

    // Replace an UNSPECIFIED builder value with a concrete fallback.
    const auto orDefault = [](auto value, auto fallback) {
        return (value == AAUDIO_UNSPECIFIED) ? fallback : value;
    };

    // Snapshot every parameter because the Builder may be deleted after
    // this call returns.
    // TODO AudioStream should be a subclass of AudioStreamParameters
    mSamplesPerFrame          = builder.getSamplesPerFrame();
    mChannelMask              = builder.getChannelMask();
    mSampleRate               = builder.getSampleRate();
    mDeviceId                 = builder.getDeviceId();
    mFormat                   = builder.getFormat();
    mSharingMode              = builder.getSharingMode();
    mSharingModeMatchRequired = builder.isSharingModeMatchRequired();
    mPerformanceMode          = builder.getPerformanceMode();

    mUsage       = orDefault(builder.getUsage(), AAUDIO_USAGE_MEDIA);
    mContentType = orDefault(builder.getContentType(), AAUDIO_CONTENT_TYPE_MUSIC);
    // For consistency with other properties, UNSPECIFIED is the same as AUTO.
    mSpatializationBehavior = orDefault(builder.getSpatializationBehavior(),
                                        AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO);
    mIsContentSpatialized = builder.isContentSpatialized();
    mInputPreset = orDefault(builder.getInputPreset(), AAUDIO_INPUT_PRESET_VOICE_RECOGNITION);
    mAllowedCapturePolicy = orDefault(builder.getAllowedCapturePolicy(),
                                      AAUDIO_ALLOW_CAPTURE_BY_ALL);
    mIsPrivacySensitive = builder.isPrivacySensitive();

    // Callback configuration.
    mFramesPerDataCallback = builder.getFramesPerDataCallback();
    mDataCallbackProc      = builder.getDataCallbackProc();
    mErrorCallbackProc     = builder.getErrorCallbackProc();
    mDataCallbackUserData  = builder.getDataCallbackUserData();
    mErrorCallbackUserData = builder.getErrorCallbackUserData();
    return AAUDIO_OK;
}
AudioStream::registerPlayerBase
registerPlayerBase方法:
//frameworks/av/media/libaaudio/src/core/AudioStream.cpp
// Excerpt of the AudioStream class showing registerPlayerBase().
class AudioStream : public android::AudioSystem::AudioDeviceCallback {
    // Helper object that represents this stream to the system AudioManager.
    const android::sp<MyPlayerBase> mPlayerBase;
    // Register this stream with the AudioManager; only output (playback)
    // streams are registered — input streams are skipped.
    void registerPlayerBase() {
        if (getDirection() == AAUDIO_DIRECTION_OUTPUT) {
            mPlayerBase->registerWithAudioManager(this);
        }
    }
}
调用MyPlayerBase的registerWithAudioManager方法:
//frameworks/av/media/libaaudio/src/core/AudioStream.cpp
// Remember the owning stream and, on the first call only, register it
// with the AudioManager via PlayerBase::init(). Subsequent calls just
// refresh mParent.
void AudioStream::MyPlayerBase::registerWithAudioManager(const android::sp<AudioStream>& parent) {
    std::lock_guard<std::mutex> lock(mParentLock);
    mParent = parent;
    if (mRegistered) {
        return;  // already registered; nothing more to do
    }
    init(android::PLAYER_TYPE_AAUDIO, AAudioConvert_usageToInternal(parent->getUsage()),
         (audio_session_t)parent->getSessionId());
    mRegistered = true;
}
startUsingStream
startUsingStream方法:
//frameworks/av/media/libaaudio/src/core/AudioStreamBuilder.cpp
// Convert the smart pointer into a raw pointer for the C API.
// The C code cannot hold an android::sp<>, so we manually add one strong
// reference on the app's behalf; the matching stop/close path is expected
// to release it so the stream is not deleted when audioStream goes out
// of scope.
AudioStream *AudioStreamBuilder::startUsingStream(android::sp<AudioStream> &audioStream) {
    AudioStream *rawStream = audioStream.get();
    rawStream->incStrong(nullptr);
    return rawStream;
}