Before AudioFlinger can operate on a PlaybackTrack, the PlaybackTrack must first be created. The Java-layer AudioTrack constructor calls the C++ AudioTrack::set() function through JNI; set() obtains an IAudioFlinger via AudioSystem and then calls AudioFlinger::createTrack(). The path from AudioTrack to IAudioFlinger was already covered in the AudioTrack analysis, so below we continue with the AudioFlinger side.
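As a reminder of the client side, here is a minimal native sketch of what triggers this path. The parameter values are assumed and the AudioTrack constructor/set() signatures vary across Android releases, so treat this as illustrative only:
// Hypothetical client-side sketch: constructing a native AudioTrack runs
// AudioTrack::set(), which obtains IAudioFlinger via AudioSystem and lands
// in AudioFlinger::createTrack() inside audioserver.
#include <media/AudioTrack.h>
using namespace android;
sp<AudioTrack> createMusicTrack() {
    sp<AudioTrack> track = new AudioTrack(
            AUDIO_STREAM_MUSIC,        // stream type
            48000,                     // sample rate (assumed)
            AUDIO_FORMAT_PCM_16_BIT,   // format
            AUDIO_CHANNEL_OUT_STEREO,  // channel mask
            0 /* frameCount: let the server choose a minimum */);
    // initCheck() reflects the status that AudioFlinger::createTrack() returned
    return track->initCheck() == NO_ERROR ? track : nullptr;
}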
On the server side, the PlaybackTrack is created in AudioFlinger::createTrack():
//frameworks/av/services/audioflinger/AudioFlinger.cpp
// IAudioFlinger interface
status_t AudioFlinger::createTrack(const media::CreateTrackRequest& _input,
media::CreateTrackResponse& _output)
{
// Local version of VALUE_OR_RETURN, specific to this method's calling conventions.
CreateTrackInput input = VALUE_OR_RETURN_STATUS(CreateTrackInput::fromAidl(_input));
CreateTrackOutput output;
sp<PlaybackThread::Track> track;
sp<TrackHandle> trackHandle;
sp<Client> client;
status_t lStatus;
audio_stream_type_t streamType;
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
std::vector<audio_io_handle_t> secondaryOutputs;
bool isSpatialized = false;
// TODO b/182392553: refactor or make clearer
pid_t clientPid =
VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_pid_t(input.clientInfo.attributionSource.pid));
bool updatePid = (clientPid == (pid_t)-1);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
uid_t clientUid =
VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_uid_t(input.clientInfo.attributionSource.uid));
audio_io_handle_t effectThreadId = AUDIO_IO_HANDLE_NONE;
std::vector<int> effectIds;
audio_attributes_t localAttr = input.attr;
AttributionSourceState adjAttributionSource = input.clientInfo.attributionSource;
if (!isAudioServerOrMediaServerUid(callingUid)) {
ALOGW_IF(clientUid != callingUid,
"%s uid %d tried to pass itself off as %d",
__FUNCTION__, callingUid, clientUid);
adjAttributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
clientUid = callingUid;
updatePid = true;
}
const pid_t callingPid = IPCThreadState::self()->getCallingPid();
if (updatePid) {
ALOGW_IF(clientPid != (pid_t)-1 && clientPid != callingPid,
"%s uid %d pid %d tried to pass itself off as pid %d",
__func__, callingUid, callingPid, clientPid);
clientPid = callingPid;
adjAttributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(callingPid));
}
audio_session_t sessionId = input.sessionId;
if (sessionId == AUDIO_SESSION_ALLOCATE) {
sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
} else if (audio_unique_id_get_use(sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
lStatus = BAD_VALUE;
goto Exit;
}
output.sessionId = sessionId;
output.outputId = AUDIO_IO_HANDLE_NONE;
output.selectedDeviceId = input.selectedDeviceId;
// via AudioSystem this goes over binder to AudioPolicyManager::getOutputForAttr(); through a series of calls it comes back into AudioFlinger::openOutput(), which creates the corresponding playback thread based on the flags
lStatus = AudioSystem::getOutputForAttr(&localAttr, &output.outputId, sessionId, &streamType,
adjAttributionSource, &input.config, input.flags,
&output.selectedDeviceId, &portId, &secondaryOutputs,
&isSpatialized);
if (lStatus != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
ALOGE("createTrack() getOutputForAttr() return error %d or invalid output handle", lStatus);
goto Exit;
}
// client AudioTrack::set already implements AUDIO_STREAM_DEFAULT => AUDIO_STREAM_MUSIC,
// but if someone uses binder directly they could bypass that and cause us to crash
if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
ALOGE("createTrack() invalid stream type %d", streamType);
lStatus = BAD_VALUE;
goto Exit;
}
// further channel mask checks are performed by createTrack_l() depending on the thread type
if (!audio_is_output_channel(input.config.channel_mask)) {
ALOGE("createTrack() invalid channel mask %#x", input.config.channel_mask);
lStatus = BAD_VALUE;
goto Exit;
}
// further format checks are performed by createTrack_l() depending on the thread type
if (!audio_is_valid_format(input.config.format)) {
ALOGE("createTrack() invalid format %#x", input.config.format);
lStatus = BAD_VALUE;
goto Exit;
}
{
Mutex::Autolock _l(mLock);
PlaybackThread *thread = checkPlaybackThread_l(output.outputId);
if (thread == NULL) {
ALOGE("no playback thread found for output handle %d", output.outputId);
lStatus = BAD_VALUE;
goto Exit;
}
client = registerPid(clientPid);
PlaybackThread *effectThread = NULL;
// check if an effect chain with the same session ID is present on another
// output thread and move it here.
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);
if (mPlaybackThreads.keyAt(i) != output.outputId) {
uint32_t sessions = t->hasAudioSession(sessionId);
if (sessions & ThreadBase::EFFECT_SESSION) {
effectThread = t.get();
break;
}
}
}
ALOGV("createTrack() sessionId: %d", sessionId);
// fill in the output parameters
output.sampleRate = input.config.sample_rate;
output.frameCount = input.frameCount;
output.notificationFrameCount = input.notificationFrameCount;
output.flags = input.flags;
output.streamType = streamType;
track = thread->createTrack_l(client, streamType, localAttr, &output.sampleRate, // call createTrack_l() to create the PlaybackTrack
input.config.format, input.config.channel_mask,
&output.frameCount, &output.notificationFrameCount,
input.notificationsPerBuffer, input.speed,
input.sharedBuffer, sessionId, &output.flags,
callingPid, adjAttributionSource, input.clientInfo.clientTid,
&lStatus, portId, input.audioTrackCallback, isSpatialized);
LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (track == 0));
// we don't abort yet if lStatus != NO_ERROR; there is still work to be done regardless
output.afFrameCount = thread->frameCount();
output.afSampleRate = thread->sampleRate();
output.afLatencyMs = thread->latency();
output.portId = portId;
if (lStatus == NO_ERROR) {
// Connect secondary outputs. Failure on a secondary output must not impede the primary
// Any secondary output setup failure will lead to a desync between the AP and AF until
// the track is destroyed.
updateSecondaryOutputsForTrack_l(track.get(), thread, secondaryOutputs);
}
// move effect chain to this output thread if an effect on same session was waiting
// for a track to be created
if (lStatus == NO_ERROR && effectThread != NULL) {
// no risk of deadlock because AudioFlinger::mLock is held
Mutex::Autolock _dl(thread->mLock);
Mutex::Autolock _sl(effectThread->mLock);
if (moveEffectChain_l(sessionId, effectThread, thread) == NO_ERROR) {
effectThreadId = thread->id();
effectIds = thread->getEffectIds_l(sessionId);
}
}
// Look for sync events awaiting for a session to be used.
for (size_t i = 0; i < mPendingSyncEvents.size(); i++) {
if (mPendingSyncEvents[i]->triggerSession() == sessionId) {
if (thread->isValidSyncEvent(mPendingSyncEvents[i])) {
if (lStatus == NO_ERROR) {
(void) track->setSyncEvent(mPendingSyncEvents[i]);
} else {
mPendingSyncEvents[i]->cancel();
}
mPendingSyncEvents.removeAt(i);
i--;
}
}
}
if ((output.flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) == AUDIO_OUTPUT_FLAG_HW_AV_SYNC) {
setAudioHwSyncForSession_l(thread, sessionId);
}
}
if (lStatus != NO_ERROR) {
// remove local strong reference to Client before deleting the Track so that the
// Client destructor is called by the TrackBase destructor with mClientLock held
// Don't hold mClientLock when releasing the reference on the track as the
// destructor will acquire it.
{
Mutex::Autolock _cl(mClientLock);
client.clear();
}
track.clear();
goto Exit;
}
// effectThreadId is not NONE if an effect chain corresponding to the track session
// was found on another thread and must be moved on this thread
if (effectThreadId != AUDIO_IO_HANDLE_NONE) {
AudioSystem::moveEffectsToIo(effectIds, effectThreadId);
}
output.audioTrack = new TrackHandle(track); // create the TrackHandle
_output = VALUE_OR_FATAL(output.toAidl());
Exit:
if (lStatus != NO_ERROR && output.outputId != AUDIO_IO_HANDLE_NONE) {
AudioSystem::releaseOutput(portId);
}
return lStatus;
}
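Almost every line above goes through the VALUE_OR_RETURN_STATUS family of macros to unpack an AIDL/legacy conversion result. Conceptually they work like the following simplified sketch (see media/AidlConversionUtil.h for the real definitions):
// Sketch only: yield the value of a ConversionResult<T>, or make the
// enclosing function early-return the error code.
// VALUE_OR_RETURN_BINDER_STATUS is the same idea, but it wraps the error
// into a binder Status instead of a status_t.
#define VALUE_OR_RETURN_STATUS(result)       \
    ({                                       \
        auto _tmp = (result);                \
        if (!_tmp.ok()) return _tmp.error(); \
        std::move(_tmp.value());             \
    })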
Setting the macros aside, the function has three key steps:
- Call AudioSystem::getOutputForAttr() to obtain a matching output; this is mostly AudioPolicy territory and will be analyzed in the AudioPolicy discussion.
- Call thread->createTrack_l() to create the PlaybackTrack.
- Call new TrackHandle to create the TrackHandle.
We analyze each in turn below:
AudioSystem getOutputForAttr
AudioFlinger::createTrack() calls AudioSystem::getOutputForAttr(), so we start the analysis there:
//frameworks/av/media/libaudioclient/AudioSystem.cpp
status_t AudioSystem::getOutputForAttr(audio_attributes_t* attr,
audio_io_handle_t* output,
audio_session_t session,
audio_stream_type_t* stream,
const AttributionSourceState& attributionSource,
const audio_config_t* config,
audio_output_flags_t flags,
audio_port_handle_t* selectedDeviceId,
audio_port_handle_t* portId,
std::vector<audio_io_handle_t>* secondaryOutputs,
bool *isSpatialized) {
if (attr == nullptr) {
ALOGE("%s NULL audio attributes", __func__);
return BAD_VALUE;
}
if (output == nullptr) {
ALOGE("%s NULL output - shouldn't happen", __func__);
return BAD_VALUE;
}
if (selectedDeviceId == nullptr) {
ALOGE("%s NULL selectedDeviceId - shouldn't happen", __func__);
return BAD_VALUE;
}
if (portId == nullptr) {
ALOGE("%s NULL portId - shouldn't happen", __func__);
return BAD_VALUE;
}
if (secondaryOutputs == nullptr) {
ALOGE("%s NULL secondaryOutputs - shouldn't happen", __func__);
return BAD_VALUE;
}
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); // get the IAudioPolicyService
if (aps == 0) return NO_INIT;
media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_attributes_t_AudioAttributesInternal(*attr));
int32_t sessionAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
AudioConfig configAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_config_t_AudioConfig(*config, false /*isInput*/));
int32_t flagsAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
int32_t selectedDeviceIdAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_port_handle_t_int32_t(*selectedDeviceId));
media::GetOutputForAttrResponse responseAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getOutputForAttr(attrAidl, sessionAidl, attributionSource, configAidl, flagsAidl,
selectedDeviceIdAidl, &responseAidl))); // call IAudioPolicyService::getOutputForAttr() to obtain responseAidl
*output = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_io_handle_t(responseAidl.output));
if (stream != nullptr) {
*stream = VALUE_OR_RETURN_STATUS(
aidl2legacy_AudioStreamType_audio_stream_type_t(responseAidl.stream));
}
*selectedDeviceId = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_port_handle_t(responseAidl.selectedDeviceId));
*portId = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_port_handle_t(responseAidl.portId));
*secondaryOutputs = VALUE_OR_RETURN_STATUS(convertContainer<std::vector<audio_io_handle_t>>(
responseAidl.secondaryOutputs, aidl2legacy_int32_t_audio_io_handle_t));
*isSpatialized = responseAidl.isSpatialized;
return OK;
}
AudioPolicyService getOutputForAttr
AudioSystem::getOutputForAttr() reaches AudioPolicyService::getOutputForAttr() over binder:
//frameworks/av/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
Status AudioPolicyService::getOutputForAttr(const media::AudioAttributesInternal& attrAidl,
int32_t sessionAidl,
const AttributionSourceState& attributionSource,
const AudioConfig& configAidl,
int32_t flagsAidl,
int32_t selectedDeviceIdAidl,
media::GetOutputForAttrResponse* _aidl_return)
{
audio_attributes_t attr = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
audio_session_t session = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_session_t(sessionAidl));
audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
audio_config_t config = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioConfig_audio_config_t(configAidl, false /*isInput*/));
audio_output_flags_t flags = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_output_flags_t_mask(flagsAidl));
audio_port_handle_t selectedDeviceId = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_port_handle_t(selectedDeviceIdAidl));
audio_io_handle_t output;
audio_port_handle_t portId;
std::vector<audio_io_handle_t> secondaryOutputs;
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
}
RETURN_IF_BINDER_ERROR(
binderStatusFromStatusT(AudioValidator::validateAudioAttributes(attr, "68953950")));
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr, attributionSource)));
ALOGV("%s()", __func__);
Mutex::Autolock _l(mLock);
// TODO b/182392553: refactor or remove
AttributionSourceState adjAttributionSource = attributionSource;
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
if (!isAudioServerOrMediaServerUid(callingUid) || attributionSource.uid == -1) {
int32_t callingUidAidl = VALUE_OR_RETURN_BINDER_STATUS(
legacy2aidl_uid_t_int32_t(callingUid));
ALOGW_IF(attributionSource.uid != -1 && attributionSource.uid != callingUidAidl,
"%s uid %d tried to pass itself off as %d", __func__,
callingUidAidl, attributionSource.uid);
adjAttributionSource.uid = callingUidAidl;
}
if (!mPackageManager.allowPlaybackCapture(VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_uid_t(adjAttributionSource.uid)))) {
attr.flags = static_cast<audio_flags_mask_t>(attr.flags | AUDIO_FLAG_NO_MEDIA_PROJECTION);
}
if (((attr.flags & (AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE)) != 0)
&& !bypassInterruptionPolicyAllowed(adjAttributionSource)) {
attr.flags = static_cast<audio_flags_mask_t>(
attr.flags & ~(AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE));
}
if (attr.content_type == AUDIO_CONTENT_TYPE_ULTRASOUND) {
if (!accessUltrasoundAllowed(adjAttributionSource)) {
ALOGE("%s: permission denied: ultrasound not allowed for uid %d pid %d",
__func__, adjAttributionSource.uid, adjAttributionSource.pid);
return binderStatusFromStatusT(PERMISSION_DENIED);
}
}
AutoCallerClear acc;
AudioPolicyInterface::output_type_t outputType;
bool isSpatialized = false;
status_t result = mAudioPolicyManager->getOutputForAttr(&attr, &output, session, // call AudioPolicyManager::getOutputForAttr()
&stream,
adjAttributionSource,
&config,
&flags, &selectedDeviceId, &portId,
&secondaryOutputs,
&outputType,
&isSpatialized);
// FIXME: Introduce a way to check for the telephony device before opening the output
if (result == NO_ERROR) {
// enforce permission (if any) required for each type of input
switch (outputType) {
case AudioPolicyInterface::API_OUTPUT_LEGACY:
break;
case AudioPolicyInterface::API_OUTPUT_TELEPHONY_TX:
if (((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)
&& !callAudioInterceptionAllowed(adjAttributionSource)) {
ALOGE("%s() permission denied: call redirection not allowed for uid %d",
__func__, adjAttributionSource.uid);
result = PERMISSION_DENIED;
} else if (!modifyPhoneStateAllowed(adjAttributionSource)) {
ALOGE("%s() permission denied: modify phone state not allowed for uid %d",
__func__, adjAttributionSource.uid);
result = PERMISSION_DENIED;
}
break;
case AudioPolicyInterface::API_OUT_MIX_PLAYBACK:
if (!modifyAudioRoutingAllowed(adjAttributionSource)) {
ALOGE("%s() permission denied: modify audio routing not allowed for uid %d",
__func__, adjAttributionSource.uid);
result = PERMISSION_DENIED;
}
break;
case AudioPolicyInterface::API_OUTPUT_INVALID:
default:
LOG_ALWAYS_FATAL("%s() encountered an invalid output type %d",
__func__, (int)outputType);
}
}
if (result == NO_ERROR) {
sp<AudioPlaybackClient> client =
new AudioPlaybackClient(attr, output, adjAttributionSource, session, // create the AudioPlaybackClient
portId, selectedDeviceId, stream, isSpatialized);
mAudioPlaybackClients.add(portId, client);
_aidl_return->output = VALUE_OR_RETURN_BINDER_STATUS(
legacy2aidl_audio_io_handle_t_int32_t(output));
_aidl_return->stream = VALUE_OR_RETURN_BINDER_STATUS(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
_aidl_return->selectedDeviceId = VALUE_OR_RETURN_BINDER_STATUS(
legacy2aidl_audio_port_handle_t_int32_t(selectedDeviceId));
_aidl_return->portId = VALUE_OR_RETURN_BINDER_STATUS(
legacy2aidl_audio_port_handle_t_int32_t(portId));
_aidl_return->secondaryOutputs = VALUE_OR_RETURN_BINDER_STATUS(
convertContainer<std::vector<int32_t>>(secondaryOutputs,
legacy2aidl_audio_io_handle_t_int32_t));
_aidl_return->isSpatialized = isSpatialized;
}
return binderStatusFromStatusT(result);
}
AudioPolicyManager getOutputForAttr
AudioPolicyService::getOutputForAttr() in turn calls AudioPolicyManager::getOutputForAttr():
//frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
const AttributionSourceState& attributionSource,
const audio_config_t *config,
audio_output_flags_t *flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId,
std::vector<audio_io_handle_t> *secondaryOutputs,
output_type_t *outputType,
bool *isSpatialized)
{
// The supplied portId must be AUDIO_PORT_HANDLE_NONE
if (*portId != AUDIO_PORT_HANDLE_NONE) {
return INVALID_OPERATION;
}
const uid_t uid = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_uid_t(attributionSource.uid));
const audio_port_handle_t requestedPortId = *selectedDeviceId;
audio_attributes_t resultAttr;
bool isRequestedDeviceForExclusiveUse = false;
std::vector<sp<AudioPolicyMix>> secondaryMixes;
const sp<DeviceDescriptor> requestedDevice =
mAvailableOutputDevices.getDeviceFromId(requestedPortId); // look up the output device by port ID
// Prevent from storing invalid requested device id in clients
const audio_port_handle_t sanitizedRequestedPortId =
requestedDevice != nullptr ? requestedPortId : AUDIO_PORT_HANDLE_NONE;
*selectedDeviceId = sanitizedRequestedPortId;
status_t status = getOutputForAttrInt(&resultAttr, output, session, attr, stream, uid, // call getOutputForAttrInt()
config, flags, selectedDeviceId, &isRequestedDeviceForExclusiveUse,
secondaryOutputs != nullptr ? &secondaryMixes : nullptr, outputType, isSpatialized);
if (status != NO_ERROR) {
return status;
}
std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryOutputDescs;
if (secondaryOutputs != nullptr) {
for (auto &secondaryMix : secondaryMixes) {
sp<SwAudioOutputDescriptor> outputDesc = secondaryMix->getOutput();
if (outputDesc != nullptr &&
outputDesc->mIoHandle != AUDIO_IO_HANDLE_NONE) {
secondaryOutputs->push_back(outputDesc->mIoHandle);
weakSecondaryOutputDescs.push_back(outputDesc);
}
}
}
audio_config_base_t clientConfig = {.sample_rate = config->sample_rate,
.channel_mask = config->channel_mask,
.format = config->format,
};
*portId = PolicyAudioPort::getNextUniqueId();
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output);
sp<TrackClientDescriptor> clientDesc =
new TrackClientDescriptor(*portId, uid, session, resultAttr, clientConfig,
sanitizedRequestedPortId, *stream,
mEngine->getProductStrategyForAttributes(resultAttr),
toVolumeSource(resultAttr),
*flags, isRequestedDeviceForExclusiveUse,
std::move(weakSecondaryOutputDescs),
outputDesc->mPolicyMix);
outputDesc->addClient(clientDesc);
ALOGV("%s() returns output %d requestedPortId %d selectedDeviceId %d for port ID %d", __func__,
*output, requestedPortId, *selectedDeviceId, *portId);
return NO_ERROR;
}
AudioPolicyManager::getOutputForAttr() then calls getOutputForAttrInt():
//frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
status_t AudioPolicyManager::getOutputForAttrInt(
audio_attributes_t *resultAttr,
audio_io_handle_t *output,
audio_session_t session,
const audio_attributes_t *attr,
audio_stream_type_t *stream,
uid_t uid,
const audio_config_t *config,
audio_output_flags_t *flags,
audio_port_handle_t *selectedDeviceId,
bool *isRequestedDeviceForExclusiveUse,
std::vector<sp<AudioPolicyMix>> *secondaryMixes,
output_type_t *outputType,
bool *isSpatialized)
{
DeviceVector outputDevices;
const audio_port_handle_t requestedPortId = *selectedDeviceId;
DeviceVector msdDevices = getMsdAudioOutDevices();
const sp<DeviceDescriptor> requestedDevice =
mAvailableOutputDevices.getDeviceFromId(requestedPortId); // get the requested output device
*outputType = API_OUTPUT_INVALID;
*isSpatialized = false;
status_t status = getAudioAttributes(resultAttr, attr, *stream);
if (status != NO_ERROR) {
return status;
}
if (auto it = mAllowedCapturePolicies.find(uid); it != end(mAllowedCapturePolicies)) {
resultAttr->flags = static_cast<audio_flags_mask_t>(resultAttr->flags | it->second);
}
*stream = mEngine->getStreamTypeForAttributes(*resultAttr);
ALOGV("%s() attributes=%s stream=%s session %d selectedDeviceId %d", __func__,
toString(*resultAttr).c_str(), toString(*stream).c_str(), session, requestedPortId);
// The primary output is the explicit routing (eg. setPreferredDevice) if specified,
// otherwise, fallback to the dynamic policies, if none match, query the engine.
// Secondary outputs are always found by dynamic policies as the engine do not support them
sp<AudioPolicyMix> primaryMix;
status = mPolicyMixes.getOutputForAttr(*resultAttr, uid, *flags, primaryMix, secondaryMixes);
if (status != OK) {
return status;
}
// Explicit routing is higher priority than any dynamic policy primary output
bool usePrimaryOutputFromPolicyMixes = requestedDevice == nullptr && primaryMix != nullptr;
// FIXME: in case of RENDER policy, the output capabilities should be checked
if ((usePrimaryOutputFromPolicyMixes
|| (secondaryMixes != nullptr && !secondaryMixes->empty()))
&& !audio_is_linear_pcm(config->format)) {
ALOGD("%s: rejecting request as dynamic audio policy only support pcm", __func__);
return BAD_VALUE;
}
if (usePrimaryOutputFromPolicyMixes) {
sp<DeviceDescriptor> deviceDesc =
mAvailableOutputDevices.getDevice(primaryMix->mDeviceType,
primaryMix->mDeviceAddress,
AUDIO_FORMAT_DEFAULT);
sp<SwAudioOutputDescriptor> policyDesc = primaryMix->getOutput();
if (deviceDesc != nullptr
&& (policyDesc == nullptr || (policyDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT))) {
audio_io_handle_t newOutput;
status = openDirectOutput(
*stream, session, config,
(audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT),
DeviceVector(deviceDesc), &newOutput); // open a direct output
if (status != NO_ERROR) {
policyDesc = nullptr;
} else {
policyDesc = mOutputs.valueFor(newOutput);
primaryMix->setOutput(policyDesc);
}
}
if (policyDesc != nullptr) {
policyDesc->mPolicyMix = primaryMix;
*output = policyDesc->mIoHandle;
*selectedDeviceId = deviceDesc != 0 ? deviceDesc->getId() : AUDIO_PORT_HANDLE_NONE;
ALOGV("getOutputForAttr() returns output %d", *output);
if (resultAttr->usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
*outputType = API_OUT_MIX_PLAYBACK;
} else {
*outputType = API_OUTPUT_LEGACY;
}
return NO_ERROR;
}
}
// Virtual sources must always be dynamically or explicitly routed
if (resultAttr->usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
return BAD_VALUE;
}
// explicit routing managed by getDeviceForStrategy in APM is now handled by engine
// in order to let the choice of the order to future vendor engine
outputDevices = mEngine->getOutputDevicesForAttributes(*resultAttr, requestedDevice, false); // ask the engine for the output devices matching the attributes
if ((resultAttr->flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
*flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
}
// Set incall music only if device was explicitly set, and fallback to the device which is
// chosen by the engine if not.
// FIXME: provide a more generic approach which is not device specific and move this back
// to getOutputForDevice.
// TODO: Remove check of AUDIO_STREAM_MUSIC once migration is completed on the app side.
if (outputDevices.onlyContainsDevicesWithType(AUDIO_DEVICE_OUT_TELEPHONY_TX) &&
(*stream == AUDIO_STREAM_MUSIC || resultAttr->usage == AUDIO_USAGE_VOICE_COMMUNICATION) &&
audio_is_linear_pcm(config->format) &&
isCallAudioAccessible()) {
if (requestedPortId != AUDIO_PORT_HANDLE_NONE) {
*flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
*isRequestedDeviceForExclusiveUse = true;
}
}
ALOGV("%s() device %s, sampling rate %d, format %#x, channel mask %#x, flags %#x stream %s",
__func__, outputDevices.toString().c_str(), config->sample_rate, config->format,
config->channel_mask, *flags, toString(*stream).c_str());
*output = AUDIO_IO_HANDLE_NONE;
if (!msdDevices.isEmpty()) {
*output = getOutputForDevices(msdDevices, session, resultAttr, config, flags, isSpatialized); // among the already-opened outputs, find the I/O handle for the given stream and devices
if (*output != AUDIO_IO_HANDLE_NONE && setMsdOutputPatches(&outputDevices) == NO_ERROR) {
ALOGV("%s() Using MSD devices %s instead of devices %s",
__func__, msdDevices.toString().c_str(), outputDevices.toString().c_str());
} else {
*output = AUDIO_IO_HANDLE_NONE;
}
}
if (*output == AUDIO_IO_HANDLE_NONE) {
*output = getOutputForDevices(outputDevices, session, resultAttr, config,
flags, isSpatialized, resultAttr->flags & AUDIO_FLAG_MUTE_HAPTIC);
}
if (*output == AUDIO_IO_HANDLE_NONE) {
return INVALID_OPERATION;
}
*selectedDeviceId = getFirstDeviceId(outputDevices);
for (auto &outputDevice : outputDevices) {
if (outputDevice->getId() == getConfig().getDefaultOutputDevice()->getId()) {
*selectedDeviceId = outputDevice->getId();
break;
}
}
if (outputDevices.onlyContainsDevicesWithType(AUDIO_DEVICE_OUT_TELEPHONY_TX)) {
*outputType = API_OUTPUT_TELEPHONY_TX;
} else {
*outputType = API_OUTPUT_LEGACY;
}
ALOGV("%s returns output %d selectedDeviceId %d", __func__, *output, *selectedDeviceId);
return NO_ERROR;
}
The main steps here are:
- Call mAvailableOutputDevices.getDeviceFromId() to look up the requested output device.
- Call openDirectOutput() to open the output device (for the dynamic-policy direct case).
- Call mEngine->getOutputDevicesForAttributes() to get the output devices selected by the engine.
- Call getOutputForDevices() to find, among the already-opened outputs, the I/O handle matching the stream and output devices.
Let's continue with the flow that opens the audio device:
AudioPolicyManager openDirectOutput
We start with AudioPolicyManager::openDirectOutput():
//frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
status_t AudioPolicyManager::openDirectOutput(audio_stream_type_t stream,
audio_session_t session,
const audio_config_t *config,
audio_output_flags_t flags,
const DeviceVector &devices,
audio_io_handle_t *output) {
*output = AUDIO_IO_HANDLE_NONE;
// skip direct output selection if the request can obviously be attached to a mixed output
// and not explicitly requested
if (((flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
audio_is_linear_pcm(config->format) && config->sample_rate <= SAMPLE_RATE_HZ_MAX &&
audio_channel_count_from_out_mask(config->channel_mask) <= 2) {
return NAME_NOT_FOUND;
}
// Do not allow offloading if one non offloadable effect is enabled or MasterMono is enabled.
// This prevents creating an offloaded track and tearing it down immediately after start
// when audioflinger detects there is an active non offloadable effect.
// FIXME: We should check the audio session here but we do not have it in this context.
// This may prevent offloading in rare situations where effects are left active by apps
// in the background.
sp<IOProfile> profile;
if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
!(mEffects.isNonOffloadableEffectEnabled() || mMasterMono)) {
profile = getProfileForOutput(
devices, config->sample_rate, config->format, config->channel_mask,
flags, true /* directOnly */);
}
if (profile == nullptr) {
return NAME_NOT_FOUND;
}
// exclusive outputs for MMAP and Offload are enforced by different session ids.
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (!desc->isDuplicated() && (profile == desc->mProfile)) {
// reuse direct output if currently open by the same client
// and configured with same parameters
if ((config->sample_rate == desc->getSamplingRate()) &&
(config->format == desc->getFormat()) &&
(config->channel_mask == desc->getChannelMask()) &&
(session == desc->mDirectClientSession)) {
desc->mDirectOpenCount++;
ALOGV("%s reusing direct output %d for session %d", __func__,
mOutputs.keyAt(i), session);
*output = mOutputs.keyAt(i);
return NO_ERROR;
}
}
}
if (!profile->canOpenNewIo()) {
return NAME_NOT_FOUND;
}
sp<SwAudioOutputDescriptor> outputDesc =
new SwAudioOutputDescriptor(profile, mpClientInterface); // create a SwAudioOutputDescriptor
// An MSD patch may be using the only output stream that can service this request. Release
// all MSD patches to prioritize this request over any active output on MSD.
releaseMsdOutputPatches(devices);
status_t status =
outputDesc->open(config, nullptr /* mixerConfig */, devices, stream, flags, output); // call SwAudioOutputDescriptor::open()
// only accept an output with the requested parameters
if (status != NO_ERROR ||
(config->sample_rate != 0 && config->sample_rate != outputDesc->getSamplingRate()) ||
(config->format != AUDIO_FORMAT_DEFAULT && config->format != outputDesc->getFormat()) ||
(config->channel_mask != 0 && config->channel_mask != outputDesc->getChannelMask())) {
ALOGV("%s failed opening direct output: output %d sample rate %d %d,"
"format %d %d, channel mask %04x %04x", __func__, *output, config->sample_rate,
outputDesc->getSamplingRate(), config->format, outputDesc->getFormat(),
config->channel_mask, outputDesc->getChannelMask());
if (*output != AUDIO_IO_HANDLE_NONE) {
outputDesc->close();
}
// fall back to mixer output if possible when the direct output could not be open
if (audio_is_linear_pcm(config->format) &&
config->sample_rate <= SAMPLE_RATE_HZ_MAX) {
return NAME_NOT_FOUND;
}
*output = AUDIO_IO_HANDLE_NONE;
return BAD_VALUE;
}
outputDesc->mDirectOpenCount = 1;
outputDesc->mDirectClientSession = session;
addOutput(*output, outputDesc);
mPreviousOutputs = mOutputs;
ALOGV("%s returns new direct output %d", __func__, *output);
mpClientInterface->onAudioPortListUpdate();
return NO_ERROR;
}
openDirectOutput() calls SwAudioOutputDescriptor::open():
//frameworks/av/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
AudioPolicyClientInterface * const mClientInterface;
//frameworks/av/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
status_t SwAudioOutputDescriptor::open(const audio_config_t *halConfig,
const audio_config_base_t *mixerConfig,
const DeviceVector &devices,
audio_stream_type_t stream,
audio_output_flags_t flags,
audio_io_handle_t *output)
{
mDevices = devices;
sp<DeviceDescriptor> device = devices.getDeviceForOpening();
LOG_ALWAYS_FATAL_IF(device == nullptr,
"%s failed to get device descriptor for opening "
"with the requested devices, all device types: %s",
__func__, dumpDeviceTypes(devices.types()).c_str());
audio_config_t lHalConfig;
if (halConfig == nullptr) {
lHalConfig = AUDIO_CONFIG_INITIALIZER;
lHalConfig.sample_rate = mSamplingRate;
lHalConfig.channel_mask = mChannelMask;
lHalConfig.format = mFormat;
} else {
lHalConfig = *halConfig;
}
// if the selected profile is offloaded and no offload info was specified,
// create a default one
if ((mProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) &&
lHalConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
lHalConfig.offload_info = AUDIO_INFO_INITIALIZER;
lHalConfig.offload_info.sample_rate = lHalConfig.sample_rate;
lHalConfig.offload_info.channel_mask = lHalConfig.channel_mask;
lHalConfig.offload_info.format = lHalConfig.format;
lHalConfig.offload_info.stream_type = stream;
}
audio_config_base_t lMixerConfig;
if (mixerConfig == nullptr) {
lMixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
lMixerConfig.sample_rate = lHalConfig.sample_rate;
lMixerConfig.channel_mask = lHalConfig.channel_mask;
lMixerConfig.format = lHalConfig.format;
} else {
lMixerConfig = *mixerConfig;
}
mFlags = (audio_output_flags_t)(mFlags | flags);
// If no mixer config is specified for a spatializer output, default to 5.1 for proper
// configuration of the final downmixer or spatializer
if ((mFlags & AUDIO_OUTPUT_FLAG_SPATIALIZER) != 0
&& mixerConfig == nullptr) {
lMixerConfig.channel_mask = AUDIO_CHANNEL_OUT_5POINT1;
}
ALOGV("opening output for device %s profile %p name %s",
mDevices.toString().c_str(), mProfile.get(), mProfile->getName().c_str());
status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(), // call AudioPolicyClientInterface::openOutput()
output,
&lHalConfig,
&lMixerConfig,
device,
&mLatency,
mFlags);
if (status == NO_ERROR) {
LOG_ALWAYS_FATAL_IF(*output == AUDIO_IO_HANDLE_NONE,
"%s openOutput returned output handle %d for device %s, "
"selected device %s for opening",
__FUNCTION__, *output, devices.toString().c_str(),
device->toString().c_str());
mSamplingRate = lHalConfig.sample_rate;
mChannelMask = lHalConfig.channel_mask;
mFormat = lHalConfig.format;
mMixerChannelMask = lMixerConfig.channel_mask;
mId = PolicyAudioPort::getNextUniqueId();
mIoHandle = *output;
mProfile->curOpenCount++;
}
return status;
}
The AudioPolicyClientInterface::openOutput() call lands in AudioPolicyService::AudioPolicyClient::openOutput():
// frameworks/av/services/audiopolicy/service/AudioPolicyClientImpl.cpp
status_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *halConfig,
audio_config_base_t *mixerConfig,
const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags)
{
sp<IAudioFlinger> af = AudioSystem::get_audio_flinger(); // obtain the IAudioFlinger
if (af == 0) {
ALOGW("%s: could not get AudioFlinger", __func__);
return PERMISSION_DENIED;
}
media::OpenOutputRequest request;
media::OpenOutputResponse response;
request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
request.halConfig = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_config_t_AudioConfig(*halConfig, false /*isInput*/));
request.mixerConfig = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_config_base_t_AudioConfigBase(*mixerConfig, false /*isInput*/));
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_DeviceDescriptorBase(device));
request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
status_t status = af->openOutput(request, &response); // call IAudioFlinger::openOutput()
if (status == OK) {
*output = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_io_handle_t(response.output));
*halConfig = VALUE_OR_RETURN_STATUS(
aidl2legacy_AudioConfig_audio_config_t(response.config, false /*isInput*/));
*latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(response.latencyMs));
}
return status;
}
AudioFlinger openOutput
This calls into AudioFlinger::openOutput():
//frameworks/av/services/audioflinger/AudioFlinger.cpp
status_t AudioFlinger::openOutput(const media::OpenOutputRequest& request,
media::OpenOutputResponse* response)
{
audio_module_handle_t module = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_module_handle_t(request.module));
audio_config_t halConfig = VALUE_OR_RETURN_STATUS(
aidl2legacy_AudioConfig_audio_config_t(request.halConfig, false /*isInput*/));
audio_config_base_t mixerConfig = VALUE_OR_RETURN_STATUS(
aidl2legacy_AudioConfigBase_audio_config_base_t(request.mixerConfig, false/*isInput*/));
sp<DeviceDescriptorBase> device = VALUE_OR_RETURN_STATUS(
aidl2legacy_DeviceDescriptorBase(request.device));
audio_output_flags_t flags = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_output_flags_t_mask(request.flags));
audio_io_handle_t output;
uint32_t latencyMs;
ALOGI("openOutput() this %p, module %d Device %s, SamplingRate %d, Format %#08x, "
"Channels %#x, flags %#x",
this, module,
device->toString().c_str(),
halConfig.sample_rate,
halConfig.format,
halConfig.channel_mask,
flags);
audio_devices_t deviceType = device->type();
const String8 address = String8(device->address().c_str());
if (deviceType == AUDIO_DEVICE_NONE) {
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
sp<ThreadBase> thread = openOutput_l(module, &output, &halConfig,
&mixerConfig, deviceType, address, flags);
if (thread != 0) {
if ((flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0) {
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
latencyMs = playbackThread->latency();
// notify client processes of the new output creation
playbackThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
// the first primary output opened designates the primary hw device if no HW module
// named "primary" was already loaded.
AutoMutex lock(mHardwareLock);
if ((mPrimaryHardwareDev == nullptr) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
ALOGI("Using module %d as the primary audio interface", module);
mPrimaryHardwareDev = playbackThread->getOutput()->audioHwDev;
mHardwareStatus = AUDIO_HW_SET_MODE;
mPrimaryHardwareDev->hwDevice()->setMode(mMode);
mHardwareStatus = AUDIO_HW_IDLE;
}
} else {
MmapThread *mmapThread = (MmapThread *)thread.get();
mmapThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
}
response->output = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
response->config = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_config_t_AudioConfig(halConfig, false /*isInput*/));
response->latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(latencyMs));
response->flags = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
return NO_ERROR;
}
return NO_INIT;
}
openOutput() calls openOutput_l():
//frameworks/av/services/audioflinger/AudioFlinger.cpp
sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *halConfig,
audio_config_base_t *mixerConfig __unused,
audio_devices_t deviceType,
const String8& address,
audio_output_flags_t flags)
{
AudioHwDevice *outHwDev = findSuitableHwDev_l(module, deviceType); // find a suitable output hardware device
if (outHwDev == NULL) {
return nullptr;
}
if (*output == AUDIO_IO_HANDLE_NONE) {
*output = nextUniqueId(AUDIO_UNIQUE_ID_USE_OUTPUT);
} else {
// Audio Policy does not currently request a specific output handle.
// If this is ever needed, see openInput_l() for example code.
ALOGE("openOutput_l requested output handle %d is not AUDIO_IO_HANDLE_NONE", *output);
return nullptr;
}
#ifndef MULTICHANNEL_EFFECT_CHAIN
if (flags & AUDIO_OUTPUT_FLAG_SPATIALIZER) {
ALOGE("openOutput_l() cannot create spatializer thread "
"without #define MULTICHANNEL_EFFECT_CHAIN");
return nullptr;
}
#endif
mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
// FOR TESTING ONLY:
// This if statement allows overriding the audio policy settings
// and forcing a specific format or channel mask to the HAL/Sink device for testing.
if (!(flags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT))) {
// Check only for Normal Mixing mode
if (kEnableExtendedPrecision) {
// Specify format (uncomment one below to choose)
//halConfig->format = AUDIO_FORMAT_PCM_FLOAT;
//halConfig->format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
//halConfig->format = AUDIO_FORMAT_PCM_32_BIT;
//halConfig->format = AUDIO_FORMAT_PCM_8_24_BIT;
// ALOGV("openOutput_l() upgrading format to %#08x", halConfig->format);
}
if (kEnableExtendedChannels) {
// Specify channel mask (uncomment one below to choose)
//halConfig->channel_mask = audio_channel_out_mask_from_count(4); // for USB 4ch
//halConfig->channel_mask = audio_channel_mask_from_representation_and_bits(
// AUDIO_CHANNEL_REPRESENTATION_INDEX, (1 << 4) - 1); // another 4ch example
}
}
AudioStreamOut *outputStream = NULL;
status_t status = outHwDev->openOutputStream( // open an output stream on the hardware device
&outputStream,
*output,
deviceType,
flags,
halConfig,
address.string());
mHardwareStatus = AUDIO_HW_IDLE;
if (status == NO_ERROR) {
if (flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) {
sp<MmapPlaybackThread> thread =
new MmapPlaybackThread(this, *output, outHwDev, outputStream, mSystemReady); // create an MmapPlaybackThread
mMmapThreads.add(*output, thread);
ALOGV("openOutput_l() created mmap playback thread: ID %d thread %p",
*output, thread.get());
return thread;
} else {
sp<PlaybackThread> thread;
if (flags & AUDIO_OUTPUT_FLAG_SPATIALIZER) {
thread = new SpatializerThread(this, outputStream, *output, // create a SpatializerThread
mSystemReady, mixerConfig);
ALOGV("openOutput_l() created spatializer output: ID %d thread %p",
*output, thread.get());
} else if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
thread = new OffloadThread(this, outputStream, *output, mSystemReady); // create an OffloadThread
ALOGV("openOutput_l() created offload output: ID %d thread %p",
*output, thread.get());
} else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
|| !isValidPcmSinkFormat(halConfig->format)
|| !isValidPcmSinkChannelMask(halConfig->channel_mask)) {
thread = new DirectOutputThread(this, outputStream, *output, mSystemReady); // create a DirectOutputThread
ALOGV("openOutput_l() created direct output: ID %d thread %p",
*output, thread.get());
} else {
thread = new MixerThread(this, outputStream, *output, mSystemReady); // create a MixerThread
ALOGV("openOutput_l() created mixer output: ID %d thread %p",
*output, thread.get());
}
mPlaybackThreads.add(*output, thread);
struct audio_patch patch;
mPatchPanel.notifyStreamOpened(outHwDev, *output, &patch);
if (thread->isMsdDevice()) {
thread->setDownStreamPatch(&patch);
}
return thread;
}
}
return nullptr;
}
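To recap the branches above, openOutput_l() picks the playback thread type from the output flags, with a fallback to DirectOutputThread for non-PCM sinks. A condensed sketch of just that decision (here validPcmSink stands for the isValidPcmSinkFormat/isValidPcmSinkChannelMask checks; the real code of course also records the thread in mMmapThreads or mPlaybackThreads):
#include <system/audio.h>
// Condensed restatement of the branch priority in openOutput_l().
const char* playbackThreadTypeFor(audio_output_flags_t flags, bool validPcmSink) {
    if (flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) return "MmapPlaybackThread";
    if (flags & AUDIO_OUTPUT_FLAG_SPATIALIZER) return "SpatializerThread";
    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) return "OffloadThread";
    if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) || !validPcmSink) return "DirectOutputThread";
    return "MixerThread";
}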
openOutput_l() calls AudioHwDevice::openOutputStream():
//frameworks/av/services/audioflinger/AudioHwDevice.cpp
status_t AudioHwDevice::openOutputStream(
AudioStreamOut **ppStreamOut,
audio_io_handle_t handle,
audio_devices_t deviceType,
audio_output_flags_t flags,
struct audio_config *config,
const char *address)
{
struct audio_config originalConfig = *config;
AudioStreamOut *outputStream = new AudioStreamOut(this, flags); // create an AudioStreamOut object
// Try to open the HAL first using the current format.
ALOGV("openOutputStream(), try "
" sampleRate %d, Format %#x, "
"channelMask %#x",
config->sample_rate,
config->format,
config->channel_mask);
status_t status = outputStream->open(handle, deviceType, config, address); // open the outputStream
if (status != NO_ERROR) {
delete outputStream;
outputStream = NULL;
// FIXME Look at any modification to the config.
// The HAL might modify the config to suggest a wrapped format.
// Log this so we can see what the HALs are doing.
ALOGI("openOutputStream(), HAL returned"
" sampleRate %d, Format %#x, "
"channelMask %#x, status %d",
config->sample_rate,
config->format,
config->channel_mask,
status);
// If the data is encoded then try again using wrapped PCM.
bool wrapperNeeded = !audio_has_proportional_frames(originalConfig.format)
&& ((flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0)
&& ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0);
if (wrapperNeeded) {
if (SPDIFEncoder::isFormatSupported(originalConfig.format)) {
outputStream = new SpdifStreamOut(this, flags, originalConfig.format);
status = outputStream->open(handle, deviceType, &originalConfig, address);
if (status != NO_ERROR) {
ALOGE("ERROR - openOutputStream(), SPDIF open returned %d",
status);
delete outputStream;
outputStream = NULL;
}
} else {
ALOGE("ERROR - openOutputStream(), SPDIFEncoder does not support format 0x%08x",
originalConfig.format);
}
}
}
*ppStreamOut = outputStream;
return status;
}
AudioStreamOut open
openOutputStream() creates an AudioStreamOut object and opens it:
//frameworks/av/services/audioflinger/AudioStreamOut.cpp
sp<DeviceHalInterface> mHwDevice;
sp<DeviceHalInterface> hwDevice() const { return mHwDevice; }
sp<DeviceHalInterface> AudioStreamOut::hwDev() const
{
return audioHwDev->hwDevice();
}
status_t AudioStreamOut::open(
audio_io_handle_t handle,
audio_devices_t deviceType,
struct audio_config *config,
const char *address)
{
sp<StreamOutHalInterface> outStream;
audio_output_flags_t customFlags = (config->format == AUDIO_FORMAT_IEC61937)
? (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO)
: flags;
int status = hwDev()->openOutputStream( // calls DeviceHalHidl::openOutputStream()
handle,
deviceType,
customFlags,
config,
address,
&outStream);
ALOGV("AudioStreamOut::open(), HAL returned "
" stream %p, sampleRate %d, Format %#x, "
"channelMask %#x, status %d",
outStream.get(),
config->sample_rate,
config->format,
config->channel_mask,
status);
// Some HALs may not recognize AUDIO_FORMAT_IEC61937. But if we declare
// it as PCM then it will probably work.
if (status != NO_ERROR && config->format == AUDIO_FORMAT_IEC61937) {
struct audio_config customConfig = *config;
customConfig.format = AUDIO_FORMAT_PCM_16_BIT;
status = hwDev()->openOutputStream(
handle,
deviceType,
customFlags,
&customConfig,
address,
&outStream);
ALOGV("AudioStreamOut::open(), treat IEC61937 as PCM, status = %d", status);
}
if (status == NO_ERROR) {
stream = outStream;
mHalFormatHasProportionalFrames = audio_has_proportional_frames(config->format);
status = stream->getFrameSize(&mHalFrameSize);
LOG_ALWAYS_FATAL_IF(status != OK, "Error retrieving frame size from HAL: %d", status);
LOG_ALWAYS_FATAL_IF(mHalFrameSize <= 0, "Error frame size was %zu but must be greater than"
" zero", mHalFrameSize);
}
return status;
}
hwDev()->openOutputStream goes through the DeviceHalInterface interface to DeviceHalHidl::openOutputStream():
//frameworks/av/media/libaudiohal/impl/DeviceHalHidl.h
sp<::android::hardware::audio::CPP_VERSION::IDevice> mDevice;
//frameworks/av/media/libaudiohal/impl/DeviceHalHidl.cpp
status_t DeviceHalHidl::openOutputStream(
audio_io_handle_t handle,
audio_devices_t deviceType,
audio_output_flags_t flags,
struct audio_config *config,
const char *address,
sp<StreamOutHalInterface> *outStream) {
TIME_CHECK();
if (mDevice == 0) return NO_INIT;
DeviceAddress hidlDevice;
if (status_t status = CoreUtils::deviceAddressFromHal(deviceType, address, &hidlDevice);
status != OK) {
return status;
}
AudioConfig hidlConfig;
if (status_t status = HidlUtils::audioConfigFromHal(*config, false /*isInput*/, &hidlConfig);
status != OK) {
return status;
}
#if !(MAJOR_VERSION == 7 && MINOR_VERSION == 1)
//TODO: b/193496180 use spatializer flag at audio HAL when available
if ((flags & AUDIO_OUTPUT_FLAG_SPATIALIZER) != 0) {
flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_SPATIALIZER);
flags = (audio_output_flags_t)
(flags | AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
}
#endif
CoreUtils::AudioOutputFlags hidlFlags;
if (status_t status = CoreUtils::audioOutputFlagsFromHal(flags, &hidlFlags); status != OK) {
return status;
}
Result retval = Result::NOT_INITIALIZED;
#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
Return<void> ret = mDevice->openOutputStream_7_1(
#else
Return<void> ret = mDevice->openOutputStream( // call the Audio HAL's openOutputStream
#endif
handle, hidlDevice, hidlConfig, hidlFlags,
#if MAJOR_VERSION >= 4
{} /* metadata */,
#endif
[&](Result r, const sp<::android::hardware::audio::CPP_VERSION::IStreamOut>& result,
const AudioConfig& suggestedConfig) {
retval = r;
if (retval == Result::OK) {
*outStream = new StreamOutHalHidl(result);
}
HidlUtils::audioConfigToHal(suggestedConfig, config);
});
return processReturn("openOutputStream", ret, retval);
}
Going any further down would take us into the Audio HAL, so we stop the analysis here.
AudioFlinger PlaybackThread createTrack_l
The track created by createTrack_l has type sp<PlaybackThread::Track>, so the call resolves to sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l():
//frameworks/av/services/audioflinger/Threads.cpp
// PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
const sp<AudioFlinger::Client>& client,
audio_stream_type_t streamType,
const audio_attributes_t& attr,
uint32_t *pSampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t *pFrameCount,
size_t *pNotificationFrameCount,
uint32_t notificationsPerBuffer,
float speed,
const sp<IMemory>& sharedBuffer,
audio_session_t sessionId,
audio_output_flags_t *flags,
pid_t creatorPid,
const AttributionSourceState& attributionSource,
pid_t tid,
status_t *status,
audio_port_handle_t portId,
const sp<media::IAudioTrackCallback>& callback,
bool isSpatialized)
{
size_t frameCount = *pFrameCount;
size_t notificationFrameCount = *pNotificationFrameCount;
sp<Track> track;
status_t lStatus;
audio_output_flags_t outputFlags = mOutput->flags;
audio_output_flags_t requestedFlags = *flags;
uint32_t sampleRate;
if (sharedBuffer != 0 && checkIMemory(sharedBuffer) != NO_ERROR) {
lStatus = BAD_VALUE;
goto Exit;
}
if (*pSampleRate == 0) {
*pSampleRate = mSampleRate;
}
sampleRate = *pSampleRate;
// special case for FAST flag considered OK if fast mixer is present
if (hasFastMixer()) {
outputFlags = (audio_output_flags_t)(outputFlags | AUDIO_OUTPUT_FLAG_FAST);
}
// Check if requested flags are compatible with output stream flags
if ((*flags & outputFlags) != *flags) {
ALOGW("createTrack_l(): mismatch between requested flags (%08x) and output flags (%08x)",
*flags, outputFlags);
*flags = (audio_output_flags_t)(*flags & outputFlags);
}
// client expresses a preference for FAST, but we get the final say
if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
if (
// PCM data
audio_is_linear_pcm(format) &&
// TODO: extract as a data library function that checks that a computationally
// expensive downmixer is not required: isFastOutputChannelConversion()
(channelMask == (mChannelMask | mHapticChannelMask) ||
mChannelMask != AUDIO_CHANNEL_OUT_STEREO ||
(channelMask == AUDIO_CHANNEL_OUT_MONO
/* && mChannelMask == AUDIO_CHANNEL_OUT_STEREO */)) &&
// hardware sample rate
(sampleRate == mSampleRate) &&
// normal mixer has an associated fast mixer
hasFastMixer() &&
// there are sufficient fast track slots available
(mFastTrackAvailMask != 0)
// FIXME test that MixerThread for this fast track has a capable output HAL
// FIXME add a permission test also?
) {
// static tracks can have any nonzero framecount, streaming tracks check against minimum.
if (sharedBuffer == 0) {
// read the fast track multiplier property the first time it is needed
int ok = pthread_once(&sFastTrackMultiplierOnce, sFastTrackMultiplierInit);
if (ok != 0) {
ALOGE("%s pthread_once failed: %d", __func__, ok);
}
frameCount = max(frameCount, mFrameCount * sFastTrackMultiplier); // incl framecount 0
}
// check compatibility with audio effects.
{ // scope for mLock
Mutex::Autolock _l(mLock);
for (audio_session_t session : {
AUDIO_SESSION_DEVICE,
AUDIO_SESSION_OUTPUT_STAGE,
AUDIO_SESSION_OUTPUT_MIX,
sessionId,
}) {
sp<EffectChain> chain = getEffectChain_l(session);
if (chain.get() != nullptr) {
audio_output_flags_t old = *flags;
chain->checkOutputFlagCompatibility(flags);
if (old != *flags) {
ALOGV("AUDIO_OUTPUT_FLAGS denied by effect, session=%d old=%#x new=%#x",
(int)session, (int)old, (int)*flags);
}
}
}
}
ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_FAST) != 0,
"AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
frameCount, mFrameCount);
} else {
ALOGD("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
"mFrameCount=%zu format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
"sampleRate=%u mSampleRate=%u "
"hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
sharedBuffer.get(), frameCount, mFrameCount, format, mFormat,
audio_is_linear_pcm(format), channelMask, sampleRate,
mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
*flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
}
}
if (!audio_has_proportional_frames(format)) {
if (sharedBuffer != 0) {
// Same comment as below about ignoring frameCount parameter for set()
frameCount = sharedBuffer->size();
} else if (frameCount == 0) {
frameCount = mNormalFrameCount;
}
if (notificationFrameCount != frameCount) {
notificationFrameCount = frameCount;
}
} else if (sharedBuffer != 0) {
// FIXME: Ensure client side memory buffers need
// not have additional alignment beyond sample
// (e.g. 16 bit stereo accessed as 32 bit frame).
size_t alignment = audio_bytes_per_sample(format);
if (alignment & 1) {
// for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
alignment = 1;
}
uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
size_t frameSize = channelCount * audio_bytes_per_sample(format);
if (channelCount > 1) {
// More than 2 channels does not require stronger alignment than stereo
alignment <<= 1;
}
if (((uintptr_t)sharedBuffer->unsecurePointer() & (alignment - 1)) != 0) {
ALOGE("Invalid buffer alignment: address %p, channel count %u",
sharedBuffer->unsecurePointer(), channelCount);
lStatus = BAD_VALUE;
goto Exit;
}
// When initializing a shared buffer AudioTrack via constructors,
// there's no frameCount parameter.
// But when initializing a shared buffer AudioTrack via set(),
// there _is_ a frameCount parameter. We silently ignore it.
frameCount = sharedBuffer->size() / frameSize;
} else {
size_t minFrameCount = 0;
// For fast tracks we try to respect the application's request for notifications per buffer.
if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
if (notificationsPerBuffer > 0) {
// Avoid possible arithmetic overflow during multiplication.
if (notificationsPerBuffer > SIZE_MAX / mFrameCount) {
ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
notificationsPerBuffer, mFrameCount);
} else {
minFrameCount = mFrameCount * notificationsPerBuffer;
}
}
} else {
// For normal PCM streaming tracks, update minimum frame count.
// Buffer depth is forced to be at least 2 x the normal mixer frame count and
// cover audio hardware latency.
// This is probably too conservative, but legacy application code may depend on it.
// If you change this calculation, also review the start threshold which is related.
uint32_t latencyMs = latency_l();
if (latencyMs == 0) {
ALOGE("Error when retrieving output stream latency");
lStatus = UNKNOWN_ERROR;
goto Exit;
}
minFrameCount = AudioSystem::calculateMinFrameCount(latencyMs, mNormalFrameCount,
mSampleRate, sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);
}
if (frameCount < minFrameCount) {
frameCount = minFrameCount;
}
}
// Make sure that application is notified with sufficient margin before underrun.
// The client can divide the AudioTrack buffer into sub-buffers,
// and expresses this to the server as the notification frame count.
if (sharedBuffer == 0 && audio_is_linear_pcm(format)) {
size_t maxNotificationFrames;
if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
// notify every HAL buffer, regardless of the size of the track buffer
maxNotificationFrames = mFrameCount;
} else {
// Triple buffer the notification period for a triple buffered mixer period;
// otherwise, double buffering for the notification period is fine.
//
// TODO: This should be moved to AudioTrack to modify the notification period
// on AudioTrack::setBufferSizeInFrames() changes.
const int nBuffering =
(uint64_t{frameCount} * mSampleRate)
/ (uint64_t{mNormalFrameCount} * sampleRate) == 3 ? 3 : 2;
maxNotificationFrames = frameCount / nBuffering;
// If client requested a fast track but this was denied, then use the smaller maximum.
if (requestedFlags & AUDIO_OUTPUT_FLAG_FAST) {
size_t maxNotificationFramesFastDenied = FMS_20 * sampleRate / 1000;
if (maxNotificationFrames > maxNotificationFramesFastDenied) {
maxNotificationFrames = maxNotificationFramesFastDenied;
}
}
}
if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
if (notificationFrameCount == 0) {
ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
maxNotificationFrames, frameCount);
} else {
ALOGW("Client adjusted notificationFrames from %zu to %zu for frameCount %zu",
notificationFrameCount, maxNotificationFrames, frameCount);
}
notificationFrameCount = maxNotificationFrames;
}
}
*pFrameCount = frameCount;
*pNotificationFrameCount = notificationFrameCount;
switch (mType) {
case DIRECT:
if (audio_is_linear_pcm(format)) { // TODO maybe use audio_has_proportional_frames()?
if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
ALOGE("createTrack_l() Bad parameter: sampleRate %u format %#x, channelMask 0x%08x "
"for output %p with format %#x",
sampleRate, format, channelMask, mOutput, mFormat);
lStatus = BAD_VALUE;
goto Exit;
}
}
break;
case OFFLOAD:
if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
ALOGE("createTrack_l() Bad parameter: sampleRate %d format %#x, channelMask 0x%08x \""
"for output %p with format %#x",
sampleRate, format, channelMask, mOutput, mFormat);
lStatus = BAD_VALUE;
goto Exit;
}
break;
default:
if (!audio_is_linear_pcm(format)) {
ALOGE("createTrack_l() Bad parameter: format %#x \""
"for output %p with format %#x",
format, mOutput, mFormat);
lStatus = BAD_VALUE;
goto Exit;
}
if (sampleRate > mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
ALOGE("Sample rate out of range: %u mSampleRate %u", sampleRate, mSampleRate);
lStatus = BAD_VALUE;
goto Exit;
}
break;
}
lStatus = initCheck();
if (lStatus != NO_ERROR) {
ALOGE("createTrack_l() audio driver not initialized");
goto Exit;
}
{ // scope for mLock
Mutex::Autolock _l(mLock);
// all tracks in same audio session must share the same routing strategy otherwise
// conflicts will happen when tracks are moved from one output to another by audio policy
// manager
product_strategy_t strategy = getStrategyForStream(streamType);
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> t = mTracks[i];
if (t != 0 && t->isExternalTrack()) {
product_strategy_t actual = getStrategyForStream(t->streamType());
if (sessionId == t->sessionId() && strategy != actual) {
ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
strategy, actual);
lStatus = BAD_VALUE;
goto Exit;
}
}
}
// Set DIRECT flag if current thread is DirectOutputThread. This can
// happen when the playback is rerouted to direct output thread by
// dynamic audio policy.
// Do NOT report the flag changes back to client, since the client
// doesn't explicitly request a direct flag.
audio_output_flags_t trackFlags = *flags;
if (mType == DIRECT) {
trackFlags = static_cast<audio_output_flags_t>(trackFlags | AUDIO_OUTPUT_FLAG_DIRECT);
}
track = new Track(this, client, streamType, attr, sampleRate, format, // create the Track object
channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
sessionId, creatorPid, attributionSource, trackFlags,
TrackBase::TYPE_DEFAULT, portId, SIZE_MAX /*frameCountToBeReady*/,
speed, isSpatialized);
lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
if (lStatus != NO_ERROR) {
ALOGE("createTrack_l() initCheck failed %d; no control block?", lStatus);
// track must be cleared from the caller as the caller has the AF lock
goto Exit;
}
mTracks.add(track); // add the new track to mTracks
{
Mutex::Autolock _atCbL(mAudioTrackCbLock);
if (callback.get() != nullptr) {
mAudioTrackCallbacks.emplace(track, callback);
}
}
sp<EffectChain> chain = getEffectChain_l(sessionId);
if (chain != 0) {
ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
track->setMainBuffer(chain->inBuffer());
chain->setStrategy(getStrategyForStream(track->streamType()));
chain->incTrackCnt();
}
if ((*flags & AUDIO_OUTPUT_FLAG_FAST) && (tid != -1)) {
pid_t callingPid = IPCThreadState::self()->getCallingPid();
// we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
// so ask activity manager to do this on our behalf
sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp, true /*forApp*/);
}
}
lStatus = NO_ERROR;
Exit:
*status = lStatus;
return track;
}
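To make the notification sizing concrete, here is a small self-contained worked example using assumed values (a streaming PCM track, no FAST flag, equal client and sink sample rates, and a buffer of two mixer periods; none of these numbers come from the source):
#include <cstdint>
#include <cstdio>
// Reproduces the nBuffering/maxNotificationFrames arithmetic from
// createTrack_l() with hypothetical numbers, not values from a real device.
int main() {
    const uint64_t frameCount = 2048;        // track buffer, 2 mixer periods (assumed)
    const uint64_t mNormalFrameCount = 1024; // normal mixer period (assumed)
    const uint64_t mSampleRate = 48000;      // sink rate (assumed)
    const uint64_t sampleRate = 48000;       // client rate (assumed)
    // Same formula as the source: triple-buffer the notifications only when
    // the track buffer holds exactly three mixer periods, else double-buffer.
    const int nBuffering =
            (frameCount * mSampleRate) / (mNormalFrameCount * sampleRate) == 3 ? 3 : 2;
    const uint64_t maxNotificationFrames = frameCount / nBuffering;
    // Prints nBuffering=2 maxNotificationFrames=1024: with a zero or oversized
    // client request, the client ends up notified once per mixer period.
    printf("nBuffering=%d maxNotificationFrames=%llu\n",
           nBuffering, (unsigned long long)maxNotificationFrames);
    return 0;
}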
new Track
createTrack_l() creates the Track object with new, which invokes the Track constructor:
// frameworks/av/services/audioflinger/PlaybackTracks.h
// playback track
class Track : public TrackBase, public VolumeProvider {
public:
Track(......);
};
// frameworks/av/services/audioflinger/Tracks.cpp
// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
AudioFlinger::PlaybackThread::Track::Track(
PlaybackThread *thread,
const sp<Client>& client,
audio_stream_type_t streamType,
const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
void *buffer,
size_t bufferSize,
const sp<IMemory>& sharedBuffer,
audio_session_t sessionId,
pid_t creatorPid,
const AttributionSourceState& attributionSource,
audio_output_flags_t flags,
track_type type,
audio_port_handle_t portId,
size_t frameCountToBeReady,
float speed,
bool isSpatialized)
: TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
// TODO: Using unsecurePointer() has some associated security pitfalls
// (see declaration for details).
// Either document why it is safe in this case or address the
// issue (e.g. by copying).
(sharedBuffer != 0) ? sharedBuffer->unsecurePointer() : buffer,
(sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
sessionId, creatorPid,
VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)), true /*isOut*/,
(type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
type,
portId,
std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(portId)),
mFillingUpStatus(FS_INVALID),
// mRetryCount initialized later when needed
mSharedBuffer(sharedBuffer),
mStreamType(streamType),
mMainBuffer(thread->sinkBuffer()),
mAuxBuffer(NULL),
mAuxEffectId(0), mHasVolumeController(false),
mFrameMap(16 /* sink-frame-to-track-frame map memory */),
mVolumeHandler(new media::VolumeHandler(sampleRate)),
mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(attributionSource, attr, id(),
streamType)),
// mSinkTimestamp
mFastIndex(-1),
mCachedVolume(1.0),
/* The track might not play immediately after being active, similarly as if its volume was 0.
* When the track starts playing, its volume will be computed. */
mFinalVolume(0.f),
mResumeToStopping(false),
mFlushHwPending(false),
mFlags(flags),
mSpeed(speed),
mIsSpatialized(isSpatialized)
{
// client == 0 implies sharedBuffer == 0
ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
ALOGV_IF(sharedBuffer != 0, "%s(%d): sharedBuffer: %p, size: %zu",
__func__, mId, sharedBuffer->unsecurePointer(), sharedBuffer->size());
if (mCblk == NULL) {
return;
}
uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {
ALOGE("%s(%d): no more tracks available", __func__, mId);
releaseCblk(); // this makes the track invalid.
return;
}
if (sharedBuffer == 0) {
mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount, // create the AudioTrackServerProxy
mFrameSize, !isExternalTrack(), sampleRate);
} else {
mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
mFrameSize, sampleRate);
}
mServerProxy = mAudioTrackServerProxy;
mServerProxy->setStartThresholdInFrames(frameCountToBeReady); // update the Cblk value
// only allocate a fast track index if we were able to allocate a normal track name
if (flags & AUDIO_OUTPUT_FLAG_FAST) {
// FIXME: Not calling framesReadyIsCalledByMultipleThreads() exposes a potential
// race with setSyncEvent(). However, if we call it, we cannot properly start
// static fast tracks (SoundPool) immediately after stopping.
//mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
int i = __builtin_ctz(thread->mFastTrackAvailMask);
ALOG_ASSERT(0 < i && i < (int)FastMixerState::sMaxFastTracks);
// FIXME This is too eager. We allocate a fast track index before the
// fast track becomes active. Since fast tracks are a scarce resource,
// this means we are potentially denying other more important fast tracks from
// being created. It would be better to allocate the index dynamically.
mFastIndex = i;
thread->mFastTrackAvailMask &= ~(1 << i);
}
mServerLatencySupported = checkServerLatencySupported(format, flags);
#ifdef TEE_SINK
mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
+ "_" + std::to_string(mId) + "_T");
#endif
if (thread->supportsHapticPlayback()) {
// If the track is attached to haptic playback thread, it is potentially to have
// HapticGenerator effect, which will generate haptic data, on the track. In that case,
// external vibration is always created for all tracks attached to haptic playback thread.
mAudioVibrationController = new AudioVibrationController(this);
std::string packageName = attributionSource.packageName.has_value() ?
attributionSource.packageName.value() : "";
mExternalVibration = new os::ExternalVibration(
mUid, packageName, mAttr, mAudioVibrationController);
}
// Once this item is logged by the server, the client can add properties.
const char * const traits = sharedBuffer == 0 ? "" : "static";
mTrackMetrics.logConstructor(creatorPid, uid, id(), traits, streamType);
}
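The AudioTrackServerProxy created above is the server-side view of the shared audio_track_cblk_t that the client-side proxy also maps: the client advances a write index while the playback thread advances a read index over the same buffer. A heavily simplified, self-contained sketch of that single-producer/single-consumer core (the real control block adds futex-based waiting, underrun accounting and buffer wrap handling; this is not the real layout):
#include <atomic>
#include <cstdint>
// MiniCblk: hypothetical miniature of the shared-counter idea behind
// audio_track_cblk_t.
struct MiniCblk {
    std::atomic<uint32_t> front{0}; // frames consumed by the server (mixer)
    std::atomic<uint32_t> rear{0};  // frames produced by the client
};
// Client side: how many frames can still be written into a buffer of
// frameCount frames? (unsigned arithmetic handles counter wrap)
static uint32_t framesAvailable(const MiniCblk& c, uint32_t frameCount) {
    return frameCount - (c.rear.load(std::memory_order_acquire) -
                         c.front.load(std::memory_order_acquire));
}
// Server side: how many frames are ready to be mixed?
static uint32_t framesReady(const MiniCblk& c) {
    return c.rear.load(std::memory_order_acquire) -
           c.front.load(std::memory_order_acquire);
}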
new TrackHandle
To be updated.