http://thinks.me/2016/03/17/audiotrack_create/
AudioTrack getMinBufferSize的代码流程竟然如此复杂,因为其中引入了 AUDIO_STREAM_XXX
对设备的选择得到 audio_io_handle_t
https://me.zhoujinjian.com/posts/20200408/
Android Audio System源码分析(3):Android Audio 系统源码分析
java层
frameworks/base/media/java/android/media/AudioTrack.java
JNI
native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
// JNI implementation behind AudioTrack.native_get_min_buff_size().
// Asks the native layer for the minimum frame count, then converts it to a
// byte count for PCM-like formats, or returns the frame count as-is.
static jint android_media_AudioTrack_get_min_buff_size(JNIEnv *env, jobject thiz,
        jint sampleRateInHertz, jint channelCount, jint audioFormat) {
    size_t frameCount; // the Java layer has no stream-type parameter; the JNI layer supplies AUDIO_STREAM_DEFAULT
    const status_t status = AudioTrack::getMinFrameCount(&frameCount,
            AUDIO_STREAM_DEFAULT, sampleRateInHertz);
    if (status != NO_ERROR) {
        ALOGE("AudioTrack::getMinFrameCount() for sample rate %d failed with status %d",
                sampleRateInHertz, status);
        return -1; // error sentinel surfaced to the Java caller
    }
    const audio_format_t format = audioFormatToNative(audioFormat);
    if (audio_has_proportional_frames(format)) {
        // Proportional (PCM-like) formats: bytes = frames * channels * bytes-per-sample.
        const size_t bytesPerSample = audio_bytes_per_sample(format);
        return frameCount * channelCount * bytesPerSample;
    } else {
        // Non-proportional (compressed) formats: the frame count itself is returned.
        return frameCount;
    }
}
client AudioTrack
frameworks/av/media/libaudioclient/AudioTrack.cpp
// static
// static
// Computes the minimum AudioTrack buffer size, in frames, for the given
// stream type and client sample rate. It queries three properties of the
// server-side output (sample rate, frame count, latency) via AudioSystem,
// then derives the result with AudioSystem::calculateMinFrameCount().
//
// @param frameCount  out: minimum frame count; must be non-null.
// @param streamType  stream type used by AudioSystem to resolve an output.
// @param sampleRate  the client's playback sample rate.
// @return NO_ERROR on success; BAD_VALUE for a null out pointer or a zero
//         computed count; otherwise the failing AudioSystem status.
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }
    // FIXME handle in server, like createTrack_l(), possible missing info:
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags (FAST)
    uint32_t afSampleRate;     // output mixer ("AudioFlinger") sample rate
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("%s(): Unable to query output sample rate for stream type %d; status %d",
                __func__, streamType, status);
        return status;
    }
    size_t afFrameCount;       // output mixer frame count
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("%s(): Unable to query output frame count for stream type %d; status %d",
                __func__, streamType, status);
        return status;
    }
    uint32_t afLatency;        // output mixer latency in ms
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("%s(): Unable to query output latency for stream type %d; status %d",
                __func__, streamType, status);
        return status;
    }
    // When called from createTrack, speed is 1.0f (normal speed).
    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
    *frameCount = AudioSystem::calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f /*, 0 notificationsPerBufferReq*/);
    // The formula above should always produce a non-zero value under normal circumstances:
    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
    // Return error in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("%s(): failed for streamType %d, sampleRate %u",
                __func__, streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("%s(): getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
            __func__, *frameCount, afFrameCount, afSampleRate, afLatency);
    return NO_ERROR;
}
AudioSystem
client AudioTrack通过AudioSystem才能使用Audio services
frameworks/av/media/libaudioclient/AudioSystem.cpp
// Returns the sampling rate of the output chosen for streamType.
// AUDIO_STREAM_DEFAULT is remapped to AUDIO_STREAM_MUSIC before the
// output handle is resolved.
status_t AudioSystem::getOutputSamplingRate(uint32_t* samplingRate, audio_stream_type_t streamType)
{
    audio_io_handle_t output;
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    output = getOutput(streamType); // resolve the streamType to an audio_io_handle
    if (output == 0) {
        return PERMISSION_DENIED;
    }
    return getSamplingRate(output, samplingRate); // query the sample rate via the handle
}
怎样通过streamType得到audio_io_handle
根据streamType找到合适的设备对应的 audio_io_handle的过程, audioPolicyService -> APM->Engine(具体的策略[配置文件]) 找到合适的设备
大概过程是:
1. streamType -(getAttributesForStreamType) -> attributes (streamType对应的多个attributes)
2. -getOutputDevicesForAttributes-> DeviceVector (attributes 找到对应的devices)
3. -getOutputsForDevices -> SortedVector<audio_io_handle_t> (多个devices对应多个handles)
4. -selectOutput(audio_io_handles, output_flag, audio_format, channel_flag, samplingRate) -> audio_io_handle (从多handle中根据out flag, format, channel, SR,得到最合适的audio_io_handle)
// Resolves a stream type to an output handle by asking the
// AudioPolicyService over Binder; returns 0 if the service is unavailable.
audio_io_handle_t AudioSystem::getOutput(audio_stream_type_t stream)
{
    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
    if (aps == 0) return 0;
    return aps->getOutput(stream);
}
AudioPolicyService client端接口实现
frameworks/av/media/libaudioclient/IAudioPolicyService.cpp
// Client-side (proxy) implementation: marshals the stream type into a
// Parcel, performs the GET_OUTPUT transaction, and unmarshals the handle
// from the reply.
class BpAudioPolicyService : public BpInterface<IAudioPolicyService>
{
    virtual audio_io_handle_t getOutput(audio_stream_type_t stream)
    {
        Parcel data, reply;
        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
        data.writeInt32(static_cast <uint32_t>(stream));
        remote()->transact(GET_OUTPUT, data, &reply);
        return static_cast <audio_io_handle_t> (reply.readInt32()); // cast the raw int32 reply back to a handle
    }
}
AudioPolicyService server端接口实现
// Server-side Binder dispatch (excerpt): unmarshals a GET_OUTPUT request,
// calls the local getOutput(), and writes the handle into the reply Parcel.
// NOTE: this excerpt elides the enclosing switch (code) statement present
// in the full upstream source.
status_t BnAudioPolicyService::onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    case GET_OUTPUT: {
        CHECK_INTERFACE(IAudioPolicyService, data, reply);
        audio_stream_type_t stream =
                static_cast <audio_stream_type_t>(data.readInt32());
        audio_io_handle_t output = getOutput(stream);
        reply->writeInt32(static_cast <int>(output));
        return NO_ERROR;
    } break;
}
frameworks/av/services/audiopolicy/service/AudioPolicyService.h
// The audio policy service: registered with ServiceManager under the name
// "media.audio_policy", implements the Binder server interface
// (BnAudioPolicyService), and observes client death via IBinder::DeathRecipient.
class AudioPolicyService :
    public BinderService<AudioPolicyService>,
    public BnAudioPolicyService,
    public IBinder::DeathRecipient
{
    static const char *getServiceName() ANDROID_API { return "media.audio_policy"; }
    virtual audio_io_handle_t getOutput(audio_stream_type_t stream);
}
// Audio Policy Manager interface: this interface also exists here and
// declares getOutput() as pure virtual, to be implemented by the manager.
class AudioPolicyInterface
{
    virtual audio_io_handle_t getOutput(audio_stream_type_t stream) = 0;
}
// The default policy manager: implements AudioPolicyInterface (including
// getOutput()) and observes policy state via AudioPolicyManagerObserver.
class AudioPolicyManager : public AudioPolicyInterface, public AudioPolicyManagerObserver
{
    virtual audio_io_handle_t getOutput(audio_stream_type_t stream);
}
上面三处都出现了getOutput函数,APM继承了APMI, AudioPolicyService接口的实现调用了APM这样三者就关联起来
frameworks/av/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
// Service-side getOutput(): validates the stream type, ensures the policy
// manager exists, then delegates to AudioPolicyManager::getOutput() while
// holding mLock.
audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream)
{
    if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
        return AUDIO_IO_HANDLE_NONE; // reject non-public stream types
    }
    if (mAudioPolicyManager == NULL) {
        return AUDIO_IO_HANDLE_NONE; // policy manager not initialized
    }
    // Snapshot the effects object under the lock, release before using it.
    mLock.lock();
    sp<AudioPolicyEffects> audioPolicyEffects = mAudioPolicyEffects;
    mLock.unlock();
    // NOTE(review): audioPolicyEffects is dereferenced below without a null
    // check — confirm mAudioPolicyEffects is guaranteed non-null here.
    audioPolicyEffects -> createGlobalEffects();
    ALOGV("getOutput()");
    Mutex::Autolock _l(mLock);
    AutoCallerClear acc;
    return mAudioPolicyManager->getOutput(stream);
}
APM(AudioPolicyManager)
frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
// Picks an output handle for a bare stream type:
//   stream -> devices (via the policy Engine) -> candidate open outputs
//   -> best output (selectOutput()).
// For AUDIO_STREAM_MUSIC with the "audio.deep_buffer.media" property set,
// a DEEP_BUFFER-flagged output is preferred.
audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream)
{
    DeviceVector devices = mEngine->getOutputDevicesForStream(stream, false /*fromCache*/);
    // Note that related method getOutputForAttr() uses getOutputForDevice() not selectOutput().
    // We use selectOutput() here since we don't have the desired AudioTrack sample rate,
    // format, flags, etc. This may result in some discrepancy for functions that utilize
    // getOutput() solely on audio_stream_type such as AudioSystem::getOutputFrameCount()
    // and AudioSystem::getOutputSamplingRate().
    SortedVector<audio_io_handle_t> outputs = getOutputsForDevices(devices, mOutputs);
    audio_io_handle_t output;
    if (stream == AUDIO_STREAM_MUSIC &&
            property_get_bool("audio.deep_buffer.media", false /* default_value */)) {
        // use DEEP_BUFFER as default output for music stream type
        output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_DEEP_BUFFER, AUDIO_FORMAT_INVALID);
    }
    else{
        output = selectOutput(outputs);
    }
    ALOGV("getOutput() stream %d selected devices %s, output %d", stream,
            devices.toString().c_str(), output);
    return output;
}
Engine: getOutputDevicesForStream
Engine指的是什么引擎?解析配置文件?
frameworks/av/services/audiopolicy/enginedefault/src/Engine.cpp
// Maps stream_type -> attributes, then attributes -> output devices.
DeviceVector Engine::getOutputDevicesForStream(audio_stream_type_t stream, bool fromCache) const
{
    auto attributes = getAttributesForStreamType(stream);
    // nullptr: no explicitly preferred device for this lookup
    return getOutputDevicesForAttributes(attributes, nullptr, fromCache);
}
getAttributesForStreamType
frameworks/av/services/audiopolicy/engine/common/src/ProductStrategy.cpp
// Returns the attributes associated with streamType within this product
// strategy. Although one stream type may map to several attributes in the
// config, find_if returns the FIRST matching entry; if none matches, the
// default AUDIO_ATTRIBUTES_INITIALIZER is returned.
audio_attributes_t ProductStrategy::getAttributesForStreamType(audio_stream_type_t streamType) const /* one stream type can map to multiple attributes */
{
    const auto iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
            [&streamType](const auto &supportedAttr) {
                return supportedAttr.mStream == streamType; });
    return iter != end(mAttributesVector) ? iter->mAttributes : AUDIO_ATTRIBUTES_INITIALIZER;
}
// mAttributesVector 怎么生成的?
frameworks/av/services/audiopolicy/engine/common/src/EngineBase.cpp
// Excerpt showing how mAttributesVector is populated: while parsing the
// engine config, each <Attributes> entry of an AttributesGroup is added to
// both the product strategy and its volume group.
// NOTE: the rest of this function is elided in this excerpt.
engineConfig::ParsingResult EngineBase::loadAudioPolicyEngineConfig()
{
    auto addSupportedAttributesToGroup = [](auto &group, auto &volumeGroup, auto &strategy) {
        for (const auto &attr : group.attributesVect) {
            strategy->addAttributes({group.stream, volumeGroup->getId(), attr});
            volumeGroup->addSupportedAttributes(attr);
        }
    };
}
audio_policy_engine_product_strategies.xml
<ProductStrategy name="STRATEGY_MEDIA">
<AttributesGroup streamType="AUDIO_STREAM_ASSISTANT" volumeGroup="assistant">
<Attributes>
<ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
<Usage value="AUDIO_USAGE_ASSISTANT"/>
</Attributes>
</AttributesGroup>
<AttributesGroup streamType="AUDIO_STREAM_MUSIC" volumeGroup="music"> <!-- 解析这个配置文件时,一个streamType得到多个attributes -->
<Attributes> <Usage value="AUDIO_USAGE_MEDIA"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_GAME"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANT"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/> </Attributes>
<Attributes></Attributes>
</AttributesGroup>
<AttributesGroup streamType="AUDIO_STREAM_SYSTEM" volumeGroup="system">
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
getOutputDevicesForAttributes
// Resolves audio attributes to output devices, in priority order:
//   1. an explicitly preferred device wins outright;
//   2. otherwise attributes -> product strategy;
//   3. a preferred route shared by all active clients is honored;
//   4. otherwise the devices for the strategy (cached or recomputed).
DeviceVector Engine::getOutputDevicesForAttributes(const audio_attributes_t &attributes,
        const sp<DeviceDescriptor> &preferredDevice,
        bool fromCache) const
{
    // First check for explicit routing device
    if (preferredDevice != nullptr) {
        ALOGV("%s explicit Routing on device %s", __func__, preferredDevice->toString().c_str());
        return DeviceVector(preferredDevice);
    } // attributes -> product_strategy
    product_strategy_t strategy = getProductStrategyForAttributes(attributes);
    const DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
    const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
    //
    // @TODO: what is the priority of explicit routing? Shall it be considered first as it used to
    // be by APM?
    //
    // Honor explicit routing requests only if all active clients have a preferred route in which
    // case the last active client route is used
    sp<DeviceDescriptor> device = findPreferredDevice(outputs, strategy, availableOutputDevices); // derive the preferred device from outputs, available devices and the strategy
    if (device != nullptr) {
        return DeviceVector(device);
    }
    return fromCache? mDevicesForStrategies.at(strategy) : getDevicesForProductStrategy(strategy);
}
getOutputsForDevices(devices, mOutputs);
// Filters the currently open outputs down to those that can reach ALL of
// the requested devices (and support their encoded formats); duplicated
// outputs are reported by the log but not excluded here. Returns the
// matching output handles.
SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevices(
        const DeviceVector &devices,
        const SwAudioOutputCollection& openOutputs)
{
    SortedVector<audio_io_handle_t> outputs;
    ALOGVV("%s() devices %s", __func__, devices.toString().c_str());
    for (size_t i = 0; i < openOutputs.size(); i++) {
        ALOGVV("output %zu isDuplicated=%d device=%s",
                i, openOutputs.valueAt(i)->isDuplicated(),
                openOutputs.valueAt(i)->supportedDevices().toString().c_str());
        if (openOutputs.valueAt(i)->supportsAllDevices(devices)
                && openOutputs.valueAt(i)->devicesSupportEncodedFormats(devices.types())) {
            ALOGVV("%s() found output %d", __func__, openOutputs.keyAt(i));
            outputs.add(openOutputs.keyAt(i));
        }
    }
    return outputs;
}
selectOutput
// Declaration: every criterion except the candidate list defaults to
// "unspecified", so callers that only know the stream type can still
// pick an output.
audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
        audio_format_t format = AUDIO_FORMAT_INVALID,
        audio_channel_mask_t channelMask = AUDIO_CHANNEL_NONE,
        uint32_t samplingRate = 0);
// Scores each candidate output against the requested flags/format/channel
// mask/sampling rate using a priority-ordered criteria vector, and returns
// the best-matching handle (ties resolved lexicographically).
// NOTE(review): isCts/getAppMaskByName/callingAppName and the trailing
// ULL-vs-primary override look like vendor additions not present in stock
// AOSP — confirm they are declared in this build.
audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
        audio_output_flags_t flags,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        uint32_t samplingRate)
{
    audio_io_handle_t primary_output = 0; // remembers the PRIMARY-flagged output, if seen
    bool isCts = getAppMaskByName(callingAppName) & APP_TYPE_CTS_AUDIOPRO ? true : false;
    LOG_ALWAYS_FATAL_IF(!(format == AUDIO_FORMAT_INVALID || audio_is_linear_pcm(format)),
            "%s called with format %#x", __func__, format);
    // Flags disqualifying an output: the match must happen before calling selectOutput()
    static const audio_output_flags_t kExcludedFlags = (audio_output_flags_t)
            (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ | AUDIO_OUTPUT_FLAG_DIRECT);
    // Flags expressing a functional request: must be honored in priority over
    // other criteria
    static const audio_output_flags_t kFunctionalFlags = (audio_output_flags_t)
            (AUDIO_OUTPUT_FLAG_VOIP_RX | AUDIO_OUTPUT_FLAG_INCALL_MUSIC |
            AUDIO_OUTPUT_FLAG_TTS | AUDIO_OUTPUT_FLAG_DIRECT_PCM | AUDIO_OUTPUT_FLAG_VIRTUAL_DEEP_BUFFER);
    // Flags expressing a performance request: have lower priority than serving
    // requested sampling rate or channel mask
    static const audio_output_flags_t kPerformanceFlags = (audio_output_flags_t)
            (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_DEEP_BUFFER |
            AUDIO_OUTPUT_FLAG_RAW | AUDIO_OUTPUT_FLAG_SYNC);
    const audio_output_flags_t functionalFlags =
            (audio_output_flags_t)(flags & kFunctionalFlags);
    const audio_output_flags_t performanceFlags =
            (audio_output_flags_t)(flags & kPerformanceFlags);
    // Fallback: the first candidate (or NONE when the list is empty).
    audio_io_handle_t bestOutput = (outputs.size() == 0) ? AUDIO_IO_HANDLE_NONE : outputs[0];
    // select one output among several that provide a path to a particular device or set of
    // devices (the list was previously build by getOutputsForDevices()).
    // The priority is as follows:
    // 1: the output supporting haptic playback when requesting haptic playback
    // 2: the output with the highest number of requested functional flags
    // 3: the output supporting the exact channel mask
    // 4: the output with a higher channel count than requested
    // 5: the output with a higher sampling rate than requested
    // 6: the output with the highest number of requested performance flags
    // 7: the output with the bit depth the closest to the requested one
    // 8: the primary output
    // 9: the first output in the list
    // matching criteria values in priority order for best matching output so far
    std::vector<uint32_t> bestMatchCriteria(8, 0);
    const uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    const uint32_t hapticChannelCount = audio_channel_count_from_out_mask(
            channelMask & AUDIO_CHANNEL_HAPTIC_ALL);
    for (audio_io_handle_t output : outputs) {
        sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
        // matching criteria values in priority order for current output
        std::vector<uint32_t> currentMatchCriteria(8, 0);
        if (outputDesc->isDuplicated()) {
            continue; // skip duplicated outputs entirely
        }
        if ((kExcludedFlags & outputDesc->mFlags) != 0) {
            continue; // skip outputs with disqualifying flags
        }
        // If haptic channel is specified, use the haptic output if present.
        // When using haptic output, same audio format and sample rate are required.
        const uint32_t outputHapticChannelCount = audio_channel_count_from_out_mask(
                outputDesc->getChannelMask() & AUDIO_CHANNEL_HAPTIC_ALL);
        if ((hapticChannelCount == 0) != (outputHapticChannelCount == 0)) {
            continue; // haptic request and haptic capability must agree
        }
        if (outputHapticChannelCount >= hapticChannelCount
                && format == outputDesc->getFormat()
                && samplingRate == outputDesc->getSamplingRate()) {
            currentMatchCriteria[0] = outputHapticChannelCount;
        }
        // functional flags match
        currentMatchCriteria[1] = popcount(outputDesc->mFlags & functionalFlags);
        // channel mask and channel count match
        uint32_t outputChannelCount = audio_channel_count_from_out_mask(
                outputDesc->getChannelMask());
        if (channelMask != AUDIO_CHANNEL_NONE && channelCount > 2 &&
                channelCount <= outputChannelCount) {
            if ((audio_channel_mask_get_representation(channelMask) ==
                    audio_channel_mask_get_representation(outputDesc->getChannelMask())) &&
                    ((channelMask & outputDesc->getChannelMask()) == channelMask)) {
                currentMatchCriteria[2] = outputChannelCount;
            }
            currentMatchCriteria[3] = outputChannelCount;
        }
        // sampling rate match
        if (samplingRate > SAMPLE_RATE_HZ_DEFAULT &&
                samplingRate <= outputDesc->getSamplingRate()) {
            currentMatchCriteria[4] = outputDesc->getSamplingRate();
        }
        // performance flags match
        currentMatchCriteria[5] = popcount(outputDesc->mFlags & performanceFlags);
        // format match
        if (format != AUDIO_FORMAT_INVALID) {
            currentMatchCriteria[6] =
                    PolicyAudioPort::kFormatDistanceMax -
                    PolicyAudioPort::formatDistance(format, outputDesc->getFormat());
        }
        // primary output match
        currentMatchCriteria[7] = outputDesc->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY;
        if (currentMatchCriteria[7])
            primary_output = output;
        // compare match criteria by priority then value
        if (std::lexicographical_compare(bestMatchCriteria.begin(), bestMatchCriteria.end(),
                currentMatchCriteria.begin(), currentMatchCriteria.end())) {
            bestMatchCriteria = currentMatchCriteria;
            bestOutput = output;
            std::stringstream result;
            std::copy(bestMatchCriteria.begin(), bestMatchCriteria.end(),
                    std::ostream_iterator<int>(result, " "));
            ALOGV("%s new bestOutput %d criteria %s",
                    __func__, bestOutput, result.str().c_str());
        }
    }
    {
        //only cts use ull, the others use primary output
        //if do not have ull profile(audio_policy_configuration.xml), cannot create ull desc.
        //so it must create ull output first
        sp<SwAudioOutputDescriptor> outputDescPrimary = mOutputs.valueFor(bestOutput);
        if (outputDescPrimary != nullptr && (outputDescPrimary->mFlags == (AUDIO_OUTPUT_FLAG_FAST|AUDIO_OUTPUT_FLAG_RAW))
                && !isCts && primary_output != 0) {
            bestOutput = primary_output;
        }
    }
    return bestOutput;
}
经过上面的AudioPolicyService: getOutput 得到了 audio_io_handle, audio_io_handle作为参数去调用AudioFlinger service
AudioSystem::getSamplingRate
// Returns the sampling rate of the given output handle: uses the locally
// cached AudioIoDescriptor when available, otherwise queries AudioFlinger.
// A rate of 0 is treated as failure (BAD_VALUE).
status_t AudioSystem::getSamplingRate(audio_io_handle_t ioHandle,
        uint32_t* samplingRate)
{
    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
    if (af == 0) return PERMISSION_DENIED; // AudioFlinger unavailable
    sp<AudioIoDescriptor> desc = getIoDescriptor(ioHandle);
    if (desc == 0) {
        *samplingRate = af->sampleRate(ioHandle); // no cached descriptor: ask AudioFlinger
    } else {
        *samplingRate = desc->mSamplingRate; // use the cached value
    }
    if (*samplingRate == 0) {
        ALOGE("AudioSystem::getSamplingRate failed for ioHandle %d", ioHandle);
        return BAD_VALUE;
    }
    ALOGV("getSamplingRate() ioHandle %d, sampling rate %u", ioHandle, *samplingRate);
    return NO_ERROR;
}