先附上时序图吧,后面会跟上代码讲解:
一、概述
安卓系统的音量主要由MasterVolume和StreamVolume来控制,这对于安卓开发人员来说并不陌生。MasterVolume就是系统音量,这个全局的变量影响着Android系统的所有音频通路;StreamVolume则对应着Android系统各种各样的StreamType音频流类型,比如我们最常用的AUDIO_STREAM_MUSIC。
插曲:下面大致列出几种比较常用的音频流类型:
AUDIO_STREAM_VOICE_CALL = 0, //电话音
AUDIO_STREAM_SYSTEM = 1, //系统音
AUDIO_STREAM_RING = 2, //铃声音
AUDIO_STREAM_MUSIC = 3, //媒体音
AUDIO_STREAM_ALARM = 4, //闹铃音
AUDIO_STREAM_NOTIFICATION = 5,
AUDIO_STREAM_BLUETOOTH_SCO = 6, //蓝牙通话音
但是我们的目标并不是研究这些StreamType,这些请看其他文章。本篇主要是捋一捋系统音量的设置流程。
二、流程
首先要由我们的设备响应按键事件,一般位于PhoneWindowManager.java(比如电视则是TvWindowManager.java)中,
在interceptKeyBeforeDispatching中实现各种按键事件,我们只看我们想要的部分(KeyEvent.KEYCODE_VOLUME)这个case里处理系统音量的增减(代码保密,不方便贴出)。
根据代码我们会来到AudioService.java的adjustSuggestedStreamVolume中,然后会调用成员函数adjustStreamVolume。AudioService是整个系统音量处理部分最核心的内容,所有的音量流程都要过它之手,所以务必熟悉。
AudioService.java:
xref: /frameworks/base/services/core/java/com/android/server/audio/AudioService.java
/**
 * Core volume-adjustment entry point of AudioService (abridged excerpt: large
 * parts of the original AOSP method are elided, so several locals used below —
 * device, step, aliasIndex, adjustVolume, isMuteAdjust — are declared in code
 * that is not shown here).
 *
 * @param streamType     stream to adjust (e.g. AudioSystem.STREAM_MUSIC)
 * @param direction      AudioManager.ADJUST_RAISE / ADJUST_LOWER / ADJUST_SAME
 * @param flags          AudioManager.FLAG_* modifiers for this request
 * @param callingPackage package name of the caller
 * @param caller         textual description of the caller
 * @param uid            uid of the caller
 */
private void adjustStreamVolume(int streamType, int direction, int flags,
        String callingPackage, String caller, int uid) {
    /* */
    // Re-validate the arguments before touching any state.
    ensureValidDirection(direction);
    ensureValidStreamType(streamType);
    // checkForRingerModeChange() in place of STREAM_RING or STREAM_NOTIFICATION)
    // Resolve this device's stream alias for the requested stream type.
    int streamTypeAlias = mStreamVolumeAlias[streamType];
    VolumeStreamState streamState = mStreamStates[streamTypeAlias];
    // reset any pending volume command
    // Drop any still-pending volume command so it cannot race this request.
    synchronized (mSafeMediaVolumeState) {
        mPendingVolumeCommand = null;
    }
    // (Most of the less relevant code is elided here.)
    // If the ringermode is suppressing media, prevent changes
    if (!volumeAdjustmentAllowedByDnd(streamTypeAlias, flags)) {
        adjustVolume = false;
    }
    int oldIndex = mStreamStates[streamType].getIndex(device);
    if (adjustVolume && (direction != AudioManager.ADJUST_SAME)) {
        mAudioHandler.removeMessages(MSG_UNMUTE_STREAM);
        // Check whether the current devices include a Bluetooth A2DP sink and
        // set the bluetooth absolute-volume flag accordingly.
        flags &= ~AudioManager.FLAG_BLUETOOTH_ABS_VOLUME;
        if ((device & AudioSystem.DEVICE_OUT_ALL_A2DP) != 0) {
            flags |= AudioManager.FLAG_BLUETOOTH_ABS_VOLUME;
        }
        if (isMuteAdjust) {
            // Mute handling elided.
        } else if ((direction == AudioManager.ADJUST_RAISE) &&
                !checkSafeMediaVolume(streamTypeAlias, aliasIndex + step, device)) {
            // Raising past the safe media volume: warn instead of applying.
            Log.e(TAG, "adjustStreamVolume() safe volume index = " + oldIndex);
            mVolumeController.postDisplaySafeVolumeWarning(flags);
            // Compute the new index from the step size (the UI typically
            // exposes only 7 or 12 coarse volume steps).
        } else if (streamState.adjustIndex(direction * step, device, caller)
                || streamState.mIsMuted) {
            // Post message to set system volume (it in turn will post a
            // message to persist).
            if (streamState.mIsMuted) {
                // Unmute the stream if it was previously muted
                if (direction == AudioManager.ADJUST_RAISE) {
                    // unmute immediately for volume up
                    streamState.mute(false);
                } else if (direction == AudioManager.ADJUST_LOWER) {
                    if (mIsSingleVolume) {
                        sendMsg(mAudioHandler, MSG_UNMUTE_STREAM, SENDMSG_QUEUE,
                                streamTypeAlias, flags, null, UNMUTE_STREAM_DELAY);
                    }
                }
            }
            // This is where the volume is really applied: queue a message that
            // pushes the new index down to the device.
            sendMsg(mAudioHandler,
                    MSG_SET_DEVICE_VOLUME,
                    SENDMSG_QUEUE,
                    device,
                    0,
                    streamState,
                    0);
        }
        // Check if volume update should be sent to Hdmi system audio.
        // (UI-update related code elided.)
        int newIndex = mStreamStates[streamType].getIndex(device);
        if (streamTypeAlias == AudioSystem.STREAM_MUSIC) {
            setSystemAudioVolume(oldIndex, newIndex, getStreamMaxVolume(streamType), flags);
        }
        if (mHdmiManager != null) {
            synchronized (mHdmiManager) {
                // mHdmiCecSink true => mHdmiPlaybackClient != null
                if (mHdmiCecSink &&
                        streamTypeAlias == AudioSystem.STREAM_MUSIC &&
                        oldIndex != newIndex) {
                }
            }
        }
    }
    // Notify listeners/UI of the final index for this stream.
    int index = mStreamStates[streamType].getIndex(device);
    sendVolumeUpdate(streamType, oldIndex, index, flags);
}
删除了大量的代码,只留下了我们感兴趣的。大部分逻辑都是处理各种环境下音量的,我们直奔主题。最后会通过消息队列sendMsg(对消息队列感兴趣可以看另一篇博客:安卓消息队列详解)为Device设置音量值。
插曲:有必要说一下消息队列的另一个参数StreamState。因为安卓各种各样的设备的原因,不同的设备可能存在的音频输入输出设备也不一样(比如电视设备就不存在通话音量)。所以我们可以通过StreamStates这个数组来保存不同设备上的对应音频流类型。当然真正使用的时候只能取一组。
所以大道敞开,我们直接在AudioService.java中寻找MSG_SET_DEVICE_VOLUME这个case,看来是调用的setDeviceVolume方法:
// Handler dispatch (truncated excerpt — the enclosing Handler class and the
// remaining switch cases are not shown).
public void handleMessage(Message msg) {
    switch (msg.what) {
        case MSG_SET_DEVICE_VOLUME:
            // msg.obj carries the VolumeStreamState to apply, msg.arg1 the device.
            setDeviceVolume((VolumeStreamState) msg.obj, msg.arg1);
            break;
插曲:Device是Android系统中的抽象设备,因为不同设备厂商的接口很难统一,所以Android使用HAL层对各种硬件设备厂商的接口进行封装抽象,大大减少了应用开发人员对底层的交互。常见的设备列表如下:
AUDIO_DEVICE_NONE = 0u, // 0x0 空设备
AUDIO_DEVICE_OUT_SPEAKER = 2u, // 0x2 系统扬声器
AUDIO_DEVICE_OUT_WIRED_HEADPHONE = 8u, // 0x8 耳机
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP = 128u, // 0x80 A2DP蓝牙
AUDIO_DEVICE_OUT_SPDIF = 524288u, // 0x80000 外接功放
好的,我们接着看setDeviceVolume()函数:
/**
 * Applies the volume of {@code streamState} to the given device, then
 * propagates the change to every other stream type that uses this stream
 * as its alias.
 *
 * @param streamState stream whose index should be pushed to the audio system
 * @param device      output device (AudioSystem.DEVICE_OUT_*) to apply to
 */
private void setDeviceVolume(VolumeStreamState streamState, int device) {
    synchronized (VolumeStreamState.class) {
        // Apply volume
        // Push this stream's index down to native for the target device.
        streamState.applyDeviceVolume_syncVSS(device);
        // Apply change to all streams using this one as alias
        int numStreamTypes = AudioSystem.getNumStreamTypes();
        for (int streamType = numStreamTypes - 1; streamType >= 0; streamType--) {
            if (streamType != streamState.mStreamType &&
                    mStreamVolumeAlias[streamType] == streamState.mStreamType) {
                // Make sure volume is also maxed out on A2DP device for aliased stream
                // that may have a different device selected
                int streamDevice = getDeviceForStream(streamType);
                if ((device != streamDevice) && mAvrcpAbsVolSupported &&
                        ((device & AudioSystem.DEVICE_OUT_ALL_A2DP) != 0)) {
                    mStreamStates[streamType].applyDeviceVolume_syncVSS(device);
                }
                // BUGFIX: was "umStreamStates" — a typo for the mStreamStates
                // field used everywhere else in this method.
                mStreamStates[streamType].applyDeviceVolume_syncVSS(streamDevice);
            }
        }
    }
}
首先为我们的目标device设置音量,如果当前设备的其他streamType的音量可以更新,也更新之。接着看applyDeviceVolume_syncVSS()函数:
// must be called while synchronized VolumeStreamState.class
// Pushes this stream's volume for the given device down to native AudioSystem.
// Internal indexes appear to be stored scaled by 10 — note the (x + 5) / 10
// rounding before handing the value to the native layer.
public void applyDeviceVolume_syncVSS(int device) {
    final int index;
    if (mIsMuted) {
        // A muted stream always applies index 0.
        index = 0;
    } else {
        final boolean btAbsVolume =
                (device & AudioSystem.DEVICE_OUT_ALL_A2DP) != 0 && mAvrcpAbsVolSupported;
        final boolean fixedVolumeDevice = (device & mFullVolumeDevices) != 0;
        // Full-volume devices pin the index at max, unless Bluetooth absolute
        // volume applies (checked first, mirroring the original branch order).
        final int rawIndex =
                (btAbsVolume || !fixedVolumeDevice) ? getIndex(device) : mIndexMax;
        index = (rawIndex + 5) / 10;
    }
    AudioSystem.setStreamVolumeIndex(mStreamType, index, device);
}
这个地方初次看可能比较困惑,哪儿来的index?其实我们UI更新音量的时候,就已经根据音量曲线为我们的抽象device设置好了index,所以我们现在的任务是获取index,并将这个值设入底层。代码很简单,不赘述了。
AudioSystem.java: public static native int setStreamVolumeIndex(int stream, int index, int device);
调用了AudioSystem的native方法,根据java的JNI机制,如果没猜错C++层应该有个AudioSystem.cpp类与之对应,并且实现了AudioSystem.java的所有方法。关于JNI请看:JNI介绍
下面正式进入framework C++层的代码:
AudioSystem.cpp:
// Bridge from the JNI layer to the audio policy service: look the service up
// and forward the per-device stream volume index to it.
status_t AudioSystem::setStreamVolumeIndex(audio_stream_type_t stream,
                                           int index,
                                           audio_devices_t device)
{
    const sp<IAudioPolicyService>& service = AudioSystem::get_audio_policy_service();
    if (service == 0) {
        // Policy service unavailable (e.g. audioserver not running yet).
        return PERMISSION_DENIED;
    }
    return service->setStreamVolumeIndex(stream, index, device);
}
直接调用了AudioPolicyService的setStreamVolumeIndex()函数。在Android源码中,IXXX类一般是XXX类的代理,调用的都是XXX里的函数,所以我们直奔主题,去AudioPolicyService里找setStreamVolumeIndex()函数。
果然在这儿,不过遗憾的是AudioPolicyService的实现并不全是在AudioPolicyService.cpp中,比如这个函数的实现是在AudioPolicyInterfaceImpl.cpp 中。
AudioPolicyInterfaceImpl.cpp:
// Binder-side entry point: validate service state, caller permission and the
// stream range, then delegate to AudioPolicyManager under the service lock.
status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream,
                                                  int index,
                                                  audio_devices_t device)
{
    if (mAudioPolicyManager == NULL) {
        return NO_INIT;           // service not fully initialized yet
    }
    if (!settingsAllowed()) {
        return PERMISSION_DENIED; // caller lacks the settings permission
    }
    if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
        return BAD_VALUE;         // not a public stream type
    }
    Mutex::Autolock _l(mLock);
    return mAudioPolicyManager->setStreamVolumeIndex(stream, index, device);
}
又是一层检查调用!好吧,我们去AudioPolicyManager中看看:
AudioPolicyManager:
// Abridged excerpt: the surrounding loop over outputs/streams is elided, so
// curStream, curDevice, curStreamDevice, desc and status are declared in code
// that is not shown here.
status_t AudioPolicyManager::setStreamVolumeIndex(audio_stream_type_t stream,
                                                  int index,
                                                  audio_devices_t device)
{
    // Reject indexes outside the configured volume curve for this stream.
    if ((index < mVolumeCurves->getVolumeIndexMin(stream)) ||
            (index > mVolumeCurves->getVolumeIndexMax(stream))) {
        return BAD_VALUE;
    }
    // Volume can only be applied to an output device.
    if (!audio_is_output_device(device)) {
        return BAD_VALUE;
    }
    bool applyVolume;
    if (device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
        curStreamDevice |= device;
        applyVolume = (curDevice & curStreamDevice) != 0;
    } else {
        applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
                stream, Volume::getDeviceForVolume(curStreamDevice));
    }
    if (applyVolume) {
        //FIXME: workaround for truncated touch sounds
        // delayed volume change for system stream to be removed when the problem is
        // handled by system UI
        status_t volStatus =
                checkAndSetVolume((audio_stream_type_t)curStream, index, desc, curDevice,
                        (stream == AUDIO_STREAM_SYSTEM) ? TOUCH_SOUND_FIXED_DELAY_MS : 0);
        if (volStatus != NO_ERROR) {
            status = volStatus;
        }
    }
    return status;
}
一大堆的条件判断,然后进入我们的checkAndSetVolume()中,顾名思义,检查并设值。
// Validates mute/force-use state, converts the UI index into a dB volume and
// applies it to the output descriptor; voice-call / SCO volume gets special
// handling at the end.
status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
                                               int index,
                                               const sp<AudioOutputDescriptor>& outputDesc,
                                               audio_devices_t device,
                                               int delayMs,
                                               bool force)
{
    // do not change actual stream volume if the stream is muted
    if (outputDesc->mMuteCount[stream] != 0) {
        ALOGVV("checkAndSetVolume() stream %d muted count %d",
                stream, outputDesc->mMuteCount[stream]);
        return NO_ERROR;
    }
    audio_policy_forced_cfg_t forceUseForComm =
            mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION);
    // do not change in call volume if bluetooth is connected and vice versa
    if ((stream == AUDIO_STREAM_VOICE_CALL && forceUseForComm == AUDIO_POLICY_FORCE_BT_SCO) ||
            (stream == AUDIO_STREAM_BLUETOOTH_SCO && forceUseForComm != AUDIO_POLICY_FORCE_BT_SCO)) {
        ALOGV("checkAndSetVolume() cannot set stream %d volume with force use = %d for comm",
                stream, forceUseForComm);
        return INVALID_OPERATION;
    }
    // If no device was given, fall back to the descriptor's current device.
    if (device == AUDIO_DEVICE_NONE) {
        device = outputDesc->device();
    }
    // Convert the index to a dB attenuation (roughly -115 dB .. 0 dB).
    float volumeDb = computeVolume(stream, index, device);
    if (outputDesc->isFixedVolume(device)) {
        volumeDb = 0.0f; // fixed-volume devices always play at full scale
    }
    // Apply the dB volume to the output descriptor.
    outputDesc->setVolume(volumeDb, stream, device, delayMs, force);
    // Special cases: voice call and bluetooth SCO volume (not taken in the
    // media-volume path this article follows).
    if (stream == AUDIO_STREAM_VOICE_CALL ||
            stream == AUDIO_STREAM_BLUETOOTH_SCO) {
        float voiceVolume;
        // Force voice volume to max for bluetooth SCO as volume is managed by the headset
        if (stream == AUDIO_STREAM_VOICE_CALL) {
            voiceVolume = (float)index/(float)mVolumeCurves->getVolumeIndexMax(stream);
        } else {
            voiceVolume = 1.0;
        }
        // Only push the voice volume down when it actually changed.
        if (voiceVolume != mLastVoiceVolume) {
            mpClientInterface->setVoiceVolume(voiceVolume, delayMs);
            mLastVoiceVolume = voiceVolume;
        }
    }
    return NO_ERROR;
}
设备输出描述符有一个简单的继承关系:
一个是软解输出设备描述符,一个是硬解输出设备描述符,我们的音频解码一般都用SwAudioOutputDescriptor:
// Applies a new stream volume (in dB) to this software output. Returns true
// when the cached volume actually changed and was forwarded to the client
// interface (and from there to AudioFlinger).
bool SwAudioOutputDescriptor::setVolume(float volume,
                                        audio_stream_type_t stream,
                                        audio_devices_t device,
                                        uint32_t delayMs,
                                        bool force)
{
    bool changed = AudioOutputDescriptor::setVolume(volume, stream, device, delayMs, force);
    if (changed) {
        // Convert the cached dB value into a linear amplitude for AudioFlinger.
        // Renamed from "volume": the original shadowed the dB parameter with
        // this amplitude-scaled local, which was easy to misread.
        float amplitude = Volume::DbToAmpl(mCurVolume[stream]);
        // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is
        // enabled
        if (stream == AUDIO_STREAM_BLUETOOTH_SCO) {
            mClientInterface->setStreamVolume(
                    AUDIO_STREAM_VOICE_CALL, amplitude, mIoHandle, delayMs);
        }
        mClientInterface->setStreamVolume(stream, amplitude, mIoHandle, delayMs);
    }
    return changed;
}
代码很容易理解,根据Device获取对应的IOHandle(每个handle对应了底层的一个回放线程)。并设备输出描述符设置音量,如果音量有改变,就调用AudioPolicyClientInterface的setStreamVolume方法。当然我们找不到它的实现,根据继承关系AudioPolicyClient->AudioPolicyClientInterface,我们去AudioPolicyClient看看:
// AudioPolicyClientInterface implementation: a thin shim that forwards the
// request to the owning AudioPolicyService, which queues it on its command
// thread.
status_t AudioPolicyService::AudioPolicyClient::setStreamVolume(audio_stream_type_t stream,
        float volume, audio_io_handle_t output,
        int delay_ms)
{
    return mAudioPolicyService->setStreamVolume(stream, volume, output,
            delay_ms);
}
接着去AudioPolicyService看看:
// Hands the volume request to the command thread so the binder caller does
// not block while the command is executed asynchronously.
int AudioPolicyService::setStreamVolume(audio_stream_type_t stream,
                                        float volume,
                                        audio_io_handle_t output,
                                        int delayMs)
{
    return static_cast<int>(
            mAudioCommandThread->volumeCommand(stream, volume, output, delayMs));
}
恩,接着去内部类AudioCommandThread中看看:
// Packages a SET_VOLUME request and queues it on the command thread;
// mWaitStatus makes sendCommand() wait for the command's completion status.
status_t AudioPolicyService::AudioCommandThread::volumeCommand(audio_stream_type_t stream,
                                                               float volume,
                                                               audio_io_handle_t output,
                                                               int delayMs)
{
    // Build the payload first: stream, linear volume and target io handle.
    sp<VolumeData> payload = new VolumeData();
    payload->mStream = stream;
    payload->mVolume = volume;
    payload->mIO = output;

    // Wrap it in a command that the thread loop will dispatch on SET_VOLUME.
    sp<AudioCommand> cmd = new AudioCommand();
    cmd->mCommand = SET_VOLUME;
    cmd->mParam = payload;
    cmd->mWaitStatus = true;

    ALOGV("AudioCommandThread() adding set volume stream %d, volume %f, output %d",
            stream, volume, output);
    return sendCommand(cmd, delayMs);
}
看来是将我们的SET_VOLUME command打包并发送出去。一般在Thread类中都会启动一个线程循环来接收各种消息指令或者处理大量逻辑事务,AudioCommandThread应该也不例外,我们找到threadLoop函数看看:
// Command-thread main loop (truncated excerpt: the tail of the outer loop,
// the other switch cases and the function epilogue are not shown).
bool AudioPolicyService::AudioCommandThread::threadLoop()
{
    nsecs_t waitTime = -1;
    mLock.lock();
    while (!exitPending())
    {
        sp<AudioPolicyService> svc;
        // Drain every command whose scheduled time has arrived.
        while (!mAudioCommands.isEmpty() && !exitPending()) {
            nsecs_t curTime = systemTime();
            // commands are sorted by increasing time stamp: execute them from index 0 and up
            if (mAudioCommands[0]->mTime <= curTime) {
                sp<AudioCommand> command = mAudioCommands[0];
                mAudioCommands.removeAt(0);
                mLastCommand = command;
                switch (command->mCommand) {
                case SET_VOLUME: {
                    VolumeData *data = (VolumeData *)command->mParam.get();
                    ALOGV("AudioCommandThread() processing set volume stream %d, \
volume %f, output %d", data->mStream, data->mVolume, data->mIO);
                    // Hand off to AudioSystem, which forwards to AudioFlinger.
                    command->mStatus = AudioSystem::setStreamVolume(data->mStream,
                                                                    data->mVolume,
                                                                    data->mIO);
                    }break;
                }
            }
        }
果然没猜错,接收指令并调用了AudioSystem的setStreamVolume函数。然后直接调用AudioFlinger的setStreamVolume函数,简单的调用,不贴代码了。
// Records the new per-stream volume and pushes it to either the single
// VolumeInterface matching `output`, or to all of them when no specific
// output handle was given.
status_t AudioFlinger::setStreamVolume(audio_stream_type_t stream, float value,
                                       audio_io_handle_t output)
{
    // check calling permissions
    if (!settingsAllowed()) {
        return PERMISSION_DENIED;
    }
    const status_t typeStatus = checkStreamType(stream);
    if (typeStatus != NO_ERROR) {
        return typeStatus;
    }
    ALOG_ASSERT(stream != AUDIO_STREAM_PATCH, "attempt to change AUDIO_STREAM_PATCH volume");

    AutoMutex lock(mLock);
    // Resolve the set of targets to update.
    Vector<VolumeInterface *> targets;
    if (output != AUDIO_IO_HANDLE_NONE) {
        VolumeInterface *target = getVolumeInterface_l(output);
        if (target == NULL) {
            return BAD_VALUE; // unknown output handle
        }
        targets.add(target);
    }
    // Cache the stream volume, then fan it out (to every interface when no
    // specific output was requested).
    mStreamTypes[stream].volume = value;
    if (targets.size() == 0) {
        targets = getAllVolumeInterfaces_l();
    }
    for (size_t i = 0; i < targets.size(); i++) {
        targets[i]->setStreamVolume(stream, value);
    }
    return NO_ERROR;
}
简单介绍下,先给我们的目标StreamType(当然是system_stream)设置音量值。然后为volumeInterface设值,volumeInterface是PlayBackThread的父类(在AOSP中,XXXinterface一般都是纯虚函数组成的类,相当于Java中的接口)。AudioFlinger是安卓音量处理的核心,在这个类内部维护一个PlayBackThread类,各种回放线程的派生类都继承于它,比如(MixThread,DuplicatingThread~~)。下面大致给出类图:
MixThread为我们最多用到的混音类,DirectOutputThread只有我们创建一个Track并且标注Flags为Direct时才会创建(比如音效,或者需要硬解的音频流)。所以,我们调用VolumeInterface的setStreamVolume最终会调到PlayBackThread的setStreamVolume()。因为volume是通用属性,所以底层的线程没有重载,直接调用即可。
// Caches the new per-stream volume on this playback thread and signals the
// thread so its loop picks the value up (see prepareTracks_l below).
void AudioFlinger::PlaybackThread::setStreamVolume(audio_stream_type_t stream, float value)
{
    Mutex::Autolock _l(mLock);
    mStreamTypes[stream].volume = value;
    broadcast_l();
}
为当前回放线程的streamType对应的stream设置,那么在哪里调用呢?我们知道每个Thread类都维护了一个Loop函数,并且我们的大部分音频处理都要经过MixThread进行混音才能写出。并且MixThread的每次循环处理都会对当前线程挂载的所有Track进行处理,我们去看看(这一块知识点很多,以后有空总结一下):
// Heavily abridged excerpt of the mixer thread's track preparation: the loop
// over active tracks and most of the function are elided, so track, name and
// param are declared in code that is not shown here.
AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTracks_l(
        Vector< sp<Track> > *tracksToRemove)
{
    // Read the master volume/mute; a master mute silences everything.
    float masterVolume = mMasterVolume;
    bool masterMute = mMasterMute;
    if (masterMute) {
        masterVolume = 0;
    }
    // Delegate master volume control to effect in output mix effect chain if needed
    sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
    if (chain != 0) {
        uint32_t v = (uint32_t)(masterVolume * (1 << 24));
        chain->setVolume_l(&v, &v);
        masterVolume = (float)((v + (1 << 23)) >> 24);
        chain.clear();
    }
    // compute volume for this track
    uint32_t vl, vr;     // in U8.24 integer format
    float vlf, vrf, vaf; // in [0.0, 1.0] float format
    if (track->isPausing() || mStreamTypes[track->streamType()].mute) {
        vl = vr = 0;
        vlf = vrf = vaf = 0.;
        if (track->isPausing()) {
            track->setPaused();
        }
    } else {
        // read original volumes with volume control
        // Per-stream volume set earlier via setStreamVolume().
        float typeVolume = mStreamTypes[track->streamType()].volume;
        // NOTE(review): this unconditionally overrides the stream-type volume
        // just read above, effectively disabling stream volume here — not in
        // stock AOSP; presumably a local patch. Verify intent.
        typeVolume = 1.0f;
        // Combine the master volume with the stream-type volume.
        float v = masterVolume * typeVolume;
        // Split out the per-track left/right gains.
        sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
        gain_minifloat_packed_t vlr = proxy->getVolumeLR();
        vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
        vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
        // track volumes come from shared memory, so can't be trusted and must be clamped
        if (vlf > GAIN_FLOAT_UNITY) {
            ALOGV("Track left volume out of range: %.3g", vlf);
            vlf = GAIN_FLOAT_UNITY;
        }
        if (vrf > GAIN_FLOAT_UNITY) {
            ALOGV("Track right volume out of range: %.3g", vrf);
            vrf = GAIN_FLOAT_UNITY;
        }
        const float vh = track->getVolumeHandler()->getVolume(
                track->mAudioTrackServerProxy->framesReleased()).first;
        // now apply the master volume and stream type volume and shaper volume
        vlf *= v * vh;
        vrf *= v * vh;
        // assuming master volume and stream type volume each go up to 1.0,
        // then derive vl and vr as U8.24 versions for the effect chain
        const float scaleto8_24 = MAX_GAIN_INT * MAX_GAIN_INT;
        vl = (uint32_t) (scaleto8_24 * vlf);
        vr = (uint32_t) (scaleto8_24 * vrf);
        // vl and vr are now in U8.24 format
        uint16_t sendLevel = proxy->getSendLevel_U4_12();
        // send level comes from shared memory and so may be corrupt
        if (sendLevel > MAX_GAIN_INT) {
            ALOGV("Track send level out of range: %04X", sendLevel);
            sendLevel = MAX_GAIN_INT;
        }
        // vaf is represented as [0.0, 1.0] float by rescaling sendLevel
        vaf = v * sendLevel * (1. / MAX_GAIN_INT);
    }
    // Feed the final left/right/aux gains to the mixer for this track.
    mAudioMixer->setParameter(name, param, AudioMixer::VOLUME0, &vlf);
    mAudioMixer->setParameter(name, param, AudioMixer::VOLUME1, &vrf);
    mAudioMixer->setParameter(name, param, AudioMixer::AUXLEVEL, &vaf);
    }
}
最终MixerThread会根据所设的音量参数重新对所有重采样的Track进行振幅的扩大和缩小,最终达到音量增大减小的效果。