概述:目前安卓系统上,音量是根据音频流类型来分类控制的,音频流有STREAM_MUSIC,STREAM_ALARM,STREAM_SYSTEM,STREAM_TTS等十多种,如果增大STREAM_MUSIC流的音量,那么所有媒体应用的音量都跟着增大,无法做到每个媒体应用拥有自己独立的音量。本文在混音器AudioMixer为音频流Track混音前,设置了应用程序所属音频流Track的音量从而达到音量相互独立的目的。
技术背景:安卓系统(基于Android7.1.1)中管理音频模块Native层的核心系统服务包括两部分,第一部分是AudioFlinger(简称AF),负责音频策略的执行,音频流的混音操作,Audio硬件接口的加载及访问等,JAVA层的每个AudioTrack对象(每一路音频流)在AF中都会抽象为一个Track对象(注意系统中会有同时存在多路音频流的情况),为了能将音频流Track的数据写入Audio硬件中,混音器AudioMixer会不断为各路音频流Track进行混音操作(本质上就是填充一些音频数据,设置音频流相关的参数);另外一部分是AudioPolicyService(简称APS),负责音频策略的制定,不过本次修改不涉及该部分;最后,管理音频模块Java层的核心系统服务是AudioService(简称AS)主要负责各类音频流音量的设置以及音频焦点的管理等。AF和APS都是运行在AudioServer进程中,AS是运行于System_Server进程中,AudioTrack和AudioSystem是运行于应用程序进程,本文涉及好几次Binder跨进程的数据传输,值得引起注意。
具体实现步骤:
1.在AF中将应用程序的包名,pid,音量值等信息封装为一个结构体AudioApplicationRef,再新增该结构体的容器mAudioApplicationRefs成员变量,用于保存应用程序的包名,pid,音量值的对应关系,并对上层提供应用程序包名,pid,音量值对应关系的添加、更新,移除的接口以及根据 应用程序pid 查询音量值的接口
先看AudioFlinger.h(frameworks/av/services/audioflinger/AudioFlinger.h)头文件的相关声明
...
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) const;
virtual void editAudioApplicationVolume(const String8& pckName,pid_t pid);//add 设置应用程序pid接口
virtual void removeAudioApplicationVolume(const String8& pckName,pid_t pid);//add 移除应用程序pid接口
virtual void registerClient(const sp<IAudioFlingerClient>& client);
...
void ioConfigChanged(audio_io_config_event event,
const sp<AudioIoDescriptor>& ioDesc,
pid_t pid = 0);
/**add begin 添加/查询应用程序音量接口**/
void addApplicationVolume(const String8& pckName,float volume);
float getApplicationVolume(pid_t pid);
String8 getApplicationVolumeStr() const;
void dumpApplicationVolume();
/**add end**/
// Allocate an audio_unique_id_t.
// Specific types are audio_io_handle_t, audio_session_t, effect ID (int),
// audio_module_handle_t, and audio_patch_handle_t.
// They all share the same ID space, but the namespaces are actually independent
// because there are separate KeyedVectors for each kind of ID.
// The return value is cast to the specific type depending on how the ID will be used.
// FIXME This API does not handle rollover to zero (for unsigned IDs),
// or from positive to negative (for signed IDs).
...
// for mAudioSessionRefs only
// Associates an audio session id with the pid of the client that acquired it.
// mCnt starts at 1; presumably incremented/decremented by the
// acquire/releaseAudioSessionId paths -- confirm against AudioFlinger.cpp.
struct AudioSessionRef {
AudioSessionRef(audio_session_t sessionid, pid_t pid) :
mSessionid(sessionid), mPid(pid), mCnt(1) {}
const audio_session_t mSessionid;
const pid_t mPid;
int mCnt; // reference count for this (session, pid) pair
};
//add start
// Per-application volume record stored in mAudioApplicationRefs.
// Maps a package name to the pid of its running process and its independent
// volume. Unlike AudioSessionRef above, the fields are mutable because
// editAudioApplicationVolume() refreshes mPid when the app gains focus and
// addApplicationVolume() rewrites mVolume when the app sets its volume.
// New records may be created with pid 0 (volume set before focus) or with
// volume 0 (focus acquired before any volume was set).
struct AudioApplicationRef {
AudioApplicationRef (const String8& pckName,pid_t pid,float volume):
mPckName(pckName),mPid(pid),mVolume(volume) {}
String8 mPckName; // application package name (lookup key for add/edit/remove)
pid_t mPid;       // pid of the app's process; 0 until audio focus is acquired
float mVolume;    // per-app volume parsed from "app_volume"; intended range (0,1] per the docs -- TODO confirm
};
//add end
mutable Mutex mLock;
...
// protected by mLock
Vector<AudioSessionRef*> mAudioSessionRefs;
Vector<AudioApplicationRef*> mAudioApplicationRefs; //add
...
在头文件中添加了自定义结构体AudioApplicationRef和容器mAudioApplicationRefs,另外还声明了两个虚函数:
1.editAudioApplicationVolume(const String8& pckName,pid_t pid)
2.removeAudioApplicationVolume(const String8& pckName,pid_t pid)
应用程序获取音频焦点的时候,会通过editAudioApplicationVolume()函数将应用的包名和pid 经过Binder接口跨进程写入AF并保存到mAudioApplicationRefs容器中,失去音频焦点时通过removeAudioApplicationVolume()经过Binder接口跨进程从mAudioApplicationRefs容器中移除,后面会看到这两个函数其实是被AudioSystem.cpp类调用,音频应用的焦点在AudioService.java类中是可以知道的;最后声明的四个函数:
1.addApplicationVolume(const String8& pckName,float volume)
给应用程序设置音量的接口,含两个参数一是应用程序的包名,另外一个是应用程序的音量,音量的范围是(0,1]浮点类型,应用程序层可以通过AudioManager设置参数的接口来设置自己独立的音量,当然设置的音量信息最终也会保存到AF的mAudioApplicationRefs容器中
参考代码:AudioManager mAudioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
设置应用音量的格式:应用包名_音量值(注意音量值的取值范围是 0-1 float型)
mAudioManager.setParameters("app_volume=cn.kuwo.player_0.5");
2.getApplicationVolume(pid_t pid)
AudioMixer混音器给系统中的音频流做混音操作时,通过该函数查询(其实具体查询的是mAudioApplicationRefs容器)当前音频流的音量并设置到混音器,参数是音频流所属进程的pid
3.getApplicationVolumeStr() const
应用程序通过该函数查询之前设置的音量信息,当然得注意这里是一股脑返回所有应用程序设置的音量!
参考代码:AudioManager mAudioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
mAudioManager.getParameters("app_volume");
4.dumpApplicationVolume()
打印mAudioApplicationRefs容器中的详细信息,也即是具体的应用程序包名,pid以及音量
再具体看看上述声明的函数在AudioFlinger.cpp(frameworks/av/services/audioflinger/AudioFlinger.cpp)中的实现
status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
{
ALOGV("setParameters(): io %d, keyvalue %s, calling pid %d",
ioHandle, keyValuePairs.string(), IPCThreadState::self()->getCallingPid());
// check calling permissions
if (!settingsAllowed()) {
return PERMISSION_DENIED;
}
// AUDIO_IO_HANDLE_NONE means the parameters are global to the audio hardware interface
if (ioHandle == AUDIO_IO_HANDLE_NONE) {
Mutex::Autolock _l(mLock);
// result will remain NO_INIT if no audio device is present
status_t final_result = NO_INIT;
#ifdef SRS_PROCESSING
POSTPRO_PATCH_PARAMS_SET(keyValuePairs);
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
thread->setPostPro();
}
#endif
{
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_HW_SET_PARAMETER;
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
status_t result = dev->set_parameters(dev, keyValuePairs.string());
// return success if at least one audio device accepts the parameters as not all
// HALs are requested to support all parameters. If no audio device supports the
// requested parameters, the last error is reported.
if (final_result != NO_ERROR) {
final_result = result;
}
}
mHardwareStatus = AUDIO_HW_IDLE;
}
AudioParameter param = AudioParameter(keyValuePairs);
String8 value, key;
key = String8("SND_CARD_STATUS");
if (param.get(key, value) == NO_ERROR) {
ALOGV("Set keySoundCardStatus:%s", value.string());
if ((value.find("OFFLINE", 0) != -1)) {
ALOGV("OFFLINE detected - call InvalidateTracks()");
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
if (thread->getOutput()->flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
thread->invalidateTracks(AUDIO_STREAM_MUSIC);
}
}
}
}
// disable AEC and NS if the device is a BT SCO headset supporting those pre processings
...
/**add begin: set a per-application volume when the custom "app_volume" key
 * is present. The value format is "<package>_<volume>", e.g.
 * "com.mogo.music_0.3". Split on the LAST underscore, because Android
 * package names may themselves contain underscores (e.g. "com.foo_bar");
 * the original find("_", 0) split on the first one and also never checked
 * for a missing separator. */
String8 appVolume;
if (param.get(String8(AudioParameter::keyAppVolume), appVolume) == NO_ERROR) {
    std::string content = appVolume.string();
    std::string::size_type index = content.rfind('_');
    if (index != std::string::npos && index + 1 < content.length()) {
        std::string pckName = content.substr(0, index);
        float volume = atof(content.substr(index + 1).c_str());
        addApplicationVolume(String8(pckName.c_str()), volume);
    } // else: malformed value, ignore rather than storing garbage
}
/**add end**/
return final_result;
}
...
return BAD_VALUE;
}
String8 AudioFlinger::getParameters(audio_io_handle_t ioHandle, const String8& keys) const
{
ALOGVV("getParameters() io %d, keys %s, calling pid %d",
ioHandle, keys.string(), IPCThreadState::self()->getCallingPid());
Mutex::Autolock _l(mLock);
if (ioHandle == AUDIO_IO_HANDLE_NONE) {
String8 out_s8;
#ifdef SRS_PROCESSING
POSTPRO_PATCH_PARAMS_GET(keys, out_s8);
#endif
/**add begin 同理根据自定义的key判断获取应用程序的音量**/
if(strcmp(keys.string(),String8(AudioParameter::keyAppVolume).string())==0){
return getApplicationVolumeStr();
}
/**add end**/
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
char *s;
{
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_HW_GET_PARAMETER;
audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
s = dev->get_parameters(dev, keys.string());
mHardwareStatus = AUDIO_HW_IDLE;
}
out_s8 += String8(s ? s : "");
free(s);
}
return out_s8;
}
...
return String8("");
}
...
// Notify registered IAudioFlingerClient callbacks of an audio I/O
// configuration change. pid == 0 broadcasts to every registered client;
// a non-zero pid targets only the client registered under that pid.
// mClientLock guards mNotificationClients.
void AudioFlinger::ioConfigChanged(audio_io_config_event event,
const sp<AudioIoDescriptor>& ioDesc,
pid_t pid)
{
Mutex::Autolock _l(mClientLock);
size_t size = mNotificationClients.size();
for (size_t i = 0; i < size; i++) {
if ((pid == 0) || (mNotificationClients.keyAt(i) == pid)) {
mNotificationClients.valueAt(i)->audioFlingerClient()->ioConfigChanged(event, ioDesc);
}
}
}
/** add begin 添加/移除/查询应用程序音量接口的实现**/
void AudioFlinger::addApplicationVolume(const String8& pckName,float volume)
{
size_t num=mAudioApplicationRefs.size();
for(size_t i=0; i<num ;i++){
AudioApplicationRef * ref=mAudioApplicationRefs.itemAt(i);
if(strcmp(ref->mPckName.string(),pckName.string())==0){
ref->mVolume=volume;
return;
}
}
mAudioApplicationRefs.push(new AudioApplicationRef(pckName,0,volume));
dumpApplicationVolume();
}
void AudioFlinger::editAudioApplicationVolume(const String8& pckName,pid_t pid)
{
size_t num=mAudioApplicationRefs.size();
for(size_t i=0; i<num ;i++){
AudioApplicationRef * ref=mAudioApplicationRefs.itemAt(i);
if(strcmp(ref->mPckName.string(),pckName.string())==0){
ref->mPid=pid;
dumpApplicationVolume();
return ;
}
}
mAudioApplicationRefs.push(new AudioApplicationRef(pckName,pid,0));
dumpApplicationVolume();
}
void AudioFlinger::removeAudioApplicationVolume(const String8& pckName,pid_t pid)
{
size_t num=mAudioApplicationRefs.size();
for(size_t i=0; i<num ;i++){
AudioApplicationRef * ref=mAudioApplicationRefs.itemAt(i);
if(strcmp(ref->mPckName.string(),pckName.string())==0 || ref->mPid == pid){
mAudioApplicationRefs.removeAt(i);
delete ref;
dumpApplicationVolume();
return;
}
}
}
// Look up the independent volume previously stored for the application that
// owns pid. Returns -1 when no record exists, so callers can distinguish
// "no per-app volume configured" from a real volume value.
// NOTE(review): called from the mixer paths (prepareTracks_l /
// processVolume_l) while a ThreadBase lock is held, so mLock is NOT taken
// here to avoid lock-order inversion; reads may race Binder-side updates --
// confirm this trade-off is acceptable.
float AudioFlinger::getApplicationVolume(pid_t pid)
{
    size_t num = mAudioApplicationRefs.size();
    for (size_t i = 0; i < num; i++) {
        AudioApplicationRef *ref = mAudioApplicationRefs.itemAt(i);
        if (ref->mPid == pid) {
            return ref->mVolume;
        }
    }
    // FIX: do NOT dumpApplicationVolume() on a miss. This function runs once
    // per active track per mix cycle (~every 20ms), and the original dump
    // call flooded the log with ALOGE for every track lacking an entry.
    return -1;
}
// Build the string returned for getParameters("app_volume"): every stored
// record, formatted as "<package>_<volume>" and joined with ';', e.g.
// "com.a_0.50;com.b_0.30". Volumes are rounded to two decimal places.
// FIX: append the ';' separator only between entries instead of trimming a
// trailing one with substr(0, length()-1), which silently relied on size_t
// underflow (length()-1 == npos) when the container was empty.
String8 AudioFlinger::getApplicationVolumeStr() const
{
    std::string result;
    size_t num = mAudioApplicationRefs.size();
    for (size_t i = 0; i < num; i++) {
        AudioApplicationRef *ref = mAudioApplicationRefs.itemAt(i);
        float tempValue = round(ref->mVolume * 100) / 100.0;
        std::string pckName = ref->mPckName.string();
        if (i != 0) {
            result += ";";
        }
        result += pckName + "_" + std::to_string(tempValue);
    }
    return String8(result.c_str());
}
void AudioFlinger::dumpApplicationVolume()
{
size_t num=mAudioApplicationRefs.size();
for(size_t i=0; i<num ;i++){
AudioApplicationRef * ref=mAudioApplicationRefs.itemAt(i);
ALOGE("AudioApplication==i:%d===mPckName:%s==mPid:%d===mVolume:%f==",(int)i,ref->mPckName.string(),ref->mPid,ref->mVolume);
}
}
/** add end**/
...
1.setParameters()
供AudioManager.setParameters()调用的,也即是在应用层为应用程序设置音量
2.getParameters()
供AudioManager.getParameters()调用的,也即是在应用层查询应用程序的音量信息
其它函数在AudioFlinger.h的头文件中声明的时候已经说明了详细用途
2.在AF中的音频流对象Track上添加pid 成员变量,AF每次构建音频流对象Track时,将应用程序传递过来的pid保存到Track自己的成员变量mPid中
头文件 frameworks/av/services/audioflinger/PlaybackTracks.h 声明成员变量mPid
...
public:
void triggerEvents(AudioSystem::sync_event_t type);
void invalidate();
void disable();
bool isInvalid() const { return mIsInvalid; }
int fastIndex() const { return mFastIndex; }
pid_t mPid;//add 新增pid的声明
protected:
// FILLED state is used for suppressing volume ramp at begin of playing
enum {FS_INVALID, FS_FILLING, FS_FILLED, FS_ACTIVE};
...
frameworks/av/services/audioflinger/Tracks.cpp AF端构建Track对象的时候保存pid信息
// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
AudioFlinger::PlaybackThread::Track::Track(
PlaybackThread *thread,
const sp<Client>& client,
audio_stream_type_t streamType,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
void *buffer,
const sp<IMemory>& sharedBuffer,
audio_session_t sessionId,
int uid,
audio_output_flags_t flags,
track_type type)
: TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
(sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
sessionId, uid, true /*isOut*/,
(type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
type),
mFillingUpStatus(FS_INVALID),
// mRetryCount initialized later when needed
mSharedBuffer(sharedBuffer),
mStreamType(streamType),
mName(-1), // see note below
mMainBuffer(thread->mixBuffer()),
mAuxBuffer(NULL),
mAuxEffectId(0), mHasVolumeController(false),
mPresentationCompleteFrames(0),
mFrameMap(16 /* sink-frame-to-track-frame map memory */),
// mSinkTimestamp
mFastIndex(-1),
mCachedVolume(1.0),
mIsInvalid(false),
mAudioTrackServerProxy(NULL),
mResumeToStopping(false),
mFlushHwPending(false),
mFlags(flags)
{
// client == 0 implies sharedBuffer == 0
ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
sharedBuffer->size());
if (mCblk == NULL) {
return;
}
// add: remember the owning app's pid so the mixer can later look up its
// per-app volume. FIX: client may be a NULL sp<> -- the assert above only
// forbids client == 0 *with* sharedBuffer != 0 -- so guard the dereference.
// -1 is a sentinel no stored record uses, so such tracks get no override.
mPid = (client != 0) ? client->pid() : (pid_t)-1;
if (sharedBuffer == 0) {
mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
mFrameSize, !isExternalTrack(), sampleRate);
} else {
// Is the shared buffer of sufficient size?
// (frameCount * mFrameSize) is <= SIZE_MAX, checked in TrackBase.
if (sharedBuffer->size() < frameCount * mFrameSize) {
// Workaround: clear out mCblk to indicate track hasn't been properly created.
mCblk->~audio_track_cblk_t(); // destroy our shared-structure.
if (mClient == 0) {
free(mCblk);
}
mCblk = NULL;
mSharedBuffer.clear(); // release shared buffer early
android_errorWriteLog(0x534e4554, "38340117");
return;
}
mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
mFrameSize);
}
...
}
AF中没有包名的概念,只能保存应用程序运行时随机分配的pid,不过好在这个pid和包名一样也是唯一的,保存下pid后,后期混音器混音时可以通过Track的pid去AF中查询自己对应的音量。
3.在应用程序请求音频焦点时,将包名和pid的对应关系写入AF,失去音频焦点时移除
首先知道音频应用焦点的类是AudioService.java,不过在写入AF之前还得在AudioSystem类中添加一套JNI接口和一套Native层的Binder接口,才能通过Binder跨进程读写数据。首先是JNI接口
frameworks/av/media/libmedia/AudioSystem.cpp
...
// Forward a session-id release to AudioFlinger; silently a no-op when the
// audioserver connection is unavailable.
void AudioSystem::releaseAudioSessionId(audio_session_t audioSession, pid_t pid)
{
    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
    if (af == 0) {
        return;
    }
    af->releaseAudioSessionId(audioSession, pid);
}
/** add begin: register an application's (package name, pid) pair with
 *  AudioFlinger over Binder; no-op when audioserver is unreachable. **/
void AudioSystem::editAudioApplicationVolume(const String8& pckName, pid_t pid)
{
    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
    if (af == 0) {
        return;
    }
    af->editAudioApplicationVolume(pckName, pid);
}
// Ask AudioFlinger to drop the per-app volume record for this package/pid;
// no-op when audioserver is unreachable.
void AudioSystem::removeAudioApplicationVolume(const String8& pckName, pid_t pid)
{
    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
    if (af == 0) {
        return;
    }
    af->removeAudioApplicationVolume(pckName, pid);
}
/** add end**/
// Query AudioFlinger for the HW sync source bound to a session; returns
// AUDIO_HW_SYNC_INVALID when audioserver is unreachable.
audio_hw_sync_t AudioSystem::getAudioHwSyncForSession(audio_session_t sessionId)
{
    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
    if (af != 0) {
        return af->getAudioHwSyncForSession(sessionId);
    }
    return AUDIO_HW_SYNC_INVALID;
}
...
af->editAudioApplicationVolume(pckName,pid)已经在第一步在AudioFlinger.h中声明了的,同理af->removeAudioApplicationVolume(pckName,pid)也是
frameworks/base/core/jni/android_media_AudioSystem.cpp
...
/**add begin: JNI implementations for setting/removing an application's pid.
 * FIX: the original called env->GetStringCritical(pckName, 0) BEFORE the
 * null check on pckName -- passing a null jstring to GetStringCritical is
 * undefined behavior and crashes on most VMs. Acquire the chars only inside
 * the null check, and also verify GetStringCritical's return value. **/
static void
android_media_AudioSystem_editAudioApplicationVolume(JNIEnv *env, jobject thiz,jstring pckName,jint pid)
{
    String8 c_pckName8; // empty when pckName is null
    if (pckName) {
        const jchar* c_pckName = env->GetStringCritical(pckName, 0);
        if (c_pckName != NULL) {
            c_pckName8 = String8(reinterpret_cast<const char16_t*>(c_pckName),
                                 env->GetStringLength(pckName));
            env->ReleaseStringCritical(pckName, c_pckName);
        }
    }
    AudioSystem::editAudioApplicationVolume(c_pckName8, static_cast<pid_t>(pid));
}
// JNI: remove an application's per-app volume record (package name + pid).
// FIX: same defect as the edit variant -- GetStringCritical was called
// before the null check on pckName; a null jstring is UB. Acquire the chars
// only when pckName is non-null and check the returned pointer.
static void
android_media_AudioSystem_removeAudioApplicationVolume(JNIEnv *env, jobject thiz,jstring pckName,jint pid)
{
    String8 c_pckName8; // empty when pckName is null
    if (pckName) {
        const jchar* c_pckName = env->GetStringCritical(pckName, 0);
        if (c_pckName != NULL) {
            c_pckName8 = String8(reinterpret_cast<const char16_t*>(c_pckName),
                                 env->GetStringLength(pckName));
            env->ReleaseStringCritical(pckName, c_pckName);
        }
    }
    AudioSystem::removeAudioApplicationVolume(c_pckName8, static_cast<pid_t>(pid));
}
/**add end **/
// Native-to-Java error bridge: forwards a native status code to the static
// Java method AudioSystem.errorCallbackFromNative(int). Bails out silently
// if no JNIEnv is attached to the current thread.
static void
android_media_AudioSystem_error_callback(status_t err)
{
JNIEnv *env = AndroidRuntime::getJNIEnv();
if (env == NULL) {
return;
}
jclass clazz = env->FindClass(kClassPathName);
env->CallStaticVoidMethod(clazz, env->GetStaticMethodID(clazz,
"errorCallbackFromNative","(I)V"),
check_AudioSystem_Command(err));
// release the local class ref promptly since callbacks may fire often
env->DeleteLocalRef(clazz);
}
...
// ----------------------------------------------------------------------------
static const JNINativeMethod gMethods[] = {
{"setParameters", "(Ljava/lang/String;)I", (void *)android_media_AudioSystem_setParameters},
{"getParameters", "(Ljava/lang/String;)Ljava/lang/String;", (void *)android_media_AudioSystem_getParameters},
/**add begin 添加函数的签名**/
{"editApplicationVolume","(Ljava/lang/String;I)V",(void *) android_media_AudioSystem_editAudioApplicationVolume},
{"removeApplicationVolume","(Ljava/lang/String;I)V",(void *)android_media_AudioSystem_removeAudioApplicationVolume},
/**add end**/
{"muteMicrophone", "(Z)I", (void *)android_media_AudioSystem_muteMicrophone},
{"isMicrophoneMuted", "()Z", (void *)android_media_AudioSystem_isMicrophoneMuted},
{"isStreamActive", "(II)Z", (void *)android_media_AudioSystem_isStreamActive},
{"isStreamActiveRemotely","(II)Z", (void *)android_media_AudioSystem_isStreamActiveRemotely},
{"isSourceActive", "(I)Z", (void *)android_media_AudioSystem_isSourceActive},
...
};
// JNI registration table for the AudioSystem event-handler helper: maps the
// Java native_setup/native_finalize methods to their C++ implementations.
static const JNINativeMethod gEventHandlerMethods[] = {
{"native_setup",
"(Ljava/lang/Object;)V",
(void *)android_media_AudioSystem_eventHandlerSetup},
{"native_finalize",
"()V",
(void *)android_media_AudioSystem_eventHandlerFinalize},
};
...
frameworks/base/media/java/android/media/AudioSystem.java
...
/*
* Gets a group generic audio configuration parameters. The use of these parameters
* are platform dependent, see libaudio
*
* param keys list of parameters
* return value: list of parameters key value pairs in the form:
* key1=value1;key2=value2;...
*/
public static native String getParameters(String keys);
/**add begin JAVA层增加声明本地设置/移除应用程序pid的方法**/
public static native void editApplicationVolume(String pckName,int pid);
public static native void removeApplicationVolume(String pckName,int pid);
/**add end**/
// These match the enum AudioError in frameworks/base/core/jni/android_media_AudioSystem.cpp
/* Command sucessful or Media server restarted. see ErrorCallback */
public static final int AUDIO_STATUS_OK = 0;
/* Command failed or unspecified audio error. see ErrorCallback */
public static final int AUDIO_STATUS_ERROR = 1;
/* Media server died. see ErrorCallback */
public static final int AUDIO_STATUS_SERVER_DIED = 100;
...
然后添加一套Native层的Binder接口,实现数据从AudioSystem.cpp类所在的应用程序进程写到AudioFlinger.cpp类所在的AudioServer进程
frameworks/av/include/media/IAudioFlinger.h
virtual void acquireAudioSessionId(audio_session_t audioSession, pid_t pid) = 0;
virtual void releaseAudioSessionId(audio_session_t audioSession, pid_t pid) = 0;
//add start 新增的Native层 Binder接口
virtual void editAudioApplicationVolume(const String8& pckName,pid_t pid)=0;
virtual void removeAudioApplicationVolume(const String8& pckName,pid_t pid)=0;
//add end
virtual status_t queryNumberEffects(uint32_t *numEffects) const = 0;
virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor) const = 0;
frameworks/av/media/libmedia/IAudioFlinger.cpp
enum {
...
ACQUIRE_AUDIO_SESSION_ID,
RELEASE_AUDIO_SESSION_ID,
EDIT_APPLICATION_VOLUME,//设置应用程序pid
REMOVE_APPLICATION_VOLUME,//移除应用程序pid
...
};
// Client-side Binder proxy: release a session id on behalf of pid.
// Parcel write order (token, int32 session, int32 pid) must match the read
// order in BnAudioFlinger::onTransact (RELEASE_AUDIO_SESSION_ID).
virtual void releaseAudioSessionId(audio_session_t audioSession, int pid)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeInt32(audioSession);
data.writeInt32(pid);
remote()->transact(RELEASE_AUDIO_SESSION_ID, data, &reply);
}
/**add begin: client-side Binder proxies for setting/removing the per-app
 * volume association. Parcel write order (interface token, String8 package
 * name, int32 pid) must match the read order in BnAudioFlinger::onTransact
 * case EDIT_APPLICATION_VOLUME. **/
virtual void editAudioApplicationVolume(const String8& pckName,int pid)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeString8(pckName);
data.writeInt32(pid);
remote()->transact(EDIT_APPLICATION_VOLUME, data, &reply);
}
// Client-side Binder proxy: ask AudioFlinger to drop the per-app volume
// record. Write order (token, String8, int32) must match the read order in
// BnAudioFlinger::onTransact case REMOVE_APPLICATION_VOLUME.
virtual void removeAudioApplicationVolume(const String8& pckName,int pid)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeString8(pckName);
data.writeInt32(pid);
remote()->transact(REMOVE_APPLICATION_VOLUME, data, &reply);
}
/**add end**/
...
};
IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
// ----------------------------------------------------------------------
status_t BnAudioFlinger::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch (code) {
...
case ACQUIRE_AUDIO_SESSION_ID: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
audio_session_t audioSession = (audio_session_t) data.readInt32();
int pid = data.readInt32();
acquireAudioSessionId(audioSession, pid);
return NO_ERROR;
} break;
case RELEASE_AUDIO_SESSION_ID: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
audio_session_t audioSession = (audio_session_t) data.readInt32();
int pid = data.readInt32();
releaseAudioSessionId(audioSession, pid);
return NO_ERROR;
} break;
/**add begin**/
case EDIT_APPLICATION_VOLUME:{
CHECK_INTERFACE(IAudioFlinger, data, reply);
String8 pckName(data.readString8());
int pid = data.readInt32();
editAudioApplicationVolume(pckName,pid);
return NO_ERROR;
}break;
case REMOVE_APPLICATION_VOLUME:{
CHECK_INTERFACE(IAudioFlinger, data, reply);
String8 pckName(data.readString8());
int pid = data.readInt32();
removeAudioApplicationVolume(pckName,pid);
return NO_ERROR;
}break;
/**add end**/
...
最后在请求音频焦点时调用AudioSystem的editApplicationVolume()方法,失去焦点时调用removeApplicationVolume()方法
frameworks/base/services/core/java/com/android/server/audio/AudioService.java
...
//==========================================================================================
// Audio Focus
//==========================================================================================
/**
 * Handles an app's audio-focus request. Performs permission checks for the
 * AUDIOFOCUS_FLAG_LOCK case, then registers the caller's package/pid with
 * AudioFlinger (for per-app volume lookup at mix time) before delegating to
 * MediaFocusControl.
 */
public int requestAudioFocus(AudioAttributes aa, int durationHint, IBinder cb,
IAudioFocusDispatcher fd, String clientId, String callingPackageName, int flags,
IAudioPolicyCallback pcb) {
// permission checks
if ((flags & AudioManager.AUDIOFOCUS_FLAG_LOCK) == AudioManager.AUDIOFOCUS_FLAG_LOCK) {
if (AudioSystem.IN_VOICE_COMM_FOCUS_ID.equals(clientId)) {
if (PackageManager.PERMISSION_GRANTED != mContext.checkCallingOrSelfPermission(
android.Manifest.permission.MODIFY_PHONE_STATE)) {
Log.e(TAG, "Invalid permission to (un)lock audio focus", new Exception());
return AudioManager.AUDIOFOCUS_REQUEST_FAILED;
}
} else {
// only a registered audio policy can be used to lock focus
synchronized (mAudioPolicies) {
if (!mAudioPolicies.containsKey(pcb.asBinder())) {
Log.e(TAG, "Invalid unregistered AudioPolicy to (un)lock audio focus");
return AudioManager.AUDIOFOCUS_REQUEST_FAILED;
}
}
}
}
//add: the app is acquiring audio focus -- record its package name and pid
// in AudioFlinger so the mixer can apply the app's independent volume.
// NOTE(review): this registers the pid even if the subsequent
// MediaFocusControl request is denied -- confirm that is intended.
AudioSystem.editApplicationVolume(callingPackageName,Binder.getCallingPid());
return mMediaFocusControl.requestAudioFocus(aa, durationHint, cb, fd,
clientId, callingPackageName, flags);
}
/**
 * Handles an app abandoning audio focus: removes its per-app volume record
 * from AudioFlinger, then delegates to MediaFocusControl.
 */
public int abandonAudioFocus(IAudioFocusDispatcher fd, String clientId, AudioAttributes aa) {
//add: the app is abandoning focus -- remove its record from AudioFlinger.
// "abandon" is a sentinel package name that matches no real package; the
// record is actually located by the calling pid, because AudioFlinger's
// removeAudioApplicationVolume() matches on package name OR pid.
AudioSystem.removeApplicationVolume("abandon", Binder.getCallingPid());
return mMediaFocusControl.abandonAudioFocus(fd, clientId, aa);
}
...
4.AF向应用层提供设置应用程序音量的接口,应用程序调用音量设置接口向AF写入应用程序包名和音量的对应关系
frameworks/av/include/media/AudioParameter.h
...
namespace android {
class AudioParameter {
public:
AudioParameter() {}
AudioParameter(const String8& keyValuePairs);
virtual ~AudioParameter();
// reserved parameter keys for changing standard parameters with setParameters() function.
// Using these keys is mandatory for AudioFlinger to properly monitor audio output/input
// configuration changes and act accordingly.
// keyRouting: to change audio routing, value is an int in audio_devices_t
// keySamplingRate: to change sampling rate routing, value is an int
// keyFormat: to change audio format, value is an int in audio_format_t
// keyChannels: to change audio channel configuration, value is an int in audio_channels_t
// keyFrameCount: to change audio output frame count, value is an int
// keyInputSource: to change audio input source, value is an int in audio_source_t
// (defined in media/mediarecorder.h)
// keyScreenState: either "on" or "off"
static const char * const keyRouting;
static const char * const keySamplingRate;
static const char * const keyFormat;
static const char * const keyChannels;
static const char * const keyFrameCount;
static const char * const keyInputSource;
static const char * const keyScreenState;
static const char * const keyAppVolume;//add 设置应用程序音量的key
String8 toString();
status_t add(const String8& key, const String8& value);
...
frameworks/av/media/libmedia/AudioParameter.cpp
namespace android {
// static
const char * const AudioParameter::keyRouting = AUDIO_PARAMETER_STREAM_ROUTING;
const char * const AudioParameter::keySamplingRate = AUDIO_PARAMETER_STREAM_SAMPLING_RATE;
const char * const AudioParameter::keyFormat = AUDIO_PARAMETER_STREAM_FORMAT;
const char * const AudioParameter::keyChannels = AUDIO_PARAMETER_STREAM_CHANNELS;
const char * const AudioParameter::keyFrameCount = AUDIO_PARAMETER_STREAM_FRAME_COUNT;
const char * const AudioParameter::keyInputSource = AUDIO_PARAMETER_STREAM_INPUT_SOURCE;
const char * const AudioParameter::keyScreenState = AUDIO_PARAMETER_KEY_SCREEN_STATE;
const char * const AudioParameter::keyAppVolume=AUDIO_PARAMETER_APPLICATION_VOLUME;//add 设置应用程序音量的key
AudioParameter::AudioParameter(const String8& keyValuePairs)
{
...
}
...
hardware/libhardware/include/hardware/audio.h
...
#define AUDIO_PARAMETER_STREAM_ROUTING "routing" /* audio_devices_t */
#define AUDIO_PARAMETER_STREAM_FORMAT "format" /* audio_format_t */
#define AUDIO_PARAMETER_STREAM_CHANNELS "channels" /* audio_channel_mask_t */
#define AUDIO_PARAMETER_STREAM_FRAME_COUNT "frame_count" /* size_t */
#define AUDIO_PARAMETER_STREAM_INPUT_SOURCE "input_source" /* audio_source_t */
#define AUDIO_PARAMETER_STREAM_SAMPLING_RATE "sampling_rate" /* uint32_t */
#define AUDIO_PARAMETER_APPLICATION_VOLUME "app_volume" /**add 应用程序音量的Key**/
#define AUDIO_PARAMETER_DEVICE_CONNECT "connect" /* audio_devices_t */
#define AUDIO_PARAMETER_DEVICE_DISCONNECT "disconnect" /* audio_devices_t */
/* Query supported formats. The response is a '|' separated list of strings from
* audio_format_t enum e.g: "sup_formats=AUDIO_FORMAT_PCM_16_BIT" */
#define AUDIO_PARAMETER_STREAM_SUP_FORMATS "sup_formats"
/* Query supported channel masks. The response is a '|' separated list of strings from
* audio_channel_mask_t enum e.g: "sup_channels=AUDIO_CHANNEL_OUT_STEREO|AUDIO_CHANNEL_OUT_MONO" */
#define AUDIO_PARAMETER_STREAM_SUP_CHANNELS "sup_channels"
...
在应用程序层,这一步还需要结合第1步AudioFlinger中setParameters()和getParameters()函数实现才能使用,再通过AudioManager.setParameters()和AudioManager.getParameters()方法即可设置和查询应用程序的音量
示例代码:
AudioManager mAudioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
设置应用音量的格式:应用包名_音量值(注意音量值的取值范围是 0-1 float型)
mAudioManager.setParameters("app_volume=cn.kuwo.player_0.5");
5.AF处理各路音频流向混音器AudioMixer设置音频流Track的参数时会设置一次Track的音量,此时可以根据Track的pid 向AF查询该Track对应的应用程序的音量,一旦查询到音量即可利用AudioMixer设置该Track的音量,而且这样设置各个音频流Track之间的音量相互不影响的目的,彼此音量独立
frameworks/av/services/audioflinger/Threads.cpp
// prepareTracks_l() must be called with ThreadBase::mLock held
AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTracks_l(
Vector< sp<Track> > *tracksToRemove)
{
...
if ((framesReady >= minFrames) && track->isReady() &&
!track->isPaused() && !track->isTerminated())
{
ALOGVV("track %d s=%08x [OK] on thread %p", name, cblk->mServer, this);
mixedTracks++;
// track->mainBuffer() != mSinkBuffer or mMixerBuffer means
// there is an effect chain connected to the track
chain.clear();
if (track->mainBuffer() != mSinkBuffer &&
track->mainBuffer() != mMixerBuffer) {
if (mEffectBufferEnabled) {
mEffectBufferValid = true; // Later can set directly.
}
chain = getEffectChain_l(track->sessionId());
// Delegate volume control to effect in track effect chain if needed
if (chain != 0) {
tracksWithEffect++;
} else {
ALOGW("prepareTracks_l(): track %d attached to effect but no chain found on "
"session %d",
name, track->sessionId());
}
}
int param = AudioMixer::VOLUME;
if (track->mFillingUpStatus == Track::FS_FILLED) {
// no ramp for the first volume setting
track->mFillingUpStatus = Track::FS_ACTIVE;
if (track->mState == TrackBase::RESUMING) {
track->mState = TrackBase::ACTIVE;
param = AudioMixer::RAMP_VOLUME;
}
mAudioMixer->setParameter(name, AudioMixer::RESAMPLE, AudioMixer::RESET, NULL);
// FIXME should not make a decision based on mServer
} else if (cblk->mServer != 0) {
// If the track is stopped before the first frame was mixed,
// do not apply ramp
param = AudioMixer::RAMP_VOLUME;
}
// compute volume for this track
uint32_t vl, vr; // in U8.24 integer format
float vlf, vrf, vaf; // in [0.0, 1.0] float format
if (track->isPausing() || mStreamTypes[track->streamType()].mute) {
vl = vr = 0;
vlf = vrf = vaf = 0.;
if (track->isPausing()) {
track->setPaused();
}
} else {
// read original volumes with volume control
float typeVolume = mStreamTypes[track->streamType()].volume;
float v = masterVolume * typeVolume;
AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
gain_minifloat_packed_t vlr = proxy->getVolumeLR();
vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
// track volumes come from shared memory, so can't be trusted and must be clamped
if (vlf > GAIN_FLOAT_UNITY) {
ALOGV("Track left volume out of range: %.3g", vlf);
vlf = GAIN_FLOAT_UNITY;
}
if (vrf > GAIN_FLOAT_UNITY) {
ALOGV("Track right volume out of range: %.3g", vrf);
vrf = GAIN_FLOAT_UNITY;
}
// now apply the master volume and stream type volume
vlf *= v;
vrf *= v;
// assuming master volume and stream type volume each go up to 1.0,
// then derive vl and vr as U8.24 versions for the effect chain
const float scaleto8_24 = MAX_GAIN_INT * MAX_GAIN_INT;
vl = (uint32_t) (scaleto8_24 * vlf);
vr = (uint32_t) (scaleto8_24 * vrf);
// vl and vr are now in U8.24 format
uint16_t sendLevel = proxy->getSendLevel_U4_12();
// send level comes from shared memory and so may be corrupt
if (sendLevel > MAX_GAIN_INT) {
ALOGV("Track send level out of range: %04X", sendLevel);
sendLevel = MAX_GAIN_INT;
}
// vaf is represented as [0.0, 1.0] float by rescaling sendLevel
vaf = v * sendLevel * (1. / MAX_GAIN_INT);
}
#ifdef DOLBY_ENABLE // DOLBY_DAP_PREGAIN
dvlf = vlf;
dvrf = vrf;
#endif // DOLBY_END
// Delegate volume control to effect in track effect chain if needed
if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
// Do not ramp volume if volume is controlled by effect
param = AudioMixer::VOLUME;
// Update remaining floating point volume levels
vlf = (float)vl / (1 << 24);
vrf = (float)vr / (1 << 24);
track->mHasVolumeController = true;
} else {
// force no volume ramp when volume controller was just disabled or removed
// from effect chain to avoid volume spike
if (track->mHasVolumeController) {
param = AudioMixer::VOLUME;
}
track->mHasVolumeController = false;
}
#ifdef DOLBY_ENABLE // DOLBY_DAP_PREGAIN
// Select the maximum volume by scanning all the active audio tracks but not the output one.
if (!track->isOutputTrack() && !EffectDapController::instance()->bypassTrack(track)) {
if (track->mHasVolumeController) {
const float scaleto8_24 = MAX_GAIN_INT * MAX_GAIN_INT;
max_vol = max(max_vol, max(((uint32_t) (scaleto8_24 * vlf * dvlf)), ((uint32_t) (scaleto8_24 * vrf * dvrf))));
} else {
max_vol = max(max_vol, max(vl, vr));
}
}
#endif // DOLBY_END
/**add begin: before mixing, look up this Track's per-app volume in AF and
 * apply it. getApplicationVolume() returns -1 when the app has no record.
 * FIX: accept appVolume == 0 so an app can actually be muted; the original
 * (appVolume > 0 && appVolume < 1) silently ignored a stored volume of 0.
 * Exactly 1.0 (or no record) still leaves the normal master/stream/track
 * gains in effect. **/
float appVolume = mAudioFlinger->getApplicationVolume(track->mPid);
if (appVolume >= 0.0f && appVolume < 1.0f) {
    vrf = vlf = appVolume; // override L/R gains with the per-app volume
}
/**add end**/
// XXX: these things DON'T need to be done each time
mAudioMixer->setBufferProvider(name, track);
mAudioMixer->enable(name);
...
}
...
// Compute and apply the output volume for a track on a direct-output
// (hardware-decoded) thread. Gains are derived from master volume, stream
// type volume and the track's own L/R gains (clamped to unity), optionally
// delegated to the thread's single effect chain, and finally pushed to the
// HAL via set_volume() -- overridden by the per-app volume when one is set.
void AudioFlinger::DirectOutputThread::processVolume_l(Track *track, bool lastTrack)
{
    float left, right;
    if (mMasterMute || mStreamTypes[track->streamType()].mute) {
        left = right = 0;
    } else {
        // read original volumes with volume control
        float typeVolume = mStreamTypes[track->streamType()].volume;
        float v = mMasterVolume * typeVolume;
        AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
        gain_minifloat_packed_t vlr = proxy->getVolumeLR();
        // track volumes come from shared memory, so clamp before trusting them
        left = float_from_gain(gain_minifloat_unpack_left(vlr));
        if (left > GAIN_FLOAT_UNITY) {
            left = GAIN_FLOAT_UNITY;
        }
        left *= v;
        right = float_from_gain(gain_minifloat_unpack_right(vlr));
        if (right > GAIN_FLOAT_UNITY) {
            right = GAIN_FLOAT_UNITY;
        }
        right *= v;
    }
    if (lastTrack) {
        if (left != mLeftVolFloat || right != mRightVolFloat) {
            mLeftVolFloat = left;
            mRightVolFloat = right;
            // Convert volumes from float to 8.24
            uint32_t vl = (uint32_t)(left * (1 << 24));
            uint32_t vr = (uint32_t)(right * (1 << 24));
            // Delegate volume control to effect in track effect chain if needed
            // only one effect chain can be present on DirectOutputThread, so if
            // there is one, the track is connected to it
            if (!mEffectChains.isEmpty()) {
                mEffectChains[0]->setVolume_l(&vl, &vr);
                left = (float)vl / (1 << 24);
                right = (float)vr / (1 << 24);
            }
            if (mOutput->stream->set_volume) {
                /**add begin: direct-output path -- query AF for this Track's
                 * per-app volume before pushing volume to the HAL.
                 * FIX: accept appVolume == 0 so an app can be muted; the
                 * original (> 0 && < 1) ignored a stored volume of 0.
                 * -1 (no record) and exactly 1.0 keep the normal gains. **/
                float appVolume = mAudioFlinger->getApplicationVolume(track->mPid);
                if (appVolume >= 0.0f && appVolume < 1.0f) {
                    left = right = appVolume;
                }
                /**add end**/
                mOutput->stream->set_volume(mOutput->stream, left, right);
            }
#ifdef DOLBY_ENABLE // DOLBY_DAP_PREGAIN
            if (!EffectDapController::instance()->bypassTrack(track)) {
                // Update the volume set for the current thread
                EffectDapController::instance()->updatePregain(mType, mId, mOutput->flags, max(vl, vr));
            }
#endif // DOLBY_END
        }
    }
}
...
注意:安卓系统中处理音频流的子线程总体分为回放线程和录音线程两种;回放线程又可细分为四种(详细参考下图),分别是 MixerThread、DirectOutputThread、DuplicatingThread、OffloadThread。在这里根据使用场景笼统地考虑两类线程即可,即普通混音回放线程和直接输出(需要硬解码)的线程。函数 MixerThread::prepareTracks_l() 和 DirectOutputThread::processVolume_l() 分别负责这两类线程中音频流 Track 相关参数的设置,因此可以在这两个函数中设置当前 Track 对应的音量;并且设置完音量之后并不会影响其它音频流 Track 的音量,实现彼此独立!
6.由于给应用程序设置的音量是运行时通过代码写入的,仅在本次设备开机运行期间有效;为了达到持久化的目的,可在给应用设置了音量后,将每个应用程序的音量保存到 XML 文件中,然后在 AudioService 服务启动时读取该文件并写入 AudioFlinger。
新建 XML 文件 app_volume_list.xml 保存到 device/qcom/msm8953_64/ 目录下,并修改 mk 文件;局部编译 services.jar 模块并替换安卓设备中对应的 jar 即可。
xml配置文件格式类似于:
<?xml version="1.0" encoding="UTF-8"?>
<packages-list>
<!--Mogo-->
<package>
<name>com.zhidao.bluetooth</name>
<volume>0.1</volume>
</package>
<package>
<name>com.zhidao.wiki</name>
<volume>0.8</volume>
</package>
<package>
<name>com.zhidao.launcher</name>
<volume>0.5</volume>
</package>
<package>
<name>com.zhidao.hello</name>
<volume>0.4</volume>
</package>
</packages-list>
--- a/qcom/msm8953_64/msm8953_64.mk
+++ b/qcom/msm8953_64/msm8953_64.mk
@@ -28,7 +28,8 @@ PRODUCT_COPY_FILES += device/qcom/msm8953_64/whitelistedapps.xml:system/etc/whit
device/qcom/msm8953_64/promote_adj_list.xml:system/etc/promote_adj_list.xml \
device/qcom/msm8953_64/reduce_adj_list.xml:system/etc/reduce_adj_list.xml \
device/qcom/msm8953_64/Rtkconfig.ini:system/etc/Rtkconfig.ini \
- device/qcom/msm8953_64/special_Permission_list.xml:system/etc/special_Permission_list.xml
+ device/qcom/msm8953_64/special_Permission_list.xml:system/etc/special_Permission_list.xml \
+ device/qcom/msm8953_64/app_volume_list.xml:system/etc/app_volume_list.xml
PRODUCT_PROPERTY_OVERRIDES += \
dalvik.vm.heapminfree=4m \
@@ -221,3 +222,18 @@ PRODUCT_DEL_PACKAGES += \
-include vendor/zhidao/zhidao.mk
#PRODUCT_COPY_FILES += \
$(call find-copy-subdir-files,*,device/qcom/common/zhidao/system,system)
修改AudioService服务:
--- a/base/services/core/java/com/android/server/audio/AudioService.java
+++ b/base/services/core/java/com/android/server/audio/AudioService.java
@@ -116,6 +116,11 @@ import com.android.server.SystemService;
import com.android.server.pm.UserManagerService;
import org.xmlpull.v1.XmlPullParserException;
+import org.xmlpull.v1.XmlPullParser;
+import org.xmlpull.v1.XmlSerializer;
+import android.util.Xml;
+import java.io.File;
+import java.io.FileInputStream;
import java.io.FileDescriptor;
import java.io.IOException;
@@ -128,6 +133,8 @@ import java.util.List;
import java.util.NoSuchElementException;
import java.util.Objects;
+import static org.xmlpull.v1.XmlPullParser.END_DOCUMENT;
+import static org.xmlpull.v1.XmlPullParser.START_TAG;
/**
* The implementation of the volume manager service.
* <p>
@@ -273,6 +280,7 @@ public class AudioService extends IAudioService.Stub {
/* Sound effect file names */
private static final String SOUND_EFFECTS_PATH = "/media/audio/ui/";
+ private static final String APP_VOLUME_PATH = "/system/etc/app_volume_list.xml";//add by xpzhi
private static final List<String> SOUND_EFFECT_FILES = new ArrayList<String>();
/* Sound effect file name mapping sound effect id (AudioManager.FX_xxx) to
@@ -688,6 +696,7 @@ public class AudioService extends IAudioService.Stub {
// must be called before readPersistedSettings() which needs a valid mStreamVolumeAlias[]
// array initialized by updateStreamVolumeAlias()
updateStreamVolumeAlias(false /*updateVolumes*/, TAG);
+ Log.e(TAG,"===xpzhi======AudioService()====AppVolume==");
readPersistedSettings();
readUserRestrictions();
mSettingsObserver = new SettingsObserver();
@@ -1251,6 +1260,7 @@ public class AudioService extends IAudioService.Stub {
// Load settings for the volume controller
mVolumeController.loadSettings(cr);
+ parseAppVolumeList(APP_VOLUME_PATH);//add by xpzhi
}
private void readUserRestrictions() {
@@ -1281,6 +1291,46 @@ public class AudioService extends IAudioService.Stub {
AudioSystem.muteMicrophone(microphoneMute);
}
+ /** add by xpzhi begin**/
+ private void parseAppVolumeList(String path) {
+ String name = null;
+ float level=0f;
+ try {
+ File file = new File(path);
+ if (file.exists()) {
+ FileInputStream inputStream = new FileInputStream(file);
+ XmlPullParser parser = Xml.newPullParser();
+ parser.setInput(inputStream, "UTF-8");
+ int eventType = parser.getEventType();
+ while (eventType != XmlPullParser.END_DOCUMENT) {
+ switch (eventType) {
+ case XmlPullParser.START_DOCUMENT:
+ break;
+ case XmlPullParser.START_TAG:
+ if (parser.getName().equals("package")) {
+
+ } else if("name".equals(parser.getName())){
+ name = parser.nextText();
+ } else if ("volume".equals(parser.getName())){
+ level = Float.parseFloat(parser.nextText());
+ Log.i(TAG,name+"===xpzhi=====AppVolume====="+level);
+ //AudioSystem.setParameters("");//write to audioflinger
+ }
+ break;
+ case XmlPullParser.END_TAG:
+ break;
+ }
+ eventType = parser.next();
+ }
+ } else {
+ Log.e(TAG,"====AppVolume file do not exists!=====");
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ /** add by xpzhi end**/
也即是在加载Audio模块持久化配置信息时顺便加载应用程序的音量信息再通过AudioSystem接口写入AudioFlinger即可。
附上流程图