下图是通过AudioManager进行setMicrophoneMute的过程,
应用可通过AudioManager获取系统服务,再调用服务提供的接口进行mute操作,
// App-side entry point: mutes/unmutes the platform microphone by calling
// into the system AudioService over Binder.
public void setMicrophoneMute(boolean on) {
    IAudioService service = getService();
    try {
        // The calling package name and user id are passed along —
        // presumably so the service can do per-app permission checks
        // (NOTE(review): enforcement happens in AudioService, not shown here).
        service.setMicrophoneMute(on, getContext().getOpPackageName(), UserHandle.getCallingUserId());
    } catch (RemoteException e) {
        throw e.rethrowFromSystemServer();
    }
} |
AudioManager通过AudioService,间接调用到AudioSystem.muteMicrophone,这个是native方法,
最终调用到native的AudioSystem.cpp
// JNI bridge for AudioSystem.muteMicrophone: forwards the request to the
// native AudioSystem and converts the resulting status_t into a jint via
// check_AudioSystem_Command (defined elsewhere in this file).
static jint android_media_AudioSystem_muteMicrophone(JNIEnv *env, jobject thiz, jboolean on)
{
    return (jint) check_AudioSystem_Command(AudioSystem::muteMicrophone(on));
}
|
AudioSystem.cpp通过binder,调用到AF,
// Client-side proxy: forwards the mute request to AudioFlinger over Binder.
// Returns PERMISSION_DENIED when the AudioFlinger service handle cannot be
// obtained (service not up / connection failed).
status_t AudioSystem::muteMicrophone(bool state)
{
    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
    if (af == 0) return PERMISSION_DENIED;
    return af->setMicMute(state);
}
|
AF 则遍历所有已加载的音频硬件设备,依次将其 mute。
// Applies the mic-mute state to every loaded audio HAL device in turn.
// NOTE(review): excerpt — the declaration/initialization of `ret` and the
// hardware-lock handling around this loop are elided in this quotation.
status_t AudioFlinger::setMicMute(bool state)
{
    for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
        audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
        status_t result = dev->set_mic_mute(dev, state);
        // Keep iterating even when one device fails; remember the last error.
        if (result != NO_ERROR) {
            ret = result;
        }
    }
    mHardwareStatus = AUDIO_HW_IDLE;
    return ret;
}
|
例如,对于设备模块,下面有加载模块后给函数指针的赋值代码,也有相应的调用过程,
Audio_hw.c (hardware\qcom\audio\hal)
// HAL module entry point: populates the audio_hw_device function table when
// the module is opened.  Only the assignment relevant to mic mute is shown
// in this excerpt (the rest of the function is elided).
static int adev_open(const hw_module_t *module, const char *name, hw_device_t **device)
{
    …
    adev->device.set_mic_mute = adev_set_mic_mute;
}
// HAL mic-mute hook: serializes on the device lock and delegates to the
// voice layer.  NOTE(review): excerpt — the line deriving the local `adev`
// from `dev` is elided in this quotation.
static int adev_set_mic_mute(struct audio_hw_device *dev, bool state)
{
    int ret;
    pthread_mutex_lock(&adev->lock);
    ALOGD("%s state %d\n", __func__, state);
    ret = voice_set_mic_mute((struct audio_device *)dev, state);
    pthread_mutex_unlock(&adev->lock);
    return ret;
}
|
Voice.c(hardware\qcom\audio\hal):
分别在通话(in-call)状态和 VoIP 状态下进行 mute 操作。
// Records the requested mute state and applies it to whichever voice path
// is active: the modem path for a circuit-switched call (IN_CALL) or the
// compress-VoIP path (IN_COMMUNICATION).
int voice_set_mic_mute(struct audio_device *adev, bool state)
{
    int err = 0;
    // Cache the state so later mode switches can re-apply it.
    adev->voice.mic_mute = state;
    if (adev->mode == AUDIO_MODE_IN_CALL)
        err = platform_set_mic_mute(adev->platform, state);
    if (adev->mode == AUDIO_MODE_IN_COMMUNICATION)
        err = voice_extn_compress_voip_set_mic_mute(adev, state);
    return err;
} |
Platform.c里,调用了my_data->csd->mic_mute,
// Forwards the mute request to the CSD (modem audio) client when one was
// loaded at init time.  NOTE(review): excerpt — the declaration of `ret`
// and the function's return path are elided in this quotation.
int platform_set_mic_mute(void *platform, bool state)
{
    struct platform_data *my_data = (struct platform_data *)platform;
    struct audio_device *adev = my_data->adev;
    if (my_data->csd != NULL) {
        // ALL_SESSION_VSID (0xFFFFFFFF) targets every voice session at once.
        ret = my_data->csd->mic_mute(ALL_SESSION_VSID, state, DEFAULT_MUTE_RAMP_DURATION_MS);
        if (ret < 0) {
            ALOGE("%s: csd_mic_mute error %d", __func__, ret);
        }
    }
} |
ALL_SESSION_VSID 表示将所有session都进行mute,
/* Voice session identifiers (VSIDs) passed to the CSD client. */
#define VOICE_SESSION_VSID  0x10C01000
#define VOICE2_SESSION_VSID 0x10DC1000
#define VOLTE_SESSION_VSID  0x10C02000
#define VOWLAN_SESSION_VSID 0x10002000
/* Wildcard VSID: addresses every voice session at once. */
#define ALL_SESSION_VSID    0xFFFFFFFF |
其中,my_data 是在 Audio_hw.c (hardware\qcom\audio\hal) 的 adev_open() 里,通过 platform_init() 初始化的,参数 platform 即 my_data,
adev->platform = platform_init(adev); |
而 csd 的赋值,则是在 platform_init() 里完成的,
my_data->csd = open_csd_client(my_data->is_i2s_ext_modem); |
open_csd_client 则是打开 libcsd-client.so 库文件,给 csd 结构里的函数指针赋值,我们需要的 mic_mute 就在其中;同时 open_csd_client 会调用 csd_client_init 进行初始化。
// Loads libcsd-client.so with dlopen and resolves the modem-audio control
// entry points into a freshly allocated csd_data table, then runs the
// client's own init.  NOTE(review): excerpt — dlopen/dlsym error handling,
// the remaining dlsym lines (at the `…` marks) and the final return of
// `csd` are elided in this quotation.
static struct csd_data *open_csd_client(bool i2s_ext_modem)
{
    struct csd_data *csd = calloc(1, sizeof(struct csd_data));
    if (!csd) {
        ALOGE("failed to allocate csd_data mem");
        return NULL;
    }
    csd->csd_client = dlopen(LIB_CSD_CLIENT, RTLD_NOW);
    csd->deinit = (deinit_t)dlsym(csd->csd_client, "csd_client_deinit");
    csd->disable_device = (disable_device_t)dlsym(csd->csd_client, "csd_client_disable_device");
    …
    /* The hook platform_set_mic_mute() ends up calling: */
    csd->mic_mute = (mic_mute_t)dlsym(csd->csd_client, "csd_client_mic_mute");
    …
    csd->init = (init_t)dlsym(csd->csd_client, "csd_client_init");
    csd->init(i2s_ext_modem);
} |
csd_client_init 实现在 csd_client.c,它主要是初始化数据,创建一个线程,并打开 audio_slimslave 设备。
// CSD client bootstrap: initializes client-side state, spawns a worker
// thread, and opens the SLIMbus slave device used to talk to the modem.
// NOTE(review): excerpt — the declaration of `rc`, error handling and the
// return value are elided in this quotation.
int csd_client_init(bool i2s_ext_modem)
{
    rc = initialize_client_data();
    rc = csd_create_thread();
    csd_client.slim_fd = open("/dev/audio_slimslave", O_RDWR);
} |
qmi_idl_lib_internal.h
|
再回到csd_client_mic_mute,它就是通过set_stream_mute给qmi发送一个QMI_CSD_IOCTL_VS_CMD_SET_MUTE_REQ_V01请求消息,具体mute通话的过程则由modem来完成。
// Iterates over the voice sessions selected by the VSID and issues a
// stream-mute request for each active one; the modem side performs the
// actual muting.  NOTE(review): excerpt — the local declarations (it,
// service, session, rc), the closing brace and the return are elided;
// the iterator is seeded from `sessionid`, presumably derived from the
// `vsid` argument — confirm against the full source.
int csd_client_mic_mute(uint32_t vsid, int mute, uint16_t ramp_duration)
{
    iterator_init(&it, sessionid);
    while (iterator_has_next(&it)) {
        iterator_next(&it, &service, &session);
        // Only sessions that are running or in standby, and whose local
        // call hold (LCH) is stopped, are muted.
        if ((session->state == SESSION_STARTED || session->state == SESSION_STANDBY) &&
            (session->lch_mode == VOICE_LCH_STOP)) {
            rc = set_stream_mute(service, session, mute, ramp_duration);
            if (rc < 0) {
                LOGE("%s: Error %d setting mic mute on session %s\n", __func__, rc, session->name);
            }
        }
    } |
可以看出,android 在处理 audio 的时候,即使仅仅是通话音频通道的 mute,也没有由 telephony 简单地直接调用 qmi 来完成,而是绕了很大一个弯。这样设计的目的,是为了保持模块的一致性,采用通用框架,便于维护和管理,但在调用链路的长度(效率)上则大打折扣。