Android AudioSystem Study Notes

  1. App-level call
    AudioManager mAudioManager = (AudioManager) mContext.getSystemService(Context.AUDIO_SERVICE);
    mAudioManager.setMicrophoneMute(true);
  2. Java-layer call
    // frameworks/base/media/java/android/media/AudioManager.java
    public void setMicrophoneMute(boolean on) {
        final IAudioService service = getService();
        service.setMicrophoneMute(on, getContext().getOpPackageName(),
                UserHandle.getCallingUserId());
    }
    // frameworks/base/services/core/java/com/android/server/audio/AudioService.java
    public void setMicrophoneMute(boolean on, String callingPackage, int userId) {
        // caller permission checks omitted
        setMicrophoneMuteNoCallerCheck(on, userId);
    }
    private void setMicrophoneMuteNoCallerCheck(boolean on, int userId) {
        AudioSystem.muteMicrophone(on);
    }
    // frameworks/base/media/java/android/media/AudioSystem.java
    public static native int muteMicrophone(boolean on);
  3. JNI call
    // frameworks/base/core/jni/android_media_AudioSystem.cpp
    {"muteMicrophone", "(Z)I", (void *)android_media_AudioSystem_muteMicrophone},
    static jint
    android_media_AudioSystem_muteMicrophone(JNIEnv *env, jobject thiz, jboolean on)
    {
        return (jint) check_AudioSystem_Command(AudioSystem::muteMicrophone(on));
    }
  4. Native call
    // frameworks/av/media/libaudioclient/AudioSystem.cpp
    status_t AudioSystem::muteMicrophone(bool state)
    {
        const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
        if (af == 0) return PERMISSION_DENIED;
        return af->setMicMute(state);
    }
    // frameworks/av/services/audioflinger/AudioFlinger.cpp
    status_t AudioFlinger::setMicMute(bool state) {
        status_t ret = NO_ERROR;  // init/permission checks omitted
        mHardwareStatus = AUDIO_HW_SET_MIC_MUTE;
        for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
            // each dev is a DeviceHalInterface; the HIDL hop is sketched after this step
            sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
            status_t result = dev->setMicMute(state);
            if (result != NO_ERROR) {
                ret = result;
            }
        }
        mHardwareStatus = AUDIO_HW_IDLE;
        return ret;
    }
    // hardware/interfaces/audio/core/all-versions/default/Device.cpp
    Return<Result> Device::setMicMute(bool mute) {
        return analyzeStatus("set_mic_mute", mDevice->set_mic_mute(mDevice, mute));
    }
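    The hop between AudioFlinger and Device.cpp above goes through libaudiohal: the DeviceHalInterface that AudioFlinger holds is implemented by DeviceHalHidl, which forwards the call over HIDL/Binder to the audio HAL service process where Device::setMicMute runs. A rough sketch of that bridge (paraphrased from frameworks/av/media/libaudiohal; the exact file layout and helpers vary by Android version):
    // frameworks/av/media/libaudiohal/impl/DeviceHalHidl.cpp (approximate)
    status_t DeviceHalHidl::setMicMute(bool state) {
        if (mDevice == 0) return NO_INIT;
        // mDevice is the IDevice HIDL proxy; this call crosses into the
        // audio HAL service that hosts the Device.cpp implementation above.
        return processReturn("setMicMute", mDevice->setMicMute(state));
    }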
  5. HAL-layer call
    // vendor/qcom/opensource/audio-hal/primary-hal/hal/audio_hw.c
    adev->device.set_mic_mute = adev_set_mic_mute;
    static int adev_set_mic_mute(struct audio_hw_device *dev, bool state)
    {
        struct audio_device *adev = (struct audio_device *)dev;
        int ret;  // device lock handling omitted
        ret = voice_set_mic_mute(adev, state);
        if (adev->ext_hw_plugin)
            ret = audio_extn_ext_hw_plugin_set_mic_mute(adev->ext_hw_plugin, state);
        adev->mic_muted = state;
        return ret;
    }
    // vendor/qcom/opensource/audio-hal/primary-hal/hal/voice.c
    int voice_set_mic_mute(struct audio_device *adev, bool state)
    {
        int err = 0;
        adev->voice.mic_mute = state;
        if (audio_extn_hfp_is_active(adev)) {
            err = audio_extn_hfp_set_mic_mute2(adev, state);
        } else if (adev->mode == AUDIO_MODE_IN_CALL) {
            if (adev->voice.use_device_mute)
                err = platform_set_device_mute(adev->platform, state, "tx");
            else
                err = platform_set_mic_mute(adev->platform, state);
        } else if (adev->mode == AUDIO_MODE_IN_COMMUNICATION) {
            err = voice_extn_compress_voip_set_mic_mute(adev, state);
        }
        return err;
    }
    Next, the key functions to look at are platform_set_device_mute and platform_set_mic_mute.

// vendor/qcom/opensource/audio-hal/primary-hal/hal/msm8974/platform.c
int platform_set_device_mute(void *platform, bool state, char *dir) {
    struct platform_data *my_data = (struct platform_data *)platform;
    struct audio_device *adev = my_data->adev;
    struct mixer_ctl *ctl;
    char *mixer_ctl_name = NULL;
    int ret = 0;
    long set_values[ ] = {0,
                          ALL_SESSION_VSID,
                          DEFAULT_DEVICE_MUTE_RAMP_DURATION_MS};

    if (!strncmp("rx", dir, sizeof("rx")) && !(!state && incall_music_flag)) {
        mixer_ctl_name = "Voice Rx Device Mute";
    } else if (!strncmp("tx", dir, sizeof("tx"))) {
        mixer_ctl_name = "Voice Tx Device Mute";  // how does this differ from "Voice Tx Mute"?
    }
    set_values[0] = state;
    ctl = mixer_get_ctl_by_name(adev->mixer, mixer_ctl_name);
    mixer_ctl_set_array(ctl, set_values, ARRAY_SIZE(set_values)); // is this where the corresponding stream gets muted, at the mixer?
    return ret;
}

// vendor/qcom/opensource/audio-hal/primary-hal/hal/msm8974/platform.c
int platform_set_mic_mute(void *platform, bool state)
{
    struct platform_data *my_data = (struct platform_data *)platform;
    struct audio_device *adev = my_data->adev;
    struct mixer_ctl *ctl;
    const char *mixer_ctl_name = "Voice Tx Mute";
    int ret = 0;
    long set_values[ ] = {0,
                          ALL_SESSION_VSID,
                          DEFAULT_MUTE_RAMP_DURATION_MS};

    if (adev->mode != AUDIO_MODE_IN_CALL &&
        adev->mode != AUDIO_MODE_IN_COMMUNICATION)
        return 0;

    if (adev->enable_hfp)
        mixer_ctl_name = "HFP Tx Mute";

    set_values[0] = state;
    ctl = mixer_get_ctl_by_name(adev->mixer, mixer_ctl_name);
    if (!ctl) {
        ALOGE("%s: Could not get ctl for mixer cmd - %s",
              __func__, mixer_ctl_name);
        ret = -EINVAL;
    } else {
        ALOGV("%s Setting voice mute state: %d", __func__, state);
        mixer_ctl_set_array(ctl, set_values, ARRAY_SIZE(set_values));
    }

    if (my_data->csd != NULL) {
        ret = my_data->csd->mic_mute(ALL_SESSION_VSID, state,
                                     DEFAULT_MUTE_RAMP_DURATION_MS);
        if (ret < 0) {
            ALOGE("%s: csd_mic_mute error %d", __func__, ret);
        }
    }
    return ret;
}
Both platform_set_device_mute and platform_set_mic_mute end up calling mixer_ctl_set_array; the only difference is which mixer control they write to: "Voice Tx Device Mute" versus "Voice Tx Mute". The kernel driver registers these controls as shown below, and a small userspace sketch exercising one of them follows.

// vendor/qcom/opensource/audio-kernel/asoc/msm-pcm-voice-v2.c
SOC_SINGLE_MULTI_EXT("Voice Tx Device Mute", SND_SOC_NOPM, 0, VSID_MAX,
                     0, 3, NULL, msm_voice_tx_device_mute_put),
SOC_SINGLE_MULTI_EXT("Voice Tx Mute", SND_SOC_NOPM, 0, VSID_MAX,
                     0, 3, NULL, msm_voice_mute_put),
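
These are ordinary ALSA mixer controls, so the same mute can be driven from userspace with tinyalsa, mirroring the three-value array that platform_set_mic_mute() builds. The sketch below is illustrative only: it assumes sound card 0 and assumes ALL_SESSION_VSID and the default ramp are 0xFFFFFFFF and 20 ms as in typical Qualcomm HAL headers (verify against your tree); it also needs root and an active voice session to have any audible effect.

// Minimal test sketch (not part of the HAL): poke "Voice Tx Mute" directly via tinyalsa.
// Build on-device, e.g.: clang++ voice_tx_mute_test.cpp -ltinyalsa -o voice_tx_mute_test
#include <cstdio>
#include <cstdlib>
#include <tinyalsa/asoundlib.h>

// Assumed values; the HAL takes these from its own headers (voice.h / platform.c).
#define ALL_SESSION_VSID              0xFFFFFFFF
#define DEFAULT_MUTE_RAMP_DURATION_MS 20

int main(int argc, char **argv) {
    long mute = (argc > 1) ? atol(argv[1]) : 1;   // 1 = mute, 0 = unmute

    struct mixer *mxr = mixer_open(0);            // card 0, the card the HAL opens
    if (!mxr) { fprintf(stderr, "mixer_open failed\n"); return 1; }

    struct mixer_ctl *ctl = mixer_get_ctl_by_name(mxr, "Voice Tx Mute");
    if (!ctl) { fprintf(stderr, "control not found\n"); mixer_close(mxr); return 1; }

    // Same 3-element payload platform_set_mic_mute() writes:
    // {mute flag, voice session id, soft-mute ramp duration in ms}
    long values[3] = { mute, (long)ALL_SESSION_VSID, DEFAULT_MUTE_RAMP_DURATION_MS };
    int ret = mixer_ctl_set_array(ctl, values, 3);
    if (ret) fprintf(stderr, "mixer_ctl_set_array failed: %d\n", ret);

    mixer_close(mxr);
    return ret ? 1 : 0;
}

As the kernel code in the next section shows, the control's put handler caches the value per voice session and only forwards VSS_IVOLUME_CMD_MUTE_V2 to the ADSP while the session is active.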
6. Kernel-layer call
"Voice Tx Device Mute"的调用过程

// vendor/qcom/opensource/audio-kernel/asoc/msm-pcm-voice-v2.c
static int msm_voice_tx_device_mute_put(struct snd_kcontrol *kcontrol,
                                        struct snd_ctl_elem_value *ucontrol)
{
    int ret = 0;
    int mute = ucontrol->value.integer.value[0];
    uint32_t session_id = ucontrol->value.integer.value[1];
    int ramp_duration = ucontrol->value.integer.value[2];

    ret = voc_set_device_mute(session_id, VSS_IVOLUME_DIRECTION_TX,
                              mute, ramp_duration);
    return ret;
}
// vendor/qcom/opensource/audio-kernel/dsp/q6voice.c
int voc_set_device_mute(uint32_t session_id, uint32_t dir, uint32_t mute,
                        uint32_t ramp_duration)
{
    struct voice_data *v = NULL;
    int ret = 0;
    struct voice_session_itr itr;

    voice_itr_init(&itr, session_id);
    while (voice_itr_get_next_session(&itr, &v)) {
        if (v != NULL) {
            mutex_lock(&v->lock);
            if (dir == VSS_IVOLUME_DIRECTION_TX) {
                v->dev_tx.dev_mute = mute;
                v->dev_tx.dev_mute_ramp_duration_ms = ramp_duration;
            } else {
                v->dev_rx.dev_mute = mute;
                v->dev_rx.dev_mute_ramp_duration_ms = ramp_duration;
            }

            if (((v->voc_state == VOC_RUN) ||
                 (v->voc_state == VOC_STANDBY)) &&
                (v->lch_mode == 0))
                ret = voice_send_device_mute_cmd(v, dir, mute, ramp_duration);
            mutex_unlock(&v->lock);
        }
    }

    return ret;
}
// vendor/qcom/opensource/audio-kernel/dsp/q6voice.c
static int voice_send_device_mute_cmd(struct voice_data *v, uint16_t direction,
                                      uint16_t mute_flag, uint32_t ramp_duration)
{
    struct cvp_set_mute_cmd cvp_mute_cmd;
    int ret = 0;

    cvp_mute_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
                                               APR_HDR_LEN(APR_HDR_SIZE),
                                               APR_PKT_VER);
    cvp_mute_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
                                             sizeof(cvp_mute_cmd) - APR_HDR_SIZE);
    cvp_mute_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id);
    cvp_mute_cmd.hdr.dest_port = voice_get_cvp_handle(v);
    cvp_mute_cmd.hdr.token = 0;
    cvp_mute_cmd.hdr.opcode = VSS_IVOLUME_CMD_MUTE_V2;
    cvp_mute_cmd.cvp_set_mute.direction = direction;
    cvp_mute_cmd.cvp_set_mute.mute_flag = mute_flag;
    cvp_mute_cmd.cvp_set_mute.ramp_duration_ms = ramp_duration;

    v->cvp_state = CMD_STATUS_FAIL;
    v->async_err = 0;
    ret = apr_send_pkt(common.apr_q6_cvp, (uint32_t *) &cvp_mute_cmd);
    /* waiting for the APR response is omitted here */

    return ret;
}
"Voice Tx Mute"的调用过程:

// vendor/qcom/opensource/audio-kernel/asoc/msm-pcm-voice-v2.c
static int msm_voice_mute_put(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *ucontrol)
{
    int ret = 0;
    int mute = ucontrol->value.integer.value[0];
    uint32_t session_id = ucontrol->value.integer.value[1];
    int ramp_duration = ucontrol->value.integer.value[2];

    ret = voc_set_tx_mute(session_id, TX_PATH, mute, ramp_duration);
    return ret;
}
// vendor/qcom/opensource/audio-kernel/dsp/q6voice.c
int voc_set_tx_mute(uint32_t session_id, uint32_t dir, uint32_t mute,
                    uint32_t ramp_duration)
{
    struct voice_data *v = NULL;
    int ret = 0;
    struct voice_session_itr itr;

    voice_itr_init(&itr, session_id);
    while (voice_itr_get_next_session(&itr, &v)) {
        if (v != NULL) {
            mutex_lock(&v->lock);
            v->stream_tx.stream_mute = mute;
            v->stream_tx.stream_mute_ramp_duration_ms = ramp_duration;
            if (is_voc_state_active(v->voc_state) &&
                (v->lch_mode == 0))
                ret = voice_send_stream_mute_cmd(v,
                        VSS_IVOLUME_DIRECTION_TX,
                        v->stream_tx.stream_mute,
                        v->stream_tx.stream_mute_ramp_duration_ms);
            mutex_unlock(&v->lock);
        }
    }

    return ret;
}
// vendor/qcom/opensource/audio-kernel/dsp/q6voice.c
static int voice_send_stream_mute_cmd(struct voice_data *v, uint16_t direction,
                                      uint16_t mute_flag, uint32_t ramp_duration)
{
    struct cvs_set_mute_cmd cvs_mute_cmd;
    int ret = 0;

    /* send mute/unmute to cvs */
    cvs_mute_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
                                               APR_HDR_LEN(APR_HDR_SIZE),
                                               APR_PKT_VER);
    cvs_mute_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
                                             sizeof(cvs_mute_cmd) - APR_HDR_SIZE);
    cvs_mute_cmd.hdr.src_port = voice_get_idx_for_session(v->session_id);
    cvs_mute_cmd.hdr.dest_port = voice_get_cvs_handle(v);
    cvs_mute_cmd.hdr.token = 0;
    cvs_mute_cmd.hdr.opcode = VSS_IVOLUME_CMD_MUTE_V2;
    cvs_mute_cmd.cvs_set_mute.direction = direction;
    cvs_mute_cmd.cvs_set_mute.mute_flag = mute_flag;
    cvs_mute_cmd.cvs_set_mute.ramp_duration_ms = ramp_duration;

    v->cvs_state = CMD_STATUS_FAIL;
    v->async_err = 0;
    ret = apr_send_pkt(common.apr_q6_cvs, (uint32_t *) &cvs_mute_cmd);
    return ret;
}
From the above, the ultimate difference between the two controls is where the mute command is sent: "Voice Tx Mute" goes to the CVS service (apr_q6_cvs), i.e. a stream-side mute, while "Voice Tx Device Mute" goes to the CVP service (apr_q6_cvp), i.e. a device-side mute.
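
Both commands use the same opcode (VSS_IVOLUME_CMD_MUTE_V2) and the same three-field payload; only the APR destination differs. As a reference, here is a rough, hypothetical reconstruction of that shared payload inferred from the field accesses above; the authoritative definitions (including packing attributes and the apr_hdr layout) live in the audio-kernel's q6voice/APR headers:

#include <cstdint>

// Hypothetical name; reconstructed only from the assignments shown above.
struct vss_ivolume_mute_v2_payload {
    uint16_t direction;         // VSS_IVOLUME_DIRECTION_TX or _RX
    uint16_t mute_flag;         // 1 = mute, 0 = unmute
    uint16_t ramp_duration_ms;  // soft-mute ramp length
};
// cvs_set_mute_cmd / cvp_set_mute_cmd each prepend an APR header (struct apr_hdr)
// to this payload before apr_send_pkt() ships it to the CVS or CVP service.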
Source: original article by CSDN blogger "cheri--", licensed CC 4.0 BY-SA: https://blog.csdn.net/cheriyou_/article/details/109657309
