Android Audio Subsystem (10) ------ MTK Audio Recording Flow Code Walkthrough

Hello! This is 风筝's blog.
Feel free to discuss and exchange ideas with me.

The Android framework code is largely the same across platforms; only the HAL code is vendor-specific and differs per platform. Taking MTK as the example, this post records the MTK audio HAL recording code flow.

The overall call flow is roughly as follows:

AudioALSAHardware: openInputStream()
	AudioALSAStreamManager: openInputStream()
		AudioALSAStreamIn: AudioALSAStreamIn //new AudioALSAStreamIn
		AudioALSAStreamIn: set() //done, devices: 0x80000004, flags: 0, acoustics: 0x0, format: 0x1, sampleRate: 48000/48000, num_channels: 12/2, buffer_size: 3840, tempDebugflag: 0
			AudioALSAStreamIn: checkOpenStreamFormat
			AudioALSAStreamIn: checkOpenStreamChannels
			AudioALSAStreamIn: checkOpenStreamSampleRate
AudioALSAHardware: createAudioPatch()
AudioALSAStreamManager: setParameters() //IOport = 70, keyValuePairs = input_source=1;routing=-2147483644
	AudioALSAStreamIn: setParameters()
		AudioALSAStreamManager: routingInputDevice()// input_device: 0x80000004 => 0x80000004

AudioALSAStreamIn: read()
	AudioALSAStreamIn: open()
		AudioALSAStreamManager: createCaptureHandler()
			AudioALSAStreamManager: ulStreamAttributeTargetCustomization
			AudioALSACaptureDataProviderNormal: AudioALSACaptureDataProviderNormal()
				AudioALSACaptureDataProviderBase: AudioALSACaptureDataProviderBase()
			AudioALSACaptureHandlerNormal: AudioALSACaptureHandlerNormal()
				AudioALSACaptureHandlerNormal: init()
		AudioALSACaptureHandlerNormal: open()//input_device = 0x80000004, input_source = 0x1, sample_rate=48000, num_channels=2 // period_count, channels, etc. are configured here
			AudioALSACaptureDataProviderBase: AudioALSACaptureDataProviderBase()
			AudioALSACaptureDataProviderDspRaw: AudioALSACaptureDataProviderDspRaw()
			AudioALSACaptureDataClientAurisysNormal: AudioALSACaptureDataClientAurisysNormal(+) //mCaptureDataClient = new AudioALSACaptureDataClientAurisysNormal
				AudioALSACaptureDataProviderBase: configStreamAttribute() //audio_mode: 0 => 0, input_device: 0x0 => 0x80000004, flag: 0x0 => 0x0, input_source: 0->1, output_device: 0x0 => 0x2, sample_rate: 0 => 48000, period_us: 0 => 0, DSP out sample_rate: 0 => 48000
				AudioALSACaptureDataProviderBase: attach
					AudioALSACaptureDataProviderDspRaw: open(+) // format and channels are configured here
						AudioALSADeviceConfigManager: ApplyDeviceTurnonSequenceByName() DeviceName = ADDA_TO_CAPTURE1 descriptor->DeviceStatusCounte = 0
						AudioALSACaptureDataProviderBase: enablePmicInputDevice
							AudioALSAHardwareResourceManager: +startInputDevice()
								AudioALSADeviceConfigManager: ApplyDeviceTurnonSequenceByName() DeviceName = builtin_Mic_DualMic descriptor->DeviceStatusCounte = 0
						AudioALSACaptureDataProviderBase: getInputSampleRate()
						AudioMTKGainController: +SetCaptureGain() //mode=0, source=1, input device=0x80000004, output device=0x2
							AudioMTKGainController: ApplyMicGain()
						AudioALSACaptureDataProviderDspRaw: openApHwPcm(), mPcm = 0xf2b55260
						AudioDspStreamManager: addCaptureDataProvider() // added to mCaptureDataProviderVector
							AudioDspStreamManager: openCaptureDspHwPcm(), mDspHwPcm = 0xf2b55340
						pthread_create(&hReadThread, NULL, AudioALSACaptureDataProviderDspRaw::readThread, (void *)this); // spawn the read thread
							AudioALSACaptureDataProviderDspRaw: +readThread()
								AudioALSACaptureDataProviderBase: waitPcmStart
									AudioALSACaptureDataProviderBase: pcm_start
								AudioALSACaptureDataProviderBase: pcmRead
								AudioALSACaptureDataProviderBase: provideCaptureDataToAllClients
								AudioALSACaptureDataClientAurisysNormal: copyCaptureDataToClient() // hand data to the client, then loop back to read again
							
				pthread_create(&hProcessThread, NULL, AudioALSACaptureDataClientAurisysNormal::processThread, (void *)this); // spawn the process thread
					AudioALSACaptureDataClientAurisysNormal: processThread
					
		AudioALSACaptureHandlerNormal: read()
			bytes = mCaptureDataClient->read(buffer, bytes);
Let's follow the code, starting with openInputStream:

AudioMTKStreamInInterface *AudioALSAStreamManager::openInputStream(
    uint32_t devices,
    int *format,
    uint32_t *channels,
    uint32_t *sampleRate,
    status_t *status,
    audio_in_acoustics_t acoustics,
    uint32_t input_flag) {

    // validate the output-parameter pointers
    if (format == NULL || channels == NULL || sampleRate == NULL || status == NULL) {
        ALOGE("%s(), NULL pointer!! format = %p, channels = %p, sampleRate = %p, status = %p",
              __FUNCTION__, format, channels, sampleRate, status);
        if (status != NULL) { *status = INVALID_OPERATION; }
        return NULL;
    }

    ALOGD("%s(), devices = 0x%x, format = 0x%x, channels = 0x%x, sampleRate = %d, status = %d, acoustics = 0x%x, input_flag 0x%x",
          __FUNCTION__, devices, *format, *channels, *sampleRate, *status, acoustics, input_flag);

    // create stream in
    AudioALSAStreamIn *pAudioALSAStreamIn = new AudioALSAStreamIn();
    // set the main stream parameters
    pAudioALSAStreamIn->set(devices, format, channels, sampleRate, status, acoustics, input_flag);

    pAudioALSAStreamIn->setIdentity(mStreamInIndex);
    mStreamInVector.add(mStreamInIndex, pAudioALSAStreamIn); // add to mStreamInVector

    return pAudioALSAStreamIn;
}
set() mainly validates and stores the incoming stream attributes:

status_t AudioALSAStreamIn::set(
    uint32_t devices,
    int *format,
    uint32_t *channels,
    uint32_t *sampleRate,
    status_t *status,
    audio_in_acoustics_t acoustics, uint32_t flags) {

    // check format
    if (checkOpenStreamFormat(static_cast<audio_devices_t>(devices), format) == false) {
        *status = BAD_VALUE;
    }
    // check channel mask
    if (checkOpenStreamChannels(static_cast<audio_devices_t>(devices), channels) == false) {
        *status = BAD_VALUE;
    }
    // check sample rate
    if (checkOpenStreamSampleRate(static_cast<audio_devices_t>(devices), sampleRate) == false) {
        *status = BAD_VALUE;
    }
    // config stream attribute: fill in the target stream attributes
    if (*status == NO_ERROR) {
        // format
        mStreamAttributeTarget.audio_format = static_cast<audio_format_t>(*format);
        // channel
        mStreamAttributeTarget.audio_channel_mask = static_cast<audio_channel_mask_t>(*channels);
        mStreamAttributeTarget.num_channels = popcount(*channels);
        // sample rate
        mStreamAttributeTarget.sample_rate = *sampleRate;
        // devices
        mStreamAttributeTarget.input_device = static_cast<audio_devices_t>(devices);
        // acoustics flags
        mStreamAttributeTarget.acoustics_mask = static_cast<audio_in_acoustics_t>(acoustics);
    }

    return *status;
}

The upper-layer service pushes parameters down to the HAL via setParameters. The parameters delivered here are: IOport = 70, keyValuePairs = input_source=1;routing=-2147483644. Note that -2147483644, reinterpreted as an unsigned 32-bit value, is 0x80000004, i.e. AUDIO_DEVICE_IN_BUILTIN_MIC (the input-device bit 0x80000000 plus bit 0x4).
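As a standalone illustration (plain C++, not HAL code), here is how that routing value decodes:

// Decode the "routing" value from the keyValuePairs (illustrative sketch).
#include <cstdint>
#include <cstdio>

int main() {
    int32_t routing = -2147483644;                    // value from keyValuePairs
    uint32_t device = static_cast<uint32_t>(routing); // reinterpret as a device mask
    printf("routing = 0x%08x\n", device);             // prints 0x80000004
    // AUDIO_DEVICE_BIT_IN is 0x80000000; low bit 0x4 selects the builtin mic,
    // so 0x80000004 is AUDIO_DEVICE_IN_BUILTIN_MIC.
    printf("is input device: %s\n", (device & 0x80000000u) ? "yes" : "no");
    return 0;
}

Back in the HAL, AudioALSAStreamIn::setParameters handles the request: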

status_t AudioALSAStreamIn::setParameters(const String8 &keyValuePairs) {
    AudioParameter param = AudioParameter(keyValuePairs);
    const String8 keyRouting = String8(AudioParameter::keyRouting);
    int value = 0;

    /// routing
    if (param.getInt(keyRouting, value) == NO_ERROR) {
        audio_devices_t inputdevice = static_cast<audio_devices_t>(value);
        status = mStreamManager->routingInputDevice(this, mStreamAttributeTarget.input_device, inputdevice);
    }
}
// setParameters parses the incoming key/value pairs; this time the request is essentially a routing operation
status_t AudioALSAStreamManager::routingInputDevice(AudioALSAStreamIn *pAudioALSAStreamIn,
                                                    const audio_devices_t current_input_device,
                                                    audio_devices_t input_device) {
    // if the target device is the same as the current one (or NONE), return immediately
    if (input_device == AUDIO_DEVICE_NONE ||
        input_device == current_input_device) {
        ALOGW("-%s(), input_device(0x%x) is AUDIO_DEVICE_NONE(0x%x) or current_input_device(0x%x), return",
              __FUNCTION__,
              input_device, AUDIO_DEVICE_NONE, current_input_device);
        return NO_ERROR;
    }

    // when actually switching devices, suspend all input streams first
    setAllInputStreamsSuspend(true, false);
    standbyAllInputStreams();
    if (mStreamInVector.size() > 0) {
        for (size_t i = 0; i < mStreamInVector.size(); i++) {
            if ((input_device == AUDIO_DEVICE_IN_FM_TUNER) || (current_input_device == AUDIO_DEVICE_IN_FM_TUNER) ||
                (input_device == AUDIO_DEVICE_IN_TELEPHONY_RX) || (current_input_device == AUDIO_DEVICE_IN_TELEPHONY_RX)) {
                if (pAudioALSAStreamIn == mStreamInVector[i]) {
                    status = mStreamInVector[i]->routing(input_device);
                    ASSERT(status == NO_ERROR);
                }
            } else {
                // the actual routing operation
                status = mStreamInVector[i]->routing(input_device);
                ASSERT(status == NO_ERROR);
            }
        }
    }
    // resume from suspend
    setAllInputStreamsSuspend(false, false);
}

Next comes the most important call: read. The upper-layer service calls read() to fetch the captured audio data:

ssize_t AudioALSAStreamIn::read(void *buffer, ssize_t bytes) {
    /// check open
    if (mStandby == true) {
        status = open();
    }
}

The open() is done lazily inside read():

status_t AudioALSAStreamIn::open() {
    if (mStandby == true) {
        // create capture handler
        ASSERT(mCaptureHandler == NULL);
        mCaptureHandler = mStreamManager->createCaptureHandler(&mStreamAttributeTarget);
        if (mCaptureHandler == NULL) {
            status = BAD_VALUE;
            return status;
        }

        // open audio hardware
        status = mCaptureHandler->open();

        mStandby = false;
    }

    return status;
}

A CaptureHandler is created first; different devices and sources map to different CaptureHandler implementation classes, and the corresponding CaptureHandler is then opened.
It is worth paying attention to the createCaptureHandler logic, which mainly differentiates by device:

AudioALSACaptureHandlerBase *AudioALSAStreamManager::createCaptureHandler(
    stream_attribute_t *stream_attribute_target) {
    // Init input stream attribute here: configure stream_attribute_target
    stream_attribute_target->audio_mode = mAudioMode; // set mode to stream attribute for mic gain setting
    stream_attribute_target->output_devices = current_output_devices; // set output devices to stream attribute for mic gain setting and BesRecord parameter
    stream_attribute_target->micmute = mMicMute;

    // customized path: scene APP2 can make normal recording take the VoIP chain, so the AEC algorithm can run to cancel echo
    /* StreamAttribute customization for scene */
    ulStreamAttributeTargetCustomization(stream_attribute_target);

        // voice wakeup (VOW)
        if (stream_attribute_target->input_source == AUDIO_SOURCE_HOTWORD) {
                if (mAudioALSAVoiceWakeUpController->getVoiceWakeUpEnable() == false) {
                    mAudioALSAVoiceWakeUpController->setVoiceWakeUpEnable(true);
                }
                if (mVoiceWakeUpNeedOn == true) {
                    mAudioALSAVoiceWakeUpController->SeamlessRecordEnable();
                }
                pCaptureHandler = new AudioALSACaptureHandlerVOW(stream_attribute_target);
        } else if (stream_attribute_target->input_source == AUDIO_SOURCE_VOICE_UNLOCK ||
                   stream_attribute_target->input_source == AUDIO_SOURCE_ECHO_REFERENCE) {
            pCaptureHandler = new AudioALSACaptureHandlerSyncIO(stream_attribute_target);
        // regular (CS network) phone call
        } else if (isPhoneCallOpen() == true) {
                pCaptureHandler = new AudioALSACaptureHandlerVoice(stream_attribute_target);
        // customized AEC scenarios
        } else if ((stream_attribute_target->NativePreprocess_Info.PreProcessEffect_AECOn == true) ||
                   (stream_attribute_target->input_source == AUDIO_SOURCE_VOICE_COMMUNICATION) ||
                   (stream_attribute_target->input_source == AUDIO_SOURCE_CUSTOMIZATION1) || //MagiASR enable AEC
                   (stream_attribute_target->input_source == AUDIO_SOURCE_CUSTOMIZATION2)) { //Normal REC with AEC
            AudioALSAHardwareResourceManager::getInstance()->setHDRRecord(false); // turn off HDR record for VoIP

            switch (stream_attribute_target->input_device) {
            case AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET: {
                if (stream_attribute_target->output_devices & AUDIO_DEVICE_OUT_ALL_SCO) {
                    pCaptureHandler = new AudioALSACaptureHandlerAEC(stream_attribute_target);
                } else {
                    pCaptureHandler = new AudioALSACaptureHandlerBT(stream_attribute_target);
                }
                break;
            }
            case AUDIO_DEVICE_IN_USB_DEVICE:
            case AUDIO_DEVICE_IN_USB_HEADSET:
#if defined(MTK_AURISYS_FRAMEWORK_SUPPORT)
                pCaptureHandler = new AudioALSACaptureHandlerAEC(stream_attribute_target);
#else
                pCaptureHandler = new AudioALSACaptureHandlerUsb(stream_attribute_target);
#endif
                break;
            default: {
                if (isAdspOptionEnable() &&
                    ((isCaptureOffload(stream_attribute_target) && !isIEMsOn &&
                      !AudioALSACaptureDataProviderNormal::getInstance()->getNormalOn()) ||
                     isBleInputDevice(stream_attribute_target->input_device))) {
                    pCaptureHandler = new AudioALSACaptureHandlerDsp(stream_attribute_target);
                } else {
                    pCaptureHandler = new AudioALSACaptureHandlerAEC(stream_attribute_target);
                }
                break;
            }
            }//switch
        } else {
            switch (stream_attribute_target->input_device) {
            case AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET: {
                pCaptureHandler = new AudioALSACaptureHandlerBT(stream_attribute_target);
                break;
            }
            case AUDIO_DEVICE_IN_USB_DEVICE:
            case AUDIO_DEVICE_IN_USB_HEADSET:
                pCaptureHandler = new AudioALSACaptureHandlerUsb(stream_attribute_target);
                break;
            case AUDIO_DEVICE_IN_BUILTIN_MIC:
            case AUDIO_DEVICE_IN_BACK_MIC:
            case AUDIO_DEVICE_IN_WIRED_HEADSET:
            case AUDIO_DEVICE_IN_BLE_HEADSET:
            case AUDIO_DEVICE_IN_BUS:
            default: {
                if (AudioSmartPaController::getInstance()->isInCalibration()) {
                    pCaptureHandler = new AudioALSACaptureHandlerNormal(stream_attribute_target);
                    break;
                }
                if (isAdspOptionEnable() &&
                    !(AUDIO_INPUT_FLAG_MMAP_NOIRQ & stream_attribute_target->mAudioInputFlags) &&
                    ((isCaptureOffload(stream_attribute_target) && !isIEMsOn &&
                      !AudioALSACaptureDataProviderNormal::getInstance()->getNormalOn()) ||
                     isBleInputDevice(stream_attribute_target->input_device))) {
                    if (isPhoneCallOpen() == true) {
                        pCaptureHandler = new AudioALSACaptureHandlerVoice(stream_attribute_target);
                    } else {
                        pCaptureHandler = new AudioALSACaptureHandlerDsp(stream_attribute_target);
                    }
                } else {
                    pCaptureHandler = new AudioALSACaptureHandlerNormal(stream_attribute_target);
                }
                break;
            }
            }//switch
        }

    // save capture handler object in vector
    mCaptureHandlerVector.add(mCaptureHandlerIndex, pCaptureHandler);

    return pCaptureHandler;
}

There is quite a lot of logic inside createCaptureHandler. Here we analyze AUDIO_DEVICE_IN_BUILTIN_MIC, i.e. the normal built-in-mic recording scenario, which maps to AudioALSACaptureHandlerNormal.

status_t AudioALSACaptureHandlerNormal::open() {
    if (!AudioSmartPaController::getInstance()->isInCalibration()) {
        if (isAdspOptionEnable() &&
            (AudioDspStreamManager::getInstance()->getDspRawInHandlerEnable(mStreamAttributeTarget->mAudioInputFlags) > 0) &&
            (AudioDspStreamManager::getInstance()->getDspInHandlerEnable(mStreamAttributeTarget->mAudioInputFlags) > 0) && !isIEMsOn &&
            !AudioALSACaptureDataProviderNormal::getInstance()->getNormalOn()) {
             mCaptureDataClient = new AudioALSACaptureDataClientAurisysNormal(AudioALSACaptureDataProviderDspRaw::getInstance(),
                                                                              mStreamAttributeTarget, NULL); // NULL: w/o AEC
        } else {
            mCaptureDataClient = new AudioALSACaptureDataClientAurisysNormal(AudioALSACaptureDataProviderNormal::getInstance(),
                                                                             mStreamAttributeTarget, NULL); // NULL: w/o AEC
        }
    } else {
        mCaptureDataClient = new AudioALSACaptureDataClientAurisysNormal(AudioALSACaptureDataProviderEchoRefExt::getInstance(),
                                                                         mStreamAttributeTarget, NULL); // NULL: w/o AEC
    }
}

The path taken here is AudioALSACaptureDataClientAurisysNormal, with AudioALSACaptureDataProviderDspRaw as the DataProvider (described later).

Let's continue with AudioALSACaptureDataClientAurisysNormal:

AudioALSACaptureDataClientAurisysNormal::AudioALSACaptureDataClientAurisysNormal(
    AudioALSACaptureDataProviderBase *pCaptureDataProvider,
    stream_attribute_t *stream_attribute_target,
    AudioALSACaptureDataProviderBase *pCaptureDataProviderEchoRef) {

    // config attribute for input device
    mCaptureDataProvider->configStreamAttribute(mStreamAttributeTarget);
    // attach client to capture data provider (after data buf ready)
    mCaptureDataProvider->attach(this);
    // get latency (for library, but not data provider)
    mLatency = (IsLowLatencyCapture()) ? UPLINK_LOW_LATENCY_MS : UPLINK_NORMAL_LATENCY_MS;

    if (mAudioALSAVolumeController != NULL) {
        mAudioALSAVolumeController->SetCaptureGain(mStreamAttributeTarget->audio_mode,
                                                   mStreamAttributeTarget->input_source,
                                                   mStreamAttributeTarget->input_device,
                                                   mStreamAttributeTarget->output_devices);
    }

    // create lib manager; aurisys carries the processing algorithms on the DSP
    CreateAurisysLibManager();

    // depop: MTK drops an initial chunk of capture data to avoid pop noise
    drop_ms = getDropMs(mStreamAttributeTarget);
    if (drop_ms) {
        if ((drop_ms % mLatency) != 0) { // drop data size need to align interrupt rate
            drop_ms = ((drop_ms / mLatency) + 1) * mLatency; // ceil()
        }
        mDropPopSize = (audio_bytes_per_sample(mStreamAttributeTarget->audio_format) *
                        mStreamAttributeTarget->num_channels *
                        mStreamAttributeTarget->sample_rate *
                        drop_ms) / 1000;
    }

    // processThread
    ret = pthread_create(&hProcessThread, NULL,
                         AudioALSACaptureDataClientAurisysNormal::processThread,
                         (void *)this);
}

  • Configure the stream attributes
  • Attach to the DataProvider, which supplies the capture data
  • Compute the latency, i.e. the capture period interval
  • Set the capture gain
  • Create the aurisys manager, used by the processing algorithms
  • Compute mDropPopSize, the amount of data to drop at the start of recording to avoid pop noise (see the worked example after this list)
  • Create a thread that consumes the capture data
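As a quick sanity check on the depop math, here is a worked example with assumed values (16-bit PCM, 2 channels, 48 kHz, drop_ms = 50, mLatency = 20 ms; these numbers are for illustration only):

// Worked example of the mDropPopSize calculation (illustrative values only).
#include <cstdint>
#include <cstdio>

int main() {
    uint32_t latency_ms = 20; // e.g. UPLINK_NORMAL_LATENCY_MS (assumed)
    uint32_t drop_ms = 50;    // e.g. value returned by getDropMs() (assumed)
    if ((drop_ms % latency_ms) != 0) {
        // align up to a multiple of the interrupt period: ((50 / 20) + 1) * 20 = 60 ms
        drop_ms = ((drop_ms / latency_ms) + 1) * latency_ms;
    }
    // 2 bytes/sample * 2 channels * 48000 Hz * 60 ms / 1000 = 11520 bytes
    uint32_t dropPopSize = 2 * 2 * 48000u * drop_ms / 1000;
    printf("mDropPopSize = %u bytes\n", dropPopSize); // 11520
    return 0;
}

With the constructor finished, processThread is where the client actually consumes the data: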
void *AudioALSACaptureDataClientAurisysNormal::processThread(void *arg) {
    /* process thread created */
    client->mProcessThreadLaunched = true;
    /* get buffer address */
    raw_ul    = &client->mRawDataBuf; // ring buffer addresses
    processed = &client->mProcessedDataBuf;
    while (client->mEnable == true) {
        data_count_raw_ul = audio_ringbuf_count(raw_ul);
        // data not ready, wait for data
        if ((data_count_raw_ul < client->mRawDataPeriodBufSize) ||
            (client->IsAECEnable() == true &&
             ((client->mIsEchoRefDataSync == false && client->isNeedSkipSyncEchoRef() == false) ||
              data_count_raw_aec < client->mEchoRefDataPeriodBufSize))) {
            wait_result = AL_WAIT_MS(client->mRawDataBufLock, MAX_PROCESS_DATA_WAIT_TIME_OUT_MS);
        }

        // copy data
        audio_pool_buf_copy_from_ringbuf(ul_in, raw_ul, client->mRawDataPeriodBufSize);
        aurisys_process_ul_only(manager,
                                ul_in,
                                ul_out,
                                ul_aec);

        // depop
        if (client->mDropPopSize > 0) {
            ALOGV("data_count %u, mDropPopSize %u, %dL", data_count, client->mDropPopSize, __LINE__);

            if (data_count >= client->mDropPopSize) {
                audio_ringbuf_drop_data(&ul_out->ringbuf, client->mDropPopSize);
                data_count -= client->mDropPopSize;
                client->mDropPopSize = 0;
            } else {
                audio_ringbuf_drop_data(&ul_out->ringbuf, data_count);
                client->mDropPopSize -= data_count;
                data_count = 0;
            }
        }

        // copy to processed buf and signal read()
        audio_ringbuf_copy_from_linear(processed, effect_buf, data_count);
    }
    pthread_exit(NULL);
    return NULL;
}

The thread loops continuously, copying data out of the client->mRawDataBuf ring buffer; that data is the capture data. Each chunk is run through the aurisys UL (uplink) processing chain, and then mDropPopSize, computed earlier in the AudioALSACaptureDataClientAurisysNormal constructor, is used to drop an initial chunk of data so the start of the recording is free of pop noise.

Now the question is: why is there capture data in the ring buffer in the first place? Who provides it?

This is what was mentioned earlier: the DataProvider is AudioALSACaptureDataProviderDspRaw.

Earlier, the AudioALSACaptureDataClientAurisysNormal constructor called mCaptureDataProvider->attach(this), and mCaptureDataProvider is exactly the AudioALSACaptureDataProviderDspRaw that was passed in!

void AudioALSACaptureDataProviderBase::attach(IAudioALSACaptureDataClient *pCaptureDataClient) {
    mCaptureDataClientVector.add(pCaptureDataClient->getIdentity(), pCaptureDataClient);
    size = (uint32_t)mCaptureDataClientVector.size();
    // open pcm interface when 1st attach
    if (size == 1) {
        mOpenIndex++;
        open();
    } else {
        if (!hasLowLatencyCapture && pCaptureDataClient->IsLowLatencyCapture()) {
            // update HW interrupt rate by HW sample rate
            updateReadSize(getPeriodBufSize(pStreamAttr, UPLINK_NORMAL_LATENCY_MS) *
                           lowLatencyMs / UPLINK_NORMAL_LATENCY_MS);
            if (mCaptureDataProviderType != CAPTURE_PROVIDER_DSP) {
                mHardwareResourceManager->setULInterruptRate(mStreamAttributeSource.sample_rate *
                                                             lowLatencyMs / 1000);
            } else if (isAdspOptionEnable()) {
                AudioDspStreamManager::getInstance()->UpdateCaptureDspLatency();
            }
        }
        enablePmicInputDevice(true);
    }
}
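attach() effectively reference-counts the hardware: the PCM interface is opened on the first attach and would be released when the last client detaches. A minimal sketch of the pattern (illustrative only, not MTK code):

// First-attach / last-detach pattern (illustrative sketch, invented names).
#include <cstdio>
#include <vector>

class ProviderSketch {
public:
    void attach(int clientId) {
        mClients.push_back(clientId);
        if (mClients.size() == 1) { // 1st attach: open pcm + start read thread
            printf("open()\n");
        }
    }
    void detach(int clientId) {
        for (auto it = mClients.begin(); it != mClients.end(); ++it) {
            if (*it == clientId) { mClients.erase(it); break; }
        }
        if (mClients.empty()) {     // last detach: stop read thread + close pcm
            printf("close()\n");
        }
    }
private:
    std::vector<int> mClients;      // cf. mCaptureDataClientVector
};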

On the first recording, size is necessarily 1, so open() is entered:

status_t AudioALSACaptureDataProviderDspRaw::open() {
    unsigned int feature_id = CAPTURE_RAW_FEATURE_ID;
    // register the raw-capture feature with the ADSP (the DSP is asked to provide raw capture data)
    mAudioMessengerIPI->registerAdspFeature(feature_id);
        if (AudioALSAHardwareResourceManager::getInstance()->getNumPhoneMicSupport() > 2 && mStreamAttributeSource.input_device != AUDIO_DEVICE_IN_WIRED_HEADSET) {
            mApTurnOnSequence = AUDIO_CTL_ADDA_TO_CAPTURE1_4CH;
        } else {
            mApTurnOnSequence = AUDIO_CTL_ADDA_TO_CAPTURE1;
        }
        // turn on the controls for the corresponding audio path
        AudioALSADeviceConfigManager::getInstance()->ApplyDeviceTurnonSequenceByName(mApTurnOnSequence);
        // configure mStreamAttributeSource
        /* Reset frames read counter */
        mStreamAttributeSource.Time_Info.total_frames_readed = 0;
        mStreamAttributeSource.sample_rate = getInputSampleRate(mStreamAttributeSource.input_device,
                                                                mStreamAttributeSource.output_devices);
        mStreamAttributeSource.audio_format = AUDIO_FORMAT_PCM_8_24_BIT;
        if (mStreamAttributeSource.input_device == AUDIO_DEVICE_IN_WIRED_HEADSET ||
            mStreamAttributeSource.input_source == AUDIO_SOURCE_UNPROCESSED){
            mStreamAttributeSource.num_channels = 1;
        } else {
            mStreamAttributeSource.num_channels = 2;
        }
        mStreamAttributeSource.latency = mlatency;
        // configure mConfig
        setApHwPcm();
        // callback invoked after each DMA transfer completes: processDmaMsgWrapper
        mAudioMessengerIPI->registerDmaCbk(
            TASK_SCENE_CAPTURE_RAW,
            0x2000,
            0xF000,
            processDmaMsgWrapper,
            this);

        mAudioALSAVolumeController->SetCaptureGain(mStreamAttributeSource.audio_mode,
                                                   mStreamAttributeSource.input_source,
                                                   mStreamAttributeSource.input_device,
                                                   mStreamAttributeSource.output_devices);

        openApHwPcm(); // pcmOpen
        AudioDspStreamManager::getInstance()->addCaptureDataProvider(this); // pcm_prepare, pcm_start

    int ret = pthread_create(&hReadThread, NULL, AudioALSACaptureDataProviderDspRaw::readThread, (void *)this);
}

Inside open(), the core work is driving pcm_open and pcm_start at the bottom layer to start capturing; after that comes the most important part, the readThread!
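For reference, the tinyalsa sequence that openApHwPcm() and pcm_start roughly boil down to looks like this (a standalone sketch; the card/device numbers and pcm_config values are placeholders, not MTK's actual configuration):

// Sketch of a tinyalsa capture sequence (placeholder card/device/config values).
#include <tinyalsa/asoundlib.h>
#include <stdio.h>
#include <string.h>

int capture_example(void) {
    struct pcm_config config;
    memset(&config, 0, sizeof(config));
    config.channels = 2;
    config.rate = 48000;
    config.period_size = 1024;
    config.period_count = 4;
    config.format = PCM_FORMAT_S16_LE;

    struct pcm *pcm = pcm_open(0 /* card */, 0 /* device */, PCM_IN, &config);
    if (pcm == NULL || !pcm_is_ready(pcm)) {
        fprintf(stderr, "pcm_open failed: %s\n", pcm ? pcm_get_error(pcm) : "alloc");
        return -1;
    }
    pcm_start(pcm); // start capturing

    char buffer[4096];
    if (pcm_read(pcm, buffer, sizeof(buffer)) == 0) { // blocks until a full buffer is read
        printf("read %zu bytes of capture data\n", sizeof(buffer));
    }
    pcm_close(pcm);
    return 0;
}

Now the readThread itself: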

void *AudioALSACaptureDataProviderDspRaw::readThread(void *arg) {
    pDataProvider->waitPcmStart();

    // read raw data from alsa driver
    char linear_buffer[kReadBufferSize];
    while (pDataProvider->mEnable == true) {
        // read capture data from the driver into linear_buffer
        ret = pDataProvider->pcmRead(pDataProvider->mPcm, linear_buffer, kReadBufferSize);
        // use ringbuf format to save buffer info
        pDataProvider->mPcmReadBuf.pBufBase = linear_buffer;
        pDataProvider->mPcmReadBuf.bufLen   = Read_Size + 1; // +1: avoid pRead == pWrite
        pDataProvider->mPcmReadBuf.pRead    = linear_buffer;
        pDataProvider->mPcmReadBuf.pWrite   = linear_buffer + Read_Size;
        pDataProvider->provideCaptureDataToAllClients(open_index);
    }

    pthread_exit(NULL);
    return NULL;
}

Here is the key point: in this thread, capture data is continuously read from the ALSA layer into linear_buffer, wrapped into pDataProvider->mPcmReadBuf, and then provided to all clients, i.e. to our AudioALSACaptureDataClientAurisysNormal.

void AudioALSACaptureDataProviderBase::provideCaptureDataToAllClients(const uint32_t open_index) {
    for (size_t i = 0; i < mCaptureDataClientVector.size(); i++) {
        pCaptureDataClient = mCaptureDataClientVector[i];
        pCaptureDataClient->copyCaptureDataToClient(mPcmReadBuf);
    }
}

It works like a broadcast: the provider walks the whole client vector and hands the capture data to every client.

Why did MTK design it this way?

The benefit is that even with multiple AudioStreamIn instances, each client keeps its own ring read buffer; data coming from the hardware is dropped into each client's own ring buffer, so the streams do not interfere with one another.

uint32_t AudioALSACaptureDataClientAurisysNormal::copyCaptureDataToClient(RingBuf pcm_read_buf) {
    pcm_read_buf_wrap.base = pcm_read_buf.pBufBase;
    pcm_read_buf_wrap.read = pcm_read_buf.pRead;
    pcm_read_buf_wrap.write = pcm_read_buf.pWrite;
    pcm_read_buf_wrap.size = pcm_read_buf.bufLen;
    audio_ringbuf_copy_from_ringbuf_all(&mRawDataBuf, &pcm_read_buf_wrap);
}

Finally the data lands in mRawDataBuf, where AudioALSACaptureDataClientAurisysNormal::processThread can pick it up.

Overall, the recording flow is fairly simple at its core: AudioALSACaptureDataProviderDspRaw::readThread produces the data and AudioALSACaptureDataClientAurisysNormal::processThread consumes it, a classic producer-consumer model.
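To make that producer-consumer shape concrete, here is a minimal, self-contained sketch (invented names, std::thread instead of pthread; not MTK code): a provider thread "reads" one period of PCM data at a time and broadcasts it into each attached client's own buffer, and each client thread drains its buffer, mirroring readThread feeding copyCaptureDataToClient and processThread consuming mRawDataBuf.

// Minimal producer-consumer sketch mirroring the capture flow (not MTK code).
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

struct Client {                  // plays the role of AudioALSACaptureDataClientAurisysNormal
    std::mutex lock;             // cf. mRawDataBufLock
    std::condition_variable cv;
    std::vector<short> buf;      // simplified stand-in for the ring buffer (mRawDataBuf)
    bool enabled = true;
};

// Provider side: one "readThread" broadcasting to every attached client.
void readThread(std::vector<Client*> &clients) {
    for (int period = 0; period < 5; ++period) {
        std::vector<short> pcm(480, period);      // pretend pcm_read() filled one period
        for (Client *c : clients) {               // provideCaptureDataToAllClients()
            std::lock_guard<std::mutex> g(c->lock);
            c->buf.insert(c->buf.end(), pcm.begin(), pcm.end());
            c->cv.notify_one();                   // wake that client's processThread
        }
    }
    for (Client *c : clients) {                   // tell clients to stop
        std::lock_guard<std::mutex> g(c->lock);
        c->enabled = false;
        c->cv.notify_one();
    }
}

// Client side: one "processThread" per client draining its own buffer.
void processThread(Client *c, int id) {
    for (;;) {
        std::unique_lock<std::mutex> g(c->lock);
        c->cv.wait(g, [c] { return !c->buf.empty() || !c->enabled; });
        if (c->buf.empty() && !c->enabled) { break; }
        printf("client %d consumed %zu samples\n", id, c->buf.size());
        c->buf.clear();                           // "process" the data and hand it to read()
    }
}

int main() {
    Client a, b;                                  // two independent stream-in clients
    std::vector<Client*> clients = {&a, &b};
    std::thread t1(processThread, &a, 1);
    std::thread t2(processThread, &b, 2);
    std::thread provider(readThread, std::ref(clients));
    provider.join();
    t1.join();
    t2.join();
    return 0;
}

Each client having its own buffer is exactly why multiple simultaneous AudioStreamIn instances do not interfere with each other.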
