status_t AudioALSACaptureHandlerAEC::open()
{
    // Whether the capture path comes in over the BT SCO headset.
    const bool isBtInput =
        (mStreamAttributeTarget->input_device == AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET);

    // Create the capture data client for the active input device.
    if (isBtInput)
    {
        // BT input: choose the provider that matches the BT chip's interface.
        if (WCNChipController::GetInstance()->IsBTMergeInterfaceSupported() == true)
        {
            mCaptureDataClient = new AudioALSACaptureDataClient(
                AudioALSACaptureDataProviderBTSCO::getInstance(), mStreamAttributeTarget);
        }
        else
        {
            mCaptureDataClient = new AudioALSACaptureDataClient(
                AudioALSACaptureDataProviderBTCVSD::getInstance(), mStreamAttributeTarget);
        }
    }
    else
    {
        // Non-BT input: create the data-reading client on the normal provider.
        mCaptureDataClient = new AudioALSACaptureDataClient(
            AudioALSACaptureDataProviderNormal::getInstance(), mStreamAttributeTarget);
    }

    // Attach an echo-reference provider (the output-device feedback signal)
    // so the AEC can cancel what is currently being played back.
    if (mCaptureDataClient != NULL)
    {
        memcpy(&mStreamAttributeTargetEchoRef, mStreamAttributeTarget, sizeof(stream_attribute_t));
        if (isBtInput)
        {
            // BT path uses its dedicated echo-ref provider.
            mCaptureDataClient->AddEchoRefDataProvider(
                AudioALSACaptureDataProviderEchoRefBTSCO::getInstance(),
                &mStreamAttributeTargetEchoRef);
        }
        else if (mStreamAttributeTarget->output_devices == AUDIO_DEVICE_OUT_SPEAKER &&
                 AudioSmartPaController::getInstance()->isEchoReferenceSupport())
        {
            // Speaker output with smart-PA echo-reference support: external echo ref.
            mCaptureDataClient->AddEchoRefDataProvider(
                AudioALSACaptureDataProviderEchoRefExt::getInstance(),
                &mStreamAttributeTargetEchoRef);
        }
        else
        {
            // Receiver (earpiece) output: internal echo ref.
            mCaptureDataClient->AddEchoRefDataProvider(
                AudioALSACaptureDataProviderEchoRef::getInstance(),
                &mStreamAttributeTargetEchoRef);
        }
    }

    // No need to configure the analog part for the BT case.
    if (!isBtInput)
    {
        mHardwareResourceManager->startInputDevice(mStreamAttributeTarget->input_device);
    }
    return NO_ERROR;
}
// Open the output-device feedback (echo reference) signal for echo cancellation.
// Attach an echo-reference data provider to this capture client: set up the
// raw/SRC ring buffers, register with the provider (which opens its PCM on the
// first attach), and create the sample-rate converters needed by the native
// AEC and the MTK VoIP (BesRecord) processing.
void AudioALSACaptureDataClient::AddEchoRefDataProvider(AudioALSACaptureDataProviderBase *pCaptureDataProvider,
    stream_attribute_t *stream_attribute_target)
{
    mStreamAttributeTargetEchoRef = stream_attribute_target;
    mCaptureDataProviderEchoRef = pCaptureDataProvider;
    mStreamAttributeSourceEchoRef = mCaptureDataProviderEchoRef->getStreamAttributeSource();

    // Fix the channel count of echo reference data to stereo since the native
    // echo_reference_itfe supports stereo only.
    mStreamAttributeTargetEchoRef->num_channels = 2;
    mStreamAttributeTargetEchoRef->audio_channel_mask = AUDIO_CHANNEL_IN_STEREO;

    // Ring buffer for raw echo-ref data as delivered by the provider.
    memset((void *)&mEchoRefRawDataBuf, 0, sizeof(mEchoRefRawDataBuf));
    mEchoRefRawDataBuf.pBufBase = new char[kClientBufferSize];
    mEchoRefRawDataBuf.bufLen = kClientBufferSize;
    mEchoRefRawDataBuf.pRead = mEchoRefRawDataBuf.pBufBase;
    mEchoRefRawDataBuf.pWrite = mEchoRefRawDataBuf.pBufBase;
    // FIX: the raw buffer allocation was never checked (only the SRC buffer was).
    ASSERT(mEchoRefRawDataBuf.pBufBase != NULL);

    // Ring buffer for sample-rate-converted echo-ref data.
    memset((void *)&mEchoRefSrcDataBuf, 0, sizeof(mEchoRefSrcDataBuf));
    mEchoRefSrcDataBuf.pBufBase = new char[kClientBufferSize];
    mEchoRefSrcDataBuf.bufLen = kClientBufferSize;
    mEchoRefSrcDataBuf.pRead = mEchoRefSrcDataBuf.pBufBase;
    mEchoRefSrcDataBuf.pWrite = mEchoRefSrcDataBuf.pBufBase;
    ASSERT(mEchoRefSrcDataBuf.pBufBase != NULL);

    // Attach client to the capture EchoRef data provider.
    // mStreamAttributeSource will be updated when the first client is attached.
    mCaptureDataProviderEchoRef->attach(this);

    // Assume playback starts after PCM open.
    mSPELayer->SetOutputStreamRunning(true, true);
    mSPELayer->SetEchoRefStartTime(GetSystemTime(false));
    mSPELayer->SetDownLinkLatencyTime(mStreamAttributeSourceEchoRef->latency);

    // SRC #1: provider (source) format -> target format, for Android native AEC.
    // Only needed when rate/channels/format differ.
    if (mStreamAttributeSourceEchoRef->sample_rate != mStreamAttributeTargetEchoRef->sample_rate ||
        mStreamAttributeSourceEchoRef->num_channels != mStreamAttributeTargetEchoRef->num_channels ||
        mStreamAttributeSourceEchoRef->audio_format != mStreamAttributeTargetEchoRef->audio_format)
    {
        mBliSrcEchoRef = newMtkAudioSrc(
            mStreamAttributeSourceEchoRef->sample_rate, mStreamAttributeSourceEchoRef->num_channels,
            mStreamAttributeTargetEchoRef->sample_rate, mStreamAttributeTargetEchoRef->num_channels,
            SRC_IN_Q1P15_OUT_Q1P15); // TODO(Harvey, Ship): 24bit
        // FIX: guard against allocation failure before dereferencing.
        // (A NULL mBliSrcEchoRef elsewhere means "no SRC needed", so a silent
        // NULL here would feed wrong-rate data; fail loudly instead.)
        ASSERT(mBliSrcEchoRef != NULL);
        mBliSrcEchoRef->open();
    }

    // SRC #2: target format -> 16 kHz mono, required by MTK VoIP (BesRecord).
    if ((mStreamAttributeTargetEchoRef->sample_rate != 16000) || (mStreamAttributeTargetEchoRef->num_channels != 1))
    {
        mBliSrcEchoRefBesRecord = newMtkAudioSrc(
            mStreamAttributeTargetEchoRef->sample_rate, mStreamAttributeTargetEchoRef->num_channels,
            16000, 1,
            SRC_IN_Q1P15_OUT_Q1P15);
        ASSERT(mBliSrcEchoRefBesRecord != NULL);
        mBliSrcEchoRefBesRecord->open();
    }
}
// Register a capture client with this provider. Each client gets a unique
// identity; the provider's PCM interface is opened when the first client
// attaches.
void AudioALSACaptureDataProviderBase::attach(AudioALSACaptureDataClient *pCaptureDataClient)
{
    // Hand out the next identity and index the client by it.
    pCaptureDataClient->setIdentity(mCaptureDataClientIndex);
    mCaptureDataClientIndex++;
    mCaptureDataClientVector.add(pCaptureDataClient->getIdentity(), pCaptureDataClient);

    // First attached client triggers the PCM open.
    const bool isFirstClient = (mCaptureDataClientVector.size() == 1);
    if (isFirstClient)
    {
        mOpenIndex++;
        open();
    }
}
// Open the external echo-reference provider: configure the source stream
// attributes from the primary output, size the read buffer from the latency,
// open and start the AWB capture PCM, then spawn the reader thread.
// Returns NO_ERROR on success, UNKNOWN_ERROR if the reader thread cannot be
// created.
status_t AudioALSACaptureDataProviderEchoRefExt::open()
{
    AudioALSASampleRateController *pAudioALSASampleRateController = AudioALSASampleRateController::getInstance();
    pAudioALSASampleRateController->setScenarioStatus(PLAYBACK_SCENARIO_ECHO_REF_EXT);

    // Config attributes (used later in client SRC/Enh/...). // TODO(Sam): query the mConfig?
    mStreamAttributeSource.audio_format = AUDIO_FORMAT_PCM_16_BIT;
    mStreamAttributeSource.audio_channel_mask = AUDIO_CHANNEL_IN_STEREO;
    mStreamAttributeSource.num_channels =
        android_audio_legacy::AudioSystem::popCount(mStreamAttributeSource.audio_channel_mask);
    mStreamAttributeSource.sample_rate =
        AudioALSASampleRateController::getInstance()->getPrimaryStreamOutSampleRate();

    // Reset frames-read counter.
    mStreamAttributeSource.Time_Info.total_frames_readed = 0;

    uint32_t latency = getLatencyTime();
    mConfig.rate = mStreamAttributeSource.sample_rate;
    mConfig.channels = mStreamAttributeSource.num_channels;
    mConfig.format = PCM_FORMAT_S16_LE;

    // Bytes per "latency" ms of audio, masked down to a 64-byte alignment.
    // e.g. (DL1) 44.1 kHz, 20 ms, stereo, 2 bytes/sample.
    kReadBufferSize = (((uint32_t)((mStreamAttributeSource.sample_rate / 1000) * latency * mConfig.channels *
        (pcm_format_to_bits(mConfig.format) / 8))) & 0xFFFFFFC0);
    mConfig.period_size = kReadBufferSize / mConfig.channels / (pcm_format_to_bits(mConfig.format) / 8);
    mConfig.period_count = 2;
    if (latency == UPLINK_LOW_LATENCY_MS)
    {
        mConfig.period_count = 8; // 2*(20ms/5ms);
    }
    mConfig.start_threshold = 0;
    mConfig.stop_threshold = 0;
    mConfig.silence_threshold = 0;

    // Latency time, set as hardware buffer size.
    mStreamAttributeSource.latency = (mConfig.period_size * mConfig.period_count * 1000) / mConfig.rate;

    OpenPCMDump(LOG_TAG);

    // Enable PCM.
    int pcmIdx = AudioALSADeviceParser::getInstance()->GetPcmIndexByString(keypcmI2SAwbCapture);
    int cardIdx = AudioALSADeviceParser::getInstance()->GetCardIndexByString(keypcmI2SAwbCapture);
    mPcm = pcm_open(cardIdx, pcmIdx, PCM_IN | PCM_MONOTONIC, &mConfig);
    // NOTE(review): tinyalsa's pcm_open() can return a handle that is not
    // ready; mPcm/pcm_start() results are not validated here — consider a
    // pcm_is_ready(mPcm) check with proper error logging.
    pcm_start(mPcm);

    // Create the reading thread.
    mEnable = true;
    int ret = pthread_create(&hReadThread, NULL,
        AudioALSACaptureDataProviderEchoRefExt::readThread, (void *)this);
    if (ret != 0)
    {
        // FIX: the pthread_create result was previously ignored. Without a
        // reader thread no data will ever flow, so stop the provider and
        // report the failure instead of pretending the open succeeded.
        mEnable = false;
        return UNKNOWN_ERROR;
    }
    return NO_ERROR;
}
// Thread that reads the downlink (playback) data used as the echo reference.
// Reader-thread entry point: loops while the provider is enabled, pulling one
// kReadBufferSize chunk of echo-reference PCM from the ALSA driver per
// iteration and fanning it out to all attached clients.
// `arg` is the owning AudioALSACaptureDataProviderEchoRef instance.
void *AudioALSACaptureDataProviderEchoRef::readThread(void *arg)
{
    AudioALSACaptureDataProviderEchoRef *pDataProvider = static_cast<AudioALSACaptureDataProviderEchoRef *>(arg);
    uint32_t open_index = pDataProvider->mOpenIndex;

    // Name the thread "<function><provider type>" for debugging.
    // FIX: sprintf -> snprintf (fixed 32-byte buffer), and the unused outer
    // `status_t retval` that shadowed the pcm_read result was removed.
    char nameset[32];
    snprintf(nameset, sizeof(nameset), "%s%d", __FUNCTION__, pDataProvider->mCaptureDataProviderType);
    prctl(PR_SET_NAME, (unsigned long)nameset, 0, 0, 0);

    // Read raw data from the ALSA driver.
    // NOTE(review): kReadBufferSize is a runtime value, so this is a
    // variable-length array (compiler extension, not standard C++).
    char linear_buffer[kReadBufferSize];
    while (pDataProvider->mEnable == true)
    {
        // timerec[0]: time spent outside pcm_read since the last iteration.
        clock_gettime(CLOCK_REALTIME, &pDataProvider->mNewtime);
        pDataProvider->timerec[0] = calc_time_diff(pDataProvider->mNewtime, pDataProvider->mOldtime);
        pDataProvider->mOldtime = pDataProvider->mNewtime;

        int readResult = pcm_read(pDataProvider->mPcm, linear_buffer, kReadBufferSize);
        // NOTE(review): readResult != 0 means the read failed, but the
        // original code still published the buffer; that behavior is kept
        // here — confirm whether failed reads should skip the provide step.
        (void)readResult;

        // timerec[1]: time spent inside pcm_read.
        clock_gettime(CLOCK_REALTIME, &pDataProvider->mNewtime);
        pDataProvider->timerec[1] = calc_time_diff(pDataProvider->mNewtime, pDataProvider->mOldtime);
        pDataProvider->mOldtime = pDataProvider->mNewtime;

        pDataProvider->GetCaptureTimeStamp(&pDataProvider->mStreamAttributeSource.Time_Info, kReadBufferSize);

        // Wrap the linear buffer in ring-buffer form so clients can consume it.
        pDataProvider->mPcmReadBuf.pBufBase = linear_buffer; // linear_buffer >> mPcmReadBuf
        pDataProvider->mPcmReadBuf.bufLen = kReadBufferSize + 1; // +1: avoid pRead == pWrite
        pDataProvider->mPcmReadBuf.pRead = linear_buffer;
        pDataProvider->mPcmReadBuf.pWrite = linear_buffer + kReadBufferSize;

        // Provide EchoRef data to every attached client.
        pDataProvider->provideEchoRefCaptureDataToAllClients(open_index);

        // timerec[2]: time spent distributing the data.
        clock_gettime(CLOCK_REALTIME, &pDataProvider->mNewtime);
        pDataProvider->timerec[2] = calc_time_diff(pDataProvider->mNewtime, pDataProvider->mOldtime);
        pDataProvider->mOldtime = pDataProvider->mNewtime;
    }

    pthread_exit(NULL);
    return NULL;
}
void AudioALSACaptureDataProviderBase::provideEchoRefCaptureDataToAllClients(const uint32_t open_index)
{
AudioALSACaptureDataClient *pCaptureDataClient = NULL;
WritePcmDumpData();
for (size_t i = 0; i < mCaptureDataClientVector.size(); i++)
{
pCaptureDataClient = mCaptureDataClientVector[i];
pCaptureDataClient->copyEchoRefCaptureDataToClient(mPcmReadBuf);
}
}
uint32_t AudioALSACaptureDataClient::copyEchoRefCaptureDataToClient(RingBuf pcm_read_buf)
{
uint32_t freeSpace = RingBuf_getFreeSpace(&mEchoRefRawDataBuf);
uint32_t dataSize = RingBuf_getDataCount(&pcm_read_buf);
//pcm_read_buf>>mEchoRefRawDataBuf
if (freeSpace < dataSize)
{
RingBuf_copyFromRingBuf(&mEchoRefRawDataBuf, &pcm_read_buf, freeSpace);
} else
{
RingBuf_copyFromRingBuf(&mEchoRefRawDataBuf, &pcm_read_buf, dataSize);
}
// SRC to to Native AEC need format (as StreaminTarget format since AWB data might be the same as DL1 before)
const uint32_t kNumRawData = RingBuf_getDataCount(&mEchoRefRawDataBuf);
uint32_t num_free_space = RingBuf_getFreeSpace(&mEchoRefSrcDataBuf);
if (mBliSrcEchoRef == NULL) // No need SRC, mEchoRefRawDataBuf>>mEchoRefSrcDataBuf
{
if (num_free_space < kNumRawData)
{
RingBuf_copyFromRingBuf(&mEchoRefSrcDataBuf, &mEchoRefRawDataBuf, num_free_space);
} else
{
RingBuf_copyFromRingBuf(&mEchoRefSrcDataBuf, mEchoRefRawDataBuf, kNumRawData);
}
} else // Need SRC, mEchoRefRawDataBuf>>pEchoRefRawDataLinearBuf
{
char *pEchoRefRawDataLinearBuf = new char[kNumRawData];
RingBuf_copyToLinear(pEchoRefRawDataLinearBuf, &mEchoRefRawDataBuf, kNumRawData);
char *pEchoRefSrcDataLinearBuf = new char[num_free_space];
char *p_read = pEchoRefRawDataLinearBuf;
uint32_t num_raw_data_left = kNumRawData;
uint32_t num_converted_data = num_free_space; // max convert num_free_space
uint32_t consumed = num_raw_data_left;
//pEchoRefRawDataLinearBuf>>pEchoRefSrcDataLinearBuf
mBliSrcEchoRef->process((int16_t *)p_read, &num_raw_data_left,
(int16_t *)pEchoRefSrcDataLinearBuf, &num_converted_data);
consumed -= num_raw_data_left;
p_read += consumed;
//pEchoRefSrcDataLinearBuf>>mEchoRefSrcDataBuf
RingBuf_copyFromLinear(&mEchoRefSrcDataBuf, pEchoRefSrcDataLinearBuf, num_converted_data);
}
//for Preprocess
const uint32_t kNumEchoRefSrcData = RingBuf_getDataCount(&mEchoRefSrcDataBuf);
char *pEchoRefProcessDataLinearBuf = new char[kNumEchoRefSrcData];
RingBuf_copyToLinear(pEchoRefProcessDataLinearBuf, &mEchoRefSrcDataBuf, kNumEchoRefSrcData);
#ifdef BOOST_ECHOREF
if((mStreamAttributeTarget->output_devices & AUDIO_DEVICE_OUT_SPEAKER))
{
for(int i=0; i<kNumEchoRefSrcData/2 ; i++) {
// over flow protection
int16_t temp = *((int16_t*)(pEchoRefProcessDataLinearBuf+(i*2)));
if(temp >8191)
temp = 8191;
else if(temp <-8192)
temp =-8192;
temp = temp <<2;
pEchoRefProcessDataLinearBuf[2*i]= (char)temp;
pEchoRefProcessDataLinearBuf[2*i+1]= (char)(temp>>8);
}
}
#endif
//here to queue the EchoRef data to Native effect, since it doesn't need to SRC here
if ((mAudioPreProcessEffect->num_preprocessors > 0)) //&& echoref is enabled
{
//copy pEchoRefProcessDataLinearBuf to native preprocess for echo ref
mAudioPreProcessEffect->WriteEchoRefData(pEchoRefProcessDataLinearBuf, kNumEchoRefSrcData,
&mStreamAttributeSourceEchoRef->Time_Info);
}
//If need MTK VoIP process
if ((mStreamAttributeTarget->BesRecord_Info.besrecord_enable) && !mBypassBesRecord)
{
struct InBufferInfo BufInfo;
//for MTK native SRC
if (mBliSrcEchoRefBesRecord == NULL) // No need SRC
{
BufInfo.pBufBase = (short *)pEchoRefProcessDataLinearBuf;
BufInfo.BufLen = kNumEchoRefSrcData;
BufInfo.time_stamp_queued = GetSystemTime(false);
BufInfo.bHasRemainInfo = true;
BufInfo.time_stamp_predict = GetEchoRefTimeStamp();
#ifdef SRC_DROP_DATA
if (mFirstEchoSRC == true)
{
mFirstEchoSRC = false;
delete[] pEchoRefProcessDataLinearBuf;
return 0;
}
#endif
mSPELayer->WriteReferenceBuffer(&BufInfo);
} else // Need SRC
{
char *pEchoRefProcessSRCDataLinearBuf = new char[kNumEchoRefSrcData];
char *p_read = pEchoRefProcessDataLinearBuf;
uint32_t num_raw_data_left = kNumEchoRefSrcData;
uint32_t num_converted_data = kNumEchoRefSrcData; // max convert num_free_space
uint32_t consumed = num_raw_data_left;
//pEchoRefProcessDataLinearBuf>>pEchoRefProcessSRCDataLinearBuf
mBliSrcEchoRefBesRec