1 AudioRecord Test Program: Core Source Code Analysis
The core logic of the AudioRecord test program from the previous chapter can be summarized as follows:
//step 1: create the AudioRecord
pAudioRecord = new android::AudioRecord();
//step 2: configure it via set()
pAudioRecord->set(inputSource, sampleRateInHz, audioFormat, \
channelConfig, 0, NULL, NULL, 0, true, 0);
//step 3: start the corresponding thread
pAudioRecord->start();
//step 4: read audio data in a loop
while (!g_bQuitAudioRecordThread)
{
//read the data
pAudioRecord->read(inBuffer, bufferSizeInBytes);
//store it
//...
}
//step 5: stop the thread; comparatively simple, so its analysis is omitted
pAudioRecord->stop();
This chapter analyzes the first four steps in more detail: construction, set(), start(), and read().
1.1 AudioRecord Construction
The AudioRecord constructors are implemented as follows:
AudioRecord::AudioRecord()
: mStatus(NO_INIT), mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT)
{
}
AudioRecord::AudioRecord(
audio_source_t inputSource,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
callback_t cbf,
void* user,
uint32_t notificationFrames,
int sessionId,
transfer_type transferType,
audio_input_flags_t flags,
const audio_attributes_t* pAttributes)
: mStatus(NO_INIT), mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mProxy(NULL)
{
mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
pAttributes);
}
Note that there are two kinds of constructors. One initializes only part of the members and leaves the remaining parameters to a later set() call; the other takes every parameter up front and calls set() internally. This closely mirrors AudioTrack.
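For reference, the two styles look like this from native code. This is a minimal sketch that assumes the Lollipop-era signatures quoted above, including their trailing default arguments; the parameter values mirror the test program and are purely illustrative:
#include <media/AudioRecord.h>

using namespace android;

void constructTwoWays() {
    // Style 1: default-construct, then configure everything via set(),
    // exactly as the test program does
    sp<AudioRecord> rec1 = new AudioRecord();
    rec1->set(AUDIO_SOURCE_MIC, 44100, AUDIO_FORMAT_PCM_16_BIT,
              AUDIO_CHANNEL_IN_MONO, 0 /*frameCount*/, NULL /*cbf*/,
              NULL /*user*/, 0 /*notificationFrames*/,
              true /*threadCanCallJava*/, 0 /*sessionId*/);

    // Style 2: the all-in-one constructor, which forwards to set() internally
    sp<AudioRecord> rec2 = new AudioRecord(AUDIO_SOURCE_MIC, 44100,
                                           AUDIO_FORMAT_PCM_16_BIT,
                                           AUDIO_CHANNEL_IN_MONO);
    // Either way, mStatus (via initCheck()) should be verified before start()
}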
1.2 The pAudioRecord->set() Operation
set() is implemented as follows:
status_t AudioRecord::set(
audio_source_t inputSource,
//...
const audio_attributes_t* pAttributes)
{
//...
if (sessionId == AUDIO_SESSION_ALLOCATE) {
mSessionId = AudioSystem::newAudioUniqueId();
} else {
mSessionId = sessionId;
}
ALOGV("set(): mSessionId %d", mSessionId);
mFlags = flags;
mCbf = cbf;
if (cbf != NULL) {
mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
}
// create the IAudioRecord
status_t status = openRecord_l(0 /*epoch*/);
//...
return NO_ERROR;
}
Note that our test program passes NULL for cbf, so no AudioRecordThread is spawned and the transfer mode resolves to TRANSFER_SYNC, meaning data is pulled synchronously with read(). Here we focus on the openRecord_l() implementation:
status_t AudioRecord::openRecord_l(size_t epoch)
{
status_t status;
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
if (audioFlinger == 0) {
ALOGE("Could not get audioflinger");
return NO_INIT;
}
//...
audio_io_handle_t input;
//Key point 1: the getInput operation
status = AudioSystem::getInputForAttr(&mAttributes, &input, (audio_session_t)mSessionId,
mSampleRate, mFormat, mChannelMask, mFlags);
//...
sp<IMemory> iMem; // for cblk
sp<IMemory> bufferMem;
//Key point 2: AudioFlinger's openRecord operation
sp<IAudioRecord> record = audioFlinger->openRecord(input,
mSampleRate, mFormat,
mChannelMask,
&temp,
&trackFlags,
tid,
&mSessionId,
&notificationFrames,
iMem,
bufferMem,
&status);
//...
// update proxy (shared-memory bookkeeping)
mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mFrameSize);
mProxy->setEpoch(epoch);
mProxy->setMinimum(mNotificationFramesAct);
//...
return NO_ERROR;
release:
AudioSystem::releaseInput(input, (audio_session_t)mSessionId);
if (status == NO_ERROR) {
status = NO_INIT;
}
return status;
}
@1 AudioSystem::getInputForAttr is implemented as follows:
status_t AudioSystem::getInputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *input,
audio_session_t session,
uint32_t samplingRate,
audio_format_t format,
audio_channel_mask_t channelMask,
audio_input_flags_t flags)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return NO_INIT;
return aps->getInputForAttr(attr, input, session, samplingRate, format, channelMask, flags);
}
The binder call lands in AudioPolicyService, which delegates to AudioPolicyManager::getInputForAttr, implemented as follows:
status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr,
//...
input_type_t *inputType)
{
*input = AUDIO_IO_HANDLE_NONE;
*inputType = API_INPUT_INVALID;
audio_devices_t device;
// handle legacy remote submix case where the address was not always specified
String8 address = String8("");
bool isSoundTrigger = false;
audio_source_t inputSource = attr->source;
audio_source_t halInputSource;
AudioMix *policyMix = NULL;
if (inputSource == AUDIO_SOURCE_DEFAULT) {
inputSource = AUDIO_SOURCE_MIC;
}
halInputSource = inputSource;
if (inputSource == AUDIO_SOURCE_REMOTE_SUBMIX &&
strncmp(attr->tags, "addr=", strlen("addr=")) == 0) {
//...
} else {
//Map the input source passed in by the app to a capture device
device = getDeviceAndMixForInputSource(inputSource, &policyMix);
//...
if (inputSource == AUDIO_SOURCE_HOTWORD) {
ssize_t index = mSoundTriggerSessions.indexOfKey(session);
if (index >= 0) {
*input = mSoundTriggerSessions.valueFor(session);
isSoundTrigger = true;
flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_HW_HOTWORD);
ALOGV("SoundTrigger capture on session %d input %d", session, *input);
} else {
halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
}
}
}
//Find the IOProfile matching the device
sp<IOProfile> profile = getInputProfile(device, address,
samplingRate, format, channelMask,
flags);
//...
//The profile identifies the module (a HAL .so), i.e. one sound card;
//openInput here goes through the client interface to AudioFlinger::openInput
status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
input,
&config,
&device,
address,
halInputSource,
flags);
//...
//Create an AudioInputDescriptor from the profile
sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile);
//inputDesc initialization...
//Bind the input handle to the inputDesc
addInput(*input, inputDesc);
mpClientInterface->onAudioPortListUpdate();
return NO_ERROR;
}
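Worth singling out is getDeviceAndMixForInputSource(), which turns the app's abstract inputSource into a concrete capture device. The sketch below conveys the flavor of that mapping in plain C++; the enum values and preference order are illustrative only, since the real policy consults the currently attached devices and audio_policy.conf:
#include <cstdint>

enum Source { SRC_MIC, SRC_CAMCORDER, SRC_VOICE_RECOGNITION };
enum Device { DEV_BUILTIN_MIC = 0x1, DEV_WIRED_HEADSET = 0x2, DEV_BACK_MIC = 0x4 };

Device deviceForSource(Source src, uint32_t attachedDevices) {
    switch (src) {
    case SRC_CAMCORDER:
        // video capture prefers a back-facing mic when one exists
        if (attachedDevices & DEV_BACK_MIC) return DEV_BACK_MIC;
        return DEV_BUILTIN_MIC;
    case SRC_MIC:
    case SRC_VOICE_RECOGNITION:
    default:
        // ordinary capture prefers a wired headset mic when plugged in
        if (attachedDevices & DEV_WIRED_HEADSET) return DEV_WIRED_HEADSET;
        return DEV_BUILTIN_MIC;
    }
}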
Next, AudioFlinger::openInput, implemented as follows:
status_t AudioFlinger::openInput(audio_module_handle_t module,
//...
audio_input_flags_t flags)
{
Mutex::Autolock _l(mLock);
//...
sp<RecordThread> thread = openInput_l(module, input, config, *device, address, source, flags);
if (thread != 0) {
//...
return NO_ERROR;
}
return NO_INIT;
}
And openInput_l itself:
sp<AudioFlinger::RecordThread> AudioFlinger::openInput_l(audio_module_handle_t module,
audio_io_handle_t *input,
audio_config_t *config,
audio_devices_t device,
const String8& address,
audio_source_t source,
audio_input_flags_t flags)
{
AudioHwDevice *inHwDev = findSuitableHwDev_l(module, device);
//...
if (*input == AUDIO_IO_HANDLE_NONE) {
*input = nextUniqueId();
}
audio_config_t halconfig = *config;
audio_hw_device_t *inHwHal = inHwDev->hwDevice();
audio_stream_in_t *inStream = NULL;
status_t status = inHwHal->open_input_stream(inHwHal, *input, device, &halconfig,
&inStream, flags, address.string(), source);
// If the input could not be opened with the requested parameters and we can handle the
// conversion internally, try to open again with the proposed parameters. The AudioFlinger can
// resample the input and do mono to stereo or stereo to mono conversions on 16 bit PCM inputs.
if (status == BAD_VALUE &&
config->format == halconfig.format && halconfig.format == AUDIO_FORMAT_PCM_16_BIT &&
(halconfig.sample_rate <= 2 * config->sample_rate) &&
(audio_channel_count_from_in_mask(halconfig.channel_mask) <= FCC_2) &&
(audio_channel_count_from_in_mask(config->channel_mask) <= FCC_2)) {
inStream = NULL;
status = inHwHal->open_input_stream(inHwHal, *input, device, &halconfig,
&inStream, flags, address.string(), source);
// FIXME log this new status; HAL should not propose any further changes
}
if (status == NO_ERROR && inStream != NULL) {
AudioStreamIn *inputStream = new AudioStreamIn(inHwDev, inStream);
//Create the RecordThread recording thread
sp<RecordThread> thread = new RecordThread(this,
inputStream,
*input,
primaryOutputDevice_l(),
device
);
mRecordThreads.add(*input, thread);
return thread;
}
*input = AUDIO_IO_HANDLE_NONE;
return 0;
}
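The two open_input_stream() calls above implement a negotiate-and-retry pattern: if the HAL rejects the requested config but writes back a counter-proposal that AudioFlinger can convert from (16-bit PCM, at most a 2x resample, mono/stereo on both sides), the open is retried with the proposal. Below is a generic sketch of the pattern; halOpenInputStream is a stub invented for this sketch, not a real HAL entry point:
#include <cstdint>
#include <cstdio>

struct Config { uint32_t rate; uint32_t channels; uint32_t format; };
static const uint32_t PCM16 = 1;

// Stub HAL: only supports 48000 Hz stereo, and rewrites *cfg with that
// proposal when rejecting a request (mimicking the BAD_VALUE path above)
static int halOpenInputStream(Config* cfg) {
    if (cfg->rate == 48000 && cfg->channels == 2) return 0;  // NO_ERROR
    *cfg = { 48000, 2, PCM16 };
    return -22;                                              // BAD_VALUE
}

int openWithFallback(const Config& requested) {
    Config halConfig = requested;            // like halconfig = *config
    int status = halOpenInputStream(&halConfig);
    if (status != 0 &&
        requested.format == PCM16 && halConfig.format == PCM16 &&
        halConfig.rate <= 2 * requested.rate &&
        halConfig.channels <= 2 && requested.channels <= 2) {
        // AudioFlinger can resample and up/down-mix, so retry the HAL's offer
        status = halOpenInputStream(&halConfig);
    }
    return status;
}

int main() {
    printf("status=%d\n", openWithFallback({44100, 1, PCM16}));  // prints 0
    return 0;
}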
So this path ultimately creates a RecordThread and binds it to the new input handle and the device.
@2 audioFlinger->openRecord is implemented as follows:
sp<IAudioRecord> AudioFlinger::openRecord(
audio_io_handle_t input,
//...
status_t *status)
{
sp<RecordThread::RecordTrack> recordTrack;
sp<RecordHandle> recordHandle;
sp<Client> client;
status_t lStatus;
int lSessionId;
//...
{
Mutex::Autolock _l(mLock);
RecordThread *thread = checkRecordThread_l(input);
//...
// TODO: the uid should be passed in as a parameter to openRecord
recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
frameCount, lSessionId, notificationFrames,
IPCThreadState::self()->getCallingUid(),
flags, tid, &lStatus);
//...
}
//...
cblk = recordTrack->getCblk();//get the shared-memory control block (header)
buffers = recordTrack->getBuffers();//get the shared-memory data buffer
// return handle to client
recordHandle = new RecordHandle(recordTrack);
Exit:
*status = lStatus;
return recordHandle;
}
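The iMem/bufferMem pair returned here is the heart of the data path: a small control block whose counters both processes share, plus the sample buffer itself (depending on the version, the buffer either follows the control block in the same region or lives in the separate bufferMem region, which is why two IMemory handles are passed). A conceptual sketch with illustrative field names, not the real audio_track_cblk_t:
#include <cstddef>
#include <cstdint>

// What getCblk() exposes: state shared between app and audioserver
struct ToyControlBlock {
    volatile uint32_t mServer;  // frames produced so far by the RecordThread
    volatile uint32_t mFlags;   // e.g. a CBLK_INVALID-style invalidation bit
    volatile uint32_t mFront;   // consumer (client) index
    volatile uint32_t mRear;    // producer (server) index
    // ...futex word, frame size, and so on...
};

// What the client proxy works with after mapping both regions
struct ToyMappedTrack {
    ToyControlBlock* cblk;      // from the iMem region
    uint8_t* buffers;           // from bufferMem (or cblk + 1 if contiguous)
    size_t frameCount;
    size_t frameSize;
};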
Next, createRecordTrack_l:
// RecordThread::createRecordTrack_l() must be called with AudioFlinger::mLock held
sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l(
const sp<AudioFlinger::Client>& client,
//...
status_t *status)
{
size_t frameCount = *pFrameCount;
sp<RecordTrack> track;
status_t lStatus;
//...
lStatus = initCheck();
//...
{ // scope for mLock
Mutex::Autolock _l(mLock);
track = new RecordTrack(this, client, sampleRate,
format, channelMask, frameCount, NULL, sessionId, uid,
*flags, TrackBase::TYPE_DEFAULT);
lStatus = track->initCheck();
//...
mTracks.add(track);
//...
}
lStatus = NO_ERROR;
Exit:
*status = lStatus;
return track;
}
The most important step here is that the RecordThread creates a RecordTrack for the client, analogous to how a PlaybackThread creates its own Tracks on the playback side.
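The resulting ownership hierarchy, sketched with standard containers (the names are illustrative stand-ins for the AOSP types):
#include <map>
#include <memory>
#include <vector>

struct RecordTrack { /* one per recording client; wraps its shared memory */ };

struct RecordThread {
    // mTracks analog: several clients can hang off one input thread
    std::vector<std::shared_ptr<RecordTrack>> tracks;
};

struct AudioFlingerSketch {
    // mRecordThreads analog: one RecordThread per opened input handle
    std::map<int /*audio_io_handle_t*/,
             std::shared_ptr<RecordThread>> recordThreads;
};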
1.3 The pAudioRecord->start() Operation
AudioRecord::start is implemented as follows:
status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession)
{
//...
// reset current position as seen by client to 0
mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
mRefreshRemaining = true;
mNewPosition = mProxy->getPosition() + mUpdatePeriod;
int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);
status_t status = NO_ERROR;
if (!(flags & CBLK_INVALID)) {
ALOGV("mAudioRecord->start()");
status = mAudioRecord->start(event, triggerSession);
if (status == DEAD_OBJECT) {
flags |= CBLK_INVALID;
}
}
//...
return status;
}
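The setEpoch() line is worth a note. Assuming the position is derived as epoch + server frame count, as in the Lollipop client proxy, setting the epoch to (old epoch - old position) makes the next position read come out as zero, without touching the server's counter. A toy calculation:
#include <cstdio>

int main() {
    unsigned epoch = 100, serverFrames = 250;   // arbitrary example values
    unsigned position = epoch + serverFrames;   // client currently sees 350
    epoch = epoch - position;                   // the reset done in start()
    printf("new position = %u\n", epoch + serverFrames);   // prints 0
    return 0;
}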
We now follow mAudioRecord->start(). mAudioRecord is the RecordHandle returned by audioFlinger->openRecord(), and a RecordHandle wraps the RecordTrack, so the call actually lands in AudioFlinger::RecordThread::RecordTrack::start, implemented as follows:
status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
int triggerSession)
{
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
RecordThread *recordThread = (RecordThread *)thread.get();
return recordThread->start(this, event, triggerSession);
} else {
return BAD_VALUE;
}
}
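mThread here is a wp<ThreadBase>, a weak reference, and promote() yields a strong sp<> only while the thread object is still alive. std::weak_ptr::lock() is the standard-library analog of the same idiom:
#include <memory>

struct ThreadBase { /* ... */ };

int startOnThread(const std::weak_ptr<ThreadBase>& weakThread) {
    if (std::shared_ptr<ThreadBase> thread = weakThread.lock()) { // promote()
        // thread is guaranteed to stay alive within this scope
        return 0;    // NO_ERROR analog
    }
    return -22;      // BAD_VALUE analog: the thread has already been destroyed
}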
This forwards to the RecordThread's start method:
status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack,
AudioSystem::sync_event_t event,
int triggerSession)
{
ALOGV("RecordThread::start event %d, triggerSession %d", event, triggerSession);
sp<ThreadBase> strongMe = this;
status_t status = NO_ERROR;
{
//...
recordTrack->mState = TrackBase::STARTING_1;
mActiveTracks.add(recordTrack);
mActiveTracksGen++;
status_t status = NO_ERROR;
if (recordTrack->isExternalTrack()) {
mLock.unlock();
status = AudioSystem::startInput(mId, (audio_session_t)recordTrack->sessionId());
mLock.lock();
// FIXME should verify that recordTrack is still in mActiveTracks
if (status != NO_ERROR) {
mActiveTracks.remove(recordTrack);
mActiveTracksGen++;
recordTrack->clearSyncStartEvent();
ALOGV("RecordThread::start error %d", status);
return status;
}
}
//...
recordTrack->mState = TrackBase::STARTING_2;
// signal thread to start
mWaitWorkCV.broadcast();//wake up the record loop
//...
return status;
}
//...
}
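The mWaitWorkCV.broadcast() is the rendezvous between the binder thread executing start() and the record loop, which parks on that condition while it has no active tracks. A minimal standard-C++ sketch of the handshake, with std::condition_variable standing in for the Android Condition:
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

static std::mutex gLock;
static std::condition_variable gWaitWorkCV;
static std::vector<int> gActiveTracks;           // mActiveTracks analog

static void recordLoop() {
    std::unique_lock<std::mutex> lock(gLock);
    // park, exactly like RecordThread sleeping while nothing is recording
    gWaitWorkCV.wait(lock, [] { return !gActiveTracks.empty(); });
    // ...read from the HAL and feed each active track's shared memory...
}

static void start(int track) {
    {
        std::lock_guard<std::mutex> lock(gLock);
        gActiveTracks.push_back(track);          // STARTING_1/STARTING_2 analog
    }
    gWaitWorkCV.notify_all();                    // mWaitWorkCV.broadcast() analog
}

int main() {
    std::thread t(recordLoop);
    start(1);
    t.join();
    return 0;
}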
The woken loop's job is to pull data from the HAL and push it to the app's AudioRecord through the internal RecordTrack. Here we focus on the AudioSystem::startInput call made above, implemented as follows:
status_t AudioSystem::startInput(audio_io_handle_t input,
audio_session_t session)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
return aps->startInput(input, session);
}
And AudioPolicyManager::startInput:
status_t AudioPolicyManager::startInput(audio_io_handle_t input,
audio_session_t session)
{
ssize_t index = mInputs.indexOfKey(input);
sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
index = inputDesc->mSessions.indexOf(session);
//...
// virtual input devices are compatible with other input devices
if (!isVirtualInputDevice(inputDesc->mDevice)) {
// for a non-virtual input device, check if there is another (non-virtual) active input
audio_io_handle_t activeInput = getActiveInput();//the input on which an app is already recording, if any
/* Key logic: if the input that is already recording and the input
 * that wants to start recording are not the same one, return an error.
 * This is exactly why Android apps cannot record simultaneously.
 */
if (activeInput != 0 && activeInput != input) {
sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
if (activeDesc->mInputSource == AUDIO_SOURCE_HOTWORD) {
stopInput(activeInput, activeDesc->mSessions.itemAt(0));
releaseInput(activeInput, activeDesc->mSessions.itemAt(0));
} else {
return INVALID_OPERATION;
}
}
}
//...
inputDesc->mRefCount++;
return NO_ERROR;
}
So start() sets the RecordThread running, and along the way we have seen why two Android apps cannot record at the same time.
1.4 The pAudioRecord->read() Operation
AudioRecord::read is implemented as follows:
ssize_t AudioRecord::read(void* buffer, size_t userSize)
{
if (mTransfer != TRANSFER_SYNC) {
return INVALID_OPERATION;
}
if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
// sanity-check. user is most-likely passing an error code, and it would
// make the return value ambiguous (actualSize vs error).
ALOGE("AudioRecord::read(buffer=%p, size=%zu (%zu)", buffer, userSize, userSize);
return BAD_VALUE;
}
ssize_t read = 0;
Buffer audioBuffer;
while (userSize >= mFrameSize) {
audioBuffer.frameCount = userSize / mFrameSize;
status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
if (err < 0) {
if (read > 0) {
break;
}
return ssize_t(err);
}
size_t bytesRead = audioBuffer.size;
memcpy(buffer, audioBuffer.i8, bytesRead);
buffer = ((char *) buffer) + bytesRead;
userSize -= bytesRead;
read += bytesRead;
releaseBuffer(&audioBuffer);
}
return read;
}
Here, too, obtainBuffer() and releaseBuffer() drive the shared memory: each iteration obtains a chunk, copies it from audioBuffer into the caller's buffer, and releases the chunk back to the server.
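obtainBuffer() and releaseBuffer() are the two halves of a lock-free ring buffer over the shared region: obtain returns a pointer into the mapped memory plus how much contiguous data is readable, and release advances the consumer index so the server can reuse the space. A toy single-producer/single-consumer version of the idea (not the real proxy code):
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>

class ToyRing {
public:
    // producer side (RecordThread analog): append up to n bytes
    size_t write(const uint8_t* src, size_t n) {
        size_t rear  = mRear.load(std::memory_order_relaxed);
        size_t front = mFront.load(std::memory_order_acquire);
        size_t room  = kSize - (rear - front);
        if (n > room) n = room;
        for (size_t i = 0; i < n; i++) mData[(rear + i) % kSize] = src[i];
        mRear.store(rear + n, std::memory_order_release);
        return n;
    }
    // obtainBuffer() analog: where the readable bytes start, and how many
    // are contiguous before the wrap-around
    size_t obtain(const uint8_t** out) {
        size_t front  = mFront.load(std::memory_order_relaxed);
        size_t rear   = mRear.load(std::memory_order_acquire);
        size_t avail  = rear - front;
        size_t idx    = front % kSize;
        size_t contig = kSize - idx;
        *out = &mData[idx];
        return avail < contig ? avail : contig;
    }
    // releaseBuffer() analog: hand the consumed space back to the producer
    void release(size_t n) {
        mFront.store(mFront.load(std::memory_order_relaxed) + n,
                     std::memory_order_release);
    }
private:
    static const size_t kSize = 8;
    uint8_t mData[kSize];
    std::atomic<size_t> mFront{0};   // consumer index (client read() side)
    std::atomic<size_t> mRear{0};    // producer index (RecordThread side)
};

int main() {
    ToyRing ring;
    const uint8_t pcm[6] = {1, 2, 3, 4, 5, 6};
    ring.write(pcm, sizeof(pcm));
    const uint8_t* p;
    size_t n = ring.obtain(&p);      // n == 6, p points at the samples
    printf("obtained %zu bytes, first=%u\n", n, p[0]);
    ring.release(n);                 // space becomes reusable by the producer
    return 0;
}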
2 Brief Overview of the AudioRecord Framework
@1 The AudioRecord framework flow
The app creates and configures an AudioRecord, specifying the sound source inputSource (e.g. AUDIO_SOURCE_MIC) plus the sample rate, channel count, format, and other parameters.
AudioPolicyManager uses inputSource and those parameters to pick the recording device, then finds the profile for that device (generated from audio_policy.conf).
The profile identifies the module, i.e. one sound card, whose HAL library is then loaded.
AudioFlinger creates a RecordThread that will read audio data from that device.
Inside the RecordThread a RecordTrack is created for the app's AudioRecord; the app-side AudioRecord and the RecordTrack exchange data through shared memory.
The RecordThread pulls data from the HAL (through the input stream opened earlier via open_input_stream()) and hands it to the app's AudioRecord through the internal RecordTrack.
@2 Why can't multiple Android apps record at the same time?
In the stock code, each app-side AudioRecord causes a RecordThread to be created. A device may therefore end up with several RecordThreads, but only one RecordThread may be active at any moment, so only one app can record; several apps cannot record simultaneously.
3 Solution: Simultaneous Recording by Multiple Apps
3.1 How the Solution Works
To recap the problem: each AudioRecord created by an app spawns its own RecordThread, and since only one RecordThread may run at a time, only one app can record.
AudioFlinger itself, however, already supports concurrent capture: a single RecordThread can host multiple RecordTracks. The restriction lives entirely in AudioPolicyManager (the policy layer), which in effect limits each device to one active input thread. If we ensure that only one RecordThread exists per device (borrowing the PlaybackThread approach), then every app that opens that device attaches to the same thread and simply gets its own RecordTrack; that mechanism is already complete and needs no code changes. Multiple apps can then record simultaneously.
3.2 Implementing the Solution
Modify AudioPolicyManager.cpp as shown below (lines prefixed with + are newly added):
status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *input,
audio_session_t session,
uint32_t samplingRate,
audio_format_t format,
audio_channel_mask_t channelMask,
audio_input_flags_t flags,
input_type_t *inputType)
{
//...
config.channel_mask = channelMask;
config.format = format;
+ /* check whether an existing AudioInputDescriptor already uses the same profile */
+ for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
+ sp<AudioInputDescriptor> desc;
+ desc = mInputs.valueAt(input_index);
+ if (desc->mProfile == profile) {
+ desc->mOpenRefCount++; // bump the open reference count
+ desc->mSessions.add(session); // register this session on the descriptor
+ return desc->mIoHandle;
+ }
+ }
+
status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
input,
&config,
&device,
address,
halInputSource,
flags);
//...
}
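One caveat worth checking on your branch: the patch above only touches the acquire path. In the Lollipop code this analysis is based on, releaseInput() already decrements mOpenRefCount and only calls closeInput() once the count reaches zero, so the release path balances the new reference without further changes; if your version behaves differently, releaseInput() needs a matching patch.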