Android audio 二 AudioRecord 分析上
Android audio 三 AudioRecord 分析下
Android audio 四 AudioTrack 分析上
Android audio 五 AudioTrack 分析下
Android audio 六 AudioRecord AudioTrack 拾音放音例子
frameworks/av/media/libmedia/AudioRecord.cpp
AudioRecord 是Android 拾音对象。
实例化 AudioRecord 对象后,启动录音的步骤:
- sp< AudioRecord> audiorecord = new AudioRecord(......)
- audiorecord->start();
- audiorecord->read(......)
- audiorecord->stop()
先来看看 AudioRecord 的构造函数和析构函数;
AudioRecord::AudioRecord
AudioRecord::AudioRecord(......) 调用了 status_t AudioRecord::set(......) 接下来分析 set(......) ;
// frameworks/av/media/libmedia/AudioRecord.cpp
// Minimal constructor: records only the op package name and leaves the object
// uninitialized (mStatus == NO_INIT). set() must be called before the record
// object can be used.
AudioRecord::AudioRecord(const String16 &opPackageName)
: mActive(false), mStatus(NO_INIT), mOpPackageName(opPackageName), mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
}
// Full constructor: initializes the members to safe defaults and then
// immediately configures the record object by forwarding all parameters to
// set(). The result of set() is stored in mStatus; callers should check it
// before using the object (threadCanCallJava is hard-wired to false here).
AudioRecord::AudioRecord(
audio_source_t inputSource,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
const String16& opPackageName,
size_t frameCount,
callback_t cbf,
void* user,
uint32_t notificationFrames,
audio_session_t sessionId,
transfer_type transferType,
audio_input_flags_t flags,
int uid,
pid_t pid,
const audio_attributes_t* pAttributes)
: mActive(false),
mStatus(NO_INIT),
mOpPackageName(opPackageName),
mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mProxy(NULL),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
// All real initialization happens in set(); its status becomes mStatus.
mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
uid, pid, pAttributes);
}
AudioRecord::~AudioRecord()
{
// Tear-down is only needed if set() completed successfully; otherwise no
// server-side resources were ever acquired.
if(mStatus == NO_ERROR)
{
// Make sure that callback function exits in the case where
// it is looping on buffer empty condition in obtainBuffer().
// Otherwise the callback thread will never exit.
stop();
if(mAudioRecordThread != 0)
{
// Unblock the callback thread (it may be waiting on the shared-memory
// proxy), then ask it to exit and wait before dropping the reference.
mProxy->interrupt();
mAudioRecordThread->requestExit(); // see comment in AudioRecord.h
mAudioRecordThread->requestExitAndWait();
mAudioRecordThread.clear();
}
// No lock here: worst case we remove a NULL callback which will be a nop
if(mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE)
{
AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mInput);
}
// Stop watching for server death, then release the binder-side record and
// the shared memory (control block + data buffer) in that order.
IInterface::asBinder(mAudioRecord)->unlinkToDeath(mDeathNotifier, this);
mAudioRecord.clear();
mCblkMemory.clear();
mBufferMemory.clear();
// Push any queued binder commands now so remote references are released promptly.
IPCThreadState::self()->flushCommands();
ALOGV("~AudioRecord, releasing session id %d",
mSessionId);
AudioSystem::releaseAudioSessionId(mSessionId, -1 /*pid*/);
}
}
status_t AudioRecord::set(......)
配置 AudioRecord 的 声道 采样率 音频帧长度 读取音频设备的数据的方式
重点如下:
// frameworks/av/media/libmedia/AudioRecord.cpp
// (Abridged excerpt -- "......" elides code; see the full listing below.)
status_t AudioRecord::set(......)
{
......
// Instantiate the AudioRecordThread (only when a callback was supplied)
if(cbf != NULL)
{
mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
// thread begins in paused state, and will not reference us until start()
}
// create the IAudioRecord
// Key step: obtains the AudioFlinger system service and calls AudioFlinger->openRecord()
status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);
......
}
AudioRecord::set 源码如下:
// frameworks/av/media/libmedia/AudioRecord.cpp
// Configure the record object: chooses the transfer mode, validates
// format/channel mask, records client identity, spawns the callback thread
// when a callback is supplied, and creates the server-side IAudioRecord via
// openRecord_l(). Returns NO_ERROR on success, BAD_VALUE on invalid
// parameters, INVALID_OPERATION if already initialized.
status_t AudioRecord::set(
audio_source_t inputSource,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
callback_t cbf,
void* user,
uint32_t notificationFrames,
bool threadCanCallJava,
audio_session_t sessionId,
transfer_type transferType,
audio_input_flags_t flags,
int uid,
pid_t pid,
const audio_attributes_t* pAttributes)
{
ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
"notificationFrames %u, sessionId %d, transferType %d, flags %#x, opPackageName %s "
"uid %d, pid %d",
inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
sessionId, transferType, flags, String8(mOpPackageName).string(), uid, pid);
// Choose the data transfer mode: DEFAULT resolves to CALLBACK when a
// callback is usable (cbf set and thread cannot call Java), else SYNC.
switch(transferType)
{
case TRANSFER_DEFAULT:
if(cbf == NULL || threadCanCallJava)
{
transferType = TRANSFER_SYNC;
}
else
{
transferType = TRANSFER_CALLBACK;
}
break;
case TRANSFER_CALLBACK:
// CALLBACK mode is meaningless without a callback function.
if(cbf == NULL)
{
ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
return BAD_VALUE;
}
break;
case TRANSFER_OBTAIN:
case TRANSFER_SYNC:
break;
default:
ALOGE("Invalid transfer type %d", transferType);
return BAD_VALUE;
}
mTransfer = transferType;
// invariant that mAudioRecord != 0 is true only after set() returns successfully
if(mAudioRecord != 0)
{
ALOGE("Track already in use");
return INVALID_OPERATION;
}
// Build the audio attributes: either synthesized from the input source or
// copied verbatim from the caller-supplied attributes.
if(pAttributes == NULL)
{
memset(&mAttributes, 0, sizeof(audio_attributes_t));
mAttributes.source = inputSource;
}
else
{
// stream type shouldn't be looked at, this track has audio attributes
memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
ALOGV("Building AudioRecord with attributes: source=%d flags=0x%x tags=[%s]",
mAttributes.source, mAttributes.flags, mAttributes.tags);
}
// Requested sample rate
mSampleRate = sampleRate;
// these below should probably come from the audioFlinger too...
if(format == AUDIO_FORMAT_DEFAULT)
{
format = AUDIO_FORMAT_PCM_16_BIT;
}
// validate parameters
// AudioFlinger capture only supports linear PCM
// Reject any format AudioFlinger cannot capture.
if(!audio_is_valid_format(format) || !audio_is_linear_pcm(format))
{
ALOGE("Format %#x is not linear pcm", format);
return BAD_VALUE;
}
mFormat = format;
// Validate the channel mask and derive the channel count from it.
if(!audio_is_input_channel(channelMask))
{
ALOGE("Invalid channel mask %#x", channelMask);
return BAD_VALUE;
}
mChannelMask = channelMask;
uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
mChannelCount = channelCount;
// Bytes per audio frame (1 byte for non-PCM; unreachable here since only
// linear PCM passed validation above).
if(audio_is_linear_pcm(format))
{
mFrameSize = channelCount * audio_bytes_per_sample(format);
}
else
{
mFrameSize = sizeof(uint8_t);
}
// mFrameCount is initialized in openRecord_l
mReqFrameCount = frameCount;
mNotificationFramesReq = notificationFrames;
// mNotificationFramesAct is initialized in openRecord_l
// Allocate a fresh session id if the caller did not supply one.
if(sessionId == AUDIO_SESSION_ALLOCATE)
{
mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
}
else
{
mSessionId = sessionId;
}
ALOGV("set(): mSessionId %d", mSessionId);
// Determine the client uid/pid: caller-supplied values are honored only for
// in-process callers; otherwise the binder calling identity is used.
int callingpid = IPCThreadState::self()->getCallingPid();
int mypid = getpid();
if(uid == -1 || (callingpid != mypid))
{
mClientUid = IPCThreadState::self()->getCallingUid();
}
else
{
mClientUid = uid;
}
if(pid == -1 || (callingpid != mypid))
{
mClientPid = callingpid;
}
else
{
mClientPid = pid;
}
mOrigFlags = mFlags = flags;
mCbf = cbf;
// Instantiate the AudioRecordThread (only when a callback was supplied)
if(cbf != NULL)
{
mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
// thread begins in paused state, and will not reference us until start()
}
// create the IAudioRecord
// Key step: obtains the AudioFlinger system service and calls AudioFlinger->openRecord()
status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);
if(status != NO_ERROR)
{
// Undo the callback-thread creation before reporting failure.
if(mAudioRecordThread != 0)
{
mAudioRecordThread->requestExit(); // see comment in AudioRecord.h
mAudioRecordThread->requestExitAndWait();
mAudioRecordThread.clear();
}
return status;
}
mStatus = NO_ERROR;
mUserData = user;
// TODO: add audio hardware input latency here
// Client-visible latency estimate in ms, based on the notification period
// (callback/sync modes) or the whole buffer (obtain mode).
if(mTransfer == TRANSFER_CALLBACK ||
mTransfer == TRANSFER_SYNC)
{
mLatency = (1000 * mNotificationFramesAct) / sampleRate;
}
else
{
mLatency = (1000 * mFrameCount) / sampleRate;
}
// Reset client-side position/marker bookkeeping.
mMarkerPosition = 0;
mMarkerReached = false;
mNewPosition = 0;
mUpdatePeriod = 0;
AudioSystem::acquireAudioSessionId(mSessionId, -1);
mSequence = 1;
mObservedSequence = mSequence;
mInOverrun = false;
mFramesRead = 0;
mFramesReadServerOffset = 0;
return NO_ERROR;
}
AudioRecordThread 和 AudioRecord::openRecord_l()
AudioRecordThread 的声明如下:
发现并没有 run 成员函数,AudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
我们知道 AudioRecordThread 继承了一个线程类 Thread, Thread 类中声明了成员 run
在线程函数 Thread::run 创建线程 Thread::_threadLoop(void* user)
- _threadLoop 循环调用 self->threadLoop();
- self->threadLoop() 调用 AudioRecord.processAudioBuffer();
- AudioRecord.processAudioBuffer() 调用 AudioRecord::restoreRecord_l(const char *from)
- AudioRecord::restoreRecord_l(const char *from) 调用 AudioFlinger->openRecord(......)
- AudioFlinger 是承上启下的作用,为应用层提供访问接口,并通过 HAL 来管理音频设备
- 下一步是分析对象 AudioFlinger,后续有时间,我会写博客分析的 AudioFlinger 。(这里不做分析)
AudioRecord::openRecord_l() 也是调用 AudioFlinger->openRecord(......)
// frameworks/av/media/libmedia/AudioRecord.cpp
/* a small internal class to handle the callback */
// Worker thread that drives the AudioRecord callback: its threadLoop()
// hands control to the owning AudioRecord (mReceiver). The run() entry
// point is inherited from the Thread base class.
class AudioRecordThread : public Thread
{
public:
AudioRecordThread(AudioRecord& receiver, bool bCanCallJava = false);
// Do not call Thread::requestExitAndWait() without first calling requestExit().
// Thread::requestExitAndWait() is not virtual, and the implementation doesn't do enough.
virtual void requestExit();
void pause(); // suspend thread from execution at next loop boundary
void resume(); // allow thread to execute, if not requested to exit
void wake(); // wake to handle changed notification conditions.
private:
void pauseInternal(nsecs_t ns = 0LL);
// like pause(), but only used internally within thread
friend class AudioRecord;
virtual bool threadLoop();
AudioRecord& mReceiver; // owning AudioRecord; outlives this thread
virtual ~AudioRecordThread();
Mutex mMyLock; // Thread::mLock is private
Condition mMyCond; // Thread::mThreadExitedCondition is private
bool mPaused; // whether thread is requested to pause at next loop entry
bool mPausedInt; // whether thread internally requests pause
nsecs_t mPausedNs; // if mPausedInt then associated timeout, otherwise ignored
bool mIgnoreNextPausedInt; // skip any internal pause and go immediately
// to processAudioBuffer() as state may have changed
// since pause time calculated.
};
AudioRecord::start
开始拾音
// frameworks/av/media/libmedia/AudioRecord.cpp
// Begin capturing. Idempotent: returns NO_ERROR immediately if already
// active. Flushes stale buffer data, resets the client-side position, starts
// the server-side IAudioRecord (recreating it via restoreRecord_l() if the
// control block was invalidated), then resumes the callback thread or, in
// synchronous mode, boosts the calling thread's priority.
// NOTE(review): event/triggerSession are forwarded to mAudioRecord->start();
// their sync semantics are handled server-side -- confirm in AudioFlinger.
status_t AudioRecord::start(AudioSystem::sync_event_t event, audio_session_t triggerSession)
{
ALOGV("start, sync event %d trigger session %d", event, triggerSession);
SEEMPLOG_RECORD(71,"");
AutoMutex lock(mLock);
if(mActive)
{
return NO_ERROR;
}
// discard data in buffer
const uint32_t framesFlushed = mProxy->flush();
mFramesReadServerOffset -= mFramesRead + framesFlushed;
mFramesRead = 0;
mProxy->clearTimestamp(); // timestamp is invalid until next server push
// reset current position as seen by client to 0
mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
// force refresh of remaining frames by processAudioBuffer() as last
// read before stop could be partial.
mRefreshRemaining = true;
mNewPosition = mProxy->getPosition() + mUpdatePeriod;
// Snapshot the control-block flags to learn whether the server-side record
// is still valid before attempting to start it.
int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);
// we reactivate markers (mMarkerPosition != 0) as the position is reset to 0.
// This is legacy behavior. This is not done in stop() to avoid a race condition
// where the last marker event is issued twice.
mMarkerReached = false;
mActive = true;
status_t status = NO_ERROR;
if(!(flags & CBLK_INVALID))
{
status = mAudioRecord->start(event, triggerSession);
// DEAD_OBJECT means the server died; fall through to the restore path.
if(status == DEAD_OBJECT)
{
flags |= CBLK_INVALID;
}
}
if(flags & CBLK_INVALID)
{
status = restoreRecord_l("start");
}
if(status != NO_ERROR)
{
mActive = false;
ALOGE("start() status %d", status);
}
else
{
sp<AudioRecordThread> t = mAudioRecordThread;
if(t != 0)
{
// Callback mode: wake the worker thread (it starts paused).
t->resume();
}
else
{
// Synchronous mode: raise this thread's priority for audio, saving the
// previous priority/scheduling group so stop() can restore them.
mPreviousPriority = getpriority(PRIO_PROCESS, 0);
get_sched_policy(0, &mPreviousSchedulingGroup);
androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
}
}
return status;
}
AudioRecord::read
- AudioRecord::read 调用 AudioRecord::obtainBuffer(......) 获取音频设备的拾音数据
- AudioRecord::obtainBuffer(......) 调用 AudioRecordClientProxy::obtainBuffer(......)
AudioRecord::stop
AudioRecord::stop 源码如下:
IAudioRecord::stop() 通过 binder 停止 AudioRecord 服务。
AudioRecordThread::pause(); 暂停线程
变量 mPaused 在函数开头的地方判断返回 true ,暂停线程。
// frameworks/av/media/libmedia/AudioRecord.cpp
void AudioRecord::stop()
{
AutoMutex lock(mLock);
if (!mActive) {
return;
}
mActive = false;
mProxy->interrupt();
mAudioRecord->stop();
// Note: legacy handling - stop does not clear record marker and
// periodic update position; we update those on start().
sp<AudioRecordThread> t = mAudioRecordThread;
if (t != 0) {
t->pause();
} else {
setpriority(PRIO_PROCESS, 0, mPreviousPriority);
set_sched_policy(0, mPreviousSchedulingGroup);
}
}