In the previous article we saw what the Java layer of the audio module does, which is on the whole fairly simple. Now we continue into the Native layer and explore what the system does there. A quick orientation first: the Native layer uses a client/server architecture. AudioTrack belongs to the client side, while AudioFlinger and AudioPolicyService belong to the server side. AudioFlinger is the only place that calls into the HAL interfaces, and AudioPolicyService is mainly responsible for selecting audio policies and relaying the associated logic. Let's start by looking at what the client-side AudioTrack does.
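Before diving in, it helps to see how the two sides actually meet: the client reaches AudioFlinger over Binder. Below is a minimal sketch of that lookup, roughly what AudioSystem::get_audio_flinger() does internally minus its caching and death-notification handling; treat it as illustrative, not as the framework's exact code.

#include <binder/IServiceManager.h>
#include <media/IAudioFlinger.h>

using namespace android;

// Sketch: obtain the AudioFlinger Binder proxy by the name it registers under.
sp<IAudioFlinger> getAudioFlingerSketch() {
    sp<IServiceManager> sm = defaultServiceManager();
    sp<IBinder> binder = sm->getService(String16("media.audio_flinger"));
    if (binder == NULL) {
        return NULL;  // service not (yet) available
    }
    return interface_cast<IAudioFlinger>(binder);
}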
A note on scope before we start: this article covers only the initialization work in the Native layer. The Native code and its logic are the core of the entire audio module, so covering it all in a single article is impossible; many of the topics touched on here could fill an article of their own.
1. AudioTrack initialization
From the Java-layer JNI methods we know that an AudioTrack is not only created through its no-argument constructor; its set() method is called as well. The important code fragments follow; the source lives under /framework/av/media/libmedia .
// AudioTrack's no-argument constructor
AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}
status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect)
{
    // Validate the input data... (omitted)
    // Parameter processing and initialization... (omitted)
    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    mFormat = format;
    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    // Determine the channel count
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;
    // Determine the frame size according to the flags
    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_is_linear_pcm(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_is_linear_pcm(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }
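    // Worked example (illustrative numbers): for stereo 16-bit PCM, channelCount = 2
    // and audio_bytes_per_sample(AUDIO_FORMAT_PCM_16_BIT) = 2, so mFrameSize is
    // 2 * 2 = 4 bytes per frame; for a non-PCM direct stream (e.g. compressed
    // passthrough) mFrameSize is just one byte.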
    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;
    // ...
    mAuxEffectId = 0;
    mFlags = flags;
    mCbf = cbf;
    // 1. If the status-callback object passed down from Java is non-NULL,
    //    start a thread to handle the callbacks
    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }
    // 2. Create the sp<IAudioTrack> object used to communicate with the server side
    status_t status = createTrack_l();
    // ... remaining parameter initialization and assignments
    return NO_ERROR;
}
This method is extremely long, so I removed the less important parts and annotated some of the statements; the two most important steps are numbered. Let's study in detail what each of them does.
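To make the calling convention concrete, here is a rough sketch of how a native client could drive these two steps itself. The callback and cookie (myCallback, myCookie) are hypothetical names, the parameter values are illustrative, and the omitted trailing parameters rely on the default arguments declared in AudioTrack.h:

// Illustrative only: constructing and configuring a native AudioTrack by hand.
static void myCallback(int event, void* user, void* info) { /* handle events */ }

sp<AudioTrack> track = new AudioTrack();
int myCookie = 0;
status_t status = track->set(
        AUDIO_STREAM_MUSIC,          // streamType
        44100,                       // sampleRate
        AUDIO_FORMAT_PCM_16_BIT,     // format
        AUDIO_CHANNEL_OUT_STEREO,    // channelMask
        0,                           // frameCount: 0 lets createTrack_l() choose
        AUDIO_OUTPUT_FLAG_NONE,      // flags
        myCallback,                  // cbf: non-NULL, so an AudioTrackThread is started
        &myCookie);                  // user: cookie handed back to the callback
if (status != NO_ERROR) {
    ALOGE("AudioTrack::set() failed: %d", status);
}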
1.1. Native-layer status callbacks
As we saw, AudioTrack's set() method creates a thread, the AudioTrackThread. Let's look at its data structure:
/* a small internal class to handle the callback */
class AudioTrackThread : public Thread
{
public:
    AudioTrackThread(AudioTrack& receiver, bool bCanCallJava = false);

    // Do not call Thread::requestExitAndWait() without first calling requestExit().
    // Thread::requestExitAndWait() is not virtual, and the implementation doesn't do enough.
    virtual void requestExit();

    void pause();   // suspend thread from execution at next loop boundary
    void resume();  // allow thread to execute, if not requested to exit
    void wake();    // wake to handle changed notification conditions.

private:
    void pauseInternal(nsecs_t ns = 0LL);
                    // like pause(), but only used internally within thread

    friend class AudioTrack;
    virtual bool threadLoop();
    AudioTrack&   mReceiver;
    virtual ~AudioTrackThread();
    Mutex         mMyLock;     // Thread::mLock is private
    Condition     mMyCond;     // Thread::mThreadExitedCondition is private
    bool          mPaused;     // whether thread is requested to pause at next loop entry
    bool          mPausedInt;  // whether thread internally requests pause
    nsecs_t       mPausedNs;   // if mPausedInt then associated timeout, otherwise ignored
    bool          mIgnoreNextPausedInt;  // skip any internal pause and go immediately
                    // to processAudioBuffer() as state may have changed
                    // since pause time calculated.
};
AudioTrackThread inherits from the native Thread class; when AudioTrackThread->run() is called, execution eventually reaches its threadLoop(). To see why, look at Thread.h's implementation file, Threads.cpp, located at:
./system/core/libutils/Threads.cpp
status_t Thread::run(const char* name, int32_t priority, size_t stack)
{
    Mutex::Autolock _l(mLock);
    //...
    bool res;
    // mCanCallJava says whether the thread must attach to the VM; here it is false.
    if (mCanCallJava) {
        res = createThreadEtc(_threadLoop,
                this, name, priority, stack, &mThread);
    } else {
        res = androidCreateRawThreadEtc(_threadLoop,
                this, name, priority, stack, &mThread);
    }
    //...
}
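When mCanCallJava is true, createThreadEtc() goes through a wrapper installed by the Android runtime that attaches the new thread to the JVM before the entry function runs and detaches it afterwards. Conceptually it does something like the sketch below (the real wrapper lives in AndroidRuntime; the function name here is hypothetical):

#include <jni.h>

typedef int (*thread_func_t)(void*);  // stand-in for android_thread_func_t

// Hypothetical sketch of the mCanCallJava wrapper: attach, run, detach.
static int javaThreadShellSketch(JavaVM* vm, thread_func_t entry, void* userData) {
    JNIEnv* env = NULL;
    if (vm->AttachCurrentThread(&env, NULL) != JNI_OK) {
        return -1;  // JNI calls are illegal on an unattached thread
    }
    int result = entry(userData);  // run the real thread body (_threadLoop)
    vm->DetachCurrentThread();     // must detach before the thread exits
    return result;
}

Since mCanCallJava is false in our case, the raw path is taken: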
int androidCreateRawThreadEtc(android_thread_func_t entryFunction,
                              void *userData,
                              const char* threadName __android_unused,
                              int32_t threadPriority,
                              size_t threadStackSize,
                              android_thread_id_t *threadId)
{
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    //...
    errno = 0;
    pthread_t thread;
    // Create a raw unix thread. Note that entryFunction is the _threadLoop
    // passed down by Thread::run() above.
    int result = pthread_create(&thread, &attr,
            (android_pthread_entry)entryFunction, userData);
    pthread_attr_destroy(&attr);
    if (result != 0) {
        ALOGE("androidCreateRawThreadEtc failed (entry=%p, res=%d, errno=%d)\n"
              "(android threadPriority=%d)",
              entryFunction, result, errno, threadPriority);
        return 0;
    }
    // Note that *threadID is directly available to the parent only, as it is
    // assigned after the child starts. Use memory barrier / lock if the child
    // or other threads also need access.
    if (threadId != NULL) {
        *threadId = (android_thread_id_t)thread; // XXX: this is not portable
    }
    return 1;
}
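Stripped of the Android plumbing, the function above is just the standard detached-POSIX-thread pattern. A self-contained equivalent, purely for illustration:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

// Minimal standalone version of the pattern used by androidCreateRawThreadEtc():
// create a detached pthread running an entry function with a user cookie.
static void* entry(void* userData) {
    printf("worker running, cookie=%p\n", userData);
    return NULL;
}

int main() {
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    // Detached: the thread's resources are reclaimed automatically on exit,
    // so nobody has to call pthread_join().
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    pthread_t thread;
    int cookie = 42;
    int result = pthread_create(&thread, &attr, entry, &cookie);
    pthread_attr_destroy(&attr);
    if (result != 0) {
        return 1;  // pthread_create() returns an errno-style code on failure
    }
    sleep(1);  // crude: give the detached worker time to run before main() exits
    return 0;
}

Back in Threads.cpp, the entry function _threadLoop() is where the per-iteration dispatch happens: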
int Thread::_threadLoop(void* user)
{
    Thread* const self = static_cast<Thread*>(user);
    //... (elided: the strong/weak self-references used at the bottom of the loop are set up here)
    bool first = true;
    // As the loop below shows, the thread keeps calling threadLoop() until told to
    // exit; the loop can also block, which is used to coordinate with other threads.
    do {
        bool result;
        if (first) {
            first = false;
            self->mStatus = self->readyToRun();
            result = (self->mStatus == NO_ERROR);
            if (result && !self->exitPending()) {
                // Binder threads (and maybe others) rely on threadLoop
                // running at least once after a successful ::readyToRun()
                // (unless, of course, the thread has already been asked to exit
                // at that point).
                // This is because threads are essentially used like this:
                //   (new ThreadSubclass())->run();
                // The caller therefore does not retain a strong reference to
                // the thread and the thread would simply disappear after the
                // successful ::readyToRun() call instead of entering the
                // threadLoop at least once.
                result = self->threadLoop();
            }
        } else {
            result = self->threadLoop();
        }
        // establish a scope for mLock
        {
            Mutex::Autolock _l(self->mLock);
            if (result == false || self->mExitPending) {
                self->mExitPending = true;
                self->mRunning = false;
                // clear thread ID so that requestExitAndWait() does not exit if
                // called by a new thread using the same thread ID as this one.
                self->mThread = thread_id_t(-1);
                // note that interested observers blocked in requestExitAndWait are
                // awoken by broadcast, but blocked on mLock until break exits scope
                self->mThreadExitedCondition.broadcast();
                break;
            }
        }
        // Release our strong reference, to let a chance to the thread
        // to die a peaceful death.
        strong.clear();
        // And immediately, re-acquire a strong reference for the next loop
        strong = weak.promote();
    } while(strong != 0);
    return 0;
}
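The loop above implies a simple contract for every Thread subclass: return true from threadLoop() to be called again, return false (or call requestExit()) to stop. A minimal hypothetical subclass to make that explicit:

#include <utils/Thread.h>
#include <utils/Log.h>
#include <unistd.h>

using namespace android;

// Hypothetical example of the contract _threadLoop() enforces.
class TickThread : public Thread {
    virtual bool threadLoop() {
        if (exitPending()) {
            return false;      // honor requestExit()/requestExitAndWait()
        }
        ALOGV("tick");
        usleep(10 * 1000);     // one unit of work per iteration
        return true;           // ask _threadLoop() to call us again
    }
};

// usage: sp<TickThread> t = new TickThread();
//        t->run("TickThread");  // threadLoop() now runs repeatedly
//        t->requestExit();      // loop ends after the current iteration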
From that large block of code we conclude that all that remains is to analyze the threadLoop() implementation, so let's return to AudioTrack.cpp and AudioTrackThread's threadLoop():
bool AudioTrack::AudioTrackThread::threadLoop()
{
    {
        AutoMutex _l(mMyLock);
        if (mPaused) {
            mMyCond.wait(mMyLock);
            // caller will check for exitPending()
            return true;
        }
        if (mIgnoreNextPausedInt) {
            mIgnoreNextPausedInt = false;
            mPausedInt = false;
        }
        if (mPausedInt) {
            if (mPausedNs > 0) {
                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
            } else {
                mMyCond.wait(mMyLock);
            }
            mPausedInt = false;
            return true;
        }
    }
    if (exitPending()) {
        return false;
    }
    nsecs_t ns = mReceiver.processAudioBuffer();
    switch (ns) {
    case 0:
        return true;
    case NS_INACTIVE:
        pauseInternal();
        return true;
    case NS_NEVER:
        return false;
    case NS_WHENEVER:
        // Event driven: call wake() when callback notifications conditions change.
        ns = INT64_MAX;
        // fall through
    default:
        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
        pauseInternal(ns);
        return true;
    }
}
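The special ns values in that switch are sentinels declared in AudioTrack.h. Reproduced here from memory for reference (they are negative so they can never collide with a real timeout; check your own tree for the exact declaration):

// From AudioTrack.h: special return codes for processAudioBuffer().
static const nsecs_t NS_WHENEVER = -1;  // callback at an unspecified, event-driven time
static const nsecs_t NS_INACTIVE = -2;  // track inactive: pause the thread until resumed
static const nsecs_t NS_NEVER    = -3;  // no more callbacks: threadLoop() returns false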
The crucial statement is mReceiver.processAudioBuffer(), and mReceiver is the AudioTrack itself, so, go on …
nsecs_t AudioTrack::processAudioBuffer()
{
    // mCblk is the control block shared with the server side (AudioFlinger);
    // it must exist by this point, so abort if it is NULL.
    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
    mLock.lock();
    //...
    // Can only reference mCblk while locked
    int32_t flags = android_atomic_and(
        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
    // Check for track invalidation
    if (flags & CBLK_INVALID) {
        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
        // AudioSystem cache. We should not exit here but after calling the callback so
        // that the upper layers can recreate the track
        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
            status_t status __unused = restoreTrack_l("processAudioBuffer");
            // FIXME unused status
            // after restoration, continue below to make sure that the loop and buffer events
            // are notified because they have been cleared from mCblk->mFlags above.
        }
    }
    // Snapshot the track state; underrun below means the server side
    // (the consumer) has run out of data to play.
    bool waitStreamEnd = mState == STATE_STOPPING;
    bool active = mState == STATE_ACTIVE;
    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
    bool newUnderrun = false;
    if (flags & CBLK_UNDERRUN) {
#if 0
        // Currently in shared buffer mode, when the server reaches the end of buffer,
        // the track stays active in continuous underrun state. It's up to the application
        // to pause or stop the track, or set the position to a new offset within buffer.
        // This was some experimental code to auto-pause on underrun. Keeping it here
        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
        if (mTransfer == TRANSFER_SHARED) {
            mState = STATE_PAUSED;
            active = false;
        }
#endif
        if (!mInUnderrun) {
            mInUnderrun = true;
            newUnderrun = true;
        }
    }
    // Get the current playback position from the server side
    size_t position = updateAndGetPosition_l();
    // Check whether the marker position has been reached
    bool markerReached = false;
    size_t markerPosition = mMarkerPosition;
    // FIXME fails for wraparound, need 64 bits
    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
        mMarkerReached = markerReached = true;
    }
    // Determine number of new position callback(s) that will be needed, while locked
    size_t newPosCount = 0;
    size_t newPosition = mNewPosition;
    size_t updatePeriod = mUpdatePeriod;
    // FIXME fails for wraparound, need 64 bits
    if (updatePeriod > 0 && position >= newPosition) {
        newPosCount = ((position - newPosition) / updatePeriod) + 1;
        mNewPosition += updatePeriod * newPosCount;
    }
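    // Worked example (illustrative numbers): with updatePeriod = 1000 frames,
    // mNewPosition = 5000 and position = 7200, newPosCount =
    // ((7200 - 5000) / 1000) + 1 = 3, so EVENT_NEW_POS fires three times below
    // and mNewPosition advances to 5000 + 3 * 1000 = 8000.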
    // Cache other fields that will be needed soon
    uint32_t sampleRate = mSampleRate;
    float speed = mPlaybackRate.mSpeed;
    const uint32_t notificationFrames = mNotificationFramesAct;
    if (mRefreshRemaining) {
        mRefreshRemaining = false;
        mRemainingFrames = notificationFrames;
        mRetryOnPartialBuffer = false;
    }
    size_t misalignment = mProxy->getMisalignment();
    uint32_t sequence = mSequence;
    sp<AudioTrackClientProxy> proxy = mProxy;
    // Determine the number of new loop callback(s) that will be needed, while locked.
    int loopCountNotifications = 0;
    uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
    // Get the loop count
    if (mLoopCount > 0) {
        int loopCount;
        size_t bufferPosition;
        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
        loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
        loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
        mLoopCountNotified = loopCount; // discard any excess notifications
    } else if (mLoopCount < 0) {
        // FIXME: We're not accurate with notification count and position with infinite looping
        // since loopCount from server side will always return -1 (we could decrement it).
        size_t bufferPosition = mStaticProxy->getBufferPosition();
        loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
        loopPeriod = mLoopEnd - bufferPosition;
    } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
        size_t bufferPosition = mStaticProxy->getBufferPosition();
        loopPeriod = mFrameCount - bufferPosition;
    }
    mLock.unlock();
    // get anchor time to account for callbacks.
    const nsecs_t timeBeforeCallbacks = systemTime();
    /**
     * Check whether the stream has finished playing, and report the
     * playback state back to the Java layer.
     */
    if (waitStreamEnd) {
        // FIXME: Instead of blocking in proxy->waitStreamEndDone(), Callback thread
        // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
        // (and make sure we don't callback for more data while we're stopping).
        // This helps with position, marker notifications, and track invalidation.
        struct timespec timeout;
        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
        timeout.tv_nsec = 0;
        status_t status = proxy->waitStreamEndDone(&timeout);
        switch (status) {
        case NO_ERROR:
        case DEAD_OBJECT:
        case TIMED_OUT:
            if (status != DEAD_OBJECT) {
                // Report the EVENT_STREAM_END state up to the Java layer
                mCbf(EVENT_STREAM_END, mUserData, NULL);
            }
            {
                AutoMutex lock(mLock);
                // The previously assigned value of waitStreamEnd is no longer valid,
                // since the mutex has been unlocked and either the callback handler
                // or another thread could have re-started the AudioTrack during that time.
                waitStreamEnd = mState == STATE_STOPPING;
                if (waitStreamEnd) {
                    mState = STATE_STOPPED;
                    mReleased = 0;
                }
            }
            if (waitStreamEnd && status != DEAD_OBJECT) {
                return NS_INACTIVE;
            }
            break;
        }
        return 0;
    }
    // Deliver the callbacks for the conditions detected above...
    if (newUnderrun) {
        mCbf(EVENT_UNDERRUN, mUserData, NULL);
    }
    // Loop-playback callbacks
    while (loopCountNotifications > 0) {
        mCbf(EVENT_LOOP_END, mUserData, NULL);
        --loopCountNotifications;
    }
    if (flags & CBLK_BUFFER_END) {
        mCbf(EVENT_BUFFER_END, mUserData, NULL);
    }
    if (markerReached) {
        mCbf(EVENT_MARKER, mUserData, &markerPosition);
    }
    // Periodic callback, fired once for every updatePeriod frames played
    while (newPosCount > 0) {
        size_t temp = newPosition;
        mCbf(EVENT_NEW_POS, mUserData, &temp);
        newPosition += updatePeriod;
        newPosCount--;
    }
    if (mObservedSequence != sequence) {
        mObservedSequence = sequence;
        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
        // for offloaded tracks, just wait for the upper layers to recreate the track
        if (isOffloadedOrDirect()) {
            return NS_INACTIVE;
        }
    }
    // if inactive, then don't run me again until re-started
    if (!active) {
        return NS_INACTIVE;
    }
    //... (elided: ns, the deadline used below for the next timed event, is computed here)
    // EVENT_MORE_DATA callback handling.
    // Timing for linear pcm audio data formats can be derived directly from the
    // buffer fill level.
    // Timing for compressed data is not directly available from the buffer fill level,
    // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
    // to return a certain fill level.
    struct timespec timeout;
    const struct timespec *requested = &ClientProxy::kForever;
    if (ns != NS_WHENEVER) {
        timeout.tv_sec = ns / 1000000000LL;
        timeout.tv_nsec = ns % 1000000000LL;
        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
        requested = &timeout;
    }
    // While part of the audio stream still remains to be written
    while (mRemainingFrames > 0) {
        Buffer audioBuffer;
        audioBuffer.frameCount = mRemainingFrames;
        size_t nonContig;
        // Ask the shared memory block for an available buffer.
        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
        requested = &ClientProxy::kNonBlocking;
        size_t avail = audioBuffer.frameCount + nonContig;
        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
        if (err != NO_ERROR) {
            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
                    (isOffloaded() && (err == DEAD_OBJECT))) {
                // FIXME bug 25195759
                return 1000000;
            }
            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
            return NS_NEVER;
        }
        if (mRetryOnPartialBuffer && audio_is_linear_pcm(mFormat)) {
            mRetryOnPartialBuffer = false;
            if (avail < mRemainingFrames) {
                if (ns > 0) { // account for obtain time
                    const nsecs_t timeNow = systemTime();
                    // timeAfterCallbacks is set in code elided above
                    ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
                }
                nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);