While reading AudioTrack's write function, we learned that audio data is ultimately written into the audio_track_cblk_t structure.
This structure is created in AudioFlinger.
So how does AudioFlinger consume that data?
That is what we will study today.
When writing data, audio_track_cblk_t::framesAvailable_l is called to check whether there is space available to write into.
The audio_track_cblk_t class has another function, framesReady, whose name suggests it reports how much data is ready.
So when AudioFlinger consumes audio data, it presumably first calls framesReady to see how many frames have been written, and then uses that data.
*****************************************Source code******************************************
uint32_t audio_track_cblk_t::framesReady()
{
    uint64_t u = this->user;   // total frames produced so far by the client side
    uint64_t s = this->server; // total frames consumed so far by the server side

    if (flags & CBLK_DIRECTION_MSK) {
        // Output (playback) direction: the client writes, the server reads.
        if (u < loopEnd) {
            return u - s;
        } else {
            Mutex::Autolock _l(lock);
            if (loopCount >= 0) {
                // Frames remaining across the loops still to be played.
                return (loopEnd - loopStart)*loopCount + u - s;
            } else {
                return UINT_MAX; // infinite looping
            }
        }
    } else {
        // Input (record) direction: the server writes, the client reads.
        return s - u;
    }
}
**********************************************************************************************
Source path:
frameworks\base\media\libmedia\AudioTrack.cpp
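To make the counter arithmetic concrete: for a playback track without looping, framesReady is simply user minus server. Here is a minimal, self-contained sketch; this is my own toy model of the two counters, not the real control block:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#include <cstdint>
#include <cstdio>

// Hypothetical, simplified model of the two cblk counters for a
// playback (OUT-direction) track without looping.
struct MiniCblk {
    uint64_t user;   // total frames the client (AudioTrack::write) has produced
    uint64_t server; // total frames the server side (the mixer) has consumed

    uint64_t framesReady() const { return user - server; }
};

int main() {
    MiniCblk cblk = {1000, 400};  // the app wrote 1000 frames, the mixer consumed 400
    // prints "framesReady = 600": frames the mixer may still read
    std::printf("framesReady = %llu\n", (unsigned long long)cblk.framesReady());
    return 0;
}
----------------------------------------------------------------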
#######################Notes################################
Until now we have been following the vine to the melon (tracing forward from callers to callees); today we follow the melon back to the vine and search for callers.
Let's see where the function framesReady is called.
A search turns up quite a few call sites.
However, most of them only use the return value in a condition;
only AudioFlinger::PlaybackThread::Track::getNextBuffer saves the return value.
From reading the write path earlier, we know that the return value of framesAvailable is saved and used when writing data.
By analogy, when reading data, the return value of framesReady should also be saved and used.
So we arrive at AudioFlinger::PlaybackThread::Track::getNextBuffer:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
{
    audio_track_cblk_t* cblk = this->cblk();
    uint32_t framesReady;
    uint32_t framesReq = buffer->frameCount;

    // Check if last stepServer failed, try to step now
    if (mFlags & TrackBase::STEPSERVER_FAILED) {
        if (!step()) goto getNextBuffer_exit;
        LOGV("stepServer recovered");
        mFlags &= ~TrackBase::STEPSERVER_FAILED;
    }

    framesReady = cblk->framesReady();

    if (LIKELY(framesReady)) {
        uint64_t s = cblk->server;
        uint64_t bufferEnd = cblk->serverBase + cblk->frameCount;

        bufferEnd = (cblk->loopEnd < bufferEnd) ? cblk->loopEnd : bufferEnd;
        if (framesReq > framesReady) {
            framesReq = framesReady;
        }
        if (s + framesReq > bufferEnd) {
            framesReq = bufferEnd - s;
        }

        buffer->raw = getBuffer(s, framesReq);
        if (buffer->raw == 0) goto getNextBuffer_exit;
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
void* AudioFlinger::ThreadBase::TrackBase::getBuffer(uint32_t offset, uint32_t frames) const {
    audio_track_cblk_t* cblk = this->cblk();
    int8_t *bufferStart = (int8_t *)mBuffer + (offset-cblk->serverBase)*cblk->frameSize;
    int8_t *bufferEnd = bufferStart + frames * cblk->frameSize;

    // Check validity of returned pointer in case the track control block would have been corrupted.
    if (bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd ||
        ((unsigned long)bufferStart & (unsigned long)(cblk->frameSize - 1))) {
        LOGE("TrackBase::getBuffer buffer out of range:\n start: %p, end %p , mBuffer %p mBufferEnd %p\n \
                server %lld, serverBase %lld, user %lld, userBase %lld, channelCount %d",
                bufferStart, bufferEnd, mBuffer, mBufferEnd,
                cblk->server, cblk->serverBase, cblk->user, cblk->userBase, cblk->channelCount);
        return 0;
    }
    return bufferStart;
}
----------------------------------------------------------------
        buffer->frameCount = framesReq;
        return NO_ERROR;
    }

getNextBuffer_exit:
    buffer->raw = 0;
    buffer->frameCount = 0;
    LOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get());
    return NOT_ENOUGH_DATA;
}
----------------------------------------------------------------
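Note how getNextBuffer clamps the request twice: first to the number of ready frames, then to the end of the contiguous region, so the returned chunk never wraps around the buffer. A small numeric sketch of that clamping (the variable names mirror the locals above; the values are made up):
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#include <cstdint>
#include <cstdio>

int main() {
    uint64_t s           = 900;  // server position (frames consumed so far)
    uint32_t framesReady = 300;  // frames written by the client but not yet consumed
    uint64_t bufferEnd   = 1024; // end of the contiguous region (serverBase + frameCount)
    uint32_t framesReq   = 512;  // what the mixer asked for

    if (framesReq > framesReady) framesReq = framesReady; // only 300 are ready
    if (s + framesReq > bufferEnd)
        framesReq = (uint32_t)(bufferEnd - s);            // and only 124 are contiguous
    std::printf("framesReq = %u\n", framesReq);           // prints "framesReq = 124"
    return 0;
}
----------------------------------------------------------------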
Now let's see where AudioFlinger::PlaybackThread::Track::getNextBuffer is called.
A search shows that inside AudioFlinger itself, only AudioFlinger::DirectOutputThread::threadLoop calls it directly,
but the AudioTrack we are discussing is used for ordinary music playback, so that is certainly not our path.
We also find several places in AudioMixer that call getNextBuffer, in the following form:
t.bufferProvider->getNextBuffer(&t.buffer);
So what is the relationship between bufferProvider and AudioFlinger::PlaybackThread::Track?
bufferProvider is declared as: AudioBufferProvider* bufferProvider;
and the following inheritance relationships exist:
class Track : public TrackBase
class TrackBase : public AudioBufferProvider, public RefBase
So bufferProvider can ultimately point to an AudioFlinger::PlaybackThread::Track object,
which means t.bufferProvider->getNextBuffer(&t.buffer) actually invokes AudioFlinger::PlaybackThread::Track::getNextBuffer, as the sketch below illustrates.
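A minimal sketch of why that call dispatches to Track::getNextBuffer; the class names mirror the real ones, but the bodies here are hypothetical stand-ins:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#include <cstdio>

struct Buffer { void* raw; };          // stand-in for AudioBufferProvider::Buffer

class AudioBufferProvider {
public:
    virtual ~AudioBufferProvider() {}
    virtual int getNextBuffer(Buffer* buffer) = 0; // pure virtual interface
};

class TrackBase : public AudioBufferProvider {};   // inherits the interface

class Track : public TrackBase {                   // the concrete implementation
public:
    virtual int getNextBuffer(Buffer* buffer) {
        std::printf("Track::getNextBuffer called\n");
        buffer->raw = 0;
        return 0;
    }
};

int main() {
    Track track;
    AudioBufferProvider* bufferProvider = &track; // what AudioMixer ends up holding
    Buffer b;
    bufferProvider->getNextBuffer(&b);            // virtual dispatch lands in Track
    return 0;
}
----------------------------------------------------------------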
AudioFlinger::MixerThread::prepareTracks_l calls AudioMixer::setBufferProvider,
and AudioMixer::setBufferProvider is what assigns bufferProvider.
prepareTracks_l itself is called from AudioFlinger::MixerThread::threadLoop
and AudioFlinger::DuplicatingThread::threadLoop, as sketched below.
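In effect, setBufferProvider just stores the Track pointer (as an AudioBufferProvider*) into the track_t slot that the process__* hooks later read. A hedged sketch of that wiring; the structure around it is simplified, and only the field name bufferProvider comes from the excerpt:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class AudioBufferProvider;               // interface from the sketch above

// Hypothetical, stripped-down track_t: the real one also holds
// volumes, hooks, buffers, and more.
struct track_t {
    AudioBufferProvider* bufferProvider;
};

// What AudioMixer::setBufferProvider boils down to for the current track:
void setBufferProviderSketch(track_t& t, AudioBufferProvider* provider) {
    t.bufferProvider = provider; // later used as t.bufferProvider->getNextBuffer(...)
}
----------------------------------------------------------------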
Let's first look at the places in AudioMixer that call AudioFlinger::PlaybackThread::Track::getNextBuffer.
The following AudioMixer functions all call getNextBuffer:
process__nop
process__genericNoResampling
process__genericResampling
process__OneTrack16BitsStereoNoResampling
process__TwoTracks16BitsStereoNoResampling
We will take process__OneTrack16BitsStereoNoResampling as our example.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// one track, 16 bits stereo without resampling is the most common case
void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state)
{
    const int i = 31 - __builtin_clz(state->enabledTracks);
    const track_t& t = state->tracks[i];

    AudioBufferProvider::Buffer& b(t.buffer);

    int32_t* out = t.mainBuffer;
    size_t numFrames = state->frameCount;

    const int16_t vl = t.volume[0];
    const int16_t vr = t.volume[1];
    const uint32_t vrl = t.volumeRL;
    while (numFrames) {
        b.frameCount = numFrames;
        t.bufferProvider->getNextBuffer(&b);
        int16_t const *in = b.i16;

        // in == NULL can happen if the track was flushed just after having
        // been enabled for mixing.
        if (in == NULL || ((unsigned long)in & 3)) {
            memset(out, 0, numFrames*MAX_NUM_CHANNELS*sizeof(int16_t));
            LOGE_IF(((unsigned long)in & 3), "process stereo track: input buffer alignment pb: buffer %p track %d, channels %d, needs %08x",
                    in, i, t.channelCount, t.needs);
            return;
        }
        size_t outFrames = b.frameCount;

        if (UNLIKELY(uint32_t(vl) > UNITY_GAIN || uint32_t(vr) > UNITY_GAIN)) {
            // volume is boosted, so we might need to clamp even though
            // we process only one track.
            do {
                uint32_t rl = *reinterpret_cast<uint32_t const *>(in);
                in += 2;
                int32_t l = mulRL(1, rl, vrl) >> 12;
                int32_t r = mulRL(0, rl, vrl) >> 12;
                // clamping...
                l = clamp16(l);
                r = clamp16(r);
                *out++ = (r<<16) | (l & 0xFFFF);
            } while (--outFrames);
        } else {
            do {
                uint32_t rl = *reinterpret_cast<uint32_t const *>(in);
                in += 2;
                int32_t l = mulRL(1, rl, vrl) >> 12;
                int32_t r = mulRL(0, rl, vrl) >> 12;
                *out++ = (r<<16) | (l & 0xFFFF);
            } while (--outFrames);
        }
        numFrames -= b.frameCount;
        t.bufferProvider->releaseBuffer(&b);
    }
}
----------------------------------------------------------------
As we can see, the main job of this function is to copy data from audio_track_cblk_t into track_t's mainBuffer, applying the volume on the way.
What is track_t? It is a member of state_t.
And what is state_t? It is the parameter passed into the function.
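The per-sample arithmetic in the loop is 4.12 fixed-point: UNITY_GAIN is 0x1000, mulRL multiplies the packed left/right samples by the packed volumes, the result is shifted right by 12, and in the boosted case clamp16 saturates it back to 16 bits. A standalone sketch of that arithmetic on a single channel (clamp16 here is my own plain restatement, not the real inline):
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#include <cstdint>
#include <cstdio>

// Saturate a 32-bit intermediate back into the int16_t range.
static int16_t clamp16(int32_t sample) {
    if (sample >  32767) return  32767;
    if (sample < -32768) return -32768;
    return (int16_t)sample;
}

// Apply a 4.12 fixed-point gain (0x1000 == unity) to one sample.
static int16_t applyGain(int16_t sample, int32_t gain) {
    return clamp16((sample * gain) >> 12);
}

int main() {
    // Half volume: 0x0800/0x1000 == 0.5, so 10000 -> 5000.
    std::printf("%d\n", applyGain(10000, 0x0800));
    // Boosted volume (1.5x) overflows on a loud sample and is clamped:
    // 30000 * 1.5 = 45000 -> 32767.
    std::printf("%d\n", applyGain(30000, 0x1800));
    return 0;
}
----------------------------------------------------------------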
Two questions now need to be explored:
1. How is process__OneTrack16BitsStereoNoResampling called, and where does its argument come from?
2. How is the data placed into track_t's mainBuffer consumed afterwards?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
First, let's see how process__OneTrack16BitsStereoNoResampling gets called.
AudioMixer::process__validate uses process__OneTrack16BitsStereoNoResampling (it is one of the processing hooks that process__validate can select).
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
void AudioMixer::process__validate(state_t* state)
{
    LOGW_IF(!state->needsChanged,
        "in process__validate() but nothing's invalid");

    uint32_t changed = state->needsChanged;
    state->needsChanged = 0; // clear the validation flag

    // recompute which tracks are enabled / disabled
    uint32_t enabled = 0;
    uint32_t disabled = 0;
    while (changed) {
        const int i = 31 - __builtin_clz(changed);
        const uint32_t mask = 1<<i;
        changed &= ~mask;
        track_t& t = state->tracks[i];
        (t.enabled ? enabled : disabled) |= mask;
    }
    state->enabledTracks &= ~disabled;
    state->enabledTracks |= enabled;

    // compute everything we need...
    int countActiveTracks = 0;
    int all16BitsStereoNoResample = 1;
    int resampling = 0;
    int volumeRamp = 0;
    uint32_t en = state->enabledTracks;
    while (en) {
        const int i = 31 - __builtin_clz(en);
        en &= ~(1<<i);

        countActiveTracks++;
        track_t& t = state->tracks[i];
        uint32_t n = 0;
        n |= NEEDS_CHANNEL_1 + t.channelCount - 1;
        n |= NEEDS_FORMAT_16;
        n |= t.doesResample() ? NEEDS_RESAMPLE_ENABLED : NEEDS_RESAMPLE_DISABLED;
        if (t.auxLevel != 0 && t.auxBuffer != NULL) {
            n |= NEEDS_AUX_ENABLED;
        }

        if (t.volumeInc[0]|t.volumeInc[1]) {
            volumeRamp = 1;
        } else if (!t.doesResample() && t.volumeRL == 0) {
            n |= NEEDS_MUTE_ENABLED;
        }
        t.needs = n;

        if ((n & NEEDS_MUTE__MASK) == NEEDS_MUTE_ENABLED) {
            t.hook = track__nop;
        } else {
            if ((n & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED) {
                all16BitsStereoNoResample = 0;
            }
            if ((n & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) {
                all16BitsStereoNoResample = 0;
                resampling = 1;
                t.hook = track__genericResample;
            } else {
                if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
                    t.hook = track__16BitsMono;
                    all16BitsStereoNoResample = 0;
                }
                if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_2){
                    t.hook = track__16BitsStereo;
                }
            }
        }
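----------------------------------------------------------------
Both process__validate and the process__* functions walk the enabledTracks bitmask with the 31 - __builtin_clz(...) idiom. A standalone sketch of that loop, iterating track indices from the highest set bit down:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#include <cstdio>

int main() {
    // enabledTracks-style bitmask with bits 1, 4 and 9 set.
    unsigned int en = (1u << 1) | (1u << 4) | (1u << 9);
    while (en) {
        // __builtin_clz counts leading zeros in a 32-bit word,
        // so 31 - clz is the position of the highest set bit.
        const int i = 31 - __builtin_clz(en);
        en &= ~(1u << i);                        // clear it, keep the rest
        std::printf("track %d is enabled\n", i); // prints 9, then 4, then 1
    }
    return 0;
}
----------------------------------------------------------------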