参考:http://blog.csdn.net/droidphone/article/details/5941344
引子
Android Framework的音频子系统中,每一个音频流对应着一个AudioTrack类的一个实例,每个AudioTrack会在创建时注册到AudioFlinger中,由AudioFlinger把所有的AudioTrack进行混合(Mixer),然后输送到AudioHardware中进行播放。
代码分析:
1. Framework 或者JAVA层通过JNI,new AudioTrack();
这里具体分析StagefrightPlayer 中Audio的功能实现;
MediaPlayerService.cpp
status_tMediaPlayerService::Client::setDataSource(..){
...
//创建class StagefrightPlayer : publicMediaPlayerInterface
sp<MediaPlayerBase> p = createPlayer(playerType);
if (p == NULL) return NO_INIT;
if (!p->hardwareOutput()) {
//class AudioOutput : publicMediaPlayerBase::AudioSink 对AudioTrack的方法进行封装
mAudioOutput = new AudioOutput();
//StagefrightPlayer关联AudioOutput
static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
}
}
AwesomePlayer.cpp
// AwesomePlayer.cpp — on play, hand the audio sink to a new AudioPlayer.
status_t AwesomePlayer::play_l() {
.....
// mAudioSink is the AudioOutput installed via setAudioSink(); AudioPlayer::start()
// later chooses between this sink and a directly-created AudioTrack.
mAudioPlayer = new AudioPlayer(mAudioSink);
}
AudioPlayer.cpp
// AudioPlayer::start (excerpt)
// If an AudioSink (AudioOutput) is attached, play through its wrapper around
// AudioTrack; otherwise create and drive an AudioTrack directly.
status_t AudioPlayer::start(bool sourceAlreadyStarted) {
    ......
    // With a sink attached, use the AudioSink wrapper over AudioTrack;
    // otherwise fall back to a raw AudioTrack instance.
    if (mAudioSink.get() != NULL) {
        status_t err = mAudioSink->open(mSampleRate, numChannels, AudioSystem::PCM_16_BIT,
                                        DEFAULT_AUDIOSINK_BUFFERCOUNT,
                                        &AudioPlayer::AudioSinkCallback, this);
    } else {
        mAudioTrack = new AudioTrack(AudioSystem::MUSIC, mSampleRate, AudioSystem::PCM_16_BIT,
                                     (numChannels == 2) ? AudioSystem::CHANNEL_OUT_STEREO
                                                        : AudioSystem::CHANNEL_OUT_MONO,
                                     0, 0, &AudioCallback, this, 0);
        ......
    }
}
2.建立AudioTrack与AudioFlinger关联, AudioTrack::set();
a. AudioFlinger根据StreamType打开不同硬件设备,并为该输出设备创建混音线程: MixerThread(),并把该线程的id作为getOutput()的返回值返回给AudioTrack;
// AudioFlinger constructor (excerpt) — opens the audio HAL; all output
// streams are subsequently created through this interface.
AudioFlinger::AudioFlinger()
{......
mAudioHardware = AudioHardwareInterface::create();
}
// Opens a hardware output stream and spawns a MixerThread that mixes all
// tracks attached to that output.
int AudioFlinger::openOutput(...)
{
// Open an output stream on the audio HAL.
AudioStreamOut *output = mAudioHardware->openOutputStream.......
// One MixerThread per output; keyed by a freshly incremented thread id.
thread = new MixerThread(this, output, ++mNextThreadId);
mPlaybackThreads.add(mNextThreadId, thread);...
// The thread id doubles as the output handle returned to AudioTrack via getOutput().
return mNextThreadId;
}
b.AudioTrack通过binder机制调用AudioFlinger的createTrack();通过IAudioTrack关联AudioFlinger,通过AudioFlinger分配的FIFO,实现音频数据的读写。
AudioTrack通过IAudioTrack接口控制该音轨的状态,例如start,stop,pause等等;
audio_track_cblk_t这个结构是FIFO实现的关键,该结构是在createTrack的时候,由AudioFlinger申请相应的内存,然后通过IMemory接口返回AudioTrack的,这样AudioTrack和AudioFlinger管理着同一个audio_track_cblk_t,通过它实现了环形FIFO,AudioTrack向FIFO中写入音频数据,AudioFlinger从FIFO中读取音频数据,经Mixer后送给AudioHardware进行播放。
// AudioTrack::createTrack (excerpt)
// Binds this AudioTrack to AudioFlinger over binder: obtains an IAudioTrack
// proxy, then maps the shared audio_track_cblk_t control block and FIFO.
status_t AudioTrack::createTrack(...) {
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    sp<IAudioTrack> track = audioFlinger->createTrack(getpid(), .....);
    mAudioTrack = track; // keep the proxy — start/stop/pause go through IAudioTrack
    // The control block sits at the start of the shared memory block
    // allocated by AudioFlinger (see TrackBase).
    mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer());
    // The FIFO buffer follows the control-block header in the same region.
    mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
}
// AudioFlinger creates a TrackHandle for control of the track and returns it
// to the client as the IAudioTrack interface result of createTrack().
sp<IAudioTrack> AudioFlinger::createTrack(...) {
    sp<PlaybackThread::Track> track = thread->createTrack_l(client, streamType, sampleRate, format,
                                                            channelCount, frameCount, sharedBuffer, &lStatus);
    // track = new Track.... (constructed inside the playback thread)
    // class TrackHandle : public android::BnAudioTrack;
    // class BnAudioTrack : public BnInterface<IAudioTrack>
    sp<TrackHandle> trackHandle = new TrackHandle(track);
    ....
    return trackHandle;
}
在createTrack的过程中,AudioFlinger会根据传入的frameCount参数,申请一块内存,AudioTrack可以通过IAudioTrack接口的getCblk()函数获得指向该内存块的IMemory接口,然后AudioTrack通过该IMemory接口的pointer()函数获得指向该内存块的指针,这块内存的开始部分就是audio_track_cblk_t结构,紧接着是大小为frameCount*frameSize的FIFO内存。
IMemory->pointer() ----> _____________________________________________________________
                        |__audio_track_cblk_t__|__FIFO buffer (size == frameCount*frameSize)__|
// AudioFlinger sizes the shared buffer from the AudioTrack's properties; class Track : public TrackBase
AudioFlinger::ThreadBase::TrackBase::TrackBase(..) {
// Control-block header plus frameCount frames of 16-bit PCM
// (one int16_t per channel per frame).
size = sizeof(audio_track_cblk_t) + frameCount*channelCount*sizeof(int16_t);
// Allocate from the client's shared-memory heap so both the client process
// (AudioTrack) and AudioFlinger can map the same region.
sp<IMemory> mCblkMemory = client->heap()->allocate(size);
audio_track_cblk_t* mCblk = static_cast<audio_track_cblk_t*>(mCblkMemory->pointer());
// Placement-new: construct the control block in-place inside the shared region.
new(mCblk) audio_track_cblk_t();
}