Android audio 2: AudioRecord analysis, part 1
Android audio 3: AudioRecord analysis, part 2
Android audio 4: AudioTrack analysis, part 1
Android audio 5: AudioTrack analysis, part 2
Android audio 6: AudioRecord/AudioTrack capture and playback example
This section gives a brief analysis of AudioTrack.cpp.
File:
frameworks/av/media/libmedia/AudioTrack.cpp
The playback logic implemented by the native AudioTrack methods is similar to that of the Java layer; for the detailed steps of playing audio with AudioTrack, refer to the previous section.
First, look at the constructors and the destructor of the native AudioTrack:
The parameterized constructors call set(...) to complete the initialization of the AudioTrack.
AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}
AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
}
AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
}
AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        // No lock here: worst case we remove a NULL callback which will be a nop
        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
        }
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}
AudioTrack::set
The code of AudioTrack::set is long, so instead of pasting the source, here is a brief summary of what it does (a usage sketch follows the list below):
- Select the transfer mode (TRANSFER_SHARED / TRANSFER_SYNC / TRANSFER_CALLBACK) according to transferType; by default (no callback and no shared buffer) AudioTrack::TRANSFER_SYNC is used;
- Check that the output audio format and channel mask are valid;
- Create and start the playback thread AudioTrackThread;
- Call AudioTrack::createTrack_l();
- Check whether the supplied audio parameters match those reported by AudioSystem;
- Call audioFlinger->createTrack(...); Android audio cannot work without AudioFlinger. AudioFlinger is the bridge of the audio system: upward it provides the access interfaces for the upper layers, and downward it manages the audio devices through the HAL. It is the core, and the hard part, of the whole audio system;
- Use binder to link the Service/Client sides:
  mDeathNotifier = new DeathNotifier(this); IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
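As a minimal caller-side sketch (not from AudioTrack.cpp; the helper name makePcmTrack and the parameter values are illustrative assumptions, and the call relies on the default arguments declared in AudioTrack.h): constructing an AudioTrack with the streaming constructor shown above runs set(), and initCheck() reports the resulting mStatus.

#include <media/AudioTrack.h>

using namespace android;

// Hypothetical helper: create a streaming (TRANSFER_SYNC) track and verify that set() succeeded.
sp<AudioTrack> makePcmTrack() {
    sp<AudioTrack> track = new AudioTrack(
            AUDIO_STREAM_MUSIC,             // streamType
            44100,                          // sampleRate
            AUDIO_FORMAT_PCM_16_BIT,        // format
            AUDIO_CHANNEL_OUT_STEREO,       // channelMask
            0,                              // frameCount: let set() choose a default
            AUDIO_OUTPUT_FLAG_NONE);        // flags; no callback/shared buffer -> TRANSFER_SYNC
    if (track->initCheck() != NO_ERROR) {   // initCheck() returns the mStatus filled in by set()
        return nullptr;                     // set() failed (invalid format, no output, ...)
    }
    return track;
}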
AudioTrack::write (playing audio)
write(...) transfers the audio data by calling AudioTrack::obtainBuffer(...) to obtain a writable region of the shared buffer, copying the user data into it, and then handing it back with releaseBuffer(...).
AudioTrack::obtainBuffer(...) in turn uses the client-side proxy, AudioTrackClientProxy::obtainBuffer(...), to acquire that region of shared memory.
The server-side counterpart of this call is declared as a virtual function:
virtual status_t obtainBuffer(Buffer* buffer, bool ackFlush = false);
Because it is virtual, the proxy on the playback-service side implements the business logic of obtainBuffer(...), i.e. actually consuming and playing the audio.
ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
{
    if (mTransfer != TRANSFER_SYNC) {
        return INVALID_OPERATION;
    }

    if (isDirect()) {
        AutoMutex lock(mLock);
        int32_t flags = android_atomic_and(
                ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
                &mCblk->mFlags);
        if (flags & CBLK_INVALID) {
            return DEAD_OBJECT;
        }
    }

    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
        // Sanity-check: user is most-likely passing an error code, and it would
        // make the return value ambiguous (actualSize vs error).
        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
        return BAD_VALUE;
    }

    size_t written = 0;
    Buffer audioBuffer;

    while (userSize >= mFrameSize) {
        audioBuffer.frameCount = userSize / mFrameSize;

        // Obtain a writable region of the shared buffer (may block until space is available)
        status_t err = obtainBuffer(&audioBuffer,
                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
        if (err < 0) {
            if (written > 0) {
                break;
            }
            return ssize_t(err);
        }

        size_t toWrite = audioBuffer.size;
        memcpy(audioBuffer.i8, buffer, toWrite);
        buffer = ((const char *) buffer) + toWrite;
        userSize -= toWrite;
        written += toWrite;

        // Hand the filled region back so the server side can consume it
        releaseBuffer(&audioBuffer);
    }

    if (written > 0) {
        mFramesWritten += written / mFrameSize;
    }
    return written;
}
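To round this off, here is a minimal usage sketch of the TRANSFER_SYNC path above (the helper name playBuffer and the buffer handling are illustrative assumptions, not part of AudioTrack.cpp): a blocking write() keeps looping over obtainBuffer()/memcpy()/releaseBuffer() until the caller's data has been copied into the shared ring buffer.

#include <media/AudioTrack.h>

using namespace android;

// Hypothetical helper: push one PCM buffer through a TRANSFER_SYNC track.
// 'pcm'/'bytes' are assumed to match the track's format and channel mask.
status_t playBuffer(const sp<AudioTrack>& track, const void* pcm, size_t bytes) {
    status_t err = track->start();
    if (err != NO_ERROR) {
        return err;
    }
    const uint8_t* src = static_cast<const uint8_t*>(pcm);
    size_t remaining = bytes;
    while (remaining > 0) {
        // Blocking write: internally loops on obtainBuffer()/memcpy()/releaseBuffer()
        ssize_t written = track->write(src, remaining, true /*blocking*/);
        if (written < 0) {
            return status_t(written);       // e.g. DEAD_OBJECT after the track was invalidated
        }
        src += written;
        remaining -= written;
    }
    track->stop();
    return NO_ERROR;
}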