下面我就以startRecording为例,分析一下从AudioRecord JAVA->JNI->AudioRecord Native的过程:
首先是AudioRecord.java:
//frameworks/base/media/java/android/media/AudioRecord.java
/**
 * Starts audio capture, optionally synchronized to the given MediaSyncEvent.
 * On success the recording state is advanced to RECORDSTATE_RECORDING.
 *
 * @throws IllegalStateException if this AudioRecord was not successfully
 *         initialized (mState != STATE_INITIALIZED).
 */
public void startRecording(MediaSyncEvent syncEvent)
throws IllegalStateException {
if (mState != STATE_INITIALIZED) {
throw new IllegalStateException("startRecording() called on an "
+ "uninitialized AudioRecord.");
}
// start recording
// NOTE(review): syncEvent is dereferenced without a null check here —
// presumably callers always pass a non-null event; verify against callers.
synchronized(mRecordingStateLock) {
// Only advance the Java-side state if the native layer reports SUCCESS.
if (native_start(syncEvent.getType(), syncEvent.getAudioSessionId()) == SUCCESS) {
handleFullVolumeRec(true);
mRecordingState = RECORDSTATE_RECORDING;
}
}
}
代码比较简单,直接调用native_start,通过查询JNI代码android_media_AudioRecord.cpp得出调用的函数是android_media_AudioRecord_start:
//frameworks/base/core/jni/android_media_AudioRecord.cpp
// JNI bridge for AudioRecord.native_start(): retrieves the native
// AudioRecord attached to the Java object and forwards the start request,
// translating the native status_t into a Java-layer status code.
static jint
android_media_AudioRecord_start(JNIEnv *env, jobject thiz, jint event, jint triggerSession)
{
sp<AudioRecord> lpRecorder = getAudioRecord(env, thiz);
if (lpRecorder == NULL ) {
// No native recorder is attached: surface this to Java as an
// IllegalStateException and report a generic error status.
jniThrowException(env, "java/lang/IllegalStateException", NULL);
return (jint) AUDIO_JAVA_ERROR;
}
// Cast the raw jints back to their native enum types and convert the
// returned status_t to the Java-visible status constant.
return nativeToJavaStatus(
lpRecorder->start((AudioSystem::sync_event_t)event, (audio_session_t) triggerSession));
}
在android_media_AudioRecord_start中调用AudioRecord的start函数:
//frameworks/av/media/libaudioclient/AudioRecord.cpp
// Client-side start: flushes stale buffered data, resets the read
// position/timestamp bookkeeping, then asks the server side (via the
// media::IAudioRecord binder held in mAudioRecord) to begin capture.
// If the shared control block is marked invalid (e.g. the server died),
// the record track is re-created via restoreRecord_l().
status_t AudioRecord::start(AudioSystem::sync_event_t event, audio_session_t triggerSession)
{
ALOGV("%s(%d): sync event %d trigger session %d", __func__, mPortId, event, triggerSession);
AutoMutex lock(mLock);
if (mActive) {
// Already recording: start() is idempotent, report success.
return NO_ERROR;
}
// discard data in buffer
const uint32_t framesFlushed = mProxy->flush();
// Fold both the frames already read and the frames just flushed into the
// server-relative offset, so the count stays consistent after the reset.
mFramesReadServerOffset -= mFramesRead + framesFlushed;
mFramesRead = 0;
mProxy->clearTimestamp(); // timestamp is invalid until next server push
// reset current position as seen by client to 0
mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
// force refresh of remaining frames by processAudioBuffer() as last
// read before stop could be partial.
mRefreshRemaining = true;
mNewPosition = mProxy->getPosition() + mUpdatePeriod;
int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);
// we reactivate markers (mMarkerPosition != 0) as the position is reset to 0.
// This is legacy behavior. This is not done in stop() to avoid a race condition
// where the last marker event is issued twice.
mMarkerReached = false;
// mActive is checked by restoreRecord_l
mActive = true;
status_t status = NO_ERROR;
if (!(flags & CBLK_INVALID)) {
// Normal path: ask the server-side record handle to start capturing.
status = mAudioRecord->start(event, triggerSession).transactionError();
if (status == DEAD_OBJECT) {
// Binder peer died; mark the control block invalid so the
// restore path below re-creates the track.
flags |= CBLK_INVALID;
}
}
if (flags & CBLK_INVALID) {
// Re-create the server-side record track (restore also starts it).
status = restoreRecord_l("start");
}
// Call these directly because we are already holding the lock.
mAudioRecord->setPreferredMicrophoneDirection(mSelectedMicDirection);
mAudioRecord->setPreferredMicrophoneFieldDimension(mSelectedMicFieldDimension);
if (status != NO_ERROR) {
// Start failed: roll back the active flag and record the error.
mActive = false;
ALOGE("%s(%d): status %d", __func__, mPortId, status);
mMediaMetrics.markError(status, __FUNCTION__);
} else {
mTracker->recordingStarted();
sp<AudioRecordThread> t = mAudioRecordThread;
if (t != 0) {
// A callback thread exists: wake it to service the record buffer.
t->resume();
} else {
// No callback thread: raise the calling thread's priority for audio
// work, saving the previous priority/scheduling group for restore.
mPreviousPriority = getpriority(PRIO_PROCESS, 0);
get_sched_policy(0, &mPreviousSchedulingGroup);
androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
}
// we've successfully started, log that time
mMediaMetrics.logStart(systemTime());
}
return status;
}
这里最重要的代码就是status = mAudioRecord->start(event, triggerSession).transactionError();,其中mAudioRecord的定义为sp<media::IAudioRecord> mAudioRecord;,IAudioRecord接口由AudioFlinger侧的RecordHandle(继承自BnAudioRecord)实现,mAudioRecord->start(event, triggerSession)经由Binder最终调用到服务端RecordHandle的start函数,transactionError()则取出本次Binder事务的状态码。
//frameworks/av/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
// Binder interface between the client-side AudioRecord (libaudioclient)
// and its server-side implementation; start() requests that capture begin
// for the given sync event and trigger session. (Snippet elided: "......"
// marks other members not shown here.)
interface IAudioRecord {
......
void start(int /*AudioSystem::sync_event_t*/ event, int /*audio_session_t*/ triggerSession);
......
}
AudioFlinger RecordHandle start
调用AudioFlinger RecordHandle的start方法: