Android 音频源码分析——AndroidRecord录音(一)
Android 音频源码分析——AndroidRecord录音(二)
Android 音频源码分析——AndroidRecord音频数据传输流程
Android 音频源码分析——audioserver启动
基于Android 源码版本:9.0
- java代码路径:frameworks/base/media/java/android/media/
- jni代码路径:frameworks/base/core/jni/
- C++代码路径:frameworks/av/media/libaudioclient/
frameworks/av/media/libaudiohal/
frameworks/av/media/audioserver/
frameworks/av/services/audioflinger/
frameworks/av/services/audiopolicy/ - hal层:hardware/interfaces/audio/
hardware/libhardware/modules/audio/ - system:system/media/audio/include/system/
由于涉及源码比较多,所以只贴出部分,中间有删减许多,有兴趣的可以自行看源码。
1.简介
使⽤AudioRecord的录音流程,分为以下⼏步
- 获取 创建AudioRecord 所需的buffer size 大小;
- 根据⾳频设备和AudioRecord参数,创建AudioRecord
- 调⽤AudioRecord.startRecording开始录音。
- 读取录制的音频数据AudioRecord.read(data, 0, bufferSize)。
- 停止录音,并释放;
主要使用的API
// Static helper: query the minimum buffer size (in bytes) for the given parameters.
AudioRecord.getMinBufferSize(sampleRate, channel, audioFormat)
// Create the AudioRecord object (source = microphone, 16-bit PCM encoding).
new AudioRecord(MediaRecorder.AudioSource.MIC,
sampleRate,
channel,
AudioFormat.ENCODING_PCM_16BIT,
bufferSize
);
// Start capturing, read PCM data into `data`, then stop and release the recorder.
audiorecord.startRecording();
audiorecord.read(data, 0, bufferSize);
audiorecord.stop();
audiorecord.release();
2.getMinBufferSize
AudioRecord构造器中需要传递一个buffersize,该值需要通过AudioRecord的静态函数getMinBufferSize获取。我们先看下getMinBufferSize
getMinBufferSize:静态方法,用来获取AudioRecord对象所需的最小缓冲区大小(字节)。
// Parameters: sample rate (Hz), channel configuration, audio format.
// Returns the minimum buffer size in bytes, or ERROR_BAD_VALUE / ERROR on failure.
static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
int channelCount = 0;
switch (channelConfig) {
// (elided in this article) maps mono configs to channelCount = 1, stereo to 2.
}
// Delegate to the native layer (android_media_AudioRecord.cpp).
int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
if (size == 0) {
// 0 from native means the parameter combination is not supported.
return ERROR_BAD_VALUE;
}
else if (size == -1) {
// -1 from native means querying the buffer size failed.
return ERROR;
}
else {
// Otherwise `size` is the minimum buffer size in bytes.
return size;
}
}
接着看jni代码,android_media_AudioRecord.cpp
// Returns the minimum buffer size required to successfully create an AudioRecord instance.
// Returns 0 if the parameter combination is not supported.
// Returns -1 if querying the buffer size failed.
static jint android_media_AudioRecord_get_min_buff_size(JNIEnv *env, jobject thiz,
jint sampleRateInHertz, jint channelCount, jint audioFormat) {
ALOGV(">> android_media_AudioRecord_get_min_buff_size(%d, %d, %d)",
sampleRateInHertz, channelCount, audioFormat);
size_t frameCount = 0;
audio_format_t format = audioFormatToNative(audioFormat);// convert the Java format constant to the native audio_format_t
// Ask the native (C++) AudioRecord class for the minimum frame count.
status_t result = AudioRecord::getMinFrameCount(&frameCount,
sampleRateInHertz,
format,
audio_channel_in_mask_from_count(channelCount));
if (result == BAD_VALUE) {
return 0;
}
if (result != NO_ERROR) {
return -1;
}
// bytes = frames * channels * bytes-per-sample
return frameCount * channelCount * audio_bytes_per_sample(format);
}
C++ AudioRecord.cpp
status_t AudioRecord::getMinFrameCount(
size_t* frameCount,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask)
{
// The out-parameter must be a valid pointer.
if (frameCount == NULL) {
return BAD_VALUE;
}
size_t size;
// Query the HAL input buffer size (in bytes) via AudioSystem -> AudioFlinger.
status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &size);
if (status != NO_ERROR) {
return status;
}
// Validate the resulting frame count (bytes -> frames conversion).
// We double the size of input buffer for ping pong use of record buffer.
// Assumes audio_is_linear_pcm(format)
if ((*frameCount = (size * 2) / (audio_channel_count_from_in_mask(channelMask) *
audio_bytes_per_sample(format))) == 0) {
return BAD_VALUE;
}
return NO_ERROR;
}
AudioSystem 通过binder方式,调用AudioFlinger getInputBufferSize函数。
AudioSystem.cpp
status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, audio_format_t format,
audio_channel_mask_t channelMask, size_t* buffSize)
{
// Obtain the AudioFlinger client proxy.
const sp<AudioFlingerClient> afc = getAudioFlingerClient();
if (afc == 0) {
// AudioFlinger is not reachable yet.
return NO_INIT;
}
// Forward the query to AudioFlinger (binder call into the audioserver process).
return afc->getInputBufferSize(sampleRate, format, channelMask, buffSize);
}
AudioFlinger 运行在audioserver系统进程中,与hal层进行交互。
具体流程:
AudioFlinger::getInputBufferSize
->>DeviceHalInterface getInputBufferSize
->>DeviceHalHidl::getInputBufferSize
IDevice.hal getInputBufferSize(AudioConfig config)
generates (Result retval, uint64_t bufferSize);
->>Device.impl.h Device::getInputBufferSize
->>audio_hw.c adev_get_input_buffer_size
3.初始化AudioRecord
流程:AudioRecord.java ->android_media_AudioRecord.cpp->AudioRecord.cpp->AudioSystem.cpp->IAudioFlinger.cpp->AudioFlinger.cpp
AudioRecord构建器
public AudioRecord(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat,
int bufferSizeInBytes)
throws IllegalArgumentException {
// Wrap the legacy int parameters into AudioAttributes / AudioFormat objects and
// delegate to the (AudioAttributes, AudioFormat, int, int) constructor, letting the
// framework generate a new audio session id.
this((new AudioAttributes.Builder())
.setInternalCapturePreset(audioSource)
.build(),
(new AudioFormat.Builder())
.setChannelMask(getChannelMaskFromLegacyConfig(channelConfig,
true/*allow legacy configurations*/))
.setEncoding(audioFormat)
.setSampleRate(sampleRateInHz)
.build(),
bufferSizeInBytes,
AudioManager.AUDIO_SESSION_ID_GENERATE);
}
创建AudioAttributes、AudioFormat对象,并调用AudioRecord另一个构建器。
AudioManager.AUDIO_SESSION_ID_GENERATE表示一个特殊的音频会话ID,用于指示未知的音频会话ID,并且framework应生成一个新值。
@SystemApi
public AudioRecord(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
int sessionId) throws IllegalArgumentException {
mRecordingState = RECORDSTATE_STOPPED;
// .. (elided in this article) validate `attributes` and `format`,
// and convert sample rate, channel mask, etc.
。。。。。
// Check the buffer size.
audioBuffSizeCheck(bufferSizeInBytes);
int[] sampleRate = new int[] {
mSampleRate};
// Single-element array so the native layer can write the session id back.
int[] session = new int[1];
session[0] = sessionId;
// Call the native method to initialize the recording device.
//TODO: update native initialization when information about hardware init failure
// due to capture device already open is available.
int initResult = native_setup( new WeakReference<AudioRecord>(this),
mAudioAttributes, sampleRate, mChannelMask, mChannelIndexMask,
mAudioFormat, mNativeBufferSizeInBytes,
session, getCurrentOpPackageName(), 0 /*nativeRecordInJavaObj*/);
if (initResult != SUCCESS) {
loge("Error code "+initResult+" when initializing native AudioRecord object.");
return; // with mState == STATE_UNINITIALIZED
}
// On success, pick up the sample rate and session id chosen by the native layer.
mSampleRate = sampleRate[0];
mSessionId = session[0];
mState = STATE_INITIALIZED;
}
native_setup 对应jni中的android_media_AudioRecord_setup函数,
android_media_AudioRecord_setup
主要包含:
- 一些参数的转换、检查;AudioAttributes sampleRate channelIndexMask format
- 创建C++ 层AudioRecord对象
- lpRecorder->set 设置参数
- 更新sessionid, sampleRate到java层
- 将C++ 层对象保存在 java 某些字段中
static jint
android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
jobject jaa, jintArray jSampleRate, jint channelMask, jint channelIndexMask,
jint audioFormat, jint buffSizeInBytes, jintArray jSession, jstring opPackageName,
jlong nativeRecordInJavaObj)
{
//.........
//获取channel session
audio_attributes_t *paa = NULL;
sp<AudioRecord> lpRecorder = 0;
audiorecord_callback_cookie *lpCallbackData = NULL;
// 判断是否需要创建C++ 层AudioRecord
if (nativeRecordInJavaObj == 0) {
//.........
//检查 AudioAttributes sampleRate channelIndexMask fromat等
size_t bytesPerSample = audio_bytes_per_sample(format);
if (buffSizeInBytes == 0) {
ALOGE("Error creating AudioRecord: frameCount is 0.");
return (jint) AUDIORECORD_ERROR_SETUP_ZEROFRAMECOUNT;
}
size_t frameSize = channelCount * bytesPerSample;
size_t frameCount = buffSizeInBytes / frameSize;
// 创建C++ 层AudioRecord对象;
lpRecorder = new AudioRecord(String16(opPackageNameStr.c_str()));
// create the callback information:
// this data will be passed with every AudioRecord callback
lpCallbackData = new audiorecord_callback_cookie;
lpCallbackData->audioRecord_class = (jclass)env->NewGlobalRef(clazz);
// 我们使用弱引用,以便可以对AudioRecord对象进行垃圾回收。
lpCallbackData->audioRecord_ref = env->NewGlobalRef(weak_this);
lpCallbackData->busy = false;
//AudioRecord设置参数
const status_t status = lpRecorder-><