static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
    int channelCount = 0;
    switch(channelConfig) {
    case AudioFormat.CHANNEL_OUT_MONO:
    case AudioFormat.CHANNEL_CONFIGURATION_MONO:
        channelCount = 1;
        break;
    case AudioFormat.CHANNEL_OUT_STEREO:
    case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
        channelCount = 2;
        break;
    default:
        if (!isMultichannelConfigSupported(channelConfig)) {
            loge("getMinBufferSize(): Invalid channel configuration.");
            return ERROR_BAD_VALUE;
        } else {
            channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
    }

    if (!AudioFormat.isPublicEncoding(audioFormat)) {
        loge("getMinBufferSize(): Invalid audio format.");
        return ERROR_BAD_VALUE;
    }

    // sample rate, note these values are subject to change
    // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed
    if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) ||
            (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) {
        loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
        return ERROR_BAD_VALUE;
    }

    int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
    if (size <= 0) {
        loge("getMinBufferSize(): error querying hardware");
        return ERROR;
    } else {
        return size;
    }
}
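For context, this is roughly how an application uses the API when setting up playback (a minimal sketch; the deprecated stream-type AudioTrack constructor is used for brevity):

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

// Query the minimum buffer size for 44.1 kHz, stereo, 16-bit PCM.
int minSize = AudioTrack.getMinBufferSize(
        44100,
        AudioFormat.CHANNEL_OUT_STEREO,
        AudioFormat.ENCODING_PCM_16BIT);

if (minSize > 0) {
    // A streaming track must be created with at least the minimum size.
    AudioTrack track = new AudioTrack(
            AudioManager.STREAM_MUSIC,
            44100,
            AudioFormat.CHANNEL_OUT_STEREO,
            AudioFormat.ENCODING_PCM_16BIT,
            minSize,
            AudioTrack.MODE_STREAM);
}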
getMinBufferSize() delegates to native_get_min_buff_size, which is registered in:
frameworks/base/core/jni/android_media_AudioTrack.cpp
{"native_get_min_buff_size", "(III)I", (void *)android_media_AudioTrack_get_min_buff_size}
// ----------------------------------------------------------------------------
// returns the minimum required size for the successful creation of a streaming AudioTrack
// returns -1 if there was an error querying the hardware.
static jint android_media_AudioTrack_get_min_buff_size(JNIEnv *env, jobject thiz,
        jint sampleRateInHertz, jint channelCount, jint audioFormat) {

    size_t frameCount;
    const status_t status = AudioTrack::getMinFrameCount(&frameCount, AUDIO_STREAM_DEFAULT,
            sampleRateInHertz);
    if (status != NO_ERROR) {
        ALOGE("AudioTrack::getMinFrameCount() for sample rate %d failed with status %d",
                sampleRateInHertz, status);
        return -1;
    }
    const audio_format_t format = audioFormatToNative(audioFormat);
    if (audio_has_proportional_frames(format)) {
        const size_t bytesPerSample = audio_bytes_per_sample(format);
        return frameCount * channelCount * bytesPerSample;
    } else {
        return frameCount;
    }
}
Only for proportional-frame formats (linear PCM, AUDIO_FORMAT_PCM_*, and AUDIO_FORMAT_IEC61937) does the function return a byte count of frameCount * channelCount * bytesPerSample; for any other format it returns frameCount directly.
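As a quick sanity check of that conversion (the frameCount value here is hypothetical, purely for illustration):

// Hypothetical: getMinFrameCount() returned 1920 frames.
int frameCount = 1920;
int channelCount = 2;      // stereo
int bytesPerSample = 2;    // 16-bit PCM
int minBufferSize = frameCount * channelCount * bytesPerSample;  // 7680 bytes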
frameCount itself is obtained by the call to
AudioTrack::getMinFrameCount(&frameCount, AUDIO_STREAM_DEFAULT,
sampleRateInHertz);
// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME handle in server, like createTrack_l(), possible missing info:
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags (FAST)
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("%s(): Unable to query output sample rate for stream type %d; status %d",
                __func__, streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("%s(): Unable to query output frame count for stream type %d; status %d",
                __func__, streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("%s(): Unable to query output latency for stream type %d; status %d",
                __func__, streamType, status);
        return status;
    }

    // When called from createTrack, speed is 1.0f (normal speed).
    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
    *frameCount = AudioSystem::calculateMinFrameCount(afLatency, afFrameCount, afSampleRate,
            sampleRate, 1.0f /*, 0 notificationsPerBufferReq*/);

    // The formula above should always produce a non-zero value under normal circumstances:
    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
    // Return error in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("%s(): failed for streamType %d, sampleRate %u",
                __func__, streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("%s(): getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
            __func__, *frameCount, afFrameCount, afSampleRate, afLatency);
    return NO_ERROR;
}
The key call here is:
*frameCount = AudioSystem::calculateMinFrameCount(afLatency, afFrameCount, afSampleRate,
sampleRate, 1.0f /*, 0 notificationsPerBufferReq*/);
afLatency: the output latency of the hardware, in milliseconds.
afFrameCount: the size of the hardware's internal buffer, measured in frames.
afSampleRate: the output sample rate; typically the highest rate the output supports.
sampleRate: the sample rate we requested.
/* static */ size_t AudioSystem::calculateMinFrameCount(
        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
        uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
{
    // Ensure that buffer depth covers at least audio hardware latency
    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < 2) {
        minBufCount = 2;
    }
#if 0
    // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
    // but keeping the code here to make it easier to add later.
    if (minBufCount < notificationsPerBufferReq) {
        minBufCount = notificationsPerBufferReq;
    }
#endif
    ALOGV("calculateMinFrameCount afLatency %u afFrameCount %u afSampleRate %u "
            "sampleRate %u speed %f minBufCount: %u" /*" notificationsPerBufferReq %u"*/,
            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
            /*, notificationsPerBufferReq*/);
    return minBufCount * sourceFramesNeededWithTimestretch(
            sampleRate, afFrameCount, afSampleRate, speed);
}
minBufCount is at least 2. Let's break down how minBufCount is computed:
uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
1000 / afSampleRate is how many milliseconds one sample (one frame) lasts.
(1000 * afFrameCount) / afSampleRate is therefore how long it takes to play afFrameCount frames, i.e. one hardware buffer's worth of data. Note that afFrameCount is measured in frames, a unit deliberately independent of sample size and channel count (those are multiplied in only at the very end, as frameCount * channelCount * bytesPerSample), so afFrameCount alone describes the buffer's length in time.
afLatencyMs can be read as the minimum span of audio that must be buffered to cover the hardware latency.
afLatencyMs / ((1000 * afFrameCount) / afSampleRate) = latency time / time covered by one buffer,
which yields the minimum number of buffers required.
if (minBufCount < 2) {
    minBufCount = 2;
}
The buffer count is clamped so there are always at least 2 buffers (double buffering).
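A quick worked example (all hardware numbers are hypothetical, chosen only to illustrate the arithmetic):

// Hypothetical hardware values.
int afSampleRate = 48000;  // output sample rate, Hz
int afFrameCount = 960;    // one hardware buffer = 960 frames = 20 ms at 48 kHz
int afLatencyMs  = 60;     // reported output latency, ms

// One buffer covers (1000 * 960) / 48000 = 20 ms of audio.
int bufferMs = (1000 * afFrameCount) / afSampleRate;

// Covering 60 ms of latency needs 60 / 20 = 3 buffers.
int minBufCount = afLatencyMs / bufferMs;
if (minBufCount < 2) {
    minBufCount = 2;       // never fewer than 2 buffers
}
// minBufCount == 3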
Next, we need to work out how many frames each buffer holds:
sourceFramesNeededWithTimestretch(
sampleRate, afFrameCount, afSampleRate, speed)
static inline size_t sourceFramesNeededWithTimestretch(
        uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate,
        float speed) {
    // required is the number of input frames the resampler needs
    size_t required = sourceFramesNeeded(srcSampleRate, dstFramesRequired, dstSampleRate);
    // to deliver this, the time stretcher requires:
    return required * (double)speed + 1 + 1; // accounting for rounding dependencies
}

static inline size_t sourceFramesNeeded(
        uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate) {
    // +1 for rounding - always do this even if matched ratio (resampler may use phases not ratio)
    // +1 for additional sample needed for interpolation
    return srcSampleRate == dstSampleRate ? dstFramesRequired :
            size_t((uint64_t)dstFramesRequired * srcSampleRate / dstSampleRate + 1 + 1);
}
This is where resampling and playback speed (speed) come in.
If the requested sample rate equals the hardware output rate, the hardware frame count is returned directly.
If the rates differ, the formula size_t((uint64_t)dstFramesRequired * srcSampleRate / dstSampleRate + 1 + 1) scales the hardware frame count into the number of frames needed at our sample rate; the two +1 terms cover resampler phase rounding and the extra sample needed for interpolation.
If playing at a speed other than 1.0, the frame count is additionally multiplied by speed, with two more frames of margin for rounding.
As noted above, all of these counts are in frames, independent of sample size and channel count.
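For example, requesting 44.1 kHz playback on 48 kHz hardware (continuing with the hypothetical 960-frame hardware buffer):

// Hypothetical: we request 44.1 kHz, hardware runs at 48 kHz.
long srcSampleRate     = 44100;  // requested rate
long dstFramesRequired = 960;    // afFrameCount, one hardware buffer
long dstSampleRate     = 48000;  // afSampleRate
double speed           = 1.0;    // normal playback speed

// sourceFramesNeeded: 960 * 44100 / 48000 + 1 + 1 = 882 + 2 = 884 frames
long required = dstFramesRequired * srcSampleRate / dstSampleRate + 1 + 1;

// sourceFramesNeededWithTimestretch: 884 * 1.0 + 1 + 1 = 886 frames
long framesPerBuffer = (long) (required * speed) + 1 + 1;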
The frame count finally returned is minBufCount * sourceFramesNeededWithTimestretch(sampleRate, afFrameCount, afSampleRate, speed), so the buffer size ultimately returned to the Java layer is frameCount * bytesPerSample * channelCount.
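Putting the whole chain together with the hypothetical numbers above (48 kHz hardware, 960-frame buffers, 60 ms latency, a 44.1 kHz 16-bit stereo request):

// All values hypothetical, continuing the examples above.
int minBufCount      = 3;    // from the latency calculation
long framesPerBuffer = 886;  // from sourceFramesNeededWithTimestretch()
int channelCount     = 2;    // stereo
int bytesPerSample   = 2;    // 16-bit PCM

long frameCount    = minBufCount * framesPerBuffer;              // 2658 frames
long minBufferSize = frameCount * channelCount * bytesPerSample; // 10632 bytes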