Both MediaPlayer and AudioTrack can be used to play sound, and both expose Java APIs to application developers. Although either one can play audio, they differ significantly. The biggest difference is that MediaPlayer can play sound files in many formats, such as MP3, AAC, WAV, OGG and MIDI; MediaPlayer creates the corresponding audio decoder in the framework layer. AudioTrack, by contrast, can only play PCM streams that have already been decoded; when given a file it supports only the WAV format, because a WAV file is for the most part just a wrapped PCM stream. AudioTrack creates no decoder, so it can only play WAV files, which need no decoding. The two are nonetheless closely related: in the framework layer MediaPlayer still creates an AudioTrack, passes the decoded PCM stream to it, and the AudioTrack in turn hands the data to AudioFlinger for mixing before it finally reaches the hardware for playback. In that sense, MediaPlayer contains an AudioTrack. An example of playing audio with AudioTrack, taken from the CTS tests:
/* cts/tests/tests/media/src/android/media/cts */
public void testSetStereoVolumeMax() throws Exception {
final String TEST_NAME = "testSetStereoVolumeMax";
final int TEST_SR = 22050;
final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
// -------- initialization --------------
/* Step 1. */
// Compute the buffer size to allocate for the AudioTrack from the HAL buffer size;
// this buffer will later back the track's shared memory block (mblck) used for data transfer.
int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
/* Step 2. */
AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF,
TEST_FORMAT, 2 * minBuffSize, TEST_MODE);
byte data[] = new byte[minBuffSize];
// -------- test --------------
track.write(data, OFFSET_DEFAULT, data.length);
track.write(data, OFFSET_DEFAULT, data.length);
track.play();
float maxVol = AudioTrack.getMaxVolume();
assertTrue(TEST_NAME, track.setStereoVolume(maxVol, maxVol) == AudioTrack.SUCCESS);
// -------- tear down --------------
track.release();
}
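The CTS case above only exercises setStereoVolume(); it never streams meaningful audio. To make the earlier point concrete, namely that AudioTrack consumes already-decoded PCM, here is a minimal sketch of typical MODE_STREAM use. It is not part of the CTS sources; the class name PcmStreamPlayer and the sine-wave generator are illustrative stand-ins for whatever decoder or file reader would normally produce the PCM data:

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

public class PcmStreamPlayer {
    // Play a generated 440 Hz tone for the given number of seconds.
    public void playTone(double seconds) {
        final int sampleRate = 44100;
        // Minimum buffer for a streaming track; doubling it gives some headroom.
        int minBuf = AudioTrack.getMinBufferSize(sampleRate,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
                minBuf * 2, AudioTrack.MODE_STREAM);

        int totalFrames = (int) (seconds * sampleRate);
        short[] chunk = new short[minBuf / 2];  // 16-bit mono: 2 bytes per frame
        track.play();
        int written = 0;
        while (written < totalFrames) {
            int n = Math.min(chunk.length, totalFrames - written);
            for (int i = 0; i < n; i++) {
                chunk[i] = (short) (0.5 * Short.MAX_VALUE
                        * Math.sin(2 * Math.PI * 440 * (written + i) / sampleRate));
            }
            // write() blocks until AudioFlinger has drained enough of the shared buffer.
            track.write(chunk, 0, n);
            written += n;
        }
        track.stop();
        track.release();
    }
}

The write() loop is the application-side half of the data path described earlier: the PCM lands in the track's buffer, AudioFlinger mixes it, and only then does it reach the hardware.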
// The session ID defaults to 0
public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
int bufferSizeInBytes, int mode)
throws IllegalArgumentException {
this(streamType, sampleRateInHz, channelConfig, audioFormat,
bufferSizeInBytes, mode, 0 /*session*/);
}
public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
int bufferSizeInBytes, int mode, int sessionId)
throws IllegalArgumentException {
// mState already == STATE_UNINITIALIZED
// remember which looper is associated with the AudioTrack instantiation
Looper looper;
if ((looper = Looper.myLooper()) == null) {
looper = Looper.getMainLooper();
}
// Remember the Looper of the thread that created this AudioTrack, so messages can be delivered to it later
mInitializationLooper = looper;
/**
 * Parameter checks:
 * 1. Verify that streamType is valid and assign it to mStreamType.
 * 2. Verify that sampleRateInHz lies between 4000 and 48000 and assign it to mSampleRate.
 * 3. Set mChannels, normalizing the channel configuration to CHANNEL_OUT_MONO (mono)
 *    or CHANNEL_OUT_STEREO (stereo).
 * 4. Set mAudioFormat:
 *    ENCODING_PCM_16BIT, ENCODING_DEFAULT ---> ENCODING_PCM_16BIT
 *    ENCODING_PCM_8BIT ---> ENCODING_PCM_8BIT
 * 5. Set mDataLoadMode: MODE_STREAM or MODE_STATIC.
 */
audioParamCheck(streamType, sampleRateInHz, channelConfig, audioFormat, mode);
/* Compute the frame size in bytes from the sample format and channel count, and the
 * number of frames in the buffer = buffer size / frame size; the buffer size must be
 * an integer multiple of the frame size (see the sketch after this constructor).
 */
audioBuffSizeCheck(bufferSizeInBytes);
if (sessionId < 0) {
throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
}
int[] session = new int[1];
session[0] = sessionId;
// native initialization
int initResult = native_setup(new WeakReference<AudioTrack>(this),
mStreamType, mSampleRate, mChannels, mAudioFormat,
mNativeBufferSizeInBytes, mDataLoadMode, session);
if (initResult != SUCCESS) {
loge("Error code "+initResult+" when initializing AudioTrack.");
return; // with mState == STATE_UNINITIALIZED
}
// Update with the session ID returned by the native layer
mSessionId = session[0];
if (mDataLoadMode == MODE_STATIC) {
mState = STATE_NO_STATIC_DATA;
} else {
mState = STATE_INITIALIZED;
}
}
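As noted above, the buffer check reduces to simple frame arithmetic: a frame carries one sample per channel, so a 16-bit stereo frame is 4 bytes, and the buffer passed in must hold a whole number of frames. The following is only an illustrative sketch of that logic (the class and method names are invented here; the real audioBuffSizeCheck() in AudioTrack.java differs in detail):

import android.media.AudioFormat;

class BufferSizeCheckSketch {
    // Illustrative only -- not the framework's audioBuffSizeCheck() implementation.
    static int checkBufferSize(int bufferSizeInBytes, int channelCount, int audioFormat) {
        // 8-bit PCM uses one byte per sample; everything else here is treated as 16-bit.
        int bytesPerSample = (audioFormat == AudioFormat.ENCODING_PCM_8BIT) ? 1 : 2;
        int frameSizeInBytes = channelCount * bytesPerSample;   // e.g. 16-bit stereo -> 4 bytes
        if (bufferSizeInBytes < 1 || bufferSizeInBytes % frameSizeInBytes != 0) {
            throw new IllegalArgumentException("Invalid audio buffer size.");
        }
        return bufferSizeInBytes / frameSizeInBytes;            // frames the buffer can hold
        // The real method stores the validated size (mNativeBufferSizeInBytes) for native_setup().
    }
}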
static jint
android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_this,
jint streamType, jint sampleRateInHertz, jint javaChannelMask,
jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession)
{
ALOGV("sampleRate=%d, audioFormat(from Java)=%d, channel mask=%x, buffSize=%d",
sampleRateInHertz, audioFormat, javaChannelMask, buffSizeInBytes);
uint32_t afSampleRate;
size_t afFrameCount;
// Read the frame count for this audio stream type from AudioPolicyService via AudioSystem.
// Note that the getOutputXxxx() functions are actually quite involved; taking
// getOutputFrameCount as an example, the steps are roughly:
// 1. find the strategy for the streamType,
// 2. then find the device for that strategy,
// 3. then find the output for that device,
// 4. and finally return that output's frame count (the number of frames in the hardware buffer).
if (AudioSystem::getOutputFrameCount(&afFrameCount, (audio_stream_type_t) streamType) != NO_ERROR) {
ALOGE("Error creating AudioTrack: Could not get AudioSystem frame count.");
return (jint) AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
}
if (AudioSystem::getOutputSamplingRate(&afSampleRate, (audio_stream_type_t) streamType) != NO_ERROR) {
ALOGE("Error creating AudioTrack: Could not get AudioSystem sampling rate.");
return (jint) AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
}
// Java channel masks don't map directly to the native definition, but it's a simple shift
// to skip the two deprecated channel configurations "default" and "mono".
// The native layer has no "default" or "mono" channel definitions, so the Java mask must be converted
uint32_t nativeChannelMask = ((uint32_t)javaChannelMask) >> 2;
if (!audio_is_output_channel(nativeChannelMask)) {
ALOGE("Error creating AudioTrack: invalid channel mask %#x.", javaChannelMask);
return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
}
// Count the number of bits set to 1; each bit represents one channel
int nbChannels = popcount(nativeChannelMask);
// check the stream type
audio_stream_type_t atStreamType;
switch (streamType) {
case AUDIO_STREAM_VOICE_CALL:
case AUDIO_STREAM_SYSTEM:
case AUDIO_STREAM_RING:
case AUDIO_STREAM_MUSIC:
case AUDIO_STREAM_ALARM:
case AUDIO_STREAM_NOTIFICATION:
case AUDIO_STREAM_BLUETOOTH_SCO:
case AUDIO_STREAM_DTMF:
atStreamType = (audio_stream_type_t) streamType;
break;
default:
ALOGE("Error creating AudioTrack: unknown stream type.");
return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDSTREAMTYPE;
}
// check the format.
// This function was called from Java, so we compare the format against the Java constants
if ((audioFormat != ENCODING_PCM_16BIT) && (audioFormat != ENCODING_PCM_8BIT)) {
ALOGE("Error creating AudioTrack: unsupported audio format.");
return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
}
// for the moment 8bitPCM in MODE_STATIC is not supported natively in the AudioTrack C++ class
// so we declare everything as 16bitPCM, the 8->16bit conversion for MODE_STATIC will be handled
// in android_media_AudioTrack_native_write_byte()
// Widen 8-bit samples to 16-bit (and double the buffer size) by declaring the format as 16-bit,
// because in static mode the native layer only supports 16-bit PCM;
// the matching conversion must also be applied later when the data is written.