封装AudioTrack
AudioTrack主要是用来播放声音的,但是只能播放PCM格式的音频流。这里主要是简单的对AudioTrack进行了封装,加入了一些异常判断:
/**
 * Created by ZhangHao on 2017/5/10.
 * 播放pcm数据
 *
 * Thin wrapper around Android's AudioTrack for playing a raw PCM stream,
 * adding lifecycle guards (re-init, safe release, null checks) around the raw API.
 */
public class MyAudioTrack {
    private int mFrequency;   // sample rate in Hz, e.g. 48000
    private int mChannel;     // channel config, e.g. AudioFormat.CHANNEL_OUT_STEREO
    private int mSampBit;     // sample encoding, e.g. AudioFormat.ENCODING_PCM_16BIT
    private AudioTrack mAudioTrack;

    /**
     * @param frequency sample rate in Hz
     * @param channel   channel configuration constant
     * @param sampbit   PCM encoding constant
     */
    public MyAudioTrack(int frequency, int channel, int sampbit) {
        this.mFrequency = frequency;
        this.mChannel = channel;
        this.mSampBit = sampbit;
    }

    /**
     * 初始化
     * Creates the AudioTrack in MODE_STREAM and starts playback immediately.
     * Safe to call repeatedly: any previously created track is released first.
     */
    public void init() {
        if (mAudioTrack != null) {
            release();
        }
        int minBufSize = getMinBufferSize();
        mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
                mFrequency, mChannel, mSampBit, minBufSize, AudioTrack.MODE_STREAM);
        mAudioTrack.play();
    }

    /**
     * 释放资源
     * Stops and releases the track. FIX: stop() throws IllegalStateException on an
     * uninitialized track, so it is guarded; the field is cleared afterwards so a
     * released track can never be written to again.
     */
    public void release() {
        if (mAudioTrack != null) {
            try {
                mAudioTrack.stop();
            } catch (IllegalStateException e) {
                Log.e("MyAudioTrack", "AudioTrack Exception : " + e.toString());
            }
            mAudioTrack.release();
            mAudioTrack = null; // FIX: avoid dangling reference after release
        }
    }

    /**
     * 将解码后的pcm数据写入audioTrack播放
     *
     * @param data   数据
     * @param offset 偏移
     * @param length 需要播放的长度
     */
    public void playAudioTrack(byte[] data, int offset, int length) {
        // FIX: also bail out when the track was never initialized (or already
        // released), which previously caused a NullPointerException.
        if (mAudioTrack == null || data == null || data.length == 0) {
            return;
        }
        try {
            mAudioTrack.write(data, offset, length);
        } catch (Exception e) {
            Log.e("MyAudioTrack", "AudioTrack Exception : " + e.toString());
        }
    }

    /**
     * @return minimum buffer size in bytes required by AudioTrack for the
     *         configured rate/channel/encoding combination
     */
    public int getMinBufferSize() {
        return AudioTrack.getMinBufferSize(mFrequency,
                mChannel, mSampBit);
    }
}
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
- 46
- 47
- 48
- 49
- 50
- 51
- 52
- 53
- 54
- 55
- 56
- 57
- 58
- 59
- 60
- 61
- 62
- 63
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
- 46
- 47
- 48
- 49
- 50
- 51
- 52
- 53
- 54
- 55
- 56
- 57
- 58
- 59
- 60
- 61
- 62
- 63
这里简单介绍一下,在AudioTrack构造方法AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode)里几个变量的含义:
1.streamType:指定流的类型,主要包括以下几种:
- STREAM_ALARM:警告声
- STREAM_MUSIC:音乐声
- STREAM_RING:铃声
- STREAM_SYSTEM:系统声音
- STREAM_VOICE_CALL:电话声音
因为android系统对不同的声音的管理是分开的,所以这个参数的作用就是设置AudioTrack播放的声音类型。
2.sampleRateInHz : 采样率
3.channelConfig : 声道
4.audioFormat : 采样精度
5.bufferSizeInBytes :缓冲区大小,可以通过AudioTrack.getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)来获取
6.mode : MODE_STATIC和MODE_STREAM:
- MODE_STATIC : 直接把所有的数据加载到缓存区,不需要多次write,一般用于占用内存小,延时要求高的情况
- MODE_STREAM : 需要多次write,一般用于像从网络获取数据或者实时解码的情况,本次的例子就是这种情况。
我这里只是简单的介绍,大家可以去网上找更为详细的介绍。
AAC解码器
这里主要对MediaCodec进行封装,实现一帧帧去解码AAC。
/**
 * Created by ZhangHao on 2017/5/17.
 * 用于aac音频解码
 *
 * Decodes ADTS-framed AAC with MediaCodec and plays the resulting PCM through
 * MyAudioTrack. Usage: start() once, decode() per frame, stop() when done.
 */
public class AACDecoderUtil {
    private static final String TAG = "AACDecoderUtil";
    private static final int KEY_CHANNEL_COUNT = 2;
    private static final int KEY_SAMPLE_RATE = 48000;
    // PCM sink for decoded output.
    private MyAudioTrack mPlayer;
    private MediaCodec mDecoder;
    // Number of decode() calls that yielded no output buffer (diagnostics only).
    private int count = 0;

    /**
     * 初始化所有变量
     */
    public void start() {
        prepare();
    }

    /**
     * 初始化解码器
     *
     * @return 初始化失败返回false,成功返回true
     */
    public boolean prepare() {
        mPlayer = new MyAudioTrack(KEY_SAMPLE_RATE, AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
        mPlayer.init();
        try {
            String mime = "audio/mp4a-latm";
            mDecoder = MediaCodec.createDecoderByType(mime);
            MediaFormat mediaFormat = new MediaFormat();
            mediaFormat.setString(MediaFormat.KEY_MIME, mime);
            mediaFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, KEY_CHANNEL_COUNT);
            mediaFormat.setInteger(MediaFormat.KEY_SAMPLE_RATE, KEY_SAMPLE_RATE);
            mediaFormat.setInteger(MediaFormat.KEY_BIT_RATE, 128000);
            mediaFormat.setInteger(MediaFormat.KEY_IS_ADTS, 1);
            mediaFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
            // AudioSpecificConfig bytes — presumably AAC-LC / 48kHz / stereo; verify
            // against the actual stream if the source format changes.
            byte[] data = new byte[]{(byte) 0x11, (byte) 0x90};
            ByteBuffer csd_0 = ByteBuffer.wrap(data);
            mediaFormat.setByteBuffer("csd-0", csd_0);
            mDecoder.configure(mediaFormat, null, null, 0);
        } catch (Exception e) {
            // FIX: configure() can fail with runtime exceptions too, not only the
            // IOException thrown by createDecoderByType(); report any failure.
            e.printStackTrace();
            return false;
        }
        if (mDecoder == null) {
            return false;
        }
        mDecoder.start();
        return true;
    }

    /**
     * aac解码+播放
     *
     * @param buf    buffer holding one complete ADTS AAC frame
     * @param offset start of the frame within buf
     * @param length frame length in bytes
     */
    public void decode(byte[] buf, int offset, int length) {
        if (mDecoder == null) {
            // FIX: prepare() failed or stop() was already called; previously
            // this dereferenced a null decoder.
            return;
        }
        ByteBuffer[] codecInputBuffers = mDecoder.getInputBuffers();
        ByteBuffer[] codecOutputBuffers = mDecoder.getOutputBuffers();
        long kTimeOutUs = 0; // non-blocking dequeue
        try {
            int inputBufIndex = mDecoder.dequeueInputBuffer(kTimeOutUs);
            if (inputBufIndex >= 0) {
                ByteBuffer dstBuf = codecInputBuffers[inputBufIndex];
                dstBuf.clear();
                dstBuf.put(buf, offset, length);
                mDecoder.queueInputBuffer(inputBufIndex, 0, length, 0, 0);
            }
            MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
            int outputBufferIndex = mDecoder.dequeueOutputBuffer(info, kTimeOutUs);
            if (outputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
                // FIX: the cached output array goes stale after this event;
                // refresh it before indexing, or released buffers would be used.
                codecOutputBuffers = mDecoder.getOutputBuffers();
                outputBufferIndex = mDecoder.dequeueOutputBuffer(info, kTimeOutUs);
            }
            if (outputBufferIndex < 0) {
                count++;
            }
            ByteBuffer outputBuffer;
            while (outputBufferIndex >= 0) {
                outputBuffer = codecOutputBuffers[outputBufferIndex];
                byte[] outData = new byte[info.size];
                outputBuffer.get(outData);
                outputBuffer.clear();
                mPlayer.playAudioTrack(outData, 0, info.size);
                mDecoder.releaseOutputBuffer(outputBufferIndex, false);
                outputBufferIndex = mDecoder.dequeueOutputBuffer(info, kTimeOutUs);
            }
        } catch (Exception e) {
            Log.e(TAG, e.toString());
            e.printStackTrace();
        }
    }

    /**
     * @return how many decode() calls produced no output (diagnostics)
     */
    public int getCount() {
        return count;
    }

    /**
     * 释放资源
     */
    public void stop() {
        try {
            if (mPlayer != null) {
                mPlayer.release();
                mPlayer = null;
            }
            if (mDecoder != null) {
                mDecoder.stop();
                mDecoder.release();
                mDecoder = null; // FIX: mark decoder unusable so decode() bails out
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
- 46
- 47
- 48
- 49
- 50
- 51
- 52
- 53
- 54
- 55
- 56
- 57
- 58
- 59
- 60
- 61
- 62
- 63
- 64
- 65
- 66
- 67
- 68
- 69
- 70
- 71
- 72
- 73
- 74
- 75
- 76
- 77
- 78
- 79
- 80
- 81
- 82
- 83
- 84
- 85
- 86
- 87
- 88
- 89
- 90
- 91
- 92
- 93
- 94
- 95
- 96
- 97
- 98
- 99
- 100
- 101
- 102
- 103
- 104
- 105
- 106
- 107
- 108
- 109
- 110
- 111
- 112
- 113
- 114
- 115
- 116
- 117
- 118
- 119
- 120
- 121
- 122
- 123
- 124
- 125
- 126
- 127
- 128
- 129
- 130
- 131
- 132
- 133
- 134
- 135
- 136
- 137
- 138
- 139
- 140
- 141
- 142
- 143
- 144
- 145
- 146
- 147
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
- 46
- 47
- 48
- 49
- 50
- 51
- 52
- 53
- 54
- 55
- 56
- 57
- 58
- 59
- 60
- 61
- 62
- 63
- 64
- 65
- 66
- 67
- 68
- 69
- 70
- 71
- 72
- 73
- 74
- 75
- 76
- 77
- 78
- 79
- 80
- 81
- 82
- 83
- 84
- 85
- 86
- 87
- 88
- 89
- 90
- 91
- 92
- 93
- 94
- 95
- 96
- 97
- 98
- 99
- 100
- 101
- 102
- 103
- 104
- 105
- 106
- 107
- 108
- 109
- 110
- 111
- 112
- 113
- 114
- 115
- 116
- 117
- 118
- 119
- 120
- 121
- 122
- 123
- 124
- 125
- 126
- 127
- 128
- 129
- 130
- 131
- 132
- 133
- 134
- 135
- 136
- 137
- 138
- 139
- 140
- 141
- 142
- 143
- 144
- 145
- 146
- 147
其实这里和我之前利用MediaCodec解码H264很类似,主要就是因为解码数据类型不同,所以初始化时有区别。还有一点就是解码H264时,直接将解码后数据利用surface显示,而解码aac是将解码后的数据取出来,再利用AudioTrack播放。
读取aac文件
这里是利用线程读aac文件,获得一帧帧的aac帧数据,然后送入解码器播放。
/**
* Created by ZhangHao on 2017/4/18.
* 播放aac音频文件
*/
public class ReadAACFileThread extends Thread {
private AACDecoderUtil audioUtil;
private String filePath;
private boolean isFinish = false;
private int FRAME_MIN_LEN = 50;
private static int FRAME_MAX_LEN = 100 * 1024;
private int PRE_FRAME_TIME = 1000 / 50;
private int count = 0;
public ReadAACFileThread(String path) {
this.audioUtil = new AACDecoderUtil();
this.filePath = path;
this.audioUtil.start();
}
@Override
public void run() {
super.run();
File file = new File(filePath);
if (file.exists()) {
try {
FileInputStream fis = new FileInputStream(file);
byte[] frame = new byte[FRAME_MAX_LEN];
int frameLen = 0;
byte[] readData = new byte[10 * 1024];
long startTime = System.currentTimeMillis();
while (!isFinish) {
if (fis.available() > 0) {
int readLen = fis.read(readData);
if (frameLen + readLen < FRAME_MAX_LEN) {
System.arraycopy(readData, 0, frame, frameLen, readLen);
frameLen += readLen;
int headFirstIndex = findHead(frame, 0, frameLen);
while (headFirstIndex >= 0 && isHead(frame, headFirstIndex)) {
int headSecondIndex = findHead(frame, headFirstIndex + FRAME_MIN_LEN, frameLen);
if (headSecondIndex > 0 && isHead(frame, headSecondIndex)) {
count++;
Log.e("ReadAACFileThread", "Length : " + (headSecondIndex - headFirstIndex));
audioUtil.decode(frame, headFirstIndex, headSecondIndex - headFirstIndex);
byte[] temp = Arrays.copyOfRange(frame, headSecondIndex, frameLen);
System.arraycopy(temp, 0, frame, 0, temp.length);
frameLen = temp.length;
sleepThread(startTime, System.currentTimeMillis());
startTime = System.currentTimeMillis();
headFirstIndex = findHead(frame, 0, frameLen);
} else {
headFirstIndex = -1;
}
}
} else {
frameLen = 0;
}
} else {
isFinish = true;
}
}
} catch (Exception e) {
e.printStackTrace();
}
Log.e("ReadAACFileThread", "AllCount:" + count + "Error Count : " + audioUtil.getCount());
} else {
Log.e("ReadH264FileThread", "File not found");
}
audioUtil.stop();
}
/**
* 寻找指定buffer中AAC帧头的开始位置
*
* @param startIndex 开始的位置
* @param data 数据
* @param max 需要检测的最大值
* @return
*/
private int findHead(byte[] data, int startIndex, int max) {
int i;
for (i = startIndex; i <= max; i++) {
if (isHead(data, i))
break;
}
if (i == max) {
i = -1;
}
return i;
}
/**
* 判断aac帧头
*/
private boolean isHead(byte[] data, int offset) {
boolean result = false;
if (data[offset] == (byte) 0xFF && data[offset + 1] == (byte) 0xF1
&& data[offset + 3] == (byte) 0x80) {
result = true;
}
return result;
}
private void sleepThread(long startTime, long endTime) {
long time = PRE_FRAME_TIME - (endTime - startTime);
if (time > 0) {
try {
Thread.sleep(time);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
}
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
- 46
- 47
- 48
- 49
- 50
- 51
- 52
- 53
- 54
- 55
- 56
- 57
- 58
- 59
- 60
- 61
- 62
- 63
- 64
- 65
- 66
- 67
- 68
- 69
- 70
- 71
- 72
- 73
- 74
- 75
- 76
- 77
- 78
- 79
- 80
- 81
- 82
- 83
- 84
- 85
- 86
- 87
- 88
- 89
- 90
- 91
- 92
- 93
- 94
- 95
- 96
- 97
- 98
- 99
- 100
- 101
- 102
- 103
- 104
- 105
- 106
- 107
- 108
- 109
- 110
- 111
- 112
- 113
- 114
- 115
- 116
- 117
- 118
- 119
- 120
- 121
- 122
- 123
- 124
- 125
- 126
- 127
- 128
- 129
- 130
- 131
- 132
- 133
- 134
- 135
- 136
- 137
- 138
- 139
- 140
- 141
- 142
- 143
- 144
- 145
- 146
- 147
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
- 46
- 47
- 48
- 49
- 50
- 51
- 52
- 53
- 54
- 55
- 56
- 57
- 58
- 59
- 60
- 61
- 62
- 63
- 64
- 65
- 66
- 67
- 68
- 69
- 70
- 71
- 72
- 73
- 74
- 75
- 76
- 77
- 78
- 79
- 80
- 81
- 82
- 83
- 84
- 85
- 86
- 87
- 88
- 89
- 90
- 91
- 92
- 93
- 94
- 95
- 96
- 97
- 98
- 99
- 100
- 101
- 102
- 103
- 104
- 105
- 106
- 107
- 108
- 109
- 110
- 111
- 112
- 113
- 114
- 115
- 116
- 117
- 118
- 119
- 120
- 121
- 122
- 123
- 124
- 125
- 126
- 127
- 128
- 129
- 130
- 131
- 132
- 133
- 134
- 135
- 136
- 137
- 138
- 139
- 140
- 141
- 142
- 143
- 144
- 145
- 146
- 147
这里没有太多的东西,就是通过帧头来判断aac帧,并截取每帧数据送入解码器。我这里只是取巧做了简单的判断,对帧头的判断并不一定满足所有的aac帧头,大家可以根据实际的情况自行修改。
结语
其实,实现分离音频帧,利用MediaExtractor这个类就可以实现,但是因为我实际的数据源是来自网络,所以demo才会复杂一点。