在Android中录音可以用MediaRecord录音,操作比较简单。但是不够专业,就是不能对音频进行处理。如果要进行音频的实时的处理或者音频的一些封装
就可以用AudioRecord来进行录音了。
这里给出一段代码。实现了AudioRecord的录音和WAV格式音频的封装。
用AudioRecord和AudioTrack类可以进行边录边播,可以参考:http://blog.sina.com.cn/s/blog_6309e1ed0100j1rw.html
我们这里的代码没有播放。但是有封装和详解,如下:
- package com.ppmeet;
-
- import java.io.File;
- import java.io.FileInputStream;
- import java.io.FileNotFoundException;
- import java.io.FileOutputStream;
- import java.io.IOException;
- import android.app.Activity;
- import android.graphics.PixelFormat;
- import android.media.AudioFormat;
- import android.media.AudioRecord;
- import android.media.MediaRecorder;
- import android.os.Bundle;
- import android.view.View;
- import android.view.View.OnClickListener;
- import android.view.Window;
- import android.view.WindowManager;
- import android.widget.Button;
-
-
-
-
-
-
-
-
-
- public class TestAudioRecord extends Activity {
-
- private int audioSource = MediaRecorder.AudioSource.MIC;
-
- private static int sampleRateInHz = 44100;
-
- private static int channelConfig = AudioFormat.CHANNEL_IN_STEREO;
-
- private static int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
-
- private int bufferSizeInBytes = 0;
- private Button Start;
- private Button Stop;
- private AudioRecord audioRecord;
- private boolean isRecord = false;
-
- private static final String AudioName = "/sdcard/love.raw";
-
- private static final String NewAudioName = "/sdcard/new.wav";
-
- public void onCreate(Bundle savedInstanceState) {
- super.onCreate(savedInstanceState);
- getWindow().setFormat(PixelFormat.TRANSLUCENT);
- requestWindowFeature(Window.FEATURE_NO_TITLE);
- getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN,
- WindowManager.LayoutParams.FLAG_FULLSCREEN);
-
- setContentView(R.layout.main);
- init();
- }
-
- private void init() {
- Start = (Button) this.findViewById(R.id.start);
- Stop = (Button) this.findViewById(R.id.stop);
- Start.setOnClickListener(new TestAudioListener());
- Stop.setOnClickListener(new TestAudioListener());
- creatAudioRecord();
- }
-
- private void creatAudioRecord() {
-
- bufferSizeInBytes = AudioRecord.getMinBufferSize(sampleRateInHz,
- channelConfig, audioFormat);
-
- audioRecord = new AudioRecord(audioSource, sampleRateInHz,
- channelConfig, audioFormat, bufferSizeInBytes);
- }
-
- class TestAudioListener implements OnClickListener {
-
- @Override
- public void onClick(View v) {
- if (v == Start) {
- startRecord();
- }
- if (v == Stop) {
- stopRecord();
- }
-
- }
-
- }
-
- private void startRecord() {
- audioRecord.startRecording();
-
- isRecord = true;
-
- new Thread(new AudioRecordThread()).start();
- }
-
- private void stopRecord() {
- close();
- }
-
- private void close() {
- if (audioRecord != null) {
- System.out.println("stopRecord");
- isRecord = false;
- audioRecord.stop();
- audioRecord.release();
- audioRecord = null;
- }
- }
-
- class AudioRecordThread implements Runnable {
- @Override
- public void run() {
- writeDateTOFile();
- copyWaveFile(AudioName, NewAudioName);
- }
- }
-
-
-
-
-
-
- private void writeDateTOFile() {
-
- byte[] audiodata = new byte[bufferSizeInBytes];
- FileOutputStream fos = null;
- int readsize = 0;
- try {
- File file = new File(AudioName);
- if (file.exists()) {
- file.delete();
- }
- fos = new FileOutputStream(file);
- } catch (Exception e) {
- e.printStackTrace();
- }
- while (isRecord == true) {
- readsize = audioRecord.read(audiodata, 0, bufferSizeInBytes);
- if (AudioRecord.ERROR_INVALID_OPERATION != readsize) {
- try {
- fos.write(audiodata);
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
- }
- try {
- fos.close();
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
-
-
- private void copyWaveFile(String inFilename, String outFilename) {
- FileInputStream in = null;
- FileOutputStream out = null;
- long totalAudioLen = 0;
- long totalDataLen = totalAudioLen + 36;
- long longSampleRate = sampleRateInHz;
- int channels = 2;
- long byteRate = 16 * sampleRateInHz * channels / 8;
- byte[] data = new byte[bufferSizeInBytes];
- try {
- in = new FileInputStream(inFilename);
- out = new FileOutputStream(outFilename);
- totalAudioLen = in.getChannel().size();
- totalDataLen = totalAudioLen + 36;
- WriteWaveFileHeader(out, totalAudioLen, totalDataLen,
- longSampleRate, channels, byteRate);
- while (in.read(data) != -1) {
- out.write(data);
- }
- in.close();
- out.close();
- } catch (FileNotFoundException e) {
- e.printStackTrace();
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
-
-
-
-
-
-
-
- private void WriteWaveFileHeader(FileOutputStream out, long totalAudioLen,
- long totalDataLen, long longSampleRate, int channels, long byteRate)
- throws IOException {
- byte[] header = new byte[44];
- header[0] = 'R';
- header[1] = 'I';
- header[2] = 'F';
- header[3] = 'F';
- header[4] = (byte) (totalDataLen & 0xff);
- header[5] = (byte) ((totalDataLen >> 8) & 0xff);
- header[6] = (byte) ((totalDataLen >> 16) & 0xff);
- header[7] = (byte) ((totalDataLen >> 24) & 0xff);
- header[8] = 'W';
- header[9] = 'A';
- header[10] = 'V';
- header[11] = 'E';
- header[12] = 'f';
- header[13] = 'm';
- header[14] = 't';
- header[15] = ' ';
- header[16] = 16;
- header[17] = 0;
- header[18] = 0;
- header[19] = 0;
- header[20] = 1;
- header[21] = 0;
- header[22] = (byte) channels;
- header[23] = 0;
- header[24] = (byte) (longSampleRate & 0xff);
- header[25] = (byte) ((longSampleRate >> 8) & 0xff);
- header[26] = (byte) ((longSampleRate >> 16) & 0xff);
- header[27] = (byte) ((longSampleRate >> 24) & 0xff);
- header[28] = (byte) (byteRate & 0xff);
- header[29] = (byte) ((byteRate >> 8) & 0xff);
- header[30] = (byte) ((byteRate >> 16) & 0xff);
- header[31] = (byte) ((byteRate >> 24) & 0xff);
- header[32] = (byte) (2 * 16 / 8);
- header[33] = 0;
- header[34] = 16;
- header[35] = 0;
- header[36] = 'd';
- header[37] = 'a';
- header[38] = 't';
- header[39] = 'a';
- header[40] = (byte) (totalAudioLen & 0xff);
- header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
- header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
- header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
- out.write(header, 0, 44);
- }
-
- @Override
- protected void onDestroy() {
- close();
- super.onDestroy();
- }
- }
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AudioRecord和AudioTrack类是Android获取和播放音频流的重要类,放置在android.media包中。与该包中的MediaRecorder和MediaPlayer类不同,AudioRecord和AudioTrack类在获取和播放音频数据流时无需通过文件保存和文件读取,可以动态地直接获取和播放音频流,在实时处理音频数据流时非常有用。
当然,如果用户只想录音后写入文件或从文件中取得音频流进行播放,那么直接使用MediaRecorder和MediaPlayer类是首选方案,因为这两个类使用非常方便,而且成功率很高。而AudioRecord和AudioTrack类的使用却比较复杂,我们发现很多人都不能成功地使用这两个类,甚至认为Android的这两个类是不能工作的。
其实,AudioRecord和AudioTrack类的使用虽然比较复杂,但是可以工作,我们不仅可以很好地使用了这两个类,而且还通过套接字(Socket)实现了音频数据的网络传输,做到了一端使用AudioRecord获取音频流然后通过套接字传输出去,而另一端通过套接字接收后使用AudioTrack类播放。
下面是我们对AudioRecord和AudioTrack类在使用方面的经验总结:
(1)创建AudioRecord和AudioTrack类对象:创建这两个类的对象比较复杂,通过对文档的反复和仔细理解,并通过多次失败的尝试,并在北理工的某个Android大牛的网上的文章启发下,我们也最终成功地创建了这两个类的对象。创建AudioRecord和AudioTrack类对象的代码如下:
AudioRecord类:
m_in_buf_size =AudioRecord.getMinBufferSize(8000,
AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT);
m_in_rec = new AudioRecord(MediaRecorder.AudioSource.MIC,
8000,
AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT,
m_in_buf_size) ;
AudioTrack类:
m_out_buf_size = android.media.AudioTrack.getMinBufferSize(8000,
AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT);
m_out_trk = new AudioTrack(AudioManager.STREAM_MUSIC, 8000,
AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT,
m_out_buf_size,
AudioTrack.MODE_STREAM);
(2)关于AudioRecord和AudioTrack类的监听函数,不用也行。
(3)调试方面,包括初始化后看logcat信息,以确定类的工作状态,初始化是否成功等。
编写好代码,没有语法错误,调用模拟器运行、调试代码时,logcat发挥了很好的功用。刚调试时,经常会出现模拟器显示出现异常,这时我们可以在代码的一些关键语句后添加如Log.d("test1","OK");这样的语句进行标识,出现异常时我们就可以在logcat窗口观察代码执行到哪里出现异常,然后进行相应的修改、调试。模拟器不会出现异常时,又遇到了录放音的问题。录音方面,刚开始选择将语音编码数据存放在多个固定大小的文件中进行传送,但是这种情况下会出现声音断续的现象,而且要反复的建立文件,比较麻烦,后来想到要进行网上传输,直接将语音编码数据以数据流的形式传送,经过验证,这种方法可行并且使代码更加简洁。放音方面,将接收到的数据流存放在一个数组中,然后将数组中数据写到AudioTrack中。刚开始只是“嘟”几声,经过检查发现只是把数据写一次,加入循环,让数据反复写到AudioTrack中,就可以听到正常的语音了。接下来的工作主要是改善话音质量与话音延迟,在进行通话的过程中,观察logcat窗口,发现向数组中写数据时会出现Bufferflow的情况,于是把重心转移到数组大小的影响上,经过试验,发现 AudioRecord一次会读640个数据,然后就对录音和放音中有数组的地方进行实验修改。AudioRecord和AudioTrack进行实例化时,参数中各有一个数组大小,经过试验这个数组大小和AudioRecord和AudioTrack能正常实例化所需的最小Buffer大小(即上面实例化时的m_in_buf_size和m_out_buf_size参数)相等且服务器方进行缓存数据的数组尺寸是上述数值的2倍时,语音质量最好。由于录音和放音的速度不一致,受到北理工大牛的启发,在录音方面,将存放录音数据的数组放到LinkedList中,当LinkedList中数组个数达到2(这个也是经过试验验证话音质量最好时的数据)时,将先录好的数组中数据传送出去。经过上述反复试验和修改,最终使双方通话质量较好,且延时较短(大概有2秒钟)。
(4)通过套接字传输和接收数据
数据传送部分,使用的是套接字。通信双方,通过不同的端口向服务器发送请求,与服务器连接上后,开始通话向服务器发送数据,服务器通过一个套接字接收到一方的数据后,先存在一个数组中,然后将该数组中数据以数据流的形式再通过另一个套接字传送到另一方。这样就实现了双方数据的传送。
(5)代码架构
为避免反复录入和读取数据占用较多资源,使程序在进行录放音时不能执行其他命令,故将录音和放音各写成一个线程类,然后在主程序中,通过MENU控制通话的开始、停止、结束。
最后说明,AudioRecord和AudioTrack类可以用,只是稍微复杂些。以下贴出双方通信的源码,希望对大家有所帮助:
主程序Daudioclient:
package cn.Daudioclient;
import android.app.Activity;
import android.os.Bundle;
import android.view.Menu;
import android.view.MenuItem;
/**
 * Main Activity of the voice-chat demo. An options menu controls the session:
 * START spawns a recorder thread (mic -> socket) and a player thread
 * (socket -> speaker), STOP tears both down, EXIT kills the process.
 */
public class Daudioclient extends Activity {
    // Options-menu item ids.
    public static final int MENU_START_ID = Menu.FIRST;
    public static final int MENU_STOP_ID = Menu.FIRST + 1;
    public static final int MENU_EXIT_ID = Menu.FIRST + 2;

    protected Saudioserver m_player;   // playback thread (network -> AudioTrack)
    protected Saudioclient m_recorder; // capture thread (AudioRecord -> network)

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.main);
    }

    @Override
    public boolean onCreateOptionsMenu(Menu aMenu) {
        boolean res = super.onCreateOptionsMenu(aMenu);
        aMenu.add(0, MENU_START_ID, 0, "START");
        aMenu.add(0, MENU_STOP_ID, 0, "STOP");
        aMenu.add(0, MENU_EXIT_ID, 0, "EXIT");
        return res;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem aMenuItem) {
        switch (aMenuItem.getItemId()) {
        case MENU_START_ID:
            // Guard: starting twice would leak the previous pair of threads.
            if (m_player == null && m_recorder == null) {
                m_player = new Saudioserver();
                m_recorder = new Saudioclient();
                m_player.init();
                m_recorder.init();
                m_recorder.start();
                m_player.start();
            }
            break;
        case MENU_STOP_ID:
            // Bug fix: STOP before START used to dereference null and crash.
            if (m_recorder != null) {
                m_recorder.free();
                m_recorder = null;
            }
            if (m_player != null) {
                m_player.free();
                m_player = null;
            }
            break;
        case MENU_EXIT_ID:
            int pid = android.os.Process.myPid();
            android.os.Process.killProcess(pid);
            break;
        default:
            break;
        }
        return super.onOptionsItemSelected(aMenuItem);
    }
}
录音程序Saudioclient:
package cn.Daudioclient;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.Socket;
import java.net.UnknownHostException;
import java.util.LinkedList;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;
import android.util.Log;
/**
 * Capture thread: reads PCM from the microphone via AudioRecord and streams it
 * to the relay server over a TCP socket.
 *
 * Packets are staged in a small LinkedList (depth 2, tuned by the original
 * author for best voice quality) before being written to the socket.
 */
public class Saudioclient extends Thread
{
    protected AudioRecord m_in_rec;          // mic capture source
    protected int m_in_buf_size;             // min buffer size for 8 kHz mono 16-bit
    protected byte[] m_in_bytes;             // reusable read buffer
    // volatile: set false by free() on another thread to stop the loop.
    protected volatile boolean m_keep_running;
    protected Socket s;                      // connection to the relay server
    protected DataOutputStream dout;         // socket output stream
    protected LinkedList<byte[]> m_in_q;     // staging queue of cloned packets

    @Override
    public void run()
    {
        try
        {
            m_in_rec.startRecording();
            while (m_keep_running)
            {
                m_in_rec.read(m_in_bytes, 0, m_in_buf_size);
                // Clone so the queued packet is not overwritten by the next read.
                byte[] bytes_pkg = m_in_bytes.clone();
                if (m_in_q.size() >= 2)
                {
                    // Bug fix: the original called removeFirst() TWICE in one
                    // statement — it dropped every second packet and used the
                    // wrong packet's length. Remove exactly one packet.
                    byte[] head = m_in_q.removeFirst();
                    dout.write(head, 0, head.length);
                }
                m_in_q.add(bytes_pkg);
            }
            m_in_rec.stop();
            m_in_rec = null;
            m_in_bytes = null;
            dout.close();
        }
        catch (Exception e)
        {
            e.printStackTrace();
        }
    }

    /** Creates the AudioRecord and connects to the relay server (port 4332). */
    public void init()
    {
        m_in_buf_size = AudioRecord.getMinBufferSize(8000,
                AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT);
        m_in_rec = new AudioRecord(MediaRecorder.AudioSource.MIC,
                8000,
                AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT,
                m_in_buf_size);
        m_in_bytes = new byte[m_in_buf_size];
        m_keep_running = true;
        m_in_q = new LinkedList<byte[]>();
        try
        {
            s = new Socket("192.168.1.100", 4332);
            dout = new DataOutputStream(s.getOutputStream());
        }
        catch (UnknownHostException e)
        {
            e.printStackTrace();
        }
        catch (IOException e)
        {
            e.printStackTrace();
        }
    }

    /** Asks the capture loop to stop and gives it a second to drain. */
    public void free()
    {
        m_keep_running = false;
        try {
            Thread.sleep(1000);
        } catch (Exception e) {
            // Bug fix: tag and message were swapped in the original Log.d call.
            Log.d("Saudioclient", "sleep interrupted");
        }
    }
}
放音程序Saudioserver:
package cn.Daudioclient;
import java.io.DataInputStream;
import java.io.IOException;
import java.net.Socket;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;
import android.util.Log;
/**
 * Playback thread: receives PCM data from the relay server over a TCP socket
 * and feeds it into an AudioTrack in streaming mode.
 */
public class Saudioserver extends Thread
{
    protected AudioTrack m_out_trk;          // streaming playback sink
    protected int m_out_buf_size;            // min buffer size for 8 kHz mono 16-bit
    protected byte[] m_out_bytes;            // reusable receive buffer
    // volatile: set false by free() on another thread to stop the loop.
    protected volatile boolean m_keep_running;
    private Socket s;                        // connection to the relay server
    private DataInputStream din;             // socket input stream

    /** Connects to the relay server (port 4331) and creates the AudioTrack. */
    public void init()
    {
        try
        {
            s = new Socket("192.168.1.100", 4331);
            din = new DataInputStream(s.getInputStream());
            m_keep_running = true;
            m_out_buf_size = AudioTrack.getMinBufferSize(8000,
                    AudioFormat.CHANNEL_CONFIGURATION_MONO,
                    AudioFormat.ENCODING_PCM_16BIT);
            m_out_trk = new AudioTrack(AudioManager.STREAM_MUSIC, 8000,
                    AudioFormat.CHANNEL_CONFIGURATION_MONO,
                    AudioFormat.ENCODING_PCM_16BIT,
                    m_out_buf_size,
                    AudioTrack.MODE_STREAM);
            m_out_bytes = new byte[m_out_buf_size];
        }
        catch (Exception e)
        {
            e.printStackTrace();
        }
    }

    /** Asks the playback loop to stop and gives it a second to finish. */
    public void free()
    {
        m_keep_running = false;
        try {
            Thread.sleep(1000);
        } catch (Exception e) {
            // Bug fix: tag and message were swapped in the original Log.d call.
            Log.d("Saudioserver", "sleep interrupted");
        }
    }

    @Override
    public void run()
    {
        m_out_trk.play();
        while (m_keep_running) {
            try
            {
                // Bug fix: honour the number of bytes actually read. The original
                // ignored the count and wrote (a clone of) the whole buffer,
                // replaying stale bytes on short reads, and looped forever on EOF.
                int len = din.read(m_out_bytes);
                if (len > 0)
                {
                    m_out_trk.write(m_out_bytes, 0, len);
                }
                else if (len == -1)
                {
                    break;                   // peer closed the connection
                }
            }
            catch (Exception e)
            {
                e.printStackTrace();
            }
        }
        m_out_trk.stop();
        m_out_trk = null;
        try {
            din.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++