最近一直在尝试android下的音视频通话,觉得有几个任务模块得完成:
1、数据的采集与播放
2、数据的编解码
3、音视频流的实时传输
以下是音频流采集与播放的简单思路,使用AudioRecord采集音频流,使用jlibrtp进行传输,使用AudioTrack播放接收到的音频流。
这里只是初步的实现,仍然存在较大的时延,后续还需要加入音频编解码(压缩),因为直接录制的原始 PCM 流数据量大,并不适合在网络上直接传输。
欢迎大家指出我这里实现存在的问题
主界面Activity
/**
 * Demo activity: records PCM audio from the microphone on a background thread
 * and streams it over RTP (via {@link SoundSender}) to a local
 * {@link SoundReceiver}, which plays it back through an AudioTrack.
 */
public class MainActivity extends Activity {
    private String TAG = "MainActivity";
    // Written by the UI thread (stop button) and polled by the recording
    // thread, so it must be volatile for the stop signal to be visible.
    private volatile boolean isRecording = false;
    private Button btn_record;
    private Button btn_record_finish;
    private Context context;
    private SoundReceiver receiver;
    /**
     * Audio sample rate in Hz (音频采样率).
     */
    private int frequency = 11025;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        context = this;
        btn_record = (Button) findViewById(R.id.btn_record);
        btn_record_finish = (Button) findViewById(R.id.btn_record_finish);
        btn_record.setOnClickListener(clickListener);
        btn_record_finish.setOnClickListener(clickListener);
        // The receiver binds UDP sockets, so it is created off the UI thread.
        new Thread() {
            @Override
            public void run() {
                receiver = new SoundReceiver(10003, 10004);
            }
        }.start();
    }

    @Override
    protected void onDestroy() {
        super.onDestroy();
        isRecording = false;
        // receiver is assigned asynchronously in onCreate; it may still be
        // null if the activity is destroyed before that thread has run.
        if (receiver != null) {
            receiver.destroySession();
        }
    }

    private OnClickListener clickListener = new OnClickListener() {
        @Override
        public void onClick(View v) {
            switch (v.getId()) {
            case R.id.btn_record:
                record();
                break;
            case R.id.btn_record_finish:
                // Signals the recording thread to drain its loop and clean up.
                isRecording = false;
                break;
            }
        }
    };

    /**
     * Starts a background thread that captures microphone audio and sends
     * each chunk over RTP until {@link #isRecording} is cleared.
     */
    public void record() {
        new Thread() {
            @Override
            public void run() {
                int channelConfiguration = AudioFormat.CHANNEL_IN_STEREO;
                int audioEncoding = AudioFormat.ENCODING_PCM_16BIT;
                Participant participant = new Participant("127.0.0.1", 10003, 10004);
                SoundSender sender = new SoundSender(10001, 10002, participant);
                int bufferSize = AudioRecord.getMinBufferSize(frequency, channelConfiguration, audioEncoding);
                AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, frequency, channelConfiguration, audioEncoding, bufferSize);
                byte[] buffer = new byte[bufferSize];
                try {
                    audioRecord.startRecording();
                    isRecording = true;
                    while (isRecording) {
                        int bufferReadResult = audioRecord.read(buffer, 0, bufferSize);
                        // read() may return fewer bytes than requested (or a
                        // negative error code) — only ship what was captured.
                        if (bufferReadResult > 0) {
                            Log.d(TAG, "send buffer length = " + bufferReadResult);
                            sender.startSend(java.util.Arrays.copyOf(buffer, bufferReadResult));
                        }
                    }
                } catch (Throwable t) {
                    Log.e(TAG, "Recording Failed", t);
                } finally {
                    // Only tear down resources owned by this thread; the
                    // receiver belongs to the activity and is closed in
                    // onDestroy, not here.
                    sender.destroySession();
                    audioRecord.stop();
                    audioRecord.release();
                }
            }
        }.start();
    }
}
SoundSender类:使用jlibrtp发送音频流数据
/**
 * Sends raw audio chunks over RTP using jlibrtp. Binds its own RTP/RTCP
 * UDP sockets and registers the given participants as destinations.
 */
public class SoundSender implements RTPAppIntf {
    private String TAG = "SoundSender";
    private RTPSession rtpSession = null;

    /**
     * @param rtpPort      local UDP port for RTP data
     * @param rtcpPort     local UDP port for RTCP control packets
     * @param participants remote endpoints the stream is sent to
     */
    public SoundSender(int rtpPort, int rtcpPort, Participant... participants) {
        DatagramSocket rtpSocket = null;
        DatagramSocket rtcpSocket = null;
        try {
            rtpSocket = new DatagramSocket(rtpPort);
            rtcpSocket = new DatagramSocket(rtcpPort);
        } catch (Exception e) {
            Log.e(TAG, "RTPSession failed to obtain sender ports " + rtpPort + "/" + rtcpPort, e);
        }
        // Passing null sockets into RTPSession would throw an opaque NPE
        // later; leave rtpSession null instead so the failure is explicit
        // and the guarded methods below become no-ops.
        if (rtpSocket == null || rtcpSocket == null) {
            return;
        }
        rtpSession = new RTPSession(rtpSocket, rtcpSocket);
        rtpSession.RTPSessionRegister(this, null, null);
        Log.d(TAG, "CNAME: " + rtpSession.CNAME());
        // Register every destination the stream should be sent to.
        for (Participant participant : participants) {
            rtpSession.addParticipant(participant);
        }
    }

    /**
     * Sends one chunk of audio data to all registered participants.
     * No-op when session setup failed.
     */
    public void startSend(byte[] data) {
        if (rtpSession != null) {
            rtpSession.sendData(data);
        }
    }

    /** Ends the RTP session (sends BYE). No-op when setup failed. */
    public void destroySession() {
        if (rtpSession != null) {
            rtpSession.endSession();
        }
    }

    @Override
    public void receiveData(DataFrame frame, Participant participant) {
        // Send-only session: incoming data is ignored.
    }

    /**
     * jlibrtp session event callback; the type codes are defined by RTPAppIntf.
     */
    @Override
    public void userEvent(int type, Participant[] participant) {
        switch (type) {
        case 1:
            Log.d(TAG, TAG + " Bye.");
            break;
        case 2:
            break;
        case 3:
            break;
        case 4:
            Log.d(TAG, "SDES packet received");
            break;
        case 5:
            Log.d(TAG, "Matched SSRC to ip-address provided by application");
            break;
        default:
            break;
        }
    }

    /** One packet per frame; jlibrtp uses this for frame reassembly. */
    @Override
    public int frameSize(int payloadType) {
        return 1;
    }
}
SoundReceiver:接收音频流数据,并使用AudioTrack进行播放
/**
 * Receives raw audio chunks over RTP using jlibrtp and plays them back
 * immediately through a streaming-mode AudioTrack.
 */
public class SoundReceiver implements RTPAppIntf {
    private String TAG = "SoundReceiver";
    private RTPSession rtpSession = null;
    private AudioTrack audioTrack;
    private int bufferSize;
    /**
     * Audio sample rate in Hz (音频采样率); must match the sender's rate.
     */
    private int frequency = 11025;

    /**
     * @param rtpPort  local UDP port for RTP data
     * @param rtcpPort local UDP port for RTCP control packets
     */
    public SoundReceiver(int rtpPort, int rtcpPort) {
        DatagramSocket rtpSocket = null;
        DatagramSocket rtcpSocket = null;
        try {
            rtpSocket = new DatagramSocket(rtpPort);
            rtcpSocket = new DatagramSocket(rtcpPort);
        } catch (Exception e) {
            Log.e(TAG, "RTPSession failed to obtain receiver ports " + rtpPort + "/" + rtcpPort, e);
        }
        // Avoid the opaque NPE that new RTPSession(null, null) would cause;
        // the guarded methods below become no-ops instead.
        if (rtpSocket != null && rtcpSocket != null) {
            rtpSession = new RTPSession(rtpSocket, rtcpSocket);
            rtpSession.naivePktReception(true);
            rtpSession.RTPSessionRegister(this, null, null);
        }
        // The track plays mono output, so the minimum buffer size must be
        // queried with the matching OUTPUT mono mask (the original used
        // CHANNEL_IN_STEREO, an input mask, which yields a wrong size).
        bufferSize = AudioTrack.getMinBufferSize(frequency, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, frequency, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
                bufferSize, AudioTrack.MODE_STREAM);
        audioTrack.play();
    }

    /**
     * Ends the RTP session and releases the playback track. Safe to call
     * even if construction partially failed.
     */
    public void destroySession() {
        if (rtpSession != null) {
            rtpSession.endSession();
        }
        releaseTrack();
    }

    // Stops and releases the AudioTrack exactly once; nulling the field keeps
    // receiveData from writing to a released track.
    private void releaseTrack() {
        if (audioTrack != null) {
            audioTrack.stop();
            audioTrack.release();
            audioTrack = null;
        }
    }

    /** Plays each incoming RTP frame's payload as-is (raw PCM). */
    @Override
    public void receiveData(DataFrame frame, Participant participant) {
        if (audioTrack != null) {
            byte[] data = frame.getConcatenatedData();
            audioTrack.write(data, 0, data.length);
        }
    }

    /**
     * jlibrtp session event callback; the type codes are defined by RTPAppIntf.
     */
    @Override
    public void userEvent(int type, Participant[] participant) {
        switch (type) {
        case 1:
            Log.d(TAG, TAG + " Bye.");
            // Remote side ended the session: stop playback and free the track.
            releaseTrack();
            break;
        case 2:
            break;
        case 3:
            break;
        case 4:
            Log.d(TAG, "SDES packet received");
            break;
        case 5:
            Log.d(TAG, "Matched SSRC to ip-address provided by application");
            break;
        default:
            break;
        }
    }

    /** One packet per frame; jlibrtp uses this for frame reassembly. */
    @Override
    public int frameSize(int payloadType) {
        return 1;
    }
}