1. 首先看 createPeerConnectionFactory:它只会被调用一次(重复调用会抛出 IllegalStateException)
/**
 * Creates the PeerConnectionFactory asynchronously on the executor thread.
 * May only be called once per client instance.
 *
 * @throws IllegalStateException if a factory has already been constructed.
 */
public void createPeerConnectionFactory(PeerConnectionFactory.Options options) {
  // A client owns at most one factory; a second call is a programming error.
  if (factory != null) {
    throw new IllegalStateException("PeerConnectionFactory has already been constructed");
  }
  // All factory construction happens on the dedicated executor thread.
  executor.execute(() -> createPeerConnectionFactoryInternal(options));
}
// createPeerConnectionFactoryInternal:createPeerConnectionFactory 的具体实现,在 executor 线程上执行
// Runs on the executor thread: builds the PeerConnectionFactory with the
// configured audio device module and video encoder/decoder factories.
private void createPeerConnectionFactoryInternal(PeerConnectionFactory.Options options) {
isError = false;
// Optionally dump a native WebRTC trace file to external storage for debugging.
if (peerConnectionParameters.tracing) {
PeerConnectionFactory.startInternalTracingCapture(
Environment.getExternalStorageDirectory().getAbsolutePath() + File.separator
+ "webrtc-trace.txt");
}
// Check if ISAC is used by default.
preferIsac = peerConnectionParameters.audioCodec != null
&& peerConnectionParameters.audioCodec.equals(AUDIO_CODEC_ISAC);
// It is possible to save a copy in raw PCM format on a file by checking
// the "Save input audio to file" checkbox in the Settings UI. A callback
// interface is set when this flag is enabled. As a result, a copy of recorded
// audio samples are provided to this client directly from the native audio
// layer in Java.
if (peerConnectionParameters.saveInputAudioToFile) {
if (!peerConnectionParameters.useOpenSLES) {
Log.d(TAG, "Enable recording of microphone input audio to file");
saveRecordedAudioToFile = new RecordedAudioToFileController(executor);
} else {
// TODO(henrika): ensure that the UI reflects that if OpenSL ES is selected,
// then the "Save input audio to file" option shall be grayed out.
Log.e(TAG, "Recording of input audio is not supported for OpenSL ES");
}
}
// Create the Java-based audio device module (see createJavaAudioDevice()).
final AudioDeviceModule adm = createJavaAudioDevice();
// Create peer connection factory.
if (options != null) {
Log.d(TAG, "Factory networkIgnoreMask option: " + options.networkIgnoreMask);
}
final boolean enableH264HighProfile =
VIDEO_CODEC_H264_HIGH.equals(peerConnectionParameters.videoCodec);
final VideoEncoderFactory encoderFactory; // video encoder factory
final VideoDecoderFactory decoderFactory; // video decoder factory
// Hardware codecs need the shared EGL context; otherwise fall back to software codecs.
if (peerConnectionParameters.videoCodecHwAcceleration) {
encoderFactory = new DefaultVideoEncoderFactory(
rootEglBase.getEglBaseContext(), true /* enableIntelVp8Encoder */, enableH264HighProfile);
decoderFactory = new DefaultVideoDecoderFactory(rootEglBase.getEglBaseContext());
} else {
encoderFactory = new SoftwareVideoEncoderFactory();
decoderFactory = new SoftwareVideoDecoderFactory();
}
// Configure the PeerConnectionFactory builder with the ADM and codec sub-factories.
factory = PeerConnectionFactory.builder()
.setOptions(options)
.setAudioDeviceModule(adm)
.setVideoEncoderFactory(encoderFactory)
.setVideoDecoderFactory(decoderFactory)
.createPeerConnectionFactory();
Log.d(TAG, "Peer connection factory created.");
// NOTE(review): releasing the local ADM reference here assumes the factory
// retains its own reference — standard WebRTC builder contract; confirm.
adm.release();
}
2. 创建音频设备模块(ADM),并围绕 ADM 执行了一系列配置操作
//创建音频设备模块
final AudioDeviceModule adm = createJavaAudioDevice();
/**
 * Builds the Java-based AudioDeviceModule, wiring in error and state callbacks
 * plus the optional "save input audio to file" sample sink.
 */
AudioDeviceModule createJavaAudioDevice() {
  // Enable/disable OpenSL ES playback.
  if (!peerConnectionParameters.useOpenSLES) {
    Log.w(TAG, "External OpenSLES ADM not implemented yet.");
    // TODO(magjed): Add support for external OpenSLES ADM.
  }

  // Recording-side error handler: log the failure and surface it via reportError().
  AudioRecordErrorCallback recordErrorHandler = new AudioRecordErrorCallback() {
    @Override
    public void onWebRtcAudioRecordInitError(String errorMessage) {
      Log.e(TAG, "onWebRtcAudioRecordInitError: " + errorMessage);
      reportError(errorMessage);
    }

    @Override
    public void onWebRtcAudioRecordStartError(
        JavaAudioDeviceModule.AudioRecordStartErrorCode errorCode, String errorMessage) {
      Log.e(TAG, "onWebRtcAudioRecordStartError: " + errorCode + ". " + errorMessage);
      reportError(errorMessage);
    }

    @Override
    public void onWebRtcAudioRecordError(String errorMessage) {
      Log.e(TAG, "onWebRtcAudioRecordError: " + errorMessage);
      reportError(errorMessage);
    }
  };

  // Playout-side error handler: same pattern as the recording side.
  AudioTrackErrorCallback trackErrorHandler = new AudioTrackErrorCallback() {
    @Override
    public void onWebRtcAudioTrackInitError(String errorMessage) {
      Log.e(TAG, "onWebRtcAudioTrackInitError: " + errorMessage);
      reportError(errorMessage);
    }

    @Override
    public void onWebRtcAudioTrackStartError(
        JavaAudioDeviceModule.AudioTrackStartErrorCode errorCode, String errorMessage) {
      Log.e(TAG, "onWebRtcAudioTrackStartError: " + errorCode + ". " + errorMessage);
      reportError(errorMessage);
    }

    @Override
    public void onWebRtcAudioTrackError(String errorMessage) {
      Log.e(TAG, "onWebRtcAudioTrackError: " + errorMessage);
      reportError(errorMessage);
    }
  };

  // Informational start/stop notifications for the recording side.
  AudioRecordStateCallback recordStateHandler = new AudioRecordStateCallback() {
    @Override
    public void onWebRtcAudioRecordStart() {
      Log.i(TAG, "Audio recording starts");
    }

    @Override
    public void onWebRtcAudioRecordStop() {
      Log.i(TAG, "Audio recording stops");
    }
  };

  // Informational start/stop notifications for the playout side.
  AudioTrackStateCallback trackStateHandler = new AudioTrackStateCallback() {
    @Override
    public void onWebRtcAudioTrackStart() {
      Log.i(TAG, "Audio playout starts");
    }

    @Override
    public void onWebRtcAudioTrackStop() {
      Log.i(TAG, "Audio playout stops");
    }
  };

  // Assemble the ADM. Hardware AEC/NS are used unless explicitly disabled in
  // the peer connection parameters.
  return JavaAudioDeviceModule.builder(appContext)
      .setSamplesReadyCallback(saveRecordedAudioToFile)
      .setUseHardwareAcousticEchoCanceler(!peerConnectionParameters.disableBuiltInAEC)
      .setUseHardwareNoiseSuppressor(!peerConnectionParameters.disableBuiltInNS)
      .setAudioRecordErrorCallback(recordErrorHandler)
      .setAudioTrackErrorCallback(trackErrorHandler)
      .setAudioRecordStateCallback(recordStateHandler)
      .setAudioTrackStateCallback(trackStateHandler)
      .createAudioDeviceModule();
}
/**
 * Construct an AudioDeviceModule based on the supplied arguments. The caller takes ownership
 * and is responsible for calling release().
 */
public AudioDeviceModule createAudioDeviceModule() {
  Logging.d(TAG, "createAudioDeviceModule");
  // Report which noise-suppressor implementation will be active.
  if (!useHardwareNoiseSuppressor) {
    if (isBuiltInNoiseSuppressorSupported()) {
      Logging.d(TAG, "Overriding default behavior; now using WebRTC NS!");
    }
    Logging.d(TAG, "HW NS will not be used.");
  } else {
    Logging.d(TAG, "HW NS will be used.");
  }
  // Report which echo-canceller implementation will be active.
  if (!useHardwareAcousticEchoCanceler) {
    if (isBuiltInAcousticEchoCancelerSupported()) {
      Logging.d(TAG, "Overriding default behavior; now using WebRTC AEC!");
    }
    Logging.d(TAG, "HW AEC will not be used.");
  } else {
    Logging.d(TAG, "HW AEC will be used.");
  }
  // Wire the configured callbacks into the capture and playout components.
  final WebRtcAudioRecord capture = new WebRtcAudioRecord(context, audioManager, audioSource,
      audioFormat, audioRecordErrorCallback, audioRecordStateCallback, samplesReadyCallback,
      useHardwareAcousticEchoCanceler, useHardwareNoiseSuppressor);
  final WebRtcAudioTrack playout = new WebRtcAudioTrack(
      context, audioManager, audioTrackErrorCallback, audioTrackStateCallback);
  // The module takes ownership of the capture/playout components built above.
  return new JavaAudioDeviceModule(context, audioManager, capture, playout,
      inputSampleRate, outputSampleRate, useStereoInput, useStereoOutput);
}
}