Android WebRTC development notes: switching the front/back camera, toggling between video and voice-only, and muting/unmuting



import android.content.Context;
import android.util.Log;

import org.webrtc.AudioSource;
import org.webrtc.AudioTrack;
import org.webrtc.Camera1Enumerator;
import org.webrtc.Camera2Enumerator;
import org.webrtc.CameraEnumerator;
import org.webrtc.CameraVideoCapturer;
import org.webrtc.EglBase;
import org.webrtc.MediaConstraints;
import org.webrtc.MediaStream;
import org.webrtc.PeerConnectionFactory;
import org.webrtc.SurfaceTextureHelper;
import org.webrtc.SurfaceViewRenderer;
import org.webrtc.VideoSource;
import org.webrtc.VideoTrack;
import org.webrtc.voiceengine.WebRtcAudioUtils;

/**
 * Handles local media capture (camera and microphone).
 */
public class MediaCapturer {
    private static final String TAG = "MediaCapturer";

    private static final String MEDIA_STREAM_ID = "ARDAMS";
    private static final String VIDEO_TRACK_ID = "ARDAMSv0";
    private static final String AUDIO_TRACK_ID = "ARDAMSa0";

    private CameraVideoCapturer mCameraVideoCapturer;
    private final PeerConnectionFactory mPeerConnectionFactory;
    private final MediaStream mMediaStream;

    public MediaCapturer() {
        // Assumes PeerConnectionFactory.initialize(...) has already been called
        // once (e.g. in Application.onCreate) before this builder is used.
        mPeerConnectionFactory = PeerConnectionFactory.builder().createPeerConnectionFactory();
        mMediaStream = mPeerConnectionFactory.createLocalMediaStream(MEDIA_STREAM_ID);
    }

    /**
     * Switch between the front and back cameras.
     */
    public void changeVideoCapturer() {
        if (mCameraVideoCapturer != null) {
            mCameraVideoCapturer.switchCamera(null);
        }
    }
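
    /*
     * A sketch, not part of the original class: switchCamera(...) also accepts a
     * CameraSwitchHandler callback, which is useful for fixing up preview
     * mirroring once the switch completes. The localView parameter is an
     * assumption added here for illustration.
     */
    public void changeVideoCapturer(final SurfaceViewRenderer localView) {
        if (mCameraVideoCapturer == null) {
            return;
        }
        mCameraVideoCapturer.switchCamera(new CameraVideoCapturer.CameraSwitchHandler() {
            @Override
            public void onCameraSwitchDone(boolean isFrontCamera) {
                // Mirror the local preview only while the front camera is active.
                localView.setMirror(isFrontCamera);
            }

            @Override
            public void onCameraSwitchError(String errorDescription) {
                Log.e(TAG, "onCameraSwitchError: " + errorDescription);
            }
        });
    }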

    /**
     * Release local media resources when the call ends.
     */
    public void closeMediaCapturer() {
        if (mCameraVideoCapturer != null) {
            try {
                // Stop frame delivery before disposing the capturer.
                mCameraVideoCapturer.stopCapture();
            } catch (InterruptedException e) {
                Log.e(TAG, "stopCapture interrupted", e);
            }
            mCameraVideoCapturer.dispose();
        }
        if (mMediaStream != null) {
            mMediaStream.dispose();
        }
    }

    /**
     * Toggle between video and voice-only mode.
     *
     * @param voiceOnly true disables the local video track (voice-only call),
     *                  false re-enables it
     */
    public void setVideoOrVoice(boolean voiceOnly) {
        if (!mMediaStream.videoTracks.isEmpty()) {
            mMediaStream.videoTracks.get(0).setEnabled(!voiceOnly);
        }
    }

    /**
     * Toggle mute.
     *
     * @param mute true disables the local audio track (muted),
     *             false re-enables it
     */
    public void setVoice(boolean mute) {
        if (!mMediaStream.audioTracks.isEmpty()) {
            mMediaStream.audioTracks.get(0).setEnabled(!mute);
        }
    }

    /**
     * Initialize the local camera, preferring the front-facing device.
     *
     * @param context Context
     * @throws Exception if no suitable camera device is found
     */
    public void initCamera(Context context) throws Exception {
        boolean isCamera2Supported = Camera2Enumerator.isSupported(context);

        CameraEnumerator cameraEnumerator;
        if (isCamera2Supported) {
            cameraEnumerator = new Camera2Enumerator(context);
        } else {
            cameraEnumerator = new Camera1Enumerator();
        }

        final String[] deviceNames = cameraEnumerator.getDeviceNames();

        for (String deviceName : deviceNames) {
            // Prefer the front-facing camera and stop at the first match.
            if (cameraEnumerator.isFrontFacing(deviceName)) {
                mCameraVideoCapturer = cameraEnumerator.createCapturer(deviceName, new MediaCapturerEventHandler());
                Log.d(TAG, "created camera video capturer deviceName=" + deviceName);
                break;
            }
        }

        if (mCameraVideoCapturer == null) {
            throw new Exception("Failed to get Camera Device");
        }
    }

    /**
     * Create the local video track from the camera capturer and attach it
     * to the local preview renderer.
     *
     * @param context        Context
     * @param localVideoView local preview renderer
     * @param eglBaseContext shared EGL context
     * @return VideoTrack
     */
    public VideoTrack createVideoTrack(Context context, SurfaceViewRenderer localVideoView, EglBase.Context eglBaseContext) {
        if (mCameraVideoCapturer == null) {
            throw new IllegalStateException("Camera must be initialized");
        }

        SurfaceTextureHelper surfaceTextureHelper = SurfaceTextureHelper.create("CaptureThread", eglBaseContext);
        VideoSource videoSource = mPeerConnectionFactory.createVideoSource(false);

        mCameraVideoCapturer.initialize(surfaceTextureHelper, context, videoSource.getCapturerObserver());

        // Capture 640x480 @ 30fps
        mCameraVideoCapturer.startCapture(640, 480, 30);

        VideoTrack videoTrack = mPeerConnectionFactory.createVideoTrack(VIDEO_TRACK_ID, videoSource);
        videoTrack.setEnabled(true);
        localVideoView.setMirror(true);
        localVideoView.setEnableHardwareScaler(true);
        mMediaStream.addTrack(videoTrack);

        videoTrack.addSink(localVideoView);

        return videoTrack;
    }
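
    /*
     * A sketch, not part of the original class: VideoCapturer also exposes
     * changeCaptureFormat(...), which lets the app lower resolution or frame
     * rate mid-call (for example when bandwidth drops).
     */
    public void changeCaptureFormat(int width, int height, int fps) {
        if (mCameraVideoCapturer != null) {
            mCameraVideoCapturer.changeCaptureFormat(width, height, fps);
        }
    }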

    /**
     * Create local audio track
     *
     * @return AudioTrack
     */
    public AudioTrack createAudioTrack() {
        // Enable WebRTC's software AEC/NS before audio capture starts
        // (legacy org.webrtc.voiceengine API; the flags are read when
        // recording begins).
        WebRtcAudioUtils.setWebRtcBasedAcousticEchoCanceler(true);
        WebRtcAudioUtils.setWebRtcBasedNoiseSuppressor(true);

        AudioSource audioSource = mPeerConnectionFactory.createAudioSource(new MediaConstraints());

        AudioTrack audioTrack = mPeerConnectionFactory.createAudioTrack(AUDIO_TRACK_ID, audioSource);
        audioTrack.setEnabled(true);
        mMediaStream.addTrack(audioTrack);

        return audioTrack;
    }
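
    /*
     * A sketch, not part of the original class: instead of the empty
     * MediaConstraints above, legacy "goog" audio-processing hints can be
     * passed to createAudioSource(...). The key names are assumptions from
     * the legacy constraints API; newer WebRTC builds may ignore them.
     */
    private static MediaConstraints audioProcessingConstraints() {
        MediaConstraints constraints = new MediaConstraints();
        constraints.mandatory.add(new MediaConstraints.KeyValuePair("googEchoCancellation", "true"));
        constraints.mandatory.add(new MediaConstraints.KeyValuePair("googNoiseSuppression", "true"));
        constraints.mandatory.add(new MediaConstraints.KeyValuePair("googAutoGainControl", "true"));
        return constraints;
    }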

    private class MediaCapturerEventHandler implements CameraVideoCapturer.CameraEventsHandler {
        @Override
        public void onCameraOpening(String s) {
            Log.d(TAG, "onCameraOpening s=" + s);
        }

        @Override
        public void onFirstFrameAvailable() {
            Log.d(TAG, "onFirstFrameAvailable");
        }

        @Override
        public void onCameraFreezed(String s) {
            Log.d(TAG, "onCameraFreezed s=" + s);
        }

        @Override
        public void onCameraError(String s) {
            Log.e(TAG, "onCameraError s=" + s);
        }

        @Override
        public void onCameraDisconnected() {
            Log.d(TAG, "onCameraDisconnected");
        }

        @Override
        public void onCameraClosed() {
            Log.d(TAG, "onCameraClosed");
        }
    }
}
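
Usage sketch (assumptions: an Activity with a SurfaceViewRenderer field named localVideoView; exact initialization options vary by WebRTC release). The key ordering constraint is that PeerConnectionFactory.initialize(...) must run before the MediaCapturer constructor:

// Once per process, e.g. in Application.onCreate():
PeerConnectionFactory.initialize(
        PeerConnectionFactory.InitializationOptions.builder(getApplicationContext())
                .createInitializationOptions());

// In the call Activity:
EglBase eglBase = EglBase.create();
localVideoView.init(eglBase.getEglBaseContext(), null);

MediaCapturer capturer = new MediaCapturer();
try {
    capturer.initCamera(this);  // throws if no front-facing camera was found
    VideoTrack videoTrack = capturer.createVideoTrack(this, localVideoView, eglBase.getEglBaseContext());
    AudioTrack audioTrack = capturer.createAudioTrack();
} catch (Exception e) {
    Log.e("CallActivity", "camera init failed", e);
}

// Later, driven by UI buttons:
// capturer.changeVideoCapturer();   // flip front/back camera
// capturer.setVideoOrVoice(true);   // drop to voice-only
// capturer.setVoice(true);          // mute the microphone
// capturer.closeMediaCapturer();    // hang up and release resources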

 
