前言
有个项目需要用到视频技术以及录制音频等,就去研究了一下。当然很多是有个思路之后借助AI的。
图挺复杂的,我就自己画了一个简单点的,也当作一个思路。
自己先建一个vue项目,因为我这里是个demo,所以我router和store都没建。
创建完之后先加载一个socket.io。
npm i socket.io-client
实时视频
首先是加入房间,其实就是弄一个roomId,让两个人都加入之后,服务端发送的时候直接往这个roomid里面的用户发送就行。我这里就都设置为001。
发起邀请
点击发送邀请后会开启本地的视频,生成stream流,然后把video的src设置为这个stream流。
<script setup>
// Demo client: joins a fixed signaling room over socket.io and starts the
// local camera/mic preview when the user clicks "invite".
import { ref, onMounted } from 'vue'
import { io } from 'socket.io-client';
const socket = ref(null)
const roomid = '001' // hard-coded room id shared by both peers
const localStream = ref(null)// local audio/video stream
const localVideo = ref(null)// <video> element that plays the local stream
const inviter = ref(false)// true when this client initiated the call
const Beinviter = ref(false)// true when this client received the invitation
// Capture camera + microphone and show the stream in the local <video>.
const getLocalStream = async () => {
const stream = await navigator.mediaDevices.getUserMedia({
audio: true,
video: { facingMode: "user" }
})
localVideo.value.srcObject = stream
localVideo.value.play()
localStream.value = stream
return stream
}
onMounted(() => {
socket.value = io('http://127.0.0.1:3000', {
transports: ['websocket'], // force the WebSocket transport (no polling fallback)
autoConnect: true, // connect immediately
reconnection: true, // auto-reconnect on drop
reconnectionAttempts: 3, // give up after 3 attempts
reconnectionDelay: 1000, // wait 1000 ms between attempts
})
socket.value.emit('joinroom', roomid)
})
// Start the local preview, then notify the other peer in the room.
const invite = async () => {
inviter.value = true
await getLocalStream()
// getOffice()
socket.value.emit('invite', roomid)
}
</script>
<template>
<!-- Panels: local preview / remote peer / recorded video / recorded audio -->
<div style="display: flex;text-align: center;">
<div class="box" style="width: 200px; height: 300px; border: 1px solid black; margin: 20px auto;">
<h2>自己的</h2>
<video ref="localVideo" style="width: 100%; height: 100%;" autoplay muted playsinline></video>
</div>
<div class="box" style="width: 200px; height: 300px; border: 1px solid black; margin: 20px auto;">
<h2>对方的</h2>
<!-- NOTE(review): `muted` here also silences the remote peer's audio - confirm intended -->
<video ref="remoteVideo" style="width: 100%; height: 100%;" autoplay muted playsinline></video>
</div>
<div class="box" style="width: 200px; height: 300px; border: 1px solid black; margin: 20px auto;">
<h2>录制的</h2>
<video ref="recorderVideo" style="width: 100%; height: 100%;" controls></video>
</div>
<div class="box" style="width: 200px; height: 50px; border: 1px solid black; margin: 20px auto;">
<h2>录制的音频</h2>
<audio ref="recorderAudio" controls></audio>
</div>
</div>
<div style="width: 100%;
height: 300px;"></div>
<!-- Call / recording controls -->
<button @click="invite">发起</button>
<button @click="agree">接受</button>
<button @click="beginRecorder">开启录制</button>
<button @click="overRecorder">结束录制</button>
<button @click="beginRecorderAudio">只录制音频</button>
<button @click="overRecorderAudio">结束录制音频</button>
<button>挂断</button>
</template>
<style scoped>
</style>
搭建一个服务端来转发。
const express = require('express');
const http = require('http');
const socketIo = require('socket.io');
const cors = require('cors');
const app = express();
const server = http.createServer(app);
const io = socketIo(server);
let connectedUsers = {}; // socket.id -> socket, for all connected clients
const PORT = process.env.PORT || 3000;
let index=0
// Allow cross-origin requests from any origin
app.use(cors());
// Signaling: relay room-join and invite events between the two peers.
io.on('connection', (socket) => {
socket.emit('connectsuccess', socket.id)
// Remember the connected socket
connectedUsers[socket.id] = socket;
socket.on('disconnect', () => {
console.log('A user disconnected');
});
socket.on('joinroom', value => {
socket.join(value)
})
socket.on('invite', roomid => {
socket.to(roomid).emit('callRemote')
})
});
server.listen(PORT, () => {
console.log(`Server is running on port ${PORT}`);
});
然后本地的视频就可以了!(后面的功能再一一实现)
接收邀请
对方接收到之后,需要首先接收,然后点击接收之后再发送一次给服务端,告诉发起者我同意了。
// Invitee side: when an invitation arrives, flag this client as invited.
socket.value.on('callRemote', () => {
if (!inviter.value) {
Beinviter.value = true
console.log('收到邀请')
}
})
// Clicking "accept" reports the agreement back through the server.
const agree = () => {
socket.value.emit('replyInvite', {
reply: true,
roomid
})
}
同样服务端要转发同样的事件。
// Server side: relay the invitation reply to the rest of the room.
socket.on('replyInvite', ({ reply, roomid }) => {
  console.log(reply)
  if (!reply) return
  socket.to(roomid).emit('otherReply', true)
})
邀请者生成offer
邀请者收到对方的同意之后就可以生成offer并发送出去了。
const peer = ref(null)
const offer = ref(null)
peer.value = new RTCPeerConnection()
// Invitee accepted: the inviter creates and sends the SDP offer.
socket.value.on('otherReply', async (value) => {
  if (value && inviter.value) {
    await getOffice() // generate the offer
    console.log('生成offer', offer.value)
    socket.value.emit('sendOffer', {
      offer: offer.value,
      roomid
    })
  }
})
// Build the SDP offer from the local stream and set it as the local
// description.
// NOTE: RTCPeerConnection.addStream() is deprecated and removed from
// modern browsers; addTrack() is the standard replacement and is what
// makes the remote side's ontrack fire with e.streams populated.
const getOffice = async () => {
  if (localStream.value) {
    localStream.value.getTracks().forEach(track => {
      peer.value.addTrack(track, localStream.value)
    })
    const offerCreated = await peer.value.createOffer({
      offerToReceiveAudio: 1,
      offerToReceiveVideo: 1
    })
    offer.value = offerCreated
    await peer.value.setLocalDescription(offerCreated)
  } else {
    console.error('Local stream is not available.')
  }
}
同样服务端要转发offer。
// Relay the SDP offer to the other members of the room.
socket.on('sendOffer', (payload) => {
  const { offer, roomid } = payload
  console.log(roomid)
  socket.to(roomid).emit('otherOffice', offer)
})
被邀请者收到offer。
// Invitee side: log the offer that the server relayed to us.
socket.value.on('otherOffice', async (Office) => {
  if (!Beinviter.value) return
  console.log('收到offer',Office)
})
一边点邀请,一边点接收就可以了。
接收者打开本地视频并生成answer
接收者收到offer就可以打开视频并且发送answer了。
// Invitee: received the offer - capture local media, answer, and reply.
socket.value.on('otherOffice', async (Office) => {
  if (!Beinviter.value) return
  offer.value = Office
  console.log('收到Office', Office)
  // Add the local audio/video tracks. addTrack() replaces the deprecated
  // addStream() API, which modern browsers have removed.
  const stream = await getLocalStream()
  stream.getTracks().forEach(track => peer.value.addTrack(track, stream))
  // Apply the remote SDP first, then create the answer.
  await peer.value.setRemoteDescription(offer.value)
  const answer = await peer.value.createAnswer()
  console.log('生成answer', answer)
  // Set the answer locally and send it back through the server.
  await peer.value.setLocalDescription(answer)
  socket.value.emit('sendAnswer', { answer: answer, roomid: roomid })
})
服务端转发answer
// Relay the SDP answer back to the inviter.
socket.on('sendAnswer', (msg) => {
  socket.to(msg.roomid).emit('otherAnswer', msg.answer)
})
邀请者接收answer
// Inviter: apply the invitee's answer as the remote description.
socket.value.on('otherAnswer', async (answer) => {
  if (!inviter.value) return
  console.log('收到answer', answer)
  await peer.value.setRemoteDescription(answer)
})
互相发送candidate
邀请者是加在收到同意的地方,被邀请者是加在收到offer的地方。
// Inviter: same otherReply handler as before, now also wiring up
// onicecandidate so locally gathered ICE candidates are relayed to the peer.
socket.value.on('otherReply', async (value) => {
if (value) {
if (inviter.value) {
await getOffice()// generate the offer
console.log(offer.value)
peer.value.onicecandidate = (e) => {// NEW: forward ICE candidates
if (e.candidate) {
console.log('获取A的candidate信息', e.candidate)
socket.value.emit('sendCandidate', {
candidate: e.candidate,
roomid
})
}
}
socket.value.emit('sendOffer', {
offer: offer.value,
roomid
})
}
}
})
// Invitee: same otherOffice handler, also forwarding ICE candidates.
socket.value.on('otherOffice', async (Office) => {
if (Beinviter.value) {
offer.value = Office
console.log('收到Office', Office)
peer.value.onicecandidate = (e) => {// NEW: forward ICE candidates
if (e.candidate) {
console.log('获取B的candidate信息', e.candidate)
socket.value.emit('sendCandidate', {
candidate: e.candidate,
roomid
})
}
}
// Add the local A/V stream. NOTE(review): addStream() is deprecated and
// removed in modern browsers - addTrack() is the standard replacement.
const stream = await getLocalStream()
peer.value.addStream(stream)
// Set the remote SDP
await peer.value.setRemoteDescription(offer.value)
// Create the answer
const answer = await peer.value.createAnswer()
console.log('生成answer', answer)
// Set the answer locally
await peer.value.setLocalDescription(answer)
// Send the answer back
socket.value.emit('sendAnswer', { answer: answer, roomid: roomid })
}
})
服务端转发
// Relay ICE candidates to the other peer in the room.
socket.on('sendCandidate', (data) => {
  socket.to(data.roomid).emit('othercandidate', data.candidate)
})
而双方收到后都需要保存,这里不需要分辨邀请者和被邀请者。
// Both sides: add each relayed ICE candidate to the peer connection.
// addIceCandidate rejects if a candidate arrives before the remote
// description is set, which previously surfaced as an unhandled promise
// rejection inside this async handler - so guard it.
socket.value.on('othercandidate', async (candidate) => {
  console.log('收到candidate', candidate)
  try {
    await peer.value.addIceCandidate(candidate)
  } catch (err) {
    console.error('addIceCandidate failed:', err)
  }
})
播放对方画面
就在刚才捕获candidate的地方,分别加上播放的代码
const remoteVideo = ref(null) // <video> element that shows the remote peer
// Inviter: same otherReply handler, now with ontrack to render the remote stream.
socket.value.on('otherReply', async (value) => {
if (value) {
if (inviter.value) {
await getOffice()// generate the offer
console.log(offer.value)
peer.value.onicecandidate = (e) => {
if (e.candidate) {
console.log('获取A的candidate信息', e.candidate)
socket.value.emit('sendCandidate', {
candidate: e.candidate,
roomid
})
}
}
peer.value.ontrack = (e) => {// NEW: play the remote stream
console.log('用户A获取用户B的', e.streams[0])
remoteVideo.value.srcObject = e.streams[0]
remoteVideo.value.play()
}
socket.value.emit('sendOffer', {
offer: offer.value,
roomid
})
}
}
})
// Invitee: same otherOffice handler, now with ontrack as well.
socket.value.on('otherOffice', async (Office) => {
if (Beinviter.value) {
offer.value = Office
console.log('收到Office', Office)
peer.value.onicecandidate = (e) => {
if (e.candidate) {
console.log('获取B的candidate信息', e.candidate)
socket.value.emit('sendCandidate', {
candidate: e.candidate,
roomid
})
}
}
peer.value.ontrack = (e) => {// NEW: play the remote stream
console.log('用户B获取用户A的', e.streams[0])
remoteVideo.value.srcObject = e.streams[0]
remoteVideo.value.play()
}
// Add the local A/V stream (addStream is deprecated; addTrack replaces it)
const stream = await getLocalStream()
peer.value.addStream(stream)
// Set the remote SDP
await peer.value.setRemoteDescription(offer.value)
// Create the answer
const answer = await peer.value.createAnswer()
console.log('生成answer', answer)
// Set the answer locally
await peer.value.setLocalDescription(answer)
// Send the answer back
socket.value.emit('sendAnswer', { answer: answer, roomid: roomid })
}
})
然后我们就可以互相播放对方的视频了!实现实时视频!
视频录制
const chunks = ref([]) // recorded data chunks
const mediaRecorder = ref(null) // MediaRecorder for the A/V stream
const recorderVideo = ref(null) // <video> element that plays back the recording
然后在获取本地视频流的地方
// Capture camera + mic, show the preview, and set up a MediaRecorder on
// the same stream so recording can be started later.
const getLocalStream = async () => {
const stream = await navigator.mediaDevices.getUserMedia({
audio: true,
video: { facingMode: "user" }
})
localVideo.value.srcObject = stream
localVideo.value.play()
localStream.value = stream
// NEW: recording support ---------------------
// Create the recorder for the combined A/V stream
mediaRecorder.value = new MediaRecorder(localStream.value)
// Collect data chunks as they arrive
mediaRecorder.value.ondataavailable = function (event) {
chunks.value.push(event.data)
}
// On stop: build a Blob from the chunks and play it back locally
mediaRecorder.value.onstop = function () {
let recordedBlob = new Blob(chunks.value, { type: mediaRecorder.value.mimeType })
let recordedUrl = URL.createObjectURL(recordedBlob)
recorderVideo.value.src = recordedUrl
recorderVideo.value.play()
}
//---------------------
return stream
}
// Start recording: reset the chunk buffer and start the recorder.
// Guarded so clicking before the stream exists logs instead of throwing.
const beginRecorder = () => {
  console.log('开始')
  if (!mediaRecorder.value) {
    console.error('Recorder not ready - start the local stream first.')
    return
  }
  chunks.value = [] // reset the chunk buffer
  mediaRecorder.value.start()
}
// Stop recording; triggers the recorder's onstop handler. The state check
// avoids an InvalidStateError when no recording is in progress.
const overRecorder = () => {
  console.log('结束')
  if (mediaRecorder.value && mediaRecorder.value.state !== 'inactive') {
    mediaRecorder.value.stop()
  }
}
就可以实现视频的录制咯!
只录制音频
在原来的基础上,只要改一些捕获的数据就行
const chunks = ref([]) // recorded A/V data chunks
const mediaRecorder = ref(null) // A/V recorder
const recorderVideo = ref(null) // playback element for the A/V recording
// Capture camera + mic and wire up both the A/V recorder and an
// audio-only recorder built from the stream's audio tracks.
const getLocalStream = async () => {
try {
console.log("Requesting user media...")
const stream = await navigator.mediaDevices.getUserMedia({
audio: true,
video: { facingMode: "user" }
})
localVideo.value.srcObject = stream
localVideo.value.play()
localStream.value = stream
// Create the A/V recorder
mediaRecorder.value = new MediaRecorder(localStream.value)
// Collect data chunks as they arrive
mediaRecorder.value.ondataavailable = function (event) {
chunks.value.push(event.data)
}
// On stop: assemble a Blob and play it in the "recorded" video element
mediaRecorder.value.onstop = function () {
let recordedBlob = new Blob(chunks.value, { type: mediaRecorder.value.mimeType })
let recordedUrl = URL.createObjectURL(recordedBlob)
recorderVideo.value.src = recordedUrl
recorderVideo.value.play()
}
// NEW: audio-only recording ------
// Keep only the audio tracks
const audioStream = new MediaStream()
stream.getAudioTracks().forEach(track => audioStream.addTrack(track))
// Create the audio-only recorder
mediaRecorder_audio.value = new MediaRecorder(audioStream)
// Collect audio chunks
mediaRecorder_audio.value.ondataavailable = function (event) {
chunks_audio.value.push(event.data)
}
// On stop: assemble a Blob and play it in the audio element
mediaRecorder_audio.value.onstop = function () {
let recordedBlob = new Blob(chunks_audio.value, { type: mediaRecorder_audio.value.mimeType })
let recordedUrl = URL.createObjectURL(recordedBlob)
recorderAudio.value.src = recordedUrl
recorderAudio.value.play()
}
//------
return stream
} catch (error) {
// NOTE(review): errors are swallowed here and the function returns
// undefined - callers that use the return value should handle that.
console.error("Error accessing media devices:", error)
}
}
// Start audio-only recording. Guarded so clicking before the stream
// exists logs an error instead of throwing a TypeError.
const beginRecorderAudio = () => {
  if (!mediaRecorder_audio.value) {
    console.error('Audio recorder not ready - start the local stream first.')
    return
  }
  chunks_audio.value = [] // reset the chunk buffer
  mediaRecorder_audio.value.start()
}
// Stop audio-only recording; triggers its onstop handler. The state check
// avoids an InvalidStateError when no recording is in progress.
const overRecorderAudio = () => {
  if (mediaRecorder_audio.value && mediaRecorder_audio.value.state !== 'inactive') {
    mediaRecorder_audio.value.stop()
  }
}
也可以实现音频播放了!
视频上传服务器并转换为mp4文件
服务端:
npm install multer
npm install ffmpeg fluent-ffmpeg
npm install @ffmpeg-installer/ffmpeg @ffprobe-installer/ffprobe fluent-ffmpeg
const multer = require('multer');
const path = require('path');
const ffmpeg = require('fluent-ffmpeg');
// Serve static files from 'public' (converted mp4s become downloadable)
app.use(express.static('public'));
// Use prebuilt ffmpeg/ffprobe binaries so no local ffmpeg install is needed
const ffmpegPath = require('@ffmpeg-installer/ffmpeg').path;
const ffprobePath = require('@ffprobe-installer/ffprobe').path;
ffmpeg.setFfmpegPath(ffmpegPath);
ffmpeg.setFfprobePath(ffprobePath);
// multer disk storage: save uploads into <project>/public
const storage = multer.diskStorage({
destination: (req, file, cb) => {
cb(null, path.join(__dirname, 'public')); // save into the 'public' folder
},
filename: (req, file, cb) => {
const ext = path.extname(file.originalname);
const filename = Date.now() + ext; // timestamp avoids name collisions
cb(null, filename);
},
});
const upload = multer({ storage: storage });
// POST /upload: store the uploaded webm, then convert it to mp4 with ffmpeg
app.post('/upload', upload.single('file'), (req, res) => {
if (!req.file) {
return res.status(400).send('没有上传文件。');
}
const inputFilePath = path.join(__dirname, 'public', req.file.filename);
const outputFilePath = inputFilePath.replace('.webm', '.mp4');
ffmpeg(inputFilePath)
.output(outputFilePath)
.on('end', () => {
console.log('转换完成:', outputFilePath);
res.status(200).send(`文件上传并转换成功。文件 URL: /${path.basename(outputFilePath)}`);
})
.on('error', (err) => {
console.error('转换出错:', err);
res.status(500).send('文件转换出错。');
})
.run();
});
用户端:
// Upload a recorded Blob to the server. Recordings here are always WebM,
// so the filename no longer goes through the dead conditional
// (`type === 'video' ? 'webm' : 'webm'`) that picked the same extension
// on both branches. The `type` parameter is kept for caller compatibility.
const uploadFile = (blob, type) => {
  const formData = new FormData();
  formData.append('file', blob, 'recorded.webm'); // both branches were 'webm'
  fetch('http://localhost:3000/upload', {
    method: 'POST',
    body: formData,
  })
    .then(response => response.text())
    .then(data => {
      console.log('成功:', data);
    })
    .catch(error => {
      console.error('错误:', error);
    });
}
然后在原来录制视频的地方改成这个:就可以录制完自动上传并且转化成mp4了。
// Configure the video recorder with an explicit MIME type so ffmpeg on the
// server receives a predictable WebM/VP8 container.
const videoMimeType = 'video/webm;codecs=vp8';
mediaRecorder.value = new MediaRecorder(localStream.value, { mimeType: videoMimeType });
// Collect recorded data as it becomes available.
mediaRecorder.value.ondataavailable = function (event) {
  chunks.value.push(event.data)
}
// When recording stops: build a Blob, upload it, and play it back locally.
// The previous version also built an unused `recordedBlobTo` from the audio
// recorder's chunks, which dereferenced `mediaRecorder_audio.value` and
// could throw when the audio recorder was not set up yet - removed.
mediaRecorder.value.onstop = function () {
  let recordedBlob = new Blob(chunks.value, { type: mediaRecorder.value.mimeType })
  uploadFile(recordedBlob, 'video'); // upload the recorded video
  let recordedUrl = URL.createObjectURL(recordedBlob)
  recorderVideo.value.src = recordedUrl
  recorderVideo.value.play()
}
可以测试一下浏览器是否支持这些格式先:
// Probe which recording MIME types this browser's MediaRecorder accepts.
const supportedTypes = [
  'video/webm;codecs=vp9',
  'video/webm;codecs=vp8',
  'audio/webm;codecs=opus',
  'audio/ogg;codecs=opus'
];
for (const type of supportedTypes) {
  const verdict = MediaRecorder.isTypeSupported(type) ? "supported" : "not supported";
  console.log(type + " is " + verdict);
}
我测试是Edge和谷歌浏览器都支持这些格式。
这样保存后public文件夹下会有两个后缀的文件,分别是webm和mp4,因为webm是我们最开始保存的,而mp4是我们转化的,所以我们可以写一个方法来删除webm文件。
// Remove leftover .webm source files after conversion.
// Fixed to resolve the folder against __dirname, matching the upload
// handlers' path.join(__dirname, 'public') - the old relative 'public'
// depended on the process's working directory.
// NOTE: this deletes EVERY .webm in public/, so a concurrent upload that
// has not been converted yet would be lost - acceptable for this demo.
function deleteWebmFiles() {
  const publicDir = path.join(__dirname, 'public');
  fs.readdir(publicDir, (err, files) => {
    if (err) {
      console.error('读取目录失败:', err);
      return;
    }
    // Keep only .webm files
    const webmFiles = files.filter(file => path.extname(file).toLowerCase() === '.webm');
    // Delete each one, logging success or failure
    webmFiles.forEach(file => {
      const filePath = path.join(publicDir, file);
      fs.unlink(filePath, err => {
        if (err) {
          console.error(`删除文件失败: ${filePath}`, err);
        } else {
          console.log(`成功删除文件: ${filePath}`);
        }
      });
    });
  });
}
音频上传服务器并转换为mp3文件
其实和视频差不多
用户端
// Upload a recorded audio Blob (webm/opus) to the server for mp3 conversion.
const uploadAudio = (blob) => {
  const body = new FormData();
  body.append('file', blob, 'recording.webm');
  fetch('http://localhost:3000/uploadAudio', { method: 'POST', body })
    .then(response => response.text())
    .then(data => {
      console.log('成功:', data);
    })
    .catch(error => {
      console.error('错误:', error);
    });
}
const audioMimeType = 'audio/webm;codecs=opus';
// Keep only the audio tracks from the captured stream
const audioStream = new MediaStream();
stream.getAudioTracks().forEach(track => audioStream.addTrack(track));
// Create the audio recorder with an explicit MIME type
mediaRecorder_audio.value = new MediaRecorder(audioStream, { mimeType: audioMimeType });
// Collect audio chunks as they arrive
mediaRecorder_audio.value.ondataavailable = function (event) {
chunks_audio.value.push(event.data);
};
// On stop: build the Blob, upload it, and play it back locally
mediaRecorder_audio.value.onstop = function () {
let recordedBlob = new Blob(chunks_audio.value, { type: mediaRecorder_audio.value.mimeType });
let recordedUrl = URL.createObjectURL(recordedBlob);
uploadAudio(recordedBlob);
recorderAudio.value.src = recordedUrl;
recorderAudio.value.play();
};
服务端:
// POST /uploadAudio: store the webm audio, convert it to mp3 with ffmpeg,
// then delete the leftover webm sources.
app.post('/uploadAudio', upload.single('file'), (req, res) => {
if (!req.file) {
return res.status(400).send('没有上传文件。');
}
const inputFilePath = path.join(__dirname, 'public', req.file.filename);
const outputFilePath = inputFilePath.replace('.webm', '.mp3');
ffmpeg(inputFilePath)
.output(outputFilePath)
.on('end', () => {
console.log('转换完成:', outputFilePath);
deleteWebmFiles()
res.status(200).send(`文件上传并转换成功。文件 URL: /${path.basename(outputFilePath)}`);
})
.on('error', (err) => {
console.error('转换出错:', err);
res.status(500).send('文件转换出错。');
})
.run();
});
完整代码汇总
用户端
<script setup>
// Final client: WebRTC call over a socket.io signaling room, plus A/V and
// audio-only recording with upload to the conversion server.
import { ref, onMounted } from 'vue'
import { io } from 'socket.io-client';
const socket = ref(null)
const roomid = '001' // hard-coded room id shared by both peers
const localStream = ref(null)
const localVideo = ref(null)
const remoteVideo = ref(null)
const peer = ref(null)
const offer = ref(null)
const inviter = ref(false)// true when this client initiated the call
const Beinviter = ref(false)// true when this client received the invitation
const chunks = ref([]) // recorded A/V data chunks
const mediaRecorder = ref(null) // A/V recorder
const recorderVideo = ref(null)
const recorderAudio = ref(null)
const chunks_audio = ref([]) // recorded audio-only data chunks
const mediaRecorder_audio = ref(null) // audio-only recorder
peer.value = new RTCPeerConnection()
// Upload the recorded video Blob to the server. Recordings are always
// WebM, so the filename no longer goes through the dead conditional
// (`type === 'video' ? 'webm' : 'webm'`) that chose the same extension on
// both branches. The `type` parameter is kept for caller compatibility.
const uploadFileVideo = (blob, type) => {
  const formData = new FormData();
  formData.append('file', blob, 'recorded.webm'); // both branches were 'webm'
  fetch('http://localhost:3000/uploadVideo', {
    method: 'POST',
    body: formData,
  })
    .then(response => response.text())
    .then(data => {
      console.log('成功:', data);
    })
    .catch(error => {
      console.error('错误:', error);
    });
}
// Upload the recorded audio Blob (webm/opus) for mp3 conversion.
const uploadAudio = (blob) => {
  const body = new FormData();
  body.append('file', blob, 'recording.webm');
  fetch('http://localhost:3000/uploadAudio', { method: 'POST', body })
    .then(response => response.text())
    .then(data => {
      console.log('成功:', data);
    })
    .catch(error => {
      console.error('错误:', error);
    });
}
// Capture camera + mic, show the local preview, and set up two recorders:
// one for the full A/V stream and one for an audio-only stream. Both
// upload their result when stopped.
const getLocalStream = async () => {
try {
console.log("Requesting user media...")
const stream = await navigator.mediaDevices.getUserMedia({
audio: true,
video: { facingMode: "user" }
})
localVideo.value.srcObject = stream
localVideo.value.play()
localStream.value = stream
// Explicit MIME type so the server gets a predictable WebM/VP8 container
const videoMimeType = 'video/webm;codecs=vp8';
mediaRecorder.value = new MediaRecorder(localStream.value, { mimeType: videoMimeType });
// Collect A/V data chunks as they arrive
mediaRecorder.value.ondataavailable = function (event) {
chunks.value.push(event.data)
}
// On stop: build the Blob, upload the video, and play it back locally
mediaRecorder.value.onstop = function () {
let recordedBlob = new Blob(chunks.value, { type: mediaRecorder.value.mimeType })
uploadFileVideo(recordedBlob, 'video');// upload the recorded video
let recordedUrl = URL.createObjectURL(recordedBlob)
recorderVideo.value.src = recordedUrl
recorderVideo.value.play()
}
const audioMimeType = 'audio/webm;codecs=opus';
// Keep only the audio tracks for the audio-only recorder
const audioStream = new MediaStream();
stream.getAudioTracks().forEach(track => audioStream.addTrack(track));
// Create the audio recorder with an explicit MIME type
mediaRecorder_audio.value = new MediaRecorder(audioStream, { mimeType: audioMimeType });
// Collect audio chunks as they arrive
mediaRecorder_audio.value.ondataavailable = function (event) {
chunks_audio.value.push(event.data);
};
// On stop: build the Blob, upload the audio, and play it back locally
mediaRecorder_audio.value.onstop = function () {
let recordedBlob = new Blob(chunks_audio.value, { type: mediaRecorder_audio.value.mimeType });
let recordedUrl = URL.createObjectURL(recordedBlob);
uploadAudio(recordedBlob);
recorderAudio.value.src = recordedUrl;
recorderAudio.value.play();
};
return stream
} catch (error) {
// NOTE(review): errors are swallowed and the function returns undefined,
// so callers that pass the result to the peer connection will fail later.
console.error("Error accessing media devices:", error)
}
}
// Start audio-only recording (requires getLocalStream to have run).
// Guarded so clicking before the stream exists logs instead of throwing.
const beginRecorderAudio = () => {
  if (!mediaRecorder_audio.value) {
    console.error('Audio recorder not ready - start the local stream first.')
    return
  }
  chunks_audio.value = [] // reset the chunk buffer
  mediaRecorder_audio.value.start()
}
// Stop audio-only recording; fires its onstop handler (upload + playback).
const overRecorderAudio = () => {
  if (mediaRecorder_audio.value && mediaRecorder_audio.value.state !== 'inactive') {
    mediaRecorder_audio.value.stop()
  }
}
// Start audio+video recording (requires getLocalStream to have run).
const beginRecorder = () => {
  console.log('开始')
  if (!mediaRecorder.value) {
    console.error('Recorder not ready - start the local stream first.')
    return
  }
  chunks.value = [] // reset the chunk buffer
  mediaRecorder.value.start()
}
// Stop audio+video recording; fires its onstop handler (upload + playback).
const overRecorder = () => {
  console.log('结束')
  if (mediaRecorder.value && mediaRecorder.value.state !== 'inactive') {
    mediaRecorder.value.stop()
  }
}
// Invitee clicks "accept": report the agreement through the server.
const agree = () => {
  const payload = { reply: true, roomid }
  socket.value.emit('replyInvite', payload)
}
// Create the SDP offer from the local stream and set it as the local
// description. Uses addTrack(): RTCPeerConnection.addStream() is
// deprecated and removed from modern browsers, and addTrack() is what
// makes the remote side's ontrack deliver e.streams.
const getOffice = async () => {
  if (localStream.value) {
    localStream.value.getTracks().forEach(track => {
      peer.value.addTrack(track, localStream.value)
    })
    const offerCreated = await peer.value.createOffer({
      offerToReceiveAudio: 1,
      offerToReceiveVideo: 1
    })
    offer.value = offerCreated
    await peer.value.setLocalDescription(offerCreated)
  } else {
    console.error('Local stream is not available.')
  }
}
onMounted(() => {
socket.value = io('http://127.0.0.1:3000', {
transports: ['websocket'], // force the WebSocket transport
autoConnect: true, // connect immediately
reconnection: true, // auto-reconnect on drop
reconnectionAttempts: 3, // give up after 3 attempts
reconnectionDelay: 1000, // wait 1000 ms between attempts
})
socket.value.emit('joinroom', roomid)
// Invitation arrives: mark this client as the invitee.
socket.value.on('callRemote', () => {
if (!inviter.value) {
Beinviter.value = true
console.log('收到邀请')
}
})
// Invitee accepted: the inviter builds the offer, wires ICE + remote-track
// handlers, then sends the offer through the server.
socket.value.on('otherReply', async (value) => {
if (value) {
if (inviter.value) {
await getOffice()// generate the offer
console.log(offer.value)
peer.value.onicecandidate = (e) => {
if (e.candidate) {
console.log('获取A的candidate信息', e.candidate)
socket.value.emit('sendCandidate', {
candidate: e.candidate,
roomid
})
}
}
peer.value.ontrack = (e) => {
console.log('用户A获取用户B的', e.streams[0])
remoteVideo.value.srcObject = e.streams[0]
remoteVideo.value.play()
}
socket.value.emit('sendOffer', {
offer: offer.value,
roomid
})
}
}
})
// Offer arrives: the invitee wires handlers, adds local media, sets the
// remote SDP, creates an answer and sends it back.
socket.value.on('otherOffice', async (Office) => {
if (Beinviter.value) {
offer.value = Office
console.log('收到Office', Office)
// peer.value = new RTCPeerConnection()
peer.value.onicecandidate = (e) => {
if (e.candidate) {
console.log('获取B的candidate信息', e.candidate)
socket.value.emit('sendCandidate', {
candidate: e.candidate,
roomid
})
}
}
peer.value.ontrack = (e) => {
console.log('用户B获取用户A的', e.streams[0])
remoteVideo.value.srcObject = e.streams[0]
remoteVideo.value.play()
}
// Add the local A/V stream. NOTE(review): addStream() is deprecated and
// removed in modern browsers - addTrack() is the standard replacement.
const stream = await getLocalStream()
peer.value.addStream(stream)
// Set the remote SDP
await peer.value.setRemoteDescription(offer.value)
// Create the answer
const answer = await peer.value.createAnswer()
console.log('生成answer', answer)
// Set the answer locally
await peer.value.setLocalDescription(answer)
// Send the answer back
socket.value.emit('sendAnswer', { answer: answer, roomid: roomid })
}
})
// Inviter receives the answer and applies it as the remote description.
socket.value.on('otherAnswer', async (answer) => {
if (inviter.value) {
console.log('收到answer', answer)
await peer.value.setRemoteDescription(answer)
}
})
// Both sides store each relayed ICE candidate.
socket.value.on('othercandidate', async (candidate) => {
// if (Beinviter.value) {
// console.log('收到candidate', candidate)
// }
console.log('收到candidate', candidate)
await peer.value.addIceCandidate(candidate)
})
})
const invite = async () => {
inviter.value = true
await getLocalStream()
// getOffice()
console.log('发起邀请')
socket.value.emit('invite', roomid)
}
</script>
<template>
<!-- Panels: local preview / remote peer / recorded video / recorded audio -->
<div style="display: flex;text-align: center;">
<div class="box" style="width: 200px; height: 300px; border: 1px solid black; margin: 20px auto;">
<h2>自己的</h2>
<video ref="localVideo" style="width: 100%; height: 100%;" autoplay muted playsinline></video>
</div>
<div class="box" style="width: 200px; height: 300px; border: 1px solid black; margin: 20px auto;">
<h2>对方的</h2>
<!-- NOTE(review): `muted` here also silences the remote peer's audio - confirm intended -->
<video ref="remoteVideo" style="width: 100%; height: 100%;" autoplay muted playsinline></video>
</div>
<div class="box" style="width: 200px; height: 300px; border: 1px solid black; margin: 20px auto;">
<h2>录制的</h2>
<video ref="recorderVideo" style="width: 100%; height: 100%;" controls></video>
</div>
<div class="box" style="width: 200px; height: 50px; border: 1px solid black; margin: 20px auto;">
<h2>录制的音频</h2>
<audio ref="recorderAudio" controls></audio>
</div>
</div>
<div style="width: 100%;
height: 300px;"></div>
<!-- Call / recording controls -->
<button @click="invite">发起</button>
<button @click="agree">接受</button>
<button @click="beginRecorder">开启录制</button>
<button @click="overRecorder">结束录制</button>
<button @click="beginRecorderAudio">只录制音频</button>
<button @click="overRecorderAudio">结束录制音频</button>
<button>挂断</button>
</template>
<style scoped></style>
服务端
const express = require('express');
const http = require('http');
const socketIo = require('socket.io');
const fs = require('fs');
const path = require('path');
const multer = require('multer');
const ffmpeg = require('fluent-ffmpeg');
// Use prebuilt ffmpeg/ffprobe binaries so no local ffmpeg install is needed
const ffmpegPath = require('@ffmpeg-installer/ffmpeg').path;
const ffprobePath = require('@ffprobe-installer/ffprobe').path;
ffmpeg.setFfmpegPath(ffmpegPath);
ffmpeg.setFfprobePath(ffprobePath);
const app = express();
const server = http.createServer(app);
const io = socketIo(server);
let connectedUsers = {}; // socket.id -> socket, for all connected clients
const PORT = process.env.PORT || 3000;
let index = 0 // connection event counter (incremented on connect and disconnect)
// CORS for the browser client. A single middleware replaces the previous
// duplicated pair; the old first middleware also forced
// `Content-Type: application/json` onto EVERY response, mislabeling the
// plain-text replies and the static files served from public/.
app.use((req, res, next) => {
  res.setHeader('Access-Control-Allow-Origin', '*');
  res.setHeader('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE');
  res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization, token');
  next();
});
// multer disk storage: save uploads into <project>/public with a
// timestamp-based name to avoid collisions.
const storage = multer.diskStorage({
destination: (req, file, cb) => {
cb(null, path.join(__dirname, 'public')); // save into the 'public' folder
},
filename: (req, file, cb) => {
const ext = path.extname(file.originalname);
const filename = Date.now() + ext; // timestamp avoids name clashes
cb(null, filename);
},
});
const upload = multer({ storage: storage });
// Remove leftover .webm source files after conversion.
// Fixed to resolve the folder against __dirname, matching the upload
// handlers' path.join(__dirname, 'public') - the old relative 'public'
// depended on the process's working directory.
// NOTE: this deletes EVERY .webm in public/, so a concurrent upload that
// has not been converted yet would be lost - acceptable for this demo.
function deleteWebmFiles() {
  const publicDir = path.join(__dirname, 'public');
  fs.readdir(publicDir, (err, files) => {
    if (err) {
      console.error('读取目录失败:', err);
      return;
    }
    // Keep only .webm files
    const webmFiles = files.filter(file => path.extname(file).toLowerCase() === '.webm');
    // Delete each one, logging success or failure
    webmFiles.forEach(file => {
      const filePath = path.join(publicDir, file);
      fs.unlink(filePath, err => {
        if (err) {
          console.error(`删除文件失败: ${filePath}`, err);
        } else {
          console.log(`成功删除文件: ${filePath}`);
        }
      });
    });
  });
}
// Serve converted files from the 'public' folder.
app.use(express.static('public'));
// POST /uploadVideo: accept a webm upload, convert it to mp4 with ffmpeg,
// then remove the leftover webm sources.
app.post('/uploadVideo', upload.single('file'), (req, res) => {
  if (!req.file) {
    return res.status(400).send('没有上传文件。');
  }
  const inputFilePath = path.join(__dirname, 'public', req.file.filename);
  // Derive the output name via path.parse: the previous
  // .replace('.webm', '.mp4') silently returned the INPUT path (causing an
  // in-place ffmpeg write) whenever the uploaded extension differed.
  const { dir, name } = path.parse(inputFilePath);
  const outputFilePath = path.join(dir, `${name}.mp4`);
  ffmpeg(inputFilePath)
    .output(outputFilePath)
    .on('end', () => {
      console.log('转换完成:', outputFilePath);
      deleteWebmFiles()
      res.status(200).send(`文件上传并转换成功。文件 URL: /${path.basename(outputFilePath)}`);
    })
    .on('error', (err) => {
      console.error('转换出错:', err);
      res.status(500).send('文件转换出错。');
    })
    .run();
});
// POST /uploadAudio: accept a webm audio upload, convert it to mp3 with
// ffmpeg, then remove the leftover webm sources.
app.post('/uploadAudio', upload.single('file'), (req, res) => {
  if (!req.file) {
    return res.status(400).send('没有上传文件。');
  }
  const inputFilePath = path.join(__dirname, 'public', req.file.filename);
  // Derive the output name via path.parse: the previous
  // .replace('.webm', '.mp3') silently returned the INPUT path whenever
  // the uploaded extension differed.
  const { dir, name } = path.parse(inputFilePath);
  const outputFilePath = path.join(dir, `${name}.mp3`);
  ffmpeg(inputFilePath)
    .output(outputFilePath)
    .on('end', () => {
      console.log('转换完成:', outputFilePath);
      deleteWebmFiles()
      res.status(200).send(`文件上传并转换成功。文件 URL: /${path.basename(outputFilePath)}`);
    })
    .on('error', (err) => {
      console.error('转换出错:', err);
      res.status(500).send('文件转换出错。');
    })
    .run();
});
// Signaling hub: every client event is relayed to the other sockets in the
// same room; the server itself keeps no call state.
io.on('connection', (socket) => {
socket.emit('connectsuccess', socket.id)
console.log(index + 'User come')
index++
// Remember the connected socket
connectedUsers[socket.id] = socket;
socket.on('disconnect', () => {
console.log('A user disconnected');
// NOTE(review): index also increments here, so it counts connection
// events rather than concurrent users - confirm this is intended.
index++
});
socket.on('joinroom', value => {
socket.join(value)
})
socket.on('invite', roomid => {
socket.to(roomid).emit('callRemote')
})
socket.on('replyInvite', ({ reply, roomid }) => {
console.log(reply)
if (reply) {
socket.to(roomid).emit('otherReply', true)
}
})
socket.on('sendOffer', ({ offer, roomid }) => {
console.log(roomid)
socket.to(roomid).emit('otherOffice', offer)
})
socket.on('sendAnswer', ({ answer, roomid }) => {
socket.to(roomid).emit('otherAnswer', answer)
})
socket.on('sendCandidate', ({ candidate, roomid }) => {
socket.to(roomid).emit('othercandidate', candidate)
})
});
// Start the HTTP + socket.io server.
server.listen(PORT, () => {
console.log(`Server is running on port ${PORT}`);
});