Real-time video, video recording, and audio recording uploaded as mp3/mp4 files, built with node + vue + webrtc + socket.io

Preface

A project of mine needed video calling and audio recording, so I spent some time figuring it out. Much of it was, of course, done with the help of AI once I had a rough idea.

The usual signaling diagrams are fairly complex, so I drew a simpler one myself as a rough guide to the flow.

Start by creating a Vue project. Since this is only a demo, I did not set up a router or a store.

Once the project is created, install the socket.io client first.

npm i socket.io-client

Real-time video

The first step is joining a room: pick a roomId that both peers join, so that when the server forwards a message it can simply send it to everyone in that room. Here I hard-code the id to 001.

Sending the invitation

After clicking the invite button, the local camera is opened to produce a MediaStream, and the video element's source is set to that stream.

<script setup>
import { ref, onMounted } from 'vue'
import { io } from 'socket.io-client';
const socket = ref(null)
const roomid = '001'
const localStream = ref(null) // local media stream
const localVideo = ref(null)  // video element that plays the local stream
const inviter = ref(false)    // whether this client is the inviter
const Beinviter = ref(false)  // whether this client is the invitee
// Get the video and audio stream
const getLocalStream = async () => {
    const stream = await navigator.mediaDevices.getUserMedia({
      audio: true,
      video: { facingMode: "user" }
    })
    localVideo.value.srcObject = stream
    localVideo.value.play()
    localStream.value = stream
    return stream
}

onMounted(() => {
  socket.value = io('http://127.0.0.1:3000', {
    transports: ['websocket'], // transport to use (WebSocket)
    autoConnect: true,         // connect automatically
    reconnection: true,        // reconnect automatically
    reconnectionAttempts: 3,   // number of reconnection attempts
    reconnectionDelay: 1000,   // delay between reconnection attempts (ms)
  })
  socket.value.emit('joinroom', roomid)
})

const invite = async () => {
  inviter.value = true
  await getLocalStream()
  // getOffice()
  socket.value.emit('invite', roomid)
}
</script>

<template>
  <div style="display: flex;text-align: center;">
    <div class="box" style="width: 200px; height: 300px; border: 1px solid black; margin: 20px auto;">
      <h2>Mine</h2>
      <video ref="localVideo" style="width: 100%; height: 100%;" autoplay muted playsinline></video>
    </div>
    <div class="box" style="width: 200px; height: 300px; border: 1px solid black; margin: 20px auto;">
      <h2>Remote</h2>
      <video ref="remoteVideo" style="width: 100%; height: 100%;" autoplay muted playsinline></video>
    </div>
    <div class="box" style="width: 200px; height: 300px; border: 1px solid black; margin: 20px auto;">
      <h2>Recorded video</h2>
      <video ref="recorderVideo" style="width: 100%; height: 100%;" controls></video>
    </div>
    <div class="box" style="width: 200px; height: 50px; border: 1px solid black; margin: 20px auto;">
      <h2>Recorded audio</h2>
      <audio ref="recorderAudio" controls></audio>
    </div>
  </div>
  <div style="width: 100%; height: 300px;"></div>
  <button @click="invite">Invite</button>
  <button @click="agree">Accept</button>
  <button @click="beginRecorder">Start recording</button>
  <button @click="overRecorder">Stop recording</button>
  <button @click="beginRecorderAudio">Record audio only</button>
  <button @click="overRecorderAudio">Stop audio recording</button>
  <button>Hang up</button>
</template>


<style scoped>

</style>
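The "Hang up" button is left unwired in this demo. Below is a minimal sketch of a possible handler, assuming the refs from the finished component (peer, localStream, and so on) and a hypothetical hangup socket event that the server would also need to forward (it is not part of the server code shown in this post). You would bind it with <button @click="hangUp">Hang up</button>.

const hangUp = () => {
  // Stop all local tracks (camera and microphone)
  if (localStream.value) {
    localStream.value.getTracks().forEach(track => track.stop())
  }
  // Close the peer connection
  if (peer.value) {
    peer.value.close()
    peer.value = null
  }
  // Notify the other side; 'hangup' is hypothetical and needs a matching
  // socket.to(roomid).emit('hangup') forward on the server
  socket.value.emit('hangup', roomid)
  inviter.value = false
  Beinviter.value = false
}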

Set up a server to do the forwarding.

const express = require('express');
const http = require('http');
const socketIo = require('socket.io');
const cors = require('cors');

const app = express();
const server = http.createServer(app);
const io = socketIo(server);
let connectedUsers = {};
const PORT = process.env.PORT || 3000;
let index = 0
// Allow cross-origin requests from any origin
app.use(cors());
io.on('connection', (socket) => {
    socket.emit('connectsuccess', socket.id)
    // Keep track of connected users
    connectedUsers[socket.id] = socket;
    socket.on('disconnect', () => {
        console.log('A user disconnected');
    });
    socket.on('joinroom', value => {
        socket.join(value)
    })
    socket.on('invite', roomid => {
        socket.to(roomid).emit('callRemote')
    })
});

server.listen(PORT, () => {
    console.log(`Server is running on port ${PORT}`);
});

With that, the local video works! (The remaining buttons will be implemented one by one below.)

Receiving the invitation

When the other side receives the invitation, it first registers it; then, when the user clicks Accept, it sends a message back to the server to tell the inviter that the call was accepted.




// Receive the invitation
  socket.value.on('callRemote', () => {
    if (!inviter.value) {
      Beinviter.value = true
      console.log('invitation received')
    }
  })



const agree = () => {
  socket.value.emit('replyInvite', {
    reply: true,
    roomid
  })
}

The server, again, needs to forward this event.

    // Server side
socket.on('replyInvite', ({ reply, roomid }) => {
        console.log(reply)
        if (reply) {
            socket.to(roomid).emit('otherReply', true)
        }
    })

 

The inviter creates the offer

Once the inviter receives the other side's acceptance, it can create an offer and send it out.

  
  const peer = ref(null)
  const offer = ref(null)
  peer.value = new RTCPeerConnection()
// The other side accepted the invitation
  socket.value.on('otherReply', async (value) => {
    if (value) {
      if (inviter.value) {
        await getOffice() // create the offer
        console.log('offer created', offer.value)
        socket.value.emit('sendOffer', {
          offer: offer.value,
          roomid
        })
      }
    }
  })

// Create the offer
const getOffice = async () => {
  if (localStream.value) {
    peer.value.addStream(localStream.value)
    const offerCreated = await peer.value.createOffer({
      offerToReceiveAudio: 1,
      offerToReceiveVideo: 1
    })
    offer.value = offerCreated
    await peer.value.setLocalDescription(offerCreated)
    // getAnswer()

  } else {
    console.error('Local stream is not available.')
  }
}
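As an aside, addStream() is a deprecated API (it still works in Chromium-based browsers, which is why the demo gets away with it). A small sketch of the same step using the standards-track addTrack() instead, should you want to avoid the legacy call:

// Add each local track to the connection instead of the whole stream;
// ontrack on the remote side still receives e.streams[0] as before.
localStream.value.getTracks().forEach(track => {
  peer.value.addTrack(track, localStream.value)
})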

Likewise, the server needs to forward the offer.

    socket.on('sendOffer', ({ offer, roomid }) => {
        console.log(roomid)
        socket.to(roomid).emit('otherOffice', offer)
    })

The invitee receives the offer.

  socket.value.on('otherOffice', async (Office) => {
    if (Beinviter.value) {
      console.log('offer received', Office)
    }
  })

Click Invite on one side and Accept on the other, and this part works.

The invitee opens its local video and creates the answer

Once the invitee receives the offer, it can open its camera and send back an answer.

 socket.value.on('otherOffice', async (Office) => {
    if (Beinviter.value) {
      offer.value = Office
      console.log('offer received', Office)
      // Add the local audio/video stream
      const stream = await getLocalStream()
      peer.value.addStream(stream)
      // Set the remote description (SDP)
      await peer.value.setRemoteDescription(offer.value)
      // Create the answer
      const answer = await peer.value.createAnswer()
      console.log('answer created', answer)
      // Set the answer as the local description
      await peer.value.setLocalDescription(answer)
      // Send the answer
      socket.value.emit('sendAnswer', { answer: answer, roomid: roomid })
    }
  })

The server forwards the answer

    socket.on('sendAnswer', ({ answer, roomid }) => {
        socket.to(roomid).emit('otherAnswer', answer)
    })

The inviter receives the answer

  socket.value.on('otherAnswer', async (answer) => {
    if (inviter.value) {
      console.log('answer received', answer)
      await peer.value.setRemoteDescription(answer)
    }
  })

Exchanging candidates

The inviter adds this where it receives the acceptance; the invitee adds it where it receives the offer.

  // Inviter
socket.value.on('otherReply', async (value) => {
    if (value) {
      if (inviter.value) {
        await getOffice() // create the offer
        console.log(offer.value)
        peer.value.onicecandidate = (e) => { // add this
          if (e.candidate) {
            console.log("A's candidate", e.candidate)
            socket.value.emit('sendCandidate', {
              candidate: e.candidate,
              roomid
            })
          }
        }
        socket.value.emit('sendOffer', {
          offer: offer.value,
          roomid
        })
      }

    }
  })
 // Invitee
socket.value.on('otherOffice', async (Office) => {
    if (Beinviter.value) {
      offer.value = Office
      console.log('offer received', Office)
      peer.value.onicecandidate = (e) => { // add this
        if (e.candidate) {
          console.log("B's candidate", e.candidate)
          socket.value.emit('sendCandidate', {
            candidate: e.candidate,
            roomid
          })
        }
      }
      // Add the local audio/video stream
      const stream = await getLocalStream()
      peer.value.addStream(stream)
      // Set the remote description (SDP)
      await peer.value.setRemoteDescription(offer.value)
      // Create the answer
      const answer = await peer.value.createAnswer()
      console.log('answer created', answer)
      // Set the answer as the local description
      await peer.value.setLocalDescription(answer)
      // Send the answer
      socket.value.emit('sendAnswer', { answer: answer, roomid: roomid })
    }
  })

The server forwards them

    socket.on('sendCandidate', ({ candidate, roomid }) => {
        socket.to(roomid).emit('othercandidate', candidate)
    })

Both sides need to add the candidates they receive; there is no need to distinguish inviter and invitee here.

  socket.value.on('othercandidate', async (candidate) => {
    console.log('candidate received', candidate)
    await peer.value.addIceCandidate(candidate)
  })
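One thing to be aware of: addIceCandidate() can fail if a candidate arrives before setRemoteDescription() has run, since signaling order is not guaranteed. A variant of the handler above that queues early candidates, as a sketch (the flush step after setRemoteDescription is left as a comment):

const pendingCandidates = [] // candidates that arrived too early

socket.value.on('othercandidate', async (candidate) => {
  if (peer.value.remoteDescription) {
    await peer.value.addIceCandidate(candidate)
  } else {
    // Remote description not set yet: hold on to the candidate
    pendingCandidates.push(candidate)
  }
})

// After calling setRemoteDescription(...), flush the queue:
// for (const c of pendingCandidates) await peer.value.addIceCandidate(c)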

Playing the remote video

In the same handlers where the candidates are captured, add the code that plays the remote stream.

const remoteVideo = ref(null)

// Inviter
socket.value.on('otherReply', async (value) => {
    if (value) {
      if (inviter.value) {
        await getOffice() // create the offer
        console.log(offer.value)
        peer.value.onicecandidate = (e) => {
          if (e.candidate) {
            console.log("A's candidate", e.candidate)
            socket.value.emit('sendCandidate', {
              candidate: e.candidate,
              roomid
            })
          }
        }
        peer.value.ontrack = (e) => { // add this
          console.log("A received B's stream", e.streams[0])
          remoteVideo.value.srcObject = e.streams[0]
          remoteVideo.value.play()
        }
        socket.value.emit('sendOffer', {
          offer: offer.value,
          roomid
        })
      }

    }
  })
  // Invitee
socket.value.on('otherOffice', async (Office) => {
    if (Beinviter.value) {
      offer.value = Office
      console.log('offer received', Office)
      peer.value.onicecandidate = (e) => {
        if (e.candidate) {
          console.log("B's candidate", e.candidate)
          socket.value.emit('sendCandidate', {
            candidate: e.candidate,
            roomid
          })
        }
      }
      peer.value.ontrack = (e) => { // add this
        console.log("B received A's stream", e.streams[0])
        remoteVideo.value.srcObject = e.streams[0]
        remoteVideo.value.play()
      }
      // Add the local audio/video stream
      const stream = await getLocalStream()
      peer.value.addStream(stream)
      // Set the remote description (SDP)
      await peer.value.setRemoteDescription(offer.value)
      // Create the answer
      const answer = await peer.value.createAnswer()
      console.log('answer created', answer)
      // Set the answer as the local description
      await peer.value.setLocalDescription(answer)
      // Send the answer
      socket.value.emit('sendAnswer', { answer: answer, roomid: roomid })
    }
  })

And now each side can play the other's video. Real-time video is working!
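One caveat with the template above: the remote video element carries the muted attribute, which keeps autoplay policies happy but also means you will not hear the other side. One option, as a sketch, is to unmute it after a user gesture:

// Call this from a click handler (autoplay policies require a user gesture)
const unmuteRemote = () => {
  remoteVideo.value.muted = false
  remoteVideo.value.play()
}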

Video recording

const chunks = ref([]) // recorded data chunks
const mediaRecorder = ref(null) // MediaRecorder instance for audio/video
const recorderVideo = ref(null)

Then, in the place where the local stream is obtained:

const getLocalStream = async () => {
  const stream = await navigator.mediaDevices.getUserMedia({
    audio: true,
    video: { facingMode: "user" }
  })
  localVideo.value.srcObject = stream
  localVideo.value.play()
  localStream.value = stream

  // Add the following ---------------------
  // Create the video recorder
  mediaRecorder.value = new MediaRecorder(localStream.value)

  // Collect recorded data as it becomes available
  mediaRecorder.value.ondataavailable = function (event) {
    chunks.value.push(event.data)
  }

  // When recording stops, build a playable Blob
  mediaRecorder.value.onstop = function () {
    // Combine all recorded chunks into a single Blob
    let recordedBlob = new Blob(chunks.value, { type: mediaRecorder.value.mimeType })
    // Create an object URL for it
    let recordedUrl = URL.createObjectURL(recordedBlob)
    // Point the playback video element at the recording
    recorderVideo.value.src = recordedUrl
    recorderVideo.value.play()
  }
  //---------------------


  return stream
}
// Start recording
const beginRecorder = () => {
  console.log('recording started')
  chunks.value = [] // reset the chunk buffer
  mediaRecorder.value.start()
}

// Stop recording
const overRecorder = () => {
  console.log('recording stopped')
  mediaRecorder.value.stop()
}

And that gives us video recording!
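Calling start() on a recorder that is already recording (or stop() on one that never started) throws an InvalidStateError, so it can be worth guarding on mediaRecorder.state. A guarded variant of the two functions above, as a sketch:

const beginRecorder = () => {
  if (!mediaRecorder.value || mediaRecorder.value.state === 'recording') return
  chunks.value = []           // reset the chunk buffer
  mediaRecorder.value.start() // optionally start(1000) to get a chunk every second
}

const overRecorder = () => {
  if (mediaRecorder.value && mediaRecorder.value.state === 'recording') {
    mediaRecorder.value.stop()
  }
}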

Recording audio only

On top of the existing code, we only need to add a few audio-specific refs and change which tracks are captured.

const recorderAudio = ref(null)
const chunks_audio = ref([]) // recorded audio data chunks
const mediaRecorder_audio = ref(null) // MediaRecorder instance for audio only


// Get the video and audio stream
const getLocalStream = async () => {
  try {
    console.log("Requesting user media...")
    const stream = await navigator.mediaDevices.getUserMedia({
      audio: true,
      video: { facingMode: "user" }
    })
    localVideo.value.srcObject = stream
    localVideo.value.play()
    localStream.value = stream

    // Create the video recorder
    mediaRecorder.value = new MediaRecorder(localStream.value)

    // Collect recorded data as it becomes available
    mediaRecorder.value.ondataavailable = function (event) {
      chunks.value.push(event.data)
    }

    // When recording stops, build a playable Blob
    mediaRecorder.value.onstop = function () {
      // Combine all recorded chunks into a single Blob
      let recordedBlob = new Blob(chunks.value, { type: mediaRecorder.value.mimeType })
      // Create an object URL for it
      let recordedUrl = URL.createObjectURL(recordedBlob)
      // Point the playback video element at the recording
      recorderVideo.value.src = recordedUrl
      recorderVideo.value.play()
    }
// Add the following ------
    // Keep only the audio tracks
    const audioStream = new MediaStream()
    stream.getAudioTracks().forEach(track => audioStream.addTrack(track))

    // Create the audio recorder
    mediaRecorder_audio.value = new MediaRecorder(audioStream)

    // Collect recorded audio data as it becomes available
    mediaRecorder_audio.value.ondataavailable = function (event) {
      chunks_audio.value.push(event.data)
    }

    // When audio recording stops, build a playable Blob
    mediaRecorder_audio.value.onstop = function () {
      // Combine all recorded chunks into a single Blob
      let recordedBlob = new Blob(chunks_audio.value, { type: mediaRecorder_audio.value.mimeType })
      // Create an object URL for it
      let recordedUrl = URL.createObjectURL(recordedBlob)
      // Point the playback audio element at the recording
      recorderAudio.value.src = recordedUrl
      recorderAudio.value.play()
    }
//------
    return stream
  } catch (error) {
    console.error("Error accessing media devices:", error)
  }
}


// Start recording audio
const beginRecorderAudio = () => {
  chunks_audio.value = [] // reset the chunk buffer
  mediaRecorder_audio.value.start()
}

// Stop recording audio
const overRecorderAudio = () => {
  mediaRecorder_audio.value.stop()
}

Audio recording and playback work as well!
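In this demo the camera is needed for the call anyway, so filtering the audio tracks out of the combined stream is fine. If you ever only need audio, an alternative sketch is to request an audio-only stream directly, which avoids the camera permission prompt entirely:

// Request the microphone only; no camera permission prompt is triggered
const getAudioOnlyStream = async () => {
  const audioStream = await navigator.mediaDevices.getUserMedia({ audio: true })
  mediaRecorder_audio.value = new MediaRecorder(audioStream)
  return audioStream
}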

Uploading the video to the server as an mp4 file

Server side:

npm install multer
npm install fluent-ffmpeg
npm install @ffmpeg-installer/ffmpeg @ffprobe-installer/ffprobe
const multer = require('multer');
const path = require('path');

const ffmpeg = require('fluent-ffmpeg');

// Serve static files from the 'public' folder
app.use(express.static('public'));

// Use the installer packages to avoid a local ffmpeg installation
const ffmpegPath = require('@ffmpeg-installer/ffmpeg').path;
const ffprobePath = require('@ffprobe-installer/ffprobe').path;

ffmpeg.setFfmpegPath(ffmpegPath);
ffmpeg.setFfprobePath(ffprobePath);


// Configure multer storage
const storage = multer.diskStorage({
    destination: (req, file, cb) => {
        cb(null, path.join(__dirname, 'public')); // save files to the 'public' folder
    },
    filename: (req, file, cb) => {
        const ext = path.extname(file.originalname);
        const filename = Date.now() + ext; // use a timestamp to avoid filename collisions
        cb(null, filename);
    },
});

const upload = multer({ storage: storage });


// POST endpoint that handles the file upload
app.post('/upload', upload.single('file'), (req, res) => {
    if (!req.file) {
      return res.status(400).send('No file was uploaded.');
    }

    const inputFilePath = path.join(__dirname, 'public', req.file.filename);
    const outputFilePath = inputFilePath.replace('.webm', '.mp4');

    ffmpeg(inputFilePath)
      .output(outputFilePath)
      .on('end', () => {
        console.log('Conversion finished:', outputFilePath);
        res.status(200).send(`File uploaded and converted. File URL: /${path.basename(outputFilePath)}`);
      })
      .on('error', (err) => {
        console.error('Conversion error:', err);
        res.status(500).send('Error while converting the file.');
      })
      .run();
  });
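By default, fluent-ffmpeg lets ffmpeg pick the codecs from the output extension. If playback compatibility of the resulting mp4 matters, you can be explicit about the codecs; this is an optional tweak under my own assumptions, not something the demo requires:

ffmpeg(inputFilePath)
    .videoCodec('libx264')                    // H.264 video for broad mp4 support
    .audioCodec('aac')                        // AAC audio
    .outputOptions(['-movflags', 'faststart']) // put the moov atom up front for web playback
    .output(outputFilePath)
    .on('end', () => { /* same as above */ })
    .on('error', (err) => { /* same as above */ })
    .run();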

Client side:

// Upload helper
const uploadFile = (blob, type) => {
  const formData = new FormData();
  formData.append('file', blob, `recorded.${type === 'video' ? 'webm' : 'webm'}`); // set the filename based on the file type

  fetch('http://localhost:3000/upload', {
    method: 'POST',
    body: formData,
  })
    .then(response => response.text())
    .then(data => {
      console.log('Success:', data);
    })
    .catch(error => {
      console.error('Error:', error);
    });
}

Then change the original video-recording code to the following, so that once recording stops the file is uploaded automatically and converted to mp4.

    // Set the recorder's MIME type explicitly
    const videoMimeType = 'video/webm;codecs=vp8';
    mediaRecorder.value = new MediaRecorder(localStream.value, { mimeType: videoMimeType });
    // // Create the video recorder
    // mediaRecorder.value = new MediaRecorder(localStream.value)

    // Collect recorded data as it becomes available
    mediaRecorder.value.ondataavailable = function (event) {
      chunks.value.push(event.data)
    }

    // When recording stops, upload and play back the result
    mediaRecorder.value.onstop = function () {
      // Combine all recorded chunks into a single Blob
      let recordedBlob = new Blob(chunks.value, { type: mediaRecorder.value.mimeType })

      uploadFile(recordedBlob, 'video'); // upload the recorded video
      // Create an object URL for it
      let recordedUrl = URL.createObjectURL(recordedBlob)
      // Point the playback video element at the recording
      recorderVideo.value.src = recordedUrl
      recorderVideo.value.play()
    }

You can first test whether the browser supports these formats:

    const supportedTypes = [
      'video/webm;codecs=vp9',
      'video/webm;codecs=vp8',
      'audio/webm;codecs=opus',
      'audio/ogg;codecs=opus'
    ];

    supportedTypes.forEach(type => {
      console.log(type + " is " + (MediaRecorder.isTypeSupported(type) ? "supported" : "not supported"));
    });

In my tests, both Edge and Chrome reported support for the formats I needed.
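Rather than hard-coding the mime type, a small helper can pick the first supported one. A sketch using the same candidate list:

const pickMimeType = (candidates) => {
  // Returns the first type this browser's MediaRecorder can actually produce, or undefined
  return candidates.find(type => MediaRecorder.isTypeSupported(type))
}

const videoMimeType = pickMimeType(['video/webm;codecs=vp9', 'video/webm;codecs=vp8', 'video/webm'])
const audioMimeType = pickMimeType(['audio/webm;codecs=opus', 'audio/ogg;codecs=opus'])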

After saving, the public folder will contain files with two extensions, webm and mp4: webm is what we originally saved and mp4 is the converted output. So we can write a small function to delete the webm files.

// Requires fs and path: const fs = require('fs'); const path = require('path');
function deleteWebmFiles() {
    // Read every file in the directory
    fs.readdir('public', (err, files) => {
        if (err) {
            console.error('Failed to read directory:', err);
            return;
        }

        // Keep only the .webm files
        const webmFiles = files.filter(file => path.extname(file).toLowerCase() === '.webm');

        // Delete every .webm file
        webmFiles.forEach(file => {
            const filePath = path.join('public', file);
            fs.unlink(filePath, err => {
                if (err) {
                    console.error(`Failed to delete file: ${filePath}`, err);
                } else {
                    console.log(`Deleted file: ${filePath}`);
                }
            });
        });
    });
}
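Deleting every .webm in public is fine for a single-user demo, but it can also remove a file that another request is still converting. A safer variant is to delete just the input file of the current request once its conversion finishes, as a sketch:

// Inside the .on('end', ...) callback of the upload handler:
fs.unlink(inputFilePath, err => {
    if (err) {
        console.error(`Failed to delete ${inputFilePath}`, err);
    }
});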

Uploading the audio to the server as an mp3 file

It is essentially the same as the video flow.

Client side

// Upload the audio
const uploadAudio = (blob) => {
  const formData = new FormData();
  formData.append('file', blob, 'recording.webm');

  fetch('http://localhost:3000/uploadAudio', {
    method: 'POST',
    body: formData,
  })
    .then(response => response.text())
    .then(data => {
      console.log('Success:', data);
    })
    .catch(error => {
      console.error('Error:', error);
    });
}


    const audioMimeType = 'audio/webm;codecs=opus';
    // Keep only the audio tracks
    const audioStream = new MediaStream();
    stream.getAudioTracks().forEach(track => audioStream.addTrack(track));

    // Create the audio recorder with an explicit MIME type
    mediaRecorder_audio.value = new MediaRecorder(audioStream, { mimeType: audioMimeType });

    // Collect recorded audio data as it becomes available
    mediaRecorder_audio.value.ondataavailable = function (event) {
      chunks_audio.value.push(event.data);
    };

    // When audio recording stops, upload and play back the result
    mediaRecorder_audio.value.onstop = function () {
      // Combine all recorded chunks into a single Blob
      let recordedBlob = new Blob(chunks_audio.value, { type: mediaRecorder_audio.value.mimeType });
      // Create an object URL for it
      let recordedUrl = URL.createObjectURL(recordedBlob);
      // Upload, then point the playback audio element at the recording
      uploadAudio(recordedBlob);
      recorderAudio.value.src = recordedUrl;
      recorderAudio.value.play();
    };

Server side:


app.post('/uploadAudio', upload.single('file'), (req, res) => {
    if (!req.file) {
        return res.status(400).send('No file was uploaded.');
    }
    const inputFilePath = path.join(__dirname, 'public', req.file.filename);
    const outputFilePath = inputFilePath.replace('.webm', '.mp3');
    ffmpeg(inputFilePath)
        .output(outputFilePath)
        .on('end', () => {
            console.log('Conversion finished:', outputFilePath);
            deleteWebmFiles()
            res.status(200).send(`File uploaded and converted. File URL: /${path.basename(outputFilePath)}`);
        })
        .on('error', (err) => {
            console.error('Conversion error:', err);
            res.status(500).send('Error while converting the file.');
        })
        .run();
});
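As with the mp4 route, you can be explicit about the audio conversion if you prefer. This is optional; as far as I know the static builds shipped by @ffmpeg-installer include the lame encoder, but treat that as an assumption:

ffmpeg(inputFilePath)
    .noVideo()                   // drop any video track, keep only audio
    .audioCodec('libmp3lame')    // assumption: the bundled ffmpeg build includes lame
    .audioBitrate('128k')
    .output(outputFilePath)
    .on('end', () => { /* same as above */ })
    .on('error', (err) => { /* same as above */ })
    .run();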

Complete code

Client

<script setup>
import { ref, onMounted } from 'vue'
import { io } from 'socket.io-client';

const socket = ref(null)
const roomid = '001'
const localStream = ref(null)
const localVideo = ref(null)
const remoteVideo = ref(null)
const peer = ref(null)
const offer = ref(null)
const inviter = ref(false)   // whether this client is the inviter
const Beinviter = ref(false) // whether this client is the invitee


const chunks = ref([]) // recorded data chunks
const mediaRecorder = ref(null) // MediaRecorder instance for audio/video
const recorderVideo = ref(null)

const recorderAudio = ref(null)
const chunks_audio = ref([]) // recorded audio data chunks
const mediaRecorder_audio = ref(null) // MediaRecorder instance for audio only

peer.value = new RTCPeerConnection()

// Upload the recorded video
const uploadFileVideo = (blob, type) => {
  const formData = new FormData();
  formData.append('file', blob, `recorded.${type === 'video' ? 'webm' : 'webm'}`); // set the filename based on the file type

  fetch('http://localhost:3000/uploadVideo', {
    method: 'POST',
    body: formData,
  })
    .then(response => response.text())
    .then(data => {
      console.log('Success:', data);
    })
    .catch(error => {
      console.error('Error:', error);
    });
}

// Upload the audio
const uploadAudio = (blob) => {
  const formData = new FormData();
  formData.append('file', blob, 'recording.webm');

  fetch('http://localhost:3000/uploadAudio', {
    method: 'POST',
    body: formData,
  })
    .then(response => response.text())
    .then(data => {
      console.log('Success:', data);
    })
    .catch(error => {
      console.error('Error:', error);
    });
}
// Get the video and audio stream
const getLocalStream = async () => {
  try {
    console.log("Requesting user media...")
    const stream = await navigator.mediaDevices.getUserMedia({
      audio: true,
      video: { facingMode: "user" }
    })
    localVideo.value.srcObject = stream
    localVideo.value.play()
    localStream.value = stream

    // Set the recorder's MIME type explicitly
    const videoMimeType = 'video/webm;codecs=vp8';
    mediaRecorder.value = new MediaRecorder(localStream.value, { mimeType: videoMimeType });
    // // Create the video recorder
    // mediaRecorder.value = new MediaRecorder(localStream.value)

    // Collect recorded data as it becomes available
    mediaRecorder.value.ondataavailable = function (event) {
      chunks.value.push(event.data)
    }

    // When recording stops, upload and play back the result
    mediaRecorder.value.onstop = function () {
      // Combine all recorded chunks into a single Blob
      let recordedBlob = new Blob(chunks.value, { type: mediaRecorder.value.mimeType })

      uploadFileVideo(recordedBlob, 'video'); // upload the recorded video
      // Create an object URL for it
      let recordedUrl = URL.createObjectURL(recordedBlob)
      // Point the playback video element at the recording
      recorderVideo.value.src = recordedUrl
      recorderVideo.value.play()
    }

    const audioMimeType = 'audio/webm;codecs=opus';
    // Keep only the audio tracks
    const audioStream = new MediaStream();
    stream.getAudioTracks().forEach(track => audioStream.addTrack(track));

    // Create the audio recorder with an explicit MIME type
    mediaRecorder_audio.value = new MediaRecorder(audioStream, { mimeType: audioMimeType });

    // Collect recorded audio data as it becomes available
    mediaRecorder_audio.value.ondataavailable = function (event) {
      chunks_audio.value.push(event.data);
    };

    // When audio recording stops, upload and play back the result
    mediaRecorder_audio.value.onstop = function () {
      // Combine all recorded chunks into a single Blob
      let recordedBlob = new Blob(chunks_audio.value, { type: mediaRecorder_audio.value.mimeType });
      // Create an object URL for it
      let recordedUrl = URL.createObjectURL(recordedBlob);
      // Upload, then point the playback audio element at the recording
      uploadAudio(recordedBlob);
      recorderAudio.value.src = recordedUrl;
      recorderAudio.value.play();
    };

    return stream
  } catch (error) {
    console.error("Error accessing media devices:", error)
  }
}
// Start recording audio
const beginRecorderAudio = () => {
  chunks_audio.value = [] // reset the chunk buffer
  mediaRecorder_audio.value.start()
}

// Stop recording audio
const overRecorderAudio = () => {
  mediaRecorder_audio.value.stop()
}


// Start recording
const beginRecorder = () => {
  console.log('recording started')
  chunks.value = [] // reset the chunk buffer
  mediaRecorder.value.start()
}

// Stop recording
const overRecorder = () => {
  console.log('recording stopped')
  mediaRecorder.value.stop()
}

const agree = () => {
  socket.value.emit('replyInvite', {
    reply: true,
    roomid
  })
}

const getOffice = async () => {
  if (localStream.value) {
    peer.value.addStream(localStream.value)
    const offerCreated = await peer.value.createOffer({
      offerToReceiveAudio: 1,
      offerToReceiveVideo: 1
    })
    offer.value = offerCreated
    await peer.value.setLocalDescription(offerCreated)
    // getAnswer()

  } else {
    console.error('Local stream is not available.')
  }
}

onMounted(() => {
  socket.value = io('http://127.0.0.1:3000', {
    transports: ['websocket'], // transport to use (WebSocket)
    autoConnect: true,         // connect automatically
    reconnection: true,        // reconnect automatically
    reconnectionAttempts: 3,   // number of reconnection attempts
    reconnectionDelay: 1000,   // delay between reconnection attempts (ms)
  })
  socket.value.emit('joinroom', roomid)

  // Receive the invitation
  socket.value.on('callRemote', () => {
    if (!inviter.value) {
      Beinviter.value = true
      console.log('invitation received')
    }
  })

  socket.value.on('otherReply', async (value) => {
    if (value) {
      if (inviter.value) {
        await getOffice() // create the offer
        console.log(offer.value)
        peer.value.onicecandidate = (e) => {
          if (e.candidate) {
            console.log("A's candidate", e.candidate)
            socket.value.emit('sendCandidate', {
              candidate: e.candidate,
              roomid
            })
          }
        }
        peer.value.ontrack = (e) => {
          console.log("A received B's stream", e.streams[0])
          remoteVideo.value.srcObject = e.streams[0]
          remoteVideo.value.play()
        }
        socket.value.emit('sendOffer', {
          offer: offer.value,
          roomid
        })
      }

    }
  })

  socket.value.on('otherOffice', async (Office) => {
    if (Beinviter.value) {
      offer.value = Office
      console.log('offer received', Office)
      // peer.value = new RTCPeerConnection()
      peer.value.onicecandidate = (e) => {
        if (e.candidate) {
          console.log("B's candidate", e.candidate)
          socket.value.emit('sendCandidate', {
            candidate: e.candidate,
            roomid
          })
        }
      }
      peer.value.ontrack = (e) => {
        console.log("B received A's stream", e.streams[0])
        remoteVideo.value.srcObject = e.streams[0]
        remoteVideo.value.play()
      }
      // Add the local audio/video stream
      const stream = await getLocalStream()
      peer.value.addStream(stream)
      // Set the remote description (SDP)
      await peer.value.setRemoteDescription(offer.value)
      // Create the answer
      const answer = await peer.value.createAnswer()
      console.log('answer created', answer)
      // Set the answer as the local description
      await peer.value.setLocalDescription(answer)
      // Send the answer
      socket.value.emit('sendAnswer', { answer: answer, roomid: roomid })
    }
  })

  socket.value.on('otherAnswer', async (answer) => {
    if (inviter.value) {
      console.log('answer received', answer)
      await peer.value.setRemoteDescription(answer)
    }
  })

  socket.value.on('othercandidate', async (candidate) => {
    // if (Beinviter.value) {
    //   console.log('candidate received', candidate)
    // }
    console.log('candidate received', candidate)
    await peer.value.addIceCandidate(candidate)
  })

})

const invite = async () => {
  inviter.value = true
  await getLocalStream()
  // getOffice()
  console.log('invitation sent')
  socket.value.emit('invite', roomid)
}
</script>

<template>
  <div style="display: flex;text-align: center;">
    <div class="box" style="width: 200px; height: 300px; border: 1px solid black; margin: 20px auto;">
      <h2>Mine</h2>
      <video ref="localVideo" style="width: 100%; height: 100%;" autoplay muted playsinline></video>
    </div>
    <div class="box" style="width: 200px; height: 300px; border: 1px solid black; margin: 20px auto;">
      <h2>Remote</h2>
      <video ref="remoteVideo" style="width: 100%; height: 100%;" autoplay muted playsinline></video>
    </div>
    <div class="box" style="width: 200px; height: 300px; border: 1px solid black; margin: 20px auto;">
      <h2>Recorded video</h2>
      <video ref="recorderVideo" style="width: 100%; height: 100%;" controls></video>
    </div>
    <div class="box" style="width: 200px; height: 50px; border: 1px solid black; margin: 20px auto;">
      <h2>Recorded audio</h2>
      <audio ref="recorderAudio" controls></audio>
    </div>
  </div>
  <div style="width: 100%; height: 300px;"></div>
  <button @click="invite">Invite</button>
  <button @click="agree">Accept</button>
  <button @click="beginRecorder">Start recording</button>
  <button @click="overRecorder">Stop recording</button>
  <button @click="beginRecorderAudio">Record audio only</button>
  <button @click="overRecorderAudio">Stop audio recording</button>
  <button>Hang up</button>
</template>


<style scoped></style>

Server

const express = require('express');
const http = require('http');
const socketIo = require('socket.io');
const fs = require('fs');
const path = require('path');
const multer = require('multer');

const ffmpeg = require('fluent-ffmpeg');

// Use the installer packages to avoid a local ffmpeg installation
const ffmpegPath = require('@ffmpeg-installer/ffmpeg').path;
const ffprobePath = require('@ffprobe-installer/ffprobe').path;

ffmpeg.setFfmpegPath(ffmpegPath);
ffmpeg.setFfprobePath(ffprobePath);

const app = express();
const server = http.createServer(app);
const io = socketIo(server);
let connectedUsers = {};
const PORT = process.env.PORT || 3000;
let index = 0

app.all('*', function (req, res, next) {
    res.header('Access-Control-Allow-Origin', '*');
    res.header('Access-Control-Allow-Headers', 'Content-Type');
    res.header('Access-Control-Allow-Methods', '*');
    res.header('Content-Type', 'application/json;charset=utf-8');
    next();
});
app.use((req, res, next) => {
    res.setHeader('Access-Control-Allow-Origin', '*');
    res.setHeader('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE');
    res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization, token');
    next();
});


// Configure multer storage
const storage = multer.diskStorage({
    destination: (req, file, cb) => {
        cb(null, path.join(__dirname, 'public')); // save files to the 'public' folder
    },
    filename: (req, file, cb) => {
        const ext = path.extname(file.originalname);
        const filename = Date.now() + ext; // use a timestamp to avoid filename collisions
        cb(null, filename);
    },
});

const upload = multer({ storage: storage });
function deleteWebmFiles() {
    // Read every file in the directory
    fs.readdir('public', (err, files) => {
        if (err) {
            console.error('Failed to read directory:', err);
            return;
        }

        // Keep only the .webm files
        const webmFiles = files.filter(file => path.extname(file).toLowerCase() === '.webm');

        // Delete every .webm file
        webmFiles.forEach(file => {
            const filePath = path.join('public', file);
            fs.unlink(filePath, err => {
                if (err) {
                    console.error(`Failed to delete file: ${filePath}`, err);
                } else {
                    console.log(`Deleted file: ${filePath}`);
                }
            });
        });
    });
}


// Serve static files from the 'public' folder
app.use(express.static('public'));
// POST endpoint that handles the video upload
app.post('/uploadVideo', upload.single('file'), (req, res) => {
    if (!req.file) {
        return res.status(400).send('No file was uploaded.');
    }
    const inputFilePath = path.join(__dirname, 'public', req.file.filename);
    const outputFilePath = inputFilePath.replace('.webm', '.mp4');
    ffmpeg(inputFilePath)
        .output(outputFilePath)
        .on('end', () => {
            console.log('Conversion finished:', outputFilePath);
            deleteWebmFiles()
            res.status(200).send(`File uploaded and converted. File URL: /${path.basename(outputFilePath)}`);
        })
        .on('error', (err) => {
            console.error('Conversion error:', err);
            res.status(500).send('Error while converting the file.');
        })
        .run();
});


app.post('/uploadAudio', upload.single('file'), (req, res) => {
    if (!req.file) {
        return res.status(400).send('No file was uploaded.');
    }
    const inputFilePath = path.join(__dirname, 'public', req.file.filename);
    const outputFilePath = inputFilePath.replace('.webm', '.mp3');
    ffmpeg(inputFilePath)
        .output(outputFilePath)
        .on('end', () => {
            console.log('Conversion finished:', outputFilePath);
            deleteWebmFiles()
            res.status(200).send(`File uploaded and converted. File URL: /${path.basename(outputFilePath)}`);
        })
        .on('error', (err) => {
            console.error('Conversion error:', err);
            res.status(500).send('Error while converting the file.');
        })
        .run();
});

io.on('connection', (socket) => {
    socket.emit('connectsuccess', socket.id)
    console.log(index + 'User come')
    index++
    // Keep track of connected users
    connectedUsers[socket.id] = socket;
    socket.on('disconnect', () => {
        console.log('A user disconnected');
        index++
    });
    socket.on('joinroom', value => {
        socket.join(value)
    })
    socket.on('invite', roomid => {
        socket.to(roomid).emit('callRemote')
    })
    socket.on('replyInvite', ({ reply, roomid }) => {
        console.log(reply)
        if (reply) {
            socket.to(roomid).emit('otherReply', true)
        }
    })
    socket.on('sendOffer', ({ offer, roomid }) => {
        console.log(roomid)
        socket.to(roomid).emit('otherOffice', offer)
    })

    socket.on('sendAnswer', ({ answer, roomid }) => {
        socket.to(roomid).emit('otherAnswer', answer)
    })
    socket.on('sendCandidate', ({ candidate, roomid }) => {
        socket.to(roomid).emit('othercandidate', candidate)
    })
});

server.listen(PORT, () => {
    console.log(`Server is running on port ${PORT}`);
});
