vue3 基于faceapi.js实现人脸识别

10 篇文章 0 订阅
vue3 基于faceapi.js实现人脸识别

先贴代码

<template>
  <div class="app-container">
    <div>{{ title }}</div>
    <div class="x-face-detect-modal">
      <!-- Camera feed; srcObject is assigned in onMounted.
           FIX: removed :onCanplay="handleVideoCanPlay" — that handler is never
           defined in the script and would throw a template resolution warning. -->
      <video ref="video" autoplay />
      <!-- Detection overlay canvas.
           FIX: removed width="{this.width}" / height="{this.height}" — that is
           React-JSX syntax, invalid in Vue, and `this` does not exist in
           <script setup>; the canvas is sized in JS via matchDimensions. -->
      <canvas ref="canvas" />
    </div>
  </div>
</template>
<script setup lang="ts">
import { ref, onMounted } from 'vue'
import { uploadFile } from '@/api/base' // 这里需要使用 图片对比接口
import { detectSingleFace, nets, matchDimensions, resizeResults, draw, SsdMobilenetv1Options, Box } from 'face-api.js'
const options = new SsdMobilenetv1Options({
  // Minimum confidence threshold for a detection to count (library default: 0.5).
  minConfidence: 0.5
})
const formId = 'x-face-detect-form'
const title = ref('人脸识别') // page title
// Template refs. FIX: they must start as null, not as the strings 'canvas' /
// 'video' — before mount the string value would be mistaken for an element
// (e.g. canvas.value.width would be undefined instead of failing the null check).
const canvas = ref<HTMLCanvasElement | null>(null) // detection overlay canvas
const video = ref<HTMLVideoElement | null>(null) // camera video element
const stream = ref<MediaStream | null>(null) // active camera stream, kept so tracks can be stopped
const getUserMediaFail = ref(false) // true when camera access failed / was denied
const boxObject = ref({ width: 100, height: 100 }) // view-finder box size in px
// Four corner points of the view-finder rectangle; filled in by initViewFinder().
const viewFinderBox = ref({
  topLeft: {
    x: 0,
    y: 0
  },
  topRight: {
    x: 0,
    y: 0
  },
  bottomLeft: {
    x: 0,
    y: 0
  },
  bottomRight: {
    x: 0,
    y: 0
  }
})

// 加载算法模型 文件存储在 public 文件夹下models文件夹。// 需要文件的话联系我
// Load the SSD MobileNet v1 detector weights, served from public/models.
const init = (): Promise<void> => nets.ssdMobilenetv1.loadFromUri('/models')

/** @name 调用摄像头 */
/** @name Request camera access, preferring the front-facing camera. */
const getUserMedia = (success: NavigatorUserMediaSuccessCallback, error: NavigatorUserMediaErrorCallback) => {
  // facingMode 'user' prefers the front camera; { exact: 'environment' }
  // would force the rear one. 'ideal' lets the browser pick the closest
  // supported resolution rather than failing on an exact match.
  const constraints = {
    video: {
      facingMode: 'user',
      width: { ideal: canvas.value.width },
      height: { ideal: canvas.value.height }
    }
  }
  if (navigator.mediaDevices.getUserMedia) {
    // Modern, promise-based standard API.
    navigator.mediaDevices.getUserMedia(constraints).then(success).catch(error)
    return
  }
  // Legacy callback-based fallbacks, checked in the same order as before:
  // WebKit-prefixed, Firefox-prefixed, then the unprefixed old API.
  const legacyGetUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.getUserMedia
  if (legacyGetUserMedia) {
    legacyGetUserMedia.call(navigator, constraints, success, error)
  }
}
/** @name 初始化取景框 */
/** @name Initialize the view-finder: center the box in the video and size the canvas. */
const initViewFinder = () => {
  if (!video.value) return
  const marginLeft = (video.value.width - boxObject.value.width) / 2
  // BUG FIX: the vertical margin must be computed from the video HEIGHT;
  // the original copy-pasted `.width` here, mis-centering the box vertically.
  const marginTop = (video.value.height - boxObject.value.height) / 2
  if (canvas.value) {
    // Keep the overlay canvas the same size as the video element.
    canvas.value.width = video.value.width
    canvas.value.height = video.value.height
  }
  // The four corners of the centered view-finder rectangle.
  viewFinderBox.value = {
    topLeft: {
      x: marginLeft,
      y: marginTop
    },
    topRight: {
      x: marginLeft + boxObject.value.width,
      y: marginTop
    },
    bottomLeft: {
      x: marginLeft,
      y: marginTop + boxObject.value.height
    },
    bottomRight: {
      x: marginLeft + boxObject.value.width,
      y: marginTop + boxObject.value.height
    }
  }
}

/** @name 绘制取景框 */
/** @name Draw the view-finder: a hint text plus four corner brackets. */
const drawViewFinder = () => {
  const context = canvas.value?.getContext('2d')
  if (!context) return
  const cornerLen = 50 // length of each corner bracket arm, in px (fixes the 'rectWith' typo)
  context.clearRect(0, 0, canvas.value?.width || 0, canvas.value?.height || 0)
  // Roughly center the hint text horizontally (text width estimated at 200px).
  const fontLeft = video.value ? (video.value.width - 200) / 2 : 200
  context.font = '20px Arial'
  context.fillText('请保持脸部在取景框内', fontLeft, 50)
  // BUG FIX: start a fresh path. clearRect does NOT reset the current path,
  // so without beginPath() every previous frame's segments accumulate and
  // get re-stroked on each call.
  context.beginPath()
  const keys = Object.keys(viewFinderBox.value)
  keys.forEach((key) => {
    const point = viewFinderBox.value[key]
    if (!point) return
    // Each corner draws two bracket arms pointing inward along the rectangle
    // edges: left corners extend right (+x), top corners extend down (+y), etc.
    const dx = key === 'topLeft' || key === 'bottomLeft' ? cornerLen : -cornerLen
    const dy = key === 'topLeft' || key === 'topRight' ? cornerLen : -cornerLen
    context.moveTo(point.x, point.y)
    context.lineTo(point.x + dx, point.y)
    context.moveTo(point.x, point.y)
    context.lineTo(point.x, point.y + dy)
  })
  context.lineWidth = 2
  context.strokeStyle = 'white'
  context.stroke()
}

/** @name 截取快照 */
/** @name Capture the face region (expanded by a 40px margin) from the video as a JPEG blob. */
const cameraShoot = (video: HTMLVideoElement, startPoint: { x: number; y: number }, width: number, height: number) => {
  const snapshot = document.createElement('canvas')
  snapshot.width = video.videoWidth
  snapshot.height = video.videoHeight
  const context = snapshot.getContext('2d')
  // Source rect: the detected box grown by 40px on every side,
  // scaled up to fill the full-size snapshot canvas.
  context?.drawImage(
    video,
    startPoint.x - 40,
    startPoint.y - 40,
    width + 80,
    height + 80,
    0,
    0,
    snapshot.width,
    snapshot.height
  )
  // toBlob is callback-based; wrap it so callers can await the blob.
  return new Promise<Blob | null>((resolve) => snapshot.toBlob(resolve, 'image/jpeg'))
}
// 画盒子
// Draw the detection box with a text label onto the overlay canvas.
const drawBox = (box, label) => {
  const target = canvas.value
  if (!target) return
  // Clear only the area under the box before drawing over it.
  target.getContext('2d')?.clearRect(box.x, box.y, box.width, box.height)
  new draw.DrawBox(box, { label }).draw(target)
}

// 停止
// Stop every track of the active camera stream (releases the camera).
const handleStopVideo = () => {
  stream.value?.getTracks().forEach((track) => track.stop())
}

/** @name 人脸检测 */
/** @name Face-detection loop: detect → draw box → snapshot → (compare) → resume. */
const detectFace = async () => {
  // Yield one animation frame per iteration — critical: without it the
  // recursive loop monopolises the main thread and freezes the page.
  await new Promise((resolve) => requestAnimationFrame(resolve))
  // Retry next frame until both elements exist and the video is actually playing.
  if (!canvas.value || !video.value || !video.value.currentTime || video.value.paused || video.value.ended)
    return detectFace()
  // Detect the single face with the highest confidence score.
  const result = await detectSingleFace(video.value, options)
  if (!result) return detectFace()
  // Sync the canvas size to the displayed video, then rescale the detection
  // in case the displayed size differs from the native frame size.
  const dims = matchDimensions(canvas.value, video.value, true)
  const resizedResult = resizeResults(result, dims)
  const box = resizedResult.box
  drawBox(box, '识别中')
  video.value.pause()
  // Grab the detected face region as a JPEG blob.
  const image = await cameraShoot(
    video.value,
    resizedResult.box.topLeft,
    resizedResult.box.width,
    resizedResult.box.height
  )
  if (!image) {
    drawBox(box, '识别失败')
    // BUG FIX: the original called `delay(1000)` but `delay` was never
    // defined anywhere in the file (ReferenceError on this path) — inline
    // the one-second pause instead.
    await new Promise((resolve) => setTimeout(resolve, 1000))
    video.value.play()
    return detectFace()
  }
  const files = new window.File([image], '人脸头像.jpeg', {
    type: 'image/jpeg'
  })
  // TODO: call a real face-comparison API with `files` here, e.g.:
  // const detectResult = await uploadFile({ file: files })
  // if (!detectResult) { drawBox(box, '识别失败'); ...retry... }
  // On success, call handleStopVideo() to release the camera.
  video.value.play()
  return detectFace()
}
// onMounted
// Component setup: open the camera, load the model, then start detecting.
onMounted(async () => {
  console.log('mounted', canvas.value, video.value)
  // Acquire the camera stream and pipe it into the <video> element.
  getUserMedia(
    (streams) => {
      // Keep the stream so its tracks can be stopped later.
      stream.value = streams
      if (video.value) {
        video.value['srcObject'] = streams
      }
    },
    () => (getUserMediaFail.value = true)
  )
  // BUG FIX: the model load must complete BEFORE the first detection —
  // the original fired init() and detectFace() concurrently, so
  // detectSingleFace could run against unloaded weights.
  await init()
  detectFace()
})
</script>
<style lang="scss">
.x-face-detect-modal {
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  position: relative;
  // Mirror the whole container horizontally so the front camera
  // behaves like a mirror (video and overlay canvas flip together).
  transform: rotateY(180deg);
  // overflow: hidden;
  canvas {
    // Stack the detection canvas on top of the video.
    position: absolute;
    top: 0;
  }
  video {
    // Stretch the video to fill its element box.
    object-fit: fill;
  }
}
</style>


  • 5
    点赞
  • 12
    收藏
    觉得还不错? 一键收藏
  • 12
    评论
要在Vue3中使用face-api.js进行人脸识别,需要先安装face-api.js库和相关依赖。可以通过以下命令进行安装: ```bash npm install face-api.js canvas --save ``` 接下来,在Vue3中使用face-api.js需要执行以下步骤: 1. 在Vue组件中引入face-api.js库: ```javascript import * as faceapi from 'face-api.js'; ``` 2. 在Vue组件中创建一个canvas元素,并将其添加到DOM中: ```javascript mounted() { const video = this.$refs.video; const canvas = this.$refs.canvas; const context = canvas.getContext('2d'); document.body.append(canvas); } ``` 3. 加载face-api.js模型: ```javascript async loadModels() { const MODEL_URL = '/models'; await faceapi.loadSsdMobilenetv1Model(MODEL_URL); await faceapi.loadFaceLandmarkModel(MODEL_URL); await faceapi.loadFaceRecognitionModel(MODEL_URL); await faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL); } ``` 4. 进行人脸识别: ```javascript async detectFace() { const video = this.$refs.video; const canvas = this.$refs.canvas; const context = canvas.getContext('2d'); const { width, height } = video.getBoundingClientRect(); canvas.width = width; canvas.height = height; const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()) .withFaceLandmarks() .withFaceDescriptors(); context.clearRect(0, 0, canvas.width, canvas.height); faceapi.draw.drawDetections(canvas, detections); faceapi.draw.drawFaceLandmarks(canvas, detections); } ``` 以上就是在Vue3中使用face-api.js进行人脸识别的基本步骤。需要注意的是,在调用 face-api.js API 时,需要将视频帧绘制到 canvas 元素上来展示检测结果。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 12
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值