1. 下载model放置在工程public文件夹下, 该model是人脸识别核心大模型
2. 安装 face-api.js 插件: npm install face-api.js
3. 引入该页面 页面中包含了人脸识别样式 跳转至该页面即可使用
兼容ios 的 Safari浏览器
capturePhoto 方法中 将拿到的人脸照片包裹至formData中 可自行上传服务器
4. 注意事项 : 代码中img标签引用的图标 下载下来放置在assets文件夹下即可
<template>
<div class="container">
<div class="title">人像捕捉</div>
<!-- <div class="nav">创建面部密码,保障数据安全</div> -->
<p v-if="message" class="nav">{{ message }}</p>
<div class="borders">
<!-- playsinline keeps iOS Safari from forcing the video into fullscreen playback -->
<video ref="video" autoplay playsinline class="video"></video>
</div>
<div class="icon_title">拍摄须知</div>
<div class="icons">
<div class="item">
<img class="icon" src="@/assets/iocns.svg" alt="">
<div class="icon_name">保持光线充足</div>
</div>
<div class="item">
<img class="icon" src="@/assets/iocns.svg" alt="">
<div class="icon_name">请面对屏幕</div>
</div>
<div class="item">
<img class="icon" src="@/assets/iocns.svg" alt="">
<div class="icon_name">保持面无表情</div>
</div>
</div>
<!-- Fullscreen loading overlay, shown until the camera stream is ready -->
<van-overlay :show="show">
<div class="wrapper">
<van-loading color="#1989fa" vertical>加载中...</van-loading>
</div>
</van-overlay>
</div>
</template>
<script setup>
import * as faceapi from 'face-api.js'; // 确保安装并引入 face-api.js
import { ref, onMounted } from 'vue'
const router = useRouter()
const route = useRoute()
// import { upload } from '@/api/index.js'
const show = ref(true) // 用于控制加载动画的显示与隐藏
const message = ref('') // 用于存储用户提示信息
const recognitionInterval = ref(null) // 用于存储识别的定时器
const video = ref(null) // 用于存储 video 元素
onMounted(() => {
init()
})
// Boot sequence: load the detection models first, then open the camera.
const init = async () => {
  await loadModels();
  await startVideo();
};
/**
 * Load the face-api.js model weights from the public /models folder.
 * The three nets are independent of each other, so fetch them in
 * parallel instead of serially awaiting each download.
 */
const loadModels = async () => {
  const MODEL_URL = '/models'; // model files live under public/models
  await Promise.all([
    faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL),
    faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
    faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL),
  ]);
  console.log("模型加载完成");
}
/**
 * Attach the camera stream to the <video> element, then start
 * recognition after a 1s warm-up and hide the loading overlay.
 * Fixes: getUserMedia rejection (permission denied / no camera) was
 * unhandled, leaving the overlay spinning forever; the old comment
 * claimed a 500ms delay while the code used 1000ms; clearing an
 * already-fired timeout inside its own callback was a no-op.
 */
const startVideo = async () => {
  const videoElement = video.value; // <video> element via template ref
  try {
    const stream = await navigator.mediaDevices.getUserMedia({ video: {} });
    videoElement.srcObject = stream;
  } catch (err) {
    // e.g. NotAllowedError when the user denies camera permission
    show.value = false;
    message.value = '无法访问摄像头,请检查权限后重试';
    return;
  }
  // Give the stream 1 second to warm up before sampling frames.
  setTimeout(() => {
    startRecognition();
    show.value = false;
  }, 1000);
}
/**
 * Poll the video element every 100ms for faces. When a detection with
 * confidence > 0.9 is found, capture the frame and hand it off for
 * upload; otherwise show positioning feedback to the user.
 * Fix: a single detection pass can easily take longer than the 100ms
 * interval, so an in-flight guard now prevents overlapping async
 * detections from piling up.
 */
const startRecognition = () => {
  const videoElement = video.value;
  videoElement.play();
  let busy = false; // true while a detection pass is still awaiting
  recognitionInterval.value = setInterval(async () => {
    if (busy) return; // skip this tick — previous pass not finished
    busy = true;
    try {
      const detections = await faceapi.detectAllFaces(video.value, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceDescriptors();
      if (detections.length > 0) {
        const score = detections[0].alignedRect.score;
        if (score > 0.9) {
          // Confidence above 0.9: grab the frame and send it on.
          const imgData = getBase64Image(video.value);
          sendToAttendanceMachine(imgData);
          message.value = '图像采集成功,正在传输请等待...';
          clearInterval(recognitionInterval.value); // stop polling
        } else {
          // Confidence too low: tell the user how to reposition.
          message.value = getUserFeedback(score);
        }
      } else {
        message.value = '未检测到人脸,请调整位置';
      }
    } finally {
      busy = false;
    }
  }, 100);
}
/**
 * Snapshot the current frame of a <video> element onto a canvas and
 * return it as a PNG data URL.
 * Fix: the caller (startRecognition) passes `video.value` as an
 * argument, but the function was declared parameterless, silently
 * ignoring it. It now accepts the source element, defaulting to the
 * page's video ref so existing zero-argument calls keep working.
 *
 * @param {HTMLVideoElement} [sourceVideo=video.value] frame source
 * @returns {string} PNG data URL ("data:image/png;base64,...")
 */
const getBase64Image = (sourceVideo = video.value) => {
  const canvas = document.createElement('canvas');
  canvas.width = sourceVideo.videoWidth;
  canvas.height = sourceVideo.videoHeight;
  const ctx = canvas.getContext('2d');
  ctx.drawImage(sourceVideo, 0, 0, canvas.width, canvas.height);
  return canvas.toDataURL('image/png'); // Base64 data URL
}
// Forward a captured frame (data URL) to the upload pipeline.
const sendToAttendanceMachine = (imgData) => {
  capturePhoto(imgData);
};
/**
 * Map a detection confidence score to a repositioning hint for the user.
 * @param {number} score - detection confidence, 0..1
 * @returns {string} hint text
 */
const getUserFeedback = (score) => {
  // Ordered tiers: first threshold the score falls under wins.
  const tiers = [
    [0.5, '请向前移动'],
    [0.7, '请向左或右移动'],
  ];
  for (const [limit, hint] of tiers) {
    if (score < limit) return hint;
  }
  return '请稍微调整位置';
};
/**
 * Convert a base64 image data URL into a Blob wrapped in FormData,
 * ready to be POSTed to the server (upload call left commented for
 * integrators to wire up).
 * Fix: the Blob MIME type was hard-coded to 'image/jpeg' and the file
 * named 'image.jpg' even though getBase64Image produces a PNG; the
 * type and extension are now derived from the data-URL header. The
 * built FormData is returned so callers/tests can inspect or upload it.
 *
 * @param {string} imgData - data URL, e.g. "data:image/png;base64,..."
 * @returns {Promise<FormData>} form data with the image under 'file'
 */
const capturePhoto = async (imgData) => {
  // Pull the real MIME type out of the header instead of assuming JPEG.
  const mimeMatch = imgData.match(/^data:(image\/\w+);base64,/);
  const mimeType = mimeMatch ? mimeMatch[1] : 'image/png';
  const extension = mimeType.split('/')[1];
  // Strip the data-URL header, then decode the base64 payload into raw bytes.
  const base64Data = imgData.replace(/^data:image\/\w+;base64,/, "");
  const byteCharacters = atob(base64Data);
  const byteArray = new Uint8Array(byteCharacters.length);
  for (let i = 0; i < byteCharacters.length; i++) {
    byteArray[i] = byteCharacters.charCodeAt(i);
  }
  const blob = new Blob([byteArray], { type: mimeType });
  const formData = new FormData();
  formData.append('file', blob, `image.${extension}`);
  // try {
  //   // upload endpoint
  //   let res = await upload(formData);
  //   if (res.code === 200) {
  //     router.replace({
  //       path: '/home',
  //       query: {
  //         url: res.data.url,
  //         imgId: res.data.ossId,
  //         idCard: route.query.idCard
  //       }
  //     })
  //   } else {
  //     message.value = res.msg
  //     startVideo()
  //   }
  // } catch (error) {
  //   showToast('上传失败');
  // }
  return formData;
}
// Tear down the camera stream on unmount — fixes the iOS black-screen
// bug caused by a stale stream when the page is entered a second time.
onBeforeUnmount(() => {
  const el = video.value;
  if (el) {
    const mediaStream = el.srcObject;
    if (mediaStream) {
      for (const track of mediaStream.getTracks()) {
        track.stop();
      }
    }
    el.srcObject = null;
  }
  clearInterval(recognitionInterval.value);
});
</script>
<style scoped lang="scss">
.container {
  display: flex;
  flex-direction: column;
  align-items: center;
  box-sizing: border-box;
  padding: 0 12px;
  padding-top: 40px;
  .title {
    font-weight: bold;
    font-size: 24px;
    color: #333333;
    margin-bottom: 12px;
    margin-top: 26px;
  }
  .nav {
    font-weight: 400;
    font-size: 15px;
    color: #666666;
    margin-bottom: 30px;
  }
}
.icon_title {
  font-weight: bold;
  font-size: 16px;
  color: #333333;
  line-height: 19px;
  margin-bottom: 30px;
  margin-top: 30px;
}
.icons {
  width: 100%;
  display: flex;
  justify-content: space-evenly;
  .item {
    display: flex;
    flex-direction: column;
    align-items: center;
    .icon {
      width: 20px;
      height: 20px;
      margin-bottom: 8px;
    }
    .icon_name {
      font-weight: 400;
      font-size: 14px;
      color: #333333;
    }
  }
}
// Fill the circular frame; rotateY(180deg) mirrors the preview like a
// selfie camera (plus -webkit- prefix for older iOS Safari).
.video {
  width: 100% !important;
  height: 100% !important;
  border-radius: 50% !important;
  object-fit: cover !important;
  transform: rotateY(180deg) !important;
  -webkit-transform: rotateY(180deg) !important;
}
// Circular blue viewport that clips the mirrored video.
.borders {
  width: 80% !important;
  height: 280px !important;
  border: 4px solid #4980FF !important;
  border-radius: 50% !important;
  box-sizing: border-box !important;
  padding: 2px !important;
  overflow: hidden;
}
// Centers the van-loading spinner inside the fullscreen overlay.
.wrapper {
  display: flex;
  align-items: center;
  justify-content: center;
  height: 100%;
}
// Removed dead rules: `.block` and the empty `.message {}` matched no
// element in this component's scoped template.
</style>
5. 实现效果