Example of using TensorFlow.js in a Vue project

This post walks through a face recognition system deployed in the browser with the TensorFlow.js library. The system consists of two models: FaceDetectionModel for face detection and FaceMeshModel for facial landmark prediction. In the page, the system captures a video stream and feeds each frame through FaceEngine, which runs detection and landmark prediction and returns feedback based on the results. It also evaluates lighting, position, and distance to ensure the face is captured under good conditions.

Face recognition example

1. Third-party dependencies

package.json

{
    "peerDependencies": {
        "@tensorflow/tfjs-backend-cpu": "3.8.0",
        "@tensorflow/tfjs-backend-webgl": "3.8.0",
        "@tensorflow/tfjs-backend-wasm": "3.8.0",
        "@tensorflow/tfjs-converter": "3.8.0",
        "@tensorflow/tfjs-core": "3.8.0"
    }
}
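The page code later selects a backend ('webgl') when constructing FaceEngine, and the dependency list also includes the wasm backend. A minimal sketch of backend initialization, assuming the wasm binaries are served from a placeholder path (not part of the original project):

import * as tf from '@tensorflow/tfjs-core';
import '@tensorflow/tfjs-backend-webgl';
import {setWasmPaths} from '@tensorflow/tfjs-backend-wasm';

// sketch: choose and initialize a backend before loading any model
export async function initBackend(backend = 'webgl') {
  if (backend === 'wasm') {
    // directory serving the tfjs-backend-wasm .wasm binaries (placeholder path, an assumption)
    setWasmPaths('/static/tfjs-wasm/');
  }
  await tf.setBackend(backend);
  await tf.ready();
  console.log('active backend:', tf.getBackend());
}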

2. The lib folder

anchors.js

const arr = [
  ...
  [0.9375, 0.9375, 1.0, 1.0]
];

export default function get_blazeface_priors() {
  return arr;
}
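The priors array above is elided; its last entry suggests BlazeFace-style anchors in normalized [cx, cy, w, h] form. For reference, a hedged sketch of how such priors are commonly generated for a 128x128 input (the grid sizes, anchor counts and fixed 1.0 anchor size are assumptions and may differ from the shipped anchors.js):

// sketch: BlazeFace-style anchor priors as [cx, cy, w, h] in normalized coordinates
export function generate_priors() {
  const priors = [];
  const layers = [
    {gridSize: 16, anchorsPerCell: 2}, // stride-8 feature map
    {gridSize: 8, anchorsPerCell: 6},  // stride-16 feature map
  ];
  for (const {gridSize, anchorsPerCell} of layers) {
    for (let y = 0; y < gridSize; y++) {
      for (let x = 0; x < gridSize; x++) {
        for (let a = 0; a < anchorsPerCell; a++) {
          // anchor centers at cell centers; fixed 1.0 width/height, decodeBox rescales offsets by /128
          priors.push([(x + 0.5) / gridSize, (y + 0.5) / gridSize, 1.0, 1.0]);
        }
      }
    }
  }
  return priors; // 16*16*2 + 8*8*6 = 896 anchors
}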


FaceDetectionModel.js

import * as tf from '@tensorflow/tfjs-core'
import {load_model} from "./ModelUtils";
import get_blazeface_priors from "./anchors";

const DETECTION_IMAGE_SIZE = 128; // input tensor size for face detection: [128, 128]
export class FaceDetectionModel {

    constructor({
                    modelPath,
                    imageSize,
                    faceConfThresh,
                    nmsThresh
                }) {
        this.modelPath = modelPath || null;
        this.faceConfThresh = faceConfThresh || 0.6;
        this.nmsThresh = nmsThresh || 0.4;
        this.imageSize = imageSize || [640, 480];
    }

    destroy() {
        tf.dispose();
        tf.disposeVariables();
        if (this.model) {
            this.model = null;
        }
    }

    async loadModel() {
        return this.model = await load_model(this.modelPath);
    }

    /**
     * Face detection, the externally called entry point
     * @param inputTensor
     * @returns {Promise<Array>} [faceBox, keyPoints]
     */
    async executeFaceDetection(inputTensor) {
        let outputFaceBox = null;
        const detectionResult = this.predict(inputTensor, DETECTION_IMAGE_SIZE);

        const confTensor = detectionResult[0];
        const locTensor = detectionResult[1];

        // decode boxes and nms
        const decodeResult = this.decodeBox(locTensor, get_blazeface_priors());
        const decodeLocTensor = decodeResult[0];
        const decodeKeyPointTensor = decodeResult[1];
        const keepIdxsTensor = await this.nms(decodeLocTensor,
            confTensor,
            this.faceConfThresh,
            this.nmsThresh);

        const faceBoxesTensor = tf.gather(decodeLocTensor, keepIdxsTensor);
        const faceKeyPointTensor = tf.gather(decodeKeyPointTensor, keepIdxsTensor);

        const faceBoxes = faceBoxesTensor.arraySync();
        const keyPointList = faceKeyPointTensor.arraySync();

        //tensors not released inside tidy() must be disposed manually
        confTensor.dispose();
        locTensor.dispose();
        decodeLocTensor.dispose();
        faceBoxesTensor.dispose();
        decodeKeyPointTensor.dispose();
        faceKeyPointTensor.dispose();
        keepIdxsTensor.dispose();

        let x_1, x_2, y_1, y_2;
        let xMax, yMax;
        let outputKeyPoint = [];
        [xMax, yMax] = this.imageSize;
        if (faceBoxes.length > 0) {
            //scale the normalized tensor coordinates back to the original image size
            x_1 = faceBoxes[0][0] * xMax;
            y_1 = faceBoxes[0][1] * yMax;
            x_2 = faceBoxes[0][2] * xMax;
            y_2 = faceBoxes[0][3] * yMax;

            //expand the box outward by a margin
            const w = x_2 - x_1;
            const h = y_2 - y_1;
            x_1 = Math.round(Math.max(x_1 - w / 10, 0));
            x_2 = Math.round(Math.min(x_2 + w / 10, xMax));
            y_1 = Math.round(Math.max(y_1 - h / 5, 0));
            y_2 = Math.round(Math.min(y_2 + h / 20, yMax));

            outputFaceBox = [x_1, y_1, x_2, y_2];
            for (let i = 0; i < keyPointList[0].length; i += 2) {
                const point = [keyPointList[0][i] * xMax, keyPointList[0][i + 1] * yMax];
                outputKeyPoint.push(point);
            }

        }
        return [outputFaceBox, outputKeyPoint];
    }

    /**
     * Predict face boxes
     * @param imageTensor
     * @param scaledSize
     * @returns {[T, Tensor<Rank>]}
     */
    predict(imageTensor, scaledSize) {
        return tf.tidy(() => {
            const resizedImg = tf.image.resizeBilinear(imageTensor, [scaledSize, scaledSize]);
            //preprocessing: normalize pixel values to [0, 1] and add a batch dimension
            const inputTensor = tf.expandDims(tf.div(resizedImg, 255.0));
            //run the detection model
            const preds = this.model.predict(inputTensor);

            const confTensor = tf.sigmoid(tf.squeeze(tf.concat([preds[1], preds[3]], 1)));
            const locTensor = tf.squeeze(tf.concat([preds[2], preds[0]], 1));

            return [confTensor, locTensor];
        });
    }


    /**
     * Map detection box coordinates back to the image scale and position (computed on the CPU)
     * @param loc_tensor
     * @param anchors
     * @returns {*}
     */
    decodeBox(loc_tensor, anchors) {
        return tf.tidy(() => {
            //note: loc_tensor must not be released by tf here
            const reshape_loc_tensor = tf.reshape(loc_tensor, [-1]);

            const loc = reshape_loc_tensor.dataSync();
            const num_anchor = anchors.length;
            const loc_length = 16;
            for (let anchor_i = 0; anchor_i < num_anchor; anchor_i++) {
                // decode boxes:
                // to xywh
                loc[anchor_i * loc_length + 0] = loc[anchor_i * loc_length + 0] / 128.0 * anchors[anchor_i][2] + anchors[anchor_i][0];
                loc[anchor_i * loc_length + 1] = loc[anchor_i * loc_length + 1] / 128.0 * anchors[anchor_i][3] + anchors[anchor_i][1];
                loc[anchor_i * loc_length + 2] = loc[anchor_i * loc_length + 2] / 128.0 * anchors[anchor_i][2];
                loc[anchor_i * loc_length + 3] = loc[anchor_i * loc_length + 3] / 128.0 * anchors[anchor_i][3];
                // xywh -> xyxy
                loc[anchor_i * loc_length + 0] = loc[anchor_i * loc_length + 0] - 0.5 * loc[anchor_i * loc_length + 2];
                loc[anchor_i * loc_length + 1] = loc[anchor_i * loc_length + 1] - 0.5 * loc[anchor_i * loc_length + 3];
                loc[anchor_i * loc_length + 2] = loc[anchor_i * loc_length + 0] + loc[anchor_i * loc_length + 2];
                loc[anchor_i * loc_length + 3] = loc[anchor_i * loc_length + 1] + loc[anchor_i * loc_length + 3];

                //decode keypoints
                for (let k = 4; k < loc_length; k++) {
                    if (k % 2 == 0) {
                        loc[anchor_i * loc_length + k] = loc[anchor_i * loc_length + k] / 128.0 * anchors[anchor_i][2] + anchors[anchor_i][0];
                    } else {
                        loc[anchor_i * loc_length + k] = loc[anchor_i * loc_length + k] / 128.0 * anchors[anchor_i][3] + anchors[anchor_i][1];
                    }
                }
            }
            const decode_loc_tensor = tf.tensor(loc, [num_anchor, loc_length], "float32");
            const [boxes_tensor, keypoints_tensor] = tf.split(decode_loc_tensor, [4, 12], 1);
            return [boxes_tensor, keypoints_tensor];
        });
    }

    /**
     * nms
     * @param _boxes
     * @param _conf
     * @param conf_thresh
     * @param nms_thresh
     * @returns {Promise<Tensor<Rank>>}
     */
    async nms(_boxes, _conf, conf_thresh, nms_thresh) {
        const tf_boxes_tensor = tf.tidy(() => {
            const [x1, y1, x2, y2] = tf.split(_boxes, 4, 1);
            return tf.concat([y1, x1, y2, x2], 1);
        });
        // nms
        let select_idx = await tf.image.nonMaxSuppressionAsync(tf_boxes_tensor, _conf, 10, nms_thresh, conf_thresh);
        tf_boxes_tensor.dispose();
        return select_idx;
    }
}

export default FaceDetectionModel;
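A minimal usage sketch of FaceDetectionModel on its own; the model URL and the image element are placeholders, not values from the original project:

// sketch: standalone face detection on an <img> or <video> element
import * as tf from '@tensorflow/tfjs-core';
import FaceDetectionModel from './FaceDetectionModel';

async function detectOnce(imageElement, width, height) {
  const detector = new FaceDetectionModel({
    modelPath: '/models/FaceDetectionModel.json', // placeholder URL
    imageSize: [width, height],
    faceConfThresh: 0.6,
    nmsThresh: 0.4,
  });
  await detector.loadModel();

  const imageTensor = tf.browser.fromPixels(imageElement, 3); // [height, width, 3]
  const [faceBox, keyPoints] = await detector.executeFaceDetection(imageTensor);
  imageTensor.dispose();

  console.log('face box [x1, y1, x2, y2]:', faceBox);
  console.log('keypoints:', keyPoints);
  return faceBox;
}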

FaceEngine.js

import * as tf from '@tensorflow/tfjs-core'
import FaceMeshModel from "./FaceMeshModel";
import FaceDetectionModel from "./FaceDetectionModel";
import {newFrameTensor, checkFace} from "./ModelUtils";

export class FaceEngine {

    constructor({
                    detectionModelPath,
                    meshModelPath,
                    imageSize,
                    faceConfThresh,
                    nmsThresh,
                    landmarkThresh,
                    backend
                }) {
        this.faceDetectionModel = new FaceDetectionModel({
            modelPath: detectionModelPath,
            imageSize: imageSize,
            faceConfThresh: faceConfThresh,
            nmsThresh: nmsThresh,
            backend: backend
        });
        this.faceMeshModel = new FaceMeshModel({
            modelPath: meshModelPath,
            backend: backend
        });
        this.backend = backend || "webgl"
        this.imageSize = imageSize;
        this.landmarkThresh = landmarkThresh;

        this.currentFrameTensor = null;
        this.lastFaceBox = null;
        this.isModelReady = false;
    }

    /**
     *
     * @returns {Promise<void>}
     */
    async init() {
        await tf.setBackend(this.backend);
        await this.faceDetectionModel.loadModel();
        await this.faceMeshModel.loadModel();

        const preload_time = performance.now();
        console.log('start to preload model');
        await this.preloadEmptyFrame();
        console.log('preload model cost = ', performance.now() - preload_time);
        this.isModelReady = true;
    }

    setImageSize(imageSize){
        this.imageSize = imageSize;
        this.faceDetectionModel.imageSize = imageSize;
    }

    /**
     * Warm up the models once with an empty frame
     * @returns {Promise<number>}
     */
    async preloadEmptyFrame() {
        const frameWidth = this.imageSize[0];
        const frameHeight = this.imageSize[1];
        const bufferSize = frameWidth * frameHeight * 4;

        const imageTensor = newFrameTensor(new Uint8ClampedArray(bufferSize), frameWidth, frameHeight, 3);
        if (this.faceDetectionModel) {
            await this.faceDetectionModel.executeFaceDetection(imageTensor);
        }
        if (this.faceMeshModel) {
            await this.faceMeshModel.executeFaceMesh(imageTensor, [0, 0, frameWidth, frameHeight]);
        }

        imageTensor.dispose();
        return 0;
    }

    destroy() {
        try{
            tf.dispose();
            tf.disposeVariables();
            if (this.faceDetectionModel) {
                this.faceDetectionModel.destroy();
            }
            if (this.faceMeshModel) {
                this.faceMeshModel.destroy();
            }
        }catch (e) {
            console.log("failed to release faceEngine, error = ", e);
        }
    }

    rotateImageTensor(tensor, radian){
        return tf.tidy(()=>{
            const inputTensor = tf.expandDims(tensor, 0);
            return tf.image.rotateWithOffset(inputTensor, radian);
        });
    }

    calAngle(pLeftEye, pRightEye){
        if (pLeftEye && pRightEye){
            const leftX = pLeftEye[0];
            const leftY = pLeftEye[1];
            const rightX = pRightEye[0];
            const rightY = pRightEye[1];

            //roll angle (in degrees) of the line between the two eyes
            const angle = Math.atan((rightY - leftY) / (rightX - leftX)) * 180 / Math.PI;
            return angle;
        }else{
            return null;
        }
    }

    /**
     *
     * @param frame
     * @returns {Promise<{bbox: *, confidence: *, landmarks: *, time: *}>}
     */
    async handleImageFrame(frame, constrainsOptions) {
        this.startTime = performance.now();
        this.faceDetectionBox = null;
        this.faceDetectionKeypoints = null;

        const frameWidth = this.imageSize[0];
        const frameHeight = this.imageSize[1];
        if (!frame) {
            return this.sendErrorAndDisposeTensor("input frame error");
        }
        if (!this.isModelReady) {
            return this.sendErrorAndDisposeTensor("model is not ready");
        }

        this.currentFrameTensor = newFrameTensor(frame, frameWidth, frameHeight, 3);

        let landmarkFaceBox = null;

        //decide whether face detection needs to run, and pick the face box used for landmark prediction
        if (!this.lastFaceBox) {
            const detectionResult = await this.faceDetectionModel.executeFaceDetection(this.currentFrameTensor);
            const faceBox = detectionResult[0];
            this.faceDetectionBox = faceBox;
            this.faceDetectionKeypoints = detectionResult[1];
            if (this.faceDetectionKeypoints){
                this.calAngle(this.faceDetectionKeypoints[0], this.faceDetectionKeypoints[1]);
            }
            if (!faceBox) {
                return this.sendResultAndDisposeTensor(null, null, -1, "no face");
            } else {
                landmarkFaceBox = faceBox;
            }
        } else {
            landmarkFaceBox = this.lastFaceBox;
        }

        let facePreprocessResult = null;
        if (constrainsOptions){
            facePreprocessResult = checkFace(this.currentFrameTensor, landmarkFaceBox, this.imageSize, constrainsOptions);
        }

        const meshResult = await this.faceMeshModel.executeFaceMesh(this.currentFrameTensor, landmarkFaceBox);
        const landmarksConfidence = meshResult[0];
        const landmarksPosition = meshResult[1];

        //everything below runs on the CPU with plain JS TypedArrays
        const filterResult = this.faceMeshModel.decode(landmarksPosition, landmarkFaceBox);
        const landmarks = filterResult[0];
        const landmarksFaceBox = filterResult[1];

        //update the face box used for the next frame based on the landmark confidence threshold;
        //if the confidence is below the threshold, clear the box so face detection runs again on the next frame
        const finalFaceBox = this.updateLastFaceBox(landmarksFaceBox, landmarkFaceBox, landmarksConfidence, this.landmarkThresh);
        this.lastFaceBox = finalFaceBox;
        if (this.faceDetectionBox){
            //prefer the box from an actual detection
            return this.sendResultAndDisposeTensor(
                this.faceDetectionBox,
                landmarks,
                landmarksConfidence,
                '检测成功', facePreprocessResult);
        }else{
            //otherwise return the tracked face box
            return this.sendResultAndDisposeTensor(
                finalFaceBox,
                landmarks,
                landmarksConfidence,
                '检测成功',facePreprocessResult);
        }
    }

    updateLastFaceBox(faceBox, lastFaceBox, confidence, threshold) {
        let newFaceBox;
        let xMax, yMax;
        [xMax, yMax] = this.imageSize;
        if (confidence > threshold) {
            let x_1, y_1, x_2, y_2;
            [x_1, y_1, x_2, y_2] = faceBox;
            //expand the box outward by a margin
            if (lastFaceBox) {
                let x_11, y_11, x_22, y_22;
                [x_11, y_11, x_22, y_22] = lastFaceBox;
                const w = x_22 - x_11;
                const h = y_22 - y_11;
                const center_x = (x_1 + x_2) / 2;
                const center_y = (y_1 + y_2) / 2;

                x_1 = Math.round(Math.max(center_x - w / 2, 0));
                x_2 = Math.round(Math.min(center_x + w / 2, xMax));
                y_1 = Math.round(Math.max(center_y - h / 2, 0));
                y_2 = Math.round(Math.min(center_y + h / 2, yMax));

            } else {
                const w = x_2 - x_1;
                const h = y_2 - y_1;
                x_1 = Math.round(Math.max(x_1 - w / 5, 0));
                x_2 = Math.round(Math.min(x_2 + w / 5, xMax));
                y_1 = Math.round(Math.max(y_1 - h / 3, 0));
                y_2 = Math.round(Math.min(y_2 + h / 3, yMax));
            }
            newFaceBox = [x_1, y_1, x_2, y_2];
        } else {
            newFaceBox = null;
        }
        return newFaceBox;
    }


    /**
     * Build the output result object
     * @param bbox face box array
     * @param landmarks 2D landmark array
     * @param confidence landmark confidence
     * @param message status message
     * @param optionsMessage result of the optional image-quality checks
     * @returns {{bbox: *, confidence: *, landmarks: *, time: *, message: *, keypoints: *}}
     */
    sendResultAndDisposeTensor(bbox, landmarks, confidence, message, optionsMessage = null) {
        if (this.currentFrameTensor) {
            this.currentFrameTensor.dispose();
            this.currentFrameTensor = null;
        }
        return {bbox: bbox, landmarks: landmarks, confidence: confidence,
            time: performance.now() - this.startTime, message: message, keypoints: this.faceDetectionKeypoints, optionsMessage:optionsMessage};
    }

    sendErrorAndDisposeTensor(error_message) {
        return this.sendResultAndDisposeTensor(null, null, -1, error_message);
    }
}

export default FaceEngine;
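And a minimal FaceEngine usage sketch outside Vue; the model paths and thresholds are placeholders, and the frame is passed as a raw RGBA buffer to match newFrameTensor in ModelUtils.js below:

// sketch: driving FaceEngine from a canvas, independent of the Vue component
import FaceEngine from './FaceEngine';

async function runOnCanvas(canvas) {
  const engine = new FaceEngine({
    detectionModelPath: '/models/FaceDetectionModel.json', // placeholder
    meshModelPath: '/models/FaceMeshModel.json',           // placeholder
    imageSize: [canvas.width, canvas.height],
    faceConfThresh: 0.6,
    nmsThresh: 0.4,
    landmarkThresh: 0.5,
    backend: 'webgl',
  });
  await engine.init(); // loads both models and warms them up with an empty frame

  const ctx = canvas.getContext('2d');
  const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
  // pass the raw RGBA pixel buffer of one frame; null skips the optional quality checks
  const result = await engine.handleImageFrame(imageData.data, null);
  console.log(result.message, result.bbox, result.confidence);

  engine.destroy();
}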

FaceMeshModel.js

import * as tf from '@tensorflow/tfjs-core'
import {load_model} from "./ModelUtils";

const MESH_IMAGE_SIZE = 192; // input tensor size for the face landmark model: [192, 192]
const id_list = [119, 348, 129, 358, 69, 299, 189, 413, 105, 52, 334,
    282, 70, 46, 300, 276, 230, 450, 9, 35, 265, 139, 368,
    107, 55, 336, 285, 207, 427, 57, 287, 18, 164];

export class FaceMeshModel {
    constructor({
                    modelPath
                }) {
        this.modelPath = modelPath || null;
        this.model = null;
    }

    destroy() {
        tf.dispose();
        tf.disposeVariables();
        if (this.model) {
            this.model = null;
        }
    }

    async loadModel() {
        this.model = await load_model(this.modelPath);
    }

    /**
     * Face landmark prediction
     * @param inputTensor image tensor
     * @param faceBox face bounding box
     * @returns {Promise<*[]>} confidence and raw landmarks (not yet decoded)
     */
    async executeFaceMesh(inputTensor, faceBox) {
        let x_1, y_1, x_2, y_2;
        [x_1, y_1, x_2, y_2] = faceBox;

        let w_c = x_2 - x_1;
        let h_c = y_2 - y_1;
        const meshResult = this.predict(inputTensor, x_1, y_1, w_c, h_c, MESH_IMAGE_SIZE);

        const landmarksPosTensor = meshResult[0];
        const landmarksConfTensor = meshResult[1];

        const landmarksPos = landmarksPosTensor.dataSync();
        const temp = landmarksConfTensor.dataSync();
        const landmarksConf = temp[0];

        //these output tensors are no longer needed, dispose them
        landmarksPosTensor.dispose();
        landmarksConfTensor.dispose();
        return [landmarksConf, landmarksPos];
    }

    /**
     * Run the landmark model on the cropped face region; the raw output is in the
     * resized-crop coordinate system (decode() maps it back to the original image)
     * @param imageTensor
     * @param x_1
     * @param y_1
     * @param w_c
     * @param h_c
     * @param resizedSize
     * @returns {Tensor<Rank> | Tensor[] | NamedTensorMap}
     */
    predict(imageTensor, x_1, y_1, w_c, h_c, resizedSize) {
        return tf.tidy(() => {
            const cropedImg = tf.slice(imageTensor, [y_1, x_1, 0], [h_c, w_c, 3]);
            const cropedResizedImg = tf.image.resizeBilinear(cropedImg, [resizedSize, resizedSize]);
            const inputTensor = tf.expandDims(tf.div(cropedResizedImg, 255.0));
            const preds = this.model.predict(inputTensor);
            return preds;
        });

    }

    decode(preds, faceBox) {
        let w, h, x_1, y_1, x_2, y_2;
        [x_1, y_1, x_2, y_2] = faceBox;
        w = x_2 - x_1;
        h = y_2 - y_1;

        let landmarks = new Array(468);
        let pnts = preds;

        let x_min = x_1 + w;
        let x_max = -1;
        let y_min = y_1 + h;
        let y_max = -1;

        let index = 0;
        for (let i = 0; i < pnts.length; i += 3) {
            let xp = pnts[i] * w / MESH_IMAGE_SIZE + x_1;
            let yp = pnts[i + 1] * h / MESH_IMAGE_SIZE + y_1;

            if (id_list.indexOf(index) != -1) {
                x_min = Math.min(x_min, xp);
                y_min = Math.min(y_min, yp);
                x_max = Math.max(x_max, xp);
                y_max = Math.max(y_max, yp);
            }
            landmarks[index] = [xp, yp];
            index = index + 1;
        }
        return [landmarks, [x_min, y_min, x_max, y_max]];
    }
}

export default FaceMeshModel;
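FaceMeshModel can also be driven directly when a face box in pixel coordinates is already known; the model URL below is a placeholder:

// sketch: running the landmark model on a known face region
import * as tf from '@tensorflow/tfjs-core';
import FaceMeshModel from './FaceMeshModel';

async function meshOnce(imageElement, faceBox /* [x1, y1, x2, y2] in pixels */) {
  const mesh = new FaceMeshModel({modelPath: '/models/FaceMeshModel.json'}); // placeholder URL
  await mesh.loadModel();

  const imageTensor = tf.browser.fromPixels(imageElement, 3);
  const [confidence, rawLandmarks] = await mesh.executeFaceMesh(imageTensor, faceBox);
  // decode() maps the 468 raw points from crop space back to image coordinates
  const [landmarks, tightBox] = mesh.decode(rawLandmarks, faceBox);
  imageTensor.dispose();

  return {confidence, landmarks, tightBox};
}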

ModelUtils.js

import * as tf from "@tensorflow/tfjs-core";
import * as tfc from "@tensorflow/tfjs-converter";
import {assert} from "@tensorflow/tfjs-core/dist/util_base";


export function newFrameTensor(frame, width, height, channel) {
	return tf.browser.fromPixels(
		{
			data: new Uint8Array(frame),
			width: width,
			height: height,
		}, channel
	);
}
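newFrameTensor wraps tf.browser.fromPixels with a PixelData-style object; passing channel = 3 drops the alpha channel of RGBA frame data. A short usage sketch with pixels taken from a canvas (the canvas argument is a placeholder):

// sketch: building a frame tensor from canvas pixels
function frameTensorFromCanvas(canvas) {
  const ctx = canvas.getContext('2d');
  const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
  // pass the raw RGBA buffer; channel = 3 keeps only RGB
  return newFrameTensor(imageData.data, canvas.width, canvas.height, 3); // shape [height, width, 3]
}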

export async function load_model(model_url) {
    console.log('start load model, url = ', model_url);
    if (!model_url) {
        throw new Error("detection_model can't be empty");
    }
    try {
        let model = await tfc.loadGraphModel(model_url);
        console.log("load model success, path = ", model_url);
        return model;
    } catch (e) {
        console.log("load model failed, err = ", e);
        return null;
    }
}

export function rgb2gray(imageTensor) {
    const [r, g, b] = tf.split(imageTensor, 3, 2)
    // ITU-R BT.601 luma weights (the blue weight is 0.114, not 0.144)
    let grayTensor = tf.addN([tf.mul(r, 0.299), tf.mul(g, 0.587), tf.mul(b, 0.114)]);
    // let output = tf.squeeze(grayTensor);
    return grayTensor;
}

export function SMD2(grayTensor) {
    // grayTensor: [H, W(, 1)] grayscale image tensor
    const gray = grayTensor;
    let [h, w] = gray.shape;

    let img_left = tf.slice(gray, [1, 0], [h - 1, w - 1]);
    let img_right = tf.slice(gray, [1, 1], [h - 1, w - 1]);

    let img_up = tf.slice(gray, [1, 1], [h - 1, w - 1]);
    let img_bottom = tf.slice(gray, [0, 1], [h - 1, w - 1]);

    let diff_lr = tf.abs(tf.sub(img_left, img_right));
    let diff_ub = tf.abs(tf.sub(img_up, img_bottom));

    let vals = tf.mul(diff_lr, diff_ub);
    let smd2 = tf.mean(vals);
    return smd2;
}
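SMD2 is a gray-difference-product sharpness score: for each pixel the absolute horizontal difference is multiplied by the absolute vertical difference, and the products are averaged; blurrier crops give smaller values. A plain-JS reference of what the tensor ops above compute, assuming a flat grayscale array:

// sketch: CPU reference for the SMD2 metric over a flat grayscale array of size width*height
function smd2Reference(gray, width, height) {
  let sum = 0;
  let count = 0;
  for (let y = 1; y < height; y++) {
    for (let x = 1; x < width; x++) {
      const center = gray[y * width + x];
      const left = gray[y * width + x - 1];
      const up = gray[(y - 1) * width + x];
      // product of horizontal and vertical absolute differences
      sum += Math.abs(center - left) * Math.abs(center - up);
      count++;
    }
  }
  return sum / count; // higher value = sharper image
}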

export function getBrightTensor(grayTensor) {
    const gray = grayTensor;
    let bright = tf.mean(gray);
    return bright;
}


export function getBright(rgb_data, width, height) {
    let bright = 0;
    for (let i = 0; i < width; i++) {
        for (let j = 0; j < height; j++) {
            let r = rgb_data[3 * (j * width + i) + 0];
            let g = rgb_data[3 * (j * width + i) + 1];
            let b = rgb_data[3 * (j * width + i) + 2];
            bright += 0.2989 * r + 0.5870 * g + 0.1140 * b;
        }
    }
    const count = height * width;
    bright /= count;
    return bright;
}

export function getFacePosition(faceBox, imageSize, thresholds) {
    const minDistance = thresholds.minDistance;
    const maxDistance = thresholds.maxDistance;

    const xThreshold = thresholds.xThreshold;
    const yThreshold = thresholds.yThreshold;


    const width = imageSize[0];
    const height = imageSize[1];

    const imgXCenter = width / 2;
    const imgYCenter = height / 2;

    let x_1, y_1, x_2, y_2;
    [x_1, y_1, x_2, y_2] = faceBox;

    const faceXCenter = (x_2 + x_1) / 2;
    const faceYCenter = (y_2 + y_1) / 2;

    const distanceValue = (x_2 - x_1) / width;
    let isDistanceMatch, distanceTip;
    if (distanceValue < minDistance) {
        isDistanceMatch = false;
        // distanceTip = "距离太远";
        distanceTip = "太远";
    } else if (distanceValue > maxDistance) {
        isDistanceMatch = false;
        // distanceTip = "距离太近"; //too close
        distanceTip = "太近";
    } else {
        isDistanceMatch = true;
        // distanceTip = "距离合适";
        distanceTip = "合适";
    }
    const deltaX = (faceXCenter - imgXCenter);
    const deltaY = (faceYCenter - imgYCenter);

    // const positionTips = [deltaX >= xThreshold ? deltaX : 0,
    //     deltaY >= yThreshold ? deltaY : 0]
    let isPositionMatch, positionTips, positionTips2;
    if (Math.abs(deltaX) >= xThreshold){
			isPositionMatch = false;
			positionTips = deltaX > 0 ? "请向左移动" : "请向右移动";
			positionTips2 = deltaX > 0 ? "偏右" : "偏左";
    } else if (Math.abs(deltaY) >= yThreshold){
			isPositionMatch = false;
			positionTips = deltaY > 0 ? "请向上移动" : "请向下移动";
			positionTips2 = deltaY > 0 ? "偏下" : "偏上";
    } else {
			isPositionMatch = true;
			positionTips = "位置合适";
			positionTips2 = "合适"
    }

    return {
			isPositionMatch: isPositionMatch,
			positionTips: positionTips,
			positionTips2: positionTips2,
			isDistanceMatch: isDistanceMatch,
			distanceTip: distanceTip,
			distanceValue: distanceValue
    }
}
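A hedged example of calling getFacePosition on a 480x640 frame; the threshold values below are illustrative, not values from the original project:

// sketch: position/distance check (threshold values are assumptions)
const positionResult = getFacePosition(
  [140, 180, 340, 430],   // faceBox [x1, y1, x2, y2]
  [480, 640],             // imageSize [width, height]
  {
    minDistance: 0.3,     // face width must cover at least 30% of the frame width
    maxDistance: 0.7,     // ...and at most 70%
    xThreshold: 60,       // allowed horizontal offset of the face center, in pixels
    yThreshold: 80,       // allowed vertical offset of the face center, in pixels
  }
);
console.log(positionResult.isDistanceMatch, positionResult.isPositionMatch, positionResult.positionTips);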

/**
 * Run the optional image-quality checks (position/distance, brightness, sharpness) on the face region
 * @param inputTensor
 * @param faceBox
 * @param imageSize
 * @param constrainsOptions
 * @returns {{}}
 */
export function checkFace(inputTensor, faceBox, imageSize, constrainsOptions) {
    assert(constrainsOptions);
    let x_1, y_1, x_2, y_2;
    [x_1, y_1, x_2, y_2] = faceBox;

    let w_c = x_2 - x_1;
    let h_c = y_2 - y_1;

    return tf.tidy(() => {
        let finalResult = {};

        const cropedImg = tf.slice(inputTensor, [y_1, x_1, 0], [h_c, w_c, 3]);
        let positionResult = {};
        let brightResult = {};
        let clarifyResult = {};

        if (constrainsOptions.positionConstrains) {
					positionResult = getFacePosition(faceBox, imageSize, constrainsOptions.positionConstrains);
					finalResult['positionConstrains'] = positionResult;
        }

        if (constrainsOptions.brightnessConstrains || constrainsOptions.clarifyConstrains) {
					let grayTensor = rgb2gray(cropedImg);
					if (constrainsOptions.brightnessConstrains) {
						assert(constrainsOptions.brightnessConstrains.minValue && constrainsOptions.brightnessConstrains.maxValue);
						const bright = getBrightTensor(grayTensor).arraySync();
						brightResult['brightnessValue'] = Math.floor(bright);

						let isBrightMatch = false;
						let brightTips;
						if (bright < constrainsOptions.brightnessConstrains.minValue) {
								// brightTips = '亮度过低';
								brightTips = '过暗';
						} else if (bright > constrainsOptions.brightnessConstrains.maxValue) {
								// brightTips = '亮度过高';
								brightTips = '过高';
						} else {
								isBrightMatch = true;
								// brightTips = '亮度合适';
								brightTips = '合适';
						}
						brightResult['brightTips'] = brightTips;
						brightResult['isBrightMatch'] = isBrightMatch;
						finalResult['brightnessConstrains'] = brightResult;
					}

					if (constrainsOptions.clarifyConstrains) {
						assert(constrainsOptions.clarifyConstrains.minValue);
						const clarify = SMD2(grayTensor).arraySync();
						clarifyResult['clarifyValue'] = Math.floor(clarify);

						let isClarifyMatch = false;
						let clarifyTips;
						if (clarify < constrainsOptions.clarifyConstrains.minValue) {
								clarifyTips = '清晰度过低';
						} else {
								isClarifyMatch = true;
								clarifyTips = '清晰度合适';
						}
						clarifyResult['clarifyTips'] = clarifyTips;
						clarifyResult['isClarifyMatch'] = isClarifyMatch;
						finalResult['clarifyConstrains'] = clarifyResult;
					}
        }
        return finalResult;
    });
}
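The shape of constrainsOptions follows from the checks above; in the page it comes from a helper (modelUtils.getDefaultCameraConstrains) that is not included in this post. A hedged sketch of an options object covering the three checks implemented here, with illustrative values:

// sketch: a constrainsOptions object for checkFace (all values are assumptions)
const constrainsOptions = {
  positionConstrains: {
    minDistance: 0.3,  // min ratio of face-box width to frame width
    maxDistance: 0.7,  // max ratio of face-box width to frame width
    xThreshold: 60,    // allowed horizontal center offset in pixels
    yThreshold: 80,    // allowed vertical center offset in pixels
  },
  brightnessConstrains: {
    minValue: 80,      // minimum acceptable mean gray level (0-255)
    maxValue: 200,     // maximum acceptable mean gray level
  },
  clarifyConstrains: {
    minValue: 10,      // minimum acceptable SMD2 sharpness score
  },
};

// const qualityResult = checkFace(frameTensor, faceBox, [width, height], constrainsOptions);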

index.js

import {FaceEngine} from "./FaceEngine";
import {FaceMeshModel} from "./FaceMeshModel";
import {FaceDetectionModel} from "./FaceDetectionModel";

export {FaceEngine, FaceMeshModel, FaceDetectionModel};
export default {FaceEngine, FaceMeshModel, FaceDetectionModel};

3. Using the models in a page

<template>
	<div class="main_container">

		<div class="ai-diagnosis-wrapper" v-if="pageState==0">
			<img class="zb-bg" src="@/assets/camera/zb-bg.jpg" alt=""/>
			<div class="adw-btn gray" v-if="modelLoading">模型加载中...</div>
			<div class="adw-btn" v-if="!modelLoading" @click="showCamera">开始</div>
		</div>

		<div class="video-box" v-show="pageState == 1">
			
			<video class="video" id="video" ref="video" playsinline autoplay muted></video>
			
			<div class="video-mask">
				<img class="circle-img" src="@/assets/camera/animate-camera-circle.png"/>
			</div>

			<canvas id="imgCanvas"></canvas>
			<canvas class="canvas_render" id="drawerCanvas" ref="canvasRenderRef"></canvas>
			
			<div class="mantle-box">
				<div class="message-tips">
					<p class="message-tips-p">{{ faceDetect ? "识别成功" : faceDetectorTips }}</p>
				</div>
				<ul class="photo-env">
					<li>
						<img class="icon" v-show="!detectStatus" src="@/assets/camera/gy-1.png"/>
						<img class="icon" v-show="detectStatus && detectBright === 0" src="@/assets/camera/gy-3.png"/>
						<img class="icon" v-show="detectStatus && detectBright !== 0" src="@/assets/camera/gy-2.png"/>
						<div class="r-box">
							<p class="p-1">拍照光源</p>
							<p class="p-2">{{detectBright !== -1 ? brightTxt[detectBright] : (!detectStatus ? '待检' : '无人脸')}}</p>
						</div>
					</li>

					<li>
						<img class="icon" v-show="!detectStatus" src="@/assets/camera/wz-1.png"/>
						<img class="icon" v-show="detectStatus && (detectPosition === 0 && detectDistance === 0)" src="@/assets/camera/wz-3.png"/>
						<img class="icon" v-show="detectStatus && (detectPosition !== 0 || detectDistance !== 0)" src="@/assets/camera/wz-2.png"/>
						<div class="r-box">
							<p class="p-1">人脸位置</p>
							<p class="p-2">{{detectDistance !== -1 ? (detectDistance === 0 ? positionTxt[detectPosition] : distanceTxt[detectDistance]) : (!detectStatus ? '待检' : '无人脸')}}</p>
						</div>
					</li>
					
					<li>
						<img class="icon" v-show="!detectStatus" src="@/assets/camera/jd-1.png"/>
						<img class="icon" v-show="detectStatus && detectPosture === 0" src="@/assets/camera/jd-3.png"/>
						<img class="icon" v-show="detectStatus && detectPosture !== 0" src="@/assets/camera/jd-2.png"/>
						<div class="r-box">
							<p class="p-1">人脸角度</p>
							<p class="p-2">{{detectPosture !== -1 ? postureTxt[detectPosture] : (!detectStatus ? '待检' : '无人脸')}}</p>
						</div>
					</li>
				</ul>
			</div>
		</div>

	</div>
</template>

<script>
	import * as tf from "@tensorflow/tfjs"
	import { positionMsg, positionTxt, brightTxt, distanceTxt, brightDetailTxt, postureTxt } from '@/utils/utils.js'
    import {FaceEngine} from '@/detector/index';
	
	export default {
		name: 'Cameraface',

		data () {
			return {
				pageState: 0,
				modelLoading: true, // model loading flag
				detectStatus: false,

				getImageDataing: false,
				startTime: 0, // throttle: minimum interval between two processed frames
				face_detect_model: '',
				faceDetect: false,
				faceDetectSuccess: -1, // [-1, 0, 1]
				runnerTimeout: '',

				detectBright: -1, // brightness
				detectPosition: -1, // position offset
				detectDistance: -1, // distance offset
				detectPosture: -1, // face pose
				faceDetectorTips:'请保持人脸在采集框中',

				videoWidth: 0,
				videoHeight: 0,
				// canvas size
				videoObj: '',
				canvasObj: '',
				context: '',
				streaming: false, // whether media capture has started
				mediaStreamTrack: '',

				distanceTxt: distanceTxt,
				positionTxt: positionTxt,
				brightDetailTxt: brightDetailTxt,
				brightTxt: brightTxt,
				positionMsg: positionMsg,
				postureTxt: postureTxt,
				sliceFlag: false,

				isUploading: false,
				isUploadSuccess: false,
			}
		},

		created () {
			this.loadModel()
		},

		mounted() {
			if (!window.URL) {
				window.URL = window.URL || window.webkitURL || window.msURL || window.oURL;
			}
			this.videoObj = this.$refs.video;
			this.videoObj.removeAttribute("controls");

			this.canvasObj = document.getElementById("imgCanvas"); // canvas used to grab frame pixel data
			this.context = this.canvasObj.getContext('2d');

			this.drawerCanvasObj = document.getElementById("drawerCanvas"); // canvas used to draw detection results
			this.drawerCanvasCtx = this.drawerCanvasObj.getContext('2d');
		},

		unmounted() {
			this.hideCamera(true);
		},

		beforeDestroy() {
			this.hideCamera(true);
		},

		methods: {
			async loadModel() {
				console.log('model load start!');
				this.frameCanvas = document.createElement('canvas');

				this.$toast.loading({
					duration: 0,
					message: "模型加载中",
					forbidClick: true,
				});

				this.pageState = 0;
				this.modelLoading = true
				this.isUploading = false;
				this.isUploadSuccess = false;

				/** SDK: load the models */
				this.face_detect_model = new FaceEngine({
			        detectionModelPath:'xxx/FaceDetectionModel.json',
					meshModelPath: 'xxx/FaceMeshModel_v1.0.0.json',
					wasmPath: 'xxx/zyd_bg_1.wasm',
					cache:'localstorage',
					backend:'webgl'
				});

				await this.face_detect_model.init();

				this.$toast.clear();
				this.modelLoading = false
				console.log('model load finished!');
			},

			getImageData () {
				let frameData = null;
        if (this.videoWidth&&this.videoHeight) {
					if(this.sliceFlag){
						/**
						 * Purpose: on PC, center-crop a canvas-sized frame out of a larger video
						 * e.g. video size: 960*640, canvas filling the device viewport: 375*667
						 * */
						const videoWidthCenterX = Math.floor(this.videoWidth / 2);
						const videoWidthCenterY = Math.floor(this.videoHeight / 2);
						const startX = Math.floor(videoWidthCenterX - this.canvasObj.width / 2);
						const startY = Math.floor(videoWidthCenterY - this.canvasObj.height / 2);
						this.context.clearRect(0, 0, this.canvasObj.width, this.canvasObj.height)
						this.context.translate(this.canvasObj.width, 0);
						this.context.scale(-1, 1) // the video is mirrored, so flip the frame horizontally; otherwise the left/right tips are reversed
						this.context.drawImage(this.videoObj, startX, startY, this.canvasObj.width, this.canvasObj.height, 0, 0, this.canvasObj.width, this.canvasObj.height);
						frameData = this.context.getImageData(0, 0, this.canvasObj.width, this.canvasObj.height);
						this.context.setTransform(1, 0, 0, 1, 0, 0)
					} else {
						const videoWidthCenterX = Math.floor(this.videoWidth / 2);
						const videoWidthCenterY = Math.floor(this.videoHeight / 2);
						const startX = 0;
						const startY = 0;
						this.context.clearRect(0, 0, this.canvasObj.width, this.canvasObj.height)
						this.context.translate(this.canvasObj.width, 0);
						this.context.scale(-1, 1) // the video is mirrored, so flip the frame horizontally; otherwise the left/right tips are reversed
						this.context.drawImage(this.videoObj, startX, startY, this.canvasObj.width, this.canvasObj.height, 0, 0, this.canvasObj.width, this.canvasObj.height);
						frameData = this.context.getImageData(0, 0, this.canvasObj.width, this.canvasObj.height);
						this.context.setTransform(1, 0, 0, 1, 0, 0)
					}
				}
				this.detectStatus = true; // start the check for this frame
				
				this.checkPhoto(frameData);
			},
			runnerFunction () {
				if (this.getImageDataing || this.showTip || (this.faceDetect) || new Date().getTime() - this.startTime < 600) {
					// console.log('不执行任何操作')
				} else {
					// console.log('开始执行获取像素')
					this.getImageDataing = true;
					this.getImageData();
					this.startTime = new Date().getTime();
				}

				this.runnerTimeout = requestAnimationFrame(this.runnerFunction);
			},
			/**
			 * Check a video frame
			 */
			checkPhoto(frame) {
				if(frame){
					this.facesModelDetector(frame)
				}else{
					this.continueDetect()
				}
			},

			facesModelDetector(frame){
				if(!this.face_detect_model) {
					this.startFaceDetectOver()
					return 
				}
	
				/** SDK: run detection on the frame */
				this.face_detect_model.handleImageFrame(frame, modelUtils.getDefaultCameraConstrains(), false).then(result => {
					const bbox = result.bbox;
					console.log("检测结果::",result)
					if (bbox) {
						this.drawerCanvasCtx.clearRect(0, 0, this.drawerCanvasObj.width, this.drawerCanvasObj.height)
						this.drawPoints(this.drawerCanvasCtx, [bbox[0], bbox[1]], 25, "red")
            this.drawPoints(this.drawerCanvasCtx, [bbox[2], bbox[3]], 25, "red")
						if ( commonUtils.checkMouthOpen(result.landmarks, 10)) {
							this.faceDetectorTips = "请不要张嘴"
						} else {
							if (this.checkImg(result.optionsMessage)) {
								// position, sharpness and distance are all acceptable
								console.log('check passed, image quality OK')
								this.faceDetect = true
								this.faceDetectorTips = ""
								Toast('采集成功')

								// this.stopDetect();
								
								// check that the canvas image matches what the video currently shows
								let imgUrl = this.canvasObj.toDataURL('image/jpeg')
								// let alink = document.createElement('a');
								// alink.download = 'face'+new Date().getTime()
								// alink.href = imgUrl
								// document.body.appendChild(alink)
								// alink.click()
								// document.body.removeChild(alink)

								this.upload(this.dataURLtoBlob(imgUrl));

								return;
							} else {
								this.continueDetect()
							}
						}
					    this.continueDetect()
					} else {
						this.startFaceDetectOver()
					}
					}).catch(err => {
						console.log('面部检测失败', err)
						this.startFaceDetectOver()
					});
			},

			checkImg(result) {
				if (result && result.positionConstrains && result.brightnessConstrains) {
					let position = -1, distance = -1, bright = -1, clarify=-1, colorCast=-1, posture=-1,btip='', dtip = '', ptip='',postip='',postureDict;
					bright = result.brightnessConstrains.brightTipsVal // 0 OK, 1 too bright, 2 too dark, 3 uneven
					distance =  result.positionConstrains.distanceTipsVal // distance: 0 OK, 1 too close, 2 too far
					position =  result.positionConstrains.positionTipsVal // offset: 0 OK, 1 low, 2 high, 3 left, 4 right
					clarify = result.clarifyConstrains.clarifyTipsVal
					colorCast = result.colorConstrains.colorCastTipsVal
					posture = result.postureConstrains.postureTipsVal // 0 facing forward (correct pose), 1 face the camera straight on, 2 don't tilt the head sideways, 3 don't look up, 4 don't look down
					
					if(!result.brightnessConstrains.isBrightMatch){
						// brightness is not acceptable
						btip =  result.brightnessConstrains.brightTips
					}
					if(!result.postureConstrains.isPostureMatch){
						// pose is not acceptable
						postip = result.postureConstrains.postureTips
					}
					if(!result.positionConstrains.isDistanceMatch){
						dtip = result.positionConstrains.distanceTips
					}
					if(!result.positionConstrains.isPositionMatch){
						ptip = result.positionConstrains.positionTips
					}
					this.detectPosition = position
					this.detectDistance = distance
					this.detectPosture = posture
					this.detectBright = bright
					this.detectClarify = clarify
					this.detectColorCast = colorCast
					this.faceDetectorTips =  btip ? btip:(postip ? postip: (dtip ? dtip : ptip))
					this.postureDict = postureDict
					
					if(position === 0 && distance === 0 && clarify===0 && colorCast===0 && posture===0){
						return true // image quality is acceptable
					}
				}
				return false;
			},

			drawPoints(ctxDrawer, item, radius = 5, color = "white") {
				ctxDrawer.fillStyle = color
				ctxDrawer.beginPath();
				ctxDrawer.font = "16px sans-serif";
				ctxDrawer.arc(item[0], item[1], 3, 0, 2 * Math.PI, false);
				ctxDrawer.fill()
				ctxDrawer.closePath()
			},

			showCamera () {
				this.openCamera();
			},

			openCamera () {
				this.$toast.loading({
					duration: 0,
					message: "摄像头调用中",
					forbidClick: true,
				});
				// on non-HTTPS pages this fails because navigator.mediaDevices is undefined
				if ((navigator.mediaDevices && navigator.mediaDevices.getUserMedia) || navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia) {
					this.getUserMedia({
						video: {
							width: {min: 1152, ideal: 1344, max: 1920},
							height: {min: 648, ideal: 756, max: 1080},
            	facingMode: 'user',
						}
          });
				} else {
					this.$toast.clear();
					setTimeout(() => {
						Toast('你的浏览器不支持访问用户媒体设备')
					})
					console.log("你的浏览器不支持访问用户媒体设备");
				}
			},

			getUserMedia(constrains) {
				/* cross-browser getUserMedia compatibility */
				let that = this;
				if (navigator.mediaDevices.getUserMedia) {
					// current standard API
					navigator.mediaDevices.getUserMedia(constrains).then(stream => {
						that.getUserMediaSuccess(stream);
					}).catch(err => {
						that.getUserMediaFail(err);
					});
				} else if (navigator.webkitGetUserMedia || navigator.mozGetUserMedia) {
					// WebKit-based browsers
					if (navigator.mediaDevices === undefined) {
						navigator.mediaDevices = {};
					}

					// Some browsers only partially support mediaDevices. We can't simply assign getUserMedia
					// to the object, since that could overwrite existing properties; only add it when it's missing.
					if (navigator.mediaDevices.getUserMedia === undefined) {
						navigator.mediaDevices.getUserMedia = function(constraints) {

							// first, grab the prefixed getUserMedia if it exists
							var getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

							// some browsers don't implement it at all; return a rejected promise to keep a consistent interface
							if (!getUserMedia) {
								return Promise.reject(new Error('getUserMedia is not implemented in this browser'));
							}

							// otherwise wrap the legacy navigator.getUserMedia in a Promise
							return new Promise(function(resolve, reject) {
								getUserMedia.call(navigator, constraints, resolve, reject);
							});
						}
					}
					navigator.mediaDevices.getUserMedia(constrains).then(stream => {
						that.getUserMediaSuccess(stream);
					}).catch(err => {
						that.getUserMediaFail(err);
					});
				} else if (navigator.getUserMedia) {
					// legacy API
					navigator.getUserMedia(constrains).then(stream => {
						that.getUserMediaSuccess(stream);
					}).catch(err => {
						that.getUserMediaFail(err);
					});
				}
			},

			/* media stream acquired successfully */
			getUserMediaSuccess (stream) {
				this.pageState = 1;
				let _stream = stream
				try {
					_stream = window.URL.createObjectURL(stream);
				} catch (err) {
					_stream = stream;
				}

				this.$toast.clear();
				this.mediaStreamTrack = stream; // _stream;
				this.videoObj.srcObject = _stream; // hand the captured stream to the video element; window.URL.createObjectURL(stream) is no longer used
				this.videoObj.play(); // play the video
 
				// wait for the stream to be ready, i.e. the video can actually start playing
				this.videoObj.addEventListener('canplay', (ev) => {
				// this.videoObj.addEventListener('loadedmetadata', (ev) => {
					if (!this.streaming) {
						this.videoWidth = this.videoObj.videoWidth;
						this.videoHeight = this.videoObj.videoHeight;
						this.streaming = true;

						if(this.videoObj.videoWidth > this.videoObj.videoHeight){
							this.sliceFlag = true
						}
    
						if(this.sliceFlag){
							let imageWidth = window.innerWidth
							let imageHeight = window.innerHeight
							this.canvasObj.setAttribute('width', imageWidth);
							this.canvasObj.setAttribute('height', imageHeight);
							this.drawerCanvasObj.setAttribute('width', imageWidth);
							this.drawerCanvasObj.setAttribute('height', imageHeight);

							this.face_detect_model.setImageSize([imageWidth,imageHeight]) // face viewport size
						}else{
							this.canvasObj.setAttribute('width', this.videoWidth);
							this.canvasObj.setAttribute('height', this.videoHeight);
							this.drawerCanvasObj.setAttribute('width', this.videoWidth);
							this.drawerCanvasObj.setAttribute('height', this.videoHeight);

							this.face_detect_model.setImageSize([this.videoWidth,this.videoHeight]) // face viewport size
						}

						// start detection
						this.startDetect()
					}
				}, false);
		
			},

			getUserMediaFail (err) {
				/* failed to acquire the media stream */
				this.$toast.clear();
				this.hideCamera(false);
				setTimeout(() => {
					Toast('请检查摄像头是否正常开启')
				})
			},

			hideCamera (isCloseCamera) {
				this.stopDetect();
				if(this.mediaStreamTrack) {
					this.mediaStreamTrack.getTracks()[0].stop(); // stop the media track
				}
			},

			/* start the detection loop */
			async startDetect () {
				console.log('start detection')
				this.startTime = new Date().getTime();
				this.runnerTimeout = requestAnimationFrame(this.runnerFunction)
			},

			/* stop the detection loop */
			stopDetect () {
				console.log('stop detection')
				this.detectStatus = false
				cancelAnimationFrame(this.runnerTimeout);
			},

			startFaceDetectOver(){
				this.getImageDataing = false
				this.faceDetect = false
				this.detectBright = -1
				this.detectPosition = -1
				this.detectDistance = -1
				this.detectPosture = -1
				this.faceDetectorTips = '请保持人脸在采集框中'
			},

			continueDetect() {
				this.getImageDataing = false
			},

			toggleDetect(){
        if(this.detectStatus){
				   this.stopDetect()
				}else{
          this.startDetect()
				}
			},

			upload(imageData) {
				if (!this.isUploading) {
					console.log("调用上传接口");
					this.isUploading = true;
					uploadImage(imageData, this.onUploadSuccess, this.onUploadFailed);
					setTimeout(() => {
						this.pageState = 2;
						this.hideCamera(true);
					}, 3000);
				}
			},

			onUploadSuccess(response) {
				console.log('response = ');
				console.log(response);
				this.isUploading = false;
				if (response.data && response.data.code == 200) {
					this.isUploadSuccess = true;

					console.log('upload succeeded')
				} else {
					console.log('upload failed')
				}
			},

			onUploadFailed(error) {
				console.log('uploading image failed, error= ', error);
				console.log('network error')
			},

			dataURLtoBlob(dataurl) {
				let arr = dataurl.split(','), mime = arr[0].match(/:(.*?);/)[1],
						bstr = atob(arr[1]), n = bstr.length, u8arr = new Uint8Array(n);
				while (n--) {
						u8arr[n] = bstr.charCodeAt(n);
				}
				return new Blob([u8arr], {type: mime});
			},
		}
	}
</script>


<style lang="less" scoped>
	#imgCanvas{
		position: fixed;
		z-index: -99;
		display: block;
		left: 300%;
	}

	

	* {
		margin: 0;
		padding: 0;
		font-family: 'FZYANS_JW';
	}

	.main_container {
		position: fixed;
		top: 0;
		left: 0;
		right: 0;
		bottom: 0;
		z-index: 0;


		.ai-diagnosis-wrapper {
			position: absolute;
			overflow: hidden;
			width: 100%;
			height: 100%;
			background-color: #787a7a;
			display: flex;
			flex-direction: column;
			text-align: center;
			align-items: center;
			.zb-bg {
				display: block;
				width: 100%;
			}
		}

		.video-box {
			position: absolute;
			overflow: hidden;
			width: 100%;
			height: 100%;

			.video {
				position: absolute;
				display: block;
				width: 100%;
				height: 100%;
				background: black;
				transform: rotateY(180deg);
				-webkit-transform: rotateY(180deg); /* Safari and Chrome */
				-moz-transform: rotateY(180deg);
				object-fit: cover;
				z-index: 99;
			}

			.canvas_render {
				/*visibility: hidden;*/
				position: absolute;
				top: 0px;
				left: 0px;
				width: 100%;
				height: 100%;
				background: transparent;
				transform: rotateY(180deg);
				-webkit-transform: rotateY(180deg); /* Safari and Chrome */
				-moz-transform: rotateY(180deg);
				object-fit: cover;
				z-index: 110;
			}

			.video-mask {
				position: absolute;
				top: 1.47rem;
				left: 50%;
				width: 2.74rem;
				z-index: 99;
				transform: translateX(-50%);

				.circle-img {
					width: 100%;
					box-shadow: 0 0 100vh 100vh rgba(0, 0, 0, 0.50);
					border-radius: 100%;
				}
			}

			.mantle-box {
				z-index: 101;
				position: absolute;
				top: 0px;
				left: 0px;
				width: 100%;
				height: 100%;

				.message-tips {
					position: absolute;
					display: flex;
					align-items: center;
					justify-content: center;
					top: 1.25rem;
					width: 100%;
					object-fit: cover;

					.message-tips-img {
						position: absolute;
						width: 70%;
						z-index: 103;
					}

					.message-tips-p {
						position: absolute;
						flex: auto;
						font-family: PingFang SC;
						color: white;
						font-weight: bold;
						font-size: 0.2rem;
						z-index: 104;
					}
				}

				.photo-env {
					position: absolute;
					display: flex;
					align-items: center;
					justify-content: space-between;
					bottom: 0.5rem;
					width: 100%;
					padding: 0 0.3rem;
					box-sizing: border-box;

					li {
						width: 0.84rem;
						height: 0.765rem;
						background: rgba(0, 0, 0, 0.1);
						border-radius: 0.12rem;
						display: flex;
						align-items: center;
						flex-direction: column;
						text-align: center;
						
						.icon {
							flex-shrink: 1;
							width: 0.29rem;
						}

						.r-box {
							.p-1 {
								color: #C9C9C9;
								font-size: 0.13rem;
							}

							.p-2 {
								font-weight: 600;
								color: #ffffff;
								font-size: 0.15rem;
							}
						}
					}
				}

				.txt {
					margin-top: 0.25rem;
					text-align: center;
					font-weight: 600;
					color: white;
					font-size: 0.16rem;
				}

				.tip {
					width: 100%;
					box-sizing: border-box;
					z-index: 101;
					position: absolute;
					bottom: 0.07rem;
					left: 0;
					color: white;
					font-size: 0.12rem;
					text-align: center;
				}
			}

		}

	}
	
	.adw-btn {
		width: 2.68rem;
		height: 0.502rem;
		background-image: url("../assets/home/btn-bg.png");
		background-size: 100%;
		text-align: center;
		line-height: 0.502rem;
		position: fixed;
		bottom: 0.42rem;
		left: 50%;
		transform: translateX(-50%);
		font-size: 0.22rem;
		color: #FFFFFF;
		font-weight: bold;
		&.gray {
			opacity: 0.4;
		}
	}


</style>
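The component above also references a few helpers that are not shown in the post: modelUtils.getDefaultCameraConstrains(), commonUtils.checkMouthOpen(), uploadImage() and Toast, and checkImg reads richer quality fields (postureConstrains, colorConstrains, *TipsVal) than the checkFace shown in ModelUtils.js produces, so the project presumably uses a fuller build of those utilities. To run the component standalone, stand-in stubs along these lines would be needed; the names, landmark indices and upload endpoint below are assumptions, not code from the original project:

// sketch: stand-in stubs for helpers the component uses but the post does not include
export const modelUtils = {
  getDefaultCameraConstrains() {
    // must match the constrainsOptions shape expected by FaceEngine.handleImageFrame / checkFace
    return {
      positionConstrains: {minDistance: 0.3, maxDistance: 0.7, xThreshold: 60, yThreshold: 80},
      brightnessConstrains: {minValue: 80, maxValue: 200},
      clarifyConstrains: {minValue: 10},
    };
  },
};

export const commonUtils = {
  // rough mouth-open check over the 468 face-mesh landmarks (lip indices 13/14 are an assumption)
  checkMouthOpen(landmarks, threshold) {
    if (!landmarks) return false;
    const upperLip = landmarks[13];
    const lowerLip = landmarks[14];
    return Math.abs(lowerLip[1] - upperLip[1]) > threshold;
  },
};

export function uploadImage(blob, onSuccess, onFailed) {
  const form = new FormData();
  form.append('file', blob, 'face.jpg');
  // the endpoint is a placeholder
  fetch('/api/face/upload', {method: 'POST', body: form})
    .then(res => res.json())
    .then(data => onSuccess({data}))
    .catch(onFailed);
}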

 

 

 
