React + three.js: Face Motion Capture with 3D Model Expression Sync

Series Index

  1. React: loading a glTF 3D model with three.js | three.js primer
  2. React + three.js: 3D model skeletal binding
  3. React + three.js: 3D model facial expression control
  4. React + three.js: face motion capture with 3D model expression sync
  5. Building a desktop face motion capture app with react-webcam, three.js, and electron

Example project (GitHub): https://github.com/couchette/simple-react-three-facial-expression-sync-demo
Example project (GitCode): https://gitcode.com/qq_41456316/simple-react-three-facial-expression-sync-demo



Preface

In the previous article in this series, we explored how to drive a model's facial expressions with three.js in React. Now we will go a step further and combine facial landmark detection with that expression control to implement face motion capture and keep a 3D model's expression in sync with your own. Let's explore together how to give your 3D model livelier, richer expressions!


I. Implementation Steps

1. Create the project and configure the environment

Create the project with create-react-app:

npx create-react-app simple-react-three-facial-expression-sync-demo
cd simple-react-three-facial-expression-sync-demo

Install three.js and MediaPipe's tasks-vision package:

npm i three
npm i @mediapipe/tasks-vision

Copy the contents of the example project's public directory (the model files and related assets) into the public directory of the newly created project; the expected layout is sketched below.
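Judging from the asset paths referenced in the component code later in this article, public should end up looking roughly like this (an assumed layout, for orientation only):

public/
├── ai_models/
│   └── face_landmarker.task    (MediaPipe face landmark model)
├── basis/                      (KTX2/Basis transcoder files)
├── fileset_resolver/
│   └── wasm/                   (MediaPipe vision wasm runtime)
└── models/
    └── facecap.glb             (the rigged head model)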

2. Create the component

Create a components folder under the src directory, and inside it create a file named ThreeContainer.js.
First, create the component and take a ref to the element it returns:

import * as THREE from "three";
import { useRef, useEffect } from "react";

function ThreeContainer() {
  const containerRef = useRef(null);
  const isContainerRunning = useRef(false);
  return <div ref={containerRef} />;
}

export default ThreeContainer;

Next, append the render element that three.js creates automatically as a child of the returned element (note the containerRef.current.appendChild(renderer.domElement); call). The related logic runs inside useEffect. The full code is as follows:

import * as THREE from "three";

import { OrbitControls } from "three/addons/controls/OrbitControls.js";
import { RoomEnvironment } from "three/addons/environments/RoomEnvironment.js";

import { GLTFLoader } from "three/addons/loaders/GLTFLoader.js";
import { KTX2Loader } from "three/addons/loaders/KTX2Loader.js";
import { MeshoptDecoder } from "three/addons/libs/meshopt_decoder.module.js";

import { GUI } from "three/addons/libs/lil-gui.module.min.js";
import { useRef, useEffect } from "react";

// Mediapipe

import { FaceLandmarker, FilesetResolver } from "@mediapipe/tasks-vision";

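// Map MediaPipe's ARKit-style blendshape category names (keys) to the morph
// target names defined in the facecap.glb model (values).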
const blendshapesMap = {
  // '_neutral': '',
  browDownLeft: "browDown_L",
  browDownRight: "browDown_R",
  browInnerUp: "browInnerUp",
  browOuterUpLeft: "browOuterUp_L",
  browOuterUpRight: "browOuterUp_R",
  cheekPuff: "cheekPuff",
  cheekSquintLeft: "cheekSquint_L",
  cheekSquintRight: "cheekSquint_R",
  eyeBlinkLeft: "eyeBlink_L",
  eyeBlinkRight: "eyeBlink_R",
  eyeLookDownLeft: "eyeLookDown_L",
  eyeLookDownRight: "eyeLookDown_R",
  eyeLookInLeft: "eyeLookIn_L",
  eyeLookInRight: "eyeLookIn_R",
  eyeLookOutLeft: "eyeLookOut_L",
  eyeLookOutRight: "eyeLookOut_R",
  eyeLookUpLeft: "eyeLookUp_L",
  eyeLookUpRight: "eyeLookUp_R",
  eyeSquintLeft: "eyeSquint_L",
  eyeSquintRight: "eyeSquint_R",
  eyeWideLeft: "eyeWide_L",
  eyeWideRight: "eyeWide_R",
  jawForward: "jawForward",
  jawLeft: "jawLeft",
  jawOpen: "jawOpen",
  jawRight: "jawRight",
  mouthClose: "mouthClose",
  mouthDimpleLeft: "mouthDimple_L",
  mouthDimpleRight: "mouthDimple_R",
  mouthFrownLeft: "mouthFrown_L",
  mouthFrownRight: "mouthFrown_R",
  mouthFunnel: "mouthFunnel",
  mouthLeft: "mouthLeft",
  mouthLowerDownLeft: "mouthLowerDown_L",
  mouthLowerDownRight: "mouthLowerDown_R",
  mouthPressLeft: "mouthPress_L",
  mouthPressRight: "mouthPress_R",
  mouthPucker: "mouthPucker",
  mouthRight: "mouthRight",
  mouthRollLower: "mouthRollLower",
  mouthRollUpper: "mouthRollUpper",
  mouthShrugLower: "mouthShrugLower",
  mouthShrugUpper: "mouthShrugUpper",
  mouthSmileLeft: "mouthSmile_L",
  mouthSmileRight: "mouthSmile_R",
  mouthStretchLeft: "mouthStretch_L",
  mouthStretchRight: "mouthStretch_R",
  mouthUpperUpLeft: "mouthUpperUp_L",
  mouthUpperUpRight: "mouthUpperUp_R",
  noseSneerLeft: "noseSneer_L",
  noseSneerRight: "noseSneer_R",
  // '': 'tongueOut'
};

function ThreeContainer() {
  const containerRef = useRef(null);
  const isContainerRunning = useRef(false);

  useEffect(() => {
    if (!isContainerRunning.current && containerRef.current) {
      isContainerRunning.current = true;
      init();
    }

    async function init() {
      const renderer = new THREE.WebGLRenderer({ antialias: true });
      renderer.setPixelRatio(window.devicePixelRatio);
      renderer.setSize(window.innerWidth, window.innerHeight);
      renderer.toneMapping = THREE.ACESFilmicToneMapping;
      containerRef.current.appendChild(renderer.domElement);

      const camera = new THREE.PerspectiveCamera(
        60,
        window.innerWidth / window.innerHeight,
        1,
        100
      );
      camera.position.z = 5;

      const scene = new THREE.Scene();
      scene.scale.x = -1;

      const environment = new RoomEnvironment(renderer);
      const pmremGenerator = new THREE.PMREMGenerator(renderer);

      scene.background = new THREE.Color(0x666666);
      scene.environment = pmremGenerator.fromScene(environment).texture;

      const controls = new OrbitControls(camera, renderer.domElement);

      // Face

      let face, eyeL, eyeR;
      const eyeRotationLimit = THREE.MathUtils.degToRad(30);

      const ktx2Loader = new KTX2Loader()
        .setTranscoderPath("/basis/")
        .detectSupport(renderer);

      new GLTFLoader()
        .setKTX2Loader(ktx2Loader)
        .setMeshoptDecoder(MeshoptDecoder)
        .load("models/facecap.glb", (gltf) => {
          const mesh = gltf.scene.children[0];
          scene.add(mesh);

          const head = mesh.getObjectByName("mesh_2");
          head.material = new THREE.MeshNormalMaterial();

          face = mesh.getObjectByName("mesh_2");
          eyeL = mesh.getObjectByName("eyeLeft");
          eyeR = mesh.getObjectByName("eyeRight");

          // GUI

          const gui = new GUI();
          gui.close();

          const influences = head.morphTargetInfluences;

          for (const [key, value] of Object.entries(
            head.morphTargetDictionary
          )) {
            gui
              .add(influences, value, 0, 1, 0.01)
              .name(key.replace("blendShape1.", ""))
              .listen(influences);
          }

          renderer.setAnimationLoop(animation);
        });

      // Video Texture

      const video = document.createElement("video");

      // const texture = new THREE.VideoTexture(video);
      // texture.colorSpace = THREE.SRGBColorSpace;

      const geometry = new THREE.PlaneGeometry(1, 1);
      const material = new THREE.MeshBasicMaterial({
        // map: texture,
        depthWrite: false,
      });
      const videomesh = new THREE.Mesh(geometry, material);
      scene.add(videomesh);

      // MediaPipe

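      // The wasm runtime and the landmark model are served locally from
      // public/; the commented-out URLs are the hosted CDN alternatives.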
      const filesetResolver = await FilesetResolver.forVisionTasks(
        // "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.0/wasm"
        "fileset_resolver/wasm"
      );

      const faceLandmarker = await FaceLandmarker.createFromOptions(
        filesetResolver,
        {
          baseOptions: {
            modelAssetPath:
              // "https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task",
              "ai_models/face_landmarker.task",
            delegate: "GPU",
          },
          outputFaceBlendshapes: true,
          outputFacialTransformationMatrixes: true,
          runningMode: "VIDEO",
          numFaces: 1,
        }
      );

      if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices
          .getUserMedia({ video: { facingMode: "user" } })
          .then(function (stream) {
            video.srcObject = stream;
            video.play();
          })
          .catch(function (error) {
            console.error("Unable to access the camera/webcam.", error);
          });
      }

      const transform = new THREE.Object3D();

      function animation() {
        if (video.readyState >= HTMLMediaElement.HAVE_METADATA) {
          const results = faceLandmarker.detectForVideo(video, Date.now());
          console.log(results);

          if (results.facialTransformationMatrixes.length > 0) {
            const facialTransformationMatrixes =
              results.facialTransformationMatrixes[0].data;

            transform.matrix.fromArray(facialTransformationMatrixes);
            transform.matrix.decompose(
              transform.position,
              transform.quaternion,
              transform.scale
            );

            const object = scene.getObjectByName("grp_transform");

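            // Drive the model's root group ("grp_transform") with the detected
            // head pose; the Y and Z axes are swapped (with a sign flip) to map
            // MediaPipe's coordinate convention onto the scene's, and +40 looks
            // like a vertical offset tuned for this model.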
            object.position.x = transform.position.x;
            object.position.y = transform.position.z + 40;
            object.position.z = -transform.position.y;

            object.rotation.x = transform.rotation.x;
            object.rotation.y = transform.rotation.z;
            object.rotation.z = -transform.rotation.y;
          }

          if (results.faceBlendshapes.length > 0) {
            const faceBlendshapes = results.faceBlendshapes[0].categories;

            // Morph targets don't exist on the eye meshes, so we map the eye blendshape scores to rotation values
            const eyeScore = {
              leftHorizontal: 0,
              rightHorizontal: 0,
              leftVertical: 0,
              rightVertical: 0,
            };

            for (const blendshape of faceBlendshapes) {
              const categoryName = blendshape.categoryName;
              const score = blendshape.score;

              const index =
                face.morphTargetDictionary[blendshapesMap[categoryName]];

              if (index !== undefined) {
                face.morphTargetInfluences[index] = score;
              }

              // There are two blendshapes for movement on each axis (up/down, in/out);
              // add one and subtract the other to get a final score in the -1 to 1 range
              switch (categoryName) {
                case "eyeLookInLeft":
                  eyeScore.leftHorizontal += score;
                  break;
                case "eyeLookOutLeft":
                  eyeScore.leftHorizontal -= score;
                  break;
                case "eyeLookInRight":
                  eyeScore.rightHorizontal -= score;
                  break;
                case "eyeLookOutRight":
                  eyeScore.rightHorizontal += score;
                  break;
                case "eyeLookUpLeft":
                  eyeScore.leftVertical -= score;
                  break;
                case "eyeLookDownLeft":
                  eyeScore.leftVertical += score;
                  break;
                case "eyeLookUpRight":
                  eyeScore.rightVertical -= score;
                  break;
                case "eyeLookDownRight":
                  eyeScore.rightVertical += score;
                  break;
              }
            }

            eyeL.rotation.z = eyeScore.leftHorizontal * eyeRotationLimit;
            eyeR.rotation.z = eyeScore.rightHorizontal * eyeRotationLimit;
            eyeL.rotation.x = eyeScore.leftVertical * eyeRotationLimit;
            eyeR.rotation.x = eyeScore.rightVertical * eyeRotationLimit;
          }
        }

        videomesh.scale.x = video.videoWidth / 100;
        videomesh.scale.y = video.videoHeight / 100;

        renderer.render(scene, camera);

        controls.update();
      }

      window.addEventListener("resize", function () {
        camera.aspect = window.innerWidth / window.innerHeight;
        camera.updateProjectionMatrix();

        renderer.setSize(window.innerWidth, window.innerHeight);
      });
    }
  }, []);
  return <div ref={containerRef} />;
}

export default ThreeContainer;
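A note on the isContainerRunning ref: React 18's StrictMode mounts components twice in development, so without the guard the effect would create two renderers and open the camera twice. An alternative is to return a cleanup function from useEffect. Below is a minimal sketch of that pattern; it is not part of the demo project, the component name is hypothetical, and the scene setup is elided:

import * as THREE from "three";
import { useRef, useEffect } from "react";

function ThreeContainerWithCleanup() {
  const containerRef = useRef(null);

  useEffect(() => {
    const renderer = new THREE.WebGLRenderer({ antialias: true });
    renderer.setSize(window.innerWidth, window.innerHeight);
    containerRef.current.appendChild(renderer.domElement);

    let stream = null;
    navigator.mediaDevices
      .getUserMedia({ video: { facingMode: "user" } })
      .then((s) => {
        stream = s; // keep a handle so the cleanup can stop the tracks
      })
      .catch((error) =>
        console.error("Unable to access the camera/webcam.", error)
      );

    // ...scene, camera, loaders and animation loop as in the full component...

    return () => {
      renderer.setAnimationLoop(null); // stop the render loop
      renderer.dispose(); // free GPU resources
      renderer.domElement.remove(); // detach the canvas
      if (stream) stream.getTracks().forEach((track) => track.stop()); // release the webcam
    };
  }, []);

  return <div ref={containerRef} />;
}

Either approach works; the guard is simpler, while the cleanup version also releases the webcam when the component unmounts.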


3. Use the component

Modify App.js as follows:

import "./App.css";
import ThreeContainer from "./components/ThreeContainer";

function App() {
  return (
    <div>
      <ThreeContainer />
    </div>
  );
}

export default App;


4. Run the project

Run the project with npm start. The model's expression changes with the face captured by the camera. I have commented out the code that displays the captured image; if you want to compare against the live picture, uncomment those lines (they are shown re-enabled in the snippet below).
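For reference, this is the video-texture section of the component with those comments removed; nothing else needs to change:

const texture = new THREE.VideoTexture(video);
texture.colorSpace = THREE.SRGBColorSpace;

const geometry = new THREE.PlaneGeometry(1, 1);
const material = new THREE.MeshBasicMaterial({
  map: texture,
  depthWrite: false,
});
const videomesh = new THREE.Mesh(geometry, material);
scene.add(videomesh);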


Summary

After reading this article, you should have a basic picture of how to implement face motion capture and 3D model expression sync in React. If this interests you, try it yourself; you may be in for some pleasant surprises. And do keep exploring: pushing the combined power of React and three.js to its limits can add a lot of fun and delight to your web apps.

Demo Preview

{in preparation}
