
node.js - How to run MediaPipe FaceMesh in an ES6 Node.js environment


I'm trying to run this HTML example https://codepen.io/mediapipe/details/KKgVaPJ from https://google.github.io/mediapipe/solutions/face_mesh#javascript-solution-api inside a create-react-app project. So far I have:

  • npm installed all the FaceMesh MediaPipe packages.
  • Replaced the jsdelivr script tags with Node imports, and I do get the definitions and functions.
  • Replaced the video element with react-webcam.

What I don't know is how to replace this jsdelivr call, which is probably what's breaking things:

const faceMesh = new FaceMesh({
  locateFile: (file) => {
    return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
  }
});
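
(For context, one thing I considered, untested and purely an assumption on my part: copying the asset files that ship with the npm package into create-react-app's public/ folder and pointing locateFile at them instead of the CDN:)

// Untested sketch: assumes the .wasm / .data / .binarypb files from
// node_modules/@mediapipe/face_mesh/ have been copied into
// public/mediapipe/face_mesh/ (e.g. with a postinstall copy script).
// locateFile then maps each requested file name onto that local path.
const faceMesh = new FaceMesh({
  locateFile: (file) => `/mediapipe/face_mesh/${file}`
});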

So the question is:

  • Why isn't the face mesh being displayed? Is there an example of what I'm trying to do?

Here is my App.js code (sorry for the debugging scaffolding):

import './App.css';
import React, { useState, useEffect } from "react";
import Webcam from "react-webcam";
import { Camera, CameraOptions } from '@mediapipe/camera_utils'
import {
  FaceMesh,
  FACEMESH_TESSELATION,
  FACEMESH_RIGHT_EYE,
  FACEMESH_LEFT_EYE,
  FACEMESH_RIGHT_EYEBROW,
  FACEMESH_LEFT_EYEBROW,
  FACEMESH_FACE_OVAL,
  FACEMESH_LIPS
} from '@mediapipe/face_mesh'
import { drawConnectors } from '@mediapipe/drawing_utils'

const videoConstraints = {
  width: 1280,
  height: 720,
  facingMode: "user"
};

function App() {
  const webcamRef = React.useRef(null);
  const canvasReference = React.useRef(null);
  const [cameraReady, setCameraReady] = useState(false);
  let canvasCtx
  let camera

  const videoElement = document.getElementsByClassName('input_video')[0];
  // const canvasElement = document.getElementsByClassName('output_canvas')[0];

  const canvasElement = document.createElement('canvas');

  console.log('canvasElement', canvasElement)
  console.log('canvasCtx', canvasCtx)

  useEffect(() => {
    camera = new Camera(webcamRef.current, {
      onFrame: async () => {
        console.log('{send}', await faceMesh.send({ image: webcamRef.current.video }));
      },
      width: 1280,
      height: 720
    });

    canvasCtx = canvasReference.current.getContext('2d');
    camera.start();
    console.log('canvasReference', canvasReference)

  }, [cameraReady]);

  function onResults(results) {
    console.log('results')
    canvasCtx.save();
    canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
    canvasCtx.drawImage(
      results.image, 0, 0, canvasElement.width, canvasElement.height);
    if (results.multiFaceLandmarks) {
      for (const landmarks of results.multiFaceLandmarks) {
        drawConnectors(canvasCtx, landmarks, FACEMESH_TESSELATION, { color: '#C0C0C070', lineWidth: 1 });
        drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYE, { color: '#FF3030' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYEBROW, { color: '#FF3030' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYE, { color: '#30FF30' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYEBROW, { color: '#30FF30' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_FACE_OVAL, { color: '#E0E0E0' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LIPS, { color: '#E0E0E0' });
      }
    }
    canvasCtx.restore();
  }

  const faceMesh = new FaceMesh({
    locateFile: (file) => {
      return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
    }
  });
  faceMesh.setOptions({
    selfieMode: true,
    maxNumFaces: 1,
    minDetectionConfidence: 0.5,
    minTrackingConfidence: 0.5
  });
  faceMesh.onResults(onResults);

  // const camera = new Camera(webcamRef.current, {
  //   onFrame: async () => {
  //     await faceMesh.send({ image: videoElement });
  //   },
  //   width: 1280,
  //   height: 720
  // });
  // camera.start();

  return (
    <div className="App">
      <Webcam
        audio={false}
        height={720}
        ref={webcamRef}
        screenshotFormat="image/jpeg"
        width={1280}
        videoConstraints={videoConstraints}
        onUserMedia={() => {
          console.log('webcamRef.current', webcamRef.current);
          // navigator.mediaDevices
          //   .getUserMedia({ video: true })
          //   .then(stream => webcamRef.current.srcObject = stream)
          //   .catch(console.log);

          setCameraReady(true)
        }}
      />
      <canvas
        ref={canvasReference}
        style={{
          position: "absolute",
          marginLeft: "auto",
          marginRight: "auto",
          left: 0,
          right: 0,
          textAlign: "center",
          zindex: 9,
          width: 1280,
          height: 720,
        }}
      />
    </div>
  );
}

export default App;

Best Answer

You don't need to replace the jsdelivr; that piece of code is fine. I also think you need to reorder your code a bit:

  • You should put the faceMesh initialization inside useEffect, with [] as the dependency array; that way the algorithm starts when the page is first rendered.
  • Also, you don't need to fetch videoElement and canvasElement with document.* lookups, because you have already defined refs for them.

Code example:

useEffect(() => {
  const faceMesh = new FaceDetection({
    locateFile: (file) => {
      return `https://cdn.jsdelivr.net/npm/@mediapipe/face_detection/${file}`;
    },
  });

  faceMesh.setOptions({
    maxNumFaces: 1,
    minDetectionConfidence: 0.5,
    minTrackingConfidence: 0.5,
  });

  faceMesh.onResults(onResults);

  if (
    typeof webcamRef.current !== "undefined" &&
    webcamRef.current !== null
  ) {
    camera = new Camera(webcamRef.current.video, {
      onFrame: async () => {
        await faceMesh.send({ image: webcamRef.current.video });
      },
      width: 1280,
      height: 720,
    });
    camera.start();
  }
}, []);
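
Note that my snippet uses FaceDetection from @mediapipe/face_detection; for the face-mesh solution you're after, the same pattern should work with the FaceMesh class you already import (a sketch, untested on my side):

// Sketch: same initialization pattern, but with FaceMesh and its own CDN path,
// matching the imports from '@mediapipe/face_mesh' in your App.js.
const faceMesh = new FaceMesh({
  locateFile: (file) =>
    `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`,
});

faceMesh.setOptions({
  maxNumFaces: 1,
  minDetectionConfidence: 0.5,
  minTrackingConfidence: 0.5,
});

faceMesh.onResults(onResults);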

Finally, in the onResults callback, I suggest printing the results first, just to check that the MediaPipe implementation is working. And don't forget to set the canvas size before drawing:

function onResults(results) {
  console.log(results)
  canvasCtx = canvasReference.current.getContext('2d')
  canvasReference.current.width = webcamRef.current.video.videoWidth;
  canvasReference.current.height = webcamRef.current.video.videoHeight;

  ...
}
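
Putting that together with the drawing code from your question, the callback would look roughly like this (a sketch reusing your drawConnectors imports; I've only shown one landmark group, the rest follow the same pattern):

function onResults(results) {
  console.log(results);
  const canvasElement = canvasReference.current;
  const canvasCtx = canvasElement.getContext('2d');

  // Size the canvas to the incoming video frame before drawing.
  canvasElement.width = webcamRef.current.video.videoWidth;
  canvasElement.height = webcamRef.current.video.videoHeight;

  canvasCtx.save();
  canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
  canvasCtx.drawImage(results.image, 0, 0, canvasElement.width, canvasElement.height);

  if (results.multiFaceLandmarks) {
    for (const landmarks of results.multiFaceLandmarks) {
      drawConnectors(canvasCtx, landmarks, FACEMESH_TESSELATION,
        { color: '#C0C0C070', lineWidth: 1 });
      // ...repeat for FACEMESH_RIGHT_EYE, FACEMESH_LIPS, etc., as in your code.
    }
  }
  canvasCtx.restore();
}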

Good luck! :)

About node.js - How to run MediaPipe FaceMesh in an ES6 Node.js environment: we found a similar question on Stack Overflow: https://stackoverflow.com/questions/67674453/
