我正在尝试用 face-api.js 创建一个人脸检测应用程序。我希望在检测到人脸时,不再绘制默认的检测框,而是在人脸的位置上显示我自定义的图像(例如眼镜,或任何我指定的图片)。下面是我的代码,我不知道该怎么实现这一点。
index.html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta http-equiv="X-UA-Compatible" content="ie=edge" />
<title>Document</title>
<!-- face-api.js must appear before script.js; both are deferred, so they
     execute in order after the DOM has been parsed. -->
<script defer src="face-api.min.js"></script>
<script defer src="script.js"></script>
<style>
/* Absolutely position both elements at the same offset so the canvas
   sits exactly on top of the video and overlays line up with the stream. */
video,
canvas {
margin-left: 10px;
margin-top: 10px;
position: absolute;
}
</style>
</head>
<body>
<!-- Webcam preview; muted + autoplay allows playback to start without user interaction. -->
<video id="video" width="720" height="560" autoplay muted></video>
<!-- Drawing surface for the detection overlays; same dimensions as the video. -->
<canvas id="canvas" width="720" height="560"></canvas>
</body>
</html>
script.js
// Video element that receives the webcam stream.
const video = document.getElementById("video");

// Overlay image (e.g. sunglasses) to draw over each detected face in place
// of the default detection rectangle. `const` instead of legacy `var`.
const img = document.createElement("img");
img.src = "../build/sunglasses-black-tundra-2_800x.png";
// Load every model needed below (tiny detector, 68-point landmarks,
// expressions), then start the webcam. A .catch is attached so a failed
// model download is reported instead of becoming an unhandled rejection.
// NOTE(review): faceRecognitionNet is loaded but never used in this file;
// it could be dropped to save a download — kept for backward compatibility.
Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri("/models"),
  faceapi.nets.faceLandmark68Net.loadFromUri("/models"),
  faceapi.nets.faceRecognitionNet.loadFromUri("/models"),
  faceapi.nets.faceExpressionNet.loadFromUri("/models")
])
  .then(startVideo)
  .catch((err) => console.error(err));
/**
 * Starts the webcam and pipes the stream into the <video> element.
 * Uses the promise-based navigator.mediaDevices.getUserMedia — the
 * legacy callback-style navigator.getUserMedia is deprecated and
 * missing from current browsers.
 */
function startVideo() {
  navigator.mediaDevices
    .getUserMedia({ video: true })
    .then((stream) => {
      video.srcObject = stream;
    })
    .catch((err) => console.error(err));
}
// The bare `startVideo();` call that used to follow here is removed:
// it started the camera before the models had finished loading and
// duplicated the Promise.all(...).then(startVideo) invocation above.
// Once the video starts playing, run face detection every 100 ms and draw
// the custom overlay image on top of each detected face — instead of the
// default detection rectangle — mirroring the tracking.js example below.
video.addEventListener("play", () => {
  const displaySize = { width: video.width, height: video.height };
  const canvas = document.getElementById("canvas");
  // Hoisted out of the interval: the 2D context never changes, so there is
  // no reason to fetch it on every tick.
  const context = canvas.getContext("2d");
  faceapi.matchDimensions(canvas, displaySize);
  setInterval(async () => {
    const detections = await faceapi
      .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
      .withFaceLandmarks()
      .withFaceExpressions();
    const resizedDetections = faceapi.resizeResults(detections, displaySize);
    context.clearRect(0, 0, canvas.width, canvas.height);
    // Replaces faceapi.draw.drawDetections(canvas, resizedDetections):
    // draw `img` over each face box, using the same offset/scale factors
    // as the tracking.js snippet (raise by 85px, widen 1.1x, double height)
    // so the glasses roughly cover the eye region. Tune per image.
    resizedDetections.forEach((result) => {
      const { x, y, width, height } = result.detection.box;
      context.drawImage(img, x, y - 85, width * 1.1, height * 2);
    });
  }, 100);
});
作为对比,用 tracking.js 库很容易实现这个效果:
window.onload = function() {
var video = document.getElementById("video");
var canvas = document.getElementById("canvas");
var context = canvas.getContext("2d");
var tracker = new tracking.ObjectTracker("face");
tracker.setInitialScale(4.7);
tracker.setStepSize(2);
tracker.setEdgesDensity(0.1);
tracking.track("#video", tracker, { camera: true });
tracker.on("track", function(event) {
context.clearRect(0, 0, canvas.width, canvas.height);
event.data.forEach(function(rect) {
context.drawImage(
img,
rect.x,
rect.y - 85,
rect.width * 1.1,
rect.height * 2
);
});
});