Reputation: 43
Here is the code:
<!DOCTYPE html>
<html lang="en">
  <head>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-core"></script>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-converter"></script>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/facemesh"></script>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-webgl"></script>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Document</title>
    <script>
      async function get_facemesh() {
        const canvas = document.getElementById("facemesh");
        const draw = canvas.getContext("2d");
        const video = document.getElementById("movie");
        // Track a single face; load() expects a config object
        const model = await facemesh.load({ maxFaces: 1 });
        while (true) {
          const faces = await model.estimateFaces(video);
          if (faces.length != 0) {
            // Resetting the width clears the canvas between frames
            canvas.width = canvas.width;
            const mesh = faces[0].scaledMesh;
            // Landmarks 454/234 are the sides of the face, 152/10 chin and forehead
            const widthAcross = mesh[454][0] - mesh[234][0];
            const heightVertical = mesh[152][1] - mesh[10][1];
            console.log(heightVertical);
            draw.fillStyle = "red";
            for (let i = 0; i < mesh.length; i++) {
              draw.fillRect(mesh[i][0], mesh[i][1], 2, 2);
            }
          } else {
            console.log("No faces have been detected");
          }
          await tf.nextFrame();
        }
      }
    </script>
  </head>
  <body>
    <div class="webcam">
      <video width="600" height="450" autoplay id="movie"></video>
      <canvas width="600" height="450" id="facemesh"></canvas>
    </div>
    <script>
      const video = document.getElementById("movie");
      if (navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices.getUserMedia({ video: true }).then(function (stream) {
          video.srcObject = stream;
        });
      }
      main();
      // Poll until the video has enough data, then start the face mesh loop
      function main() {
        if (video.readyState == 4) {
          console.log("video is ready for processing..");
          get_facemesh();
        } else {
          console.log("nope, not loaded yet..");
          setTimeout(main, 1000 / 30);
        }
      }
    </script>
  </body>
  <style>
    body {
      background-color: limegreen;
      margin: 0;
      padding: 0;
    }
    video,
    canvas {
      -webkit-transform: scaleX(-1);
      transform: scaleX(-1);
      position: fixed;
      margin: 0 auto;
    }
    .webcam {
      width: 600px;
      height: 450px;
      background-color: limegreen;
      margin: auto;
    }
  </style>
</html>
Here is the codepen.io link for those who prefer that: https://codepen.io/mdsiam8/pen/gOrPayp.
I tried using the position: fixed trick, but it did not work, even though the two elements are exactly on top of one another. The canvas looks slightly stretched out, or maybe shifted to the left. I fiddled with this but couldn't find a fix, so I would really appreciate it if any of you could help. By the way, I know that a facemesh being offset from the webcam video is not normal: I have another file that uses the same position: fixed trick, and that one lines up perfectly, so I am not sure why it isn't the case here as well.
Upvotes: 1
Views: 484
Reputation: 136866
The <video> element's width and height attributes set "the dimensions of the visual content of the element" (ref); that is, they control only how it will be presented.
Your face detector works on the video data at the video's intrinsic dimensions, i.e. the ones defined by the media itself and not by the element, which is either stretching or shrinking the visual content when displaying it.
On the other hand, the canvas' width and height attributes do set its intrinsic dimensions.
So the two elements' intrinsic dimensions are actually different, and the coordinates found in the video do not match those of the canvas.
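To see the mismatch concretely, you can log both sets of dimensions. This is just an illustrative sketch, assuming a hypothetical camera that delivers a 640x480 stream:
const video = document.getElementById("movie");
video.addEventListener("loadedmetadata", () => {
  console.log(video.width, video.height);           // 600 450 (display size set by the attributes)
  console.log(video.videoWidth, video.videoHeight); // e.g. 640 480 (intrinsic size of the stream)
});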
To fix that, you can simply set your canvas' width and height to video.videoWidth and video.videoHeight respectively, so that it matches the intrinsic dimensions of your video content, and then resize it through CSS so that it matches the display dimensions of the <video> element.
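(An alternative, sketched below under the assumption that you keep the 600x450 canvas, would be to rescale every mesh point from the video's intrinsic space before drawing; matching the intrinsic dimensions and letting CSS scale the canvas is simpler.)
// sketch only: keep the 600x450 canvas and rescale each point instead
const scaleX = canvas.width / video.videoWidth;
const scaleY = canvas.height / video.videoHeight;
for (let i = 0; i < mesh.length; i++) {
  draw.fillRect(mesh[i][0] * scaleX, mesh[i][1] * scaleY, 2, 2);
}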
However, note that MediaStreams can change their dimensions while playing, so you may want to listen for the resize event of your <video>.
Link to updated codepen, since Stack Snippets don't allow getUserMedia.
And here are the most notable changes:
const canvas = document.getElementById("facemesh");
const video = document.getElementById("movie");

if (navigator.mediaDevices.getUserMedia) {
  navigator.mediaDevices.getUserMedia({ video: true })
    .then((stream) => {
      video.srcObject = stream;
      // keep the canvas' intrinsic size in sync with the stream's,
      // since a MediaStream can change dimensions while playing
      video.addEventListener("resize", (evt) => {
        canvas.width = video.videoWidth;
        canvas.height = video.videoHeight;
      });
      // don't poll, use events
      video.addEventListener("loadedmetadata", (evt) => {
        get_facemesh();
      });
    });
}
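Using the loadedmetadata event to start get_facemesh also replaces the setTimeout polling loop from the original main() function: the browser tells you when the video is ready instead of you asking thirty times a second.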
video,
canvas {
  -webkit-transform: scaleX(-1);
  transform: scaleX(-1);
  position: fixed;
  margin: 0 auto;
  /* here set the same display dimensions */
  width: 600px;
  height: 450px;
  /* force the same object-fit too */
  object-fit: contain;
}
<div class="webcam">
  <video autoplay id="movie"></video>
  <canvas id="facemesh"></canvas>
</div>
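With these changes, the drawing code inside get_facemesh can stay as it is: the mesh coordinates are in the stream's intrinsic pixel space, which is now exactly the canvas' coordinate space, and CSS scales both elements to the same 600x450 display box.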
Upvotes: 1