Reputation: 25
I'm attempting to use TensorFlow's facemesh model (JS) for a simple face tracker. I've used the standard code from the site, but I'm getting an error about height:

Uncaught (in promise) TypeError: Cannot read property 'height' of null
    at facemesh:17
    at L.estimateFaces (facemesh:17)
    at main ((index):76)

I'm new to JS and can't work out where I'm going wrong. Any help would be much appreciated.
TensorFlow code: https://github.com/tensorflow/tfjs-models/tree/master/facemesh
My code:
<body>
  <div id="container">
    <video autoplay id="videoElement"></video>
  </div>

  <script>
    // Starts the video stream.
    var video = document.querySelector("#videoElement");

    if (navigator.mediaDevices.getUserMedia) {
      navigator.mediaDevices.getUserMedia({ video: true })
        .then(function (stream) {
          video.srcObject = stream;
        })
        .catch(function (err) {
          console.log("Something went wrong!", err);
        });
    }
  </script>

  <script type="text/javascript">
    // navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia || navigator.oGetUserMedia;
  </script>
  <script>
    const videoFeed = document.querySelector('stream');

    // Checking the video has loaded in.
    video.onloadeddata = (event) => {
      console.log('video loaded');
    };

    window.onload = function () {
      async function main() {
        // Load the MediaPipe facemesh model.
        const model = await facemesh.load();

        // Pass in a video stream (or an image, canvas, or 3D tensor) to obtain an
        // array of detected faces from the MediaPipe graph.
        const predictions = await model.estimateFaces(document.querySelector('stream'));
        if (predictions.length > 0) {
          /*
          `predictions` is an array of objects describing each detected face, for example:
          [
            {
              faceInViewConfidence: 1, // The probability of a face being present.
              boundingBox: { // The bounding box surrounding the face.
                topLeft: [232.28, 145.26],
                bottomRight: [449.75, 308.36],
              },
              mesh: [ // The 3D coordinates of each facial landmark.
                [92.07, 119.49, -17.54],
                [91.97, 102.52, -30.54],
                ...
              ],
              scaledMesh: [ // The 3D coordinates of each facial landmark, normalized.
                [322.32, 297.58, -17.54],
                [322.18, 263.95, -30.54]
              ],
              annotations: { // Semantic groupings of the `scaledMesh` coordinates.
                silhouette: [
                  [326.19, 124.72, -3.82],
                  [351.06, 126.30, -3.00],
                  ...
                ],
                ...
              }
            }
          ]
          */
          for (let i = 0; i < predictions.length; i++) {
            const keypoints = predictions[i].scaledMesh;

            // Log facial keypoints.
            for (let j = 0; j < keypoints.length; j++) {
              const [x, y, z] = keypoints[j];
              console.log(`Keypoint ${j}: [${x}, ${y}, ${z}]`);
            }
          }
        }
      }

      main();
    };
  </script>
</body>
Upvotes: 1
Views: 1839
Reputation: 26
Declare the video element inside the main() function and pass that element to estimateFaces(). In your code, document.querySelector('stream') looks for a <stream> tag, which doesn't exist on the page, so it returns null, and facemesh throws when it tries to read the height of null:
async function main() {
  // Load the MediaPipe facemesh model.
  const model = await facemesh.load();

  // Grab the video element here, once it exists in the DOM.
  const videoFeed = document.getElementById('videoElement');

  videoFeed.onloadeddata = (event) => {
    console.log('video loaded');
  };

  // Pass the element itself; querying for a non-existent 'stream' tag
  // returns null, which caused the original error.
  const predictions = await model.estimateFaces(videoFeed);
  if (predictions.length > 0) {
    /*
    `predictions` is an array of objects describing each detected face, for example:
    [
      {
        faceInViewConfidence: 1, // The probability of a face being present.
        boundingBox: { // The bounding box surrounding the face.
          topLeft: [232.28, 145.26],
          bottomRight: [449.75, 308.36],
        },
        mesh: [ // The 3D coordinates of each facial landmark.
          [92.07, 119.49, -17.54],
          [91.97, 102.52, -30.54],
          ...
        ],
        scaledMesh: [ // The 3D coordinates of each facial landmark, normalized.
          [322.32, 297.58, -17.54],
          [322.18, 263.95, -30.54]
        ],
        annotations: { // Semantic groupings of the `scaledMesh` coordinates.
          silhouette: [
            [326.19, 124.72, -3.82],
            [351.06, 126.30, -3.00],
            ...
          ],
          ...
        }
      }
    ]
    */
    for (let i = 0; i < predictions.length; i++) {
      const keypoints = predictions[i].scaledMesh;

      // Log facial keypoints.
      for (let j = 0; j < keypoints.length; j++) {
        const [x, y, z] = keypoints[j];
        console.log(`Keypoint ${j}: [${x}, ${y}, ${z}]`);
      }
    }
  }
}

main();
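Even with the correct element, estimateFaces() can still run before the webcam has delivered its first frame. Here is a minimal sketch of one way to guard against that, assuming the <video id="videoElement"> element from the question (the readyState check is my addition, not part of the facemesh API):

async function main() {
  const videoFeed = document.getElementById('videoElement');

  // Wait until the video has at least one decoded frame to read from.
  if (videoFeed.readyState < 2) { // 2 === HAVE_CURRENT_DATA
    await new Promise((resolve) => {
      videoFeed.onloadeddata = () => resolve();
    });
  }

  const model = await facemesh.load();
  const predictions = await model.estimateFaces(videoFeed);
  console.log(`Detected ${predictions.length} face(s)`);
}

main();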
Upvotes: 1