Reputation: 11
I am attempting to run a pose classification model that I trained on a video file, but I can't seem to figure out how to get it working with anything other than the documentation they provide for running inference on webcam input. I am trying to run the model using their provided javascript example code. I added an html video element and specified the height and width parameters, but I can't figure out how to get the predictions working in async function predict(). I commented out the webcam setup provided in the original code, and changed all webcam.canvas objects to video.canvas. I'm pretty new to javascript but the model only exports as tensorflow.js, so that's what I have to work with:)
Here is the my code:
<video id="video" style="width: 540; height: 360;" muted>
<source src="GTSolo2.mp4" type="video/mp4" />
</video>
<div>Teachable Machine Pose Model</div>
<br>
<button type="button" onclick="init()">Start</button>
<div><canvas id="canvas"></canvas></div>
<div id="label-container"></div>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/[email protected]/dist/tf.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/@teachablemachine/[email protected]/dist/teachablemachine-pose.min.js"></script>
<!-- AJA adds the osc lib-->
<script type="text/javascript" src="node_modules/osc-js/lib/osc.min.js"></script>
<script type="text/javascript">
// More API functions here:
// https://github.com/googlecreativelab/teachablemachine-community/tree/master/libraries/pose
// the link to your model provided by Teachable Machine export panel
const URL = "https://teachablemachine.withgoogle.com/models/rpV37-6o2/";
let model, webcam, ctx, labelContainer, maxPredictions, video;
async function init() {
const modelURL = URL + "model.json";
const metadataURL = URL + "metadata.json";
// load the model and metadata
// Refer to tmImage.loadFromFiles() in the API to support files from a file picker
// Note: the pose library adds a tmPose object to your window (window.tmPose)
model = await tmPose.load(modelURL, metadataURL);
maxPredictions = model.getTotalClasses();
// Convenience function to setup a webcam
// const width = 540;
// const height = 360;
// const flip = false; // whether to flip the webcam
// webcam = new tmPose.Webcam(width, height, flip); // width, height, flip - this should actually ideally be a sqaure
// await webcam.setup(); // request access to the webcam
// await webcam.play();
video = document.getElementById("video");
video.width = 540;
video.height = 360;
video.addEventListener('play', loop );
video.play();
window.requestAnimationFrame(loop);
// append/get elements to the DOM
const canvas = document.getElementById("canvas");
//canvas.width = width; canvas.height = height;
ctx = canvas.getContext("2d");
labelContainer = document.getElementById("label-container");
for (let i = 0; i < maxPredictions; i++) { // and class labels
labelContainer.appendChild(document.createElement("div"));
}
}
async function loop(timestamp) {
//webcam.update();
await predict();
window.requestAnimationFrame(loop);
}
async function predict() {
// Prediction #1: run input through posenet
// estimatePose can take in an image, video or canvas html element
const { pose, posenetOutput } = await model.estimatePose(video.canvas); //EDIT THIS ELEMENT TO RUN A VIDEO INSTEAD OF WEBCAM
// Prediction 2: run input through teachable machine classification model
const prediction = await model.predict(posenetOutput); // Add maxPredictions here?
for (let i = 0; i < maxPredictions; i++) {
const classPrediction =
prediction[i].className + ": " + prediction[i].probability.toFixed(2);
}
// finally draw the poses
drawPose(pose);
}
function drawPose(pose) {
if (video.canvas) {
ctx.drawImage(video.canvas, 0, 0);
// draw the keypoints and skeleton
if (pose) {
const minPartConfidence = 0.5;
tmPose.drawKeypoints(pose.keypoints, minPartConfidence, ctx);
tmPose.drawSkeleton(pose.keypoints, minPartConfidence, ctx);
}
}
}
</script>
This is the error that is thrown:
teachablemachine-pose.min.js:58 Uncaught (in promise) TypeError: Cannot read property 'height' of undefined
at y (teachablemachine-pose.min.js:58)
at Object.e.padAndResizeTo (teachablemachine-pose.min.js:58)
at t. (teachablemachine-pose.min.js:51)
at teachablemachine-pose.min.js:34
at Object.next (teachablemachine-pose.min.js:34)
at teachablemachine-pose.min.js:34
at new Promise ()
at r (teachablemachine-pose.min.js:34)
at t.estimatePoseOutputs (teachablemachine-pose.min.js:51)
at t. (teachablemachine-pose.min.js:51)
Upvotes: 1
Views: 911
Reputation: 9451
There's no property canvas
on an HTMLVideoElement
. You likely just need to pass in the video element directly. I'm guessing the original webcam object had a reverence to a canvas element, but if you want to use a video I think it would look like this:
const { pose, posenetOutput } = await model.estimatePose(video);
Upvotes: 0
Reputation: 52
<video id="video" style="width: 540; height: 360;" muted>
<source src="GTSolo2.mp4" type="video/mp4" />
</video>
The value of the src attribute of a video can't be set to "GTSolo2.mp4".
<input id="selectVideo" type="file" accept="video/*"/>
<video id="video" src="" preload autoplay loop muted controls>
</video>
<script>
const video = document.getElementById('video');
const selectVideo = document.getElementById('selectVideo');
selectVideo.onchange = function (event) {
var target = event.target || window.event.srcElement;
var files = target.files;
if (files && files.length) {
var file = files[0];
if (video.canPlayType(file.type)!="")
video.src = URL.createObjectURL(file);
else
console.log("The file type is not supported.");
}
}
</script>
Upvotes: -2