Aritra Mondal

Reputation: 21

Source map error in face-api.js while running a React app to detect faces in the webcam

I am trying to make a React face recognition application using face-api.js for a client, but I keep getting the same error no matter what I do.

The code in App.js:

import * as faceapi from 'face-api.js';
import React from 'react';

function App() {

  const [modelsLoaded, setModelsLoaded] = React.useState(false);
  const [captureVideo, setCaptureVideo] = React.useState(false);

  const videoRef = React.useRef();
  const videoHeight = 480;
  const videoWidth = 640;
  const canvasRef = React.useRef();

  React.useEffect(() => {
    const loadModels = async () => {
      const MODEL_URL = process.env.PUBLIC_URL + '/models';

      Promise.all([
        faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL),
        faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
        faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL),
        faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL),
      ]).then(() => setModelsLoaded(true));
    }
    loadModels();
  }, []);

  const startVideo = () => {
    setCaptureVideo(true);
    navigator.mediaDevices
      .getUserMedia({ video: { width: 300 } })
      .then(stream => {
        let video = videoRef.current;
        video.srcObject = stream;
        video.play();
      })
      .catch(err => {
        console.error("error:", err);
      });
  }

  const handleVideoOnPlay = () => {
    setInterval(async () => {
      if (canvasRef && canvasRef.current) {
        const displaySize = {
          width: videoWidth,
          height: videoHeight
        }

        faceapi.matchDimensions(canvasRef.current, displaySize);

        const detections = await faceapi.detectAllFaces(videoRef.current, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceExpressions();

        const resizedDetections = faceapi.resizeResults(detections, displaySize);

        canvasRef && canvasRef.current && canvasRef.current.getContext('2d').clearRect(0, 0, videoWidth, videoHeight);
        canvasRef && canvasRef.current && faceapi.draw.drawDetections(canvasRef.current, resizedDetections);
        canvasRef && canvasRef.current && faceapi.draw.drawFaceLandmarks(canvasRef.current, resizedDetections);
        canvasRef && canvasRef.current && faceapi.draw.drawFaceExpressions(canvasRef.current, resizedDetections);
      }
    }, 100)
  }

  const closeWebcam = () => {
    videoRef.current.pause();
    videoRef.current.srcObject.getTracks()[0].stop();
    setCaptureVideo(false);
  }

  return (
    <div>
      <div style={{ textAlign: 'center', padding: '10px' }}>
        {
          captureVideo && modelsLoaded ?
            <button onClick={closeWebcam} style={{ cursor: 'pointer', backgroundColor: 'green', color: 'white', padding: '15px', fontSize: '25px', border: 'none', borderRadius: '10px' }}>
              Close Webcam
            </button>
            :
            <button onClick={startVideo} style={{ cursor: 'pointer', backgroundColor: 'green', color: 'white', padding: '15px', fontSize: '25px', border: 'none', borderRadius: '10px' }}>
              Open Webcam
            </button>
        }
      </div>
      {
        captureVideo ?
          modelsLoaded ?
            <div>
              <div style={{ display: 'flex', justifyContent: 'center', padding: '10px' }}>
                <video ref={videoRef} height={videoHeight} width={videoWidth} onPlay={handleVideoOnPlay} style={{ borderRadius: '10px' }} />
                <canvas ref={canvasRef} style={{ position: 'absolute' }} />
              </div>
            </div>
            :
            <div>loading...</div>
          :
          <>
          </>
      }
    </div>
  );
}

export default App;

The error I get after running:

WARNING in ./node_modules/face-api.js/build/es6/xception/extractParams.js Module Warning (from ./node_modules/source-map-loader/dist/cjs.js): Failed to parse source map from 'C:\Users\Aritra Mondal\Documents\test react\facerecognition\node_modules\face-api.js\src\xception\extractParams.ts' file: Error: ENOENT: no such file or directory, open 'C:\Users\Aritra Mondal\Documents\test react\facerecognition\node_modules\face-api.js\src\xception\extractParams.ts'

WARNING in ./node_modules/face-api.js/build/es6/xception/extractParamsFromWeigthMap.js Module Warning (from ./node_modules/source-map-loader/dist/cjs.js): Failed to parse source map from 'C:\Users\Aritra Mondal\Documents\test react\facerecognition\node_modules\face-api.js\src\xception\extractParamsFromWeigthMap.ts' file: Error: ENOENT: no such file or directory, open 'C:\Users\Aritra Mondal\Documents\test react\facerecognition\node_modules\face-api.js\src\xception\extractParamsFromWeigthMap.ts'
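
These are warnings from webpack's source-map-loader, not build errors: the published face-api.js package ships source maps that reference its TypeScript sources under src/, which are not included in the npm package, so the loader cannot resolve them. One commonly suggested way to silence them with react-scripts 5 is to put GENERATE_SOURCEMAP=false in a .env file; another is to ignore the warnings in the webpack config. A minimal sketch of the latter, assuming @craco/craco is installed and the package.json scripts call craco instead of react-scripts:

// craco.config.js -- hypothetical config; assumes @craco/craco is installed
// and the "start"/"build" scripts run "craco start" / "craco build"
module.exports = {
  webpack: {
    configure: (webpackConfig) => {
      // Suppress "Failed to parse source map" warnings coming from node_modules
      webpackConfig.ignoreWarnings = [
        ...(webpackConfig.ignoreWarnings || []),
        /Failed to parse source map/,
      ];
      return webpackConfig;
    },
  },
};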

The package.json file:

{
  "name": "facerecognition",
  "version": "0.1.0",
  "private": true,
  "dependencies": {
    "@testing-library/jest-dom": "^5.16.5",
    "@testing-library/react": "^13.4.0",
    "@testing-library/user-event": "^13.5.0",
    "face-api.js": "^0.22.2",
    "react": "^18.2.0",
    "react-dom": "^18.2.0",
    "react-scripts": "5.0.1",
    "web-vitals": "^2.1.4"
  },
  "scripts": {
    "start": "react-scripts start",
    "build": "react-scripts build",
    "test": "react-scripts test",
    "eject": "react-scripts eject"
  },
  "eslintConfig": {
    "extends": [
      "react-app",
      "react-app/jest"
    ]
  },
  "browserslist": {
    "production": [
      ">0.2%",
      "not dead",
      "not op_mini all"
    ],
    "development": [
      "last 1 chrome version",
      "last 1 firefox version",
      "last 1 safari version"
    ]
  }
}

folder structure screenshot of the error

After going through many YouTube tutorials, and even cloning their repositories to check the code, I keep getting the same error.

Upvotes: 2

Views: 572

Answers (1)

HUZEFA BIN JUNED

Reputation: 1

Here is a React face recognition component using face-api.js:


import React, { useState, useEffect, useRef } from "react";
import * as faceapi from "face-api.js";
import Loading from "./Loading";

const WebcamComponent = () => {
  const [initializing, setInitializing] = useState(true);
  const [expressions, setExpressions] = useState([]);
  const [age, setAge] = useState(null);
  const [gender, setGender] = useState(null);
  const videoRef = useRef();
  const videoCanvasRef = useRef();
  const videoHeight = 400;
  const videoWidth = 400;

  useEffect(() => {
    const loadModels = async () => {
      const MODEL_URL = "/models"; // Update to the correct path
      setInitializing(true);
      await faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL);
      await faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL);
      await faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL);
      await faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL);
      await faceapi.nets.ageGenderNet.loadFromUri(MODEL_URL);
      startWebcam();
    };

    loadModels();

    return () => {
      // Stop the webcam stream when the component unmounts
      if (videoRef.current && videoRef.current.srcObject) {
        videoRef.current.srcObject.getTracks().forEach((track) => track.stop());
      }
    };
  }, []);

  const startWebcam = async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({
        video: true,
      });
      videoRef.current.srcObject = stream;
      videoRef.current.onloadedmetadata = () => {
        setInitializing(false);
      };
    } catch (error) {
      console.error("getUserMedia Error: ", error);
    }
  };

  const handleVideoPlay = () => {
    setInterval(async () => {
      if (initializing) {
        setInitializing(false);
      }
      const displaySize = { width: videoWidth, height: videoHeight };
      faceapi.matchDimensions(videoCanvasRef.current, displaySize);
      const detections = await faceapi
        .detectAllFaces(videoRef.current, new faceapi.TinyFaceDetectorOptions())
        .withFaceLandmarks()
        .withFaceExpressions()
        .withAgeAndGender();

      const resizedDetections = faceapi.resizeResults(detections, displaySize);

      // Clear previous dots
      const ctx = videoCanvasRef.current.getContext("2d");
      ctx.clearRect(0, 0, videoWidth, videoHeight);

      faceapi.draw.drawFaceLandmarks(videoCanvasRef.current, resizedDetections);

      console.log("resizedDetections", resizedDetections);
      // Extract and set detected expressions
      if (resizedDetections.length > 0) {
        const expressionsObj = resizedDetections[0].expressions;
        const detectedExpressions = Object.keys(expressionsObj).filter(
          (expression) => expressionsObj[expression] > 0.7
        );

        // Extract and set age and gender
        setAge(resizedDetections[0].age);
        setGender(resizedDetections[0].gender);
        if (detectedExpressions.length > 0) {
          setExpressions(detectedExpressions);
        }
      }
    }, 1000);
  };

  return (
    <div className="h-screen w-screen flex flex-row justify-between items-center">
      {initializing && <Loading />}

      <div className="h-full w-5/12 flex justify-center items-center relative">
        <video
          ref={videoRef}
          className="h-full w-full object-fill"
          autoPlay
          playsInline
          onPlay={handleVideoPlay}
          muted
        />
        <canvas
          ref={videoCanvasRef}
          width="100%"
          height="100%"
          className="absolute w-full h-full"
        ></canvas>
      </div>
  
    </div>
  );

};

export default WebcamComponent;

  • install Tailwind CSS
  • install face-api.js
  • copy all the models into the public folder
  • check that the models are in the /public/models folder (a minimal App.js that mounts the component is sketched below)
Upvotes: 0
