Cdbavl

Reputation: 31

How to include cv.imread() when building opencv.js?

I cloned the OpenCV Git repository: git clone https://github.com/opencv/opencv.git and did not change anything.

Then I followed the OpenCV tutorial on building opencv.js (https://docs.opencv.org/4.5.2/d4/da1/tutorial_js_setup.html) step by step and successfully built opencv.js together with the tests using the command

python ./platforms/js/build_js.py build_js --build_test

Then I opened the generated test.html with a live server and saw that all tests passed: "581 assertions of 581 passed, 0 failed."

But when I include the self-built opencv.js in my page via a <script> tag and use it, everything still works except for cv.imread(). I get the error message:

"Uncaught TypeError: cv.imread is not a function".

Here is the code block:

let canvas = document.getElementById("canvas");
let ctx = canvas.getContext("2d");
let matrix = cv.imread("canvas");
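
For context, the relevant part of my page looks roughly like this (the onOpenCvReady wiring and the opencv.js path are simplified placeholders rather than my exact files; the canvas already has an image drawn onto it):

<script>
  function onOpenCvReady() {
    // Other functions from the custom build (e.g. cv.cvtColor) work at this point,
    // but cv.imread throws "Uncaught TypeError: cv.imread is not a function".
    let canvas = document.getElementById("canvas");
    let ctx = canvas.getContext("2d");
    let matrix = cv.imread("canvas");
  }
</script>
<script async src="opencv.js" onload="onOpenCvReady();" type="text/javascript"></script>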

Then I tried the official opencv.js instead, and imread() worked. The problem, however, is that the official opencv.js is 7.7 MB, because it includes most of OpenCV's functionality, whereas I only need a few core and image-processing functions.

How can imread() be included in the build? I could not find a solution so far. The only lead I found is the build whitelist: it lives in the OpenCV repository, is named opencv_js.config.py, and contains the following. Since the OpenCV.js documentation uses imread() in pretty much every example, there should be an easy way to include it.

# Classes and methods whitelist

core = {
    '': [
        'absdiff', 'add', 'addWeighted', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'cartToPolar',
        'compare', 'convertScaleAbs', 'copyMakeBorder', 'countNonZero', 'determinant', 'dft', 'divide', 'eigen',
        'exp', 'flip', 'getOptimalDFTSize','gemm', 'hconcat', 'inRange', 'invert', 'kmeans', 'log', 'magnitude',
        'max', 'mean', 'meanStdDev', 'merge', 'min', 'minMaxLoc', 'mixChannels', 'multiply', 'norm', 'normalize',
        'perspectiveTransform', 'polarToCart', 'pow', 'randn', 'randu', 'reduce', 'repeat', 'rotate', 'setIdentity', 'setRNGSeed',
        'solve', 'solvePoly', 'split', 'sqrt', 'subtract', 'trace', 'transform', 'transpose', 'vconcat',
        'setLogLevel', 'getLogLevel',
    ],
    'Algorithm': [],
}

imgproc = {'': ['Canny', 'GaussianBlur', 'Laplacian', 'HoughLines', 'HoughLinesP', 'HoughCircles', 'Scharr','Sobel', \
                'adaptiveThreshold','approxPolyDP','arcLength','bilateralFilter','blur','boundingRect','boxFilter',\
                'calcBackProject','calcHist','circle','compareHist','connectedComponents','connectedComponentsWithStats', \
                'contourArea', 'convexHull', 'convexityDefects', 'cornerHarris','cornerMinEigenVal','createCLAHE', \
                'createLineSegmentDetector','cvtColor','demosaicing','dilate', 'distanceTransform','distanceTransformWithLabels', \
                'drawContours','ellipse','ellipse2Poly','equalizeHist','erode', 'filter2D', 'findContours','fitEllipse', \
                'fitLine', 'floodFill','getAffineTransform', 'getPerspectiveTransform', 'getRotationMatrix2D', 'getStructuringElement', \
                'goodFeaturesToTrack','grabCut','initUndistortRectifyMap', 'integral','integral2', 'isContourConvex', 'line', \
                'matchShapes', 'matchTemplate','medianBlur', 'minAreaRect', 'minEnclosingCircle', 'moments', 'morphologyEx', \
                'pointPolygonTest', 'putText','pyrDown','pyrUp','rectangle','remap', 'resize','sepFilter2D','threshold', \
                'undistort','warpAffine','warpPerspective','warpPolar','watershed', \
                'fillPoly', 'fillConvexPoly', 'polylines',
    ],
    'CLAHE': ['apply', 'collectGarbage', 'getClipLimit', 'getTilesGridSize', 'setClipLimit', 'setTilesGridSize'],
    'segmentation_IntelligentScissorsMB': [
        'IntelligentScissorsMB',
        'setWeights',
        'setGradientMagnitudeMaxLimit',
        'setEdgeFeatureZeroCrossingParameters',
        'setEdgeFeatureCannyParameters',
        'applyImage',
        'applyImageFeatures',
        'buildMap',
        'getContour'
    ],
}

objdetect = {'': ['groupRectangles'],
             'HOGDescriptor': ['load', 'HOGDescriptor', 'getDefaultPeopleDetector', 'getDaimlerPeopleDetector', 'setSVMDetector', 'detectMultiScale'],
             'CascadeClassifier': ['load', 'detectMultiScale2', 'CascadeClassifier', 'detectMultiScale3', 'empty', 'detectMultiScale'],
             'QRCodeDetector': ['QRCodeDetector', 'decode', 'decodeCurved', 'detect', 'detectAndDecode', 'detectMulti', 'setEpsX', 'setEpsY']}

video = {'': ['CamShift', 'calcOpticalFlowFarneback', 'calcOpticalFlowPyrLK', 'createBackgroundSubtractorMOG2', \
             'findTransformECC', 'meanShift'],
         'BackgroundSubtractorMOG2': ['BackgroundSubtractorMOG2', 'apply'],
         'BackgroundSubtractor': ['apply', 'getBackgroundImage']}

dnn = {'dnn_Net': ['setInput', 'forward'],
       '': ['readNetFromCaffe', 'readNetFromTensorflow', 'readNetFromTorch', 'readNetFromDarknet',
            'readNetFromONNX', 'readNet', 'blobFromImage']}

features2d = {'Feature2D': ['detect', 'compute', 'detectAndCompute', 'descriptorSize', 'descriptorType', 'defaultNorm', 'empty', 'getDefaultName'],
              'BRISK': ['create', 'getDefaultName'],
              'ORB': ['create', 'setMaxFeatures', 'setScaleFactor', 'setNLevels', 'setEdgeThreshold', 'setFirstLevel', 'setWTA_K', 'setScoreType', 'setPatchSize', 'getFastThreshold', 'getDefaultName'],
              'MSER': ['create', 'detectRegions', 'setDelta', 'getDelta', 'setMinArea', 'getMinArea', 'setMaxArea', 'getMaxArea', 'setPass2Only', 'getPass2Only', 'getDefaultName'],
              'FastFeatureDetector': ['create', 'setThreshold', 'getThreshold', 'setNonmaxSuppression', 'getNonmaxSuppression', 'setType', 'getType', 'getDefaultName'],
              'AgastFeatureDetector': ['create', 'setThreshold', 'getThreshold', 'setNonmaxSuppression', 'getNonmaxSuppression', 'setType', 'getType', 'getDefaultName'],
              'GFTTDetector': ['create', 'setMaxFeatures', 'getMaxFeatures', 'setQualityLevel', 'getQualityLevel', 'setMinDistance', 'getMinDistance', 'setBlockSize', 'getBlockSize', 'setHarrisDetector', 'getHarrisDetector', 'setK', 'getK', 'getDefaultName'],
              # 'SimpleBlobDetector': ['create'],
              'KAZE': ['create', 'setExtended', 'getExtended', 'setUpright', 'getUpright', 'setThreshold', 'getThreshold', 'setNOctaves', 'getNOctaves', 'setNOctaveLayers', 'getNOctaveLayers', 'setDiffusivity', 'getDiffusivity', 'getDefaultName'],
              'AKAZE': ['create', 'setDescriptorType', 'getDescriptorType', 'setDescriptorSize', 'getDescriptorSize', 'setDescriptorChannels', 'getDescriptorChannels', 'setThreshold', 'getThreshold', 'setNOctaves', 'getNOctaves', 'setNOctaveLayers', 'getNOctaveLayers', 'setDiffusivity', 'getDiffusivity', 'getDefaultName'],
              'DescriptorMatcher': ['add', 'clear', 'empty', 'isMaskSupported', 'train', 'match', 'knnMatch', 'radiusMatch', 'clone', 'create'],
              'BFMatcher': ['isMaskSupported', 'create'],
              '': ['drawKeypoints', 'drawMatches', 'drawMatchesKnn']}

photo = {'': ['createAlignMTB', 'createCalibrateDebevec', 'createCalibrateRobertson', \
              'createMergeDebevec', 'createMergeMertens', 'createMergeRobertson', \
              'createTonemapDrago', 'createTonemapMantiuk', 'createTonemapReinhard', 'inpaint'],
        'CalibrateCRF': ['process'],
        'AlignMTB' : ['calculateShift', 'shiftMat', 'computeBitmaps', 'getMaxBits', 'setMaxBits', \
                      'getExcludeRange', 'setExcludeRange', 'getCut', 'setCut'],
        'CalibrateDebevec' : ['getLambda', 'setLambda', 'getSamples', 'setSamples', 'getRandom', 'setRandom'],
        'CalibrateRobertson' : ['getMaxIter', 'setMaxIter', 'getThreshold', 'setThreshold', 'getRadiance'],
        'MergeExposures' : ['process'],
        'MergeDebevec' : ['process'],
        'MergeMertens' : ['process', 'getContrastWeight', 'setContrastWeight', 'getSaturationWeight', \
                          'setSaturationWeight', 'getExposureWeight', 'setExposureWeight'],
        'MergeRobertson' : ['process'],
        'Tonemap' : ['process' , 'getGamma', 'setGamma'],
        'TonemapDrago' : ['getSaturation', 'setSaturation', 'getBias', 'setBias', \
                          'getSigmaColor', 'setSigmaColor', 'getSigmaSpace','setSigmaSpace'],
        'TonemapMantiuk' : ['getScale', 'setScale', 'getSaturation', 'setSaturation'],
        'TonemapReinhard' : ['getIntensity', 'setIntensity', 'getLightAdaptation', 'setLightAdaptation', \
                             'getColorAdaptation', 'setColorAdaptation']
        }

aruco = {'': ['detectMarkers', 'drawDetectedMarkers', 'drawAxis', 'estimatePoseSingleMarkers', 'estimatePoseBoard', 'estimatePoseCharucoBoard', 'interpolateCornersCharuco', 'drawDetectedCornersCharuco'],
        'aruco_Dictionary': ['get', 'drawMarker'],
        'aruco_Board': ['create'],
        'aruco_GridBoard': ['create', 'draw'],
        'aruco_CharucoBoard': ['create', 'draw'],
        'aruco_DetectorParameters': ['create']
        }

calib3d = {'': ['findHomography', 'calibrateCameraExtended', 'drawFrameAxes', 'estimateAffine2D', \
                'getDefaultNewCameraMatrix', 'initUndistortRectifyMap', 'Rodrigues', \
                'solvePnP', 'solvePnPRansac', 'solvePnPRefineLM']}


white_list = makeWhiteList([core, imgproc, objdetect, video, dnn, features2d, photo, aruco, calib3d])

Upvotes: 3

Views: 2919

Answers (3)

陳星谷

Reputation: 21

The reason cv.imread() cannot be used from your self-built opencv.js is that it returns cv.ready, which is a Promise (check your self-built opencv.js, line 34). The official build of opencv.js, however, returns cv (https://docs.opencv.org/4.5.0/opencv.js). So if you follow the old documentation and use cv directly with your self-built opencv.js, you will get an error.

As mentioned in https://github.com/opencv/opencv/issues/21580, there are two ways to fix this problem.

  1. Adopt the Promise usage: reset cv right after opencv.js is loaded.

<script src="js/opencv.js" onload="onOpenCvReady();" type="text/javascript"></script>

async function onOpenCvReady() { window.cv = await window.cv; }

  2. Or you can keep the original usage, but then you have to modify opencv.js:

Change line 34 of opencv.js from

return cv.ready

to

return cv

and then use cv after onRuntimeInitialized is called, as shown in https://docs.opencv.org/4.x/d0/d84/tutorial_js_usage.html
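
Put together, the first option looks roughly like this (a minimal sketch; the js/opencv.js path is wherever your build output lives, and the handler is defined before the script tag so the onload attribute can find it):

<script type="text/javascript">
  async function onOpenCvReady() {
    // With the newer builds the global cv is a Promise, so resolve it first.
    window.cv = await window.cv;
    // From here on the usual API is available, e.g. cv.imread("canvas").
    console.log(typeof cv.imread); // "function"
  }
</script>
<script async src="js/opencv.js" onload="onOpenCvReady();" type="text/javascript"></script>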

Upvotes: 2

David Wright

Reputation: 494

I've just run into the same problem. It looks like the opencv.js documentation hasn't been updated for newer versions of the emscripten Module API (or this is just something I am doing differently from the OpenCV docs that happens to work).

As shown in the embind docs, you can use the onRuntimeInitialized callback to detect when the WASM module has finished loading, and from then on access its exported functions via the Module object. This is how I was able to use opencv.js, including cv.imread:

<html>
  <head>
  </head>
  <body>
    <h1>Hello OpenCV.js</h1>
    <div>
      <div>
        <canvas id="canvas"></canvas>
      </div>
      <div>
        <input type="file" id="fileInput" name="file" />
      </div>
    </div>
    <script>
      var Module = {
        onRuntimeInitialized: function() {
          console.log(Module.imread)
          const cv = Module;

          let fileInput = document.getElementById('fileInput');

          fileInput.onchange = (e) => {
            const image = new Image();

            image.src = URL.createObjectURL(e.target.files[0]);

            image.onload = ()  => {
              const canvas = document.getElementById('canvas');
              const context = canvas.getContext('2d');

              canvas.height = image.height;
              canvas.width = image.width;

              context.drawImage(image, 0, 0, image.width, image.height);
              const imageData = context.getImageData(0, 0, image.width, image.height);

              const cvImage = cv.imread("canvas");
              const mean = cv.mean(cvImage)
              console.log(`Mean: ${mean[0]}, ${mean[1]}, ${mean[2]}`)
            }
          };
        }
      };
    </script>
    <script src="opencv.js"></script>
  </body>
</html>

Important: Make sure that you don't compile in the OpenCV imgcodecs module, or the browser version of imread will be overwritten by the C++ version. The browser implementation (defined in opencv/modules/js/src/helpers.js) loads images from the browser, whereas the C++ version loads them from files.
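
Roughly speaking, that browser-side imread is just a thin wrapper around the canvas pixel data, something along these lines (a simplified sketch of the idea, not the actual helpers.js source):

// Simplified sketch: read a canvas into a cv.Mat, which is what the JS helper does.
function imreadFromCanvas(canvasId) {
  const canvas = document.getElementById(canvasId);
  const ctx = canvas.getContext('2d');
  const imgData = ctx.getImageData(0, 0, canvas.width, canvas.height);
  return cv.matFromImageData(imgData); // RGBA pixels become a CV_8UC4 Mat
}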

Bear in mind that I am just starting out with WASM, emscripten and JavaScript, so please excuse any misuse of terminology. If I am doing something wrong or badly, I would be more than happy to get feedback as well.

Upvotes: 0

Norbert Tiborcz

Reputation: 281

I had the same problem. I used emscripten to compile OpenCV from source and it finished without any errors, but when I tried to load the result in the browser, it didn't work at all. The problem was solved by switching to an older version of emscripten.

git clone https://github.com/emscripten-core/emsdk.git
cd emsdk/
./emsdk install 1.39.15
./emsdk activate 1.39.15
source ./emsdk_env.sh
git clone https://github.com/opencv/opencv.git
emcmake python ./opencv/platforms/js/build_js.py build_wasm --build_wasm

I went through all the newer versions, but 1.39.15 is the latest one that worked for me. This way I got the opencv.js file down to 1.7 MB.

Upvotes: 1
