johannb75

Reputation: 364

Make TensorFlow faster on a Node.js server?

I've written some code to detect objects in an image with TensorFlow.js, but it is really slow. For that I installed these npm packages:

@tensorflow/tfjs
@tensorflow-models/coco-ssd
@tensorflow-models/mobilenet
get-image-data

And this is my script:

const tf = require('@tensorflow/tfjs')
// pure-JS build of TensorFlow.js (CPU computation, no native binding)
const mobilenet = require('@tensorflow-models/mobilenet');
const cocoSsd = require("@tensorflow-models/coco-ssd");
const events = require('events');
const post_event = new events.EventEmitter();
const start = Date.now()
// for reading the image's pixel data
const image = require('get-image-data')

image('./img/cup.jpg', async (err, image) => {
    // get-image-data returns RGBA pixels; copy only the RGB channels
    const numChannels = 3;
    const numPixels = image.width * image.height;
    const values = new Int32Array(numPixels * numChannels);
    const pixels = image.data;
    for (let i = 0; i < numPixels; i++) {
        for (let channel = 0; channel < numChannels; ++channel) {
            values[i * numChannels + channel] = pixels[i * 4 + channel];
        }
    }
    const outShape = [image.height, image.width, numChannels];
    const input = tf.tensor3d(values, outShape, 'int32');
    await load(input);
});

const load = async img => {
    console.log("IMG LOADED in ", (Date.now() - start) / 1000, "s");
    let mobilenet_ = false, cocossd_ = false, post_predictions = [];

    mobilenet.load().then(async model => {
        console.log("mobilenet loaded in ", (Date.now() - start) / 1000, "s");
        model.classify(img).then(async classify_predictions => {
            for (let i = 0; i < classify_predictions.length; i++) {
                const element = classify_predictions[i];
                // className may hold several comma-separated labels
                const each_class = element.className.split(", ");
                each_class.forEach(this_element => {
                    post_predictions.push([this_element, element.probability * 100]);
                });
            }
            post_event.emit("mobilenet");
        });
    });

    cocoSsd.load().then(async model => {
        console.log("cocossd loaded in ", (Date.now() - start) / 1000, "s");
        model.detect(img).then(predictions => {
            for (let i = 0; i < predictions.length; i++) {
                const this_element = predictions[i];
                post_predictions.unshift([this_element.class, this_element.score * 100]);
            }
            post_event.emit("cocossd");
        });
    });

    post_event.on("mobilenet", () => {
        console.log("mobilenet(longest) finished in ", (Date.now() - start) / 1000, "s", post_predictions);
        mobilenet_ = true;
        if (mobilenet_ && cocossd_) {
            post_event.emit("finish");
        }
    }).on("cocossd", () => {
        console.log("cocossd finished in ", (Date.now() - start) / 1000, "s", post_predictions);
        cocossd_ = true;
        if (mobilenet_ && cocossd_) {
            post_event.emit("finish");
        }
    }).on("finish", () => {
        // sort by confidence, highest first
        post_predictions.sort((a, b) => b[1] - a[1]);
        console.log("Post in ", (Date.now() - start) / 1000, "s", post_predictions);
    });
}
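
(For reference, the two-flag EventEmitter handshake above can be written more directly with Promise.all; a minimal sketch, assuming the same packages and the `img` tensor from the script:)

const loadAndRun = async img => {
    // load both models and run inference concurrently, instead of
    // coordinating completion via EventEmitter flags
    const [classifications, detections] = await Promise.all([
        mobilenet.load().then(model => model.classify(img)),
        cocoSsd.load().then(model => model.detect(img)),
    ]);
    const post_predictions = [
        ...detections.map(d => [d.class, d.score * 100]),
        ...classifications.flatMap(c =>
            c.className.split(", ").map(name => [name, c.probability * 100])),
    ];
    // sort by confidence, highest first
    post_predictions.sort((a, b) => b[1] - a[1]);
    return post_predictions;
};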

This works, but when I run it, it's really slow. Here are the results:

IMG LOADED in  0.486 s
cocossd loaded in  6.11 s
cocossd finished in  9.028 s [ [ 'cup', 95.68768739700317 ] ]
mobilenet loaded in  10.845 s
mobilenet(longest) finished in  12.795 s [
  [ 'cup', 95.68768739700317 ],
  [ 'cup', 69.30274367332458 ],
  [ 'espresso', 17.099112272262573 ],
  [ 'coffee mug', 13.384920358657837 ]
]
Post in  12.809 s [
  [ 'cup', 95.68768739700317 ],
  [ 'cup', 69.30274367332458 ],
  [ 'espresso', 17.099112272262573 ],
  [ 'coffee mug', 13.384920358657837 ]
]

I've watched some videos in which the Node.js version of MobileNet takes about 20 ms to produce results, but in my app it takes over 10 s. Maybe I did something wrong. Can someone help me fix this problem?

Thanks

Upvotes: 0

Views: 472

Answers (1)

Soeren

Reputation: 11

Loading the models takes most of that time. For example, you could create an Express server that expects an image and performs object detection. When you start the server, the models can be preloaded, so on each API request the models are already loaded and detection is done in milliseconds (hopefully :-)).
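
A minimal sketch of that idea, assuming an Express app and the same model packages as in the question; `decodeToTensor` is a hypothetical helper standing in for the image-to-tensor conversion (e.g. the RGBA loop from the question):

const express = require('express');
const mobilenet = require('@tensorflow-models/mobilenet');
const cocoSsd = require('@tensorflow-models/coco-ssd');

const app = express();
let models; // filled in once, before the server starts listening

// pay the model-loading cost once at startup instead of on every request
const preload = async () => {
    const [classifier, detector] = await Promise.all([mobilenet.load(), cocoSsd.load()]);
    models = { classifier, detector };
};

app.post('/detect', express.raw({ type: 'image/*', limit: '10mb' }), async (req, res) => {
    const input = decodeToTensor(req.body); // hypothetical helper, see note above
    const [classifications, detections] = await Promise.all([
        models.classifier.classify(input),
        models.detector.detect(input),
    ]);
    input.dispose(); // free the tensor's memory
    res.json({ classifications, detections });
});

preload().then(() => app.listen(3000, () => console.log('models loaded, server ready')));

With this structure, each request only pays for inference; the multi-second model downloads happen once at startup.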

Upvotes: 1
