Wang

Reputation: 11

Run inference and get the output (array) from a TFLite model in C++

I'm trying to convert this code from Python to C++. When I run inference in C++, I can't figure out how to get the result (array) out of the interpreter. The Python code and the partial C++ code are as follows:

Information:

  1. Model: tflite
  2. output_data format (Python): <class 'numpy.ndarray'>
  3. result format (Python): <class 'torch.Tensor'>

Python code:

import numpy as np
import tensorflow as tf
import cv2
import torch

# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path="./model.tflite")
interpreter.allocate_tensors()

# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

img = cv2.imread("image.jpg")
tensor_input = np.array([img])
tensor_input = np.float32(tensor_input)
interpreter.set_tensor(input_details[0]['index'], tensor_input)

interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
result = torch.from_numpy(output_data)

C++ code:

#include <iostream>
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/kernels/register.h"

#include <cstring>
#include <fstream>
#include <vector>
#include <numeric>
#include <opencv2/opencv.hpp>

int main() {

    // Load model
    std::unique_ptr<tflite::FlatBufferModel> model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
    if (model == nullptr)
    {
        fprintf(stderr, "Failed to load model\n");
        exit(-1);
    }

    // Initiate Interpreter
    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::ops::builtin::BuiltinOpResolver resolver;
    tflite::InterpreterBuilder(*model.get(), resolver)(&interpreter);

    // Configure the interpreter before allocating tensors
    interpreter->SetAllowFp16PrecisionForFp32(true);
    interpreter->SetNumThreads(1);

    // Allocate tensors.
    if (interpreter->AllocateTensors() != kTfLiteOk)
    {
        fprintf(stderr, "Failed to allocate tensors\n");
        exit(-1);
    }

    // Get Input Tensor Dimensions
    int input = interpreter->inputs()[0];
    auto height = interpreter->tensor(input)->dims->data[1];
    auto width = interpreter->tensor(input)->dims->data[2];
    auto channels = interpreter->tensor(input)->dims->data[3];

    // Load input image
    cv::Mat image = cv::imread("image.jpg");
    if (image.empty())
    {
        fprintf(stderr, "Failed to load image\n");
        exit(-1);
    }

    // Resize to the model's input size, convert to float, and copy into the input tensor
    cv::resize(image, image, cv::Size(width, height));
    image.convertTo(image, CV_32F, 1.0 / 255.0); // Normalize to [0, 1]
    std::memcpy(interpreter->typed_input_tensor<float>(0), image.data, image.total() * image.elemSize());

    // Inference
    if (interpreter->Invoke() != kTfLiteOk)
    {
        fprintf(stderr, "Failed to invoke interpreter\n");
        exit(-1);
    }

    // How to get the result from the interpreter and print the output tensor?


    return 0;

}

Upvotes: 1

Views: 86

Answers (1)

Bart
Bart

Reputation: 239

float* output_layer = interpreter->typed_output_tensor<float>(0);  // Get a pointer to the 0-th output tensor (assuming it holds floats)
// Print the first five values of the 0-th output tensor
for (int i = 0; i < 5; ++i) std::cout << output_layer[i] << " ";
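
If you need the entire output as an array (the C++ counterpart of Python's interpreter.get_tensor(...)), you can read the output tensor's dimensions and copy its contents into a std::vector. Below is a minimal sketch; GetOutputAsVector is a hypothetical helper name, and it assumes the 0-th output tensor holds floats:

#include <vector>

// Hypothetical helper: flatten the 0-th output tensor into a vector.
// Assumes `interpreter` has already run Invoke() successfully.
std::vector<float> GetOutputAsVector(tflite::Interpreter& interpreter) {
    // Look up the 0-th output tensor and compute its total element count
    int output_index = interpreter.outputs()[0];
    TfLiteTensor* output_tensor = interpreter.tensor(output_index);
    int num_elements = 1;
    for (int i = 0; i < output_tensor->dims->size; ++i)
        num_elements *= output_tensor->dims->data[i];

    // Copy the raw float data into a vector
    const float* data = interpreter.typed_output_tensor<float>(0);
    return std::vector<float>(data, data + num_elements);
}

Calling GetOutputAsVector(*interpreter) after Invoke() gives you the flattened output values, analogous to the NumPy array returned by get_tensor in Python; the shape information stays available in output_tensor->dims.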

Upvotes: 0
