hatati

Reputation: 399

Problem converting TensorFlow model to TensorFlow Lite (.tflite) format

I made a TensorFlow model in Python for image classification. I'm using Windows 10.

I have a Train class where I define the graph in build_graph() and train the model in train(). Here is the main.py script:

#import fire
import numpy as np
import data_import as di
import os
import tensorflow as tf


class Train:
    __x_ = []
    __y_ = []
    __logits = []
    __loss = []
    __train_step = []
    __merged_summary_op = []
    __saver = []
    __session = []
    __writer = []
    __is_training = []
    __loss_val = []
    __train_summary = []
    __val_summary = []

    def __init__(self):
        pass

    def build_graph(self):
        self.__x_ = tf.placeholder("float", shape=[None, 60, 60, 3], name='X')
        self.__y_ = tf.placeholder("int32", shape=[None, 3], name='Y')
        self.__is_training = tf.placeholder(tf.bool)


        with tf.name_scope("model") as scope:
            conv1 = tf.layers.conv2d(inputs=self.__x_, filters=64,
                                 kernel_size=[5, 5],
                                 padding="same", activation=tf.nn.relu)
            pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

            conv2 = tf.layers.conv2d(inputs=pool1, filters=64, kernel_size=[5, 5], padding="same",
                                 activation=tf.nn.relu)

            pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

            conv3 = tf.layers.conv2d(inputs=pool2, filters=32, kernel_size=[5, 5], padding="same",
                                 activation=tf.nn.relu)

            pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)

            pool3_flat = tf.reshape(pool3, [-1, 7 * 7 * 32])

            # FC layers
            FC1 = tf.layers.dense(inputs=pool3_flat, units=128, activation=tf.nn.relu)
            FC2 = tf.layers.dense(inputs=FC1, units=64, activation=tf.nn.relu)
            self.__logits = tf.layers.dense(inputs=FC2, units=3)


        # TensorFlow summary data to display in TensorBoard later
        with tf.name_scope("loss_func") as scope:
            self.__loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(logits=self.__logits, labels=self.__y_))
            self.__loss_val = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(logits=self.__logits, labels=self.__y_))

            # Add loss to tensorboard
            self.__train_summary = tf.summary.scalar("loss_train", self.__loss)
            self.__val_summary = tf.summary.scalar("loss_val", self.__loss_val)


        # summary data to be displayed on TensorBoard during training:
        with tf.name_scope("optimizer") as scope:
            global_step = tf.Variable(0, trainable=False)
            starter_learning_rate = 1e-3
            # decay the learning rate every 1000 steps with a base of 0.9
            learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 1000, 0.9,
                                                       staircase=True)
            self.__train_step = tf.train.AdamOptimizer(learning_rate).minimize(self.__loss, global_step=global_step)
            tf.summary.scalar("learning_rate", learning_rate)
            tf.summary.scalar("global_step", global_step)


        # Merge op for tensorboard
        self.__merged_summary_op = tf.summary.merge_all()
        # Build graph
        init = tf.global_variables_initializer()
        # Saver for checkpoints
        self.__saver = tf.train.Saver(max_to_keep=None)

        # Configure summary to output at given directory
        self.__session = tf.Session()
        self.__writer = tf.summary.FileWriter("./logs/flight_path", self.__session.graph)
        self.__session.run(init)



    def train(self, save_dir='./model_files', batch_size=20):
        # Load dataset and labels
        x = np.asarray(di.load_images())
        y = np.asarray(di.load_labels())

        # Shuffle dataset
        np.random.seed(0)
        shuffled_indices = np.arange(len(y))
        np.random.shuffle(shuffled_indices)
        shuffled_x = x[shuffled_indices].tolist()
        shuffled_y = y[shuffled_indices].tolist()
        shuffled_y = tf.keras.utils.to_categorical(shuffled_y, 3)

        dataset = (shuffled_x, shuffled_y)
        dataset = tf.data.Dataset.from_tensor_slices(dataset)
        # dataset = dataset.shuffle(buffer_size=300)

        # Using the TensorFlow data API to handle batches
        dataset_train = dataset.take(200)
        dataset_train = dataset_train.repeat()
        dataset_train = dataset_train.batch(batch_size)

        dataset_test = dataset.skip(200)
        dataset_test = dataset_test.repeat()
        dataset_test = dataset_test.batch(batch_size)

        # Create iterators
        iter_train = dataset_train.make_one_shot_iterator()
        iter_train_op = iter_train.get_next()
        iter_test = dataset_test.make_one_shot_iterator()
        iter_test_op = iter_test.get_next()

        # Build model graph
        self.build_graph()

        # Train loop
        for i in range(10):
            batch_train = self.__session.run([iter_train_op])
            batch_x_train, batch_y_train = batch_train[0]
            # Print the loss from time to time
            if i % 100 == 0:
                batch_test = self.__session.run([iter_test_op])
                batch_x_test, batch_y_test = batch_test[0]
                loss_train, summary_1 = self.__session.run(
                    [self.__loss, self.__merged_summary_op],
                    feed_dict={self.__x_: batch_x_train,
                               self.__y_: batch_y_train,
                               self.__is_training: True})
                loss_val, summary_2 = self.__session.run(
                    [self.__loss_val, self.__val_summary],
                    feed_dict={self.__x_: batch_x_test,
                               self.__y_: batch_y_test,
                               self.__is_training: False})
                print("Loss Train: {0} Loss Val: {1}".format(loss_train, loss_val))
                # Write to tensorboard summary
                self.__writer.add_summary(summary_1, i)
                self.__writer.add_summary(summary_2, i)

            # Execute train op
            self.__train_step.run(session=self.__session, feed_dict={
                self.__x_: batch_x_train, self.__y_: batch_y_train,
                self.__is_training: True})
            print(i)

        # Once the training loop is over, store the final model in a checkpoint file with __saver.save:

        # converter = tf.contrib.lite.TFLiteConverter.from_session(self.__session, [self.__x_], [self.__y_])
        # tflite_model = converter.convert()
        # open("MobileNet/ConvertedModelFile.tflite", "wb").write(tflite_model)

        # Save model
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        checkpoint_path = os.path.join(save_dir, "model.ckpt")
        filename = self.__saver.save(self.__session, checkpoint_path)
        tf.train.write_graph(self.__session.graph_def, save_dir, "save_graph.pbtxt")
        print("Model saved in file: %s" % filename)

if __name__ == '__main__':
    cnn = Train()
    cnn.train()

I tried exporting the model to a .tflite file through "Exporting a GraphDef from tf.Session", "Exporting a GraphDef from file" and "Exporting a SavedModel". All are described in the Converter Python API guide.

Exporting a GraphDef from tf.Session

When I try to export following the "Exporting a GraphDef from tf.Session" guide, I get the following error:

Traceback (most recent call last):
  File "C:/Users/nermi/PycharmProjects/DronePathTracking/main.py", line 226, in <module>
    cnn.train()
  File "C:/Users/nermi/PycharmProjects/DronePathTracking/main.py", line 212, in train
    tflite_model = converter.convert()
  File "C:\Users\nermi\Python\Python36\lib\site-packages\tensorflow\contrib\lite\python\lite.py", line 453, in convert
    **converter_kwargs)
  File "C:\Users\nermi\Python\Python36\lib\site-packages\tensorflow\contrib\lite\python\convert.py", line 342, in toco_convert_impl
    input_data.SerializeToString())
  File "C:\Users\nermi\Python\Python36\lib\site-packages\tensorflow\contrib\lite\python\convert.py", line 135, in toco_convert_protos
    (stdout, stderr))
RuntimeError: TOCO failed see console for info.
b'Traceback (most recent call last):\r\n  File "c:\\users\\nermi\\python\\python36\\lib\\site-packages\\tensorflow\\contrib\\lite\\toco\\python\\tensorflow_wrap_toco.py", line 18, in swig_import_helper\r\n    fp, pathname, description = imp.find_module(\'_tensorflow_wrap_toco\', [dirname(__file__)])\r\n  File "c:\\users\\nermi\\python\\python36\\lib\\imp.py", line 297, in find_module\r\n    raise ImportError(_ERR_MSG.format(name), name=name)\r\nImportError: No module named \'_tensorflow_wrap_toco\'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n  File "c:\\users\\nermi\\python\\python36\\lib\\runpy.py", line 193, in _run_module_as_main\r\n    "__main__", mod_spec)\r\n  File "c:\\users\\nermi\\python\\python36\\lib\\runpy.py", line 85, in _run_code\r\n    exec(code, run_globals)\r\n  File "C:\\Users\\nermi\\Python\\Python36\\Scripts\\toco_from_protos.exe\\__main__.py", line 5, in <module>\r\n  File "c:\\users\\nermi\\python\\python36\\lib\\site-packages\\tensorflow\\contrib\\lite\\toco\\python\\toco_from_protos.py", line 22, in <module>\r\n    from tensorflow.contrib.lite.toco.python import tensorflow_wrap_toco\r\n  File "c:\\users\\nermi\\python\\python36\\lib\\site-packages\\tensorflow\\contrib\\lite\\toco\\python\\tensorflow_wrap_toco.py", line 28, in <module>\r\n    _tensorflow_wrap_toco = swig_import_helper()\r\n  File "c:\\users\\nermi\\python\\python36\\lib\\site-packages\\tensorflow\\contrib\\lite\\toco\\python\\tensorflow_wrap_toco.py", line 20, in swig_import_helper\r\n    import _tensorflow_wrap_toco\r\nModuleNotFoundError: No module named \'_tensorflow_wrap_toco\'\r\n'
None
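
For reference, the conversion code for this attempt is the snippet that is commented out at the end of train(), essentially:

# Convert straight from the live session, using the input and output
# tensors as referenced in train() (MobileNet/ is an existing directory)
converter = tf.contrib.lite.TFLiteConverter.from_session(self.__session, [self.__x_], [self.__y_])
tflite_model = converter.convert()
open("MobileNet/ConvertedModelFile.tflite", "wb").write(tflite_model)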

Exporting a SavedModel

When I try to export following the "Exporting a SavedModel" guide with my export_saved_model.py script, I get the following error:

Traceback (most recent call last):
  File "C:/Users/nermi/PycharmProjects/DronePathTracking/export_saved_model.py", line 5, in <module>
    converter = tf.contrib.lite.TFLiteConverter.from_saved_model(saved_model_dir)
  File "C:\Users\nermi\Python\Python36\lib\site-packages\tensorflow\contrib\lite\python\lite.py", line 340, in from_saved_model
    output_arrays, tag_set, signature_key)
  File "C:\Users\nermi\Python\Python36\lib\site-packages\tensorflow\contrib\lite\python\convert_saved_model.py", line 239, in freeze_saved_model
    meta_graph = get_meta_graph_def(saved_model_dir, tag_set)
  File "C:\Users\nermi\Python\Python36\lib\site-packages\tensorflow\contrib\lite\python\convert_saved_model.py", line 61, in get_meta_graph_def
    return loader.load(sess, tag_set, saved_model_dir)
  File "C:\Users\nermi\Python\Python36\lib\site-packages\tensorflow\python\saved_model\loader_impl.py", line 196, in load
    loader = SavedModelLoader(export_dir)
  File "C:\Users\nermi\Python\Python36\lib\site-packages\tensorflow\python\saved_model\loader_impl.py", line 212, in __init__
    self._saved_model = _parse_saved_model(export_dir)
  File "C:\Users\nermi\Python\Python36\lib\site-packages\tensorflow\python\saved_model\loader_impl.py", line 82, in _parse_saved_model
    constants.SAVED_MODEL_FILENAME_PB))
OSError: SavedModel file does not exist at: model_files/{saved_model.pbtxt|saved_model.pb}

The export_saved_model.py script:

import tensorflow as tf

saved_model_dir = "model_files"

converter = tf.contrib.lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
open("MobileNet/converted_model.tflite", "wb").write(tflite_model)

Exporting a GraphDef from file

Finally, I have the following freeze_model.py script to freeze the saved model:

from tensorflow.python.tools import freeze_graph

# Freeze the graph
save_path="C:/Users/nermi/PycharmProjects/DronePathTracking/model_files/" #directory to model files
MODEL_NAME = 'my_model' #name of the model optional
input_graph_path = save_path+'save_graph.pbtxt'#complete path to the input graph
checkpoint_path = save_path+'model.ckpt' #complete path to the model's checkpoint file
input_saver_def_path = ""
input_binary = False
output_node_names = "X, Y" #output node's name. Should match to that mentioned in your code
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
output_frozen_graph_name = save_path+'frozen_'+MODEL_NAME+'.pb' # the name of .pb file you would like to give
clear_devices = True


def freeze():
    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_frozen_graph_name, clear_devices, "")


freeze()
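
As a sanity check on which node names actually end up in the frozen graph, they can be listed with a short inspection sketch like this (just a helper, not part of the pipeline):

import tensorflow as tf

# Load the frozen graph and print every node name and op type
graph_def = tf.GraphDef()
with tf.gfile.GFile("model_files/frozen_my_model.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

for node in graph_def.node:
    print(node.name, node.op)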

But when I try to convert frozen_my_model.pb to .tflite with my export_to_tflite.py script:

import tensorflow as tf

graph_def_file = "model_files/frozen_my_model.pb" # the frozen .pb file

input_arrays = ["X"] #Input node
output_arrays = ["Y"] #Output node

converter = tf.contrib.lite.TFLiteConverter.from_frozen_graph(
    graph_def_file, input_arrays, output_arrays
)

tflite_model = converter.convert()

open("MobileNet/my_model.tflite", "wb").write(tflite_model)

I get the following error:

Traceback (most recent call last):
  File "C:/Users/nermi/PycharmProjects/DronePathTracking/export_to_tflite.py", line 12, in <module>
    tflite_model = converter.convert()
  File "C:\Users\nermi\Python\Python36\lib\site-packages\tensorflow\contrib\lite\python\lite.py", line 453, in convert
    **converter_kwargs)
  File "C:\Users\nermi\Python\Python36\lib\site-packages\tensorflow\contrib\lite\python\convert.py", line 342, in toco_convert_impl
    input_data.SerializeToString())
  File "C:\Users\nermi\Python\Python36\lib\site-packages\tensorflow\contrib\lite\python\convert.py", line 135, in toco_convert_protos
    (stdout, stderr))
RuntimeError: TOCO failed see console for info.
b'Traceback (most recent call last):\r\n  File "c:\\users\\nermi\\python\\python36\\lib\\site-packages\\tensorflow\\contrib\\lite\\toco\\python\\tensorflow_wrap_toco.py", line 18, in swig_import_helper\r\n    fp, pathname, description = imp.find_module(\'_tensorflow_wrap_toco\', [dirname(__file__)])\r\n  File "c:\\users\\nermi\\python\\python36\\lib\\imp.py", line 297, in find_module\r\n    raise ImportError(_ERR_MSG.format(name), name=name)\r\nImportError: No module named \'_tensorflow_wrap_toco\'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n  File "c:\\users\\nermi\\python\\python36\\lib\\runpy.py", line 193, in _run_module_as_main\r\n    "__main__", mod_spec)\r\n  File "c:\\users\\nermi\\python\\python36\\lib\\runpy.py", line 85, in _run_code\r\n    exec(code, run_globals)\r\n  File "C:\\Users\\nermi\\Python\\Python36\\Scripts\\toco_from_protos.exe\\__main__.py", line 5, in <module>\r\n  File "c:\\users\\nermi\\python\\python36\\lib\\site-packages\\tensorflow\\contrib\\lite\\toco\\python\\toco_from_protos.py", line 22, in <module>\r\n    from tensorflow.contrib.lite.toco.python import tensorflow_wrap_toco\r\n  File "c:\\users\\nermi\\python\\python36\\lib\\site-packages\\tensorflow\\contrib\\lite\\toco\\python\\tensorflow_wrap_toco.py", line 28, in <module>\r\n    _tensorflow_wrap_toco = swig_import_helper()\r\n  File "c:\\users\\nermi\\python\\python36\\lib\\site-packages\\tensorflow\\contrib\\lite\\toco\\python\\tensorflow_wrap_toco.py", line 20, in swig_import_helper\r\n    import _tensorflow_wrap_toco\r\nModuleNotFoundError: No module named \'_tensorflow_wrap_toco\'\r\n'
None

Extra Info

When I save the model, the model_files directory looks like this:

Model_files directory

I have tried many things, but no luck.

Any help is appreciated!

Upvotes: 3

Views: 2028

Answers (1)

Shubham Panchal

Reputation: 4299

TOCO on Windows is problematic. I faced the same kind of problems until I found a solution: upload the saved model or GraphDef to a Google Colab notebook. Then,

  1. Connect to a GPU or TPU runtime (Change runtime type option).
  2. Upload the saved model or frozen graph to the runtime (Files section in the top-left corner).
  3. Write the same conversion script you have mentioned in one cell (see the sketch after these steps).
  4. Make sure to create the necessary directories in the runtime. See this answer.
  5. The conversion will take place in the cloud.
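
For example, a cell along these lines (mirroring the frozen-graph script from the question; the file names assume frozen_my_model.pb was uploaded into the runtime's working directory) runs the conversion in the cloud and downloads the result:

import tensorflow as tf
from google.colab import files

# Convert the uploaded frozen graph to TensorFlow Lite inside the Colab runtime
converter = tf.contrib.lite.TFLiteConverter.from_frozen_graph(
    "frozen_my_model.pb",   # frozen graph uploaded via the Files panel
    input_arrays=["X"],     # input node name from the question
    output_arrays=["Y"]     # output node name from the question
)
tflite_model = converter.convert()

with open("converted_model.tflite", "wb") as f:
    f.write(tflite_model)

# Download the converted model back to the local machine
files.download("converted_model.tflite")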

That way, there are no TOCO problems. See this notebook for more info.

Upvotes: 1
