Reputation: 31
I am trying to convert the saved model from the TensorFlow implementation of SuperPoint to a TFLite model to test it on Android.
I started by downloading the saved model from GitHub: https://github.com/rpautrat/SuperPoint/tree/master/pretrained_models
The model is in SavedModel format. When inspecting the inputs and outputs of the model using:
saved_model_cli show --dir sp_v6 --all
I get the following output:
MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:

signature_def['serving_default']:
  The given SavedModel SignatureDef contains the following input(s):
    inputs['image'] tensor_info:
        dtype: DT_FLOAT
        shape: (-1, -1, -1, 1)
        name: superpoint/image:0
  The given SavedModel SignatureDef contains the following output(s):
    outputs['descriptors'] tensor_info:
        dtype: DT_FLOAT
        shape: (1, -1, -1, 256)
        name: superpoint/descriptors:0
    outputs['descriptors_raw'] tensor_info:
        dtype: DT_FLOAT
        shape: (1, -1, -1, 256)
        name: superpoint/descriptors_raw:0
    outputs['logits'] tensor_info:
        dtype: DT_FLOAT
        shape: (1, -1, -1, 65)
        name: superpoint/logits:0
    outputs['pred'] tensor_info:
        dtype: DT_INT32
        shape: (1, -1, -1)
        name: superpoint/pred:0
    outputs['prob'] tensor_info:
        dtype: DT_FLOAT
        shape: (1, -1, -1)
        name: superpoint/prob:0
    outputs['prob_nms'] tensor_info:
        dtype: DT_FLOAT
        shape: (1, -1, -1)
        name: superpoint/prob_nms:0
  Method name is: tensorflow/serving/predict
As far as I know, TFLite models on Android cannot handle dynamic input shapes, so I tried to change the input to a fixed size using the following code:
# use tensorflow v1
import tensorflow as tf

def frozen_graph_maker(export_dir, output_graph):
    with tf.Session(graph=tf.Graph()) as sess:
        tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], export_dir)
        output_nodes = ['superpoint/logits', 'superpoint/prob', 'superpoint/descriptors_raw', 'superpoint/descriptors', 'superpoint/prob_nms', 'superpoint/pred']
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,  # The session is used to retrieve the weights
            sess.graph_def,
            output_nodes  # The output node names are used to select the useful nodes
        )
        # Finally we serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())

if __name__ == "__main__":
    export_dir = './sp_v6/'
    output_graph = "./frozen_graph.pb"
    frozen_graph_maker(export_dir, output_graph)
This gave me a frozen graph, in which I then change the input size using:
# Use tensorflow v2
import tensorflow.compat.v1 as tf

output_graph = "./new_frozen_graph.pb"

def load_frozen_graph(frozen_file='frozen.pb'):
    graph = tf.Graph()
    with graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(frozen_file, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    return graph

graph = load_frozen_graph('./frozen_graph.pb')
print('Tensor shapes before import map')
input_tensor = graph.get_tensor_by_name('superpoint/image:0')
print(input_tensor)

new_graph = tf.Graph()
with new_graph.as_default():
    new_input = tf.placeholder(dtype=tf.float32, shape=[1, 320, 320, 1], name='superpoint/image')
    tf.import_graph_def(graph.as_graph_def(), name='', input_map={'superpoint/image:0': new_input}, return_elements=['superpoint/logits:0', 'superpoint/prob:0', 'superpoint/descriptors_raw:0', 'superpoint/descriptors:0', 'superpoint/prob_nms:0', 'superpoint/pred:0'])

with tf.Session(graph=new_graph) as sess:
    output_nodes = ['superpoint/logits', 'superpoint/prob', 'superpoint/descriptors_raw', 'superpoint/descriptors', 'superpoint/prob_nms', 'superpoint/pred']
    output_graph_def = tf.graph_util.convert_variables_to_constants(
        sess,  # The session is used to retrieve the weights
        sess.graph_def,
        output_nodes  # The output node names are used to select the useful nodes
    )
    with tf.gfile.GFile(output_graph, "wb") as f:
        f.write(output_graph_def.SerializeToString())
I then convert this frozen graph with the fixed input size back to SavedModel format, so that I can convert it to TFLite:
import tensorflow as tf
import os
import shutil
from tensorflow.python import ops

def get_graph_def_from_file(graph_filepath):
    tf.compat.v1.reset_default_graph()
    with ops.Graph().as_default():
        with tf.compat.v1.gfile.GFile(graph_filepath, 'rb') as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())
            return graph_def

def convert_graph_def_to_saved_model(export_dir, graph_filepath, input_name, outputs):
    graph_def = get_graph_def_from_file(graph_filepath)
    with tf.compat.v1.Session(graph=tf.Graph()) as session:
        tf.import_graph_def(graph_def, name='')
        tf.compat.v1.saved_model.simple_save(
            session,
            export_dir,  # change input_image to node.name if you know the name
            inputs={input_name: session.graph.get_tensor_by_name('{}:0'.format(node.name))
                    for node in graph_def.node if node.op == 'Placeholder'},
            outputs={t.rstrip(":0"): session.graph.get_tensor_by_name(t) for t in outputs}
        )
        print('Graph converted to SavedModel!')

tf.compat.v1.enable_eager_execution()
input_name = "superpoint/image"
outputs = ['superpoint/logits:0', 'superpoint/prob:0', 'superpoint/descriptors_raw:0', 'superpoint/descriptors:0', 'superpoint/prob_nms:0', 'superpoint/pred:0']
shutil.rmtree('./saved_model', ignore_errors=True)
convert_graph_def_to_saved_model('./saved_model', './new_frozen_graph.pb', input_name, outputs)
Inspecting the inputs and outputs of this modified model, I get the following:
MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:

signature_def['serving_default']:
  The given SavedModel SignatureDef contains the following input(s):
    inputs['superpoint/image'] tensor_info:
        dtype: DT_FLOAT
        shape: (1, 320, 320, 1)
        name: superpoint/image:0
  The given SavedModel SignatureDef contains the following output(s):
    outputs['superpoint/descriptors'] tensor_info:
        dtype: DT_FLOAT
        shape: (1, 320, 320, 256)
        name: superpoint/descriptors:0
    outputs['superpoint/descriptors_raw'] tensor_info:
        dtype: DT_FLOAT
        shape: (1, 40, 40, 256)
        name: superpoint/descriptors_raw:0
    outputs['superpoint/logits'] tensor_info:
        dtype: DT_FLOAT
        shape: (1, 40, 40, 65)
        name: superpoint/logits:0
    outputs['superpoint/pred'] tensor_info:
        dtype: DT_INT32
        shape: unknown_rank
        name: superpoint/pred:0
    outputs['superpoint/prob'] tensor_info:
        dtype: DT_FLOAT
        shape: (1, 320, 320)
        name: superpoint/prob:0
    outputs['superpoint/prob_nms'] tensor_info:
        dtype: DT_FLOAT
        shape: unknown_rank
        name: superpoint/prob_nms:0
  Method name is: tensorflow/serving/predict
The input looks good now, but some of the outputs have unknown rank. Still, I try to continue with the TFLite conversion as below:
import tensorflow.lite as lite

saved_model_dir = './saved_model'
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
with open('./new_frozen.tflite', 'wb') as w:
    w.write(tflite_model)
But then I get the following mismatch error during conversion:
tensorflow.lite.python.convert.ConverterError: <unknown>:0: error: type of return operand 3 ('tensor<?x?x?xi32>') doesn't match function result type ('tensor<1x?x?xi32>') in function @main
<unknown>:0: note: see current operation: "std.return"(%32, %30, %36, %47, %40, %45) : (tensor<1x320x320x256xf32>, tensor<1x40x40x256xf32>, tensor<1x40x40x65xf32>, tensor<?x?x?xi32>, tensor<1x320x320xf32>, tensor<?x?x?xf32>) -> ()
It would be really great if someone could help me with this and tell me what I am doing wrong, and whether there is an easier way of converting this TensorFlow model to TFLite for Android inference. I did try to remove the two outputs with unknown shape from the graph, and the conversion then worked, but I would like to convert it with those outputs included. Thank you.
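For reference, the conversion that did work was roughly like the sketch below: it simply leaves prob_nms and pred out of the converter outputs (the output filename is just a placeholder, and my exact code may have differed slightly):

import tensorflow as tf

# Sketch: convert the frozen graph while dropping the two unknown-rank
# outputs (prob_nms and pred); the remaining outputs all have fixed shapes.
converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
    graph_def_file='./frozen_graph.pb',
    input_arrays=['superpoint/image'],
    output_arrays=['superpoint/logits', 'superpoint/prob',
                   'superpoint/descriptors_raw', 'superpoint/descriptors'],
    input_shapes={'superpoint/image': [1, 320, 320, 1]}
)
tflite_model = converter.convert()
with open('./superpoint_no_nms.tflite', 'wb') as w:  # placeholder filename
    w.write(tflite_model)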
Upvotes: 1
Views: 878
Reputation: 1
TFLite normally supports fixed-size input and output. So for SuperPoint, only "superpoint/prob" and "superpoint/descriptors" should be converted as outputs (logits and descriptors_raw are intermediate outputs which should have no use in TFLite). So, all in TF2:
import tensorflow as tf

export_dir = "./sp_v6"
H = 240
W = 320

with tf.compat.v1.Session(graph=tf.Graph()) as sess:
    tf.compat.v1.saved_model.loader.load(
        sess, [tf.compat.v1.saved_model.tag_constants.SERVING], export_dir)
    # output_nodes = ['superpoint/logits', 'superpoint/prob', 'superpoint/descriptors_raw',
    #                 'superpoint/descriptors', 'superpoint/prob_nms', 'superpoint/pred']
    # logits: raw detector output
    # prob: pixel-level keypoint score map after depth_to_space
    # prob_nms: prob + NMS
    # pred: points in prob with confidence > detection_threshold (0.4)
    # descriptors_raw: raw descriptor output
    # descriptors: resize_bilinear + l2_normalize
    output_nodes = ['superpoint/prob',
                    'superpoint/descriptors']
    # graph def
    init_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
        sess,  # The session is used to retrieve the weights
        sess.graph_def,
        output_nodes  # The output node names are used to select the useful nodes
    )
    # fix input shape
    frozen_input = tf.compat.v1.placeholder(dtype=tf.float32, shape=[
        1, H, W, 1], name='superpoint/image')
    tf.import_graph_def(init_graph_def, name='', input_map={'superpoint/image:0': frozen_input}, return_elements=[
        'superpoint/prob:0', 'superpoint/descriptors:0'])
    output_nodes = ['superpoint/prob', 'superpoint/descriptors']
    output_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
        sess,  # The session is used to retrieve the weights
        sess.graph_def,
        output_nodes  # The output node names are used to select the useful nodes
    )
    with tf.compat.v1.gfile.GFile("frozen_graph.pb", "wb") as f:
        f.write(output_graph_def.SerializeToString())

converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
    graph_def_file='./frozen_graph.pb',
    input_arrays=['superpoint/image'],
    output_arrays=['superpoint/prob', 'superpoint/descriptors'],
    input_shapes={'superpoint/image': [1, H, W, 1]}
)
# Float16, CPU/GPU
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_model = converter.convert()
tflite_model_size = open('superpoint_{}x{}.tflite'.format(
    H, W), 'wb').write(tflite_model)
print('TFLite Model is {} bytes'.format(tflite_model_size))
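To sanity-check the converted model, a minimal sketch like the following (standard TFLite Interpreter API in Python; the file name is the one written above) runs one random image and prints the output shapes:

import numpy as np
import tensorflow as tf

H, W = 240, 320  # same input size as used for the conversion above

# Load the converted model and run a single dummy image through it.
interpreter = tf.lite.Interpreter(model_path='superpoint_{}x{}.tflite'.format(H, W))
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

dummy = np.random.rand(1, H, W, 1).astype(np.float32)
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()

# Expected from the signature above: prob -> (1, H, W), descriptors -> (1, H, W, 256)
for detail in output_details:
    print(detail['name'], interpreter.get_tensor(detail['index']).shape)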
Upvotes: 0
Reputation: 9701
In case someone is still struggling with this: the issue with this particular graph is that the last operation on the superpoint/prob_nms
branch causes a shape mismatch. I believe they are reshaping an array so that it has shape [1, H, W]
without taking the batch size into consideration, so for an input signature with a batch dimension of None
it causes a shape mismatch error. This can also be reproduced by feeding a tensor with a batch size larger than one directly to the saved model.
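That reproduction might look roughly like this (a sketch, assuming TF2 and the original sp_v6 SavedModel from the question; the random batch is just for illustration):

import numpy as np
import tensorflow as tf

# Load the original (dynamic-shape) SavedModel and call it with batch size 2;
# the prob_nms branch is expected to fail with a shape mismatch.
model = tf.saved_model.load('./sp_v6')
infer = model.signatures['serving_default']
batch = tf.constant(np.random.rand(2, 240, 320, 1), dtype=tf.float32)
outputs = infer(image=batch)  # raises an error in the prob_nms reshape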
A quick solution is to skip that last operation and force a non-batched input. This can be achieved simply with the following code (after exporting to a frozen graph):
converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
    graph_def_file=graph_def_file,
    input_arrays=['superpoint/image'],
    output_arrays=['superpoint/unstack_4', 'superpoint/descriptors'],
    input_shapes={'superpoint/image': [1, None, None, 1]}
)
The output shape for superpoint/unstack_4
won't be batched and the model will only be able to process a single image at a time, but this should suffice for most cases.
Upvotes: 0
Reputation: 691
Starting from the code section "This gave me a frozen graph, in which I then change the input size using:", you can replace all of the following code with this:
Source: https://github.com/tensorflow/tensorflow/issues/38388#issuecomment-642388448
(The following code works in TensorFlow version 1 or 2)
import tensorflow as tf

converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
    graph_def_file='./frozen_graph.pb',
    input_arrays=['superpoint/image'],
    output_arrays=['superpoint/logits', 'superpoint/prob', 'superpoint/descriptors_raw', 'superpoint/descriptors', 'superpoint/prob_nms', 'superpoint/pred'],
    input_shapes={'superpoint/image': [1, 320, 320, 1]}
)
tflite_model = converter.convert()
tflite_model_size = open('model.tflite', 'wb').write(tflite_model)
print('TFLite Model is %d bytes' % tflite_model_size)
Upvotes: 1