Reputation: 2923
Is it common to see a dramatic loss of accuracy after freezing a graph for serving? While training and evaluating the flowers dataset with a pretrained inception-resnet-v2, my accuracy is 98-99%, with probabilities of 90+% for the correct predictions. After freezing the graph and running predictions on it, however, my model is far less accurate, and the correct labels are predicted with a confidence of only 30-40%.
After training, I was left with several checkpoint files rather than a single model file. Since I was unable to run the official freeze graph script from the tensorflow repository on GitHub (I suspect because I have a pbtxt file and not a pb file after training), I am reusing the code from this tutorial instead.
Here is the code I modified to freeze my graph:
import os, argparse

import tensorflow as tf
from tensorflow.python.framework import graph_util

dir = os.path.dirname(os.path.realpath(__file__))

def freeze_graph(model_folder, input_checkpoint):
    # We retrieve our checkpoint fullpath
    checkpoint = tf.train.get_checkpoint_state(model_folder)
    # input_checkpoint = checkpoint.model_checkpoint_path

    # We specify the full filename of our frozen graph
    absolute_model_folder = "/".join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_folder + "/frozen_model.pb"

    # Before exporting our graph, we need to specify our output node.
    # This is how TF decides what part of the graph it has to keep
    # and what part it can dump.
    # NOTE: this variable is plural, because you can have multiple output nodes
    output_node_names = "InceptionResnetV2/Logits/Predictions"

    # We clear devices to allow TensorFlow to control on which device it will load operations
    clear_devices = True

    # We import the meta graph and retrieve a Saver
    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)

    # We retrieve the protobuf graph definition
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()

    # We start a session and restore the graph weights
    with tf.Session() as sess:
        saver.restore(sess, input_checkpoint)

        # We use a built-in TF helper to export variables to constants
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,                         # The session is used to retrieve the weights
            input_graph_def,              # The graph_def is used to retrieve the nodes
            output_node_names.split(",")  # The output node names are used to select the useful nodes
        )

        # Finally we serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_folder", type=str, help="Model folder to export")
    parser.add_argument("--input_checkpoint", type=str, help="Input checkpoint name")
    args = parser.parse_args()
    freeze_graph(args.model_folder, args.input_checkpoint)
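I invoke the script like this (the script name and paths are illustrative; substitute your own training directory and checkpoint prefix):

python freeze_model.py --model_folder=./train_dir --input_checkpoint=./train_dir/model.ckpt-10000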
This is the code I use to run my prediction, feeding in a single image as a user would:
import tensorflow as tf
from scipy.misc import imread, imresize
import numpy as np

# Load a single image and shape it into a 1-image batch
img = imread("./dandelion.jpg")
img = imresize(img, (299, 299, 3))
img = img.astype(np.float32)
img = np.expand_dims(img, 0)

labels_dict = {0: 'daisy', 1: 'dandelion', 2: 'roses', 3: 'sunflowers', 4: 'tulips'}

# Define the filename of the frozen graph
graph_filename = "./frozen_model.pb"

# Create a graph def object to read the graph
with tf.gfile.GFile(graph_filename, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

# Construct the graph and import the graph from graphdef
with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def)

# We define the input and output nodes we will feed in
input_node = graph.get_tensor_by_name('import/batch:0')
output_node = graph.get_tensor_by_name('import/InceptionResnetV2/Logits/Predictions:0')

with tf.Session(graph=graph) as sess:
    predictions = sess.run(output_node, feed_dict={input_node: img})

print(predictions)
label_predicted = np.argmax(predictions[0])
print('Predicted Flower:', labels_dict[label_predicted])
print('Prediction probability:', predictions[0][label_predicted])
And the output I received from running my prediction:
2017-04-11 17:38:21.722217: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2017-04-11 17:38:21.722608: I tensorflow/core/common_runtime/gpu/gpu_device.cc:887] Found device 0 with properties:
name: GeForce GTX 860M
major: 5 minor: 0 memoryClockRate (GHz) 1.0195
pciBusID 0000:01:00.0
Total memory: 3.95GiB
Free memory: 3.42GiB
2017-04-11 17:38:21.722624: I tensorflow/core/common_runtime/gpu/gpu_device.cc:908] DMA: 0
2017-04-11 17:38:21.722630: I tensorflow/core/common_runtime/gpu/gpu_device.cc:918] 0: Y
2017-04-11 17:38:21.722642: I tensorflow/core/common_runtime/gpu/gpu_device.cc:977] Creating TensorFlow device (/gpu:0) -> (device: 0, name: GeForce GTX 860M, pci bus id: 0000:01:00.0)
2017-04-11 17:38:22.183204: I tensorflow/compiler/xla/service/platform_util.cc:58] platform CUDA present with 1 visible devices
2017-04-11 17:38:22.183232: I tensorflow/compiler/xla/service/platform_util.cc:58] platform Host present with 8 visible devices
2017-04-11 17:38:22.184007: I tensorflow/compiler/xla/service/service.cc:183] XLA service 0xb85a1c0 executing computations on platform Host. Devices:
2017-04-11 17:38:22.184022: I tensorflow/compiler/xla/service/service.cc:191] StreamExecutor device (0): <undefined>, <undefined>
2017-04-11 17:38:22.184140: I tensorflow/compiler/xla/service/platform_util.cc:58] platform CUDA present with 1 visible devices
2017-04-11 17:38:22.184149: I tensorflow/compiler/xla/service/platform_util.cc:58] platform Host present with 8 visible devices
2017-04-11 17:38:22.184610: I tensorflow/compiler/xla/service/service.cc:183] XLA service 0xb631ee0 executing computations on platform CUDA. Devices:
2017-04-11 17:38:22.184620: I tensorflow/compiler/xla/service/service.cc:191] StreamExecutor device (0): GeForce GTX 860M, Compute Capability 5.0
[[ 0.1670652 0.46482906 0.12899996 0.12481128 0.11429448]]
Predicted Flower: dandelion
Prediction probability: 0.464829
Potential source of the problem: I first trained my model using TF 0.12, but I believe it is compatible with TF 1.0.1, the version I'm using now. As a safety precaution, I upgraded my files to TF 1.0.1 and retrained the model to obtain a new set of checkpoint files (with the same accuracy), then used these checkpoint files for freezing. I compiled my tensorflow from source. Could the issue come from the fact that I have a pbtxt file instead of a pb file? I have no idea how I could get a pb file from training my model.
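(My best guess for getting a binary pb, which I haven't verified: tf.train.write_graph can serialize the graph definition in binary form during training. A minimal sketch, with an illustrative directory and filename:)

# Assumption (unverified): write the graph definition as a binary protobuf
# next to the checkpoints; './train_dir' and 'graph.pb' are placeholders.
tf.train.write_graph(sess.graph_def, './train_dir', 'graph.pb', as_text=False)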
Upvotes: 4
Views: 3052
Reputation: 71
I had a similar issue: accuracy was 1.5% lower when using the frozen model. The problem was the saver object in the code that freezes the model: you need to build the saver from the exponential-moving-average shadow variables, using the same decay as during training, rather than from the raw variables. I use the code from the Inception model, and this is how I create the saver in the freezing script:
variable_averages = tf.train.ExponentialMovingAverage(0.9997)  # same decay as during training
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
For me, this solved the problem.
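Applied to the freezing script above, it would look roughly like this (a sketch, assuming 0.9997 is the decay value you trained with):

# Sketch: after importing the meta graph, build the saver from the
# moving-average shadow variables instead of the raw variables.
tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True)
variable_averages = tf.train.ExponentialMovingAverage(0.9997)
saver = tf.train.Saver(variable_averages.variables_to_restore())
with tf.Session() as sess:
    saver.restore(sess, input_checkpoint)
    # ... convert_variables_to_constants as before ...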
Upvotes: 1
Reputation: 2614
I believe the problem is not related to freezing the model; it is related to the way you pre-process your image.
I recommend using the default pre-processing function of InceptionResnet V2.
Below, I will post code that takes an image path (JPG or PNG) and returns a preprocessed image. You can modify it to accept a batch of images. It is not production-quality code and needs some optimization, but it works well.
First, loading the image:
def load_img(path_img):
    """
    Load an image to tensorflow
    :param path_img: image path on the disk
    :return: 3D tensorflow image
    """
    filename_queue = tf.train.string_input_producer([path_img])  # list of files to read

    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)

    my_img = tf.image.decode_image(value)  # use png or jpg decoder based on your files.

    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_op)

        # Start populating the filename queue.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        for i in range(1):  # length of your filename list
            image = my_img.eval()  # here is your image Tensor :)

        print(image.shape)
        # Image.fromarray(np.asarray(image)).show()

        coord.request_stop()
        coord.join(threads)
    return image
Then, the pre-processing code:
def preprocess(image, height, width,
               central_fraction=0.875, scope=None):
    """Prepare one image for evaluation.

    If height and width are specified it would output an image with that size by
    applying resize_bilinear.

    If central_fraction is specified it would crop the central fraction of the
    input image.

    Args:
      image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
        [0, 1], otherwise it would be converted to tf.float32 assuming that the
        range is [0, MAX], where MAX is the largest positive representable number
        for int(8/16/32) data type (see `tf.image.convert_image_dtype` for details)
      height: integer
      width: integer
      central_fraction: Optional Float, fraction of the image to crop.
      scope: Optional scope for name_scope.
    Returns:
      3-D float Tensor of prepared image.
    """
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)

    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    if central_fraction:
        image = tf.image.central_crop(image, central_fraction=central_fraction)

    if height and width:
        # Resize the image to the specified height and width.
        image = tf.expand_dims(image, 0)
        image = tf.image.resize_bilinear(image, [height, width],
                                         align_corners=False)
        image = tf.squeeze(image, [0])

    # Rescale from [0, 1] to [-1, 1]
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image
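Putting the two functions together (a sketch; 299 is the input size InceptionResnetV2 expects, and the image path is illustrative):

image = load_img("./dandelion.jpg")  # numpy array returned by the loader above
image = preprocess(image, 299, 299)  # float tensor rescaled to [-1, 1]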
Finally, in my case, I had to convert the processed tensor back into a numpy array:
image = tf.Session().run(image)
This image can then be fed to the frozen model:
persistent_sess = tf.Session(graph=graph) # , config=sess_config)
input_node = graph.get_tensor_by_name('prefix/batch:0')
output_node = graph.get_tensor_by_name('prefix/InceptionResnetV2/Logits/Predictions:0')
predictions = persistent_sess.run(output_node, feed_dict={input_node: [image]})
print(predictions)
label_predicted = np.argmax(predictions[0])
print(label_predicted)
Upvotes: 1