Reputation: 2601
All the following questions are based on the TensorFlow 1.0 API.
I am now able to write images that are stored under directories named by class name into TFRecords. This is my code for generating the TFRecords:
def _convert_to_example(filename, image_buffer, label, text, height, width):
    colorspace = 'RGB'
    channels = 3
    image_format = 'JPEG'

    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
        'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),
        'image/channels': _int64_feature(channels),
        'image/class/label': _int64_feature(label),
        'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
        'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),
        'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),
        'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
    return example
This is the main method; here I store the height, width, channels (this value can't be read back out), etc.
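For reference, _int64_feature and _bytes_feature are just thin wrappers around tf.train.Feature, and each example is serialized into a shard file with tf.python_io.TFRecordWriter. The writer part is not shown above, so the following is only a rough sketch of how it fits together:

def _int64_feature(value):
    # Wrap an int (or a list of ints) in an Int64List feature.
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


def _bytes_feature(value):
    # Wrap raw bytes in a BytesList feature.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


# Sketch of the writer loop: one serialized Example per image, appended to a shard.
# writer = tf.python_io.TFRecordWriter('./data/tiny_5_tfrecords/train-00000-of-00002')
# example = _convert_to_example(filename, image_buffer, label, text, height, width)
# writer.write(example.SerializeToString())
# writer.close()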
And I am able to read the TFRecords back out. This is my code:
def read_tfrecords():
    print('reading from tfrecords file {}'.format(FLAGS.record_file))
    record_iterator = tf.python_io.tf_record_iterator(path=FLAGS.record_file)

    with tf.Session() as sess:
        for string_record in record_iterator:
            example = tf.train.Example()
            example.ParseFromString(string_record)

            height_ = int(example.features.feature['image/height'].int64_list.value[0])
            width_ = int(example.features.feature['image/width'].int64_list.value[0])
            channels_ = int(example.features.feature['image/channels'].int64_list.value[0])
            image_bytes_ = example.features.feature['image/encoded'].bytes_list.value[0]
            label_ = int(example.features.feature['image/class/label'].int64_list.value[0])
            text_bytes_ = example.features.feature['image/class/text'].bytes_list.value[0]

            # image_array_ = np.fromstring(image_bytes_, dtype=np.uint8).reshape((height_, width_, 3))
            image_ = tf.image.decode_jpeg(image_bytes_)
            image_ = sess.run(image_)
            text_ = text_bytes_.decode('utf-8')

            print('tfrecords height {0}, width {1}, channels {2}: '.format(height_, width_, channels_))
            print('decode image shape: ', image_.shape)
            print('label text: ', text_)
            print('label: ', label_)
            # io.imshow(image_)
            # plt.show()
All goes fine. However, the problem occurs when I try to load the TFRecords data into batches and feed them into the network.
Here is all the code I use to load the batches:
tf.app.flags.DEFINE_integer('target_image_height', 150, 'train input image height')
tf.app.flags.DEFINE_integer('target_image_width', 200, 'train input image width')
tf.app.flags.DEFINE_integer('batch_size', 12, 'batch size of training.')
tf.app.flags.DEFINE_integer('num_epochs', 100, 'epochs of training.')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'learning rate of training.')

FLAGS = tf.app.flags.FLAGS


def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized=serialized_example,
        features={
            'image/height': tf.FixedLenFeature([], tf.int64),
            'image/width': tf.FixedLenFeature([], tf.int64),
            'image/channels': tf.FixedLenFeature([], tf.int64),
            'image/encoded': tf.FixedLenFeature([], tf.string),
            'image/class/label': tf.FixedLenFeature([], tf.int64),
        })
    image = tf.decode_raw(features['image/encoded'], out_type=tf.uint8)
    height = tf.cast(features['image/height'], dtype=tf.int32)
    width = tf.cast(features['image/width'], dtype=tf.int32)
    channels = tf.cast(features['image/channels'], dtype=tf.int32)
    label = tf.cast(features['image/class/label'], dtype=tf.int32)
    # cast image int64 to float32 [0, 255] -> [-0.5, 0.5]
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    image_shape = tf.stack([height, width, 3])
    image = tf.reshape(image, image_shape)
    return image, label


def inputs(train, batch_size, num_epochs):
    if not num_epochs:
        num_epochs = None
    filenames = ['./data/tiny_5_tfrecords/train-00000-of-00002',
                 './data/tiny_5_tfrecords/train-00001-of-00002']
    print(filenames)
    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer(filenames, num_epochs=num_epochs)
        print(filename_queue)
        image, label = read_and_decode(filename_queue)
    images, sparse_labels = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=2,
        capacity=1000 + 3 * batch_size,
        min_after_dequeue=1000)
    return images, sparse_labels


def run_training():
    images, labels = inputs(train=True, batch_size=FLAGS.batch_size,
                            num_epochs=FLAGS.num_epochs)
    images = tf.Print(images, [images], message='this is images:')
    images.eval()
    predictions = inference.lenet(images=images, num_classes=5, activation_fn='relu')
    slim.losses.softmax_cross_entropy(predictions, labels)
    total_loss = slim.losses.get_total_loss()
    tf.summary.scalar('loss', total_loss)
    optimizer = tf.train.RMSPropOptimizer(0.001, 0.9)
    train_op = slim.learning.create_train_op(total_loss=total_loss,
                                             optimizer=optimizer,
                                             summarize_gradients=True)
    slim.learning.train(train_op=train_op, save_summaries_secs=20)


def main(_):
    run_training()


if __name__ == '__main__':
    tf.app.run()
When I run this program, I get this error:
Traceback (most recent call last):
  File "train_tiny5_tensorflow.py", line 111, in <module>
    tf.app.run()
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/platform/app.py", line 44, in run
    _sys.exit(main(_sys.argv[:1] + flags_passthrough))
  File "train_tiny5_tensorflow.py", line 107, in main
    run_training()
  File "train_tiny5_tensorflow.py", line 88, in run_training
    num_epochs=FLAGS.num_epochs)
  File "train_tiny5_tensorflow.py", line 81, in inputs
    min_after_dequeue=1000)
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/training/input.py", line 1165, in shuffle_batch
    name=name)
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/training/input.py", line 724, in _shuffle_batch
    dtypes=types, shapes=shapes, shared_name=shared_name)
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/ops/data_flow_ops.py", line 624, in __init__
    shapes = _as_shape_list(shapes, dtypes)
  File "/usr/local/lib/python3.6/site-packages/tensorflow/python/ops/data_flow_ops.py", line 77, in _as_shape_list
    raise ValueError("All shapes must be fully defined: %s" % shapes)
ValueError: All shapes must be fully defined: [TensorShape([Dimension(None), Dimension(None), Dimension(3)]), TensorShape([])]
Apparently, the program didn't pick up the TFRecords files at all.
I have tried the following:
1. I thought the filenames might not be right, so I changed them to both relative and absolute paths; neither works.
2. I placed the TFRecords files right next to the script and wrote the file names directly; that didn't work either.
So, basically, I have these questions:
1. What is the official, reasonable way to write a program, as short as possible, that loads a TFRecords file into batches and feeds them into a network?
2. BTW, what is the simplest and most elegant way to write TensorFlow layers? slim seems like a good choice; the original way is ugly and complicated!
Upvotes: 2
Views: 3033
Reputation: 494
I don't know whether your own answer implies what I'm writing here, since I don't fully understand it. However, the issue causing the ValueError is that shuffle_batch needs the image sizes to all be the same and known. So the fix is, in the function read_and_decode, to simply make image_shape have the same values for all images, for example:
image_shape = tf.stack([FLAGS.target_image_height, FLAGS.target_image_width, 3])
and then use
image = tf.reshape(image, image_shape)
to make all images have the same size.
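Applied to the read_and_decode from the question, the suggested change would look roughly like this (only a sketch, trimmed to the features that are actually used; note that with decode_raw the reshape only succeeds if every stored buffer really contains target_image_height * target_image_width * 3 bytes):

def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized=serialized_example,
        features={
            'image/encoded': tf.FixedLenFeature([], tf.string),
            'image/class/label': tf.FixedLenFeature([], tf.int64),
        })
    image = tf.decode_raw(features['image/encoded'], out_type=tf.uint8)
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    # Constant target shape, so shuffle_batch can infer a fully defined shape.
    image_shape = tf.stack([FLAGS.target_image_height, FLAGS.target_image_width, 3])
    image = tf.reshape(image, image_shape)
    label = tf.cast(features['image/class/label'], dtype=tf.int32)
    return image, label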
Upvotes: 1
Reputation: 2601
For anyone who may run into the same problem: I made some mistakes in the code above.
Simply do not use decode_raw; use tf.image.decode_jpeg instead. Here is my inputs function:
def inputs(train, batch_size, num_epochs):
    if not num_epochs:
        num_epochs = None
    filenames = ['./data/tiny_5_tfrecords/train-00000-of-00002',
                 './data/tiny_5_tfrecords/train-00001-of-00002']
    print(filenames)
    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer(filenames, num_epochs=num_epochs)
        print(filename_queue)
        image, label = read_and_decode(filename_queue)
        images, sparse_labels = tf.train.shuffle_batch(
            [image, label],
            batch_size=batch_size,
            num_threads=2,
            capacity=1000 + 3 * batch_size,
            min_after_dequeue=1000)
        return images, sparse_labels
I had also missed a tab on the last two lines.
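Roughly, the matching read_and_decode can be sketched like this (the resize to the target-size flags is an extra assumption on top of the decode_jpeg change, so that shuffle_batch still gets a fully defined shape):

def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized=serialized_example,
        features={
            'image/encoded': tf.FixedLenFeature([], tf.string),
            'image/class/label': tf.FixedLenFeature([], tf.int64),
        })
    # Decode the JPEG bytes instead of reinterpreting them with decode_raw.
    image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
    # Resize to a fixed size; a decoded JPEG has unknown height/width otherwise.
    image = tf.image.resize_images(image, [FLAGS.target_image_height,
                                           FLAGS.target_image_width])
    # [0, 255] -> [-0.5, 0.5]
    image = image * (1. / 255) - 0.5
    label = tf.cast(features['image/class/label'], dtype=tf.int32)
    return image, label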
Upvotes: 2