Reputation: 426
I'm storing a variable number of JPEG frames per example in a TFRecord file using SequenceExample:
tf.compat.as_bytes(cv2.imencode(".jpg", frame)[1].tobytes())
Then I parse these frames back by using:
images = tf.map_fn(lambda x: tf.image.decode_jpeg(x, channels=3), sequence_features['frames'], dtype=tf.uint8)
But the image values are somehow shifted:
When I just parse the raw byte strings and then decode them with OpenCV later, the pictures look normal:
for img in images:
    img = np.frombuffer(img, dtype=np.uint8)
    img = cv2.imdecode(img, 1)
More complete example:
def write(videos, tfr_path):
    with tf.python_io.TFRecordWriter(tfr_path) as writer:
        for video in videos:
            label = get_label()
            frames = []
            for frame in video:
                # JPEG-encode each frame with OpenCV and keep the raw bytes
                frames.append(tf.compat.as_bytes(cv2.imencode(".jpg", frame)[1].tobytes()))
            feature_list = {
                'label': _float_list_feature_list(label),
                'frames': _bytes_feature_list(frames)
            }
            feature_lists = tf.train.FeatureLists(feature_list=feature_list)
            example = tf.train.SequenceExample(feature_lists=feature_lists, context=None)
            writer.write(example.SerializeToString())
def _parse_tfr_data(example, size):
    sequence_features = {
        'label': tf.FixedLenSequenceFeature([size], dtype=tf.float32),
        'frames': tf.FixedLenSequenceFeature([], dtype=tf.string)
    }
    _, sequence_features = tf.parse_single_sequence_example(example, context_features=None,
                                                            sequence_features=sequence_features)
    # Decode each JPEG byte string back into a uint8 image tensor
    images = tf.map_fn(lambda x: tf.image.decode_jpeg(x, channels=3), sequence_features['frames'], dtype=tf.uint8)
    label = sequence_features['label']
    return images, label
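In case it helps, a minimal sketch of how _parse_tfr_data could be wired into a tf.data pipeline; tfr_path and label_size are placeholders, not part of my actual code:
dataset = tf.data.TFRecordDataset([tfr_path])
dataset = dataset.map(lambda example: _parse_tfr_data(example, label_size))
iterator = dataset.make_one_shot_iterator()
images, label = iterator.get_next()  # images: uint8, shape [num_frames, height, width, 3]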
Upvotes: 1
Views: 1171
Reputation: 426
Thanks to Dan Mašek! tf.image.decode_jpeg returns images in RGB channel order, while cv2.imencode expects BGR, so swapping the channel order of each frame before encoding fixed it.
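For reference, a minimal sketch of the change inside the write loop from the question; swapped is just an illustrative name, and since cv2.COLOR_RGB2BGR and cv2.COLOR_BGR2RGB perform the same channel reversal, either constant works:
# Swap the channel order before JPEG-encoding so the colors come back
# correctly when tf.image.decode_jpeg (which assumes RGB) reads them.
swapped = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
frames.append(tf.compat.as_bytes(cv2.imencode(".jpg", swapped)[1].tobytes()))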
Upvotes: 1