Reputation: 49
I'm using TF 2.0 and want to use tf.keras and tf.data.Dataset to train my network. However, I'm struggling to use tf.keras Model.fit together with a tf.data.Dataset, multiple outputs, and custom loss functions. Here is example code that I tried, but it fails:
import tensorflow as tf
import numpy as np
# define model
inputs = tf.keras.Input((512, 512, 3), name='model_input')
x = tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same',
                           kernel_initializer=tf.random_normal_initializer(stddev=0.01), name='conv1')(inputs)
x = tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same',
                           kernel_initializer=tf.random_normal_initializer(stddev=0.01), name='conv2')(x)
output1 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same',
                                 kernel_initializer=tf.random_normal_initializer(stddev=0.01), name='output1')(x)
output2 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same',
                                 kernel_initializer=tf.random_normal_initializer(stddev=0.01), name='output2')(x)
model = tf.keras.Model(inputs, [output1, output2])
# define dataset
def parse_func(single_data):  # just for example case
    input = single_data
    output1 = single_data
    output2 = single_data
    weight1 = output1
    weight2 = output2
    return input, output1, output2, weight1, weight2

def tf_parse_func(single_data):
    return tf.py_function(parse_func, [single_data],
                          [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32])
data = np.random.rand(10, 512, 512, 3)
dataset = tf.data.Dataset.from_tensor_slices(data)
dataset = dataset.map(tf_parse_func, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(2, drop_remainder=True)
# def loss func
def loss_fn1(label, pred):
    return tf.reduce_mean(tf.keras.losses.MSE(label, pred))

def loss_fn2(label, pred):
    return tf.nn.l2_loss(label - pred)

# start training
model.compile(loss={'output1': loss_fn1, 'output2': loss_fn2},
              loss_weights={'output1': 1, 'output2': 2},
              optimizer=tf.keras.optimizers.Adam())
model.fit(dataset, epochs=5)
Actually, rather than passing loss_weights={'output1': 1, 'output2': 2}, I want to pass the per-sample weights coming from the dataset, like loss_weights={'output1': weight1, 'output2': weight2}, but I don't know how to do this. It would be even better to pass weight1/weight2 as loss-function parameters, but I don't know how to do that either: I want loss_fn1 to use output1 and weight1 from the dataset, and loss_fn2 to use output2 and weight2.
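Conceptually, the loss signature I'm after would look something like this (hypothetical; as far as I know Keras only calls custom losses with (y_true, y_pred), so this does not work as written):
def loss_fn1(label, pred, weight):
    # hypothetical extra `weight` argument, taken per-sample from the dataset
    return tf.reduce_mean(tf.keras.losses.MSE(label * weight, pred * weight))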
When I run the above code, I get an error like this:
2019-10-22 20:47:40.551618: W tensorflow/core/framework/cpu_allocator_impl.cc:81] Allocation of 62914560 exceeds 10% of system memory.
1/Unknown - 0s 28ms/step
Traceback (most recent call last):
  File "tools/keras_train_test.py", line 65, in <module>
    model.fit(dataset, epochs=5)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training.py", line 728, in fit
    use_multiprocessing=use_multiprocessing)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training_v2.py", line 324, in fit
    total_epochs=epochs)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training_v2.py", line 123, in run_one_epoch
    batch_outs = execution_function(iterator)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training_v2_utils.py", line 86, in execution_function
    distributed_function(input_fn))
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/def_function.py", line 457, in __call__
    result = self._call(*args, **kwds)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/def_function.py", line 503, in _call
    self._initialize(args, kwds, add_initializers_to=initializer_map)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/def_function.py", line 408, in _initialize
    *args, **kwds))
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/function.py", line 1848, in _get_concrete_function_internal_garbage_collected
    graph_function, _, _ = self._maybe_define_function(args, kwargs)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/function.py", line 2150, in _maybe_define_function
    graph_function = self._create_graph_function(args, kwargs)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/function.py", line 2041, in _create_graph_function
    capture_by_value=self._capture_by_value),
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/func_graph.py", line 915, in func_graph_from_py_func
    func_outputs = python_func(*func_args, **func_kwargs)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/eager/def_function.py", line 358, in wrapped_fn
    return weak_wrapped_fn().__wrapped__(*args, **kwds)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training_v2_utils.py", line 66, in distributed_function
    model, input_iterator, mode)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training_v2_utils.py", line 112, in _prepare_feed_values
    inputs, targets, sample_weights = _get_input_from_iterator(inputs)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training_v2_utils.py", line 145, in _get_input_from_iterator
    x, y, sample_weights = next_element
ValueError: too many values to unpack (expected 3)
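From the last frame, my understanding (an assumption read off the traceback, not from the docs) is that Keras unpacks each dataset element into at most three parts:
# From training_v2_utils.py, line 145 in the traceback:
#     x, y, sample_weights = next_element
# i.e. an element may be (inputs, targets) or (inputs, targets, sample_weights),
# while my map() returns a flat 5-tuple, which cannot be unpacked.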
I have tried many other approaches and ways that I found, but I cannot make this work. Could someone help me? Thanks a lot!
Upvotes: 0
Views: 1239
Reputation: 49
I can use model.fit_generator instead of model.fit to make it work. Here is the source code that runs successfully:
import tensorflow as tf
import numpy as np
# define model
inputs = tf.keras.Input((112, 112, 3), name='model_input')
x = tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same',
kernel_initializer=tf.random_normal_initializer(stddev=0.01), name='conv1')(inputs)
x = tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same',
kernel_initializer=tf.random_normal_initializer(stddev=0.01), name='conv2')(x)
output1 = tf.keras.layers.Conv2D(filters=3, kernel_size=3, padding='same',
kernel_initializer=tf.random_normal_initializer(stddev=0.01), name='output1')(x)
output2 = tf.keras.layers.Conv2D(filters=3, kernel_size=3, padding='same',
kernel_initializer=tf.random_normal_initializer(stddev=0.01), name='output2')(x)
model = tf.keras.Model(inputs, [output1, output2])
# define dataset
def parse_func(single_data):  # just for example case
    input = single_data
    output1 = single_data
    output2 = single_data
    weight1 = output1
    weight2 = output2
    return input, output1, output2, weight1, weight2

def tf_parse_func(single_data):
    input, output1, output2, weight1, weight2 = tf.py_function(
        parse_func, [single_data],
        [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32])
    return input, output1, output2, weight1, weight2
data = np.random.rand(10, 112, 112, 3).astype(np.float32)
dataset = tf.data.Dataset.from_tensor_slices(data).repeat(-1)
dataset = dataset.map(tf_parse_func, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(2, drop_remainder=True)
def generator():
    for input, output1, output2, weight1, weight2 in dataset:
        # pack each weight tensor into the channel dimension of its target,
        # so the loss function can split it back out
        output1 = tf.concat([output1, weight1], axis=-1)
        output2 = tf.concat([output2, weight2], axis=-1)
        yield input, [output1, output2]
# def loss func
def loss_fn1(label, pred):
    weight = label[..., 3:]  # split the packed weight channels back out
    label = label[..., :3]
    return tf.reduce_mean(tf.keras.losses.MSE(label * weight, pred * weight))

def loss_fn2(label, pred):
    weight = label[..., 3:]
    label = label[..., :3]
    return tf.nn.l2_loss(label * weight - pred * weight)
# start training
model.compile(loss={'output1': loss_fn1, 'output2': loss_fn2},
              loss_weights={'output1': 1, 'output2': 2},
              optimizer=tf.keras.optimizers.Adam())
# model.fit(dataset, epochs=5)  # fails with the 5-tuple dataset
model.fit_generator(generator(), steps_per_epoch=10, epochs=5)
The output is:
Epoch 1/5
10/10 [==============================] - 7s 661ms/step - loss: 6814.9424 - output1_loss: 0.0673 - output2_loss: 3407.4375
Epoch 2/5
10/10 [==============================] - 7s 656ms/step - loss: 1858.2006 - output1_loss: 0.0669 - output2_loss: 929.0669
Epoch 3/5
10/10 [==============================] - 7s 658ms/step - loss: 1141.7914 - output1_loss: 0.0403 - output2_loss: 570.8755
Epoch 4/5
10/10 [==============================] - 7s 656ms/step - loss: 854.0343 - output1_loss: 0.0341 - output2_loss: 427.0001
Epoch 5/5
10/10 [==============================] - 7s 656ms/step - loss: 708.3558 - output1_loss: 0.0179 - output2_loss: 354.1689
I think this is a workaround rather than a real solution, so I would still welcome other answers.
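For reference, a cleaner variant might move the concatenation into the dataset pipeline itself so that model.fit can consume the dataset directly, without the Python generator. This is only a sketch that I have not verified on TF 2.0, and pack_targets is my own helper name:
def pack_targets(input, output1, output2, weight1, weight2):
    # pack each weight into its target's channel dimension, then return the
    # (inputs, targets) 2-tuple structure that Keras knows how to unpack
    target1 = tf.concat([output1, weight1], axis=-1)
    target2 = tf.concat([output2, weight2], axis=-1)
    return input, (target1, target2)

dataset = dataset.map(pack_targets)
model.fit(dataset, steps_per_epoch=10, epochs=5)
Since the dataset repeats indefinitely, steps_per_epoch is still needed here.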
Upvotes: 1