Reputation: 498
I have trained a Keras model with a custom layer. The model trains perfectly fine and gets saved, but when I try to load it back, loading fails. Below is the code for the custom class:
from keras.engine.base_layer import Layer

class AttentionLayer(Layer):
    def __init__(self, attention_dim, **kwargs):
        super(AttentionLayer, self).__init__(name="attention_layer")
        self.init = initializers.get("normal")
        self.supports_masking = True
        self.attention_dim = attention_dim
        super(AttentionLayer, self).__init__(**kwargs)

    def get_config(self):
        config = {
            "init": self.init,
            "supports_masking": self.supports_masking,
            "attention_dim": self.attention_dim,
        }
        base_config = super(AttentionLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
I then try to load the model in the following way:
model = keras.models.load_model("model.h5",
                                custom_objects={"AttentionLayer": AttentionLayer})
But I keep getting
ValueError: Unknown config_item: RandomNormal. Please ensure this object is passed to the 'custom_objects' argument.
Almost all the related questions I found on StackOverflow suggest exactly this (passing the class via custom_objects), but unfortunately it does not work in my case. Could someone point out what I am doing wrong?
The link to my colab is here.
Upvotes: 1
Views: 1208
Reputation: 17239
I've tried to resolve your issue, but before that, here's one thing I'd like you to look at:
preds = Dense(2, activation="softmax")(l_att_sent)
model = Model(review_input, preds)
model.compile(loss="binary_crossentropy",
              optimizer="rmsprop", metrics=["accuracy"])
If you set Dense(2, activation="softmax") in the last layer, you should normally use categorical_crossentropy as the loss and the corresponding metrics (the metric above is fine since you used the string identifier). But I saw that you've used binary_crossentropy as the loss function, so I assume what you probably need in your last layer is Dense(1, activation="sigmoid") instead. Here is some reference for that: a) Selecting loss and metrics for the Tensorflow model. b) Neural Network and Binary classification Guidance. A quick sketch of the two consistent pairings is shown below.
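Just to make the pairing concrete, here is a minimal sketch (reusing review_input and l_att_sent from the model further down; which option applies depends on how your labels are encoded):

# Option A: binary labels of shape (N, 1) with values 0/1
preds = Dense(1, activation="sigmoid")(l_att_sent)
model = Model(review_input, preds)
model.compile(loss="binary_crossentropy", optimizer="rmsprop", metrics=["accuracy"])

# Option B: one-hot labels of shape (N, 2)
# preds = Dense(2, activation="softmax")(l_att_sent)
# model = Model(review_input, preds)
# model.compile(loss="categorical_crossentropy", optimizer="rmsprop", metrics=["accuracy"])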
In your code, I think the problem comes from putting "init": self.init into the config returned by get_config; you don't need it there anyway. self.init is the initializer object created by:
from tensorflow.keras import initializers
self.init = initializers.get("normal")
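and initializers.get("normal") returns a RandomNormal instance. When load_model reads the saved config back, it finds that raw object and doesn't know how to rebuild it, which is exactly what the Unknown config_item: RandomNormal error complains about. Dropping it from get_config (as in the working code below) is the simplest fix. If you ever do want the initializer to be configurable and saved, a common pattern is to serialize it explicitly. A minimal sketch, where kernel_initializer is a name I'm introducing (not part of your original layer), with build/call omitted for brevity:

from tensorflow.keras import initializers, layers

class AttentionLayer(layers.Layer):
    def __init__(self, attention_dim, kernel_initializer="normal", **kwargs):
        super(AttentionLayer, self).__init__(**kwargs)
        # keep the initializer object for use in build(), but remember how to rebuild it
        self.init = initializers.get(kernel_initializer)
        self.attention_dim = attention_dim

    def get_config(self):
        config = {
            "attention_dim": self.attention_dim,
            # serialize() turns the initializer into a plain dict that
            # load_model knows how to deserialize again
            "kernel_initializer": initializers.serialize(self.init),
        }
        base_config = super(AttentionLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))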
For future reference, here is the working code end-to-end.
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras import initializers
from tensorflow.keras import layers
from tensorflow.keras.layers import (Embedding, Dense, Input, GRU,
                                     Bidirectional, TimeDistributed)
from tensorflow.keras.models import Model
class AttentionLayer(layers.Layer):
    def __init__(self, attention_dim, supports_masking=True, **kwargs):
        super(AttentionLayer, self).__init__(name="attention_layer")
        self.init = initializers.get("normal")
        self.supports_masking = supports_masking
        self.attention_dim = attention_dim
        super(AttentionLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) == 3
        self.W = K.variable(self.init((input_shape[-1], self.attention_dim)), name="W")
        self.b = K.variable(self.init((self.attention_dim, )), name="b")
        self.u = K.variable(self.init((self.attention_dim, 1)), name="u")
        self._trainable_weights = [self.W, self.b, self.u]
        super(AttentionLayer, self).build(input_shape)

    def call(self, x, mask=None):
        uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b))
        ait = K.dot(uit, self.u)
        ait = K.squeeze(ait, -1)
        ait = K.exp(ait)
        if mask is not None:
            ait *= K.cast(mask, K.floatx())
        ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        ait = K.expand_dims(ait)
        weighted_input = x * ait
        output = K.sum(weighted_input, axis=1)
        return output

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1])

    def get_config(self):
        config = {
            "supports_masking": self.supports_masking,
            "attention_dim": self.attention_dim,
        }
        base_config = super(AttentionLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
MAX_SENTENCE_LEN = 10
MAX_SENTENCES = 15
MAX_NUM_WORDS = 200
EMBEDDING_DIM = 10
VALIDATION_SPLIT = 0.2
# we use Embedding layer to convert positive integers into dense vectors of
# fixed size
embedding_layer = Embedding(
    100,
    EMBEDDING_DIM,
    input_length=MAX_SENTENCE_LEN,
    trainable=True,
    mask_zero=True
)
sentence_input = Input(shape=(MAX_SENTENCE_LEN, ), dtype="int32")
embedded_sequences = embedding_layer(sentence_input)
l_lstm = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences)
l_att = AttentionLayer(100)(l_lstm)
sentEncoder = Model(sentence_input, l_att)
sentEncoder.summary() # OK
review_input = Input(shape=(MAX_SENTENCES, MAX_SENTENCE_LEN), dtype="int32")
review_encoder = TimeDistributed(sentEncoder)(review_input)
l_lstm_sent = Bidirectional(GRU(100, return_sequences=True))(review_encoder)
l_att_sent = AttentionLayer(100)(l_lstm_sent)
preds = Dense(1, activation="sigmoid")(l_att_sent)
model = Model(review_input, preds)
model.summary() # OK
Dummy Data
import tensorflow as tf
import numpy as np
x_train = np.random.randint(0, 10, (100,15,10)); print(x_train.shape)
y_train = np.random.randint(2, size=(100, 1)); print(y_train.shape)
(100, 15, 10)
(100, 1)
Train
filepath = "model.h5"
model.compile(loss="binary_crossentropy", optimizer="rmsprop",
              metrics=["accuracy"])
model.fit(x_train, y_train, epochs=2, verbose=2)
model.save(filepath)
Epoch 1/2
142ms/step - loss: 0.6964 - accuracy: 0.4100
Epoch 2/2
144ms/step - loss: 0.5919 - accuracy: 0.5500
Reload and Check
from tensorflow.keras.models import load_model
new_model = load_model(filepath,
                       custom_objects={"AttentionLayer": AttentionLayer})
# Let's check:
np.testing.assert_allclose(
    model.predict(x_train), new_model.predict(x_train)
) # OK
Upvotes: 2