isaaccs

Reputation: 103

Could not interpret regularizer identifier:

I'm trying to use an attention layer with TensorFlow 2, but I get this error:


import tensorflow as tf
import numpy as np

from tensorflow.keras.callbacks import EarlyStopping, History
import os
from src.helpers.initializer import Initializer


from tensorflow.keras import initializers 
from tensorflow.keras import regularizers
from tensorflow.keras import constraints

from tensorflow.keras import activations 
from tensorflow.keras import backend as K

from tensorflow.keras.layers import Layer

def dot_product(x, kernel):
    """Backend-agnostic dot product of a 3D tensor with a weight kernel."""
    if K.backend() == 'tensorflow':
        # expand the kernel with a trailing axis so K.dot contracts correctly,
        # then squeeze that axis back out of the result
        return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
    else:
        return K.dot(x, kernel)

class AttentionWithContext(tf.keras.layers.Layer):
    """
    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
        2D tensor with shape: `(samples, features)`.
    """
    def __init__(self,
                 W_regularizer=None, u_regularizer=None, b_regularizer=None,
                 W_constraint=None, u_constraint=None, b_constraint=None,
                 bias=True, **kwargs):

        self.supports_masking = True
        self.init = tf.keras.initializers.get('glorot_uniform')

        self.W_regularizer = tf.keras.regularizers.get(W_regularizer)
        self.u_regularizer = tf.keras.regularizers.get(u_regularizer)
        self.b_regularizer = tf.keras.regularizers.get(b_regularizer)

        self.W_constraint = tf.keras.constraints.get(W_constraint)
        self.u_constraint = tf.keras.constraints.get(u_constraint)
        self.b_constraint = tf.keras.constraints.get(b_constraint)

        self.bias = bias
        super(AttentionWithContext, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) == 3

        self.W = self.add_weight(shape=(input_shape[-1], input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        if self.bias:
            self.b = self.add_weight(shape=(input_shape[-1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)

        self.u = self.add_weight(shape=(input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_u'.format(self.name),
                                 regularizer=self.u_regularizer,
                                 constraint=self.u_constraint)

        super(AttentionWithContext, self).build(input_shape)

    def compute_mask(self, input, input_mask=None):
        # do not pass the mask on to the next layers
        return None

    def call(self, x, mask=None):
        uit = dot_product(x, self.W)

        if self.bias:
            uit += self.b

        uit = K.tanh(uit)
        ait = dot_product(uit, self.u)

        a = K.exp(ait)

        # apply the mask after the exp; the weights are re-normalized below
        if mask is not None:
            a *= K.cast(mask, K.floatx())

        # add epsilon to the sum to avoid NaNs when it is close to zero
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)

    def compute_output_shape(self, input_shape):
        return input_shape[0], input_shape[-1]

inputs = tf.keras.Input(shape=(windows_size, 1))
x = tf.keras.layers.LSTM(64, dropout=0.1, return_sequences=True, activation='relu')(inputs)
x = tf.keras.layers.LSTM(64, dropout=0.1, return_sequences=True, activation='relu')(x)
x = AttentionWithContext(x)
o = tf.keras.layers.Dense(output_size, activation='softmax')(x)

model = tf.keras.Model(inputs=inputs, outputs=o)
optim = tf.keras.optimizers.Adam()
model.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])



~/anomaly_detection_benchmark/src/models/TF_Deeplog.py in __init__(self, W_regularizer, u_regularizer, b_regularizer, W_constraint, u_constraint, b_constraint, bias, **kwargs)
     38         self.init = tf.keras.initializers.get('glorot_uniform')
     39 
---> 40         self.W_regularizer = tf.keras.regularizers.get(W_regularizer)
     41         self.u_regularizer = tf.keras.regularizers.get(u_regularizer)
     42         self.b_regularizer = tf.keras.regularizers.get(b_regularizer)

/opt/anaconda3/envs/logpai/lib/python3.7/site-packages/tensorflow/python/keras/regularizers.py in get(identifier)
    371     else:
    372         raise ValueError(
--> 373             'Could not interpret regularizer identifier: {}'.format(identifier))

ValueError: Could not interpret regularizer identifier: Tensor("lstm_1/PartitionedCall:1", shape=(None, 10, 64), dtype=float32)

Upvotes: 1

Views: 1300

Answers (1)

runDOSrun

Reputation: 11005

You're using x = AttentionWithContext(x) instead of x = AttentionWithContext()(x), which passes the LSTM output tensor to the first constructor argument, W_regularizer (hence the error message). The first pair of parentheses takes the layer's constructor parameters; the second pair is the input to the layer's call function:

x = AttentionWithContext(W_regularizer=None)(x=x)
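For illustration, a minimal sketch (reusing windows_size, output_size, and AttentionWithContext from the question): regularizers.get accepts None or a string identifier, but raises exactly the ValueError above when handed a tensor, which is what the misplaced x became.

import tensorflow as tf

# None and string identifiers are fine; a Tensor is not interpretable
print(tf.keras.regularizers.get(None))  # None
print(tf.keras.regularizers.get('l2'))  # an L2 regularizer instance

# Corrected wiring: the first () takes constructor arguments, the second (x) is the input
inputs = tf.keras.Input(shape=(windows_size, 1))
x = tf.keras.layers.LSTM(64, dropout=0.1, return_sequences=True, activation='relu')(inputs)
x = tf.keras.layers.LSTM(64, dropout=0.1, return_sequences=True, activation='relu')(x)
x = AttentionWithContext()(x)  # not AttentionWithContext(x)
o = tf.keras.layers.Dense(output_size, activation='softmax')(x)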

Upvotes: 2
