beginnerofdl

Reputation: 11

How to Call This Loss Function from Decode?

I'm building a model that combines a BiLSTM with a masked CRF. The BiLSTM is the standard one from TensorFlow Keras, and the masked CRF is adapted from this link. I have built the model as follows. This is the MaskedCRF class:

import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow_addons.layers import crf

class MaskedCRF(tf.keras.layers.Layer):
    def __init__(self, num_output, use_mask, label2idx_map, **kwargs):
        super(MaskedCRF, self).__init__(**kwargs)
        self.num_output = num_output
        self.label2idx_map = label2idx_map

        self.mask_tran_matrix = None
        if use_mask:
            self.mask_tran_matrix = self.get_mask_trans()
    def decode(self, logits, label_ids, lengths):
        # Transition matrix for the CRF, initialized with a truncated normal
        initializer = tf.keras.initializers.TruncatedNormal(stddev=0.02)
        trans = tf.Variable(
            initial_value=initializer(shape=(self.num_output, self.num_output)),
            trainable=True,
            name="crf_transitions"
        )

        # Clamp disallowed transitions down to the mask's large negative values
        if self.mask_tran_matrix is not None:
            trans = tf.minimum(trans, self.mask_tran_matrix)

        log_likelihood, trans = tfa.text.crf_log_likelihood(
            inputs=logits,
            tag_indices=label_ids,
            sequence_lengths=lengths,
            transition_params=trans
        )
        print(trans)
        per_example_loss = -log_likelihood
        loss = tf.math.reduce_mean(per_example_loss)
        # crf_decode returns (decoded tag indices, best path score)
        label_pred, best_score = tfa.text.crf_decode(
            potentials=logits, transition_params=trans, sequence_length=lengths
        )

        return loss, per_example_loss, label_pred
    
    
    def get_mask_trans(self):
        size = len(self.label2idx_map)
        tag_lst = self.label2idx_map.keys()
    
        mask_mat = np.ones(shape=(size, size), dtype=np.float32)
        mask_tran_matrix = np.ones(shape=(size, size), dtype=np.float32)
    
        is_scheme_bioes = False
        flag_e = False
        flag_s = False
        for tag in tag_lst:
            if tag.startswith("E-"):
                flag_e = True
    
            if tag.startswith("S-"):
                flag_s = True
    
        if flag_e and flag_s:
            is_scheme_bioes = True
            print("Skema penandaan format BIOES terdeteksi.")
        else:
            print("Skema penandaan format BIO terdeteksi.")
    
        for col_tag, col_index in self.label2idx_map.items():
            if col_tag.startswith("I-"):
                slot_name = col_tag.replace("I-", "")
                begin_slot = "B-" + slot_name
                for row_tag, row_index in self.label2idx_map.items():
                    # Adjust your logic here based on your actual requirements
                    # Example: Set mask_tran_matrix based on conditions
                    if is_scheme_bioes:
                        # Adjust this condition based on your actual logic
                        if row_tag != begin_slot and row_tag != col_tag:
                            row_index = min(row_index, size - 1)  # Ensure row_index is within bounds
                            col_index = min(col_index, size - 1)  # Ensure col_index is within bounds
                            mask_tran_matrix[row_index, col_index] = -1.0
                    else:
                        # Adjust this condition based on your actual logic
                        if row_tag != col_tag:
                            row_index = min(row_index, size - 1)  # Ensure row_index is within bounds
                            col_index = min(col_index, size - 1)  # Ensure col_index is within bounds
                            mask_tran_matrix[row_index, col_index] = -1.0
    
        # Convert mask matrices to TensorFlow tensors
        mask_mat = tf.convert_to_tensor(mask_mat, dtype=tf.float32)
        mask_tran_matrix = tf.convert_to_tensor(mask_tran_matrix, dtype=tf.float32)
    
        # Element-wise minimum using TensorFlow operations
        result_mask = tf.minimum(mask_mat, mask_tran_matrix)
    
        # Print shapes for debugging
        print("mask_mat shape:", mask_mat.shape)
        print("mask_tran_matrix shape:", mask_tran_matrix.shape)
    
        return 100 * result_mask
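
To make the mask easier to check, here is a minimal sketch of how it can be inspected with a small toy BIO tag map (tag2idx_demo and its labels are illustrative only, not my real tag set):

tag2idx_demo = {"O": 0, "B-PER": 1, "I-PER": 2, "B-LOC": 3, "I-LOC": 4}
demo_crf = MaskedCRF(num_output=len(tag2idx_demo), use_mask=True, label2idx_map=tag2idx_demo)

# Entries of -100 mark transitions that decode() will clamp via tf.minimum;
# entries of 100 leave the learned transition scores unchanged.
print(demo_crf.mask_tran_matrix)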

The BiLSTM-Masked CRF model I have built is as follows:


from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Embedding, LSTM, Dense, Dropout, TimeDistributed, Bidirectional

# Input layer
input_layer = Input(shape=(MAX_LEN,), dtype='int32')

# Embedding layer
embedding_layer = Embedding(input_dim=number_words+1,
                            input_length=MAX_LEN,
                            output_dim=DIM_EMBEDDINGS,
                            trainable=True)(input_layer)

# BiLSTM layer
bilstm_layer = Bidirectional(LSTM(units=DIM_EMBEDDINGS,
                                  return_sequences=True,
                                  dropout=0.5,
                                  recurrent_dropout=0.5))(embedding_layer)

lstm_layer = LSTM(units=DIM_EMBEDDINGS*2,
                  return_sequences=True,
                  dropout=0.5,
                  recurrent_dropout=0.5)(bilstm_layer)

# TimeDistributed layer
# dense_layer = Dense(number_tags+1, activation="relu")(lstm_layer)
dense_layer = Dense(number_tags, activation='softmax')(lstm_layer)

masked_crf = MaskedCRF(num_output=number_tags, use_mask=True, label2idx_map=tag2idx)

# loss, per_example_loss, label_pred = masked_crf.decode(logits=dense_layer, label_ids=y, lengths=MAX_LEN)

output = masked_crf(dense_layer)
base_model = Model(input_layer, output)
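
As a quick sanity check (illustrative only), the per-timestep logits that feed the CRF come out of dense_layer with the [batch_size, max_seq_len, num_tags] shape that the tfa.text CRF functions expect:

# Illustrative shape check: the CRF functions expect logits of shape
# [batch_size, max_seq_len, num_tags].
print(dense_layer.shape)  # (None, MAX_LEN, number_tags)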

I want to compile this model as follows:


loss, _, _ = masked_crf.decode(logits=dense_layer, label_ids=y, lengths=MAX_LEN)
base_model.compile(
    optimizer="adam",
    loss=loss
)

base_model.summary()

But I face a problem on the loss-function line. The error message is:


TypeError                                 Traceback (most recent call last)
<ipython-input-185-89009da12e28> in <cell line: 1>()
----> 1 loss,_,_ = masked_crf.decode(logits=dense_layer, label_ids=y, lengths=MAX_LEN)
      2 base_model.compile(
      3     optimizer="adam",
      4     loss=loss
      5 )

5 frames
/usr/local/lib/python3.10/dist-packages/keras/src/utils/traceback_utils.py in error_handler(*args, **kwargs)
     68             # To get the full stack trace, call:
     69             # `tf.debugging.disable_traceback_filtering()`
---> 70             raise e.with_traceback(filtered_tb) from None
     71         finally:
     72             del filtered_tb

TypeError: Exception encountered when calling layer "tf.cond_19" (type TFOpLambda).

To be compatible with tf.function, Python functions must return zero or more Tensors or ExtensionTypes or None values; in compilation of <function crf_sequence_score.<locals>._single_seq_fn at 0x7a396c1249d0>, found return value of type KerasTensor, which is not a Tensor or ExtensionType.

Call arguments received by layer "tf.cond_19" (type TFOpLambda):
  • pred=tf.Tensor(shape=(), dtype=bool)
  • true_fn=<function crf_sequence_score.<locals>._single_seq_fn at 0x7a396c1249d0>
  • false_fn=<function crf_sequence_score.<locals>._multi_seq_fn at 0x7a396dac11b0>
  • name=None

May I have suggestions on how to fix this problem?
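
For context, what I understand so far is that compile() in Keras expects loss to be a callable with the signature (y_true, y_pred), not a tensor computed ahead of time from the symbolic dense_layer. Below is only a sketch of that callable shape, not a working fix for my model: masked_crf_loss is a name I made up, it assumes all sequences are padded to a fixed length MAX_LEN, and the transition variable that decode() currently creates on every call would presumably have to be created once (for example in the layer's build()) before training could work.

def masked_crf_loss(y_true, y_pred):
    # One sequence length per example; assumes every sequence is padded to MAX_LEN
    batch_size = tf.shape(y_pred)[0]
    lengths = tf.fill([batch_size], MAX_LEN)
    loss_value, _, _ = masked_crf.decode(logits=y_pred,
                                         label_ids=tf.cast(y_true, tf.int32),
                                         lengths=lengths)
    return loss_value

base_model.compile(optimizer="adam", loss=masked_crf_loss)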

Upvotes: 1

Views: 27

Answers (0)
