Leo

Reputation: 173

callback causes ValueError

The code was working fine for the past few months, but it broke after something I changed, and I cannot restore it.

def bi_LSTM_model(X_train, y_train, X_test, y_test, num_classes, loss,
                  batch_size=68, units=128, learning_rate=0.005,
                  epochs=20, dropout=0.2, recurrent_dropout=0.2):
    
    class myCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs={}):
            if (logs.get('acc') > 0.90):
                print("\nReached 90% accuracy so cancelling training!")
                self.model.stop_training = True
                
    callbacks = myCallback()

    model = tf.keras.models.Sequential()
    model.add(Masking(mask_value=0.0, input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(Bidirectional(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout)))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=loss,
                  optimizer=adamopt,
                  metrics=['accuracy'])

    history = model.fit(X_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_data=(X_test, y_test),
                        verbose=1,
                        callbacks=[callbacks])

    score, acc = model.evaluate(X_test, y_test,
                                batch_size=batch_size)

    yhat = model.predict(X_test)

    return history, yhat





def duo_bi_LSTM_model(X_train, y_train, X_test, y_test, num_classes, loss,
                      batch_size=68, units=128, learning_rate=0.005,
                      epochs=20, dropout=0.2, recurrent_dropout=0.2):
    
    class myCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs={}):
            if (logs.get('acc') > 0.90):
                print("\nReached 90% accuracy so cancelling training!")
                self.model.stop_training = True
                     
    callbacks = myCallback()

        
    
    model = tf.keras.models.Sequential()
    model.add(Masking(mask_value=0.0, input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(Bidirectional(
        LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout, return_sequences=True)))
    model.add(Bidirectional(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout)))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=loss,
                  optimizer=adamopt,
                  metrics=['accuracy'])

    history = model.fit(X_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_data=(X_test, y_test),
                        verbose=1,
                        callbacks=[callbacks])

    score, acc = model.evaluate(X_test, y_test,
                                batch_size=batch_size)

    yhat = model.predict(X_test)

    return history, yhat

Basically, I have defined two models, and whenever the second one runs, the error comes up.

BTW, I use tf.keras.backend.clear_session() between the models.

ValueError: Tensor("Adam/bidirectional/forward_lstm/kernel/m:0", shape=(), dtype=resource) must be from the same graph as Tensor("bidirectional/forward_lstm/kernel:0", shape=(), dtype=resource).

The only modification I ever made to the code was to move the callback class out of the two models and put it before them, to reduce code redundancy.

Upvotes: 1

Views: 364

Answers (1)

Tinu

Reputation: 2513

The problem is not the callback function. The error shows up because you pass the same optimizer to two different models, which is not possible since they are two different computational graphs.

Try defining the optimizer inside the function where you define the model, just before the model.compile() call.
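For example, a minimal sketch of that change, assuming adamopt was an Adam optimizer built from the learning_rate parameter that each function already receives:

def bi_LSTM_model(X_train, y_train, X_test, y_test, num_classes, loss,
                  batch_size=68, units=128, learning_rate=0.005,
                  epochs=20, dropout=0.2, recurrent_dropout=0.2):

    ...

    # create a fresh optimizer for this model instead of reusing a global one
    adamopt = tf.keras.optimizers.Adam(learning_rate=learning_rate)

    model.compile(loss=loss,
                  optimizer=adamopt,
                  metrics=['accuracy'])

    ...

That way each call builds its own optimizer on the same graph as the model it compiles, and the second function no longer references optimizer variables created for the first model.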

Upvotes: 1
