Chandra

Reputation: 37

Why am I getting the error "ValueError: No gradients provided for any variable:" while using train_step() in Keras?

I'm having trouble with tf.keras while executing the following code:

class Whole_model(tf.keras.Model):
    def __init__(self, EEG_gen_model, emg_feature_extractor, eeg_feature_extractor, seq2seq_model):
        super(Whole_model, self).__init__()
        self.EEG_gen_model = EEG_gen_model
        self.emg_feature_extractor = emg_feature_extractor
        self.eeg_feature_extractor = eeg_feature_extractor
        self.seq2seq_model = seq2seq_model

    def compile(self, EEG_gen_optimizer, emg_feature_optim, eeg_feature_optim,
                seq2seq_optim, EEG_gen_loss, seq2seq_loss_fn, gen_mae, accuracy):
        super(Whole_model, self).compile()
        # We can use a different optimizer for each submodel.
        self.EEG_gen_optimizer = EEG_gen_optimizer
        self.emg_feature_optim = emg_feature_optim
        self.eeg_feature_optim = eeg_feature_optim
        self.seq2seq_optim = seq2seq_optim
        self.EEG_gen_loss = EEG_gen_loss
        self.seq2seq_loss_fn = seq2seq_loss_fn
        self.gen_mae = gen_mae
        self.accuracy = accuracy

    def train_step(self, data):
        no_Epochs = 3
        x_train, [y_train_eeg, y] = data
        y = tf.reshape(y, [-1, no_Epochs, 5])
        n_samples_per_epoch = x_train.shape[1]
        print(n_samples_per_epoch)
        emg_input = tf.reshape(x_train, [-1, n_samples_per_epoch, 1])
        y_eeg_true = tf.reshape(y_train_eeg, [-1, n_samples_per_epoch, 1])
        print(emg_input.shape, y_eeg_true.shape)

        # Train the EEG generator
        with tf.GradientTape() as tape:
            EEG_Gen = self.EEG_gen_model(emg_input)
            print(EEG_Gen.shape, y_eeg_true.shape)
            gen_model_loss = self.EEG_gen_loss(y_eeg_true, EEG_Gen)
            gen_MAE = self.gen_mae(y_eeg_true, EEG_Gen)
        grads = tape.gradient(gen_model_loss, self.EEG_gen_model.trainable_weights)
        self.EEG_gen_optimizer.apply_gradients(zip(grads, self.EEG_gen_model.trainable_weights))

        # Train the seq2seq model
        emg_inp = x_train
        eeg_inp = self.EEG_gen_model(emg_inp)
        emg_enc_seq = self.emg_feature_extractor(emg_inp)
        eeg_enc_seq = self.eeg_feature_extractor(eeg_inp)
        len_epoch = input_layer.shape[1]  # NOTE: input_layer is not defined in this snippet
        inputs = tf.reshape(input_layer, [-1, no_Epochs, len_epoch])
        with tf.GradientTape() as tape:
            outputs = self.seq2seq_model(inputs)
            seq2seq_loss = self.seq2seq_loss_fn(y, outputs)
            print('loss', seq2seq_loss)
            accuracy = self.accuracy(y, outputs)
        grads = tape.gradient(seq2seq_loss, self.seq2seq_model.trainable_weights)
        self.seq2seq_optim.apply_gradients(zip(grads, self.seq2seq_model.trainable_weights))

        # Train the EEG feature extractor
        emg_inp = x_train
        eeg_inp = self.EEG_gen_model(emg_inp)
        eeg_enc_seq = self.emg_feature_extractor(emg_inp)
        with tf.GradientTape() as tape:
            eeg_enc_seq = self.eeg_feature_extractor(eeg_inp)
            len_epoch = input_layer.shape[1]
            inputs = tf.reshape(input_layer, [-1, no_Epochs, len_epoch])
            outputs = self.seq2seq_model(inputs)
            seq2seq_loss = self.seq2seq_loss_fn(y, outputs)
            print('loss', seq2seq_loss)
        grads = tape.gradient(seq2seq_loss, self.eeg_feature_extractor.trainable_weights)
        self.eeg_feature_optim.apply_gradients(zip(grads, self.eeg_feature_extractor.trainable_weights))

        # Train the EMG feature extractor
        emg_inp = x_train
        eeg_inp = self.EEG_gen_model(emg_inp)
        with tf.GradientTape() as tape:
            eeg_enc_seq = self.emg_feature_extractor(emg_inp)
            eeg_enc_seq = self.eeg_feature_extractor(eeg_inp)
            len_epoch = input_layer.shape[1]
            inputs = tf.reshape(input_layer, [-1, no_Epochs, len_epoch])
            outputs = self.seq2seq_model(inputs)
            seq2seq_loss = self.seq2seq_loss_fn(y, outputs)
            print('loss', seq2seq_loss)
            accuracy = self.accuracy(y, outputs)
        grads = tape.gradient(seq2seq_loss, self.emg_feature_extractor.trainable_weights)
        print('check', outputs.shape, y.shape, grads)
        self.emg_feature_optim.apply_gradients(zip(grads, self.emg_feature_extractor.trainable_weights))

        return {"seq2seq_loss": seq2seq_loss, "gen_model_loss": gen_model_loss,
                "gen_MAE": gen_MAE, "accuracy": accuracy}

    def test_step(self, data):
        x_emg, y = data
        no_Epochs = 3
        y = tf.reshape(y, [-1, no_Epochs, 5])
        emg_inp = tf.keras.layers.Input(3000, 1)
        eeg_inp = self.EEG_gen_model(emg_inp)
        emg_enc_seq = self.emg_feature_extractor(emg_inp)
        eeg_enc_seq = self.eeg_feature_extractor(eeg_inp)
        len_epoch = input_layer.shape[1]
        inputs = tf.reshape(input_layer, [-1, no_Epochs, len_epoch])
        outputs = self.seq2seq_model(inputs)
        sleep_classifier_model = tf.keras.Model(inputs=emg_inp, outputs=outputs)

        y_pred = sleep_classifier_model(x_emg, training=False)  # Forward pass
        # Compute our own loss
        loss = self.seq2seq_loss_fn(y, y_pred,
                                    regularization_losses=self.seq2seq_loss_fn)
        accuracy = accuracy(y, y_pred)

        return {"seq2seq_loss": seq2seq_loss, "accuracy": accuracy}

 

model = Whole_model(EEG_gen_model=EEG_gen_model,
                    emg_feature_extractor=emg_feature_extractor,
                    eeg_feature_extractor=eeg_feature_extractor,
                    seq2seq_model=seq2seq_model)
model.compile(
    EEG_gen_optimizer=tf.optimizers.Adam(lr=1e-3, beta_1=0.9, beta_2=0.999,
                                         epsilon=1e-07, amsgrad=False),
    emg_feature_optim=tf.optimizers.Adam(lr=1e-3, beta_1=0.9, beta_2=0.999,
                                         epsilon=1e-07, amsgrad=False),
    eeg_feature_optim=tf.optimizers.Adam(lr=1e-3, beta_1=0.9, beta_2=0.999,
                                         epsilon=1e-07, amsgrad=False),
    seq2seq_optim=tf.optimizers.Adam(lr=1e-3, beta_1=0.9, beta_2=0.999,
                                     epsilon=1e-07, amsgrad=False),
    seq2seq_loss_fn=tf.keras.losses.CategoricalCrossentropy(),
    EEG_gen_loss=tf.keras.losses.MSE,
    gen_mae=tf.keras.losses.MAE,
    accuracy=tf.keras.metrics.Accuracy())
model.fit(x_train_emg, [x_train_eeg, y_train], batch_size=3, epochs=1,
          validation_split=None, validation_data=(x_test_emg, y_test),
          shuffle=False)

After executing this code, I get the following error:

ValueError: No gradients provided for any variable: ['conv1d_8/kernel:0', 'conv1d_8/bias:0', 'conv1d_12/kernel:0', 'conv1d_12/bias:0', 'conv1d_9/kernel:0', 'conv1d_9/bias:0', 'conv1d_13/kernel:0', 'conv1d_13/bias:0', 'conv1d_10/kernel:0', 'conv1d_10/bias:0', 'conv1d_14/kernel:0', 'conv1d_14/bias:0', 'conv1d_11/kernel:0', 'conv1d_11/bias:0', 'conv1d_15/kernel:0', 'conv1d_15/bias:0'].

How do I fix it? Thank you in advance.

Thanks, Andrey. I have already tried defining every submodule, such as self.EEG_gen_model, as a tf.keras.layers.Layer with a call() method, as follows:

class EEG_gen_layer(tf.keras.layers.Layer):
    def __init__(self):
        super(EEG_gen_layer, self).__init__()
        n_samples_per_epoch = 3000
        print(n_samples_per_epoch)
        inputs = tf.keras.layers.Input(
            batch_shape=[None, n_samples_per_epoch, 1], name="input")
        lstm1 = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(units=128, return_sequences=True),
            merge_mode='concat')(inputs)
        lstm2 = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(units=128, return_sequences=True),
            merge_mode='concat')(lstm1)
        dens1 = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(
            32, kernel_initializer=tf.keras.initializers.glorot_normal(),
            activation=tf.nn.relu))(lstm2)
        dens2 = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(
            1, kernel_initializer=tf.keras.initializers.glorot_normal(),
            activation=None))(dens1)
        self.EEG_gen_model = tf.keras.Model(inputs=inputs, outputs=dens2)

    def call(self, inp_emg, training=False):
        x = self.EEG_gen_model(inp_emg)
        return x

In the same way, I defined every submodule of the model as a tf.keras.layers.Layer, but I am still getting the same error.

Please help me fix this error.

Upvotes: 0

Views: 99

Answers (1)

Andrey

Reputation: 6367

You are calling your model in train_step:

eeg_inp = self.EEG_gen_model(emg_inp)

But your model has no call() method.
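
For illustration, here is a minimal sketch of the subclassing pattern Keras expects (the class name EEGGenModel and the layer sizes are placeholders, not your exact architecture): create the layers in __init__ and define the forward pass in call().

class EEGGenModel(tf.keras.Model):
    def __init__(self):
        super(EEGGenModel, self).__init__()
        # Layers are created once here ...
        self.lstm = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(units=128, return_sequences=True))
        self.out = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(1))

    # ... and call() defines the forward pass, so a GradientTape can
    # trace from the loss back to this model's trainable weights.
    def call(self, inputs, training=False):
        x = self.lstm(inputs)
        return self.out(x)

With this pattern, self.EEG_gen_model(emg_input) inside train_step is a traced forward pass, and tape.gradient() can reach the model's variables.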

Upvotes: 1
