David

Reputation: 21

TFWhisperForConditionalGeneration model.generate() returns repetitions of first word in sequence after finetuning

I fine-tuned TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") on the German subset of mozilla-foundation/common_voice_11_0. The training process looks fine (the validation loss decreases asymptotically and the WER drops), but when I run inference with the fine-tuned model, the output just consists of one repeated word:

['<|startoftranscript|><|de|><|transcribe|><|notimestamps|> der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der der']

When running inference with the pretrained-only version of whisper-tiny (TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")), I get a more or less proper transcription. The transcription also works when I pass decoder_input_ids and run inference through the model's forward method, model(input_features_tensor, decoder_input_ids=input_ids_tensor, training=False). But as soon as I load my fine-tuned weights via model.load_weights(), the model produces the repetitions shown above.
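
For context, the teacher-forced forward check I mean looks roughly like this (a sketch; input_features_tensor and input_ids_tensor are built as in the inference code further down):

# one teacher-forced decoder step: feed the start token explicitly and
# greedily pick the next token from the logits of the last decoder position
outputs = model(input_features_tensor, decoder_input_ids=input_ids_tensor, training=False)
next_token_id = int(tf.argmax(outputs.logits[0, -1, :]))
print(tokenizer.decode([next_token_id]))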

It seems like the autoregressive behavior of the decoder no longer works properly once the fine-tuned weights are loaded, since the model always repeats the first word of the sequence.

I'm wondering whether my way of fine-tuning is correct (especially the DataCollator part, where I pass the decoder_input_ids) and why model.generate() behaves like this during inference.
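
For reference, the seq2seq convention that Hugging Face models follow is that decoder_input_ids are the labels shifted right by one position, starting with the decoder start token, so that the decoder input at position i is used to predict the label at position i (when only labels are passed, the model performs this shift internally). A minimal TF sketch of that shift, purely as an illustration (this shift_tokens_right helper and the -100 handling are assumptions, not part of my code):

def shift_tokens_right(labels, decoder_start_token_id, pad_token_id):
    # prepend the decoder start token and drop the last label so that the
    # decoder input at position i lines up with the label at position i
    batch_size = tf.shape(labels)[0]
    start_tokens = tf.fill((batch_size, 1), decoder_start_token_id)
    shifted = tf.concat([start_tokens, labels[:, :-1]], axis=-1)
    # replace any ignored label positions (-100) with the pad token
    return tf.where(shifted == -100, pad_token_id, shifted)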

Here is an excerpt of my training code:

Requirements: Python 3.10.13, tensorflow 2.9.1, transformers 4.37.2

import json
import os
from typing import Union
from tqdm.auto import tqdm
from pathlib import Path
from datasets import load_from_disk
from transformers import (
    WhisperProcessor,
    WhisperFeatureExtractor,
    WhisperTokenizerFast,
    TFWhisperForConditionalGeneration,
    WhisperConfig
)
from whisper_normalizer.basic import BasicTextNormalizer
import tensorflow as tf
import evaluate
from fire import Fire
import wandb
from wandb.integration.keras import WandbMetricsLogger

# training hyperparameters (values assumed; their definitions are not part of this excerpt)
BATCH_SIZE = 16
EPOCHS = 10

class DataCollator:
    def __init__(self, processor: WhisperProcessor):
        self.processor = processor

    def __call__(self, features):
        # convert list of examples to batch
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        
        # pad the labels
        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="tf")
        labels = labels_batch.input_ids

        # remove the BOS token if necessary
        first_tokens_equal_bos = tf.reduce_all(tf.equal(labels[:, 0], self.processor.tokenizer.bos_token_id))
        first_tokens_equal_bos = first_tokens_equal_bos.numpy()
        if first_tokens_equal_bos:
            labels = labels[:, 1:]

        # process input features similarly
        input_features = [{"input_features": feature["input_features"]} for feature in features]
        batch = self.processor.feature_extractor.pad(input_features, return_tensors="tf")

        # add processed labels and decoder input ids to the batch
        batch["labels"] = labels
        batch["decoder_input_ids"] = labels
        batch["decoder_attention_mask"] = labels_batch.attention_mask

        return batch

def train(language: str, data_path: str, load_from_checkpoint: bool = False, num_proc: int = None):

    model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
    # should be learned by the model
    model.config.forced_decoder_ids = None
    model.config.suppress_tokens = []
    model.config.use_cache = False

    feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-tiny")
    tokenizer = WhisperTokenizerFast.from_pretrained(
        "openai/whisper-tiny", language=language, task="transcribe"
    )

    dataset = load_from_disk(data_path)

    # combine feature extractor and tokenizer into a single processor for the collator
    processor = WhisperProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    data_collator = DataCollator(processor)
    tf_ds_train = dataset["train"].to_tf_dataset(
            columns=["input_features", 'decoder_input_ids', "decoder_attention_mask"],
            label_cols=["labels"],
            shuffle=True,
            batch_size=BATCH_SIZE,
            collate_fn=data_collator,
            )

    tf_ds_val = dataset["validation"].to_tf_dataset(
            columns=["input_features",'decoder_input_ids', "decoder_attention_mask"],
            label_cols=["labels"],
            shuffle=False,
            batch_size=BATCH_SIZE,
            collate_fn=data_collator,
            )
    
    tf_ds_test = dataset["test"].to_tf_dataset(
            columns=["input_features",'decoder_input_ids', "decoder_attention_mask"],
            label_cols=["labels"],
            shuffle=False,
            batch_size=BATCH_SIZE,
            collate_fn=data_collator,
            )

    learning_rate = 6.25e-6
    warmup_steps = 50

    optimizer = tf.keras.optimizers.legacy.Adam(
        learning_rate=learning_rate,
        decay=0.01,
    )
    
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)


    model.compile(
        optimizer=optimizer,
        loss=loss,
    )

    print("Finetuning...")
    history = model.fit(
        tf_ds_train,
        validation_data=tf_ds_val,
        epochs=EPOCHS
    )
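
The saving step is not part of the excerpt above; the weights and config that the inference script below loads are written out along these lines (a sketch, with SAVE_DIR as defined in the inference script):

# persist config and fine-tuned weights for the inference script
model.config.to_json_file(os.path.join(SAVE_DIR, "config.json"))
model.save_weights(os.path.join(SAVE_DIR, "model_weights.h5"))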

My inference code looks as follows:

import json
import os
from transformers import (
    WhisperProcessor,
    WhisperFeatureExtractor,
    WhisperTokenizer,
    TFWhisperForConditionalGeneration,
    WhisperConfig
)
import tensorflow as tf
from datasets import load_from_disk

LANGUAGE = "de"
SAVE_DIR = f"./tf-whisper-tiny-{LANGUAGE}"

processor = WhisperProcessor.from_pretrained(
    "openai/whisper-tiny", language="german", task="transcribe"
)

forced_decoder_ids = processor.get_decoder_prompt_ids(language=LANGUAGE, task="transcribe", no_timestamps=False)

print(f"Load model weights and config from {SAVE_DIR}")

with open(os.path.join(SAVE_DIR, "config.json"), "r") as json_file:
    config_dict = json.load(json_file)
model_config = WhisperConfig.from_dict(config_dict)
# note: from_pretrained is a classmethod, so an instance constructed from
# model_config would be discarded; the fine-tuned weights are loaded on top
model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
model.load_weights(os.path.join(SAVE_DIR, "model_weights.h5"))

model.generation_config.forced_decoder_ids = forced_decoder_ids
model.config.forced_decoder_ids = forced_decoder_ids


feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-tiny")
tokenizer = WhisperTokenizer.from_pretrained(
    "openai/whisper-tiny", language="german", task="transcribe"
)

dataset = load_from_disk("common-voice-de-preprocessed-debug")
input_features = dataset["test"][0]["input_features"]
input_ids = dataset["test"][0]["labels"]

input_features_tensor = tf.convert_to_tensor(input_features, dtype=tf.float32)
input_features_tensor = tf.expand_dims(input_features_tensor, axis=0)

# input_ids_tensor is only used for the teacher-forced forward check described above
input_ids_tensor = tf.convert_to_tensor([processor.tokenizer.bos_token_id], dtype=tf.int32)
input_ids_tensor = tf.expand_dims(input_ids_tensor, axis=0)

generated_ids = model.generate(
    inputs=input_features_tensor,
    max_new_tokens=150,
)
print(generated_ids)

transcription = tokenizer.batch_decode(generated_ids, skip_special_tokens=False)
print(transcription)
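
To narrow down whether the autoregressive loop itself is the problem, the decoding can also be done manually, one token at a time, instead of calling generate() (a debugging sketch built on the objects above; note that the language/task tokens are not forced here, so the model has to predict them itself):

# manual greedy decoding: re-run the forward pass with the growing prefix
decoded = [model.config.decoder_start_token_id]  # <|startoftranscript|>
for _ in range(30):
    prefix = tf.convert_to_tensor([decoded], dtype=tf.int32)
    logits = model(input_features_tensor, decoder_input_ids=prefix, training=False).logits
    next_id = int(tf.argmax(logits[0, -1, :]))
    decoded.append(next_id)
    if next_id == tokenizer.eos_token_id:
        break
print(tokenizer.decode(decoded, skip_special_tokens=False))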

Upvotes: 0

Views: 38

Answers (0)
