Dawood Ahmad
Dawood Ahmad

Reputation: 3

CGAN Training Issues: Discriminator Accuracy at 100% and Generator Loss at 0

I am trying to train a Conditional Generative Adversarial Network (CGAN) to generate synthetic leaf images. However, during training, my discriminator's accuracy quickly reaches 100%, and the generator's loss drops to 0. I am using TensorFlow and Keras for this implementation. My goal is to understand why this is happening and how to address it to ensure proper training of both the generator and the discriminator. Here is the relevant code for my CGAN model and the training loop.

Dataset Details I am using a Leaf image datasets of image dimensions 256x256, across 11 classes.

Model Code:

import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Embedding, LeakyReLU, Reshape, Concatenate, Conv2D, Conv2DTranspose, Flatten, Dropout, BatchNormalization
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers.schedules import ExponentialDecay

def build_discriminator():
    input_image = Input(shape=(256, 256, 3), name='Input')
    embedding_input = Input(shape=(1,), name='Embedding_Input')
    embedding = Embedding(input_dim=11, output_dim=3, input_length=1, name='Embedding')(embedding_input)
    dense_embedding = Dense(256*256*3, activation='relu', name='Dense')(embedding)
    reshaped_embedding = Reshape((256, 256, 3), name='Reshape')(dense_embedding)
    concatenated = Concatenate(name='Concatenate')([input_image, reshaped_embedding])
    leaky_relu = LeakyReLU(alpha=0.2, name='Leaky_ReLU')(concatenated)
    
    conv1 = Conv2D(32, (3, 3), strides=(2, 2), padding='same', name='Conv2D_1')(leaky_relu)
    bn1 = BatchNormalization(name='BatchNorm_1')(conv1)
    act1 = LeakyReLU(alpha=0.2, name='LeakyReLU_1')(bn1)
    conv2 = Conv2D(64, (3, 3), strides=(2, 2), padding='same', name='Conv2D_2')(act1)
    bn2 = BatchNormalization(name='BatchNorm_2')(conv2)
    act2 = LeakyReLU(alpha=0.2, name='LeakyReLU_2')(bn2)
    conv3 = Conv2D(128, (3, 3), strides=(2, 2), padding='same', name='Conv2D_3')(act2)
    bn3 = BatchNormalization(name='BatchNorm_3')(conv3)
    act3 = LeakyReLU(alpha=0.2, name='LeakyReLU_3')(bn3)
    conv4 = Conv2D(256, (3, 3), strides=(2, 2), padding='same', name='Conv2D_4')(act3)
    bn4 = BatchNormalization(name='BatchNorm_4')(conv4)
    act4 = LeakyReLU(alpha=0.2, name='LeakyReLU_4')(bn4)
    conv5 = Conv2D(512, (3, 3), strides=(2, 2), padding='same', name='Conv2D_5')(act4)
    bn5 = BatchNormalization(name='BatchNorm_5')(conv5)
    act5 = LeakyReLU(alpha=0.2, name='LeakyReLU_5')(bn5)
    flatten = Flatten(name='Flatten')(act5)
    dropout = Dropout(0.4, name='Dropout')(flatten)
    output = Dense(1, activation='sigmoid', name='Output')(dropout)
    
    model = Model(inputs=[input_image, embedding_input], outputs=output, name='Discriminator')
    return model

def build_generator():
    input_layer = Input(shape=(8, 8, 3), name='Input')
    dense = Dense(256, activation='relu', name='Dense')(input_layer)
    embedding_input = Input(shape=(1,), name='Embedding_Input')
    embedding = Embedding(input_dim=11, output_dim=1024, input_length=1, name='Embedding')(embedding_input)
    reshaped_embedding = Reshape((8, 8, 16), name='Reshape_Embedding')(embedding)
    leaky_relu = LeakyReLU(alpha=0.2, name='Leaky_ReLU')(dense)
    reshaped = Reshape((8, 8, 256), name='Reshape')(leaky_relu)
    concatenated = Concatenate(name='Concatenate')([reshaped, reshaped_embedding])
    
    conv1 = Conv2DTranspose(1024, (3, 3), strides=(2, 2), padding='same', name='Conv2D_Transpose_1')(concatenated)
    bn1 = BatchNormalization()(conv1)
    act1 = LeakyReLU(alpha=0.2)(bn1)
    conv2 = Conv2DTranspose(512, (3, 3), strides=(2, 2), padding='same', name='Conv2D_Transpose_2')(act1)
    bn2 = BatchNormalization()(conv2)
    act2 = LeakyReLU(alpha=0.2)(bn2)
    conv3 = Conv2DTranspose(256, (3, 3), strides=(2, 2), padding='same', name='Conv2D_Transpose_3')(act2)
    bn3 = BatchNormalization()(conv3)
    act3 = LeakyReLU(alpha=0.2)(bn3)
    conv4 = Conv2DTranspose(128, (3, 3), strides=(2, 2), padding='same', name='Conv2D_Transpose_4')(act3)
    bn4 = BatchNormalization()(conv4)
    act4 = LeakyReLU(alpha=0.2)(bn4)
    conv5 = Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same', name='Conv2D_Transpose_5')(act4)
    bn5 = BatchNormalization()(conv5)
    act5 = LeakyReLU(alpha=0.2)(bn5)
    output = Conv2D(3, (32, 32), activation='tanh', padding='same', name='Output')(act5)
    
    model = Model(inputs=[input_layer, embedding_input], outputs=output, name='Generator')
    return model

def build_cgan(generator, discriminator):
    discriminator.trainable = False
    input_image = Input(shape=(8, 8, 3), name='CGAN_Input_Image')
    embedding_input = Input(shape=(1,), name='CGAN_Embedding_Input')
    generated_image = generator([input_image, embedding_input])
    output = discriminator([generated_image, embedding_input])
    
    model = Model(inputs=[input_image, embedding_input], outputs=output, name='CGAN')
    return model

# Learning rate schedules and optimizers
initial_learning_rate_d = 0.00005
decay_steps_d = 500
decay_rate_d = 0.96
lr_schedule_d = ExponentialDecay(
    initial_learning_rate=initial_learning_rate_d,
    decay_steps=decay_steps_d,
    decay_rate=decay_rate_d,
    staircase=True)
discriminator_optimizer = Adam(learning_rate=lr_schedule_d, beta_1=0.5)

initial_learning_rate_g = 0.0002
decay_steps_g = 500
decay_rate_g = 0.96
lr_schedule_g = ExponentialDecay(
    initial_learning_rate=initial_learning_rate_g,
    decay_steps=decay_steps_g,
    decay_rate=decay_rate_g,
    staircase=True)
generator_optimizer = Adam(learning_rate=lr_schedule_g, beta_1=0.5)

discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy', optimizer=discriminator_optimizer, metrics=['accuracy'])
generator = build_generator()
cgan = build_cgan(generator, discriminator)
cgan.compile(loss='binary_crossentropy', optimizer=generator_optimizer)

discriminator.summary()
generator.summary()
cgan.summary()

Training Code

import numpy as np
from tensorflow.keras.utils import to_categorical

def preprocess_images(images_dict, image_size=(256, 256)):
    X = []
    y = []
    for label, images in images_dict.items():
        for image in images:
            image = image.resize(image_size)
            image_array = np.array(image) / 255.0  # Normalize to [0, 1]
            X.append(image_array)
            y.append(class_names.index(label))
    return np.array(X), np.array(y)

# Prepare dataset
X_train, y_train = preprocess_images(images_dict)

# Define training parameters
batch_size = 32
epochs = 500
num_classes = len(class_names)

def train(generator, discriminator, cgan, X_train, y_train, batch_size, epochs):
    valid = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))

    for epoch in range(epochs):
        d_losses, g_losses, d_accuracies = [], [], []
        num_steps = len(X_train) // batch_size

        for step in range(num_steps):
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs, labels = X_train[idx], y_train[idx]

            noise = np.random.normal(0, 1, (batch_size, 8, 8, 3))
            gen_imgs = generator.predict([noise, labels])

            d_loss_real = discriminator.train_on_batch([imgs, labels], valid)
            d_loss_fake = discriminator.train_on_batch([gen_imgs, labels], fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
            d_losses.append(d_loss[0])
            d_accuracies.append(100 * d_loss[1])

            g_loss = cgan.train_on_batch([noise, labels], valid)
            g_losses.append(g_loss)

            if (step + 1) % 10 == 0:
                print(f"Epoch: {epoch + 1}/{epochs} | Step: {step + 1}/{num_steps} | D Loss: {d_loss[0]:.4f} | D Acc: {d_loss[1] * 100:.2f}% | G Loss: {g_loss:.4f}")

        avg_d_loss = np.mean(d_losses)
        avg_g_loss = np.mean(g_losses)
        avg_d_accuracy = np.mean(d_accuracies)

        print(f"Epoch: {epoch + 1}/{epochs} | Avg D Loss: {avg_d_loss:.4f} | Avg D Acc: {avg_d_accuracy:.2f}% | Avg G Loss: {avg_g_loss:.4f}")

train(generator, discriminator, cgan, X_train, y_train, batch_size, epochs)

I have implemented the CGAN using TensorFlow and Keras, following standard architectures for the generator and discriminator. I used the Adam optimizer with learning rate schedules for both models. I expected the discriminator and generator to learn progressively, resulting in a balanced game where the discriminator can't easily distinguish between real and generated images. However, during training, the discriminator quickly achieves 100% accuracy, and the generator loss drops to 0, indicating that the discriminator is overpowering the generator, and the generator isn't learning effectively.

Upvotes: 0

Views: 55

Answers (0)

Related Questions