Saqib Nazir

Reputation: 11

Error when checking input: expected input_49 to have shape (512, 512, 1) but got array with shape (28, 28, 1)

I'm working with a model that takes the MNIST dataset and generates output, but I want to feed it my own dataset. The images in my dataset are 512x512, while the model takes images of size 28x28. When I resize my images to 28x28 the model works fine, but I need to feed it the images at 512x512. Can anyone help me with this problem? I'm sharing the complete code here as well. You can see under the "Load the dataset" comment that I load my own dataset and resize it to 28x28, but I actually want to load the images at size 512x512.

import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
from keras.layers import (Activation, BatchNormalization, Conv2D, Dense,
                          Dropout, Flatten, Input, LeakyReLU, Reshape,
                          UpSampling2D, ZeroPadding2D)
from keras.models import Model, Sequential
from keras.optimizers import Adam


class DCGAN():
    def __init__(self):
        # Input shape
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100

        optimizer = Adam(0.0002, 0.5)

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
            optimizer=optimizer,
            metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # The generator takes noise as input and generates imgs
        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator takes generated images as input and determines validity
        valid = self.discriminator(img)

        # The combined model (stacked generator and discriminator)
        # trains the generator to fool the discriminator
        self.combined = Model(z, valid)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)

    def build_generator(self):

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())  # 7x7 -> 14x14
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())  # 14x14 -> 28x28
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img)

    def build_discriminator(self):

        model = Sequential()

        model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)

    def train(self, epochs, batch_size=128, save_interval=50):

        # Load the dataset
        #(X_train, _), (_, _) = mnist.load_data()
        files_name = os.listdir('./Dataset/Train')
        train_x = []
        #train_y = []
        for fname in files_name:
            # Read each image as grayscale
            train_x.append(cv2.imread(os.path.join('./Dataset/Train', fname), 0))
        train_x = np.asarray(train_x).reshape(-1, 512, 512, 1)

        # Downscale to the 28x28 input the model expects
        train_x_28 = []
        for i in range(len(train_x)):
            train_x_28.append(cv2.resize(train_x[i], (28, 28)))

        X_train = np.asarray(train_x_28).reshape(-1, 28, 28)

        # Rescale -1 to 1
        X_train = X_train / 127.5 - 1.
        X_train = np.expand_dims(X_train, axis=3)

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random half of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            # Sample noise and generate a batch of new images
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
            gen_imgs = self.generator.predict(noise)

            # Train the discriminator (real classified as ones and generated as zeros)
            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # Train the generator (wants discriminator to mistake images as real)
            g_loss = self.combined.train_on_batch(noise, valid)

            # Plot the progress
            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100 * d_loss[1], g_loss))

            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                self.save_imgs(epoch)

    def save_imgs(self, epoch):
        r, c = 2, 2
        noise = np.random.normal(0, 1, (r * c, self.latent_dim))
        gen_imgs = self.generator.predict(noise)

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("dcgan/images/mnist_%d.png" % epoch)
        plt.close()


if __name__ == '__main__':
    dcgan = DCGAN()
    dcgan.train(epochs=4000, batch_size=32, save_interval=50)

Error when checking input: expected input_49 to have shape (512, 512, 1) but got array with shape (28, 28, 1)

Upvotes: 1

Views: 159

Answers (1)

DomJack

Reputation: 4183

You'll need to modify your generator to generate images of the same size as your dataset. If you just want something that runs, you can keep repeating your upsampling block:

model.add(UpSampling2D())
model.add(Conv2D(num_filters, kernel_size=3, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))

Typical networks halve the number of filters for each upsample. This keeps the number of operations constant for each resolution (though doubles memory requirements for each layer).

The deeper you go, the more trouble you'll have. You may need to increase your latent dimension size, or the number of filters in the initial dense/reshape. How to do this well is an open research question.
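For concreteness, here is a minimal sketch of what `build_generator` could look like for 512x512 output, applying the repeated-block idea above inside the class from the question. It is an illustration, not a tuned architecture: the 4x4 starting resolution (4 doubled seven times is 512), the initial filter count of 512, the halving schedule, and the floor of 16 filters are all assumptions you would need to experiment with.

    def build_generator(self):
        # Sketch only: start from a 4x4 base and upsample 7 times (4 * 2**7 = 512).
        model = Sequential()

        model.add(Dense(512 * 4 * 4, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((4, 4, 512)))

        num_filters = 512
        for _ in range(7):  # 4 -> 8 -> 16 -> 32 -> 64 -> 128 -> 256 -> 512
            model.add(UpSampling2D())
            model.add(Conv2D(num_filters, kernel_size=3, padding="same"))
            model.add(BatchNormalization(momentum=0.8))
            model.add(Activation("relu"))
            num_filters = max(num_filters // 2, 16)  # halve filters per upsample

        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img)

You would also need to set `self.img_rows = self.img_cols = 512` in `__init__` (and skip the 28x28 resize in `train`) so the discriminator and the real images match the generator's output shape.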

Upvotes: 1
