Sakshi Singhal

Reputation: 47

Input 0 of layer lstm_9 is incompatible with the layer: expected ndim=3, found ndim=4. Full shape received: [None, 300, 300, 1]

The code is executed over an image dataset, and on running the code below I am getting the ValueError above. Help me figure out how to fix this error.

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, LSTM
import pickle
import numpy as np

X = np.array(pickle.load(open("X.pickle","rb")))
Y = np.array(pickle.load(open("Y.pickle","rb")))

#scaling our image data
X = X/255.0

model = Sequential()
print(X.shape)
print(Y.shape)
#model.add(Conv2D(64 ,(3,3), input_shape = X.shape[1:]))
model.add(Conv2D(64 ,(3,3), input_shape = X.shape[1:]))
# print(X.shape)
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
print(X.shape)
print(Y.shape)

model.add(Conv2D(128 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
print(X.shape)
print(Y.shape)

model.add(Conv2D(256 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
print(X.shape)
print(Y.shape)

model.add(Conv2D(512 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
print(X.shape)
print(Y.shape)

model.add(Flatten())
print(X.shape)
print(Y.shape)

model = Sequential()
model.add(LSTM(128, input_shape=(X.shape[1:]), activation='relu', return_sequences=True))
model.add(Dropout(0.2))

model.add(LSTM(128, activation='relu'))
model.add(Dropout(0.2))

model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))

model.add(Dense(10, activation='softmax'))

opt = tf.keras.optimizers.Adam(lr=1e-3, decay=1e-5)


model.compile(loss='binary_crossentropy', optimizer=opt,
             metrics=['accuracy'])

model.fit(X, Y, batch_size=32, epochs = 2, validation_split=0.1)

The above code produces the following output (I printed the shapes repeatedly just to inspect the resulting arrays):

(90, 300, 300, 1) (90,)
(90, 300, 300, 1) (90,)
(90, 300, 300, 1) (90,)
(90, 300, 300, 1) (90,)
(90, 300, 300, 1) (90,)
(90, 300, 300, 1) (90,)

As per the error, only a small change to the input dimensions fed into the first LSTM layer should be needed. Can you please make that change in the same code?
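For context, an LSTM layer expects a 3D input of shape (batch, timesteps, features), while the data here is a 4D image batch of shape (batch, 300, 300, 1), which is what triggers the ndim error. A minimal sketch of one way to satisfy the LSTM, assuming the same 300x300 grayscale images (X_demo and seq_model are illustrative names, and the random data is only for demonstration):

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Reshape

# Illustrative random "images": shape (4, 300, 300, 1) -> ndim=4
X_demo = np.random.rand(4, 300, 300, 1).astype("float32")

# An LSTM wants (batch, timesteps, features) -> ndim=3, so the channel
# axis must be merged away first, e.g. treat each image row as a
# timestep of 300 features:
seq_model = Sequential([
    Reshape((300, 300), input_shape=(300, 300, 1)),  # (batch, 300, 300)
    LSTM(128),
])
print(seq_model(X_demo).shape)  # (4, 128)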

Upvotes: 0

Views: 319

Answers (1)

Marco Cerliani

Reputation: 22021

# In addition to the imports from the question, Reshape is needed here:
from tensorflow.keras.layers import Reshape

model = Sequential()

model.add(Conv2D(64 ,(3,3), input_shape = (300,300,1)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))

model.add(Conv2D(128 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))

model.add(Conv2D(256 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))

model.add(Conv2D(512 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))

# after four Conv(3x3, valid) + MaxPool(2x2) blocks the feature map is 16 x 16 x 512;
# treat the 16 rows as timesteps of 16*512 features for the LSTM
model.add(Reshape((16, 16*512)))
model.add(LSTM(128, activation='relu', return_sequences=True))
model.add(Dropout(0.2))

model.add(LSTM(128, activation='relu'))
model.add(Dropout(0.2))

model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))

model.add(Dense(10, activation='softmax'))

opt = tf.keras.optimizers.Adam(lr=1e-3, decay=1e-5)


model.compile(loss='binary_crossentropy', optimizer=opt,
             metrics=['accuracy'])

model.summary()
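Why (16, 16*512)? With 'valid' 3x3 convolutions and 2x2 max pooling, the spatial size shrinks as 300 -> 298 -> 149 -> 147 -> 73 -> 71 -> 35 -> 33 -> 16, so the last pooling layer outputs a 16x16 grid of 512-channel features, and the Reshape turns each of the 16 rows into one timestep of 16*512 = 8192 features for the LSTM. A quick sanity check of that shape arithmetic (illustrative only):

size = 300
for _ in range(4):
    size = size - 2    # 3x3 convolution, 'valid' padding
    size = size // 2   # 2x2 max pooling
print(size)            # 16 -> feature map is 16 x 16 x 512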

Upvotes: 1
