Reputation: 896
I have successfully created my Keras Sequential model and trained it for a while. Now I am trying to make some predictions, but it fails even when I use the same data that was used in the training phase.
I am getting this error: {ValueError}Error when checking input: expected embedding_1_input to have shape (2139,) but got array with shape (1,)
However, when I check the input I am passing in, its shape is (2139,). I would like to know if anyone has an idea of what might be causing this.
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from keras.utils import np_utils

df = pd.read_csv('../../data/parsed-data/data.csv')
df = ModelUtil().remove_entries_based_on_threshold(df, 'Author', 2)
#show_column_distribution(df, 'Author')
y = df.pop('Author')
le = LabelEncoder()
le.fit(y)
encoded_Y = le.transform(y)
tokenizer, padded_sentences, max_sentence_len \
= PortugueseTextualProcessing().convert_corpus_to_number(df)
ModelUtil().save_tokenizer(tokenizer)
vocab_len = len(tokenizer.word_index) + 1
glove_embedding = PortugueseTextualProcessing().load_vector(tokenizer)
embedded_matrix = PortugueseTextualProcessing().build_embedding_matrix(glove_embedding, vocab_len, tokenizer)
cv_scores = []
kfold = StratifiedKFold(n_splits=4, shuffle=True, random_state=7)
models = []
nn = NeuralNetwork()
nn.build_baseline_model(embedded_matrix, max_sentence_len, vocab_len, len(np_utils.to_categorical(encoded_Y)[0]))
# Separate some validation samples
val_data, X, Y = ModelUtil().extract_validation_data(padded_sentences, encoded_Y)
for train_index, test_index in kfold.split(X, Y):
    # convert integers to dummy variables (i.e. one hot encoded)
    dummy_y = np_utils.to_categorical(Y)
    print("TRAIN:", train_index, "TEST:", test_index)
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = dummy_y[train_index], dummy_y[test_index]
    nn.train(X_train, y_train, 100)
    scores = nn.evaluate_model(X_test, y_test)
    cv_scores.append(scores[1] * 100)
    models.append(nn)
print("%.2f%% (+/- %.2f%%)" % (np.mean(cv_scores), np.std(cv_scores)))
best_model = models[cv_scores.index(max(cv_scores))]
best_model.save_model()
best_model.predict_entries(X[0])
Methods that perform the model creation and the prediction:
def build_baseline_model(self, emd_matrix, long_sent_size, vocab_len, number_of_classes):
    self.model = Sequential()
    embedding_layer = Embedding(vocab_len, 100, weights=[emd_matrix], input_length=long_sent_size,
                                trainable=False)
    self.model.add(embedding_layer)
    self.model.add(Dropout(0.2))
    self.model.add(Flatten())
    # softmax performing better than relu
    self.model.add(Dense(number_of_classes, activation='softmax'))
    self.model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
    return self.model

def predict_entries(self, entry):
    predictions = self.model.predict_classes(entry)
    # show the inputs and predicted outputs
    print("X=%s, Predicted=%s" % (entry, predictions[0]))
    return predictions
X[0].shape evaluates to (2139,).
Upvotes: 0
Views: 63
Reputation: 26
In your case you should apply a reshape so you get an array with a single element that contains the sentence.
X_reshape = X[0].reshape(1, 2139)
best_model.predict_entries(X_reshape)
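For reference, a minimal sketch of the same idea using np.expand_dims (the zeros array below is only a stand-in for X[0]): predict expects a leading batch dimension, so a single padded sentence of shape (2139,) has to become (1, 2139).
import numpy as np

sample = np.zeros(2139, dtype='int32')      # stand-in for X[0], one padded sentence
batched = np.expand_dims(sample, axis=0)    # (2139,) -> (1, 2139): a batch with one sample
print(batched.shape)                        # (1, 2139)
# best_model.predict_entries(batched)       # equivalent to the reshape call above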
Upvotes: 1