Reputation: 177
I am doing text classification. I have used a Conv1D layer on top of a Keras Embedding layer and am getting a validation accuracy of 0.68. This is the dataset I am using. This is my code:
# Preprocessing
import pandas as pd
import pickle
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential, load_model
from keras.layers import Embedding, Flatten, Dense, Conv1D, MaxPooling1D, GlobalMaxPooling1D
# df = pd.read_csv('text_emotion.csv')
#
# df.drop(['tweet_id', 'author'], axis=1, inplace=True)
# df = df[~df['sentiment'].isin(['empty', 'enthusiasm', 'boredom', 'anger'])]
# df = df.sample(frac=1).reset_index(drop=True)
df = pd.read_csv('emotion_merged_dataset.csv')
labels = df['sentiment']
# texts = df['content']
texts = df['text']
print(texts.shape)
#############################################
tokenizer = Tokenizer(num_words=3000)  # keep only the 3,000 most frequent words
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
# print(sequences)
word_index = tokenizer.word_index
with open('tokenizer.pickle', 'wb') as handle:
    pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
# with open('word_index.pickle', 'rb') as handle:
# word_index_new = pickle.load(handle)
# print (word_index == word_index_new)
# print('Word index: '+str(word_index))
# print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=37)
encoder = LabelEncoder()
encoder.fit(labels)
encoded_Y = encoder.transform(labels)
from keras.utils import np_utils
labels = np_utils.to_categorical(encoded_Y)
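# Save the fitted LabelEncoder alongside the tokenizer; the test script
# below loads 'encoder.pickle' to map predicted indices back to label names.
with open('encoder.pickle', 'wb') as handle:
    pickle.dump(encoder, handle, protocol=pickle.HIGHEST_PROTOCOL)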
print ('Labels: '+str(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
print('data: '+str(data))
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
# print ('data:'+str(data[0]))
labels = labels[indices]
print(labels.shape)
model = Sequential()
model.add(Embedding(3000, 300, input_length=37))
# model.add(Flatten())
model.add(Conv1D(32, 7, activation='relu'))
model.add(MaxPooling1D(3))
model.add(Conv1D(32, 7, activation='relu'))
model.add(GlobalMaxPooling1D())
model.add(Dense(labels.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(data, labels, validation_split=0.2, epochs=10, batch_size=100)
model.save('model_keras_embedding_cnn.h5')
model.summary()  # summary() prints itself; wrapping it in print() just adds 'None'
I am pickling the tokenizer and saving the model. Then I am using the tokenizer to pre-process a sample input sentence to check the model. Here is the testing code:
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
import pickle
model = load_model('model_keras_embedding_cnn.h5')
texts = 'I am really sad'
with open('tokenizer.pickle', 'rb') as handle:
    tokenizer_new = pickle.load(handle)
tokenizer_new.fit_on_texts(texts)
sequences = tokenizer_new.texts_to_sequences(texts)
data = pad_sequences(sequences, maxlen=37)
print(model.predict_classes(data, verbose=10))
I am getting an output like:
[5 5 5 5 5 5 5 5 5 5 5 5 5 5 5].
How can I get the true class labels (like fear, anger, etc.)? Is my approach correct, i.e. saving the tokenizer and using it again? Have I messed up somewhere conceptually?

[Edit] I used inverse_transform at the recommendation of JARS:
print (encoder_new.inverse_transform(pred))
The output was this:
['neutral' 'neutral' 'neutral' 'neutral' 'neutral' 'neutral' 'neutral'
'neutral' 'neutral' 'neutral' 'neutral' 'neutral' 'neutral' 'neutral'
'neutral']
Can someone explain the output?
Upvotes: 1
Views: 1682
Reputation: 177
The credit for this answer goes to JARS. My input should have been a list of strings rather than a bare string:
texts=['I am very happy with the result']
And the result is:
['joy']
The whole code is:
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
import pickle
model = load_model('model_keras_embedding_cnn.h5')
texts=['I am very happy with the result']
with open('tokenizer.pickle', 'rb') as handle:
    tokenizer_new = pickle.load(handle)
with open('encoder.pickle', 'rb') as handle:
    encoder_new = pickle.load(handle)
# No need to call fit_on_texts again: the loaded tokenizer is already
# fitted, and re-fitting on test text would mutate its word index.
sequences = tokenizer_new.texts_to_sequences(texts)
data = pad_sequences(sequences, maxlen=37)
pred = model.predict_classes(data)
print (encoder_new.inverse_transform(pred))
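For what it's worth, predict_classes is just an argmax over the softmax output of model.predict, so the same labels can be recovered with plain predict (a small sketch, assuming the same loaded model and encoder as above):

import numpy as np

probs = model.predict(data)                 # shape (n_samples, n_classes), softmax scores
pred = np.argmax(probs, axis=1)             # class index per sample
print(encoder_new.inverse_transform(pred))  # e.g. ['joy']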
If you count the characters in the original input sentence ('I am really sad') you will find 15, and that is exactly the number of predictions I was getting: given a bare string, texts_to_sequences iterates over it character by character and treats each character as a separate text.
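You can see this behaviour in isolation with a minimal sketch using only the Keras Tokenizer (throwaway names, independent of the model above):

from keras.preprocessing.text import Tokenizer

tok = Tokenizer(num_words=3000)
tok.fit_on_texts(['I am really sad'])

# A bare string is iterated character by character: one sequence per character
print(len(tok.texts_to_sequences('I am really sad')))    # 15
# A list of strings gives one sequence per element
print(len(tok.texts_to_sequences(['I am really sad'])))  # 1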
Upvotes: 2