Reputation: 213
I'm building a CNN model to classify images. However, I suspect the model is not learning, because the accuracy and loss values stay constant across epochs. See my code below:
Building the train, test and validation image datasets
import pandas as pd
from keras_preprocessing.image import ImageDataGenerator
import numpy as np
#Creating three datasets from the 3 .txt files.
trainingfile = pd.read_table('data/training.txt', delim_whitespace=True, names=('class', 'image'))
testingfile = pd.read_table('data/testing.txt', delim_whitespace=True, names=('class', 'image'))
validationfile = pd.read_table('data/validation.txt', delim_whitespace=True, names=('class', 'image'))
#Converting the numeric class labels to strings
trainingfile = trainingfile.replace([0, 1, 2], ['class0', 'class1', 'class2'])
testingfile = testingfile.replace([0, 1, 2], ['class0', 'class1', 'class2'])
validationfile = validationfile.replace([0, 1, 2], ['class0', 'class1', 'class2'])
#Data augmentation
datagen = ImageDataGenerator()
train_datagen = ImageDataGenerator(
    #Apply only a little rotation, since the photographs will generally be centered
    rotation_range=5,
    zoom_range=0.1)
#Final datasets containing the images
train = train_datagen.flow_from_dataframe(
    dataframe=trainingfile, directory="data/", x_col="image", y_col="class",
    class_mode="categorical", target_size=(256, 256), color_mode='rgb', batch_size=32)
test = datagen.flow_from_dataframe(
    dataframe=testingfile, directory="data/", x_col="image", y_col="class",
    class_mode="categorical", target_size=(256, 256), color_mode='rgb', batch_size=32)
#No data augmentation for the validation dataset.
validation = datagen.flow_from_dataframe(
    dataframe=validationfile, directory="data/", x_col="image", y_col="class",
    class_mode="categorical", target_size=(256, 256), color_mode='rgb', batch_size=32)
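(As a quick sanity check, not part of the original post, you can confirm the label mapping and batch shapes the generators produce; the variable names below match the code above.)
#Sanity check: inspect the label mapping and one batch
print(train.class_indices)           # e.g. {'class0': 0, 'class1': 1, 'class2': 2}
x_batch, y_batch = next(train)
print(x_batch.shape, y_batch.shape)  # expected: (32, 256, 256, 3) (32, 3)
print(x_batch.min(), x_batch.max())  # pixels stay in [0, 255], since no rescale was set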
CNN model
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten, Activation, Dropout, MaxPooling2D, BatchNormalization
from keras.constraints import maxnorm
#Creating the model
model = Sequential()
#1st convolution block
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(256, 256, 3)))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
#2nd convolution block
model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
#3rd convolution block
model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
#4th convolution block
model.add(Conv2D(96, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
#5th convolution block
model.add(Conv2D(32, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
#Dropout
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
#model.add(Dropout(0.3))
model.add(Dense(3, activation='softmax'))
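(A quick check I would add here, not in the original post: print the model summary to confirm that five pooling stages still leave a valid feature map for a 256x256 input.)
model.summary()  # with 256x256 inputs, the 5th block should output roughly 6x6x32 feature maps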
from keras import regularizers, optimizers
from keras.optimizers import RMSprop
from keras.callbacks import EarlyStopping
# Compile model
model.compile(optimizer='adam', loss="categorical_crossentropy", metrics=["accuracy"])
# Early stopping
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10)
Training the model
h = model.fit_generator(generator=train,
                        validation_data=validation,
                        epochs=50,
                        callbacks=[es])
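(To see whether the metrics really stay constant, here is a minimal sketch for plotting the History object returned by fit_generator; note the key names 'accuracy'/'val_accuracy' may be 'acc'/'val_acc' depending on your Keras version.)
import matplotlib.pyplot as plt
#Plot the training curves recorded during fitting
plt.plot(h.history['loss'], label='train loss')
plt.plot(h.history['val_loss'], label='val loss')
plt.plot(h.history['accuracy'], label='train acc')
plt.plot(h.history['val_accuracy'], label='val acc')
plt.legend()
plt.show()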
Results
[screenshots of the training output, showing constant accuracy and loss values, omitted]
This is the first time I have used fit_generator, so perhaps I'm not using it properly?
Upvotes: 0
Views: 411
Reputation: 1235
As I can see from the result images, you are training for just 1 epoch. This could be because the EarlyStopping callback is too strict. Try setting patience=3 on the EarlyStopping callback:
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3)
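If your Keras version supports it, restore_best_weights=True also makes the callback roll the model back to the weights from its best epoch when training stops (a suggestion beyond the original answer):
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3,
                   restore_best_weights=True)  # revert to the best val_loss weights on stop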
Overfitting example: [example plot omitted]
Check this post to learn more about how to deal with overfitting.
Upvotes: 1