Reputation: 39
I use the following code to train a model that is supposed to detect cats and dogs. Each time I train it, it gives a different prediction score, and the output is mostly wrong. Here is my code:
#Imports
import os
from PIL import Image
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout
from tensorflow.keras import layers
from tensorflow.keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
import csv
import pathlib
from IPython.display import display
import random
plt.style.use('fivethirtyeight')
batchsize = 64
imagewidth = 32
imageheight = 32
def define_model():
    model = Sequential()
    model.add(layers.experimental.preprocessing.Rescaling(1./255, input_shape=(imageheight, imageheight, 3)))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    # compile model
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model
dataseturl = 'E:\AI\\'
data_dir = pathlib.Path("")
print(data_dir)
print("0101")
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(imageheight, imagewidth),
    batch_size=batchsize)
#random.shuffle(train_ds)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(imageheight, imagewidth),
    batch_size=batchsize)
class_names = train_ds.class_names
model = define_model()
#model.load_weights("trained.ckpt")
#model.compile(loss = 'binary_crossentropy', optimizer= 'adam', metrics = ['accuracy'])
epochs=10
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs,
    batch_size=32
)
img = keras.preprocessing.image.load_img(
    "Test3.jpg", target_size=(imageheight, imagewidth)
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = img_array / 255.0
print(img_array.shape)
img_array = tf.expand_dims(img_array, 0) # Create a batch
predictions = model.predict(img_array)
score = predictions[0]
print(class_names)
print(score)
print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[np.argmax(predictions[0])], 100 * np.max(predictions[0]))
)
model.save_weights("trained.ckpt")
The code either sticks to one output, such as predicting cat for every picture, or it gets the prediction wrong, for example a cat is predicted as a dog and vice versa.
Upvotes: 1
Views: 314
Reputation: 167
I cannot post comments yet, so I am writing an answer.
I see your model has only one convolutional layer with 32 filters. With a single layer it is unlikely to learn good features. Try adding more convolutional layers, for example as in the sketch below.
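A minimal sketch of a deeper stack, reusing the imports and constants from your question; the 64/128 filter counts are illustrative, not tuned. I compile it with sparse_categorical_crossentropy because image_dataset_from_directory yields integer labels by default:

def define_deeper_model():
    # Same Rescaling front end as in the question, followed by three Conv/Pool blocks.
    model = Sequential()
    model.add(layers.experimental.preprocessing.Rescaling(
        1. / 255, input_shape=(imageheight, imagewidth, 3)))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    # sparse_categorical_crossentropy matches the integer labels that
    # image_dataset_from_directory produces with its default label_mode='int'.
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model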
How many images does your training set have? Did you check whether the values of your class_names variable correspond to your images? Try plotting a few images of your dataset with their labels, just to be sure the labels are assigned to your images correctly (see the sketch below).
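A quick sanity check, assuming the train_ds and class_names variables from your code and that plt is matplotlib.pyplot: show the first nine training images together with the labels the dataset assigned to them.

plt.figure(figsize=(6, 6))
for images, labels in train_ds.take(1):  # one batch is enough for a sanity check
    for i in range(9):
        plt.subplot(3, 3, i + 1)
        # images come out of image_dataset_from_directory as float32 in [0, 255]
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[int(labels[i])])  # label the dataset assigned
        plt.axis("off")
plt.show()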
Monitor your training and validation accuracy. If your validation accuracy is much lower than your training accuracy, chances are your model is overfitting. Use regularization methods to avoid overfitting, e.g. add a Dropout layer or use kernel_regularizer (sketch below).
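As a sketch, assuming the history object returned by your model.fit call and the Dense/Dropout imports you already have; the 0.5 dropout rate and 1e-4 L2 weight are illustrative starting points, not tuned values:

from tensorflow.keras import regularizers

# Compare the accuracy curves from training; a large, growing gap between the
# two lines is the usual sign of overfitting.
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.legend()
plt.show()

# Possible replacement for the dense head in define_model(): an L2 penalty on
# the hidden layer plus Dropout before the output layer.
regularized_head = [
    Dense(128, activation='relu', kernel_regularizer=regularizers.l2(1e-4)),
    Dropout(0.5),
    Dense(2, activation='softmax'),
]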
Look at this post and see if you can get better results.
Upvotes: 3