Reputation: 683
I would like to identify trees in an image of size 6950 x 3715 with 3 channels (R, G, B), using a Keras model trained on 256 x 256 images with 3 channels (R, G, B). However, when predicting on the large image, I get the error "Error when checking input: expected conv2d_input to have 4 dimensions, but got array with shape (25006, 17761, 3)".
How can I predict on this image using the model that has been built, and export the identified trees to a shapefile?
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential, model_from_json
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Dense, Flatten, Dropout, Activation, Conv2D, MaxPooling2D
import cv2, glob, os, random
import numpy as np
import pandas as pd
tf.enable_eager_execution()
AUTOTUNE = tf.data.experimental.AUTOTUNE
def read_labeled_list(label_list_file):
    labels = []
    for label in label_list_file:
        with open(label) as f_input:
            for line in f_input:
                labels.append(int(line.split()[0]))
    return labels
def load_and_preprocess_image(path):
    image = tf.read_file(path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize_images(image, [256, 256])
    image /= 255.0
    return image
all_image_paths = list(glob.glob('C:/LEARN_TENSORFLOW/images/*.jpg'))
all_image_paths = [str(path) for path in all_image_paths]
image_count = len(all_image_paths)
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
all_image_labels = read_labeled_list(glob.glob('C:/LEARN_TENSORFLOW/labels/*.txt'))
label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_labels, tf.int64))
image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
ds = image_label_ds.shuffle(buffer_size=image_count)
ds = ds.repeat()
BATCH_SIZE = 32
ds = ds.batch(BATCH_SIZE)
ds = ds.prefetch(buffer_size=AUTOTUNE)
######BUILD THE MODEL:
model = Sequential()
model.add(Conv2D(32,(3,3), activation = 'relu',input_shape=[256,256,3]))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Conv2D(64,(3,3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
######COMPILE THE MODEL:
model.compile(optimizer="adam",
              loss='binary_crossentropy',
              metrics=['accuracy'])
len(model.trainable_variables)
model.summary()
steps_per_epoch=tf.ceil(len(all_image_paths)/10).numpy()
model.fit(ds, epochs=1, steps_per_epoch=2)
####PREDICT TEST IMAGE
img_array = cv2.imread('C:/deeplearning/test_stack.jpg')
img_array = np.array(img_array).reshape(-1, 6950, 3715, 3)
img_array = img_array / 255.0
predictions = model.predict(img_array)
Upvotes: 0
Views: 302
Reputation: 30
Since you trained the model on 256 x 256 x 3 images (the first layer is a Conv2D layer whose input has shape 256 x 256 x 3), the image you predict on must also be 256 x 256 x 3. You will have to resize the image to the input size and add a batch dimension, since the model expects 4-dimensional input.
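For example, a minimal sketch of that resizing step with OpenCV, reusing the test image path from the question (the BGR-to-RGB conversion is an assumption, added to match the RGB training data):
import cv2
import numpy as np
img = cv2.imread('C:/deeplearning/test_stack.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; the training images were RGB
img = cv2.resize(img, (256, 256))           # match the 256 x 256 training size
img = img.astype('float32') / 255.0         # same scaling as during training
img = np.expand_dims(img, axis=0)           # add the batch dimension -> (1, 256, 256, 3)
predictions = model.predict(img)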
Upvotes: 1
Reputation: 956
It looks like the problem is that you are trying to predict on an image that doesn't have the right size. In general, you should apply the same preprocessing to the images you evaluate on as to the images you train on, because the underlying assumption is that the training and test sets are drawn from the same distribution. For example, this gave me a prediction:
g = tf.Graph()
with g.as_default():
    t = load_and_preprocess_image('C:/deeplearning/test_stack.jpg')
    t = tf.reshape(t, [1, 256, 256, 3])  # make single image into a batch of images
    with tf.Session() as sess:
        img_array = sess.run(t)
predictions = model.predict(img_array)
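Note that resizing the whole 6950 x 3715 scene to a single 256 x 256 input only gives one prediction for the entire image. To locate individual trees you would typically slide a 256 x 256 window over the scene and predict each tile. A minimal sketch of that idea (the non-overlapping stride and the plain pixel offsets are assumptions; converting the offsets to map coordinates for a shapefile would additionally need the image's georeferencing):
import cv2
import numpy as np
scene = cv2.imread('C:/deeplearning/test_stack.jpg')
scene = cv2.cvtColor(scene, cv2.COLOR_BGR2RGB).astype('float32') / 255.0
patch = 256
detections = []
for y in range(0, scene.shape[0] - patch + 1, patch):
    for x in range(0, scene.shape[1] - patch + 1, patch):
        tile = scene[y:y + patch, x:x + patch]
        score = model.predict(tile[np.newaxis, ...])[0, 0]
        detections.append((x, y, score))  # pixel offset of each tile and its score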
Upvotes: 1