Reputation: 21
I want to use the code from https://morioh.com/p/a07857cbc76d to match faces from the ChokePoint dataset for my face project. I extracted the faces and want to use VGGFace with cosine similarity to match them, but I got the error below. Could you please help me?
ValueError: in user code:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:1478 predict_function *
return step_function(self, iterator)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:1468 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:1259 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2730 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:3417 _call_for_each_replica
return fn(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:1461 run_step **
outputs = model.predict_step(data)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:1434 predict_step
return self(x, training=False)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py:998 __call__
input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/input_spec.py:274 assert_input_compatibility
', found shape=' + display_shape(x.shape))
ValueError: Input 0 is incompatible with layer vggface_resnet50: expected shape=(None, 224, 224, 3), found shape=(None, 1, 224, 224, 3)
Here is the code for extracting faces:
def extract_face_from_image(image_path, required_size=(224, 224)):
    image = plt.imread(image_path)
    detector = MTCNN()
    faces = detector.detect_faces(image)

    face_images = []
    for face in faces:
        x1, y1, width, height = face['box']
        x2, y2 = x1 + width, y1 + height
        face_boundary = image[y1:y2, x1:x2]

        face_image = Image.fromarray(face_boundary)
        face_image = face_image.resize(required_size)
        face_array = asarray(face_image)
        face_images.append(face_array)

    return face_images
extracted_face = extract_face_from_image('/content/301.jpg')
plt.imshow(extracted_face[0])
plt.show()
And this is the code for matching faces:
def get_model_scores(faces):
    samples = asarray(faces, 'float32')
    samples = preprocess_input(samples, version=2)
    model = VGGFace(model='resnet50', include_top=False,
                    input_shape=(224, 224, 3), pooling='avg')
    return model.predict(samples)

faces = [extract_face_from_image(image_path) for image_path in ['/content/125.jpg',
                                                                '/content/126.jpg']]
model_scores = get_model_scores(faces)

if cosine(model_scores[0], model_scores[1]) <= 0.4:
    print("Faces Matched")
Upvotes: 2
Views: 1369
Reputation: 172
Old question, but changing extract_face_from_image to this:
def extract_face_from_image(image_path, required_size=(224, 224)):
    # load image and detect faces
    image = plt.imread(image_path)
    detector = MTCNN()
    faces = detector.detect_faces(image)

    # extract the bounding box from the requested face
    x1, y1, width, height = faces[0]['box']
    x2, y2 = x1 + width, y1 + height

    # extract the face
    face_boundary = image[y1:y2, x1:x2]
    image = cv2.resize(face_boundary, required_size)

    return image
worked perfectly fine. This version returns a single face array instead of a list, so asarray(faces) in get_model_scores produces shape (N, 224, 224, 3) rather than (N, 1, 224, 224, 3), which is what the input-spec error was complaining about.
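If you would rather keep the original list-returning extract_face_from_image, an alternative (not from the original answer, just a sketch assuming exactly one face is detected per image) is to squeeze the extra axis out of the batch before calling predict:

import numpy as np
from keras_vggface.vggface import VGGFace
from keras_vggface.utils import preprocess_input
from scipy.spatial.distance import cosine

def get_model_scores(faces):
    # faces arrives as a list of one-element lists, e.g. [[face1], [face2]],
    # so asarray gives shape (N, 1, 224, 224, 3); drop the extra axis.
    samples = np.asarray(faces, 'float32')
    samples = np.squeeze(samples, axis=1)  # -> (N, 224, 224, 3)
    samples = preprocess_input(samples, version=2)
    model = VGGFace(model='resnet50', include_top=False,
                    input_shape=(224, 224, 3), pooling='avg')
    return model.predict(samples)

# extract_face_from_image here is the original list-returning version from the question
faces = [extract_face_from_image(p) for p in ['/content/125.jpg', '/content/126.jpg']]
model_scores = get_model_scores(faces)

if cosine(model_scores[0], model_scores[1]) <= 0.4:
    print("Faces Matched")

If an image can contain several faces you would need to decide which one to keep (or flatten the nested lists) before building the batch, so the accepted fix above is the simpler route.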
Upvotes: 1