Reputation: 39
I want to place a transparent image at the location where cv2
detects my eyes. I've accomplished the two main steps separately, and now I need to combine them.
For instance, here is the output with the image transparency working, and here is the output with the eye detection working. The script and images are below; I'm not sure how to combine the two.
import os
import numpy
import cv2
from PIL import Image
from os.path import join, dirname, realpath

def upload_files():
    # https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
    face_cascade = cv2.CascadeClassifier('/Users/matt/Python/LazerEyes/haarcascade_frontalface_default.xml')
    # https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml
    eye_cascade = cv2.CascadeClassifier('/Users/matt/Python/LazerEyes/haarcascade_eye.xml')

    img = cv2.imread('new.png')
    dot = cv2.imread('dot_transparent.png', cv2.IMREAD_UNCHANGED)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray_to_place = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    img_h, img_w = gray.shape
    img_to_place_h, img_to_place_w = gray_to_place.shape

    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            dot = cv2.resize(dot, (eh, ew))

            # Prepare pixel-wise alpha blending
            dot_alpha = dot[..., :3] / 255.0
            dot_alpha = numpy.repeat(dot_alpha[..., numpy.newaxis], 3, axis=2)
            dot = dot[..., :3]

            resized_img = cv2.resize(dot, (eh, ew), interpolation=cv2.INTER_AREA)
            resized_img_h, resized_img_w, _ = resized_img.shape

            #pointsOnFace = []
            #integersToAppend = eh
            #pointsOnFace.append(integersToAppend)
            #print(pointsOnFace)

            roi_color[ey:ey+resized_img_h, ex:ex+resized_img_w, :] = resized_img

    cv2.imwrite('out.png', img)
Upvotes: 1
Views: 113
Reputation: 18925
Incorporating my earlier answer into the given code (and minimizing the resulting code), the solution might look like this:
import cv2
import numpy as np

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

img = cv2.imread('new.jpg')
dot = cv2.imread('dot_transparent.png', cv2.IMREAD_UNCHANGED)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
    eyes = eye_cascade.detectMultiScale(gray[y:y+h, x:x+w])
    for (ex, ey, ew, eh) in eyes:

        # Filter out small detections if you only want to keep the actual eyes
        if ew < 100 or eh < 100:
            continue

        # cv2.resize expects the target size as (width, height)
        d = cv2.resize(dot.copy(), (ew, eh))
        d_alpha = d[..., 3] / 255.0
        d_alpha = np.repeat(d_alpha[..., np.newaxis], 3, axis=2)
        d = d[..., :3]

        img[y+ey:y+ey+eh, x+ex:x+ex+ew, :] = \
            img[y+ey:y+ey+eh, x+ex:x+ex+ew, :] * (1 - d_alpha) + d * d_alpha

cv2.imwrite('out.png', img)
That's the output (I filtered out small detections, so that only the actual eyes are overlaid):
Fine-tuning the exact locations might still be needed, but I think that's an issue coming from the Haar cascade classifier itself.
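For reference, the pixel-wise blend in the inner loop is the standard alpha-compositing formula, result = background * (1 - alpha) + overlay * alpha. Below is a minimal standalone sketch of that step as a reusable helper; the function name overlay_rgba and the call shown at the bottom are purely illustrative and not part of the original code:

import numpy as np

def overlay_rgba(background, overlay, x, y):
    # Illustrative helper (not from the original answer): alpha-blend a BGRA
    # overlay onto a BGR background at (x, y), assuming the overlay fits
    # entirely inside the background image.
    h, w = overlay.shape[:2]
    roi = background[y:y+h, x:x+w]

    alpha = overlay[..., 3:4] / 255.0   # shape (h, w, 1), broadcasts over the 3 color channels
    color = overlay[..., :3]

    blended = np.clip(roi * (1.0 - alpha) + color * alpha, 0, 255)
    background[y:y+h, x:x+w] = blended.astype(background.dtype)
    return background

# Hypothetical usage with the detections from the loop above:
# overlay_rgba(img, cv2.resize(dot, (ew, eh)), x + ex, y + ey)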
----------------------------------------
System information
----------------------------------------
Platform: Windows-10-10.0.16299-SP0
Python: 3.9.1
NumPy: 1.20.1
OpenCV: 4.5.1
----------------------------------------
Upvotes: 1