Kirk Bentish

Reputation: 21

How to achieve realistic lip color change using OpenCV and Mediapipe?

I need help with changing the lip color of a person in a video using Mediapipe. I've used Mediapipe for facial landmark detection and tracking, but I'm not sure how to proceed with changing the lip color. I couldn't find any resources on how to achieve this in the Mediapipe documentation.

This has to do more with OpenCV than Mediapipe. You might want to search for how to fill a polygon using cv2.fillPoly. You will need the landmarks to define the contour; you can refer to this image to find which landmarks to use.
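
Not part of the original comment, but a minimal sketch of that fillPoly approach (assuming a hypothetical input file face.jpg, and reusing the outer-lip landmark indices that appear later in this post) might look like this:

import cv2
import numpy as np
import mediapipe as mp

mp_face_mesh = mp.solutions.face_mesh

# Outer lip contour indices (same indices used in the question code below)
LIP_OUTLINE = [0,267,269,270,409,291,375,321,405,314,17,84,181,91,146,61,185,40,39,37]

img = cv2.imread('face.jpg')  # hypothetical input path
with mp_face_mesh.FaceMesh(static_image_mode=True, refine_landmarks=True) as face_mesh:
    results = face_mesh.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

h, w = img.shape[:2]
landmarks = results.multi_face_landmarks[0].landmark
points = np.array([(int(landmarks[i].x * w), int(landmarks[i].y * h)) for i in LIP_OUTLINE], np.int32)

# Fill the lip polygon to get a binary lip mask
mask = np.zeros((h, w), np.uint8)
cv2.fillPoly(mask, [points], 255)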

I'm using Python and OpenCV, running the code on Google Colab. I did try the method suggested by @fadiaburaid, but the result was not up to the mark. The polygons seemed to dance as the coordinates detected by Mediapipe kept changing from frame to frame, and the polygons drawn on the image looked visibly heterogeneous. I tried feathering, but it didn't bring the quality of the results to an acceptable level.

Any suggestions to improve and stabilize the polygon blending are welcome!
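
One common way to reduce this kind of frame-to-frame jitter (a sketch, not from the original post; the class and parameter names here are hypothetical) is to smooth the landmark coordinates over time with an exponential moving average before filling the polygon:

import numpy as np

class LandmarkSmoother:
    # Exponential moving average over per-frame landmark pixel coordinates
    def __init__(self, alpha=0.4):
        self.alpha = alpha   # lower alpha = stronger smoothing, more lag
        self.state = None    # previous smoothed (N, 2) coordinates

    def update(self, points):
        points = np.asarray(points, dtype=np.float32)
        if self.state is None:
            self.state = points
        else:
            self.state = self.alpha * points + (1 - self.alpha) * self.state
        return self.state.astype(np.int32)

# Per frame (hypothetical usage): points are the lip landmark pixel coords
# smoother = LandmarkSmoother(alpha=0.4)
# stable_points = smoother.update(points)
# cv2.fillPoly(mask, [stable_points], 255)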

Face Cropping

from google.colab import output
from google.colab.patches import cv2_imshow

import cv2
import mediapipe as mp

# Load the MediaPipe Face Detection model
mp_face_detection = mp.solutions.face_detection

# Initialize the Face Detection model
face_detection = mp_face_detection.FaceDetection()

# Load the image
image = cv2.imread('/content/wallpaper.png')

# Convert the image to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Detect faces in the image
results = face_detection.process(image)

# Get the first detected face
face = results.detections[0]

# Get the bounding box of the face
x1 = int(face.location_data.relative_bounding_box.xmin * image.shape[1])
y1 = int(face.location_data.relative_bounding_box.ymin * image.shape[0])
x2 = int(x1 + face.location_data.relative_bounding_box.width * image.shape[1])
y2 = int(y1 + face.location_data.relative_bounding_box.height * image.shape[0])

# Calculate the size of the square bounding box
size = max(x2 - x1, y2 - y1)

# Calculate the center of the bounding box
center_x = (x1 + x2) // 2
center_y = (y1 + y2) // 2

# Calculate the coordinates of the square bounding box
x1_square = center_x - size // 2
y1_square = center_y - size // 2

x2_square = x1_square + size
y2_square = y1_square + size

# Crop the square face region from the original image
square_face_region = image[y1_square:y2_square, x1_square:x2_square]

resized_image = cv2.resize(square_face_region, (480, 480))
resized_image_bgr = cv2.cvtColor(resized_image, cv2.COLOR_RGB2BGR)

# Save the image
cv2.imwrite('resized_image.jpg', resized_image_bgr)

Mask Generation

import itertools
import numpy as np

# Load the MediaPipe Face Mesh model
mp_face_mesh = mp.solutions.face_mesh

# Initialize the Face Mesh model
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=True, refine_landmarks=True, min_detection_confidence=0.5)

image = resized_image_bgr

# Define the lip landmark indices
# LIPS = list(set(itertools.chain(*mp_face_mesh.FACEMESH_LIPS)))

# upper = [409,405,375,321,314,267,269,270,291,146,181,185,91,84,61,37, 39, 40,0,17]
# lower = [402,415,312,311,310,308,324,318,317,178,191,80, 81, 82,87, 88,95,78,13, 14]

upper_new = [0,267,269,270,409,291,375,321,405,314,17,84,181,91,146,61,185,40,39,37]
lower_new = [13,312,311,310,415,308,324,318,402,317,14,87,178,88,95,78,191,80,81,82]


# Detect the face landmarks (Face Mesh expects an RGB image, while resized_image_bgr is BGR)
results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

# Create an empty mask with the same shape as the image
mask_upper = np.zeros(image.shape[:2], dtype=np.uint8)

# Draw white polygons on the mask using the upper landmarks
for face_landmarks in results.multi_face_landmarks:
    points_upper = []
    for i in upper_new:
        landmark = face_landmarks.landmark[i]
        x = int(landmark.x * image.shape[1])
        y = int(landmark.y * image.shape[0])
        points_upper.append((x, y))
    cv2.fillPoly(mask_upper, np.int32([points_upper]), 255)  # the outer lip contour is not convex, so use fillPoly


# Create an empty mask with the same shape as the image
mask_lower = np.zeros(image.shape[:2], dtype=np.uint8)

# Draw white polygons on the mask using the lower landmarks
for face_landmarks in results.multi_face_landmarks:
    points_lower = []
    for i in lower_new:
        landmark = face_landmarks.landmark[i]
        x = int(landmark.x * image.shape[1])
        y = int(landmark.y * image.shape[0])
        points_lower.append((x, y))
    cv2.fillPoly(mask_lower, np.int32([points_lower]), 255)

# Subtract the lower mask from the upper mask
mask_diff = cv2.subtract(mask_upper, mask_lower)

# Apply morphology operations to smooth mask 
kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5 ,5))
mask_diff=cv2.morphologyEx(mask_diff,cv2.MORPH_OPEN,kernel)
mask_diff=cv2.morphologyEx(mask_diff,cv2.MORPH_CLOSE,kernel)

cv2_imshow(mask_diff)

Mask Blending

# Convert the mask to 3 channels
mask_diff_3ch = cv2.cvtColor(mask_diff, cv2.COLOR_GRAY2BGR)

image = cv2.imread('/content/resized_image.jpg')

# Apply the mask to the original image
masked_image = cv2.bitwise_and(image, mask_diff_3ch)

cv2_imshow(masked_image)

def create_colored_mask(hex_color, shape):
    # Convert the hex color code to an RGB tuple
    rgb_color = tuple(int(hex_color[i:i+2], 16) for i in (0, 2 ,4))
    
    # Create a blank mask with the given shape
    colored_mask = np.zeros(shape, dtype=np.uint8)
    
    # Set the color channels according to the chosen color (OpenCV uses BGR order)
    colored_mask[:,:,0] = rgb_color[2]
    colored_mask[:,:,1] = rgb_color[1]
    colored_mask[:,:,2] = rgb_color[0]
    
    return colored_mask

# Create a 3-channel version of your mask_diff array
mask_diff_3ch = cv2.cvtColor(mask_diff,cv2.COLOR_GRAY2BGR)

# Ask the user to enter a hex color code for their mask
hex_color = input('Enter a hex color code for your mask (e.g. FF0000 for red): ')

# Create a colored mask with the chosen hex color and same shape as your original mask
colored_mask = create_colored_mask(hex_color, mask_diff_3ch.shape)

# Apply the colored mask where your original mask is True
masked_image = cv2.bitwise_and(colored_mask,colored_mask ,mask=mask_diff)

# Superimpose the colored mask on your original image
final_image = cv2.addWeighted(image, 1 , masked_image ,1 ,0)

cv2_imshow(final_image)

I'm getting the following results from the above code, but I want a much higher quality result for both video and photo input.


Input Image

Cropped Input Image

Mask Image

Final Image with Masking

Upvotes: 1

Views: 1526

Answers (4)

Vladimir Kuzmenkov

Reputation: 1

@fmw42's solution works excellently for me.

However, what was not answered is how to automatically get a nice mask for the lips. Let me share a complete solution for obtaining a proper lips mask and then recoloring it.

1. Imports and face detection via mediapipe

import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
import cv2
import numpy as np
import skimage.exposure
import matplotlib.pyplot as plt

Run face landmark detection (see the official documentation for how to run it):

base_options = python.BaseOptions(model_asset_path='models/face_landmarker.task')
options = vision.FaceLandmarkerOptions(base_options=base_options,
                                       output_face_blendshapes=True,
                                       output_facial_transformation_matrixes=True,
                                       num_faces=1)
detector = vision.FaceLandmarker.create_from_options(options)

# read in image of the lady and detect face parts landmarks
image_data = cv2.imread('lady.jpg')
mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=image_data)

# Detect face landmarks from the input image.
detection_result = detector.detect(mp_image)

image_height, image_width, channels = image_data.shape
face_landmarks = detection_result.face_landmarks[0]

2. Get crude lips mask from mediapipe

Landmark indexes of the upper and bottom lips (see the full mapping of face landmark indexes).

# Upper lip indexes
upper_lip_indexes = [61,185,40,39,37,0,267,269,270,409,291,308,415,310,311,312,13,82,81,80,191,78]
# Bottom lip indexes
bottom_lip_indexes = [78,95,88,178,87,14,317,402,318,324,308,291,375,321,405,314,17,84,181,91,146,61]

Define a function that gets the x,y coordinates for each index, creates a polygon, and fills it to make a segmentation mask.

# Create segmented mask from polygon function
def create_segmented_mask(indexes_list):
    # Initialize a list to store the landmarks of the face part.
    landmarks = []
    
    # Iterate over the indexes of the landmarks of the face part. 
    for index in indexes_list:
        
        # Append the landmark into the list.
        landmarks.append([int(face_landmarks[index].x * image_width),
                          int(face_landmarks[index].y * image_height)])
    shape = (image_height, image_width)
    mask = np.zeros(shape)

    polygon = [np.array(landmarks).astype(int)]
    segmented_mask = cv2.fillPoly(mask, polygon, 1)
    return segmented_mask

Run the function to obtain segmentation masks for both lips:

segmented_mask_top_lip = create_segmented_mask(upper_lip_indexes)
plt.imshow(segmented_mask_top_lip)

upper lip segmentation mask

segmented_mask_bottom_lip = create_segmented_mask(bottom_lip_indexes)
plt.imshow(segmented_mask_bottom_lip)

bottom lip segmentation mask

Combine both upper and bottom lips masks

# Combine both upper and bottom lip masks into one via addition
segmented_mask_lips_full = cv2.add(segmented_mask_top_lip, segmented_mask_bottom_lip)
# change anything that is higher than 0 to 255
segmented_mask_lips_full[segmented_mask_lips_full>0]=255
segmented_mask_lips_full=segmented_mask_lips_full.astype('uint8')
plt.imshow(segmented_mask_lips_full)

You can see the segmentation mask is very crude: full lips segmentation mask

3. Get improved mask based on the previous one.

We will use this crude mask to crop the lips from the original image, and then, based on the extracted color range, build a better, softer mask.

# Crop lips from the face using the mediapipe mask
image_data_lips = image_data.copy()
image_full_lips_cropped = cv2.bitwise_and(image_data_lips, image_data_lips, mask=segmented_mask_lips_full)

lips cropped

Get a better mask for the lips: extract the min and max color values from the cropped lips to create a lower and upper color threshold range.

# The min_threshold filters out any way too dark colors (e.g. the black background)
min_threshold = 10
# image_full_lips_cropped is BGR, so channel 0 = Blue, 1 = Green, 2 = Red
min_Blue = int(image_full_lips_cropped[:,:,0][image_full_lips_cropped[:,:,0] > min_threshold].min())
print(min_Blue)
min_Green = int(image_full_lips_cropped[:,:,1][image_full_lips_cropped[:,:,1] > min_threshold].min())
print(min_Green)
min_Red = int(image_full_lips_cropped[:,:,2][image_full_lips_cropped[:,:,2] > min_threshold].min())
print(min_Red)

# light_colors_subtraction filters out any way too bright colors (specular highlights)
light_colors_subtraction = 10
max_Blue = int(image_full_lips_cropped[:,:,0].max() - light_colors_subtraction)
print(max_Blue)
max_Green = int(image_full_lips_cropped[:,:,1].max() - light_colors_subtraction)
print(max_Green)
max_Red = int(image_full_lips_cropped[:,:,2].max())
print(max_Red)

# threshold on lip color, the order is BGR
lower = (min_Blue, min_Green, min_Red)
upper = (max_Blue, max_Green, max_Red)

Create a new mask from the cropped lips for the given color range, then apply morphology and antialiasing:

#mask for given range
mask = cv2.inRange(image_full_lips_cropped, lower, upper)

# apply morphology open and close
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

# antialias mask, convert to float in range 0 to 1
mask = cv2.GaussianBlur(mask, (0,0), sigmaX=3, sigmaY=3, borderType = cv2.BORDER_DEFAULT)
mask = skimage.exposure.rescale_intensity(mask, in_range=(128,255), out_range=(0,1)).astype(np.float32)
mask_255 = (255*mask).clip(0,255).astype(np.uint8)
cv2.imwrite('lady_mask_full_lips_improved.png', mask_255)

improved lips mask

4. Apply color to lips

Apply the smart lip colorization provided by @fmw42.

Note the color shift options: sfact=1.2 and vfact=0.7 seem to produce the best results (tested on various pictures).

# specify desired bgr color for lips and make into array
desired_color = (170,130,255)    # pink
#desired_color = (255,0,0)        # blue
#desired_color = (0,255,0)         # green

# shift input image color, experiment yourself
sfact=1.2 # 1-1.5
vfact=0.7 # 0.7-0.9 seem good

print(desired_color)

# create swatch
swatch = np.full((200,200,3), desired_color, dtype=np.uint8)

# read image
#img = cv2.imread("media/me2.jpg")
img = image_data.copy()

# read mask
#mask = cv2.imread("media/me_new_lips_mask.png", cv2.IMREAD_GRAYSCALE)
mask = mask_255


# convert input to HSV and separate channels
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv_img)

# dilate mask to make it better fit the lips
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, kernel)

# get average bgr color of lips as array
ave_color = cv2.mean(img, mask=mask)[:3]
print(ave_color)

# create 1 pixel image of average color
ave_color_img = np.full((1,1,3), ave_color, dtype=np.float32)
print(ave_color_img)

# create 1 pixel image of desired color
desired_color_img = np.full((1,1,3), desired_color, dtype=np.float32)
print(desired_color_img)

# convert desired color image to HSV
desired_hsv = cv2.cvtColor(desired_color_img, cv2.COLOR_BGR2HSV)

# convert average color image to HSV
ave_hsv = cv2.cvtColor(ave_color_img, cv2.COLOR_BGR2HSV)

# compute difference in HSV color arrays and separate channel values
diff_hsv = desired_hsv - ave_hsv
diff_h, diff_s, diff_v = cv2.split(diff_hsv)
print(diff_hsv)

# shift input image color
hnew = np.mod(h + diff_h/2, 180).astype(np.uint8)
snew = (sfact*(s + diff_s)).clip(0,255).astype(np.uint8)
vnew = (vfact*(v + diff_v)).clip(0,255).astype(np.uint8)

# merge channels back to HSV image
hsv_new = cv2.merge([hnew,snew,vnew])

# convert new HSV image to BGR
new_img = cv2.cvtColor(hsv_new, cv2.COLOR_HSV2BGR)

# antialias mask, convert to float in range 0 to 1 and make 3-channels
mask = cv2.GaussianBlur(mask, (0,0), sigmaX=5, sigmaY=5, borderType = cv2.BORDER_DEFAULT)
mask = skimage.exposure.rescale_intensity(mask, in_range=(128,255), out_range=(0,1)).astype(np.float32)
mask = cv2.merge([mask,mask,mask])

# combine img and new_img using mask 
result = (img * (1 - mask) + new_img * mask)
result = result.clip(0,255).astype(np.uint8)

# save result
cv2.imwrite('lady_recolor.jpg', result)

plt.figure(figsize=[15,15])
plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))

result pink lips

Upvotes: 0

fmw42

Reputation: 53164

Here is a further improvement that adds gains on the saturation and brightness so that one can deepen or lighten the colors. In the following I use sfact=3 and vfact=1.5 to make a deeper green color.

import cv2
import numpy as np
import skimage.exposure

# specify desired bgr color for lips and make into array
#desired_color = (170,130,255)    # pink
#desired_color = (255,0,0)        # blue
desired_color = (0,255,0)         # green

print(desired_color)

# create swatch
swatch = np.full((200,200,3), desired_color, dtype=np.uint8)

# read image
img = cv2.imread("lady2.jpg")

# read mask
mask = cv2.imread("lady2_mask.png", cv2.IMREAD_GRAYSCALE)


# convert input to HSV and separate channels
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv_img)

# dilate mask to make it better fit the lips
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, kernel)

# get average bgr color of lips as array
ave_color = cv2.mean(img, mask=mask)[:3]
print(ave_color)

# create 1 pixel image of average color
ave_color_img = np.full((1,1,3), ave_color, dtype=np.float32)
print(ave_color_img)

# create 1 pixel image of desired color
desired_color_img = np.full((1,1,3), desired_color, dtype=np.float32)
print(desired_color_img)

# convert desired color image to HSV
desired_hsv = cv2.cvtColor(desired_color_img, cv2.COLOR_BGR2HSV)

# convert average color image to HSV
ave_hsv = cv2.cvtColor(ave_color_img, cv2.COLOR_BGR2HSV)

# compute difference in HSV color arrays and separate channel values
diff_hsv = desired_hsv - ave_hsv
diff_h, diff_s, diff_v = cv2.split(diff_hsv)
print(diff_hsv)

# shift input image color
sfact=3
vfact=1.5
hnew = np.mod(h + diff_h/2, 180).astype(np.uint8)
snew = (sfact*(s + diff_s)).clip(0,255).astype(np.uint8)
vnew = (vfact*(v + diff_v)).clip(0,255).astype(np.uint8)

# merge channels back to HSV image
hsv_new = cv2.merge([hnew,snew,vnew])

# convert new HSV image to BGR
new_img = cv2.cvtColor(hsv_new, cv2.COLOR_HSV2BGR)

# antialias mask, convert to float in range 0 to 1 and make 3-channels
mask = cv2.GaussianBlur(mask, (0,0), sigmaX=5, sigmaY=5, borderType = cv2.BORDER_DEFAULT)
mask = skimage.exposure.rescale_intensity(mask, in_range=(128,255), out_range=(0,1)).astype(np.float32)
mask = cv2.merge([mask,mask,mask])

# combine img and new_img using mask 
result = (img * (1 - mask) + new_img * mask)
result = result.clip(0,255).astype(np.uint8)

# save result
cv2.imwrite('lady2_swatch.png', swatch)
cv2.imwrite('lady2_recolor.jpg', result)

cv2.imshow('swatch', swatch)
cv2.imshow('mask', mask)
cv2.imshow('new_img', new_img)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()

Deep Green Result:


Upvotes: -1

fmw42

Reputation: 53164

Here is a revised script that works better for most colors. It does the color difference in HSV color space.

Input:


Mask:


import cv2
import numpy as np
import skimage.exposure

# specify desired bgr color for lips and make into array
#desired_color = (170,130,255)    # pink
#desired_color = (255,0,0)        # blue
desired_color = (0,255,0)         # green

print(desired_color)

# create swatch
swatch = np.full((200,200,3), desired_color, dtype=np.uint8)

# read image
img = cv2.imread("lady2.jpg")

# read mask
mask = cv2.imread("lady2_mask.png", cv2.IMREAD_GRAYSCALE)


# convert input to HSV and separate channels
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv_img)

# dilate mask to make it better fit the lips
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, kernel)

# get average bgr color of lips as array
ave_color = cv2.mean(img, mask=mask)[:3]
print(ave_color)

# create 1 pixel image of average color
ave_color_img = np.full((1,1,3), ave_color, dtype=np.float32)
print(ave_color_img)

# create 1 pixel image of desired color
desired_color_img = np.full((1,1,3), desired_color, dtype=np.float32)
print(desired_color_img)

# convert desired color image to HSV
desired_hsv = cv2.cvtColor(desired_color_img, cv2.COLOR_BGR2HSV)

# convert average color image to HSV
ave_hsv = cv2.cvtColor(ave_color_img, cv2.COLOR_BGR2HSV)

# compute difference in HSV color arrays and separate channel values
diff_hsv = desired_hsv - ave_hsv
diff_h, diff_s, diff_v = cv2.split(diff_hsv)
print(diff_hsv)

# shift input image color
hnew = np.mod(h + diff_h/2, 180).astype(np.uint8)
snew = (s + diff_s).clip(0,255).astype(np.uint8)
vnew = (v + diff_v).clip(0,255).astype(np.uint8)

# merge channels back to HSV image
hsv_new = cv2.merge([hnew,snew,vnew])

# convert new HSV image to BGR
new_img = cv2.cvtColor(hsv_new, cv2.COLOR_HSV2BGR)

# antialias mask, convert to float in range 0 to 1 and make 3-channels
mask = cv2.GaussianBlur(mask, (0,0), sigmaX=5, sigmaY=5, borderType = cv2.BORDER_DEFAULT)
mask = skimage.exposure.rescale_intensity(mask, in_range=(128,255), out_range=(0,1)).astype(np.float32)
mask = cv2.merge([mask,mask,mask])

# combine img and new_img using mask 
result = (img * (1 - mask) + new_img * mask)
result = result.clip(0,255).astype(np.uint8)

# save result
cv2.imwrite('lady2_swatch.png', swatch)
cv2.imwrite('lady2_recolor.jpg', result)

cv2.imshow('swatch', swatch)
cv2.imshow('mask', mask)
cv2.imshow('new_img', new_img)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()

Pink Result:


Blue Result:


Green Result:


Upvotes: -1

fmw42

Reputation: 53164

Here is a slight variation on, and I hope an improvement over, my method posted in How can I change the color of the lip that got its landmarks without disturbing its texture? in opencv python.

The main differences are: 1) The mask is provided, but does not fit the lips as well as it could, so I dilate it a little. 2) I changed cv2.add to cv2.addWeighted to blend the new color with the lips. The weight on the new color determines the amount of lip color applied; I mixed the image (weight 1) with the new color (weight 0.75). Change the 0.75 weight as desired. 3) I increased the anti-alias distance on the mask for a softer blend at the edges of the lips.

Input:


Mask:


import cv2
import numpy as np
import skimage.exposure

# specify desired bgr color for lips and make into array
desired_color = (170, 130, 255)
desired_color = np.asarray(desired_color, dtype=np.float64)

# create swatch
swatch = np.full((200,200,3), desired_color, dtype=np.uint8)

# read image
img = cv2.imread("lady2.jpg")

# read mask
mask = cv2.imread("lady2_mask.png", cv2.IMREAD_GRAYSCALE)

# dilate mask to make it better fit the lips
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, kernel)

# get average bgr color of lips
ave_color = cv2.mean(img, mask=mask)[:3]
print(ave_color)

# compute difference colors and make into an image the same size as input
diff_color = desired_color - ave_color
diff_color = np.full_like(img, diff_color, dtype=np.uint8)

# shift input image color
new_img = cv2.addWeighted(img, 1.0, diff_color, 0.75, 0)

# antialias mask, convert to float in range 0 to 1 and make 3-channels
mask = cv2.GaussianBlur(mask, (0,0), sigmaX=15, sigmaY=15, borderType = cv2.BORDER_DEFAULT)
mask = skimage.exposure.rescale_intensity(mask, in_range=(128,255), out_range=(0,1)).astype(np.float32)
mask = cv2.merge([mask,mask,mask])

# combine img and new_img using mask
result = (img * (1 - mask) + new_img * mask)
result = result.clip(0,255).astype(np.uint8)

# save result
cv2.imwrite('lady2_swatch.png', swatch)
cv2.imwrite('lady2_mask.png', (255*mask).clip(0,255).astype(np.uint8))
cv2.imwrite('lady2_recolor.jpg', result)

cv2.imshow('swatch', swatch)
cv2.imshow('mask', mask)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()

New Lip Color Swatch:


Result:


Upvotes: 1
