Reputation: 13
I have received a homework assignment which involves an image as shown below, and my task is to divide this image into two halves and find the differences between them. I have successfully split the image using the code below, and both resulting images have the same size. However, when I use SSIM (Structural Similarity Index) to find the differences between the two images, the result is confusing: the similarity score between the two halves is extremely low. Is there any way to avoid this issue when splitting the image into two halves? Original Image
Here is my code to divide the image:
import cv2
def crop_into_two_equal_parts(image):
    """Split an image vertically into two halves of identical size.

    Parameters
    ----------
    image : numpy.ndarray
        Image array of shape (H, W) or (H, W, C), e.g. as returned by
        ``cv2.imread``.

    Returns
    -------
    tuple[numpy.ndarray, numpy.ndarray]
        ``(left_image, right_image)`` — guaranteed to have the same shape.

    Raises
    ------
    ValueError
        If ``image`` is None (e.g. the file failed to load).
    """
    if image is None:
        raise ValueError("Failed to load image.")
    # Current image dimensions.
    height, width = image.shape[:2]
    # Midpoint along the width axis.
    split_point = width // 2
    left_image = image[:, :split_point]
    # Bug fix: for an odd width, the original slice ``image[:, split_point:]``
    # made the right half one column wider than the left, which breaks any
    # pixel-wise comparison (SSIM requires identical shapes). Bounding the
    # right half at ``2 * split_point`` drops the odd trailing column so both
    # halves are exactly equal in size.
    right_image = image[:, split_point:2 * split_point]
    return left_image, right_image
# Path to the input image file.
image_path = 'C:/Users/HP ZBOOK 15 G3/Desktop/FE_CPV/drive-download-20230629T090921Z-001/1.png'

# Read the image from disk (cv2.imread returns None on failure).
image = cv2.imread(image_path)

try:
    # Split the image into two equal halves.
    left_image, right_image = crop_into_two_equal_parts(image)

    # Display the original image and both halves until a key is pressed.
    for title, img in (("Original Image", image),
                       ("Left Image", left_image),
                       ("Right Image", right_image)):
        cv2.imshow(title, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # Save the halves to disk for the SSIM comparison step.
    left_image_path = 'left_image.jpg'
    right_image_path = 'right_image.jpg'
    cv2.imwrite(left_image_path, left_image)
    cv2.imwrite(right_image_path, right_image)
except ValueError as e:
    print("Error:", str(e))
And here are the two images after I divide the original image: [Left image] [Right image](https://i.sstatic.net/ozz50.jpg)
SSIM code to spot the difference
from skimage.metrics import structural_similarity
import cv2
import numpy as np
# Load the two halves produced by the splitting script.
before = cv2.imread('left_image.jpg')
after = cv2.imread('right_image.jpg')

# SSIM here operates on single-channel images, so convert to grayscale.
before_gray = cv2.cvtColor(before, cv2.COLOR_BGR2GRAY)
after_gray = cv2.cvtColor(after, cv2.COLOR_BGR2GRAY)

# Compute SSIM; full=True also returns the per-pixel similarity map.
score, diff = structural_similarity(before_gray, after_gray, full=True)
print("Image Similarity: {:.4f}%".format(score * 100))

# The similarity map is a float array in [0, 1]; rescale to 8-bit
# unsigned integers in [0, 255] so OpenCV routines can consume it.
diff = (diff * 255).astype("uint8")
diff_box = cv2.merge([diff, diff, diff])

# Inverted Otsu threshold, then contour extraction, to locate the
# regions where the two input images differ.
thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# cv2.findContours returns 2 values in OpenCV 4.x and 3 in OpenCV 3.x.
contours = contours[0] if len(contours) == 2 else contours[1]

mask = np.zeros(before.shape, dtype='uint8')
filled_after = after.copy()

for c in contours:
    # Skip tiny contours, which are usually compression/noise artifacts.
    if cv2.contourArea(c) > 40:
        x, y, w, h = cv2.boundingRect(c)
        # Outline the differing region on every visualization image.
        for canvas in (before, after, diff_box):
            cv2.rectangle(canvas, (x, y), (x + w, y + h), (36, 255, 12), 2)
        cv2.drawContours(mask, [c], 0, (255, 255, 255), -1)
        cv2.drawContours(filled_after, [c], 0, (0, 255, 0), -1)

# Show every intermediate and final result.
for title, img in (('before', before), ('after', after), ('diff', diff),
                   ('diff_box', diff_box), ('mask', mask),
                   ('filled after', filled_after)):
    cv2.imshow(title, img)
cv2.waitKey()
Upvotes: 0
Views: 84