Fadil

Reputation: 51

How to measure similarity between two sets of images and find, for each image, the corresponding image with the highest similarity?

I have two folders of images, each containing about 145 images. For every image in one folder, I want to measure its similarity to the images in the other folder and find the corresponding image with the highest similarity. Let us assume the similarity depends on the distance between the centers of these two points, but the metric should not depend on the diameter of the two points that appear in each of these images.
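To make the idea concrete, the sketch below shows one possible reading of that metric. It is purely illustrative: it assumes each image is a grayscale mask containing exactly two bright blobs on a dark background, and center_spacing / similarity_score are just names I made up.

import cv2
import numpy as np

def center_spacing(gray_img):
    # Binarize, label the blobs, and return the distance between the
    # centroids of the two largest blobs (assumes each image has two blobs).
    _, mask = cv2.threshold(gray_img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    n, _, stats, centroids = cv2.connectedComponentsWithStats(mask)
    # Skip label 0 (the background) and keep the two largest components by area.
    order = np.argsort(stats[1:, cv2.CC_STAT_AREA])[::-1] + 1
    c1, c2 = centroids[order[0]], centroids[order[1]]
    return float(np.linalg.norm(c1 - c2))

def similarity_score(img_a, img_b):
    # Two images count as similar when their center-to-center spacings
    # are close; the blob diameters play no role here.
    return abs(center_spacing(img_a) - center_spacing(img_b))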

I have attached an image to show a sample of the data.

This is the code that I have used to find similarities based on the Euclidean distance:

from skimage.metrics import structural_similarity as ssim
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
import scipy.spatial.distance as dist
import os
np.seterr(divide='ignore', invalid='ignore')



images_list = []
images_list2 = []
resultlist = []
Maxlist= []
SIZE = 512


path = r"C:\Users\fadil\Desktop\Project\Segmentation\Exp1_v1_30062021 - Copy-2\CT&DCT\CT\U_Net_Segmentation_CT\result1\*.*"
for file in glob.glob(path):
    print(file)  # just stop here to see all file names printed
    img = cv2.imread(file, 0)  # now, we can read each file since we have the full path
    images_list.append(img)
images_list = np.array(images_list)

path3 = r"C:\Users\fadil\Desktop\Project\Segmentation\Exp1_v1_30062021 - Copy-2\CT&DCT\CT\U_Net_Segmentation_CT\result2\*.*"
for file2 in glob.glob(path3):
    print(file2)  # just stop here to see all file names printed
    img2 = cv2.imread(file2, 0)  # now, we can read each file since we have the full path
    images_list2.append(img2)
images_list2 = np.array(images_list2)




img_num: int = 1
for img2 in range(images_list2.shape[0]):
    input_img2 = images_list2[img2, :, :]
    print(img2)
    img_number = 1
    resultlist.clear()
    for image in range(images_list.shape[0]):
        input_img = images_list[image, :, :]  # Grey images. For color add another dim.
        s = dist.euclidean(input_img, input_img2)
        resultlist.append(float(s))
        img_number += 1
    print(resultlist)

    with open(r"C:\Users\fadil\Desktop\Project\Segmentation\Exp1_v1_30062021 - Copy-2\CT&DCT\CT\Similartiy Data result  euclidean\file"+str(img_num)+".txt", "w") as f:
        for M in resultlist:
            f.write(str(M) + "\n")

    #print(len(resultlist))
    ll=max(resultlist)
    Maxlist.append(float(ll))
    print(ll)
    print(Maxlist)
    MM=resultlist.index(ll)
    print (MM)
    best_img = images_list[MM]
    ###############################################

    from skimage import data, img_as_float
    im1 = images_list[MM]
    im2=input_img2
    im1 = img_as_float(im1)
    rows, cols = im1.shape
    im2 = img_as_float(im2)
    rows, cols = im2.shape
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 4),
                             sharex=True, sharey=True)
    ax = axes.ravel()

    mse_none = mean_squared_error(im1, im2)
    ssim_none = ssim(im1, im1, data_range=im1.max() - im1.min())

    mse_noise = mean_squared_error(im1, im2)
    ssim_noise = ssim(im1, im2,
                      data_range=im2.max() - im2.min())

    mse_const = mean_squared_error(im1, im2)
    ssim_const = ssim(im1, im2,
                      data_range=im2.max() - im2.min())

    label = 'MSE: {:.16f},\n SSIM: {:.16f},\n MM'

    ax[0].imshow(im1, cmap=plt.cm.gray, vmin=0, vmax=1)
    ax[0].set_xlabel(label.format(mse_none, ssim_none))
    ax[0].set_title('GT CT')

    ax[1].imshow(im2, cmap=plt.cm.gray, vmin=0, vmax=1)
    ax[1].set_xlabel(label.format(mse_noise, ssim_noise))
    ax[1].set_title('RE CT')





    ###############################################
    cv2.imwrite(r"C:\Users\fadil\Desktop\Project\Segmentation\Exp1_v1_30062021 - Copy-2\CT&DCT\CT\Similartiy Data result  euclidean\image"+str(img_num)+".jpg",list)
    cv2.imwrite(r"C:\Users\fadil\Desktop\Project\Segmentation\Exp1_v1_30062021 - Copy-2\CT&DCT\CT\Similartiy Data result  euclidean\image" + str(img_num) + ".png", input_img2)
    img_num += 1
#print(Maxlist)
#plt.plot(Maxlist, Maxlist)
#plt.show()
with open(r"C:\Users\fadil\Desktop\Project\Segmentation\Exp1_v1_30062021 - Copy-2\CT&DCT\CT\Similartiy Data result  euclidean\file"+str(img_num)+".txt", "w") as f:
     for N in Maxlist:
        f.write(str(N) + "\n")
import pickle
plt.tight_layout()
plt.show()

cv2.waitKey(0)
cv2.destroyAllWindows()

I have received the following error:

Traceback (most recent call last):
  File "C:/Users/fadil/Desktop/Project/Segmentation/Exp1_v1_30062021 - Copy-2/CT&DCT/CT/U_Net_Segmentation_CT/Similarity euclidean.py", line 45, in <module>
    s = dist.euclidean(input_img, input_img2)
  File "C:\Users\fadil\.conda\envs\tf\lib\site-packages\scipy\spatial\distance.py", line 626, in euclidean
    return minkowski(u, v, p=2, w=w)
  File "C:\Users\fadil\.conda\envs\tf\lib\site-packages\scipy\spatial\distance.py", line 513, in minkowski
    u = _validate_vector(u)
  File "C:\Users\fadil\.conda\envs\tf\lib\site-packages\scipy\spatial\distance.py", line 340, in _validate_vector
    raise ValueError("Input vector should be 1-D.")
ValueError: Input vector should be 1-D.

Process finished with exit code 1

Upvotes: 1

Views: 1393

Answers (1)

ashah

Reputation: 269

Image matching can be done by applying keypoint extraction algorithms. One such algorithm is ORB, which is part of the OpenCV library. You can find more information about ORB here.

A simple example that matches two images using ORB is shown below. You can amend it to suit your requirements and loop through all the images to find the matching one (see the sketch after the code).

import numpy as np
import cv2

# Read the query and train images (BGR colour by default)
query_img = cv2.imread('query.jpg')
train_img = cv2.imread('train.jpg')

# Convert the images to grayscale
query_img_bw = cv2.cvtColor(query_img, cv2.COLOR_BGR2GRAY)
train_img_bw = cv2.cvtColor(train_img, cv2.COLOR_BGR2GRAY)

# Initialize the ORB detector
orb = cv2.ORB_create()

# Detect the keypoints and compute the descriptors
queryKeypoints, queryDescriptors = orb.detectAndCompute(query_img_bw, None)
trainKeypoints, trainDescriptors = orb.detectAndCompute(train_img_bw, None)

# Match the descriptors; ORB descriptors are binary, so use the Hamming norm
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = matcher.match(queryDescriptors, trainDescriptors)

# Sort the matches by distance so the best 20 are drawn
matches = sorted(matches, key=lambda m: m.distance)

# Draw the matches on a final image containing both input images
final_img = cv2.drawMatches(query_img, queryKeypoints,
                            train_img, trainKeypoints, matches[:20], None)
final_img = cv2.resize(final_img, (1000, 650))

# Show the final image
cv2.imshow("Matches", final_img)
cv2.waitKey(3000)
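To extend this to the two folders from the question, one way is to score every candidate pair by the mean distance of its best ORB matches and keep the candidate with the lowest score. The sketch below is only an illustration: the folder paths are placeholders, the scoring rule is an assumption, and it presumes every image yields at least one keypoint.

import glob
import cv2

orb = cv2.ORB_create()
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

def orb_score(desc_a, desc_b):
    # Lower is better: mean Hamming distance of the (up to) 20 best matches.
    matches = sorted(matcher.match(desc_a, desc_b), key=lambda m: m.distance)[:20]
    return sum(m.distance for m in matches) / max(len(matches), 1)

# Placeholder folder paths; replace with the real result1/result2 directories.
folder1 = [cv2.imread(f, 0) for f in glob.glob(r"result1\*.png")]
folder2 = [cv2.imread(f, 0) for f in glob.glob(r"result2\*.png")]

# Pre-compute ORB descriptors for the first folder.
desc1 = [orb.detectAndCompute(img, None)[1] for img in folder1]

for i, img in enumerate(folder2):
    _, d = orb.detectAndCompute(img, None)
    scores = [orb_score(d, d1) for d1 in desc1]
    best = scores.index(min(scores))
    print(f"image {i} in folder2 best matches image {best} in folder1 (score {scores[best]:.1f})")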

Upvotes: 3
