Reputation: 13
I want to stitch two images that have a partially overlapping region, and I am using OpenCV to do it. After matching keypoints with ORB and FLANN and finding the homography, I performed a warpPerspective, but I don't get the required result. I don't understand what I am doing wrong; I am very new to this, so please help. I am attaching the code:
# -*- coding: utf-8 -*-
import numpy as np
import cv2
from matplotlib import pyplot as plt

def rotated(img):
    # rotate the image by 359.5 degrees (i.e. 0.5 degrees clockwise) about its centre
    (h, w) = img.shape[:2]
    center = (w / 2, h / 2)
    m = cv2.getRotationMatrix2D(center, 359.5, 1)
    rotate = cv2.warpAffine(img, m, (w, h))
    plt.imshow(rotate)
    plt.show()
    return rotate

d = (600, 400)
img2 = cv2.imread('G:/mca6thsem/bulletsimages/a31/a22/5500k/land2_part1.jpeg')
img1 = cv2.imread('G:/mca6thsem/bulletsimages/a31/a22/5500k/land2prt2.jpeg')
img1 = cv2.resize(img1, d)
img1 = rotated(img1)
img2 = cv2.resize(img2, d)
img2 = rotated(img2)

# ORB keypoints and descriptors
orb = cv2.ORB_create()
kp, des = orb.detectAndCompute(img1, None)
kp1, des1 = orb.detectAndCompute(img2, None)
imgor = cv2.drawKeypoints(img1, kp, None, color=(0, 255, 0))
imgor1 = cv2.drawKeypoints(img2, kp1, None, color=(0, 255, 0))

# FLANN matching
FLANN_INDEX_LSH = 0
index_params = dict(algorithm=FLANN_INDEX_LSH,
                    table_number=6,       # 12
                    key_size=12,          # 20
                    multi_probe_level=1)  # 2
search_params = dict(checks=30)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(np.float32(des), np.float32(des1), k=2)

# ratio test to keep only the good matches
good = []
for m, n in matches:
    if m.distance < 0.9 * n.distance:
        good.append(m)

if len(good) > 10:
    src_pts = np.float32([kp[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp1[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()
    h, w = img1.shape[:2]
    # warp img2 onto a canvas wide enough for both images, then paste img1 on the left
    dst = cv2.warpPerspective(img2, M, (img2.shape[1] + img1.shape[1], img2.shape[0]))
    dst[0:img1.shape[0], 0:img1.shape[1]] = img1

    draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                       singlePointColor=None,
                       matchesMask=matchesMask,  # draw only inliers
                       flags=2)
    img3 = cv2.drawMatches(img1, kp, img2, kp1, good, None, **draw_params)
    plt.imshow(img3, 'gray'), plt.show()
    #im1Reg = cv2.resize(im1Reg, d)
    plt.imshow(dst, 'gray'), plt.show()
Upvotes: 0
Views: 659
Reputation: 1170
Your warpPerspective is working correctly, but you aren't getting enough keypoint matches for your warp to turn out like you want. Try a different keypoint detector such as BRISK, KAZE, or AKAZE, and a different matching strategy, such as brute-force or k-nearest-neighbors matching.
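For example, here is a minimal sketch of the kind of swap I mean: AKAZE keypoints matched with a brute-force Hamming matcher plus Lowe's ratio test, followed by the same homography-and-warp step you already have. The file names left.jpg / right.jpg, the 0.75 ratio threshold, and the minimum of 10 good matches are placeholder values, not anything from your setup, so tune them for your images.

import cv2
import numpy as np

# Load the two partially overlapping images (placeholder paths).
img1 = cv2.imread('left.jpg')
img2 = cv2.imread('right.jpg')

# AKAZE produces binary descriptors, so match them with Hamming distance.
akaze = cv2.AKAZE_create()
kp1, des1 = akaze.detectAndCompute(img1, None)
kp2, des2 = akaze.detectAndCompute(img2, None)

bf = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = bf.knnMatch(des1, des2, k=2)

# Lowe's ratio test; 0.75 is a common default.
good = [m for m, n in matches if m.distance < 0.75 * n.distance]

if len(good) > 10:
    # Homography that maps img2's points onto img1's frame.
    src_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    # Warp img2 into img1's frame on a canvas wide enough for both, then paste img1.
    result = cv2.warpPerspective(img2, M,
                                 (img1.shape[1] + img2.shape[1], img1.shape[0]))
    result[0:img1.shape[0], 0:img1.shape[1]] = img1
    cv2.imwrite('stitched.jpg', result)
else:
    print('Not enough good matches: %d' % len(good))

If AKAZE still finds too few matches on low-texture images, BRISK (cv2.BRISK_create()) also produces binary descriptors and drops into the same Hamming-distance matcher.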
Upvotes: 2