Reputation: 464
I am stitching multiple images. While stitching two images, a dashed black line shows up along the seam, as shown below.
Does anyone know how I can remove or get rid of this black dashed line?
This is the main part of the stitching code. It stitches two images, then is called again with the result and the next image until all images are processed:
detector = cv2.xfeatures2d.SURF_create(400)

gray1 = cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY)
ret1, mask1 = cv2.threshold(gray1,1,255,cv2.THRESH_BINARY)
kp1, descriptors1 = detector.detectAndCompute(gray1,mask1)

gray2 = cv2.cvtColor(image2,cv2.COLOR_BGR2GRAY)
ret2, mask2 = cv2.threshold(gray2,1,255,cv2.THRESH_BINARY)
kp2, descriptors2 = detector.detectAndCompute(gray2,mask2)

keypoints1Im = cv2.drawKeypoints(image1, kp1, outImage = cv2.DRAW_MATCHES_FLAGS_DEFAULT, color=(0,0,255))
util.display("KEYPOINTS",keypoints1Im)
keypoints2Im = cv2.drawKeypoints(image2, kp2, outImage = cv2.DRAW_MATCHES_FLAGS_DEFAULT, color=(0,0,255))
util.display("KEYPOINTS",keypoints2Im)

matcher = cv2.BFMatcher()
matches = matcher.knnMatch(descriptors2,descriptors1, k=2)

good = []
for m, n in matches:
    if m.distance < 0.55 * n.distance:
        good.append(m)

print (str(len(good)) + " Matches were Found")

if len(good) <= 10:
    return image1

matches = copy.copy(good)

matchDrawing = util.drawMatches(gray2,kp2,gray1,kp1,matches)
util.display("matches",matchDrawing)

src_pts = np.float32([ kp2[m.queryIdx].pt for m in matches ]).reshape(-1,1,2)
dst_pts = np.float32([ kp1[m.trainIdx].pt for m in matches ]).reshape(-1,1,2)

A = cv2.estimateRigidTransform(src_pts,dst_pts,fullAffine=False)

if A is None:
    HomogResult = cv2.findHomography(src_pts,dst_pts,method=cv2.RANSAC)
    H = HomogResult[0]

height1,width1 = image1.shape[:2]
height2,width2 = image2.shape[:2]

corners1 = np.float32(([0,0],[0,height1],[width1,height1],[width1,0]))
corners2 = np.float32(([0,0],[0,height2],[width2,height2],[width2,0]))

warpedCorners2 = np.zeros((4,2))

for i in range(0,4):
    cornerX = corners2[i,0]
    cornerY = corners2[i,1]
    if A is not None: #check if we're working with affine transform or perspective transform
        warpedCorners2[i,0] = A[0,0]*cornerX + A[0,1]*cornerY + A[0,2]
        warpedCorners2[i,1] = A[1,0]*cornerX + A[1,1]*cornerY + A[1,2]
    else:
        warpedCorners2[i,0] = (H[0,0]*cornerX + H[0,1]*cornerY + H[0,2])/(H[2,0]*cornerX + H[2,1]*cornerY + H[2,2])
        warpedCorners2[i,1] = (H[1,0]*cornerX + H[1,1]*cornerY + H[1,2])/(H[2,0]*cornerX + H[2,1]*cornerY + H[2,2])

allCorners = np.concatenate((corners1, warpedCorners2), axis=0)

[xMin, yMin] = np.int32(allCorners.min(axis=0).ravel() - 0.5)
[xMax, yMax] = np.int32(allCorners.max(axis=0).ravel() + 0.5)

translation = np.float32(([1,0,-1*xMin],[0,1,-1*yMin],[0,0,1]))
warpedResImg = cv2.warpPerspective(image1, translation, (xMax-xMin, yMax-yMin))

if A is None:
    fullTransformation = np.dot(translation,H) #again, images must be translated to be 100% visible in new canvas
    warpedImage2 = cv2.warpPerspective(image2, fullTransformation, (xMax-xMin, yMax-yMin))
else:
    warpedImageTemp = cv2.warpPerspective(image2, translation, (xMax-xMin, yMax-yMin))
    warpedImage2 = cv2.warpAffine(warpedImageTemp, A, (xMax-xMin, yMax-yMin))

result = np.where(warpedImage2 != 0, warpedImage2, warpedResImg)
Please help me out. Thanks.
Edit:
Input image1 (resized)
Input image2 (resized)
Result (resized)
Update:
Result after @fmw42's answer:
Upvotes: 4
Views: 2386
Reputation: 3583
I will focus on one of the cuts as a proof of concept. I agree with the comments that your code is a bit lengthy and hard to work with, so step one is to glue the pictures together myself.
import cv2
import matplotlib.pyplot as plt
import numpy as np
import itertools
from scipy.interpolate import UnivariateSpline
upper_image = cv2.cvtColor(cv2.imread('yQv6W.jpg'), cv2.COLOR_BGR2RGB)/255
lower_image = cv2.cvtColor(cv2.imread('zoWJv.jpg'), cv2.COLOR_BGR2RGB)/255
result_image = np.zeros((466+139,700+22,3))
result_image[139:139+lower_image.shape[0],:lower_image.shape[1]] = lower_image
result_image[0:upper_image.shape[0], 22:22+upper_image.shape[1]] = upper_image
plt.imshow(result_image)
Ok, no dashed black line, but I admit it's not perfect either. So the next step is to align at least the street and the little path at the very right of the picture. For that I will need to shrink the picture to a non-integer size and turn that back into a grid. I will use a kNN-like method for that.
Edit: As requested in the comments, I'll explain the shrinking in a bit more detail, since it would have to be done by hand again for another stitching. The magic happens in this line (I replaced n with its value):
f = UnivariateSpline([0,290,510,685],[0,310,530,700])
I first tried to scale the lower picture in the x-direction so that the little path on the very right fits the upper image. Unfortunately, the street then no longer lined up with the street. So instead I scale down according to the function above: at pixel 0 I still want pixel 0, at 290 I want what used to be at 310, and so on. Notice that 290, 510 and 310, 530 are the new and old x-coordinates, respectively, of the street and the path at the height of the gluing.
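As a minimal sanity check of that mapping (this check uses the same control points as above; keep in mind that UnivariateSpline with its default smoothing fits close to, but not necessarily exactly through, the given points), you can evaluate the spline at the control coordinates:
import numpy as np
from scipy.interpolate import UnivariateSpline

f = UnivariateSpline([0, 290, 510, 685], [0, 310, 530, 700])   # new x -> old x
print(f([0, 290, 510, 685]))   # should print values near [0, 310, 530, 700]
print(f(145))                  # old x-coordinate to sample for new pixel column 145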
class Image_knn():
    def fit(self, image):
        self.image = image.astype('float')

    def predict(self, x, y):
        image = self.image
        weights_x = [(1-(x % 1)).reshape(*x.shape,1), (x % 1).reshape(*x.shape,1)]
        weights_y = [(1-(y % 1)).reshape(*x.shape,1), (y % 1).reshape(*x.shape,1)]
        start_x = np.floor(x).astype('int')
        start_y = np.floor(y).astype('int')
        return sum([image[np.clip(np.floor(start_x + x), 0, image.shape[0]-1).astype('int'),
                          np.clip(np.floor(start_y + y), 0, image.shape[1]-1).astype('int')] * weights_x[x]*weights_y[y]
                    for x,y in itertools.product(range(2),range(2))])
image_model = Image_knn()
image_model.fit(lower_image)
n = 685
f = UnivariateSpline([0,290,510,n],[0,310,530,700])
np.linspace(0,lower_image.shape[1],n)
yspace = f(np.arange(n))
result_image = np.zeros((466+139,700+22, 3))
a,b = np.meshgrid(np.arange(0,lower_image.shape[0]), yspace)
result_image[139:139+lower_image.shape[0],:n] = np.transpose(image_model.predict(a,b), [1,0,2])
result_image[0:upper_image.shape[0], 22:22+upper_image.shape[1]] = upper_image
plt.imshow(result_image, 'gray')
Much better, no black line, but maybe we can still smooth the cut a bit. I figured that if I take convex combinations of the upper and lower image at the cut, it would look much better.
result_image = np.zeros((466+139,700+22,3))
a,b = np.meshgrid(np.arange(0,lower_image.shape[0]), yspace)
result_image[139:139+lower_image.shape[0],:n] = np.transpose(image_model.predict(a,b), [1,0,2])
transition_range = 10
result_image[0:upper_image.shape[0]-transition_range, 22:22+upper_image.shape[1]] = upper_image[:-transition_range,:]
transition_pixels = upper_image[-transition_range:,:]*np.linspace(1,0,transition_range).reshape(-1,1,1)
result_image[upper_image.shape[0]-transition_range:upper_image.shape[0], 22:22+upper_image.shape[1]] *= np.linspace(0,1,transition_range).reshape(-1,1,1)
result_image[upper_image.shape[0]-transition_range:upper_image.shape[0], 22:22+upper_image.shape[1]] += transition_pixels
plt.imshow(result_image)
plt.savefig('text.jpg')
For completeness, here is also a version gluing at the top with a tilted bottom. I attach the pictures at a point and rotate around that fixed point by a few degrees. And again I correct some very slight misalignments at the end. To get the coordinates for that I am using JupyterLab and %matplotlib widget.
fixed_point_upper = np.array([139,379])
fixed_point_lower = np.array([0,400])
angle = np.deg2rad(2)
down_dir = np.array([np.sin(angle+np.pi/2),np.cos(angle+np.pi/2)])
right_dir = np.array([np.sin(angle),np.cos(angle)])
result_image_height = np.ceil((fixed_point_upper+lower_image.shape[0]*down_dir+(lower_image.shape[1]-fixed_point_lower[1])*right_dir)[0]).astype('int')
right_shift = np.ceil(-(fixed_point_upper+lower_image.shape[0]*down_dir-fixed_point_lower[1]*right_dir)[1]).astype('int')
result_image_width = right_shift+upper_image.shape[1]
result_image = np.zeros([result_image_height, result_image_width,3])
fixed_point_result = np.array([fixed_point_upper[0],fixed_point_upper[1]+right_shift])
lower_top_left = fixed_point_result-fixed_point_lower[1]*right_dir
result_image[:upper_image.shape[0],-upper_image.shape[1]:] = upper_image
# calculate points in lower_image
result_coordinates = np.stack(np.where(np.ones(result_image.shape[:2],dtype='bool')),axis=1)
lower_coordinates = np.stack([(result_coordinates-lower_top_left)@down_dir,(result_coordinates-lower_top_left)@right_dir],axis=1)
mask = (0 <= lower_coordinates[:,0]) & (0 <= lower_coordinates[:,1]) \
       & (lower_coordinates[:,0] <= lower_image.shape[0]) & (lower_coordinates[:,1] <= lower_image.shape[1])
result_coordinates = result_coordinates[mask]
lower_coordinates = lower_coordinates[mask]
# COORDINATES ON RESULT IMAGE
# left street 254
# left sides of houses 295, 420, 505
# right small street, both sides big street 590,635,664
# COORDINATES ON LOWER IMAGE
# left street 234
# left sides of houses 280, 399, 486
# right small street, both sides big street 571, 617, 642
def coord_transform(y):
    return (y-lower_top_left[1])/right_dir[1]
y = tuple(map(coord_transform, [lower_top_left[1], 254, 295, 420, 505, 589, 635, 664]))
f = UnivariateSpline(y,[0, 234, 280, 399, 486, 571, 617, 642])
result_image[result_coordinates[:,0],result_coordinates[:,1]] = image_model.predict(lower_coordinates[:,0],np.vectorize(f)(lower_coordinates[:,1]))
Upvotes: 0
Reputation: 53081
The problem arises because, when you do the warping, the border pixels of the image get resampled/interpolated with black background pixels. This leaves a border of varying non-zero values around your warped image, which shows up as your dashed dark line when merged with the other image, because your merge test is binary (tested with != 0).
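As a minimal illustration of that effect (a standalone sketch, not part of your code), warping a pure white image by a fractional offset produces edge pixels that are darker than 255 but still nonzero:
import cv2
import numpy as np

tile = np.full((100, 100), 255, dtype=np.uint8)     # solid white test image
M = np.float32([[1, 0, 10.5], [0, 1, 10.5]])        # half-pixel translation
warped = cv2.warpAffine(tile, M, (130, 130))        # bilinear interpolation against the black border
print(np.unique(warped))                            # expect intermediate values (roughly 64 and 128)
                                                    # along the edges, in addition to 0 and 255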
So one simple thing you can do is mask the warped image in Python/OpenCV to get its bounds relative to the black background outside the image, and then erode the mask. Then use the mask to erode the image boundary. This can be achieved by the following changes to your last lines of code:
if A is None:
    fullTransformation = np.dot(translation,H) #again, images must be translated to be 100% visible in new canvas
    warpedImage2 = cv2.warpPerspective(image2, fullTransformation, (xMax-xMin, yMax-yMin))
else:
    warpedImageTemp = cv2.warpPerspective(image2, translation, (xMax-xMin, yMax-yMin))
    warpedImage2 = cv2.warpAffine(warpedImageTemp, A, (xMax-xMin, yMax-yMin))

mask2 = cv2.threshold(warpedImage2, 0, 255, cv2.THRESH_BINARY)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
mask2 = cv2.morphologyEx(mask2, cv2.MORPH_ERODE, kernel)
warpedImage2[mask2==0] = 0

result = np.where(warpedImage2 != 0, warpedImage2, warpedResImg)
I simply added the following code lines to your code:
mask2 = cv2.threshold(warpedImage2, 0, 255, cv2.THRESH_BINARY)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
mask2 = cv2.morphologyEx(mask2, cv2.MORPH_ERODE, kernel)
warpedImage2[mask2==0] = 0
You can increase the kernel size if desired to erode more.
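For example (illustrative values only, not from the code above), a 7x7 elliptical kernel trims roughly 3 pixels off the warped image boundary instead of 1:
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
mask2 = cv2.morphologyEx(mask2, cv2.MORPH_ERODE, kernel)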
Here is the before and after. Note that I did not have SURF and tried to use ORB, which did not align well, so your roads do not line up. But the mismatch from the misalignment actually emphasizes the issue, since it shows the dashed, jagged black border line. The fact that ORB did not work, or that I did not have the full code from above to make it align, is not important. The masking does what I think you want, and it extends to the processing of all your images.
The other thing that can be done, in combination with the above, is to feather the mask and then ramp-blend the two images using it. This is done by blurring the mask (a bit more) and then stretching the values over the inner half of the blurred border, so that the ramp falls only on the outer half of the blurred border. Then blend the two images with the ramped mask and its inverse, as follows, applied to the same code as above.
import skimage.exposure

if A is None:
    fullTransformation = np.dot(translation,H) #again, images must be translated to be 100% visible in new canvas
    warpedImage2 = cv2.warpPerspective(image2, fullTransformation, (xMax-xMin, yMax-yMin))
else:
    warpedImageTemp = cv2.warpPerspective(image2, translation, (xMax-xMin, yMax-yMin))
    warpedImage2 = cv2.warpAffine(warpedImageTemp, A, (xMax-xMin, yMax-yMin))

mask2 = cv2.threshold(warpedImage2, 0, 255, cv2.THRESH_BINARY)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
mask2 = cv2.morphologyEx(mask2, cv2.MORPH_ERODE, kernel)
warpedImage2[mask2==0] = 0
mask2 = cv2.blur(mask2, (5,5))
mask2 = skimage.exposure.rescale_intensity(mask2, in_range=(127.5,255), out_range=(0,255)).astype(np.float64)

result = (warpedImage2 * mask2 + warpedResImg * (255 - mask2))/255
result = result.clip(0,255).astype(np.uint8)

cv2.imwrite("image1_image2_merged3.png", result)
The result when compared to the original composite is as follows:
ADDITION
I have corrected my ORB code to reverse the use of images and now it aligns. So here are all 3 techniques: the original, the one that only uses a binary mask and the one that uses a ramped mask for blending (all as described above).
ADDITION2
Here are the 3 requested images: original, binary masked, ramped mask blending.
Here is my ORB code for the last version above. I tried to change as little as possible from your code, except that I had to use ORB and had to swap the names image1 and image2 near the end.
import cv2
import matplotlib.pyplot as plt
import numpy as np
import itertools
from scipy.interpolate import UnivariateSpline
from skimage.exposure import rescale_intensity

image1 = cv2.imread("image1.jpg")
image2 = cv2.imread("image2.jpg")

gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)

# Detect ORB features and compute descriptors.
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
orb = cv2.ORB_create(MAX_FEATURES)
keypoints1, descriptors1 = orb.detectAndCompute(gray1, None)
keypoints2, descriptors2 = orb.detectAndCompute(gray2, None)

# Match features.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = matcher.match(descriptors1, descriptors2, None)

# Sort matches by score
matches.sort(key=lambda x: x.distance, reverse=False)

# Remove not so good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]

# Draw top matches
imMatches = cv2.drawMatches(image1, keypoints1, image2, keypoints2, matches, None)
cv2.imwrite("/Users/fred/desktop/image1_image2_matches.png", imMatches)

# Extract location of good matches
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)

for i, match in enumerate(matches):
    points1[i, :] = keypoints1[match.queryIdx].pt
    points2[i, :] = keypoints2[match.trainIdx].pt

print(points1)
print("")
print(points2)

A = cv2.estimateRigidTransform(points1,points2,fullAffine=False)
#print(A)

if A is None:
    HomogResult = cv2.findHomography(points1,points2,method=cv2.RANSAC)
    H = HomogResult[0]

height1,width1 = image1.shape[:2]
height2,width2 = image2.shape[:2]

corners1 = np.float32(([0,0],[0,height1],[width1,height1],[width1,0]))
corners2 = np.float32(([0,0],[0,height2],[width2,height2],[width2,0]))

warpedCorners2 = np.zeros((4,2))

# project corners2 into domain of image1 from A affine or H homography
for i in range(0,4):
    cornerX = corners2[i,0]
    cornerY = corners2[i,1]
    if A is not None: #check if we're working with affine transform or perspective transform
        warpedCorners2[i,0] = A[0,0]*cornerX + A[0,1]*cornerY + A[0,2]
        warpedCorners2[i,1] = A[1,0]*cornerX + A[1,1]*cornerY + A[1,2]
    else:
        warpedCorners2[i,0] = (H[0,0]*cornerX + H[0,1]*cornerY + H[0,2])/(H[2,0]*cornerX + H[2,1]*cornerY + H[2,2])
        warpedCorners2[i,1] = (H[1,0]*cornerX + H[1,1]*cornerY + H[1,2])/(H[2,0]*cornerX + H[2,1]*cornerY + H[2,2])

allCorners = np.concatenate((corners1, warpedCorners2), axis=0)

[xMin, yMin] = np.int32(allCorners.min(axis=0).ravel() - 0.5)
[xMax, yMax] = np.int32(allCorners.max(axis=0).ravel() + 0.5)

translation = np.float32(([1,0,-1*xMin],[0,1,-1*yMin],[0,0,1]))
warpedResImg = cv2.warpPerspective(image2, translation, (xMax-xMin, yMax-yMin))

if A is None:
    fullTransformation = np.dot(translation,H) #again, images must be translated to be 100% visible in new canvas
    warpedImage2 = cv2.warpPerspective(image2, fullTransformation, (xMax-xMin, yMax-yMin))
else:
    warpedImageTemp = cv2.warpPerspective(image1, translation, (xMax-xMin, yMax-yMin))
    warpedImage2 = cv2.warpAffine(warpedImageTemp, A, (xMax-xMin, yMax-yMin))

mask2 = cv2.threshold(warpedImage2, 0, 255, cv2.THRESH_BINARY)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
mask2 = cv2.morphologyEx(mask2, cv2.MORPH_ERODE, kernel)
warpedImage2[mask2==0] = 0
mask2 = cv2.blur(mask2, (5,5))
mask2 = rescale_intensity(mask2, in_range=(127.5,255), out_range=(0,255)).astype(np.float64)

result = (warpedImage2 * mask2 + warpedResImg * (255 - mask2))/255
result = result.clip(0,255).astype(np.uint8)

cv2.imwrite("image1_image2_merged2.png", result)
You had the following. Note where the names image1 and image2 are used, compared to my code above.
warpedResImg = cv2.warpPerspective(image1, translation, (xMax-xMin, yMax-yMin))

if A is None:
    fullTransformation = np.dot(translation,H) #again, images must be translated to be 100% visible in new canvas
    warpedImage2 = cv2.warpPerspective(image2, fullTransformation, (xMax-xMin, yMax-yMin))
else:
    warpedImageTemp = cv2.warpPerspective(image2, translation, (xMax-xMin, yMax-yMin))
    warpedImage2 = cv2.warpAffine(warpedImageTemp, A, (xMax-xMin, yMax-yMin))
Upvotes: 5