Reputation: 2260
I found this code to get a skeletonized image. I have a circle image (https://docs.google.com/file/d/0ByS6Z5WRz-h2RXdzVGtXUTlPSGc/edit?usp=sharing).
import cv2
import numpy as np

img = cv2.imread(nomeimg, 0)
size = np.size(img)
skel = np.zeros(img.shape, np.uint8)

ret, img = cv2.threshold(img, 127, 255, 0)
element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
done = False

while not done:
    eroded = cv2.erode(img, element)
    temp = cv2.dilate(eroded, element)
    temp = cv2.subtract(img, temp)
    skel = cv2.bitwise_or(skel, temp)
    img = eroded.copy()

    zeros = size - cv2.countNonZero(img)
    if zeros == size:
        done = True

print("skel")
print(skel)
cv2.imshow("skel", skel)
cv2.waitKey(0)
The problem is that the resulting image is not a "skeleton" but a set of points! My purpose was to extract the contour perimeter after I have skeletonized the image. How can I edit my code to fix this? Is it correct to use cv2.findContours to find the skeleton of the circle?
Upvotes: 3
Views: 5414
Reputation: 97331
You need to reverse white and black, and fill all the holes by calling cv2.dilate first:
import numpy as np
import cv2

img = cv2.imread("e_5.jpg", 0)
size = np.size(img)
skel = np.zeros(img.shape, np.uint8)

ret, img = cv2.threshold(img, 127, 255, 0)
element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))

img = 255 - img
img = cv2.dilate(img, element, iterations=3)

done = False
while not done:
    eroded = cv2.erode(img, element)
    temp = cv2.dilate(eroded, element)
    temp = cv2.subtract(img, temp)
    skel = cv2.bitwise_or(skel, temp)
    img = eroded.copy()

    zeros = size - cv2.countNonZero(img)
    if zeros == size:
        done = True
Here is the result:
But the result is not good, because there are many gaps. The following algorithm works better; it uses functions from scipy.ndimage.morphology:
import scipy.ndimage.morphology as m
import numpy as np
import cv2
import matplotlib.pyplot as plt

def skeletonize(img):
    # hit-and-miss structuring element pairs used for iterative thinning
    h1 = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 1]])
    m1 = np.array([[1, 1, 1], [0, 0, 0], [0, 0, 0]])
    h2 = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0]])
    m2 = np.array([[0, 1, 1], [0, 0, 1], [0, 0, 0]])
    hit_list = []
    miss_list = []
    for k in range(4):
        hit_list.append(np.rot90(h1, k))
        hit_list.append(np.rot90(h2, k))
        miss_list.append(np.rot90(m1, k))
        miss_list.append(np.rot90(m2, k))
    img = img.copy()
    while True:
        last = img
        for hit, miss in zip(hit_list, miss_list):
            hm = m.binary_hit_or_miss(img, hit, miss)
            # remove the pixels matched by the hit-or-miss transform
            img = np.logical_and(img, np.logical_not(hm))
        # stop when a full pass no longer changes the image
        if np.all(img == last):
            break
    return img

img = cv2.imread("e_5.jpg", 0)
ret, img = cv2.threshold(img, 127, 255, 0)
element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
img = 255 - img
img = cv2.dilate(img, element, iterations=3)

skel = skeletonize(img)
plt.imshow(skel, cmap="gray", interpolation="nearest")
plt.show()
The result is:
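Since your stated goal is the contour perimeter, you can pass the skeleton to cv2.findContours once it is converted back to an 8-bit image. A minimal sketch, continuing from the code above (it assumes skel is the boolean array returned by skeletonize):

# Convert the boolean skeleton to an 8-bit image for OpenCV.
skel_u8 = skel.astype(np.uint8) * 255

# cv2.findContours returns 2 or 3 values depending on the OpenCV version,
# so take the contours from the second-to-last element.
result = cv2.findContours(skel_u8, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours = result[-2]

for cnt in contours:
    # Arc length of the traced contour. Note that findContours traces around
    # the 1-pixel-wide line, so for an open curve this is roughly twice the
    # curve length; for a closed ring it approximates the circumference.
    print(cv2.arcLength(cnt, False))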
Upvotes: 5
Reputation: 3852
Your skeletonization algorithm calculates the skeleton of the white area; with your current threshold, that is the background rather than the circle:
To fix your code, you can change the parameters for your threshold function:
ret,img = cv2.threshold(img,240,255,1)
The parameters are described here.
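For reference, the fourth argument 1 is the numeric value of cv2.THRESH_BINARY_INV, so the same call can also be written with the named flag (equivalent to the line above):

# Pixels above 240 become 0 and pixels at or below 240 become 255,
# so the dark circle ends up white and is skeletonized instead of the background.
ret, img = cv2.threshold(img, 240, 255, cv2.THRESH_BINARY_INV)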
Upvotes: 0