Reputation: 1652
I am trying my hand at image processing, and my goal is to output the measurements of a human hand given an image of one as input. My current plan is to include a quarter in the image to provide a reference scale. My input therefore looks like this:
I am currently using scikit-image for image processing, and my code looks like this:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from skimage import data
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.morphology import closing, square
from skimage.measure import label, regionprops
from skimage.color import label2rgb
from skimage import io, color
#image = data.coins()[50:-50, 50:-50]
rgb = io.imread("hand2.JPG")
image = color.rgb2gray(rgb)
# apply threshold
thresh = threshold_otsu(image)
bw = closing(image > thresh, square(3))
# remove artifacts connected to image border
cleared = bw.copy()
#clear_border(cleared)
# label image regions
label_image = label(cleared)
borders = np.logical_xor(bw, cleared)
label_image[borders] = -1
image_label_overlay = label2rgb(label_image, image=image)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 12))
ax.imshow(image_label_overlay)
for region in regionprops(label_image):
    # skip small regions
    if region.area < 1000:
        continue

    print("Perimeter:", region.perimeter)
    print("Area:", region.area)
    print()

    # draw a rectangle around each remaining segment
    minr, minc, maxr, maxc = region.bbox
    rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                              fill=False, edgecolor='red', linewidth=2)
    ax.add_patch(rect)
plt.show()
I am able to segment my image into regions, but I don't know how to convert my hand segment into measurements for the individual fingers and the width of the hand. I think I'm close; I just don't quite know how to proceed!
EDIT: Maybe I should be using OpenCV for this?
Upvotes: 0
Views: 2657
Reputation: 7253
It wasn't clear exactly what you wanted as output, but here is my best guess. I used the SLIC segmentation algorithm to identify regions in the image. Based on their region properties (area), I select the two largest regions (the hand and the coin) and display them, along with their principal axes.
import numpy as np
import matplotlib.pyplot as plt
import math
from skimage import io, segmentation, measure, color
image = io.imread("hand2.JPG")
label_image = segmentation.slic(image, n_segments=2)
label_image = measure.label(label_image)
regions = measure.regionprops(label_image)
areas = [r.area for r in regions]
ix = np.argsort(areas)
hand = regions[ix[-1]]
coin = regions[ix[-2]]
selected_labels = np.zeros_like(image[..., 0], dtype=np.uint8)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 12))
for n, region in enumerate([hand, coin]):
    # paint this region into the label mask (offset by 2 so 0 stays background)
    selected_labels[region.coords[:, 0], region.coords[:, 1]] = n + 2

    # draw half of each principal axis from the region's centroid
    y0, x0 = region.centroid
    orientation = region.orientation
    x1 = x0 + math.cos(orientation) * 0.5 * region.major_axis_length
    y1 = y0 - math.sin(orientation) * 0.5 * region.major_axis_length
    x2 = x0 - math.sin(orientation) * 0.5 * region.minor_axis_length
    y2 = y0 - math.cos(orientation) * 0.5 * region.minor_axis_length
    ax.plot((x0, x1), (y0, y1), '-r', linewidth=2.5)
    ax.plot((x0, x2), (y0, y2), '-r', linewidth=2.5)
    ax.plot(x0, y0, '.g', markersize=15)
image_label_overlay = color.label2rgb(selected_labels, image=image, bg_label=0)
ax.imshow(image_label_overlay, cmap='gray')
ax.axis('image')
plt.show()
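From here you could convert pixels to real units using the coin, which is what the question is ultimately after. Below is a rough sketch of that step; it assumes the coin is a US quarter (24.26 mm in diameter) and reuses the hand and coin regions computed above, with the caveat that which hand axis corresponds to "width" depends on how the hand is oriented in the photo.
# Sketch: convert pixel measurements to millimetres using the coin as a
# scale reference. Assumes the coin is a US quarter (24.26 mm in diameter)
# and that `hand` and `coin` are the regionprops objects selected above.
QUARTER_DIAMETER_MM = 24.26

# The coin is roughly circular, so its major axis length in pixels
# approximates its diameter in the image.
mm_per_pixel = QUARTER_DIAMETER_MM / coin.major_axis_length

# Scale the hand's principal axes: the major axis is roughly the hand's
# length, the minor axis roughly its width.
hand_length_mm = hand.major_axis_length * mm_per_pixel
hand_width_mm = hand.minor_axis_length * mm_per_pixel

print("Scale: {:.3f} mm per pixel".format(mm_per_pixel))
print("Approximate hand length: {:.1f} mm".format(hand_length_mm))
print("Approximate hand width: {:.1f} mm".format(hand_width_mm))
For individual fingers you would need a finer segmentation of the hand region itself (for example, measuring across the hand mask perpendicular to each finger), but the same mm_per_pixel factor applies once you have those pixel distances.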
Upvotes: 1