Reputation: 13
I am trying to write an algorithm that calculates the mean value of certain neighboring elements of a 2D array.
I would like to see if it is possible to speed it up using Cython, but it is the first time I use it myself.
Python version:
import numpy as np
def clamp(val, minval, maxval):
    """Confine *val* to the interval [minval, maxval].

    If the bounds cross (minval > maxval), the lower bound wins,
    matching the max(min(...)) composition order.
    """
    upper_limited = min(val, maxval)
    return max(minval, upper_limited)
def filter(arr, r):
    """Mean of the perimeter of a clamped square window around each element.

    For every element, the (2r+1) x (2r+1) window centred on it is clamped
    to the array bounds, and the mean of the elements on the window's
    perimeter (corners + open edges) is stored, truncated toward zero.

    Parameters
    ----------
    arr : 2-D ndarray of ints
        Input image/array.
    r : int
        Window radius; assumed >= 1 (see note on nbr_elements below).

    Returns
    -------
    2-D int ndarray with the same shape as ``arr``.
    """
    M = arr.shape[0]
    N = arr.shape[1]
    # Bug fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` gives the same default integer dtype.
    new_arr = np.zeros([M, N], dtype=int)
    for x in range(M):
        for y in range(N):
            # Window corner coordinates, clamped to the array bounds.
            # The upper clamp of p1/p2 to M/N mirrors the original code;
            # it never binds for r >= 0 since x - r <= M - 1.
            p1 = max(0, min(x - r, M))
            p2 = max(0, min(y - r, N))
            p3 = max(0, min(y + r, N - 1))
            p4 = max(0, min(x + r, M - 1))
            # Number of elements on the window perimeter.
            # NOTE(review): this is 0 when the window collapses (r == 0),
            # giving ZeroDivisionError below — same as the original; guard
            # here if r can ever be < 1.
            nbr_elements = (p3 - p2 - 1) * 2 + (p4 - p1 - 1) * 2 + 4
            # The four corners.
            tmp = arr[p1, p2] + arr[p1, p3] + arr[p4, p2] + arr[p4, p3]
            # The four open edges. ndarray.sum() runs at C speed, unlike
            # the builtin sum() the original used on each slice.
            tmp += arr[p1 + 1:p4, p2].sum()
            tmp += arr[p1 + 1:p4, p3].sum()
            tmp += arr[p1, p2 + 1:p3].sum()
            tmp += arr[p4, p2 + 1:p3].sum()
            # Float division; the assignment into an int array truncates
            # toward zero, exactly as the original did.
            new_arr[x, y] = tmp / nbr_elements
    return new_arr
and my attempt at a Cython implementation. I found out that max/min/sum were faster if you re-implemented them in Cython, rather than using the Python versions.
Cython version:
# --- Module preamble -------------------------------------------------
# True division semantics even when compiled at Python 2 language level.
from __future__ import division
# NumPy is needed twice: `import` for the Python-level API (np.zeros),
# `cimport` for the C-level buffer and dtype declarations.
import numpy as np
cimport numpy as np

# Runtime dtype object / compile-time C element type for the arrays.
# NOTE(review): np.int was removed in NumPy 1.24; np.int_ (or plain int)
# is the modern spelling — confirm against the NumPy version in use.
DTYPE = np.int
ctypedef np.int_t DTYPE_t

# C-level max/min so the hot loop avoids Python's builtin calls.
cdef inline int int_max(int a, int b): return a if a >= b else b
cdef inline int int_min(int a, int b): return a if a <= b else b

def clamp(int val, int minval, int maxval):
    # Confine val to [minval, maxval]. Declared `def`, so every call
    # from filter() pays Python calling overhead; a `cdef` function
    # would be a plain C call.
    return int_max(minval, int_min(val, maxval))
def cython_sum(np.ndarray[DTYPE_t, ndim=1] y):
    """Sum a 1-D int array with a plain C loop.

    Bug fix: the accumulator starts at 0 instead of y[0], so an empty
    slice sums to 0 (matching the builtin sum() it replaces) rather
    than raising IndexError — filter() does pass empty slices whenever
    the window edge collapses (small r, e.g. r == 0 or 1 at a border).
    Also uses range() instead of the Python-2-only xrange().
    """
    cdef int N = y.shape[0]
    cdef int x = 0
    cdef int i
    # Cython compiles this to a pure C loop over the typed buffer.
    for i in range(N):
        x += y[i]
    return x
def filter(np.ndarray[DTYPE_t, ndim=2] arr, int r):
    """Mean of the perimeter of the clamped (2r+1)x(2r+1) window
    centred on each element, truncated toward zero.

    Parameters: arr — 2-D int array; r — window radius (assumed >= 1).
    Returns a new 2-D int array of the same shape.
    """
    # Bug fix: the original read `im.shape`, but the parameter is named
    # `arr` (NameError at runtime). Also declare M/N — and x/y below —
    # as C ints so both loops compile to pure C instead of iterating
    # over Python objects.
    cdef int M = arr.shape[0]
    cdef int N = arr.shape[1]
    cdef np.ndarray[DTYPE_t, ndim=2] new_arr = np.zeros([M, N], dtype=DTYPE)
    cdef int p1, p2, p3, p4, nbr_elements, tmp, x, y
    for x in range(M):
        for y in range(N):
            # Window corner coordinates, clamped at the borders.
            p1 = clamp(x-r, 0, M)
            p2 = clamp(y-r, 0, N)
            p3 = clamp(y+r, 0, N-1)
            p4 = clamp(x+r, 0, M-1)
            # Number of elements on the window perimeter.
            # NOTE(review): 0 when r == 0 -> division by zero below.
            nbr_elements = (p3-p2-1)*2+(p4-p1-1)*2+4
            tmp = 0
            # The four corners.
            tmp += arr[p1, p2]
            tmp += arr[p1, p3]
            tmp += arr[p4, p2]
            tmp += arr[p4, p3]
            # The open edges between the corners.
            tmp += cython_sum(arr[p1+1:p4, p2])
            tmp += cython_sum(arr[p1+1:p4, p3])
            tmp += cython_sum(arr[p1, p2+1:p3])
            tmp += cython_sum(arr[p4, p2+1:p3])
            new_arr[x, y] = tmp/nbr_elements
    return new_arr
I made a test script:
"""Benchmark the pure-Python and Cython implementations on one input."""
import time

import numpy as np

import square_mean_py
import square_mean_cy

N = 500
arr = np.random.randint(15, size=(N, N))
r = 8

# Timing. Bug fix: print() calls (Python 3) — the original used
# Python 2 print statements, which are a SyntaxError on Python 3.
t = time.time()
res_py = square_mean_py.filter(arr, r)
print(time.time() - t)

t = time.time()
res_cy = square_mean_cy.filter(arr, r)
print(time.time() - t)
Which prints
9.61458301544
1.44476890564
that is a speed-up of approx. 7 times. I have seen a lot of Cython implementations that yield a much better speed-up, and so I was thinking that maybe some of you see a potential way of speeding up the algorithm?
Upvotes: 1
Views: 913
Reputation: 2385
There are a few issues with your Cython script:

- You have not declared `x`, `y`, `M` and `N` as C integers, even though they are used in ranges.
- You should have `cdef`ed the two functions `cython_sum` and `clamp`, since you don't need them at the Python level.
- What is `im` that appears in the `filter` function? I am assuming you meant `arr`.

Fixing those, I will rewrite/modify your Cython script like so:
# --- Module preamble for the optimised version -----------------------
from __future__ import division
import numpy as np
cimport numpy as np
# Decorators used below to switch off per-index bounds checking and
# negative-index wrapping in the hot functions.
from cython cimport boundscheck, wraparound

# Runtime dtype object / compile-time C element type.
# NOTE(review): np.int was removed in NumPy 1.24; np.int_ is the
# modern spelling — confirm against the NumPy version in use.
DTYPE = np.int
ctypedef np.int_t DTYPE_t

# Inline C helpers — no Python call overhead.
cdef inline int int_max(int a, int b): return a if a >= b else b
cdef inline int int_min(int a, int b): return a if a <= b else b

# `cdef` (not `def`): callable only from C/Cython, so each of the four
# clamp calls per pixel is a plain C function call.
cdef int clamp3(int val, int minval, int maxval):
    return int_max(minval, int_min(val, maxval))
@boundscheck(False)
cdef int cython_sum2(DTYPE_t[:] y):
    """Sum a 1-D int memoryview with a C loop.

    Bug fix: the accumulator starts at 0 instead of y[0]. With bounds
    checking disabled, reading y[0] from an empty slice is an
    out-of-bounds access (undefined behaviour), and filter3() does pass
    empty slices whenever a window edge collapses (small r at a
    border). Starting at 0 also matches builtin sum() on empty input.
    """
    cdef int N = y.shape[0]
    cdef int x = 0
    cdef int i
    for i in range(N):
        x += y[i]
    return x
@boundscheck(False)   # indices are kept in range by the clamping below
@wraparound(False)    # no negative indices are used
def filter3(DTYPE_t[:,::1] arr, int r):
    # Mean of the perimeter of the (2r+1)x(2r+1) window centred on each
    # element, with the window clamped to the array bounds.
    # `DTYPE_t[:,::1]` demands a C-contiguous 2-D int array and indexes
    # it through a typed memoryview (no ndarray overhead).
    cdef int M = arr.shape[0]
    cdef int N = arr.shape[1]
    cdef np.ndarray[DTYPE_t, ndim=2, mode='c'] \
        new_arr = np.zeros([M, N], dtype=DTYPE)
    # Every loop/index variable is a C int -> both loops compile to C.
    cdef int p1, p2, p3, p4, nbr_elements, tmp, x, y
    for x in range(M):
        for y in range(N):
            # Window corner coordinates, clamped at the borders.
            p1 = clamp3(x-r, 0, M)
            p2 = clamp3(y-r, 0, N)
            p3 = clamp3(y+r, 0, N-1)
            p4 = clamp3(x+r, 0, M-1)
            # Count of elements on the window perimeter.
            # NOTE(review): 0 when r == 0 -> division by zero below.
            nbr_elements = (p3-p2-1)*2+(p4-p1-1)*2+4
            tmp = 0
            # The four corners.
            tmp += arr[p1, p2]
            tmp += arr[p1, p3]
            tmp += arr[p4, p2]
            tmp += arr[p4, p3]
            # The open edges between the corners.
            tmp += cython_sum2(arr[p1+1:p4, p2])
            tmp += cython_sum2(arr[p1+1:p4, p3])
            tmp += cython_sum2(arr[p1, p2+1:p3])
            tmp += cython_sum2(arr[p4, p2+1:p3])
            # True division (from the __future__ import), then an
            # explicit C cast truncating toward zero.
            new_arr[x, y] = <int>(tmp/nbr_elements)
    return new_arr
Here is the timing on my machine:
arr = np.random.randint(15, size=(500, 500))
Original (Python) version: 7.34 s
Your Cython version: 1.98 s
New Cython version: 0.0323 s
That is almost 60 times speed up over your Cython script and over 200 times speed-up over the original Python script.
Upvotes: 4