Reputation: 165
I'm implementing a CNN using NumPy, and I can't find a way to implement backpropagation for max-pooling as efficiently as I did the forward propagation. This is what I did in the forward pass:
import numpy as np
from numpy.lib.stride_tricks import as_strided

def run(self, x, is_training=True):
    """
    Apply max-pooling to `x`
    :param x: input - [n_batches, channels, height, width]
    :param is_training: a boolean indicating whether we are training or not
    :return: output of the max-pooling on `x`
    """
    n_batch, ch_x, h_x, w_x = x.shape
    h_poolwindow, w_poolwindow = self.pool_size
    out_h = (h_x - h_poolwindow) // self.stride + 1
    out_w = (w_x - w_poolwindow) // self.stride + 1
    # View `x` as (n, ch, out_h, out_w, pool_h, pool_w) windows without copying
    windows = as_strided(x,
                         shape=(n_batch, ch_x, out_h, out_w, *self.pool_size),
                         strides=(x.strides[0], x.strides[1],
                                  self.stride * x.strides[2],
                                  self.stride * x.strides[3],
                                  x.strides[2], x.strides[3]))
    out = np.max(windows, axis=(4, 5))
    if is_training:
        self.cache['X'] = x
    return out
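To illustrate the windowing trick, here is a minimal standalone sketch on a toy input (pool_h, pool_w, and stride stand in for the self.pool_size and self.stride attributes above):

import numpy as np
from numpy.lib.stride_tricks import as_strided

x = np.arange(16, dtype=float).reshape(1, 1, 4, 4)  # 1 batch, 1 channel, 4x4
pool_h = pool_w = 2
stride = 2
n, c, h, w = x.shape
out_h = (h - pool_h) // stride + 1
out_w = (w - pool_w) // stride + 1
windows = as_strided(x,
                     shape=(n, c, out_h, out_w, pool_h, pool_w),
                     strides=(x.strides[0], x.strides[1],
                              stride * x.strides[2], stride * x.strides[3],
                              x.strides[2], x.strides[3]))
print(windows.max(axis=(4, 5))[0, 0])  # [[ 5.  7.]
                                       #  [13. 15.]]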
My current (loop-based) implementation of the backpropagation:
def backprop(self, dA_prev):
    """
    Backpropagation through a max-pooling layer
    :param dA_prev: gradient of the cost with respect to this layer's output
    :return: gradient of the cost with respect to this layer's input
    """
    x = self.cache['X']
    n_batch, ch_x, h_x, w_x = x.shape
    h_poolwindow, w_poolwindow = self.pool_size
    dA = np.zeros(shape=x.shape)  # dC/dA --> gradient of the input
    for n in range(n_batch):
        for ch in range(ch_x):
            curr_y = out_y = 0
            while curr_y + h_poolwindow <= h_x:
                curr_x = out_x = 0
                while curr_x + w_poolwindow <= w_x:
                    # Route the upstream gradient to the max element of each window
                    window_slice = x[n, ch, curr_y:curr_y + h_poolwindow, curr_x:curr_x + w_poolwindow]
                    i, j = np.unravel_index(np.argmax(window_slice), window_slice.shape)
                    dA[n, ch, curr_y + i, curr_x + j] = dA_prev[n, ch, out_y, out_x]
                    curr_x += self.stride
                    out_x += 1
                curr_y += self.stride
                out_y += 1
    return dA
Can I vectorize it?
Upvotes: 4
Views: 5246
Reputation: 165
I managed to solve it by changing the forward propagation as follows (note that this relies on the stride being equal to the pool-window size, so that the windows don't overlap):
# inside run(), after computing out_h and out_w as before:
windows = as_strided(x,
                     shape=(n_batch, ch_x, out_h, out_w, *self.pool_size),
                     strides=(x.strides[0], x.strides[1],
                              self.stride * x.strides[2],
                              self.stride * x.strides[3],
                              x.strides[2], x.strides[3]))
out = np.max(windows, axis=(4, 5))
# Tile each max back up to window size and compare against the input
# (use the pool-window sizes here rather than a hard-coded 2)
maxs = out.repeat(h_poolwindow, axis=2).repeat(w_poolwindow, axis=3)
x_window = x[:, :, :out_h * self.stride, :out_w * self.stride]
mask = np.equal(x_window, maxs).astype(int)
if is_training:
    self.cache['X'] = x
    self.cache['mask'] = mask
return out
and changing the backpropagation to:
def backprop(self, dA_prev):
    x = self.cache['X']
    mask = self.cache['mask']
    h_poolwindow, w_poolwindow = self.pool_size
    # Upsample the upstream gradient to input resolution, keep only max positions
    dA = dA_prev.repeat(h_poolwindow, axis=2).repeat(w_poolwindow, axis=3)
    dA = np.multiply(dA, mask)
    # Pad back to the full input shape in case pooling didn't cover the edges
    pad = np.zeros(x.shape)
    pad[:, :, :dA.shape[2], :dA.shape[3]] = dA
    return pad
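As an aside: since the mask/repeat trick relies on non-overlapping windows, here is a sketch of a fully vectorized alternative that also works when the stride differs from the pool size. It scatters each upstream gradient onto the argmax position of its window with np.add.at (maxpool_backprop is a hypothetical standalone helper, not part of the class above):

import numpy as np
from numpy.lib.stride_tricks import as_strided

def maxpool_backprop(x, dA_prev, pool_size, stride):
    """Scatter each upstream gradient to the max position of its window."""
    n_batch, ch_x, h_x, w_x = x.shape
    h_pw, w_pw = pool_size
    out_h = (h_x - h_pw) // stride + 1
    out_w = (w_x - w_pw) // stride + 1
    windows = as_strided(x,
                         shape=(n_batch, ch_x, out_h, out_w, h_pw, w_pw),
                         strides=(x.strides[0], x.strides[1],
                                  stride * x.strides[2], stride * x.strides[3],
                                  x.strides[2], x.strides[3]))
    # Flatten each window and locate its maximum
    flat_idx = windows.reshape(n_batch, ch_x, out_h, out_w, -1).argmax(axis=-1)
    i, j = np.unravel_index(flat_idx, (h_pw, w_pw))
    # Absolute input coordinates of each window's maximum
    n_i, c_i, y_i, x_i = np.indices((n_batch, ch_x, out_h, out_w))
    rows = y_i * stride + i
    cols = x_i * stride + j
    dA = np.zeros_like(x, dtype=dA_prev.dtype)
    # np.add.at accumulates, which is the correct behavior when windows overlap
    np.add.at(dA, (n_i, c_i, rows, cols), dA_prev)
    return dA

Compared with the mask approach, this pays one argmax per window, but it accumulates gradients correctly for overlapping windows and routes each gradient to a single position even when a window contains tied maxima (the mask approach duplicates gradient across ties).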
Upvotes: 4