Reputation: 25929
I'm trying to implement Gaussian blur in C++. However, it looks like regardless of the radius, I always get a blur with radius ~3. I suppose this may have something to do with alpha premultiplying, but I'm unable to figure out what I am doing wrong.
The implementation looks like the following:
extern "C" void __cdecl GaussianBlur(unsigned char* bitmapData,
int stride,
int width,
int height,
int radius)
{
// Gaussian kernel
int diameter = 2 * radius + 1;
std::shared_ptr<float[]> kernel = generateGaussKernel(diameter);
// Blur
auto copy = std::shared_ptr<unsigned char[]>(new unsigned char[height * stride]);
for (int y = 0; y < height; y++)
memcpy(copy.get() + y * stride, bitmapData + y * stride, width * BYTES_PER_PIXEL);
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
{
Color sum;
float weight = 0.0f;
int count = 0;
int xStart = x - radius;
int xEnd = x + radius;
int yStart = y - radius;
int yEnd = y + radius;
for (int x1 = xStart; x1 <= xEnd; x1++)
for (int y1 = yStart; y1 <= yEnd; y1++)
{
// Find weight
int kernelX = x1 - xStart;
int kernelY = y1 - yStart;
float kernelValue = kernel[kernelY * diameter + kernelX];
// Premultiply alpha
Color color;
if (x1 >= 0 && x1 < width && y1 >= 0 && y1 < height)
color = getColor(copy.get(), stride, x1, y1);
else
color = Color(0);
sum.R += (float)(color.R * color.A) * kernelValue;
sum.G += (float)(color.G * color.A) * kernelValue;
sum.B += (float)(color.B * color.A) * kernelValue;
sum.A += color.A * kernelValue;
weight += kernelValue;
count++;
}
if (count > 0)
{
Color result;
result.A = sum.A / weight;
if (result.A > 0)
{
result.R = ((sum.R / weight) / result.A);
result.G = ((sum.G / weight) / result.A);
result.B = ((sum.B / weight) / result.A);
}
setColor(bitmapData, stride, x, y, result);
}
}
}
Gauss kernel generation is implemented as follows:
std::shared_ptr<float[]> generateGaussKernel(int diameter)
{
    float sigma = 1;
    std::shared_ptr<float[]> kernel(new float[diameter * diameter]);
    int mean = diameter / 2;
    float sum = 0.0; // For accumulating the kernel values
    for (int x = 0; x < diameter; ++x)
        for (int y = 0; y < diameter; ++y) {
            kernel[y * diameter + x] = (float)(exp(-0.5 * (pow((x - mean) / sigma, 2.0) + pow((y - mean) / sigma, 2.0))) / (2 * M_PI * sigma * sigma));

            // Accumulate the kernel values
            sum += kernel[y * diameter + x];
        }

    // Normalize the kernel
    for (int x = 0; x < diameter; ++x)
        for (int y = 0; y < diameter; ++y)
            kernel[y * diameter + x] /= sum;

    return kernel;
}
And utils (if relevant):
const int BYTES_PER_PIXEL = 4;
const int B_OFFSET = 0;
const int G_OFFSET = 1;
const int R_OFFSET = 2;
const int ALPHA_OFFSET = 3;
inline float getAlpha(unsigned char* bitmap, int stride, int x, int y)
{
    return bitmap[y * stride + x * BYTES_PER_PIXEL + ALPHA_OFFSET] / 255.0f;
}

inline Color getColor(unsigned char* bitmap, int stride, int x, int y)
{
    Color result;
    result.A = bitmap[y * stride + x * BYTES_PER_PIXEL + ALPHA_OFFSET] / 255.0f;
    result.R = bitmap[y * stride + x * BYTES_PER_PIXEL + R_OFFSET];
    result.G = bitmap[y * stride + x * BYTES_PER_PIXEL + G_OFFSET];
    result.B = bitmap[y * stride + x * BYTES_PER_PIXEL + B_OFFSET];
    return result;
}
What am I doing wrong?
Edit: How the function is called:
[DllImport("Animator.Engine.Native.dll", CallingConvention = CallingConvention.Cdecl)]
public static extern void GaussianBlur(IntPtr bitmapData,
    int stride,
    int width,
    int height,
    int radius);

// (...)

internal override void Apply(BitmapBuffer framebuffer, BitmapBuffer backBuffer, BitmapBuffer frontBuffer, BitmapBufferRepository repository)
{
    var data = framebuffer.Bitmap.LockBits(new System.Drawing.Rectangle(0, 0, framebuffer.Bitmap.Width, framebuffer.Bitmap.Height), System.Drawing.Imaging.ImageLockMode.ReadWrite, System.Drawing.Imaging.PixelFormat.Format32bppArgb);

    ImageProcessing.GaussianBlur(data.Scan0,
        data.Stride,
        data.Width,
        data.Height,
        Radius);

    framebuffer.Bitmap.UnlockBits(data);
}
Edit 2
The (faulty) effect:
Edit 3
The whole project is open source; link to the repo: https://gitlab.com/spook/Animator.git
If you want to test it, run the Animator.Editor/Animator.Editor project with the following example XML:
<Movie>
    <Movie.Config>
        <MovieConfig FramesPerSecond="30" Height="200" Width="200" />
    </Movie.Config>
    <Scene Name="Scene1" Background="White">
        <Rectangle Position="0;0" Width="20" Height="20" Brush="Red">
            <Rectangle.Effects>
                <GaussianBlurEffect Radius="7" />
            </Rectangle.Effects>
        </Rectangle>
    </Scene>
</Movie>
Upvotes: 0
Views: 1267
Reputation: 25929
The error was in the way the Gaussian kernel was generated:
std::shared_ptr<float[]> generateGaussKernel(int diameter)
{
    float sigma = 1;
With a constant sigma, the shape of the Gaussian bell was always the same regardless of the kernel size: with sigma = 1 the weights become negligible roughly three pixels from the center, which is why the blur always looked like radius ~3. It was enough to fix it to:
std::shared_ptr<float[]> generateGaussKernel(int diameter)
{
    float sigma = diameter / 4.0f;
...to make it work.
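For completeness, below is a minimal sketch of a kernel generator with sigma tied to the kernel size. The diameter / 4.0f heuristic is the one from the fix above; the function name generateGaussKernelFixed and the use of std::vector instead of std::shared_ptr<float[]> are my own choices to keep the example self-contained, not necessarily what the repository uses:

#include <cmath>
#include <vector>

// Sketch only: builds a normalized (diameter x diameter) Gaussian kernel
// whose sigma scales with the kernel size instead of staying fixed at 1.
std::vector<float> generateGaussKernelFixed(int diameter)
{
    float sigma = diameter / 4.0f;              // roughly radius / 2
    std::vector<float> kernel(diameter * diameter);
    int mean = diameter / 2;
    float sum = 0.0f;

    for (int y = 0; y < diameter; ++y)
        for (int x = 0; x < diameter; ++x)
        {
            float dx = (x - mean) / sigma;
            float dy = (y - mean) / sigma;
            // The 1 / (2 * pi * sigma^2) factor is omitted because it cancels
            // out during normalization below.
            kernel[y * diameter + x] = std::exp(-0.5f * (dx * dx + dy * dy));
            sum += kernel[y * diameter + x];
        }

    // Normalize the kernel so the weights sum to 1
    for (float& value : kernel)
        value /= sum;

    return kernel;
}

The exact scaling rule is not critical (sigma = radius / 2.0f behaves essentially the same); the important part is that sigma grows with the requested radius instead of staying constant.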
Upvotes: 1