Reputation: 1272
I have the following code that rasterizes an std::wstring (content) into a bitmap, which is then saved to an image file.
#include <stb_image_write.h>
#include <stb_truetype.h>
unsigned char buffer[24 << 20];
stbtt_fontinfo font;
FILE *fontFile = fopen("../assets/fonts/unibody_8.ttf", "rb");
fread(buffer, 1, sizeof(buffer), fontFile);
fclose(fontFile);
stbtt_InitFont(&font, buffer, 0);
float scale = stbtt_ScaleForPixelHeight(&font, 48 * 2);
int ascent, descent;
stbtt_GetFontVMetrics(&font, &ascent, &descent, 0);
int baseline = (int)(ascent * scale);
int width = 0;
for (const auto &character : content) {
    int advance, leftSideBearing;
    stbtt_GetCodepointHMetrics(&font, character, &advance, &leftSideBearing);
    width += (int)(advance * scale);
}
// descent is negative, so ascent - descent gives the total pixel height
int height = (int)((ascent - descent) * scale);
// One byte per pixel: a single-channel (grayscale) bitmap
std::vector<unsigned char> pixels((size_t)width * (size_t)height, 0);
float xpos = 0.0f;
int characterIndex = 0;
while (content[characterIndex]) {
    int advance, lsb, x0, y0, x1, y1;
    float x_shift = xpos - (float)floor(xpos);
    stbtt_GetCodepointHMetrics(
        &font, content[characterIndex], &advance, &lsb);
    stbtt_GetCodepointBitmapBoxSubpixel(
        &font,
        content[characterIndex],
        scale,
        scale,
        x_shift,
        0,
        &x0,
        &y0,
        &x1,
        &y1);
    // Offset of the glyph's top-left corner inside the pixel buffer
    auto stride = width * (baseline + y0) + (int)xpos + x0;
    stbtt_MakeCodepointBitmapSubpixel(
        &font,
        pixels.data() + stride,
        x1 - x0,
        y1 - y0,
        width,
        scale,
        scale,
        x_shift,
        0,
        content[characterIndex]);
    xpos += (advance * scale);
    if (content[characterIndex + 1]) {
        int kernAdvance = stbtt_GetCodepointKernAdvance(
            &font, content[characterIndex], content[characterIndex + 1]);
        xpos += scale * kernAdvance;
    }
    ++characterIndex;
}
// This step works fine, which means the data in pixels is good
stbi_write_png("image.png", width, height, 1, pixels.data(), 0);
What I want to do now is load the bitmap into an OpenGL texture, but this step crashes the app.
uint32_t id = 0;
glGenTextures(1, &id);
glBindTexture(GL_TEXTURE_2D, id);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// This crashes the application, I guess it's because the data in pixels is not valid for an OpenGL texture
glTexImage2D(
    GL_TEXTURE_2D,
    0,
    GL_RGBA,
    width,
    height,
    0,
    GL_RGBA,
    GL_UNSIGNED_BYTE,
    pixels.data());
I tried to catch the exception but the program just silently crashes, so I'm not sure how to handle this.
Upvotes: 2
Views: 2447
Reputation: 45332
This
std::vector<unsigned char> pixels((size_t)width * (size_t)height, 0);
sets up your pixel data as a monochrome width * height bitmap with 8 bits per pixel.
But here:
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels.data());
you tell the GL that you have 32 bits per pixel: 8 bits each for red, green, blue and alpha. As a result, the GL will read 4 times as much memory as you provide, running past the end of your buffer and, if you're lucky, crashing the application with a segmentation fault at some point afterwards.
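For instance, with a hypothetical 400 x 96 bitmap the buffer holds 400 * 96 = 38,400 bytes, but with GL_RGBA the GL tries to read 400 * 96 * 4 = 153,600 bytes.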
You did not specify the GL version you are using. For modern GL, you would use GL_RED as the texture format and handle the conversion to RGBA either directly via your shader, or via texture swizzle settings. For ancient legacy GL, you would probably have to use the GL_LUMINANCE format.
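For a core-profile context (GL 3.0+), the upload could look roughly like this; it is only a sketch that reuses width, height and pixels from your code, and the swizzle part is optional (it just replicates the single channel into all four components when sampling):
glTexImage2D(
    GL_TEXTURE_2D,
    0,
    GL_R8,            // single-channel internal format
    width,
    height,
    0,
    GL_RED,           // the client data has one byte per pixel
    GL_UNSIGNED_BYTE,
    pixels.data());
// Optional (GL 3.3+ / ARB_texture_swizzle): mirror red into g, b and a
GLint swizzle[4] = {GL_RED, GL_RED, GL_RED, GL_RED};
glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_RGBA, swizzle);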
You must also be aware that the default unpack row alignment for the GL is 4 bytes, so if your width is not a multiple of 4, you also have to explicitly set glPixelStorei(GL_UNPACK_ALIGNMENT, 1) to match your data alignment.
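In code that is a single call before the upload (again just a sketch, matching the tightly packed single-channel pixel data above):
// Rows are tightly packed at 1 byte per pixel, so drop the default 4-byte row alignment
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);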
Upvotes: 3