bfops
bfops

Reputation: 5678

what could be causing this opengl segfault in glBufferSubData?

I've been whittling down this segfault for a while, and here's a pretty minimal reproducible example on my machine (below). I have the sinking feeling that it's a driver bug, but I'm very unfamiliar with OpenGL, so it's more likely I'm just doing something wrong.

Is this correct OpenGL 3.3 code? Should be fine regardless of platform and compiler and all that?

Here's the code, compiled with gcc -ggdb -lGL -lSDL2

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "GL/gl.h"
#include "GL/glext.h"
#include "SDL2/SDL.h"

// this section is for loading OpenGL things from later versions.

// Function-pointer types for the GL 3.x entry points that must be resolved at
// runtime (the Linux GL ABI only guarantees GL 1.2 symbols for direct linking).
typedef void (APIENTRY *GLGenVertexArrays) (GLsizei n, GLuint *arrays);
typedef void (APIENTRY *GLGenBuffers) (GLsizei n, GLuint *buffers);
typedef void (APIENTRY *GLBindVertexArray) (GLuint array);
typedef void (APIENTRY *GLBindBuffer) (GLenum target, GLuint buffer);
typedef void (APIENTRY *GLBufferData) (GLenum target, GLsizeiptr size, const GLvoid* data, GLenum usage);
typedef void (APIENTRY *GLBufferSubData) (GLenum target, GLintptr offset, GLsizeiptr size, const GLvoid* data);
// FIX: glGetBufferSubData WRITES into `data`, so the parameter must be
// non-const `GLvoid*` to match the real API signature (the original
// typedef const-qualified it).
typedef void (APIENTRY *GLGetBufferSubData) (GLenum target, GLintptr offset, GLsizeiptr size, GLvoid* data);
typedef void (APIENTRY *GLFlush) (void);
typedef void (APIENTRY *GLFinish) (void);

// Resolved by load_gl_pointers(); NULL until then.
GLGenVertexArrays glGenVertexArrays = NULL;
GLGenBuffers glGenBuffers = NULL;
GLBindVertexArray glBindVertexArray = NULL;
GLBindBuffer glBindBuffer = NULL;
GLBufferData glBufferData = NULL;
GLBufferSubData glBufferSubData = NULL;
GLGetBufferSubData glGetBufferSubData = NULL;

// Resolve the GL 3.x entry points through SDL's GL loader and store them in
// the file-scope function pointers above. Must be called after a GL context
// has been created and made current; pointers stay NULL for any symbol the
// driver does not export.
void load_gl_pointers() {
#define RESOLVE(ptr, type) ptr = (type)SDL_GL_GetProcAddress(#ptr)
  RESOLVE(glGenVertexArrays,  GLGenVertexArrays);
  RESOLVE(glGenBuffers,       GLGenBuffers);
  RESOLVE(glBindVertexArray,  GLBindVertexArray);
  RESOLVE(glBindBuffer,       GLBindBuffer);
  RESOLVE(glBufferData,       GLBufferData);
  RESOLVE(glBufferSubData,    GLBufferSubData);
  RESOLVE(glGetBufferSubData, GLGetBufferSubData);
#undef RESOLVE
}

// end OpenGL loading stuff


#define CAPACITY (1 << 8)

// return nonzero if an OpenGL error has occurred.
int opengl_checkerr(const char* const label) {
  GLenum err;
  switch(err = glGetError()) {
    case GL_INVALID_ENUM:
      printf("GL_INVALID_ENUM");
      break;
    case GL_INVALID_VALUE:
      printf("GL_INVALID_VALUE");
      break;
    case GL_INVALID_OPERATION:
      printf("GL_INVALID_OPERATION");
      break;
    case GL_INVALID_FRAMEBUFFER_OPERATION:
      printf("GL_INVALID_FRAMEBUFFER_OPERATION");
      break;
    case GL_OUT_OF_MEMORY:
      printf("GL_OUT_OF_MEMORY");
      break;
    case GL_STACK_UNDERFLOW:
      printf("GL_STACK_UNDERFLOW");
      break;
    case GL_STACK_OVERFLOW:
      printf("GL_STACK_OVERFLOW");
      break;
    default: return 0;
  }

  printf(" %s\n", label);
  return 1;
}

int main(int nargs, const char* args[]) {
  printf("initializing..\n");
  SDL_Init(SDL_INIT_EVERYTHING);
  SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
  SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3);
  SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);

  SDL_Window* const w =
    SDL_CreateWindow(
      "broken",
      SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
      1, 1,
      SDL_WINDOW_OPENGL
    );

  if(w == NULL) {
    printf("window was null\n");
    return 0;
  }

  SDL_GLContext context = SDL_GL_CreateContext(w);

  if(context == NULL) {
    printf("context was null\n");
    return 0;
  }

  load_gl_pointers();

  if(opengl_checkerr("init")) {
    return 1;
  }

  printf("GL_VENDOR: %s\n", glGetString(GL_VENDOR));
  printf("GL_RENDERER: %s\n", glGetString(GL_RENDERER));

  float* const vs = malloc(CAPACITY * sizeof(float));
  memset(vs, 0, CAPACITY * sizeof(float));

  unsigned int i = 0;
  while(i < 128000) {
    GLuint vertex_array;
    GLuint vertex_buffer;

    glGenVertexArrays(1, &vertex_array);
    glBindVertexArray(vertex_array);

    glGenBuffers(1, &vertex_buffer);
    glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);

    if(opengl_checkerr("gen/binding")) {
      return 1;
    }

    glBufferData(
      GL_ARRAY_BUFFER,
      CAPACITY * sizeof(float),
      vs, // initialize with `vs` just to make sure it's allocated.
      GL_DYNAMIC_DRAW
    );

    // verify that the memory is allocated by reading it back into `vs`.
    glGetBufferSubData(
      GL_ARRAY_BUFFER,
      0,
      CAPACITY * sizeof(float),
      vs
    );

    if(opengl_checkerr("creating buffer")) {
      return 1;
    }

    glFlush();
    glFinish();

    // segfault occurs here..
    glBufferSubData(
      GL_ARRAY_BUFFER,
      0,
      CAPACITY * sizeof(float),
      vs
    );

    glFlush();
    glFinish();

    ++i;
  }

  return 0;
}

When I bump the iterations from 64k to 128k, I start getting:

Program received signal SIGSEGV, Segmentation fault.
0x00007ffff754c859 in __memcpy_sse2_unaligned () from /usr/lib/libc.so.6
(gdb) bt
#0  0x00007ffff754c859 in __memcpy_sse2_unaligned () from /usr/lib/libc.so.6
#1  0x00007ffff2ea154d in ?? () from /usr/lib/xorg/modules/dri/i965_dri.so
#2  0x0000000000400e5c in main (nargs=1, args=0x7fffffffe8d8) at opengl-segfault.c:145

However, I can more than double the capacity (keeping the number of iterations at 64k) without segfaulting.

GL_VENDOR: Intel Open Source Technology Center
GL_RENDERER: Mesa DRI Intel(R) Haswell Mobile

Upvotes: 2

Views: 1125

Answers (2)

lecker909
lecker909

Reputation: 321

I had a very similar issue when calling glGenTextures and glBindTexture. I tried debugging and when i would try to step through these lines I would get something like:

Program received signal SIGSEGV, Segmentation fault. 0x00007ffff26eaaa8 in ?? () from /usr/lib/x86_64-linux-gnu/dri/i965_dri.so

Note that prior to adding textures, I could successfully run programs with VBOs and VAOs and generate meshes fine. After looking into the answer suggesting switching from the xf86-video-intel driver to the xf86-video-fbdev driver, I would advise against it. (There really isn't much information on this issue, or on users facing segfaults on Linux with integrated Intel graphics cards — perhaps a good question to ask the folks over at Intel OpenSource.)

The solution I found was to stop using freeglut. Switch to GLFW instead. Whether there actually is some problem with the Intel Linux graphics stack is beside the point; it seems the solvable problem is freeglut. If you want to use GLFW with your machine's most recent OpenGL core profile, you need just the following:

glfwWindowHint (GLFW_CONTEXT_VERSION_MAJOR, 3); glfwWindowHint (GLFW_CONTEXT_VERSION_MINOR, 0); glfwWindowHint (GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);

Setting forward compatibility (although I've seen lots of posts arguing you shouldn't do this) means Mesa is free to select a core context, provided one sets the minimum context to 3.0 or higher. I guess freeglut must be going wrong somewhere in its interactions with Mesa; if anyone can shed some light on this, that would be great!

Upvotes: 1

bfops
bfops

Reputation: 5678

This is a bug in the intel graphics drivers for Linux. Switching from the xf86-video-intel driver to xf86-video-fbdev driver solves the problem.

Edit: I'm not recommending switching to fbdev, just using it as an experiment to see whether the segfault goes away.

Upvotes: 0

Related Questions