Reputation: 3310
I am at the point where I think I am done porting my application to OpenGL 3.3 and a #version 330 shader. However, all I get is a black screen (just the glClear() color), and I can't spot what's wrong. I'll try to give a walkthrough of my code.
This is my rendering function:
void render() {
    glMatrixMode(GL_MODELVIEW);
    glPushMatrix();
    applyCameraPerspective();
    glUseProgram(prog);
    setUniforms();
    for (ObjectList::iterator it = objectList.begin(); it != objectList.end(); it++) {
        GLuint usedTexture = glTextureList.find((*it)->getTextureName())->second;
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, usedTexture);
        glUniform1ui(textureLoc, 0);
        (*it)->render();
        //glBindTexture(GL_TEXTURE_2D, 0);
    }
    glPopMatrix();
    frameCount++;
}
usedTexture is the GLuint texture handle.
*it is an object of class RenderObject, whose render() function looks like this:
void render() {
    if (totalTriangleCount > 0) {
        glBindBuffer(GL_ARRAY_BUFFER, vbo);
        //glEnableClientState(GL_VERTEX_ARRAY);
        //glEnableClientState(GL_NORMAL_ARRAY);
        //glEnableClientState(GL_TEXTURE_COORD_ARRAY);
        //glVertexPointer(3, GL_FLOAT, 32, BUFFER_OFFSET(0));
        //glNormalPointer(GL_FLOAT, 32, BUFFER_OFFSET(12));
        //glTexCoordPointer(2, GL_FLOAT, 32, BUFFER_OFFSET(24));
        glDrawArrays(GL_TRIANGLES, 0, totalTriangleCount * 3);
        //glDisableClientState(GL_VERTEX_ARRAY);
        //glDisableClientState(GL_NORMAL_ARRAY);
        //glDisableClientState(GL_TEXTURE_COORD_ARRAY);
        glBindBuffer(GL_ARRAY_BUFFER, 0);
    }
}
You can see that the glXXXPointer() calls and the client-state toggles have been commented out for the new version. Strangely, when I uncomment them I do get geometry rendered, but with what looks like uninterpolated texture coordinates. However, the point of the layout qualifiers (see further below) should be that these calls are no longer required.
The objects have had an initToGL() function called on them beforehand, which looks like this:
void initToGL(GLuint positionLoc, GLuint normalLoc, GLuint texCoordLoc) {
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    totalTriangleCount = 0;
    for (FaceList::iterator it = faceList.begin(); it != faceList.end(); it++) {
        totalTriangleCount += (*it)->getTriangleCount();
    }
    // Per triangle: 36 bytes for positions, 36 for normals, 24 for texture coordinates (32 bytes per vertex)
    UINT bytesNeeded = 3 * (12 + 12 + 8) * totalTriangleCount;
    glBufferData(GL_ARRAY_BUFFER, bytesNeeded, NULL, GL_STATIC_DRAW_ARB);
    glVertexAttribPointer(positionLoc, 3, GL_FLOAT, GL_FALSE, 32, BUFFER_OFFSET(0));  // Position
    glVertexAttribPointer(normalLoc, 3, GL_FLOAT, GL_TRUE, 32, BUFFER_OFFSET(12));    // Normal
    glVertexAttribPointer(texCoordLoc, 2, GL_FLOAT, GL_FALSE, 32, BUFFER_OFFSET(24)); // Texture coordinates
    GLintptr currentOffset = 0;
    for (FaceList::iterator fIt = faceList.begin(); fIt != faceList.end(); fIt++) {
        std::list<std::vector<UINT>> indicesList = (*fIt)->getTriangleVertexIndices();
        for (std::list<std::vector<UINT>>::iterator triIt = indicesList.begin(); triIt != indicesList.end(); triIt++) {
            // Get vertex positions
            Vector3f vertex1 = (*fIt)->getVertex(triIt->at(0));
            Vector3f vertex2 = (*fIt)->getVertex(triIt->at(1));
            Vector3f vertex3 = (*fIt)->getVertex(triIt->at(2));
            // Calculate normal; keep in mind these vertices are clockwise!
            Vector3f normal = (vertex3 - vertex1).crossProduct(vertex2 - vertex1).normalize();
            // Get texture coordinates
            Vector2f textureCoordinate1 = (*fIt)->getTextureCoordinates(triIt->at(0));
            Vector2f textureCoordinate2 = (*fIt)->getTextureCoordinates(triIt->at(1));
            Vector2f textureCoordinate3 = (*fIt)->getTextureCoordinates(triIt->at(2));
            // Vertex 1
            glBufferSubData(GL_ARRAY_BUFFER, currentOffset, 12, (GLvoid*) &vertex1);
            currentOffset += 12;
            glBufferSubData(GL_ARRAY_BUFFER, currentOffset, 12, (GLvoid*) &normal);
            currentOffset += 12;
            glBufferSubData(GL_ARRAY_BUFFER, currentOffset, 8, (GLvoid*) &textureCoordinate1);
            currentOffset += 8;
            // Vertex 2
            glBufferSubData(GL_ARRAY_BUFFER, currentOffset, 12, (GLvoid*) &vertex2);
            currentOffset += 12;
            glBufferSubData(GL_ARRAY_BUFFER, currentOffset, 12, (GLvoid*) &normal);
            currentOffset += 12;
            glBufferSubData(GL_ARRAY_BUFFER, currentOffset, 8, (GLvoid*) &textureCoordinate2);
            currentOffset += 8;
            // Vertex 3
            glBufferSubData(GL_ARRAY_BUFFER, currentOffset, 12, (GLvoid*) &vertex3);
            currentOffset += 12;
            glBufferSubData(GL_ARRAY_BUFFER, currentOffset, 12, (GLvoid*) &normal);
            currentOffset += 12;
            glBufferSubData(GL_ARRAY_BUFFER, currentOffset, 8, (GLvoid*) &textureCoordinate3);
            currentOffset += 8;
        }
    }
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}
The parameters passed to this function are the attribute locations retrieved from the shader (see further below). The only change here is the addition of the three calls to glVertexAttribPointer().
applyCameraPerspective():
void applyCameraPerspective() {
    glRotatef(cam->getPitch(), 1.0f, 0.0f, 0.0f);
    glRotatef(cam->getRoll(), 0.0f, 0.0f, 1.0f);
    glRotatef(cam->getYaw(), 0.0f, 1.0f, 0.0f);
    glTranslatef(-cam->x(), -cam->y(), -cam->z());
}
This still relies on OpenGL's matrix stack.
setUniforms():
void setUniforms() {
    GLfloat m[16];
    glGetFloatv(GL_PROJECTION_MATRIX, &m[0]);
    glUniformMatrix4fv(projectionMatrixLoc, 1, GL_FALSE, &m[0]);
    glGetFloatv(GL_MODELVIEW_MATRIX, &m[0]);
    //for (int y = 0; y < 4; y++) {
    //    for (int x = 0; x < 4; x++) {
    //        std::cout << std::showpoint << m[4 * x + y] << std::noshowpoint << " ";
    //    }
    //    std::cout << std::endl;
    //}
    glUniformMatrix4fv(modelViewMatrixLoc, 1, GL_FALSE, &m[0]);
    // Error: GL_NORMAL_MATRIX is not a thing. :-(
    //glGetFloatv(GL_NORMAL_MATRIX, 1, GL_FALSE, &m[0]);
    // HACKHACK: As long as we don't do scale transformations (which we don't) we will be fine doing just this.
    m[12] = 0;
    m[13] = 0;
    m[14] = 0;
    glUniformMatrix4fv(normalMatrixLoc, 1, GL_FALSE, &m[0]);
}
You can see that I added debug output to check the model-view matrix; it is as expected.
My code for loading textures has not changed. My code for loading the shader program is executed before the objects are uploaded and looks like this:
bool setupShader() {
    bool success = true;
    // Vertex shader
    vs = glCreateShader(GL_VERTEX_SHADER);
    std::string vsString = TextFileReader().readWhole("Shader/basic_fog.vert");
    const char* vsChars = vsString.c_str();
    glShaderSource(vs, 1, &vsChars, NULL);
    std::cout << "Compiling vertex shader." << std::endl;
    glCompileShader(vs);
    success &= printShaderInfo(vs);
    // Fragment shader
    fs = glCreateShader(GL_FRAGMENT_SHADER);
    std::string fsString = TextFileReader().readWhole("Shader/basic_fog.frag");
    const char* fsChars = fsString.c_str();
    glShaderSource(fs, 1, &fsChars, NULL);
    std::cout << "Compiling fragment shader." << std::endl;
    glCompileShader(fs);
    success &= printShaderInfo(fs);
    // Bundle them into a program
    prog = glCreateProgram();
    glAttachShader(prog, vs);
    glAttachShader(prog, fs);
    glBindFragDataLocation(prog, 0, "fragColor");
    glLinkProgram(prog);
    success &= printProgramInfo(prog);
    if (success) {
        // These are all GLuints
        positionLoc = glGetAttribLocation(prog, "position");
        normalLoc = glGetAttribLocation(prog, "normal");
        texCoordLoc = glGetAttribLocation(prog, "texCoordinates");
        projectionMatrixLoc = glGetUniformLocation(prog, "projectionMatrix");
        modelViewMatrixLoc = glGetUniformLocation(prog, "modelViewMatrix");
        normalMatrixLoc = glGetUniformLocation(prog, "normalMatrix");
        textureLoc = glGetUniformLocation(prog, "colorTexture");
    }
    return success;
}
Notice that I never call glBindAttribLocation(), because I use layout qualifiers (see the shaders below). This function returns true for the following shader code, which is what I use.
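(For comparison only, not code I actually use: the equivalent without layout qualifiers would be to bind the locations explicitly, roughly like this.)
// Sketch: explicit attribute binding instead of layout qualifiers.
// Locations must be bound before glLinkProgram() takes effect.
glBindAttribLocation(prog, 0, "position");
glBindAttribLocation(prog, 1, "normal");
glBindAttribLocation(prog, 2, "texCoordinates");
glLinkProgram(prog);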
Vertex shader:
#version 330
#extension GL_ARB_separate_shader_objects : enable
layout(location = 0) in vec3 position;
layout(location = 0) out vec3 fragPosition;
layout(location = 1) in vec3 normal;
layout(location = 1) out vec3 fragNormal;
layout(location = 2) in vec2 texCoordinates;
layout(location = 2) out vec2 fragTexCoordinates;
uniform mat4 modelViewMatrix, projectionMatrix, normalMatrix;
void main() {
    fragPosition = (modelViewMatrix * vec4(position, 1)).xyz;
    fragNormal = (normalMatrix * vec4(normal, 1)).xyz;
    fragTexCoordinates = vec2(texCoordinates);
    gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1);
}
Fragment shader:
#version 330
#extension GL_ARB_separate_shader_objects : enable
layout(location = 0) in vec3 position;
layout(location = 1) in vec3 normal;
layout(location = 2) in vec2 textureCoordinates;
layout(location = 0) out vec4 fragColor;
uniform sampler2D colorTexture;
void main() {
    fragColor = texture2D(colorTexture, textureCoordinates);
}
I have tried setting fragColor to plain red: no result. Not a single pixel seems to make it through the fragment shader.
That is a lot of code. I'd greatly appreciate someone taking a look at it, especially with regard to whether I'm simply using certain functions incorrectly.
Upvotes: 1
Views: 614
Reputation: 162327
One of the most obvious problems with your code is the mishmash of deprecated OpenGL, like the fixed-function matrix stack, and modern features, like self-defined uniforms. Take stuff like this:
glGetFloatv(GL_PROJECTION_MATRIX, &m[0]);
glUniformMatrix4fv(projectionMatrixLoc, 1, GL_FALSE, &m[0]);
This indicates that you're using the old-style fixed-function matrix functions to calculate the matrix, querying it from OpenGL, and feeding it back into a uniform. You really shouldn't do that. Use a real matrix math library instead, like GLM or Eigen.
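With GLM, for example, you build the matrices on the CPU and upload them directly, never touching the fixed-function stack. This is only a sketch: aspect, pitch, roll, yaw and camX/camY/camZ stand in for whatever your camera provides; the *Loc variables are the ones from your code.
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>

// Build the matrices yourself instead of querying them from OpenGL.
glm::mat4 projection = glm::perspective(glm::radians(60.0f), aspect, 0.1f, 100.0f);
glm::mat4 modelView  = glm::rotate(glm::mat4(1.0f), glm::radians(pitch), glm::vec3(1, 0, 0));
modelView = glm::rotate(modelView, glm::radians(roll), glm::vec3(0, 0, 1));
modelView = glm::rotate(modelView, glm::radians(yaw),  glm::vec3(0, 1, 0));
modelView = glm::translate(modelView, glm::vec3(-camX, -camY, -camZ));
// Proper normal matrix: inverse transpose of the model-view matrix
// (also correct in the presence of non-uniform scaling).
glm::mat4 normalMatrix = glm::transpose(glm::inverse(modelView));

glUniformMatrix4fv(projectionMatrixLoc, 1, GL_FALSE, glm::value_ptr(projection));
glUniformMatrix4fv(modelViewMatrixLoc,  1, GL_FALSE, glm::value_ptr(modelView));
glUniformMatrix4fv(normalMatrixLoc,     1, GL_FALSE, glm::value_ptr(normalMatrix));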
Then of course glEnableClientState will not work with modern generic vertex attributes. I do not see any calls to glEnableVertexAttribArray, so if those are missing it's no surprise that nothing gets drawn.
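As a rough sketch, assuming the locations 0/1/2 from your layout qualifiers and the 32-byte interleaved layout of your initToGL(), the attribute setup would look something like this (in a core profile you also need a vertex array object):
// One-time setup: record the attribute layout in a VAO and enable the arrays.
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);

glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 32, BUFFER_OFFSET(0));   // position
glVertexAttribPointer(1, 3, GL_FLOAT, GL_TRUE,  32, BUFFER_OFFSET(12));  // normal
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 32, BUFFER_OFFSET(24));  // texture coordinates
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);

// Later, when drawing:
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, totalTriangleCount * 3);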
Upvotes: 4