Reputation: 1320
I'm trying to better understand OpenGL's basic matrix transformation logic. This is 2D code copied from a textbook and modified to handle 3D; it "sort of works", but the final result is visually different when I do the matrix multiplication myself vs. using glMultMatrixf.
The lines marked 'XXX' allow flipping between 'my multiplication' and 'OpenGL multiplication'. I've already tried the obvious things (e.g. row- vs. column-major order, order of transforms, etc.).
If anyone can enlighten me as to what I'm doing wrong compared to OpenGL, I'd appreciate it.
#include <iostream>
#include <math.h>
#include "glut.h"
#include "vector3.h"
typedef GLfloat Matrix4x4[4][4];
Matrix4x4 matComposite;
void matrix4x4SetIdentity(Matrix4x4 matIdent4x4){
    GLint row, col;
    for(row = 0; row < 4; row++){
        for(col = 0; col < 4; col++){
            matIdent4x4[row][col] = (row == col);
        }
    }
}
// m2 = m1 * m2 (m2 is pre-multiplied by m1), in this code's row-major convention
void matrix4x4PreMultiply(Matrix4x4 m1, Matrix4x4 m2){
    GLint row, col;
    Matrix4x4 matTemp;
    for(row = 0; row < 4; row++){
        for(col = 0; col < 4; col++){
            matTemp[row][col] = m1[row][0] * m2[0][col] +
                                m1[row][1] * m2[1][col] +
                                m1[row][2] * m2[2][col] +
                                m1[row][3] * m2[3][col];
        }
    }
    for(row = 0; row < 4; row++){
        for(col = 0; col < 4; col++){
            m2[row][col] = matTemp[row][col];
        }
    }
}
vector3 matrixMult(GLfloat x, GLfloat y, GLfloat z){
    GLfloat tempX = matComposite[0][0] * x + matComposite[0][1] * y + matComposite[0][2] * z + matComposite[0][3];
    GLfloat tempY = matComposite[1][0] * x + matComposite[1][1] * y + matComposite[1][2] * z + matComposite[1][3];
    GLfloat tempZ = matComposite[2][0] * x + matComposite[2][1] * y + matComposite[2][2] * z + matComposite[2][3];
    GLfloat tempW = matComposite[3][0] + matComposite[3][1] + matComposite[3][2] + matComposite[3][3];
    // XXX return vector3(tempX/tempW, tempY/tempW, tempZ/tempW); // XXX
    return vector3(x, y, z);
}
void render() {
    // my version of viewing/projection multiplication
    GLfloat mvmX[4][4] = {{0.948683,  0.095346,   -0.301511, 0.000000},
                          {0.000000,  0.953463,    0.301511, 0.000000},
                          {0.316228, -0.286039,    0.904534, 0.000000},
                          {0.000004,  0.000000, -132.664993, 1.000000}};
    GLfloat pmX[4][4]  = {{1.500000,  0.000000,    0.000000,  0.000000},
                          {0.000000,  1.500000,    0.000000,  0.000000},
                          {0.000000,  0.000000,   -1.015113, -1.000000},
                          {0.000000,  0.000000,   -3.022670,  0.000000}};
    matrix4x4SetIdentity(matComposite);
    matrix4x4PreMultiply(pmX, matComposite);
    matrix4x4PreMultiply(mvmX, matComposite);

    // OpenGL's version of viewing/projection multiplication
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    GLfloat mvm[] = {0.948683,  0.095346,   -0.301511, 0.000000,
                     0.000000,  0.953463,    0.301511, 0.000000,
                     0.316228, -0.286039,    0.904534, 0.000000,
                     0.000004,  0.000000, -132.664993, 1.000000};
    GLfloat pm[]  = {1.500000,  0.000000,    0.000000,  0.000000,
                     0.000000,  1.500000,    0.000000,  0.000000,
                     0.000000,  0.000000,   -1.015113, -1.000000,
                     0.000000,  0.000000,   -3.022670,  0.000000};
    glMultMatrixf(pm);  // XXX
    glMultMatrixf(mvm); // XXX

    // draw a shape
    glColor3f(1, 0, 0);
    glBegin(GL_POLYGON);
    vector3 vpt = matrixMult(0, 0, 0);
    glVertex3f(vpt.x, vpt.y, vpt.z);
    vpt = matrixMult(0, 50, 0);
    glVertex3f(vpt.x, vpt.y, vpt.z);
    vpt = matrixMult(50, 50, 0);
    glVertex3f(vpt.x, vpt.y, vpt.z);
    vpt = matrixMult(0, 50, 0);
    glVertex3f(vpt.x, vpt.y, vpt.z);
    glEnd();
}
void display(void) {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    render();
    glutSwapBuffers();
}
int main(int argc, char **argv){
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
    glutInitWindowSize(500, 500);
    glutInitWindowPosition(100, 100);
    int windowHandle = glutCreateWindow("Testing MVM and PM Matrices");
    glutSetWindow(windowHandle);
    glutDisplayFunc(display);
    glutMainLoop();
    return 0;
}
Upvotes: 1
Views: 80
Reputation: 45342
From this

matTemp[row][col]

I deduce that you use row-major storage order for your operations. However, OpenGL uses column-major order, so you have to transpose the matrices before you can use them with glLoadMatrix.
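For illustration, a minimal transpose helper could look like the sketch below (matrix4x4Transpose is my name, not part of the question's code; it reuses the question's Matrix4x4 typedef):

void matrix4x4Transpose(const Matrix4x4 src, Matrix4x4 dst){
    GLint row, col;
    for(row = 0; row < 4; row++){
        for(col = 0; col < 4; col++){
            // swapping the indices converts between row-major and column-major storage
            dst[col][row] = src[row][col];
        }
    }
}

The transposed matrix can then be handed to glLoadMatrixf or glMultMatrixf as &dst[0][0].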
Also, your input matrices don't match your matrix order:

GLfloat pmX[4][4] = {{1.500000, 0.000000,  0.000000,  0.000000},
                     {0.000000, 1.500000,  0.000000,  0.000000},
                     {0.000000, 0.000000, -1.015113, -1.000000},
                     {0.000000, 0.000000, -3.022670,  0.000000}};

In your row-major convention, the projection matrix should have (0, 0, -1, 0) as its last row, but your data is transposed relative to that, so your code will see (0, 0, -3.02267, 0) as the last row. So you either transpose the input first, apply your matrix operations, and transpose the result back for OpenGL - or you change your matrix operations to match OpenGL's and don't transpose anything.
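As a sketch of that second option (my rewrite, not code from the question): keep the arrays exactly as glMultMatrixf expects them and read the elements transposed in the vertex transform. Note that w has to be a full dot product with (x, y, z, 1); the question's version dropped the x, y, z factors when computing tempW.

vector3 matrixMult(GLfloat x, GLfloat y, GLfloat z){
    // matComposite is stored column-major, as OpenGL expects:
    // matComposite[col][row] holds the element in mathematical row 'row', column 'col'
    GLfloat tempX = matComposite[0][0]*x + matComposite[1][0]*y + matComposite[2][0]*z + matComposite[3][0];
    GLfloat tempY = matComposite[0][1]*x + matComposite[1][1]*y + matComposite[2][1]*z + matComposite[3][1];
    GLfloat tempZ = matComposite[0][2]*x + matComposite[1][2]*y + matComposite[2][2]*z + matComposite[3][2];
    GLfloat tempW = matComposite[0][3]*x + matComposite[1][3]*y + matComposite[2][3]*z + matComposite[3][3];
    // perspective divide
    return vector3(tempX/tempW, tempY/tempW, tempZ/tempW);
}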
Note that with modern GL, you can easily use any matrix order you like. The call that sets a uniform matrix has a transpose parameter which tells the GL which of the two orders you use.
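For example, with a shader-based pipeline (program and the uniform name u_mvp are illustrative, and mat is assumed to be a row-major Matrix4x4):

GLint loc = glGetUniformLocation(program, "u_mvp");
// GL_TRUE asks the GL to transpose the row-major data on upload;
// pass GL_FALSE if the data is already column-major
glUniformMatrix4fv(loc, 1, GL_TRUE, &mat[0][0]);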
Upvotes: 1