Reputation: 37731
I need to show a square polygon at 100% of the screen's width; then, I suppose, I must zoom it (along the Z axis) until the polygon's borders are touching the screen borders.
I'm trying to achieve this using gluProject to project a 3D coordinate into a 2D screen coordinate. If the screen coordinate is either 0 or matches the width or height, then it is touching a screen border (see the small check sketched after the code below).
The problem is that something is going wrong: the outputCoords array returned by gluProject gives me the values 0,0,0.5, but my square is centered on the screen, and at Z=-5.0f!
I don't understand these values...
This is the code I'm using to obtain the 2D projection of my square polygon on the screen:
This code is in the onSurfaceCreated method of the GLSurfaceView class. Does it have to be put in another method? Where?
/////////////// NEW CODE FOR SCALING THE AR IMAGE TO THE DESIRED WIDTH /////////////////
mg.getCurrentModelView(gl);
mg.getCurrentProjection(gl);
float [] modelMatrix = new float[16];
float [] projMatrix = new float[16];
modelMatrix=mg.mModelView;
projMatrix=mg.mProjection;
int [] mView = new int[4];
// Fill this with your window width and height
mView[0] = 0;
mView[1] = 0;
mView[2] = 800; //width
mView[3] = 480; //height
// Make sure you have 3 components in this array even if the screen only needs 2
float [] outputCoords = new float[3];
// objX, objY, objZ are the coordinates of one of the borders
GLU.gluProject(-1.0f, -1.0f, 0.0f, modelMatrix, 0, projMatrix, 0, mView, 0, outputCoords, 0);
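Roughly, the check I have in mind is something like this (just a sketch; the EPS tolerance is a placeholder, not real code I have yet):
// Sketch: decide whether the projected corner lies on a screen border
final float EPS = 1.0f; // tolerance in pixels (placeholder)
boolean touchesLeft = Math.abs(outputCoords[0]) < EPS;
boolean touchesRight = Math.abs(outputCoords[0] - mView[2]) < EPS;
boolean touchesBottom = Math.abs(outputCoords[1]) < EPS;
boolean touchesTop = Math.abs(outputCoords[1] - mView[3]) < EPS;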
This is my square class:
public class Square {
//Vertex buffer
private FloatBuffer vertexBuffer;
//Texture coordinate buffer
private FloatBuffer textureBuffer;
//Texture pointer
private int[] textures = new int[3];
//The item to render
private Bitmap image;
//Vertex definition
private float vertices[] =
{
-1.0f, -1.0f, 0.0f, //Bottom Left
1.0f, -1.0f, 0.0f, //Bottom Right
-1.0f, 1.0f, 0.0f, //Top Left
1.0f, 1.0f, 0.0f //Top Right
};
private float texture[] =
{
//Mapping coordinates for the vertices
0.0f, 1.0f,
1.0f, 1.0f,
0.0f, 0.0f,
1.0f, 0.0f
};
//Initialize the buffers
public Square(Bitmap image) {
ByteBuffer byteBuf = ByteBuffer.allocateDirect(vertices.length * 4);
byteBuf.order(ByteOrder.nativeOrder());
vertexBuffer = byteBuf.asFloatBuffer();
vertexBuffer.put(vertices);
vertexBuffer.position(0);
byteBuf = ByteBuffer.allocateDirect(texture.length * 4);
byteBuf.order(ByteOrder.nativeOrder());
textureBuffer = byteBuf.asFloatBuffer();
textureBuffer.put(texture);
textureBuffer.position(0);
this.image=image;
}
//Drawing function
public void draw(GL10 gl) {
gl.glFrontFace(GL10.GL_CCW);
//gl.glEnable(GL10.GL_BLEND);
//Bind our only previously generated texture in this case
gl.glBindTexture(GL10.GL_TEXTURE_2D, textures[0]);
//Point to our vertex buffer
gl.glVertexPointer(3, GL10.GL_FLOAT, 0, vertexBuffer);
gl.glTexCoordPointer(2, GL10.GL_FLOAT, 0, textureBuffer);
//Enable vertex buffer
gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
gl.glEnableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
//Draw the vertices as triangle strip
gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, vertices.length / 3);
//Disable the client state before leaving
gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
gl.glDisableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
//gl.glDisable(GL10.GL_BLEND);
}
//Texture loading
public void loadGLTexture(GL10 gl, Context context) {
//Generate a texture pointer
gl.glGenTextures(1, textures, 0);
//...and assign it to our array
gl.glBindTexture(GL10.GL_TEXTURE_2D, textures[0]);
//Create texture filters
gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MIN_FILTER, GL10.GL_NEAREST);
gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MAG_FILTER, GL10.GL_LINEAR);
//Other possible texture parameters, e.g. GL10.GL_CLAMP_TO_EDGE
gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_WRAP_S, GL10.GL_REPEAT);
gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_WRAP_T, GL10.GL_REPEAT);
/*
String imagePath = "radiocd5.png";
AssetManager mngr = context.getAssets();
InputStream is=null;
try {
is = mngr.open(imagePath);
} catch (IOException e1) { e1.printStackTrace(); }
*/
//Get the texture from the Android resource directory
InputStream is=null;
/*
if (item.equals("rim"))
is = context.getResources().openRawResource(R.drawable.rueda);
else if (item.equals("selector"))
is = context.getResources().openRawResource(R.drawable.selector);
*/
/*
is = context.getResources().openRawResource(resourceId);
Bitmap bitmap = null;
try {
bitmap = BitmapFactory.decodeStream(is);
} finally {
try {
is.close();
is = null;
} catch (IOException e) {
}
}
*/
Bitmap bitmap =image;
//The following code resizes images that are larger than 256x256.
int newW=bitmap.getWidth();
int newH=bitmap.getHeight();
float fact;
if (newH>256 || newW>256)
{
if (newH>256)
{
fact=(float)255/(float)newH; //factor to multiply by to get down to size 256
newH=(int)(newH*fact); //height reduced by the required factor
newW=(int)(newW*fact); //width reduced by the required factor
}
if (newW>256)
{
fact=(float)255/(float)newW; //factor to multiply by to get down to size 256
newH=(int)(newH*fact); //height reduced by the required factor
newW=(int)(newW*fact); //width reduced by the required factor
}
bitmap=Bitmap.createScaledBitmap(bitmap, newW, newH, true);
}
//The following code turns non-power-of-two (NPOT) images into power-of-two (POT) images.
//The NPOT bitmap is placed inside a POT bitmap so that no white textures appear.
int nextPot=256;
int h = bitmap.getHeight();
int w = bitmap.getWidth();
int offx=(nextPot-w)/2; //offset from the left so the image is centered in the new POT image
int offy=(nextPot-h)/2; //offset from the top so the image is centered in the new POT image
Bitmap bitmap2 = Bitmap.createBitmap(nextPot, nextPot, Bitmap.Config.ARGB_8888); //creates a transparent bitmap thanks to ARGB_8888
Canvas comboImage = new Canvas(bitmap2);
comboImage.drawBitmap(bitmap, offx, offy, null);
comboImage.save();
//Use Android's GLUtils to specify a two-dimensional texture for our bitmap
GLUtils.texImage2D(GL10.GL_TEXTURE_2D, 0, bitmap2, 0);
//Check whether the GL context is version 1.1 and generate mipmaps via the flag; otherwise, call our own implementation
if(gl instanceof GL11) {
gl.glTexParameterf(GL11.GL_TEXTURE_2D, GL11.GL_GENERATE_MIPMAP, GL11.GL_TRUE);
GLUtils.texImage2D(GL10.GL_TEXTURE_2D, 0, bitmap2, 0);
} else {
buildMipmap(gl, bitmap2);
}
//Clean up the bitmaps
bitmap.recycle();
bitmap2.recycle();
}
//Our mipmap implementation. Scale the original bitmap down by a factor of 2 and set it as the next mipmap level
private void buildMipmap(GL10 gl, Bitmap bitmap) {
int level = 0;
int height = bitmap.getHeight();
int width = bitmap.getWidth();
while(height >= 1 || width >= 1) {
GLUtils.texImage2D(GL10.GL_TEXTURE_2D, level, bitmap, 0);
if(height == 1 || width == 1) {
break;
}
level++;
height /= 2;
width /= 2;
Bitmap bitmap2 = Bitmap.createScaledBitmap(bitmap, width, height, true);
bitmap.recycle();
bitmap = bitmap2;
}
}
}
Upvotes: 3
Views: 5386
Reputation: 45968
gluProject does exactly what the fixed-function transformation pipeline would do, too:
The 3D vertex is expanded to homogeneous coordinates by appending a 1 as fourth coordinate: v[3]=1.
Then this homogeneous vertex is multiplied by the modelview matrix and the projection matrix: v'=P*M*v.
Then comes the perspective division. By dividing by the fourth coordinate we account for perspective distortion (if you have an orthographic projection, e.g. using glOrtho, then v'[3]==1 and there is no perspective distortion): v"=v'/v'[3].
Now everything in your viewing volume (the visible area of your scene) has been transformed into normalized device coordinates, the [-1,1]-cube. So what needs to be done is to transform this into screen coordinates [0,w] x [0,h]: x = w * (v"[0]+1) / 2 and y = h * (v"[1]+1) / 2. And finally, the z-coordinate is transformed from [-1,1] to [0,1] to give the normalized depth value that is written into the depth buffer: z = (v"[2]+1) / 2.
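For illustration, here is a rough sketch of those steps in Java using android.opengl.Matrix (a hand-rolled equivalent of gluProject, assuming column-major 4x4 matrices as used by OpenGL and android.opengl.Matrix; not the actual GLU source):
import android.opengl.Matrix;
// A hand-rolled version of what gluProject computes (sketch only).
public final class ProjectSketch {
    public static float[] project(float objX, float objY, float objZ,
                                  float[] model, float[] proj, int[] view) {
        // v' = P * M * v
        float[] pm = new float[16];
        Matrix.multiplyMM(pm, 0, proj, 0, model, 0);
        float[] v  = { objX, objY, objZ, 1.0f };   // homogeneous vertex, v[3] = 1
        float[] vp = new float[4];
        Matrix.multiplyMV(vp, 0, pm, 0, v, 0);
        // Perspective division -> normalized device coordinates in [-1,1]
        float nx = vp[0] / vp[3];
        float ny = vp[1] / vp[3];
        float nz = vp[2] / vp[3];
        // Viewport transform -> window coordinates, plus depth in [0,1]
        return new float[] {
            view[0] + view[2] * (nx + 1.0f) / 2.0f,
            view[1] + view[3] * (ny + 1.0f) / 2.0f,
            (nz + 1.0f) / 2.0f
        };
    }
}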
So the key to understanding what happens to the z value is to realize that the distance to the camera (the z value in view space) is first transformed into the [-1,1] range by the projection matrix, depending on the near-far range (the near and far values you put into glOrtho, glFrustum or gluPerspective). Then this normalized value is transformed into the [0,1] range to result in the final depth value that gets written into the depth buffer and that gluProject computes as the z-value of the window coordinates.
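For concreteness (assuming the standard glOrtho and glFrustum/gluPerspective matrices, with the camera looking down the negative z-axis): an orthographic projection maps the view-space z linearly, z_ndc = -2*z_eye/(far-near) - (far+near)/(far-near), whereas a perspective projection maps it as z_ndc = (far+near)/(far-near) + 2*far*near/((far-near)*z_eye), which depends on 1/z_eye and is therefore non-linear. In both cases the window depth is z = (z_ndc+1)/2, as above.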
So what you actually got out, (0, 0, 0.5), is the lower left corner of your screen with a depth of 0.5. With an orthographic matrix (without any perspective distortion) and an identity modelview matrix this would be equal to a coordinate of (left, bottom, (far-near)/2), where bottom, left, near and far are the corresponding arguments you put into the glOrtho function call (or something with similar functionality). So the vertex is in the middle of the near-far range and in the lower left corner of the viewing volume (as seen from the camera). But this won't hold for a perspective projection, as in this case the transformation from the view-space z-coordinate to the depth value is not linear (though still monotonic, of course).
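As a quick numeric sanity check (just a sketch; Matrix.setIdentityM and Matrix.orthoM from android.opengl.Matrix are only used to build example matrices, and the 800x480 viewport is taken from the question):
float[] model = new float[16];
float[] proj = new float[16];
Matrix.setIdentityM(model, 0);                      // identity modelview
Matrix.orthoM(proj, 0, -1f, 1f, -1f, 1f, -1f, 1f);  // like glOrtho(-1, 1, -1, 1, -1, 1)
int[] view = { 0, 0, 800, 480 };
float[] win = new float[3];
GLU.gluProject(-1f, -1f, 0f, model, 0, proj, 0, view, 0, win, 0);
// win is now approximately {0, 0, 0.5}: lower left corner, halfway through the depth range.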
Since you put in the vertex (-1, -1, 0), this could mean your modelview matrix is the identity and your projection matrix corresponds to a matrix created with glOrtho(-1, 1, -1, 1, -1, 1), which is also nearly the identity matrix (though with a mirrored z value, but because the input z is 0, you might not notice it). So if these are not the values you expected (after understanding the workings of gluProject, of course), it may also just be that your matrices haven't been retrieved correctly and you just got identity matrices instead of your actual modelview and projection matrices.
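One way to rule that out (just a sketch, using android.util.Log and java.util.Arrays; mg is the MatrixGrabber-style helper from your code) is to dump the matrices right after grabbing them and see whether they are merely identity matrices:
mg.getCurrentModelView(gl);
mg.getCurrentProjection(gl);
Log.d("gluProject", "modelview: " + Arrays.toString(mg.mModelView));
Log.d("gluProject", "projection: " + Arrays.toString(mg.mProjection));
// An identity matrix prints as 1.0 on the diagonal and 0.0 everywhere else.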
So I think there is nothing wrong with your gluProject call itself. You might also look at the answers to this question to gain some more insight into OpenGL's default transformation pipeline. Although with the advent of vertex shaders some of the stages can be computed differently, you normally still follow the idiomatic model -> view -> projection approach.
Upvotes: 11