I've looked for hours, but all I could find were two StackOverflow questions, one of which had a single answer, written by the question's author, that didn't really explain anything.
I was working on a game with LWJGL 2.9 and have now decided to switch to LibGDX for its simplicity. I had working code that let me move through space, but that code can't be converted 1:1 to LibGDX, and I can't find any example, explanation, or tutorial covering this, not even in LibGDX's wiki.
This is the code that I used for LWJGL:
public Vectorf calculateMovement(MovementDirection direction, float newYaw, float walkSpeed, double deltaTime) {
Vectorf position = new Vectorf(0f, 0f, 0f); // Just a Vector3f
float amount = (walkSpeed) * (float) deltaTime;
switch (direction) {
case FORWARD:
case BACKWARD: {
float dx = (float) (direction.getXAdd() * Math.sin(Math.toRadians(newYaw))) * amount;
float dz = (float) -(direction.getZAdd() * Math.cos(Math.toRadians(newYaw))) * amount;
position.add(dx, direction.getYAdd() * amount, dz);
break;
}
case LEFT:
case RIGHT: {
float dx = (amount * (float) (Math.sin(Math.toRadians(newYaw + 90))) * direction.getXAdd());
float dz = (amount * (float) (Math.cos(Math.toRadians(newYaw + 90))) * direction.getZAdd());
position.add(dx, direction.getYAdd() * amount, dz);
break;
}
default: {
position.add(direction.getXAdd() * amount, direction.getYAdd() * amount, direction.getZAdd() * amount);
}
}
return position;
}
public enum MovementDirection {
FORWARD(1, 1),
BACKWARD(-1, -1),
LEFT(-1, 1),
RIGHT(1, -1),
UP(1),
DOWN(-1);
private final int xAdd;
private final int yAdd;
private final int zAdd;
MovementDirection(int yAdd) {
this(0, yAdd, 0);
}
MovementDirection(int xAdd, int zAdd) {
this(xAdd, 0, zAdd);
}
MovementDirection(int xAdd, int yAdd, int zAdd) {
this.xAdd = xAdd;
this.yAdd = yAdd;
this.zAdd = zAdd;
}
public float getXAdd() {
return xAdd;
}
public float getYAdd() {
return yAdd;
}
public float getZAdd() {
return zAdd;
}
}
I then updated the view matrix before rendering each frame by calling the following method:
public static Matrix4f createViewMatrix(Camera camera) {
Matrix4f matrix = new Matrix4f();
matrix.setIdentity();
Matrix4f.rotate((float) Math.toRadians(camera.getYaw()), new Vector3f(1, 0, 0), matrix, matrix);
Matrix4f.rotate((float) Math.toRadians(camera.getPitch()), new Vector3f(0, 1, 0), matrix, matrix);
Matrix4f.rotate((float) Math.toRadians(camera.getDelta()), new Vector3f(0, 0, 1), matrix, matrix);
Matrix4f.translate(camera.getNegativePosition(), matrix, matrix);
return matrix;
}
LibGDX, though, uses matrices in a totally different way, and I have no idea how to configure it (or how to properly update the view matrix, for that matter). Here's what I have:
public static Matrix4 createViewMatrix(Camera camera) {
Matrix4 matrix = new Matrix4();
matrix.setToLookAt(camera.direction, camera.up);
return matrix;
}
I'm not even sure how I would use that; I'm completely lost. I can draw a cube on the screen and move my mouse around to look at it, but as soon as I try to move (W, A, S, D) the cube disappears (I'm guessing the view matrix is effectively never applied).
I know it's a long read and the code may not be great, but I could find little to nothing on this for LWJGL, let alone LibGDX.
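For reference, the idiomatic LibGDX route is to skip a hand-rolled createViewMatrix() altogether: PerspectiveCamera owns the view matrix and rebuilds it (as camera.view and camera.combined) every time update() is called. Here is a minimal WASD sketch along those lines; walkSpeed is a placeholder of mine, not part of the original code:
import com.badlogic.gdx.Gdx;
import com.badlogic.gdx.Input;
import com.badlogic.gdx.graphics.PerspectiveCamera;
import com.badlogic.gdx.math.Vector3;
// In create():
PerspectiveCamera camera = new PerspectiveCamera(67, Gdx.graphics.getWidth(), Gdx.graphics.getHeight());
camera.position.set(0f, 1f, 3f);
camera.near = 0.1f;
camera.far = 100f;
// Each frame, before rendering:
Vector3 tmp = new Vector3();
float amount = walkSpeed * Gdx.graphics.getDeltaTime();
if (Gdx.input.isKeyPressed(Input.Keys.W))
    camera.position.add(tmp.set(camera.direction).nor().scl(amount)); // forward
if (Gdx.input.isKeyPressed(Input.Keys.S))
    camera.position.sub(tmp.set(camera.direction).nor().scl(amount)); // backward
if (Gdx.input.isKeyPressed(Input.Keys.A))
    camera.position.sub(tmp.set(camera.direction).crs(camera.up).nor().scl(amount)); // strafe left
if (Gdx.input.isKeyPressed(Input.Keys.D))
    camera.position.add(tmp.set(camera.direction).crs(camera.up).nor().scl(amount)); // strafe right
camera.update(); // rebuilds camera.view and camera.combined for ModelBatch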
I finally got it. I was drawing before moving and updating my camera, and that, it turns out, was the issue: each frame was rendered with the camera state from before the input was applied.
My render loop previously looked like this:
@Override
public void render() {
// Other code
Gdx.gl.glEnable(GL20.GL_DEPTH_TEST);
Gdx.gl.glViewport(0, 0, Gdx.graphics.getBackBufferWidth(), Gdx.graphics.getBackBufferHeight());
Gdx.gl.glClearColor(0, 0, 1, 1);
Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT | GL20.GL_DEPTH_BUFFER_BIT);
// Handle camera input
camera.update();
modelBatch.render(camera);
Gdx.gl.glDisable(GL20.GL_DEPTH_TEST);
}
I simply moved the camera input handling and camera update above the "render" code:
@Override
public void render() {
// Other code
// Handle camera input
camera.update();
Gdx.gl.glEnable(GL20.GL_DEPTH_TEST);
Gdx.gl.glViewport(0, 0, Gdx.graphics.getBackBufferWidth(), Gdx.graphics.getBackBufferHeight());
Gdx.gl.glClearColor(0, 0, 1, 1);
Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT | GL20.GL_DEPTH_BUFFER_BIT);
modelBatch.render(camera);
Gdx.gl.glDisable(GL20.GL_DEPTH_TEST);
}
I've been working on this for a while and feel so close! It should be easy, but I'm still new to this.
The skeleton hand data is being passed in as joints[KinectPV2.JointType_HandLeft] and can be accessed through joint.getX() and joint.getY(). I want to pass this data into the update function to replace mouseX and mouseY. I'm guessing I have to create global variables to access it within the update function or maybe I have to pass the skeleton data as parameters into the update function? How can I replace the mouse position data with the hand position?
import KinectPV2.*;
KinectPV2 kinect;
private class MyFluidData implements DwFluid2D.FluidData{
// update() is called during the fluid-simulation update step.
@Override
public void update(DwFluid2D fluid) {
float px, py, vx, vy, radius, vscale, temperature;
radius = 15;
vscale = 10;
px = width/2;
py = 50;
vx = 1 * +vscale;
vy = 1 * vscale;
radius = 40;
temperature = 1f;
fluid.addDensity(px, py, radius, 0.2f, 0.3f, 0.5f, 1.0f);
fluid.addTemperature(px, py, radius, temperature);
particles.spawn(fluid, px, py, radius, 100);
boolean mouse_input = mousePressed;
// add impulse: density + velocity, particles
if(mouse_input && mouseButton == LEFT){
radius = 15;
vscale = 15;
px = mouseX;
py = height-mouseY;
vx = (mouseX - pmouseX) * +vscale;
vy = (mouseY - pmouseY) * -vscale;
fluid.addDensity (px, py, radius, 0.25f, 0.0f, 0.1f, 1.0f);
fluid.addVelocity(px, py, radius, vx, vy);
particles.spawn(fluid, px, py, radius*2, 300);
}
// add impulse: density + temperature, particles
if(mouse_input && mouseButton == CENTER){
radius = 15;
vscale = 15;
px = mouseX;
py = height-mouseY;
temperature = 2f;
fluid.addDensity(px, py, radius, 0.25f, 0.0f, 0.1f, 1.0f);
fluid.addTemperature(px, py, radius, temperature);
particles.spawn(fluid, px, py, radius, 100);
}
// particles
if(mouse_input && mouseButton == RIGHT){
px = mouseX;
py = height - 1 - mouseY; // invert
radius = 50;
particles.spawn(fluid, px, py, radius, 300);
}
}
}
int viewport_w = 1280;
int viewport_h = 720;
int viewport_x = 230;
int viewport_y = 0;
int gui_w = 200;
int gui_x = 20;
int gui_y = 20;
int fluidgrid_scale = 3;
DwFluid2D fluid;
// render targets
PGraphics2D pg_fluid;
//texture-buffer, for adding obstacles
PGraphics2D pg_obstacles;
// custom particle system
MyParticleSystem particles;
// some state variables for the GUI/display
int BACKGROUND_COLOR = 0;
boolean UPDATE_FLUID = true;
boolean DISPLAY_FLUID_TEXTURES = false;
boolean DISPLAY_FLUID_VECTORS = false;
int DISPLAY_fluid_texture_mode = 0;
boolean DISPLAY_PARTICLES = true;
public void settings() {
size(viewport_w, viewport_h, P2D);
smooth(4);
}
public void setup() {
surface.setLocation(viewport_x, viewport_y);
// main library context
DwPixelFlow context = new DwPixelFlow(this);
context.print();
context.printGL();
// fluid simulation
fluid = new DwFluid2D(context, viewport_w, viewport_h, fluidgrid_scale);
// set some simulation parameters
fluid.param.dissipation_density = 0.999f;
fluid.param.dissipation_velocity = 0.99f;
fluid.param.dissipation_temperature = 0.80f;
fluid.param.vorticity = 0.10f;
fluid.param.timestep = 0.25f;
fluid.param.gridscale = 8f;
// interface for adding data to the fluid simulation
MyFluidData cb_fluid_data = new MyFluidData();
fluid.addCallback_FluiData(cb_fluid_data);
// pgraphics for fluid
pg_fluid = (PGraphics2D) createGraphics(viewport_w, viewport_h, P2D);
pg_fluid.smooth(4);
pg_fluid.beginDraw();
pg_fluid.background(BACKGROUND_COLOR);
pg_fluid.endDraw();
// pgraphics for obstacles
pg_obstacles = (PGraphics2D) createGraphics(viewport_w, viewport_h, P2D);
pg_obstacles.smooth(4);
pg_obstacles.beginDraw();
pg_obstacles.clear();
float radius;
radius = 200;
pg_obstacles.stroke(64);
pg_obstacles.strokeWeight(1);
pg_obstacles.fill(0);
pg_obstacles.rect(1*width/2f, 1*height/4f, radius, radius/2, 10);
pg_obstacles.stroke(64);
pg_obstacles.strokeWeight(1);
pg_obstacles.fill(0);
pg_obstacles.rect(1*width/3.5f, 1*height/2.5f, radius, radius/2, 10);
//// border-obstacle
//pg_obstacles.strokeWeight(20);
//pg_obstacles.stroke(64);
//pg_obstacles.noFill();
//pg_obstacles.rect(0, 0, pg_obstacles.width, pg_obstacles.height);
pg_obstacles.endDraw();
fluid.addObstacles(pg_obstacles);
// custom particle object
particles = new MyParticleSystem(context, 1024 * 1024);
kinect = new KinectPV2(this);
//Enables depth and Body tracking (mask image)
kinect.enableDepthMaskImg(true);
kinect.enableSkeletonDepthMap(true);
kinect.init();
background(0);
frameRate(60);
}
public void draw() {
PImage imgC = kinect.getDepthMaskImage();
image(imgC, 0, 0, 320, 240);
//get the skeletons as an Arraylist of KSkeletons
ArrayList<KSkeleton> skeletonArray = kinect.getSkeletonDepthMap();
//individual joints
for (int i = 0; i < skeletonArray.size(); i++) {
KSkeleton skeleton = (KSkeleton) skeletonArray.get(i);
//if the skeleton is being tracked compute the skeleton joints
if (skeleton.isTracked()) {
KJoint[] joints = skeleton.getJoints();
color col = skeleton.getIndexColor();
fill(col);
stroke(col);
drawHandState(joints[KinectPV2.JointType_HandRight]);
drawHandState(joints[KinectPV2.JointType_HandLeft]);
}
}
// update simulation
if(UPDATE_FLUID){
fluid.addObstacles(pg_obstacles);
fluid.update();
particles.update(fluid);
}
// clear render target
pg_fluid.beginDraw();
pg_fluid.background(BACKGROUND_COLOR);
pg_fluid.endDraw();
// render fluid stuff
if(DISPLAY_FLUID_TEXTURES){
// render: density (0), temperature (1), pressure (2), velocity (3)
fluid.renderFluidTextures(pg_fluid, DISPLAY_fluid_texture_mode);
}
if(DISPLAY_FLUID_VECTORS){
// render: velocity vector field
fluid.renderFluidVectors(pg_fluid, 10);
}
if( DISPLAY_PARTICLES){
// render: particles; 0 ... points, 1 ...sprite texture, 2 ... dynamic points
particles.render(pg_fluid, BACKGROUND_COLOR);
}
// display
image(pg_fluid , 320, 0);
image(pg_obstacles, 320, 0);
// display number of particles as text
//String txt_num_particles = String.format("Particles %,d", particles.ALIVE_PARTICLES);
//fill(0, 0, 0, 220);
//noStroke();
//rect(10, height-10, 160, -30);
//fill(255,128,0);
//text(txt_num_particles, 20, height-20);
// info
//String txt_fps = String.format(getClass().getName()+ " [size %d/%d] [frame %d] [fps %6.2f]", fluid.fluid_w, fluid.fluid_h, fluid.simulation_step, frameRate);
//surface.setTitle(txt_fps);
}
//draw an ellipse depending on the hand state
void drawHandState(KJoint joint) {
noStroke();
handState(joint.getState());
//println(joint.getState());
pushMatrix();
translate(joint.getX(), joint.getY(), joint.getZ());
//println(joint.getX(), joint.getY(), joint.getZ());
ellipse(joint.getX(), joint.getY(), 70, 70);
popMatrix();
}
/*
Different hand state
KinectPV2.HandState_Open
KinectPV2.HandState_Closed
KinectPV2.HandState_Lasso
KinectPV2.HandState_NotTracked
*/
//Depending on the hand state change the color
void handState(int handState) {
switch(handState) {
case KinectPV2.HandState_Open:
fill(0, 255, 0);
break;
case KinectPV2.HandState_Closed:
fill(255, 0, 0);
break;
case KinectPV2.HandState_Lasso:
fill(0, 0, 255);
break;
case KinectPV2.HandState_NotTracked:
fill(100, 100, 100);
break;
}
}
I'm guessing I have to create global variables to access it within the update function or maybe I have to pass the skeleton data as parameters into the update function?
What happened when you tried those approaches?
Either approach sounds fine. You could store the values in sketch-level variables, set those variables from the Kinect code, then use them in your drawing code. Or you could pass the values as parameters to the drawing code. Either should work; I'd probably go for the first approach because it sounds easier to me, but that's just my personal preference.
I suggest working in smaller chunks. Create a separate program that ignores the Kinect for now. Create a hard-coded sketch-level variable that holds the same type of information you'd get from the Kinect, then write drawing code that uses that hard-coded variable to draw the frame. Get that working perfectly before you try adding the Kinect code back in.
Then, if you get stuck on a specific step, you can post an MCVE and we can go from there. Good luck.
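For example, here is a minimal sketch of the first approach; the names handX, handY, and handTracked are placeholders of mine standing in for mouseX/mouseY:
// Sketch-level variables:
float handX, handY;
boolean handTracked = false;
// In draw(), inside the skeleton.isTracked() branch:
KJoint hand = joints[KinectPV2.JointType_HandLeft];
handX = hand.getX();
handY = hand.getY();
handTracked = true;
// In MyFluidData.update(), which can read the sketch's fields directly
// because it's an inner class of the sketch:
if (handTracked) {
    px = handX;
    py = height - handY; // same y-flip the mouse branches use
    fluid.addDensity(px, py, radius, 0.25f, 0.0f, 0.1f, 1.0f);
    particles.spawn(fluid, px, py, radius * 2, 300);
}
Depending on the Kinect's coordinate space, you may also need to map() the joint coordinates into the fluid viewport's range before using them.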
I have created a pinch zoom with a scale detector, which in turn calls the following renderer.
It uses the projection matrix to do the zoom, and then scales the eye movement by the zoom factor when panning.
public class vboCustomGLRenderer implements GLSurfaceView.Renderer {
// Store the model matrix. This matrix is used to move models from object space (where each model can be thought
// of being located at the center of the universe) to world space.
private float[] mModelMatrix = new float[16];
// Store the view matrix. This can be thought of as our camera. This matrix transforms world space to eye space;
// it positions things relative to our eye.
private float[] mViewMatrix = new float[16];
// Store the projection matrix. This is used to project the scene onto a 2D viewport.
private float[] mProjectionMatrix = new float[16];
// Allocate storage for the final combined matrix. This will be passed into the shader program.
private float[] mMVPMatrix = new float[16];
// This will be used to pass in the transformation matrix.
private int mMVPMatrixHandle;
// This will be used to pass in model position information.
private int mPositionHandle;
// This will be used to pass in model color information.
private int mColorUniformLocation;
// How many bytes per float.
private final int mBytesPerFloat = 4;
// Offset of the position data.
private final int mPositionOffset = 0;
// Size of the position data in elements.
private final int mPositionDataSize = 3;
// How many elements per vertex for double values.
private final int mPositionFloatStrideBytes = mPositionDataSize * mBytesPerFloat;
// Position the eye behind the origin.
public double eyeX = default_settings.mbrMinX + ((default_settings.mbrMaxX - default_settings.mbrMinX)/2);
public double eyeY = default_settings.mbrMinY + ((default_settings.mbrMaxY - default_settings.mbrMinY)/2);
// Position the eye behind the origin.
//final float eyeZ = 1.5f;
public float eyeZ = 1.5f;
// We are looking toward the distance
public double lookX = eyeX;
public double lookY = eyeY;
public float lookZ = 0.0f;
// Set our up vector. This is where our head would be pointing were we holding the camera.
public float upX = 0.0f;
public float upY = 1.0f;
public float upZ = 0.0f;
public double mScaleFactor = 1;
public double mScrnVsMapScaleFactor = 0;
public vboCustomGLRenderer() {}
public void setEye(double x, double y){
eyeX -= (x / screen_vs_map_horz_ratio);
lookX = eyeX;
eyeY += (y / screen_vs_map_vert_ratio);
lookY = eyeY;
// Set the camera position (View matrix)
Matrix.setLookAtM(mViewMatrix, 0, (float)eyeX, (float)eyeY, eyeZ, (float)lookX, (float)lookY, lookZ, upX, upY, upZ);
}
public void setScaleFactor(float scaleFactor, float gdx, float gdy){
mScaleFactor *= scaleFactor;
mRight = mRight / scaleFactor;
mLeft = -mRight;
mTop = mTop / scaleFactor;
mBottom = -mTop;
//Need to calculate the shift in the eye when zooming on a particular spot.
//So get the distance between the zoom point and eye point, figure out the
//new eye point by getting the factor of this distance.
double eyeXShift = (((mWidth / 2) - gdx) - (((mWidth / 2) - gdx) / scaleFactor));
double eyeYShift = (((mHeight / 2) - gdy) - (((mHeight / 2) - gdy) / scaleFactor));
screen_vs_map_horz_ratio = (mWidth/(mRight-mLeft));
screen_vs_map_vert_ratio = (mHeight/(mTop-mBottom));
eyeX -= (eyeXShift / screen_vs_map_horz_ratio);
lookX = eyeX;
eyeY += (eyeYShift / screen_vs_map_vert_ratio);
lookY = eyeY;
// Set the scale (Projection matrix)
Matrix.frustumM(mProjectionMatrix, 0, (float)mLeft, (float)mRight, (float)mBottom, (float)mTop, near, far);
}
@Override
public void onSurfaceCreated(GL10 unused, EGLConfig config) {
// Set the background frame color
//White
GLES20.glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
// Set the view matrix. This matrix can be said to represent the camera position.
// NOTE: In OpenGL 1, a ModelView matrix is used, which is a combination of a model and
// view matrix. In OpenGL 2, we can keep track of these matrices separately if we choose.
Matrix.setLookAtM(mViewMatrix, 0, (float)eyeX, (float)eyeY, eyeZ, (float)lookX, (float)lookY, lookZ, upX, upY, upZ);
final String vertexShader =
"uniform mat4 u_MVPMatrix; \n" // A constant representing the combined model/view/projection matrix.
+ "attribute vec4 a_Position; \n" // Per-vertex position information we will pass in.
+ "attribute vec4 a_Color; \n" // Per-vertex color information we will pass in.
+ "varying vec4 v_Color; \n" // This will be passed into the fragment shader.
+ "void main() \n" // The entry point for our vertex shader.
+ "{ \n"
+ " v_Color = a_Color; \n" // Pass the color through to the fragment shader.
// It will be interpolated across the triangle.
+ " gl_Position = u_MVPMatrix \n" // gl_Position is a special variable used to store the final position.
+ " * a_Position; \n" // Multiply the vertex by the matrix to get the final point in
+ "} \n"; // normalized screen coordinates.
final String fragmentShader =
"precision mediump float; \n" // Set the default precision to medium. We don't need as high of a
// precision in the fragment shader.
+ "uniform vec4 u_Color; \n" // This is the color from the vertex shader interpolated across the
// triangle per fragment.
+ "void main() \n" // The entry point for our fragment shader.
+ "{ \n"
+ " gl_FragColor = u_Color; \n" // Pass the color directly through the pipeline.
+ "} \n";
// Load in the vertex shader.
int vertexShaderHandle = GLES20.glCreateShader(GLES20.GL_VERTEX_SHADER);
if (vertexShaderHandle != 0)
{
// Pass in the shader source.
GLES20.glShaderSource(vertexShaderHandle, vertexShader);
// Compile the shader.
GLES20.glCompileShader(vertexShaderHandle);
// Get the compilation status.
final int[] compileStatus = new int[1];
GLES20.glGetShaderiv(vertexShaderHandle, GLES20.GL_COMPILE_STATUS, compileStatus, 0);
// If the compilation failed, delete the shader.
if (compileStatus[0] == 0)
{
GLES20.glDeleteShader(vertexShaderHandle);
vertexShaderHandle = 0;
}
}
if (vertexShaderHandle == 0)
{
throw new RuntimeException("Error creating vertex shader.");
}
// Load in the fragment shader shader.
int fragmentShaderHandle = GLES20.glCreateShader(GLES20.GL_FRAGMENT_SHADER);
if (fragmentShaderHandle != 0)
{
// Pass in the shader source.
GLES20.glShaderSource(fragmentShaderHandle, fragmentShader);
// Compile the shader.
GLES20.glCompileShader(fragmentShaderHandle);
// Get the compilation status.
final int[] compileStatus = new int[1];
GLES20.glGetShaderiv(fragmentShaderHandle, GLES20.GL_COMPILE_STATUS, compileStatus, 0);
// If the compilation failed, delete the shader.
if (compileStatus[0] == 0)
{
GLES20.glDeleteShader(fragmentShaderHandle);
fragmentShaderHandle = 0;
}
}
if (fragmentShaderHandle == 0)
{
throw new RuntimeException("Error creating fragment shader.");
}
// Create a program object and store the handle to it.
int programHandle = GLES20.glCreateProgram();
if (programHandle != 0)
{
// Bind the vertex shader to the program.
GLES20.glAttachShader(programHandle, vertexShaderHandle);
// Bind the fragment shader to the program.
GLES20.glAttachShader(programHandle, fragmentShaderHandle);
// Bind attributes
GLES20.glBindAttribLocation(programHandle, 0, "a_Position");
GLES20.glBindAttribLocation(programHandle, 1, "a_Color");
// Link the two shaders together into a program.
GLES20.glLinkProgram(programHandle);
// Get the link status.
final int[] linkStatus = new int[1];
GLES20.glGetProgramiv(programHandle, GLES20.GL_LINK_STATUS, linkStatus, 0);
// If the link failed, delete the program.
if (linkStatus[0] == 0)
{
GLES20.glDeleteProgram(programHandle);
programHandle = 0;
}
}
if (programHandle == 0)
{
throw new RuntimeException("Error creating program.");
}
// Set program handles. These will later be used to pass in values to the program.
mMVPMatrixHandle = GLES20.glGetUniformLocation(programHandle, "u_MVPMatrix");
mPositionHandle = GLES20.glGetAttribLocation(programHandle, "a_Position");
mColorUniformLocation = GLES20.glGetUniformLocation(programHandle, "u_Color");
// Tell OpenGL to use this program when rendering.
GLES20.glUseProgram(programHandle);
}
static double mWidth = 0;
static double mHeight = 0;
static double mLeft = 0;
static double mRight = 0;
static double mTop = 0;
static double mBottom = 0;
static double mRatio = 0;
double screen_width_height_ratio;
double screen_height_width_ratio;
final float near = 1.5f;
final float far = 10.0f;
double screen_vs_map_horz_ratio = 0;
double screen_vs_map_vert_ratio = 0;
@Override
public void onSurfaceChanged(GL10 unused, int width, int height) {
// Adjust the viewport based on geometry changes,
// such as screen rotation
// Set the OpenGL viewport to the same size as the surface.
GLES20.glViewport(0, 0, width, height);
screen_width_height_ratio = (double) width / height;
screen_height_width_ratio = (double) height / width;
//Initialize
if (mRatio == 0){
mWidth = (double) width;
mHeight = (double) height;
//map height to width ratio
double map_extents_width = default_settings.mbrMaxX - default_settings.mbrMinX;
double map_extents_height = default_settings.mbrMaxY - default_settings.mbrMinY;
double map_width_height_ratio = map_extents_width/map_extents_height;
if (screen_width_height_ratio > map_width_height_ratio){
mRight = (screen_width_height_ratio * map_extents_height)/2;
mLeft = -mRight;
mTop = map_extents_height/2;
mBottom = -mTop;
}
else{
mRight = map_extents_width/2;
mLeft = -mRight;
mTop = (screen_height_width_ratio * map_extents_width)/2;
mBottom = -mTop;
}
mRatio = screen_width_height_ratio;
}
if (screen_width_height_ratio != mRatio){
final double wRatio = width/mWidth;
final double oldWidth = mRight - mLeft;
final double newWidth = wRatio * oldWidth;
final double widthDiff = (newWidth - oldWidth)/2;
mLeft = mLeft - widthDiff;
mRight = mRight + widthDiff;
final double hRatio = height/mHeight;
final double oldHeight = mTop - mBottom;
final double newHeight = hRatio * oldHeight;
final double heightDiff = (newHeight - oldHeight)/2;
mBottom = mBottom - heightDiff;
mTop = mTop + heightDiff;
mWidth = (double) width;
mHeight = (double) height;
mRatio = screen_width_height_ratio;
}
screen_vs_map_horz_ratio = (mWidth/(mRight-mLeft));
screen_vs_map_vert_ratio = (mHeight/(mTop-mBottom));
Matrix.frustumM(mProjectionMatrix, 0, (float)mLeft, (float)mRight, (float)mBottom, (float)mTop, near, far);
}
ListIterator<mapLayer> orgNonAssetCatLayersList_it;
ListIterator<FloatBuffer> mapLayerObjectList_it;
ListIterator<Byte> mapLayerObjectTypeList_it;
mapLayer MapLayer;
@Override
public void onDrawFrame(GL10 unused) {
GLES20.glClear(GLES20.GL_DEPTH_BUFFER_BIT | GLES20.GL_COLOR_BUFFER_BIT);
drawPreset();
orgNonAssetCatLayersList_it = default_settings.orgNonAssetCatMappableLayers.listIterator();
while (orgNonAssetCatLayersList_it.hasNext()) {
MapLayer = orgNonAssetCatLayersList_it.next();
if (MapLayer.BatchedPointVBO != null){
}
if (MapLayer.BatchedLineVBO != null){
drawLineString(MapLayer.BatchedLineVBO, MapLayer.lineStringObjColor);
}
if (MapLayer.BatchedPolygonVBO != null){
drawPolygon(MapLayer.BatchedPolygonVBO, MapLayer.polygonObjColor);
}
}
}
private void drawPreset()
{
Matrix.setIdentityM(mModelMatrix, 0);
// This multiplies the view matrix by the model matrix, and stores the result in the MVP matrix
// (which currently contains model * view).
Matrix.multiplyMM(mMVPMatrix, 0, mViewMatrix, 0, mModelMatrix, 0);
// This multiplies the modelview matrix by the projection matrix, and stores the result in the MVP matrix
// (which now contains model * view * projection).
Matrix.multiplyMM(mMVPMatrix, 0, mProjectionMatrix, 0, mMVPMatrix, 0);
GLES20.glUniformMatrix4fv(mMVPMatrixHandle, 1, false, mMVPMatrix, 0);
}
private void drawLineString(final FloatBuffer geometryBuffer, final float[] colorArray)
{
// Pass in the position information
geometryBuffer.position(mPositionOffset);
GLES20.glVertexAttribPointer(mPositionHandle, mPositionDataSize, GLES20.GL_FLOAT, false, mPositionFloatStrideBytes, geometryBuffer);
GLES20.glEnableVertexAttribArray(mPositionHandle);
GLES20.glUniform4f(mColorUniformLocation, colorArray[0], colorArray[1], colorArray[2], 1f);
GLES20.glLineWidth(2.0f);
GLES20.glDrawArrays(GLES20.GL_LINES, 0, geometryBuffer.capacity()/mPositionDataSize);
}
private void drawPolygon(final FloatBuffer geometryBuffer, final float[] colorArray)
{
// Pass in the position information
geometryBuffer.position(mPositionOffset);
GLES20.glVertexAttribPointer(mPositionHandle, mPositionDataSize, GLES20.GL_FLOAT, false, mPositionFloatStrideBytes, geometryBuffer);
GLES20.glEnableVertexAttribArray(mPositionHandle);
GLES20.glUniform4f(mColorUniformLocation, colorArray[0], colorArray[1], colorArray[2], 1f);
GLES20.glLineWidth(1.0f);
GLES20.glDrawArrays(GLES20.GL_LINES, 0, geometryBuffer.capacity()/mPositionDataSize);
}
}
This works very well until it reaches a certain zoom level, at which point the panning starts jumping. After testing, I found this was because the floating-point eye position can't represent such small shifts. I keep my x and y eye values in doubles so the shifted positions stay precise, then convert them to floats when calling setLookAtM().
So I need to change the way the zoom works. I was thinking that instead of zooming with the projection, I could scale the model larger or smaller.
The setScaleFactor() function in my code would change, removing the projection and eye shifting.
There is a Matrix.scaleM(m, offset, x, y, z) function, but I am unsure how or where to use it.
I could use some suggestions on how to accomplish this.
[Edit] 24/7/2013
I tried altering setScaleFactor() like so:
public void setScaleFactor(float scaleFactor, float gdx, float gdy){
mScaleFactor *= scaleFactor;
}
and in drawPreset()
private void drawPreset()
{
Matrix.setIdentityM(mModelMatrix, 0);
//*****Added scaleM
Matrix.scaleM(mModelMatrix, 0, (float)mScaleFactor, (float)mScaleFactor, 1.0f);
// This multiplies the view matrix by the model matrix, and stores the result in the MVP matrix
// (which currently contains model * view).
Matrix.multiplyMM(mMVPMatrix, 0, mViewMatrix, 0, mModelMatrix, 0);
// This multiplies the modelview matrix by the projection matrix, and stores the result in the MVP matrix
// (which now contains model * view * projection).
Matrix.multiplyMM(mMVPMatrix, 0, mProjectionMatrix, 0, mMVPMatrix, 0);
GLES20.glUniformMatrix4fv(mMVPMatrixHandle, 1, false, mMVPMatrix, 0);
}
Now, as soon as I zoom, the image disappears from the screen. Actually, I found it just off to the right-hand side; I could still pan over to it.
I'm still not sure what I should be scaling to zoom: the model, the view, or the model-view?
I have found that if you take the center of your model back to the origin (0, 0), it lets you extend your zoom range considerably. My x-coordinate data sat between 152.6 and 152.7, so I shifted it back to the origin with an offset of 152.65, applied to the data before loading it into the FloatBuffer.
The width of the data then becomes 0.1 (0.05 on each side of the origin), leaving much more floating-point precision in the trailing digits of each value.
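A minimal sketch of that recentring step, assuming the raw coordinates arrive as a double[] of x,y,z triplets (the method and constant names here are mine):
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;
static final double OFFSET_X = 152.65; // mid-point of the raw x range
static FloatBuffer toCenteredBuffer(double[] raw) {
    FloatBuffer fb = ByteBuffer.allocateDirect(raw.length * 4)
            .order(ByteOrder.nativeOrder()).asFloatBuffer();
    for (int i = 0; i < raw.length; i += 3) {
        fb.put((float) (raw[i] - OFFSET_X)); // x now spans roughly -0.05..0.05
        fb.put((float) raw[i + 1]);          // y (apply an offset likewise if needed)
        fb.put((float) raw[i + 2]);          // z unchanged
    }
    fb.position(0);
    return fb;
}
The same offset then has to come off the eye and look x-coordinates so the camera still lines up with the shifted data.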
I've just started learning OpenGL on Android and I'm having a weird problem when drawing a circle: some of its vertices stick to the left and top edges, making lines shoot out from the circle somewhat randomly. Every time I restart the app they're in a different position.
My DrawScreen class where the circle is drawn:
public class DrawScreen implements GLSurfaceView.Renderer {
Ball ball;
public float mAngle;
private int mProgram;
private int maPositionHandle;
private final String vertexShaderCode =
// This matrix member variable provides a hook to manipulate
// the coordinates of the objects that use this vertex shader
"uniform mat4 uMVPMatrix; \n" +
"attribute vec4 vPosition; \n" +
"void main(){ \n" +
// the matrix must be included as a modifier of gl_Position
" gl_Position = uMVPMatrix * vPosition; \n" +
"} \n";
private final String fragmentShaderCode =
"precision mediump float; \n" +
"void main(){ \n" +
" gl_FragColor = vec4 (0.63671875, 0.76953125, 0.22265625, 1.0); \n" +
"} \n";
private int muMVPMatrixHandle;
private float[] mMVPMatrix = new float[16];
private float[] mVMatrix = new float[16];
private float[] mProjMatrix = new float[16];
public void onSurfaceCreated(GL10 unused, EGLConfig config) {
ball = new Ball();
// Set the background frame color
GLES20.glClearColor(0.5f, 0.5f, 0.5f, 1.0f);
muMVPMatrixHandle = GLES20.glGetUniformLocation(mProgram, "uMVPMatrix");
Matrix.setLookAtM(mVMatrix, 0, 0, 0, -3, 0f, 0f, 0f, 0f, 1.0f, 0.0f);
ball.initShapes(240, 360, 50);
int vertexShader = loadShader(GLES20.GL_VERTEX_SHADER, vertexShaderCode);
int fragmentShader = loadShader(GLES20.GL_FRAGMENT_SHADER, fragmentShaderCode);
mProgram = GLES20.glCreateProgram(); // create empty OpenGL Program
GLES20.glAttachShader(mProgram, vertexShader); // add the vertex shader to program
GLES20.glAttachShader(mProgram, fragmentShader); // add the fragment shader to program
GLES20.glLinkProgram(mProgram); // creates OpenGL program executables
// get handle to the vertex shader's vPosition member
maPositionHandle = GLES20.glGetAttribLocation(mProgram, "vPosition");
}
public void onDrawFrame(GL10 unused) {
// Redraw background color
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT | GLES20.GL_DEPTH_BUFFER_BIT);
// Add program to OpenGL environment
GLES20.glUseProgram(mProgram);
// Prepare the circle data
GLES20.glVertexAttribPointer(maPositionHandle, 3, GLES20.GL_FLOAT, false, 0, ball.ballVB);
GLES20.glEnableVertexAttribArray(maPositionHandle);
// Apply a ModelView Projection transformation
Matrix.multiplyMM(mMVPMatrix, 0, mProjMatrix, 0, mVMatrix, 0);
GLES20.glUniformMatrix4fv(muMVPMatrixHandle, 1, false, mMVPMatrix, 0);
// Draw the circle
GLES20.glDrawArrays(GLES20.GL_LINE_LOOP, 0, (int) (ball.getNumSeg() * 3));
}
public void onSurfaceChanged(GL10 unused, int width, int height) {
GLES20.glViewport(0, 0, width, height);
float ratio = (float) width / height;
// this projection matrix is applied to object coodinates
// in the onDrawFrame() method
Matrix.frustumM(mProjMatrix, 0, -ratio, ratio, -1, 1, 3, 7);
muMVPMatrixHandle = GLES20.glGetUniformLocation(mProgram, "uMVPMatrix");
Matrix.setLookAtM(mVMatrix, 0, 0, 0, -3, 0f, 0f, 0f, 0f, 1.0f, 0.0f);
}
private int loadShader(int type, String shaderCode){
// create a vertex shader type (GLES20.GL_VERTEX_SHADER)
// or a fragment shader type (GLES20.GL_FRAGMENT_SHADER)
int shader = GLES20.glCreateShader(type);
// add the source code to the shader and compile it
GLES20.glShaderSource(shader, shaderCode);
GLES20.glCompileShader(shader);
return shader;
}
}
And my Ball class where the circle is created:
public class Ball {
public FloatBuffer ballVB;
private float cx, cy, r;
float numSegments = 360;
public void initShapes(float tx, float ty, float tr){
cx = (tx / 240.f) - 1.f;
cy = (ty / 360.f) - 1.f;
r = (tr / 240.f);
float ballCoords[] = new float[(int) (numSegments * 3)];
double theta = (2 * 3.1415926 / numSegments);
float c = (float) Math.cos(theta);//precalculate the sine and cosine
float s = (float) Math.sin(theta);
float t;
float x = r;//we start at angle = 0
float y = 0;
for(int i = 0; i < (numSegments * 3); i = i + 3 ) {
ballCoords[i] = (x + cx);
ballCoords[i + 1] = (y + cy);
ballCoords[i + 2] = (0);
//apply the rotation matrix
t = x;
x = c * x - s * y;
y = s * t + c * y;
}
// initialize vertex Buffer for triangle
ByteBuffer vbb = ByteBuffer.allocateDirect(
// (# of coordinate values * 4 bytes per float)
ballCoords.length * 4);
vbb.order(ByteOrder.nativeOrder());// use the device hardware's native byte order
ballVB = vbb.asFloatBuffer(); // create a floating point buffer from the ByteBuffer
ballVB.put(ballCoords); // add the coordinates to the FloatBuffer
ballVB.position(0); // set the buffer to read the first coordinate
}
public float getNumSeg(){
return numSegments;
}
}
I've been scouring the internet for hours but haven't found anything. Hope you guys can help me.
GLES20.glDrawArrays(GLES20.GL_LINE_LOOP, 0, (int) (ball.getNumSeg() * 3));
I'm suspicious of this; are the "segments" referring to individual vertices?
The final argument to glDrawArrays is the number of vertices to draw, not the number of floats. You should probably remove the * 3 multiplier from glDrawArrays.
Your extra lines are probably from drawing garbage data because you're drawing 3 times as many vertices as you've actually allocated.
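In other words, the call would become:
GLES20.glDrawArrays(GLES20.GL_LINE_LOOP, 0, (int) ball.getNumSeg()); // count vertices, not floats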
Hi, I'm creating a water scene and have a class called drawWater. The class takes in an equation to alter its appearance. When I try to create
drawWater water = new drawWater();
drawWater water2 = new drawWater();
they both seem to be created with the correct values, but when I draw them to the screen only one is shown, even though the two sets of values are different.
I don't know where I'm going wrong. It doesn't seem to be a JOGL problem; is it the way I'm setting up the class?
Can anyone see where I went wrong?
This is the main class:
package waterAttempt41;
import Common.GLDisplay;
public class Lesson27 {
public static void main(String[] args) {
GLDisplay neheGLDisplay = GLDisplay.createGLDisplay("Current water attempt");
Renderer renderer = new Renderer();
//InputHandler inputHandler = new InputHandler(renderer, neheGLDisplay);
neheGLDisplay.addGLEventListener(renderer);
//neheGLDisplay.addKeyListener(inputHandler);
neheGLDisplay.start();
}
}
This is the renderer class:
package waterAttempt41;
import Common.TextureReader;
import java.io.IOException;
import java.util.logging.Logger;
import javax.media.opengl.GL;
import javax.media.opengl.GLAutoDrawable;
import javax.media.opengl.GLEventListener;
import javax.media.opengl.glu.GLU;
class Renderer implements GLEventListener {
private static final Logger logger = Logger.getLogger(Renderer.class.getName());
drawWater water;
drawWater water2;
private int[] textures = new int[3]; // Storage For 3 Textures
private boolean aDown = false;
private boolean up = false;
private GLU glu = new GLU();
public void init(GLAutoDrawable drawable) {
GL gl = drawable.getGL();
try {
loadGLTextures(drawable);
} catch (IOException e) {
logger.info("Exception loading textures or Objects");
System.out.println("Couldn't load model/Texture");
throw new RuntimeException(e);
}
gl.glEnable(GL.GL_DEPTH_TEST);
gl.glShadeModel(GL.GL_SMOOTH);
gl.glLightModeli(GL.GL_LIGHT_MODEL_TWO_SIDE, GL.GL_TRUE);
gl.glCullFace(GL.GL_BACK); // Set Culling Face To Back Face
gl.glEnable(GL.GL_CULL_FACE); // Enable Culling
gl.glClearColor(0.0f, 0.0f, 0.0f, 1.0f); // Set Clear Color (black)
float spot_ambient[] = {0.2f, 0.2f, 0.2f, 1.0f};
float spot_diffuse[] = {10.2f, 10.2f, 10.2f, 10.0f};
float spot_specular[] = {10.2f, 10.2f, 10.2f, 10.0f};
gl.glLightfv(GL.GL_LIGHT0, GL.GL_AMBIENT, spot_ambient, 1);
gl.glLightfv(GL.GL_LIGHT0, GL.GL_DIFFUSE, spot_diffuse, 1);
gl.glLightfv(GL.GL_LIGHT0, GL.GL_SPECULAR, spot_specular, 1);
gl.glEnable(GL.GL_LIGHTING);
gl.glEnable(GL.GL_LIGHT0);
water = new drawWater();
water.setup(gl, "a*sin( y *(x-b) )");
water.setFuncVar('a', 1.103);
water.setFuncVar('b', 1.103);
water.setMax(5);
water2 = new drawWater();
water2.setup(gl, "a*sin( y *(x-b) )");
water2.setFuncVar('a', 0.05);
water2.setFuncVar('b', 10.0);
water2.setMax(25);
}
public void display(GLAutoDrawable drawable) {
GL gl = drawable.getGL();
// Clear Color Buffer, Depth Buffer, Stencil Buffer
gl.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT | GL.GL_STENCIL_BUFFER_BIT);
logger.info("\nWater: a = " + water.getFuncVar('a') + " b =" + water.getFuncVar('b') + "\n"
+ "Water2: a = " + water2.getFuncVar('a') + " b =" + water2.getFuncVar('b')
+ "\nWater: = " + water.getEqu() + " \nWater2 =" + water2.getEqu()
+ "\nWater: max = " + water.getMax() + " Water2: max = " + water2.getMax());
gl.glPushMatrix();
gl.glTranslatef(0.0f, -20.50f, 0.0f);
gl.glTranslatef(0.0f, 0.0f, -31.0f);
gl.glRotatef(90, 0.0f, 0.0f, 1.0f); // Rotate By -yrot On Y Axis
gl.glRotatef(90, 0.0f, 1.0f, 0.0f);
water.draw(gl);
gl.glTranslatef(0.0f, -20.0f, 0.0f);
water2.draw(gl);
gl.glPopMatrix();
gl.glPushMatrix();
gl.glTranslatef(0.0f, 0.0f, -20.0f); // Zoom Into The Screen 20 Units
gl.glEnable(GL.GL_TEXTURE_2D); // Enable Texture Mapping ( NEW )
drawRoom(gl); // Draw The Room
gl.glPopMatrix();
gl.glFlush(); // Flush The OpenGL Pipeline
}
private void drawRoom(GL gl) { // Draw The Room (Box)
gl.glBegin(GL.GL_QUADS); // Begin Drawing Quads
/*
// Floor
gl.glNormal3f(0.0f, 1.0f, 0.0f); // Normal Pointing Up
gl.glVertex3f(-20.0f, -20.0f, -40.0f); // Back Left
gl.glVertex3f(-20.0f, -20.0f, 40.0f); // Front Left
gl.glVertex3f(20.0f, -20.0f, 40.0f); // Front Right
gl.glVertex3f(20.0f, -20.0f, -40.0f); // Back Right
// Ceiling
gl.glNormal3f(0.0f, -1.0f, 0.0f); // Normal Point Down
gl.glVertex3f(-10.0f, 10.0f, 20.0f); // Front Left
gl.glVertex3f(-10.0f, 10.0f, -20.0f); // Back Left
gl.glVertex3f(10.0f, 10.0f, -20.0f); // Back Right
gl.glVertex3f(10.0f, 10.0f, 20.0f); // Front Right
// Back Wall
gl.glNormal3f(0.0f, 0.0f, -1.0f); // Normal Pointing Towards Viewer
gl.glVertex3f(20.0f, 20.0f, 30.0f); // Top Right
gl.glVertex3f(20.0f, -20.0f, 30.0f); // Bottom Right
gl.glVertex3f(-20.0f, -20.0f, 30.0f); // Bottom Left
gl.glVertex3f(-20.0f, 20.0f, 30.0f); // Top Left
// Left Wall
gl.glNormal3f(1.0f, 0.0f, 0.0f); // Normal Pointing Right
gl.glVertex3f(-20.0f, 20.0f, 30.0f); // Top Front
gl.glVertex3f(-20.0f, -20.0f, 30.0f); // Bottom Front
gl.glVertex3f(-20.0f, -20.0f, -30.0f); // Bottom Back
gl.glVertex3f(-20.0f, 20.0f, -30.0f); // Top Back
// Right Wall
gl.glNormal3f(-1.0f, 0.0f, 0.0f); // Normal Pointing Left
gl.glVertex3f(20.0f, 20.0f, -30.0f); // Top Back
gl.glVertex3f(20.0f, -20.0f, -30.0f); // Bottom Back
gl.glVertex3f(20.0f, -20.0f, 30.0f); // Bottom Front
gl.glVertex3f(20.0f, 20.0f, 30.0f); // Top Front
*/
// Front Wall
gl.glNormal3f(0.0f, 0.0f, 1.0f); // Normal Pointing Away From Viewer
gl.glTexCoord2f(1, 1);
gl.glVertex3f(-20.0f, 20.0f, -30.0f); // Top Left
gl.glTexCoord2f(1, 0);
gl.glVertex3f(-20.0f, -20.0f, -30.0f); // Bottom Left
gl.glTexCoord2f(0, 0);
gl.glVertex3f(20.0f, -20.0f, -30.0f); // Bottom Right
gl.glTexCoord2f(0, 1);
gl.glVertex3f(20.0f, 20.0f, -30.0f); // Top Right
gl.glPopMatrix();
gl.glEnd(); // Done Drawing Quads
}
public void reshape(GLAutoDrawable drawable, int xstart, int ystart, int width, int height) {
GL gl = drawable.getGL();
height = (height == 0) ? 1 : height;
gl.glViewport(0, 0, width, height);
gl.glMatrixMode(GL.GL_PROJECTION);
gl.glLoadIdentity();
gl.glRotatef(90, 0.0f, 0.0f, 1.0f);
glu.gluPerspective(60, (float) width / height, 1, 1000);
glu.gluLookAt(1.0f, 0.0f, 25.0f,
0.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f);
gl.glMatrixMode(GL.GL_MODELVIEW);
gl.glLoadIdentity();
}
public void displayChanged(GLAutoDrawable drawable, boolean modeChanged, boolean deviceChanged) {
}
private void loadGLTextures(GLAutoDrawable gldrawable) throws IOException {
TextureReader.Texture texture = null;
texture = TextureReader.readTexture("data/images/042.bmp");
GL gl = gldrawable.getGL();
//Create Nearest Filtered Texture
gl.glGenTextures(1, textures, 0);
gl.glBindTexture(GL.GL_TEXTURE_2D, textures[0]);
gl.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR);
gl.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR);
gl.glTexImage2D(GL.GL_TEXTURE_2D,
0,
3,
texture.getWidth(),
texture.getHeight(),
0,
GL.GL_RGB,
GL.GL_UNSIGNED_BYTE,
texture.getPixels());
}
}
The drawWater Class:
import com.sun.opengl.util.BufferUtil;
import javax.media.opengl.GL;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
/**
*
* @author Shane
*/
public class drawWater {
private Expr2 func; // The function that is being drawn.
private String functionInput;
private boolean version_1_5; // Whether OpenGL 1.5 is available; set in setup().
private boolean dataIsValid; // Set to false whenever the data needs to be recomputed.
// This is checked in draw() before drawing.
private int max;
/* Buffers to hold the points and normals for the surface. */
private FloatBuffer vBuf = BufferUtil.newFloatBuffer(201 * 201 * 3);
private FloatBuffer nBuf = BufferUtil.newFloatBuffer(201 * 201 * 3);
/* Buffers to hold the indices for drawing the surface and lines with glDrawElements*/
private IntBuffer surfaceIndexBuffer = BufferUtil.newIntBuffer(200 * 201 * 2);
private IntBuffer xLineIndexBuffer = BufferUtil.newIntBuffer(21 * 201);
private IntBuffer yLineIndexBuffer = BufferUtil.newIntBuffer(21 * 201);
/* VBO ID numbers for holding the data when OpenGL version is 1.5 or higher */
private int vertexVBO, normalVBO; // VBO IDs for surface data.
private int xLineVBO, yLineVBO, surfaceVBO; // VBO IDs for index data.
public drawWater() {
}
public void setup(GL gl, String equ) {
version_1_5 = gl.isExtensionAvailable("GL_VERSION_1_5");
if (gl.isExtensionAvailable("GL_VERSION_1_3")) {
gl.glEnable(GL.GL_MULTISAMPLE);
}
this.makeElementBuffers(); // Generate lists of indices for glDrawElements. This data never changes.
if (version_1_5) {
// Generate VBOs for the data, and fill the ones that are for index data with
// data from Java nio buffers. The VBOs for index data won't change again and
// so use GL.GL_STATIC_DRAW.
int[] ids = new int[5];
gl.glGenBuffers(5, ids, 0);
this.vertexVBO = ids[0];
this.normalVBO = ids[1];
this.xLineVBO = ids[2];
this.yLineVBO = ids[3];
this.surfaceVBO = ids[4];
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, vertexVBO);
gl.glVertexPointer(3, GL.GL_FLOAT, 0, 0);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, normalVBO);
gl.glNormalPointer(GL.GL_FLOAT, 0, 0);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, 0);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, surfaceVBO);
gl.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, 4 * 2 * 200 * 201, surfaceIndexBuffer, GL.GL_STATIC_DRAW);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, xLineVBO);
gl.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, 4 * 21 * 201, xLineIndexBuffer, GL.GL_STATIC_DRAW);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, yLineVBO);
gl.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, 4 * 21 * 201, yLineIndexBuffer, GL.GL_STATIC_DRAW);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0);
} else {
gl.glVertexPointer(3, GL.GL_FLOAT, 0, vBuf);
gl.glNormalPointer(GL.GL_FLOAT, 0, nBuf);
}
this.functionInput = equ;
this.func = new Expr2(equ);
this.dataIsValid = false; // Force recomputation of data with new graph definition.
}
public void draw(GL gl) {
if (func != null) {
gl.glMaterialfv(GL.GL_FRONT, GL.GL_AMBIENT_AND_DIFFUSE, new float[]{0.7f, 10.7f, 1}, 0);
gl.glMaterialfv(GL.GL_BACK, GL.GL_AMBIENT_AND_DIFFUSE, new float[]{10.8f, 0.8f, 0.5f}, 0);
if (!dataIsValid) {
this.computeSurfaceData();
if (version_1_5) {
// Set up VBOs for surface points and normals. Since these change
// pretty regularly, use GL.GL_DYNAMIC_DRAW.
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, vertexVBO);
gl.glBufferData(GL.GL_ARRAY_BUFFER, 4 * 3 * 201 * 201, vBuf, GL.GL_DYNAMIC_DRAW);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, normalVBO);
gl.glBufferData(GL.GL_ARRAY_BUFFER, 4 * 3 * 201 * 201, nBuf, GL.GL_DYNAMIC_DRAW);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, 0);
}
}
gl.glEnableClientState(GL.GL_VERTEX_ARRAY);
gl.glEnableClientState(GL.GL_NORMAL_ARRAY);
this.drawSurface(gl); // Just draw the surface.
gl.glPolygonOffset(1, 1);
gl.glEnable(GL.GL_POLYGON_OFFSET_FILL);
this.drawSurface(gl);
gl.glDisable(GL.GL_POLYGON_OFFSET_FILL);
gl.glDisable(GL.GL_LIGHTING);
gl.glColor3f(0, 0, 0);
gl.glDisableClientState(GL.GL_NORMAL_ARRAY);
gl.glEnable(GL.GL_LIGHTING);
}
gl.glDisableClientState(GL.GL_VERTEX_ARRAY);
gl.glDisableClientState(GL.GL_NORMAL_ARRAY);
}
public void setIsVaild(boolean bool) {
this.dataIsValid = bool;
}
public void setMax(int max) {
this.max = max;
}
public int getMax() {
return this.max;
}
public double getFuncVar(char var) {
return this.func.getVariable(var);
}
public void setFuncVar(char var, double value) {
this.func.setVariable(var, value);
}
public void setFunc(String func) {
this.func.parse(func);
}
public String getEqu() {
return this.functionInput;
}
private void makeElementBuffers() {
for (int i = 0; i < 201; i += 10) { // indices for drawing lines in x-direction
for (int j = 0; j < 201; j++) {
this.xLineIndexBuffer.put(201 * i + j);
}
}
for (int i = 0; i < 201; i += 10) { // indices for drawing lines in y-direction
for (int j = 0; j < 201; j++) {
this.yLineIndexBuffer.put(201 * j + i);
}
}
for (int i = 0; i < 200; i++) { // indices for drawing surface with GL_TRIANGLE_STRIPs
for (int j = 0; j < 201; j++) {
this.surfaceIndexBuffer.put(201 * (i + 1) + j);
this.surfaceIndexBuffer.put(201 * i + j);
}
}
this.xLineIndexBuffer.rewind();
this.yLineIndexBuffer.rewind();
this.surfaceIndexBuffer.rewind();
}
private void computeSurfaceData() {
double xmin = -5;
double xmax = 5;
double ymin = -5;
double ymax = 5;
double xRes = 200;
double yRes = 200;
float[] surfaceData = new float[301 * 3];
float[] normalData = new float[301 * 3];
double dx = (xmax - xmin) / xRes;
double dy = (ymax - ymin) / yRes;
for (int i = 0; i <= xRes; i++) {
int v = 0;
int n = 0;
double y1 = ymin + dy * i;
for (int j = 0; j <= yRes; j++) {
double x = xmin + dx * j;
this.func.setVariable('x', x);
this.func.setVariable('y', y1);
double z1 = this.func.value();
float[] normal1 = computeUnitNormal(x, y1);
surfaceData[v++] = (float) x;
surfaceData[v++] = (float) y1;
surfaceData[v++] = (float) z1;
normalData[n++] = normal1[0];
normalData[n++] = normal1[1];
normalData[n++] = normal1[2];
}
this.vBuf.put(surfaceData, 0, 201 * 3);
this.nBuf.put(normalData, 0, 201 * 3);
}
this.vBuf.rewind();
this.nBuf.rewind();
this.dataIsValid = true;
}
/**
* Draw the surface as a series of triangle strips.
*/
private void drawSurface(GL gl) {
if (version_1_5) {
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, surfaceVBO);
for (int i = 0; i < 200; i++) {
gl.glDrawElements(GL.GL_TRIANGLE_STRIP, 402, GL.GL_UNSIGNED_INT, 402 * i * 4);
}
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0);
} else {
for (int i = 0; i < 200; i++) {
this.surfaceIndexBuffer.position(402 * i);
gl.glDrawElements(GL.GL_TRIANGLE_STRIP, 402, GL.GL_UNSIGNED_INT, surfaceIndexBuffer);
}
}
}
/**
* Compute a unit normal to the graph of z = func(x,y).
* This is only an approximation, using nearby points instead
* of exact derivatives.
*/
private float[] computeUnitNormal(double x, double y) {
double epsilon = 0.00001;
this.func.setVariable('x', x);
this.func.setVariable('y', y);
double z = this.func.value();
this.func.setVariable('x', x + epsilon);
double z1 = func.value();
this.func.setVariable('x', x);
this.func.setVariable('y', y + epsilon);
double z2 = this.func.value();
// normal is (epsilon,0,z1-z) X (0,epsilon,z2-z)
double a = -epsilon * (z1 - z);
double b = -epsilon * (z2 - z);
double c = epsilon * epsilon;
double length = Math.sqrt(a * a + b * b + c * c);
if (Double.isNaN(length) || Double.isInfinite(length)) {
return new float[]{0, 0, 1};
} else {
return new float[]{(float) (a / length), (float) (b / length), (float) (c / length)};
}
}
}
Any help would be appreciated. This is part of my final-year project in college, so any help would be great.
The calls to glVertexPointer/glNormalPointer are made once in setup(), not once per draw.
So, when you create the 2 drawWater objects, the last one leaves its vertex and normal data bound to the GL, which all the calls to glDrawElements will use.
You need to modify the code so that glVertexPointer/glNormalPointer (along with the glBindBuffer calls that accompany them) are done per draw.
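A sketch of that change, reusing the fields set up earlier (only the start of draw() shown):
public void draw(GL gl) {
    if (func != null) {
        if (version_1_5) {
            // Rebind this object's VBOs and repoint the GL at them every frame.
            gl.glBindBuffer(GL.GL_ARRAY_BUFFER, vertexVBO);
            gl.glVertexPointer(3, GL.GL_FLOAT, 0, 0);
            gl.glBindBuffer(GL.GL_ARRAY_BUFFER, normalVBO);
            gl.glNormalPointer(GL.GL_FLOAT, 0, 0);
            gl.glBindBuffer(GL.GL_ARRAY_BUFFER, 0);
        } else {
            // Client-side arrays: point at this object's own nio buffers.
            gl.glVertexPointer(3, GL.GL_FLOAT, 0, vBuf);
            gl.glNormalPointer(GL.GL_FLOAT, 0, nBuf);
        }
        // ... rest of draw() as before ...
    }
}
The corresponding pointer calls can then be dropped from setup().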