Shader testgrid - lines jagged and breaking - java

I need to write a simple shader for the testgrid ground surface. I want to basically draw parallel lines in shader code.
Problem: as the lines grow more distant from the camera, they begin to break up and there're gaps in them. I understand why that happens with my code - because OpenGL approximates fragment's position as being too far from the point that I calculate, so it marks it as not belonging to a line.
I am passing the actual world positions of the plane surface vectors to my shader - that's how I can calculate it.
I've been playing with the algorithm for an hour, but can't seem to get good results.
The best idea I've tried was to include a small coefficient that grows the further the line gets from the camera - but the results are underwhelming. I calculated the coefficient linearly, but I guess I need some smarter formula to go that route, because the rate at which the lines grow thinner on the screen isn't linear. I can't figure this out so far though. Currently it either makes close lines too thick, which is undesirable, or still has the same problem for distant lines.
To simplify, I'm currently only drawing X-axis lines
I'm including a piece of shader code and a screenshot of the problem.
#version 300 es
precision highp float;
precision highp int;
in highp vec3 vertexPosition;
out mediump vec4 fragColor;
void main()
{
highp float lineWidth = 0.2;
highp float squareSize = 5.0f;
highp int roundX = int(vertexPosition.x / squareSize);
highp int roundY = int(vertexPosition.z / squareSize);
highp float remainderX = vertexPosition.x - float(roundX)*squareSize;
highp float remainderY = vertexPosition.x - float(roundY)*squareSize;
// this is the small coefficient I was trying to add to linewidth
highp float test = abs(0.08 * float(roundX));
if (abs(remainderX) <= (lineWidth))
{
fragColor = vec4(1,0,0, 1);
}
else
{
fragColor = vec4(0.8,0.8,0.8, 1);
}
}
The first answer fixes the main problem with lines breaking, but introduces a visual bug. Gonna go and try to find out why. Anyway, this is already a good idea! But as you can see the lines get wider towards the end.
Edit: Found it. Just removed the Z coordinate from vertexPosition before doing dFdy. Now all I need it a way to make the lines smoother and not staircase-like.
p.s. Don't look at how optimized the code is - I'm currently just searching for the right idea
p.p.s. If someone can tell me how to do simple antialiasing for this example - this also would be most welcome.

It is important that roundX is rounded (round) to the nearest integer, rather than truncated:
highp int roundX = int(round(vertexPosition.x / squareSize));
or
highp int roundX = int(vertexPosition.x / squareSize + 0.5 * sign(vertexPosition.x));
A possible solution is to get the partial derivative of vertexPosition.xy along the y axis of the viewport by dFdy.
The length of the partial derivative of vertexPosition.xy gives the distance between 2 fragments in model space. Thus the minimum thickness of a line can be defined:
vec2 dy = dFdy(vertexPosition.xy);
float minWidth = length(dy);
float w = step(max(lineWidth, minWidth), abs(remainderX));
fragColor = mix(vec4(1.0, 0.0, 0.0, 1.0), vec4(0.8, 0.8, 0.8, 1.0), w);
For smoother lines, you have to interpolate the line color and the ground color. Interpolate if abs(remainderX) is between min(lineWidth, minWidth) and max(lineWidth, minWidth). Use smoothstep for the interpolation. e.g.:
highp int roundX = int(round(vertexPosition.x / squareSize));
highp float remainderX = vertexPosition.x - float(roundX)*squareSize;
vec2 dy = dFdy(vertexPosition.xy);
float minWidth = length(dy);
float w = smoothstep(min(lineWidth, minWidth), max(lineWidth, minWidth), abs(remainderX));
fragColor = mix(vec4(1.0, 0.0, 0.0, 1.0), vec4(0.8, 0.8, 0.8, 1.0), w);
See the Three.js example, which uses the shader:
(function onLoad() {
var camera, scene, renderer, orbitControls;
init();
animate();
function init() {
renderer = new THREE.WebGLRenderer({
antialias: true,
alpha: true
});
renderer.setPixelRatio(window.devicePixelRatio);
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.shadowMap.enabled = true;
document.body.appendChild(renderer.domElement);
camera = new THREE.PerspectiveCamera(70, window.innerWidth / window.innerHeight, 1, 300);
camera.position.set(10, 15, -60);
loader = new THREE.TextureLoader();
loader.setCrossOrigin("");
scene = new THREE.Scene();
scene.background = new THREE.Color(0xffffff);
scene.add(camera);
window.onresize = resize;
orbitControls = new THREE.OrbitControls(camera, renderer.domElement);
var helper = new THREE.GridHelper(400, 10);
helper.material.opacity = 0.25;
helper.material.transparent = true;
scene.add(helper);
var axis = new THREE.AxesHelper(1000);
scene.add(axis);
var material = new THREE.ShaderMaterial({
vertexShader: document.getElementById('vertex-shader').textContent,
fragmentShader: document.getElementById('fragment-shader').textContent,
});
material.extensions = {
derivatives: true
}
var geometry = new THREE.BoxGeometry( 100, 0.1, 100 );
var mesh = new THREE.Mesh(geometry, material);
scene.add(mesh);
}
function resize() {
var aspect = window.innerWidth / window.innerHeight;
renderer.setSize(window.innerWidth, window.innerHeight);
camera.aspect = aspect;
camera.updateProjectionMatrix();
}
function animate() {
requestAnimationFrame(animate);
orbitControls.update();
render();
}
function render() {
renderer.render(scene, camera);
}
})();
<script type='x-shader/x-vertex' id='vertex-shader'>
varying vec3 vertexPosition;
void main() {
vertexPosition = position.zyx;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}
</script>
<script type='x-shader/x-fragment' id='fragment-shader'>
precision highp float;
varying vec3 vertexPosition;
int round(float x)
{
return int(x + 0.5 * sign(x));
}
void main(){
vec4 fragColor;
highp float lineWidth = 0.2;
highp float squareSize = 5.0;
highp int roundX = round(vertexPosition.x / squareSize);
highp float remainderX = vertexPosition.x - float(roundX)*squareSize;
vec2 dy = dFdy(vertexPosition.xy);
float minWidth = length(dy);
float w = smoothstep(min(lineWidth, minWidth), max(lineWidth, minWidth), abs(remainderX));
//float w = step(max(lineWidth, minWidth), abs(remainderX));
fragColor = mix(vec4(1.0, 0.0, 0.0, 1.0), vec4(0.8, 0.8, 0.8, 1.0), w);
gl_FragColor = fragColor;
}
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/110/three.min.js"></script>
<script src="https://threejs.org/examples/js/controls/OrbitControls.js"></script>

Related

Why does this lighting code only work for single lights and break when I add multiple lights?

I'm trying to create a simple 2D game engine using lwjgl and java. But I got stuck during the lighting programming, because the code I wrote only works when I have one light in the scene and not when I add multiple. I just can't figure out why so i decided to ask here.
This is the fragment shader with the lighting calculation:
#version 330 core
layout (location = 0) out vec4 color;
layout (origin_upper_left, pixel_center_integer) in vec4 gl_FragCoord;
in DATA {
vec2 tc;
} fs_in;
uniform sampler2D tex;
float map(float value, float min1, float max1, float min2, float max2) {
return min2 + (value - min1) * (max2 - min2) / (max1 - min1);
}
void main() {
vec3 lightColor = vec3(1, 1, 1);
float range = 700;
float x = 200;
float y = 200;
float ambient = 0.1;
float intensity = 0.8;
float alpha = 1.0;
vec3 totalDiffuse = vec3(0.0);
for(int i=0;i<1;i++){
alpha = 1-map(distance(gl_FragCoord.xy, vec2(x*i, y*i)), 0.0, range, 0.0, 1.0);
totalDiffuse += alpha*lightColor;
}
totalDiffuse = max(totalDiffuse, ambient);
color = vec4(totalDiffuse, 1.0) * texture(tex, fs_in.tc);
}
If I run this code with only one light in the scene, i.e. setting the times the for loop runs to 1, then it works just fine and creates something like this:
But when I change it to loop for example 3 times, you would expect it to create 3 different lights, but actually it just increases the light intensity of the first light like this:
Anybody know why?
The result of the term
alpha = 1-map(distance(gl_FragCoord.xy, vec2(x*i, y*i)), 0.0, range, 0.0, 1.0);
can be negative.
This will cause that the totalDiffuse is decreased.
Clamp alpha to a minimum of 0:
//totalDiffuse += alpha*lightColor;
totalDiffuse += max(0.0, alpha) * lightColor;
Note, the distance between the light sources is length(vec2(200, 200)), but the illumination range (radius) of each light source is 700. So anyway the light
sources are overlapping.
I recommend to use the glsl function smoothstep, which performs a Hermite interpolation between two values.
Try the following:
for(int i=0; i<3; i++ )
{
vec2 pos = vec2(x, y) * float(i);
float dist = distance(gl_FragCoord.xy, pos);
alpha = smoothstep(50.0, 100.0, dist);
totalDiffuse += clamp(1.0-alpha, 0.0, 1.0) * lightColor;
}
in this case the 2nd parameter (100.0) to smoothstep is the maximum radius of the light source and the 1st parameter (50.0) is the radius which is full lit.
See the WebGL example, where I used smoothstep:
(function loadscene() {
var canvas, gl, vp_size, prog, bufObj = {};
function initScene() {
canvas = document.getElementById( "ogl-canvas");
gl = canvas.getContext( "experimental-webgl" );
if ( !gl )
return;
progDraw = gl.createProgram();
for (let i = 0; i < 2; ++i) {
let source = document.getElementById(i==0 ? "draw-shader-vs" : "draw-shader-fs").text;
let shaderObj = gl.createShader(i==0 ? gl.VERTEX_SHADER : gl.FRAGMENT_SHADER);
gl.shaderSource(shaderObj, source);
gl.compileShader(shaderObj);
let status = gl.getShaderParameter(shaderObj, gl.COMPILE_STATUS);
if (!status) alert(gl.getShaderInfoLog(shaderObj));
gl.attachShader(progDraw, shaderObj);
gl.linkProgram(progDraw);
}
status = gl.getProgramParameter(progDraw, gl.LINK_STATUS);
if ( !status ) alert(gl.getProgramInfoLog(progDraw));
progDraw.inPos = gl.getAttribLocation(progDraw, "inPos");
progDraw.u_time = gl.getUniformLocation(progDraw, "u_time");
progDraw.u_resolution = gl.getUniformLocation(progDraw, "u_resolution");
gl.useProgram(progDraw);
var pos = [ -1, -1, 1, -1, 1, 1, -1, 1 ];
var inx = [ 0, 1, 2, 0, 2, 3 ];
bufObj.pos = gl.createBuffer();
gl.bindBuffer( gl.ARRAY_BUFFER, bufObj.pos );
gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( pos ), gl.STATIC_DRAW );
bufObj.inx = gl.createBuffer();
bufObj.inx.len = inx.length;
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufObj.inx );
gl.bufferData( gl.ELEMENT_ARRAY_BUFFER, new Uint16Array( inx ), gl.STATIC_DRAW );
gl.enableVertexAttribArray( progDraw.inPos );
gl.vertexAttribPointer( progDraw.inPos, 2, gl.FLOAT, false, 0, 0 );
gl.enable( gl.DEPTH_TEST );
gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
window.onresize = resize;
resize();
requestAnimationFrame(render);
}
function resize() {
//vp_size = [gl.drawingBufferWidth, gl.drawingBufferHeight];
vp_size = [window.innerWidth, window.innerHeight];
//vp_size = [256, 256]
canvas.width = vp_size[0];
canvas.height = vp_size[1];
}
function render(deltaMS) {
gl.viewport( 0, 0, canvas.width, canvas.height );
gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
gl.uniform1f(progDraw.u_time, deltaMS/2000.0);
gl.uniform2f(progDraw.u_resolution, canvas.width, canvas.height);
gl.drawElements( gl.TRIANGLES, bufObj.inx.len, gl.UNSIGNED_SHORT, 0 );
requestAnimationFrame(render);
}
initScene();
})();
<script id="draw-shader-vs" type="x-shader/x-vertex">
precision mediump float;
attribute vec2 inPos;
void main() {
gl_Position = vec4( inPos.xy, 0.0, 1.0 );
}
</script>
<script id="draw-shader-fs" type="x-shader/x-fragment">
precision mediump float;
uniform float u_time;
uniform vec2 u_resolution;
vec3 HUEtoRGB(in float H)
{
float R = abs(H * 6.0 - 3.0) - 1.0;
float G = 2.0 - abs(H * 6.0 - 2.0);
float B = 2.0 - abs(H * 6.0 - 4.0);
return clamp( vec3(R,G,B), 0.0, 1.0 );
}
void main() {
vec2 uv = gl_FragCoord.xy / u_resolution;
vec3 lightColor = vec3(1, 1, 1);
float range = length(u_resolution) / 4.0;
float x = u_resolution.x / 4.0;
float y = u_resolution.y / 4.0;
float ambient = 0.1;
float intensity = 0.8;
float alpha = 1.0;
vec3 totalDiffuse = vec3(0.0);
for(int i=0;i<3;i++)
{
vec2 pos = vec2(x, y) * float(i+1);
float dist = distance(gl_FragCoord.xy, pos);
alpha = smoothstep(range/4.0, range/2.0, dist);
totalDiffuse += clamp(1.0-alpha, 0.0, 1.0) * lightColor;
}
totalDiffuse = max(totalDiffuse, ambient);
vec4 texcol = vec4( 1.0-uv.x, 1.0-uv.y, uv.x*uv.y, 1.0 );
gl_FragColor = vec4(totalDiffuse, 1.0) * texcol;
}
</script>
<canvas id="ogl-canvas" style="border: none"></canvas>

How to get coordinate of fragment shaders? gl_FragCoord not working

I'm trying to make a Mandelbrot set explorer, which will shade the pixels on the screen based on its coordinate in the window. I've done this before without using shaders but its extremely slow. I can't figure out how to get the position of the fragment so that I can use the algorithm I've already developed to render the Mandelbrot set.
I'm using ljgwl 3. I've been researching all day on how to do this, and I can't find any comprehensive findings on how to get the coordinates. It seems like gl_FragCoord should work and then I could use gl_FragCoord.x and gl_FragCoord.y to get the x and y values, which is all I need for the algorithm, but my screen always ends up being all red. I'm not passing any info from the vertex shader into my fragment shader because I need to render the color of each coordinate in the Mandelbrot based on its x and y values, so the vertex positions aren't helpful (I also don't understand how to get those).
Here is my fragment shader:
in vec4 gl_FragCoord;
uniform vec2 viewportDimensions;
uniform float minX;
uniform float maxX;
uniform float minY;
uniform float maxY;
vec3 hsv2rgb(vec3 c){
vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
}
void main(){
float x = gl_FragCoord.x;
float y = gl_FragCoord.y;
vec2 c = vec2((x* (maxX-minX) / viewportDimensions.x + minX), (y*(maxY-minY)/ viewportDimensions.y + minY));
vec2 z = c;
float limit = 1000;
float iterations = 0;
for(int i = 0; i < int(limit); i++){
float t = 2.0 * z.x * z.y + c.y;
z.x = z.x * z.x - z.y * z.y + c.x;
z.y = t;
if(z.x * z.x + z.y *z.y > 4){
break;
}
iterations += 1.0;
}
float itRGB = iterations/limit;
vec3 hsv = vec3(itRGB, 1.0, 1.0);
vec3 rgb = hsv2rgb(hsv);
gl_FragColor = vec4(rgb, 1);
}
I thought that I could use gl_FragCoord without declaring it as in first but it doesn't work either way. vec2 c is attempting to map the current coordinate to a coordinate in the complex number grid based on current resolution of the window.
This is all that's in my vertex shader:
void main(){
gl_Position = ftransform();
}
And the relevant bit of my client code:
glBegin(GL_POLYGON);
glVertex2f(-1f, -1f);
glVertex2f(1f, -1f);
glVertex2f(1f, 1f);
glVertex2f(-1f, 1f);
glEnd();
This is running in my window loop, and just creates the square where the mandelbrot is supposed to render.
This is the output of my working java Mandelbrot program which doesn't use shaders:
This is the output of my shader program:
Fullscreen:
I also have no clue as to how to be able to resize the window properly without the black bars. I am attempting to do this with vec2 c in my code above as I have set the uniforms to be the windows height and width and am using that when mapping the coordinate to the complex number plane, but as gl_FragCoord doesn't seem to work then neither should this. A link to a current guide on lgjwl screen resizing based on glfwCreateWindow would be vastly appreciated.
gl_FragCoord is a built-in input variables, it isn't necessary to declared the input variable gl_FragCoord. The x and y coordinate are window (pixel) coordinate.
The lower left of gl_FragCoord is (0.5, 0.5) and the upper right is (W-0.5, H-0.5), where W and H are the width and the height of the viewport.
You've to map gl_FragCoord.x to the range [minX, maxX] and gl_FragCoord.y to the range [minY, maxy].
I recommend to us the GLSL function mix for this.
viewportDimensions is assumed to contain the with and the height of the viewport rectangle in window (pixel) coordinates.
vec2 c = mix(vec2(minX, minY), vec2(maxX, maxY), gl_FragCoord.xy / viewportDimensions.xy);
See the (WebGL) example, where I applied the suggestions to the the fragment shader of the question.
The bounds are initialized as follows
minX = -2.5
minY = -2.0
maxX = 1.5
maxY = 2.0
(function loadscene() {
var canvas, gl, vp_size, prog, bufObj = {};
function initScene() {
canvas = document.getElementById( "ogl-canvas");
gl = canvas.getContext( "experimental-webgl" );
if ( !gl )
return;
progDraw = gl.createProgram();
for (let i = 0; i < 2; ++i) {
let source = document.getElementById(i==0 ? "draw-shader-vs" : "draw-shader-fs").text;
let shaderObj = gl.createShader(i==0 ? gl.VERTEX_SHADER : gl.FRAGMENT_SHADER);
gl.shaderSource(shaderObj, source);
gl.compileShader(shaderObj);
let status = gl.getShaderParameter(shaderObj, gl.COMPILE_STATUS);
if (!status) alert(gl.getShaderInfoLog(shaderObj));
gl.attachShader(progDraw, shaderObj);
gl.linkProgram(progDraw);
}
status = gl.getProgramParameter(progDraw, gl.LINK_STATUS);
if ( !status ) alert(gl.getProgramInfoLog(progDraw));
progDraw.inPos = gl.getAttribLocation(progDraw, "inPos");
progDraw.minX = gl.getUniformLocation(progDraw, "minX");
progDraw.maxX = gl.getUniformLocation(progDraw, "maxX");
progDraw.minY = gl.getUniformLocation(progDraw, "minY");
progDraw.maxY = gl.getUniformLocation(progDraw, "maxY");
progDraw.viewportDimensions = gl.getUniformLocation(progDraw, "viewportDimensions");
gl.useProgram(progDraw);
var pos = [ -1, -1, 1, -1, 1, 1, -1, 1 ];
var inx = [ 0, 1, 2, 0, 2, 3 ];
bufObj.pos = gl.createBuffer();
gl.bindBuffer( gl.ARRAY_BUFFER, bufObj.pos );
gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( pos ), gl.STATIC_DRAW );
bufObj.inx = gl.createBuffer();
bufObj.inx.len = inx.length;
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufObj.inx );
gl.bufferData( gl.ELEMENT_ARRAY_BUFFER, new Uint16Array( inx ), gl.STATIC_DRAW );
gl.enableVertexAttribArray( progDraw.inPos );
gl.vertexAttribPointer( progDraw.inPos, 2, gl.FLOAT, false, 0, 0 );
gl.enable( gl.DEPTH_TEST );
gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
window.onresize = resize;
resize();
requestAnimationFrame(render);
}
function resize() {
//vp_size = [gl.drawingBufferWidth, gl.drawingBufferHeight];
vp_size = [window.innerWidth, window.innerHeight];
//vp_size = [256, 256]
canvas.width = vp_size[0];
canvas.height = vp_size[1];
}
function render(deltaMS) {
gl.viewport( 0, 0, canvas.width, canvas.height );
gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
gl.uniform1f(progDraw.minX, -2.5);
gl.uniform1f(progDraw.minY, -2.0);
gl.uniform1f(progDraw.maxX, 1.5);
gl.uniform1f(progDraw.maxY, 2.0);
gl.uniform2f(progDraw.viewportDimensions, canvas.width, canvas.height);
gl.drawElements( gl.TRIANGLES, bufObj.inx.len, gl.UNSIGNED_SHORT, 0 );
requestAnimationFrame(render);
}
initScene();
})();
html,body { margin: 0; overflow: hidden; }
<script id="draw-shader-vs" type="x-shader/x-vertex">
precision mediump float;
attribute vec2 inPos;
void main()
{
gl_Position = vec4( inPos.xy, 0.0, 1.0 );
}
</script>
<script id="draw-shader-fs" type="x-shader/x-fragment">
precision mediump float;
uniform vec2 viewportDimensions;
uniform float minX;
uniform float maxX;
uniform float minY;
uniform float maxY;
vec3 hsv2rgb(vec3 c){
vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
}
void main()
{
float x = gl_FragCoord.x;
float y = gl_FragCoord.y;
vec2 c = mix(vec2(minX, minY), vec2(maxX, maxY), gl_FragCoord.xy / viewportDimensions.xy);
vec2 z = c;
float limit = 64.0;
float iterations = 0.0;
for(int i = 0; i < 64; i++){
float t = 2.0 * z.x * z.y + c.y;
z.x = z.x * z.x - z.y * z.y + c.x;
z.y = t;
if(z.x * z.x + z.y *z.y > 4.0){
break;
}
iterations += 1.0;
}
float itRGB = iterations/limit;
vec3 hsv = vec3(itRGB, 1.0, 1.0);
vec3 rgb = hsv2rgb(hsv);
gl_FragColor = vec4(rgb, 1);
}
</script>
<canvas id="ogl-canvas" style="border: none"></canvas>

LWJGL2 projectionMatrix does not work for me

before showing my code I want to explain the Situation a littlze bit. I am trying to make a FlappyBird clone just for practice using Lwjgl2. Right now I am able to create a textured Quad which can move ONLY in x and y direction and rotate around all the axis. I was about to set up the projectionMatrix so I can also have 3D movement and the Z axis work.
I followed a tutorial on youtube, doing the exact same things but it somehow does not work for me at all.
When trying to move the Object without using the projectionMatrix, it vanishes at soon as Z > 1 or Z < -1 for some reason, although nothing should happen. As soon as I add the projectionMatrix inside the vertexShader it vanishes for every coordinate I give to be rendered at... it just disappears into the void.
Here is all the relevant code:
Model model = loader.loadToVAO(vertices, textureCoords, indices);
ModelTexture texture = new ModelTexture(loader.loadTexture("Flappy2"));
TexturedModel texturedModel = new TexturedModel(model, texture);
Entity entity = new Entity(texturedModel, new Vector3f(0,0,0),0,0,0,1 );
float y = 0;
float x = 0;
//Main loop which renders stuff
while(!Display.isCloseRequested()) {
while(Keyboard.next()) {
if (Keyboard.getEventKey() == Keyboard.KEY_SPACE) {
if (Keyboard.getEventKeyState()) {
y = 0.1f;
}
}
}
y -= 0.005f;
entity.increasePosition(x, y, 0);
entity.increaseRotation(0, 0,0 );
renderer.prepare();
shader.start();
renderer.render(entity, shader);
shader.stop();
DisplayManager.updateDisplay();
}
shader.cleanUp();
loader.cleanUp();
DisplayManager.closeDisplay();
}
This is the main loop, nothing special.
#version 400 core
in vec3 position;
in vec2 textureCoords;
out vec2 pass_textureCoords;
out vec3 color;
uniform mat4 transformationMatrix;
uniform mat4 projectionMatrix;
void main(void){
gl_Position = projectionMatrix * transformationMatrix * vec4(position,1.0);
pass_textureCoords = textureCoords;
color = vec3(position.x +0.5,0.0,position.y+0.5);
}
That was the vertex Shader.
private void createProjectionMatrix() {
float aspectRatio = (float) Display.getDisplayMode().getWidth() / (float) Display.getDisplayMode().getHeight();
float y_scale = (float)(1f / Math.tan(Math.toRadians(FOV / 2f))) * aspectRatio;
float x_scale = y_scale / aspectRatio;
float frustum_length = FAR_PLANE - NEAR_PLANE;
projectionMatrix = new Matrix4f();
projectionMatrix.m00 = x_scale;
projectionMatrix.m11 = y_scale;
projectionMatrix.m22 = -((FAR_PLANE + NEAR_PLANE) / frustum_length);
projectionMatrix.m23 = -1;
projectionMatrix.m32 = -((2 * NEAR_PLANE * FAR_PLANE) / frustum_length);
projectionMatrix.m33 = 0;
}
Here I set up the projectionMatrix in the Rendering class.
As I said, most of it is copied from a youtube tutorial, as I am new to LWJGL2. So if it works for him why does it not for me ?
I tried copying the entire tutorial code, instead of just typing it myself and it did somehow fix my problem.
I probably had switched variable names somewhere or small errors like that which prevented the projection Matrix from working.
No need to comment anymore :) Ignore this post

Render depth to texture

I can't get my depth to render correctly. No errors are thrown, the glCheckFramebufferStatus says it is complete as well.
Below is the code, the screen always shows up white. The depth values are not 1, but very very close:
EDIT:
So I tried linearizing the depth inside of my depth fragment shader and then drawing that directly to the screen to make sure the values were correct. They are correct. However, even if I send that linearized depth to my full screen quad shader (the 2nd one below), the screen is still all white.
public void initFramebuffers() {
glBindFramebuffer(GL_FRAMEBUFFER, depthShader.fbo);
depthShader.initTexture(width, height, GL_DEPTH_COMPONENT, GL_DEPTH_COMPONENT);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthShader.tex, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
}
public void initTexture(int width, int height, int format, int internalFormat) {
tex = glGenTextures();
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, width, height, 0, format, GL_FLOAT, (ByteBuffer)null);
}
Depth Shader:
#version 400
in vec3 pos;
in float radius;
uniform mat4 mView;
uniform mat4 projection;
uniform vec2 screenSize;
uniform vec3 lightPos;
out float depth;
float linearizeDepth(float depth) {
float n = 0.01;
float f = 100;
return (2.0 * n) / (f + n - depth * (f - n));
}
void main() {
//calculate normal
vec3 normal;
normal.xy = gl_PointCoord * 2.0 - 1.0;
float r2 = dot(normal.xy, normal.xy);
if (r2 > 1.0) {
discard;
}
normal.z = sqrt(1.0 - r2);
//calculate depth
vec4 pixelPos = vec4(pos + normal * radius, 1.0);
vec4 clipSpacePos = projection * pixelPos;
depth = clipSpacePos.z / clipSpacePos.w * 0.5f + 0.5f;
depth = linearizeDepth(depth);
}
Shader that reads in the depth. The values in linearizeDepth are my near and far distances:
#version 400
in vec2 coord;
uniform sampler2D depthMap;
uniform vec2 screenSize;
uniform mat4 projection;
out vec4 color;
float linearizeDepth(float depth) {
float n = 0.01;
float f = 100;
return (2.0 * n) / (f + n - depth * (f - n));
}
void main() {
float curDepth = texture2D(depthMap, coord).x;
//float d = linearizeDepth(curDepth);
color = vec4(d, d, d, 1.0f);
}
Code for drawing everything:
//--------------------Particle Depth-----------------------
{
glUseProgram(depthShader.program);
glBindFramebuffer(GL_FRAMEBUFFER, depthShader.fbo);
depthShader.particleDepthVAO(points);
//Sets uniforms
RenderUtility.addMatrix(depthShader, mView, "mView");
RenderUtility.addMatrix(depthShader, projection, "projection");
RenderUtility.addVector2(depthShader, screenSize, "screenSize");
RenderUtility.addVector3(depthShader, lightPosition, "lightPos");
glDisable(GL_BLEND);
glEnable(GL_DEPTH_TEST);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBindVertexArray(depthShader.vao);
glDrawArrays(GL_POINTS, 0, points.size());
}
//Draw full screen
{
glUseProgram(blurShader.program);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
blurShader.blurDepthVAO();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, depthShader.tex);
glUniform1i(blurShader.depthMap, 0);
//Sets uniforms
RenderUtility.addMatrix(blurShader, mView, "mView");
RenderUtility.addMatrix(blurShader, projection, "projection");
RenderUtility.addVector2(blurShader, screenSize, "screenSize");
//glEnable(GL_DEPTH_TEST);
glBindVertexArray(blurShader.vao);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glViewport(0, 0, width, height);
}
The problem ended up being that my vertex shader's out variable name didn't match the fragment shader's in variable name (doh). The code posted above is 100% correct in case anyone sees this in the future.
There are a few issues with the posted code.
Inconsistent use of render target
In the setup of the FBO, there is only a depth attachment, and no color attachment. The color draw buffer is also explicitly disabled:
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthShader.tex, 0);
glDrawBuffer(GL_NONE);
However, the fragment shader writes a color output:
out float depth;
...
depth = clipSpacePos.z / clipSpacePos.w * 0.5f + 0.5f;
depth = linearizeDepth(depth);
To write to the depth attachment of the framebuffer, you will have to set the value of the predefined gl_FragDepth variable. Just because the out variable is named depth does not mean that it's actually used as the depth output. If you want to use the color output, you'll have to create a regular texture, and attach it to GL_COLOR_ATTACHMENT0. Which would actually seem easier.
linearizeDepth() calculation
float linearizeDepth(float depth) {
float n = 0.01;
float f = 100;
return (2.0 * n) / (f + n - depth * (f - n));
}
depth = clipSpacePos.z / clipSpacePos.w * 0.5f + 0.5f;
depth = linearizeDepth(depth);
The way the clipSpacePos is processed, it looks like the arguments to linarizeDepth() will be between 0.0 and 1.0. The calculation inside the function for these extreme values is then:
0.0 --> (2.0 * n) / (f + n)
1.0 --> 1.0
This looks fine for 1.0, but questionable for 0.0. I believe it would actually be more correct to make the preparation step:
depth = clipSpacePos.z / clipSpacePos.w;
This will then pass arguments between -1.0 and 1.0 to the function, which then produces:
-1.0 --> n / f
1.0 --> 1.0
It would actually make even more sense to me to scale the whole thing to produce results between 0.0 and 1.0, but at least this version makes intuitive sense, producing the relative distance to the far plane.
Calculation more complex than necessary
The above looks unnecessarily convoluted to me. You're applying the projection matrix, take the depth from the result, and then effectively invert the depth calculation applied by the projection matrix.
It would seem a whole lot simpler to not apply the projection matrix in the first place, and simply take the original distance. You can still divide by the far distance if you want a relative distance. At least as long as you use a standard projection matrix, I believe the following is equivalent to the corrected calculation above:
vec4 pixelPos = vec4(pos + normal * radius, 1.0);
float f = 100.0; // far plane
depth = -pixelPos.z / f;
The minus sign comes in because the most commonly used eye coordinate system assumes that you're looking down the negative z-axis.
If you wanted results between 0.0 and 1.0, you could also change this to:
float n = 0.01; // near plane
float f = 100.0; // far plane
depth = (-pixelPos.z - n) / (f - n);

OpenGL Rotation Skewed Issues

I am having a bit of an issue with any rotation around the x-axis inside of OpenGL. What I have is a basic cube that is being rendered inside of a shader, with normals for lighting calculations. The transformation of the cube is the projection matrix multiplied by the model matrix. This concatenation is done inside the shader, while the rotation and translation calculations are done inside a Matrix class I wrote. When any rotations are done around the y-axis, everything rotates as expected as shown by these pictures:
The problems start when any rotations occur around the x-axis as shown here:
Apparently the x-axis rotations skew the scene and cause everything to be shown out of proportion. Here is the code the calculates the matrices and passes is to the shader every time this is rendered:
glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
viewMatrix.setIdentity();
viewMatrix.translate(new Vector4(translation));
viewMatrix.rotate(1, 0, 0, -rotateX);
viewMatrix.rotate(0, 1, 0, rotateY);
Matrix4 mvMatrix = new Matrix4(viewMatrix);
Matrix4 pMatrix = new Matrix4(projectionMatrix);
lightShader.useShader();
lightShader.setUniformVector("vColor", new Vector4(.4f,0,0,1f));
lightShader.setUniformMatrix("mvMatrix", mvMatrix);
lightShader.setUniformMatrix("pMatrix", pMatrix);
triangleBatch.draw(lightShader.getAttributes());
Display.update();
The shader code is as follows:
uniform mat4 mvMatrix;
uniform mat4 pMatrix;
uniform vec4 vColor;
varying vec4 outFragColor;
attribute vec4 inVertex;
attribute vec4 inNormal;
void main(void) {
vec3 newNorm = vec3(inNormal);
mat3 mNormalMatrix;
mNormalMatrix[0] = mvMatrix[0].xyz;
mNormalMatrix[1] = mvMatrix[1].xyz;
mNormalMatrix[2] = mvMatrix[2].xyz;
vec3 vNorm = normalize(mNormalMatrix * newNorm);
vec3 vLightDir = vec3(0.0, 0.0, 1.0);
float fDot = max(0.0, dot(vNorm, vLightDir));
outFragColor.rgb = vColor.rgb * fDot;
outFragColor.a = vColor.a;
mat4 mvpMatrix;
mvpMatrix = pMatrix * mvMatrix;
gl_Position = mvpMatrix * inVertex;
}
Finally the matrix math code for rotations is as follows:
public Matrix4 rotate(Vector4 a, float radians){
Matrix4 temp = new Matrix4().setToRotate(a, radians);
this.multiply(temp);
return this;
}
public Matrix4 setToRotate(Vector4 a, float radians){
a = new Vector4(a).normalise();
float c = GameMath.cos(radians);
float s = GameMath.sin(radians);
float t = 1-c;
float x = a.x;
float y = a.y;
float z = a.z;
float x2 = a.x*a.x;
float y2 = a.y*a.y;
float z2 = a.z*a.z;
this.setIdentity();
this.matrix[0] = c + t*x2;
this.matrix[1] = t*x*y + s*z;
this.matrix[2] = t*x*z - s*y;
this.matrix[4] = t*x*y - s*z;
this.matrix[5] = c + t*y2;
this.matrix[6] = t*y*z + s*x;
this.matrix[8] = t*x*z + s*y;
this.matrix[9] = t*y*z + s*x;
this.matrix[10] = c + t*z2;
return this;
}
The matrices are stored in column major.
I found out what the issue was. apparently one of the operations in setToRotate() need to be changed from addition to subtraction. It works as expected now.

Categories