GLSL Point Light Problems - java

When I render my 3D scene using a point light shader, I run into a few problems: the light seems to rotate with the camera, and it behaves like a directional light instead of a point light. I was following this tutorial for GLSL point lights:
GLSL Core Tutorial – Point Lights
My Vertex Shader:
#version 150 core
in vec4 in_Position;
in vec4 in_Color;
in vec2 in_TextureCoord;
in vec3 in_Normal;
uniform mat4 projection;
uniform mat4 view;
uniform mat4 model;
uniform mat3 normal;
out Data {
    vec3 normal;
    vec3 eye;
    vec3 lightDir;
    vec2 st;
} Out;
void main(void) {
    vec4 pos = view * model * in_Position;
    vec3 norm = normal * in_Normal;
    //Light Position
    vec4 l_pos = view * model * vec4(0,1,0,1);
    Out.normal = norm;
    Out.lightDir = vec3(l_pos - pos);
    Out.st = in_TextureCoord;
    Out.eye = vec3(-pos);
    gl_Position = projection * view * model * in_Position;
}
My Fragment Shader:
#version 150 core
uniform sampler2D texture_diffuse;
in Data {
    vec3 normal;
    vec3 eye;
    vec3 lightDir;
    vec2 st;
} DataIn;
out vec4 out_Color;
void main(void) {
    vec4 diffuse = texture(texture_diffuse, DataIn.st).rgba;
    vec4 spec = vec4(0.0);
    vec4 specular = vec4(0.2,0.2,0.2,1);
    vec4 ambient = vec4(0.2,0.2,0.2,1);
    float shininess = 100;
    vec3 n = normalize(DataIn.normal);
    vec3 l = normalize(DataIn.lightDir);
    vec3 e = normalize(DataIn.eye);
    float intensity = max(dot(n,l), 0.0);
    if (intensity > 0.0) {
        vec3 h = normalize(l + e);
        float intSpec = max(dot(h,n), 0.0);
        spec = specular * pow(intSpec, shininess);
    }
    out_Color = max(intensity * diffuse + spec, ambient);
}
Picture of the problem:

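For anyone else hitting the same symptoms: the two usual suspects in a setup like this are (a) a normal matrix that is not built from the same view * model transform used for pos, l_pos and eye, which mixes coordinate spaces and makes the lighting appear to rotate with the camera, and (b) multiplying the light position by the model matrix, which glues the light to the object. Below is a minimal sketch of the vertex-shader side, assuming the light's position is supplied in world space through a new, hypothetical lightPosition uniform, that the normal uniform is computed from view * model on the application side, and that the other declarations stay exactly as in the shader above; it illustrates the idea, it is not the tutorial's exact code.

uniform vec3 lightPosition; // hypothetical uniform: light position in world space

void main(void) {
    vec4 pos = view * model * in_Position;
    vec3 norm = normal * in_Normal;          // normal must be the mat3 of view * model
    // Transform the light by the view matrix only, NOT by the model matrix,
    // so it stays fixed in the world instead of following the mesh.
    vec4 l_pos = view * vec4(lightPosition, 1.0);
    Out.normal = norm;
    Out.lightDir = vec3(l_pos - pos);        // per-vertex direction = point light
    Out.st = in_TextureCoord;
    Out.eye = vec3(-pos);
    gl_Position = projection * pos;
}

Adding distance attenuation in the fragment shader (dividing the lit result by something like 1.0 + k1*d + k2*d*d with d = length(DataIn.lightDir)) also helps the result read as a point light rather than a directional one, but that is independent of the space-mixing issue above.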
Related

This "Single - pass wire frame shader "opengl shader worked on my old machine with amd integrated graphics but it dose not my new nvidea pc

I created this shader by following this tutorial on single-pass wireframe rendering: http://codeflow.org/entries/2012/aug/02/easy-wireframe-display-with-barycentric-coordinates/
Fragment:
#version 450
layout (location = 0) out vec4 outColor;
in vec3 vBC;
const float lineWidth = 0.5;
const vec3 color = vec3(0.7, 0.7, 0.7);
float edgeFactor(){
    vec3 d = fwidth(vBC);
    vec3 a3 = smoothstep(vec3(0.0), d*1.5, vBC);
    return min(min(a3.x, a3.y), a3.z);
}
void main(){
    outColor = vec4(min(vec3(edgeFactor()), color), 1.0);
}
Vertex:
#version 450
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 baryCentric;
out vec3 vBC;
uniform mat4 T_MVP;
void main() {
    //texCoord0 = texCoord;
    gl_Position = T_MVP * vec4(position, 1.0);
    vBC = baryCentric;
}
And here is the gl prep before rendering:
wir.bind();
wir.updateUniforms(super.getTransform(), mat, engine);
GL45.glEnable(GL45.GL_SAMPLE_ALPHA_TO_COVERAGE);
GL45.glEnable(GL45.GL_BLEND);
GL45.glBlendFunc(GL45.GL_SRC_ALPHA, GL45.GL_ONE_MINUS_SRC_ALPHA);
mesh.draw("baryCentric", GL15.GL_TRIANGLES);
And here is how I bind the vertex attribs:
The shader worked perfectly fine on my old AMD integrated graphics card, but it doesn't on my RTX 2060 Super.
Shader and GL versions
on the old machine: OpenGL version: 4.5.13399 Compatibility Profile Context 15.200.1062.1004
on the new machine: 4.6.0 NVIDIA 445.87
First of all, I don't know what causes this, but I think it's the model files.
How I solved it: instead of preprocessing the barycentric coords, I calculate them, or rather assign them, in a geometry shader, like so:
vBC = vec3(1, 0, 0);
gl_Position = gl_in[0].gl_Position;
EmitVertex();
vBC = vec3(0, 1, 0);
gl_Position = gl_in[1].gl_Position;
EmitVertex();
vBC = vec3(0, 0, 1);
gl_Position = gl_in[2].gl_Position;
EmitVertex();
and nothing else; just pass them on to the fragment shader and it does the rest:
#version 400
precision mediump float;
layout (location = 0) out vec4 outColor;
in vec3 vBC;
const float lineWidth = 0.5;
const vec3 lineColor = vec3(0.7, 0.7, 0.7);
float edgeFactor() {
    vec3 d = fwidth(vBC);
    vec3 f = step(d * lineWidth, vBC);
    return min(min(f.x, f.y), f.z);
}
void main(){
    outColor = vec4(255, 191, 0.0, (1.0-edgeFactor())*0.95);
}
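One thing worth flagging in the fragment shader above: vec4(255, 191, 0.0, ...) feeds colour components far outside the 0.0-1.0 range GLSL expects, so on a normal 8-bit framebuffer the red and green channels simply clamp to 1.0. If an amber line colour was the intent (an assumption on my part), normalising the components makes that explicit:

outColor = vec4(1.0, 0.75, 0.0, (1.0 - edgeFactor()) * 0.95); // roughly RGB(255, 191, 0)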
The vertex shader only defines the positions, nothing else; it's as basic as it gets.
Here is the full geometry shader if anyone needs it:
#version 400
layout(triangles) in;
layout(triangle_strip, max_vertices = 3) out;
out vec3 vBC;
void main()
{
    vBC = vec3(1, 0, 0);
    gl_Position = gl_in[0].gl_Position;
    EmitVertex();
    vBC = vec3(0, 1, 0);
    gl_Position = gl_in[1].gl_Position;
    EmitVertex();
    vBC = vec3(0, 0, 1);
    gl_Position = gl_in[2].gl_Position;
    EmitVertex();
}
Here are some pictures:
As you can see, it's working with transparency, which is done using:
Here are the articles I looked at:
https://tchayen.github.io/wireframes-with-barycentric-coordinates/
http://codeflow.org/entries/2012/aug/02/easy-wireframe-display-with-barycentric-coordinates/
And a cool book that helped me a lot:
https://people.inf.elte.hu/plisaai/pdf/David%20Wolff%20-%20OpenGL%204.0%20Shading%20Language%20Cookbook%20(2).pdf
Just in case, here is the vertex shader:
#version 400
precision mediump int;
precision mediump float;
layout (location = 0) in vec3 position;
uniform mat4 T_MVP;
void main() {
    gl_Position = T_MVP * vec4(position, 1.0);
}

Changing spotlight direction in Processing

I'm trying to implement 2 spotlights at the top of the scene using Processing that change their respective directions over time. I tried using the default spotLight(r, g, b, x, y, z, nx, ny, nz, angle, concentration) method to create the spotlights and tried changing the nx, ny and nz variables to change the light direction. However, the method doesn't seem to take in those 3 variables. This is the GLSL that I'm using.
precision mediump float;
varying vec3 normalInterp;
varying vec3 vertPos;
uniform int lightCount;
uniform vec4 lightPosition[8];
uniform vec3 lightNormal[8];
//ambient
const vec3 ambientColor = vec3(0.1, 0, 0);
//diffuse
const vec3 diffuseColor = vec3(0.5, 0.0, 0.0);
//specular
const vec3 specColor = vec3(1.0, 1.0, 1.0);
//specular reflection parameter
const float n = 30.0;
//Depth cueing
//not implemented
void main() {
    float lightR = 0.0;
    float lightG = 0.0;
    float lightB = 0.0;
    for (int i = 0; i < lightCount; i++)
    {
        vec3 normal = normalize(normalInterp);
        vec3 lightDir = normalize(lightPosition[i] - vertPos);
        //diffuse
        float diffuse = max(dot(lightDir,normal), 0.0);
        //specular
        float specular = 0.0;
        if(diffuse > 0.0) {
            vec3 viewDir = normalize(-vertPos);
            vec3 reflectDir = reflect(-lightDir, normal);
            float specAngle = max(dot(reflectDir, viewDir), 0.0);
            specular = pow(specAngle, n);
        }
        //Note: can add in depth cueing here
        vec3 colorLinear = ambientColor +
                           diffuse * diffuseColor +
                           specular * specColor;
        lightR += colorLinear.x;
        lightG += colorLinear.y;
        lightB += colorLinear.z;
    }
    gl_FragColor = vec4(lightR,lightG,lightB, 1.0);
}
There is a simple issue in the shader program. First, there is a typo: it has to be lightPosition rather than lightPostion. But that is not the only issue.
The type of lightPosition[i] is vec4 and the type of vertPos is vec3. That causes an error when vertPos is subtracted from lightPosition[i].
Either you have to construct a vec3 from lightPosition[i]:
vec3 lightDir = normalize(lightPostion[i] - vertPos);        // original, broken
vec3 lightDir = normalize(vec3(lightPosition[i]) - vertPos); // fixed
Or you have to get the x, y and z components from lightPosition[i] (see Swizzling):
vec3 lightDir = normalize(lightPosition[i].xyz - vertPos);
Both solutions lead to the same result.
Of course, the light position has to be set relative to the object. Note that when spotLight() is called, the light position and direction are transformed by the current model-view matrix.
See the example:
Vertex shader
uniform mat4 modelview;
uniform mat4 transform;
uniform mat3 normalMatrix;
attribute vec4 position;
attribute vec4 color;
attribute vec3 normal;
varying vec3 normalInterp;
varying vec3 vertPos;
varying vec4 vertColor;
void main() {
    gl_Position = transform * position;
    vertPos = vec3(modelview * position);
    normalInterp = normalize(normalMatrix * normal);
}
Fragment shader
precision mediump float;
varying vec3 normalInterp;
varying vec3 vertPos;
uniform int lightCount;
uniform vec4 lightPosition[8];
uniform vec3 lightNormal[8];
uniform vec3 lightDiffuse[8];
uniform vec3 lightSpecular[8];
uniform vec2 lightSpot[8];
const vec3 ambientColor = vec3(0.2);
const vec3 diffuseColor = vec3(1.0);
const vec3 specColor = vec3(1.0);
const float n = 30.0;
void main() {
    vec3 lightColor = vec3(0.0, 0.0, 0.0);
    for (int i = 0; i < lightCount; i++)
    {
        // ambient
        lightColor += lightDiffuse[i] * ambientColor;
        vec3 normal = normalize(normalInterp);
        vec3 lightDir = normalize(lightPosition[i].xyz - vertPos);
        float spot = dot(-lightNormal[i], lightDir);
        if (spot < lightSpot[i].x)
            continue;
        //diffuse
        float diffuse = max(dot(lightDir,normal), 0.0);
        lightColor += diffuse * lightDiffuse[i] * diffuseColor;
        //specular
        if(diffuse > 0.0) {
            vec3 viewDir = normalize(-vertPos);
            vec3 reflectDir = reflect(-lightDir, normal);
            float specAngle = max(dot(reflectDir, viewDir), 0.0);
            float specular = pow(specAngle, n);
            lightColor += specular * lightSpecular[i] * specColor;
        }
    }
    gl_FragColor = vec4(lightColor.rgb, 1.0);
}
Code
PShader lightShader;
void setup() {
    size(800, 600, P3D);
    lightShader = loadShader("fragment.glsl","vertex.glsl");
}
float ry = 0.0;
void draw() {
    background(0);
    shader(lightShader);
    translate(width/2.0, height/2.0);
    spotLight(255, 0, 0, 0, 500, 500, 0, -1, -1, PI/25, 2);
    spotLight(0, 0, 255, 500, 0, 500, -1, 0, -1, PI/25, 2);
    rotateY(ry);
    rotateX(-0.5);
    ry += 0.02;
    noStroke();
    box(200);
}
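One follow-up on the cone test in the fragment shader above, in case a softer spotlight edge is wanted: as far as I can tell from Processing's built-in shaders (treat this as an assumption), lightSpot[i].x holds the cosine of the spot angle and lightSpot[i].y the concentration passed to spotLight(). The hard cut-off can then be turned into the classic exponent falloff by modifying the loop body like this:

    float spot = dot(-lightNormal[i], lightDir);
    if (spot < lightSpot[i].x)
        continue;                                        // outside the cone
    float falloff = pow(max(spot, 0.0), lightSpot[i].y); // concentration shapes the edge
    //diffuse
    float diffuse = max(dot(lightDir, normal), 0.0);
    lightColor += falloff * diffuse * lightDiffuse[i] * diffuseColor;

The specular term can be scaled by the same falloff factor.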

Color Grading using LUT and PShader

I am trying to implement a color grading shader using a LUT in Processing, as described here by Matt DesLauriers and here by Lev Zelensky, but I get a strange result when I apply the shader with a standard LUT:
On the left you can see the result I get when applying the LUT shader vs the desired result on the right.
Here is my implementation in Processing:
PImage source;
PShader PP_LUT;
PGraphics buffer;
PGraphics lut;
PImage lutsrc;
void setup() {
    size(512, 512, P2D);
    source = loadImage("test.png");
    lutsrc = loadImage("_LUT/lookup.png");
    lut = createGraphics(lutsrc.width, lutsrc.height, P2D);
    ((PGraphicsOpenGL)lut).textureSampling(2);
    lut.beginDraw();
    lut.image(lutsrc, 0, 0);
    lut.endDraw();
    buffer = createGraphics(source.width, source.height, P3D);
    PP_LUT = loadShader("PP_LUT.glsl");
    PP_LUT.set("resolution", (float) buffer.width, (float) buffer.height);
    PP_LUT.set("lut", lut);
}
void draw() {
    buffer.beginDraw();
    buffer.background(0);
    buffer.shader(PP_LUT);
    buffer.image(source, 0, 0);
    buffer.endDraw();
    image(buffer, 0, 0, width, height);
    image(lut, 0, 0, width * 0.25, height * 0.25);
}
And the shader part:
#version 150
#ifdef GL_ES
#endif
uniform sampler2D texture;
uniform sampler2D lut;
in vec4 vertTexCoord;
out vec4 fragColor;
//https://github.com/mattdesl/glsl-lut
vec4 lookup(vec4 color_, sampler2D lut_){
    color_ = clamp(color_, vec4(0), vec4(1));
    //define blue
    mediump float blue = color_.b * 63.0;
    //define quad 1
    mediump vec2 quaduv1;
    quaduv1.y = floor(floor(blue) / 8.0); //divide blue by the number of columns in the LUT
    quaduv1.x = floor(blue) - (quaduv1.y * 8.0);
    //define quad 2
    mediump vec2 quaduv2;
    quaduv2.y = floor(ceil(blue) / 8.0); //divide blue by the number of columns in the LUT
    quaduv2.x = ceil(blue) - (quaduv2.y * 8.0);
    //define colorUV 1
    highp vec2 coloruv1;
    coloruv1.x = (quaduv1.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.r);
    coloruv1.y = (quaduv1.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.g);
    //define colorUV 2
    highp vec2 coloruv2;
    coloruv2.x = (quaduv2.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.r);
    coloruv2.y = (quaduv2.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.g);
    //PROCESSING NEEDS TO FLIP THE y UV
    //coloruv1.y = 1.0 - coloruv1.y;
    //coloruv2.y = 1.0 - coloruv2.y;
    //define new color 1 & 2
    lowp vec4 ncolor1 = texture2D(lut_, coloruv1);
    lowp vec4 ncolor2 = texture2D(lut_, coloruv2);
    //return looked up color
    lowp vec4 lookedcolor = mix(ncolor1, ncolor2, fract(blue));
    return vec4(lookedcolor.rgb, color_.w);
}
void main()
{
    vec2 uv = vertTexCoord.xy;
    vec4 color = texture2D(texture, uv);
    vec4 lutColor = lookup(color, lut);
    fragColor = lutColor;
}
As I understand it, it seems to be a problem with texture filtering, so I tried to write my LUT into an offscreen buffer and set the texture filtering mode to nearest, as described on the Processing wiki, but the result is pretty much the same.
I don't know what I am missing here. Does anyone have an idea?
Thanks
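Two hedged guesses for anyone landing here with the same symptom. First, Processing draws into PGraphics with the texture origin at the top-left while the reference LUT implementation assumes a bottom-left origin, so the two commented-out flips in the lookup() function above usually need to be enabled. Second, the LUT texture does need nearest-neighbour sampling so adjacent 64x64 cells don't bleed into each other; the ((PGraphicsOpenGL)lut).textureSampling(2) call in setup() should take care of that, as long as it runs before the LUT is drawn. The flip, applied inside lookup():

    //PROCESSING NEEDS TO FLIP THE y UV
    coloruv1.y = 1.0 - coloruv1.y;
    coloruv2.y = 1.0 - coloruv2.y;
    lowp vec4 ncolor1 = texture2D(lut_, coloruv1);
    lowp vec4 ncolor2 = texture2D(lut_, coloruv2);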

LWJGL2 Multiple lights not working? All entities are black

So I'm following ThinMatrix's tutorials on multiple lights and point lights. I believe I've followed everything correctly.
I first followed the multiple lights tutorial, and none of the entities or terrain were being affected. I thought doing the next tutorial, which covers attenuation, would resolve this problem. Now all my objects are shaded black.
I'm unsure what could be wrong. Any help would be appreciated.
Code here
Thinmatrix tutorial 25 multiple lights
//FRAGMENT SHADER
#version 400 core
in vec2 pass_textureCoordinates;
in vec3 surfaceNormal;
in vec3 toLightVector[4];
in vec3 toCameraVector;
in float visibility;
out vec4 out_Color;
uniform sampler2D modelTexture;
uniform vec3 lightColour[4];
uniform vec3 attenuation[4];
uniform float shineDamper;
uniform float reflectivity;
uniform vec3 skyColour;
void main(void){
    vec3 unitNormal = normalize(surfaceNormal);
    vec3 unitVectorToCamera = normalize(toCameraVector);
    vec3 totalDiffuse = vec3(0.0);
    vec3 totalSpecular = vec3(0.0);
    for(int i = 0; i < 4; i++) {
        float distance = length(toLightVector[i]);
        float attFactor = attenuation[i].x + (attenuation[i].y * distance) + (attenuation[i].z * distance * distance);
        vec3 unitLightVector = normalize(toLightVector[i]);
        float nDot1 = dot(unitNormal, unitLightVector);
        float brightness = max(nDot1, 0.0);
        vec3 lightDirection = -unitLightVector;
        vec3 reflectedLightDirection = reflect(lightDirection, unitNormal);
        float specularFactor = dot(reflectedLightDirection, unitVectorToCamera);
        specularFactor = max(specularFactor, 0.0);
        float dampedFactor = pow(specularFactor, shineDamper);
        totalDiffuse = totalDiffuse + (brightness * lightColour[i])/attFactor;
        totalSpecular = totalSpecular + (dampedFactor * reflectivity * lightColour[i])/attFactor;
    }
    totalDiffuse = max(totalDiffuse, 0.2);
    vec4 textureColour = texture(modelTexture,pass_textureCoordinates);
    if(textureColour.a<0.5) {
        discard;
    }
    out_Color = vec4(totalDiffuse,1.0) * textureColour + vec4(totalSpecular,1.0);
    out_Color = mix(vec4(skyColour,1.0),out_Color, visibility);
}
VERTEX SHADER:
#version 400 core
in vec3 position;
in vec2 textureCoordinates;
in vec3 normal;
out vec2 pass_textureCoordinates;
out vec3 surfaceNormal;
out vec3 toLightVector[4];
out vec3 toCameraVector;
out float visibility;
uniform mat4 transformationMatrix;
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
uniform vec3 lightPosition[4];
uniform float useFakeLighting;
uniform float numberOfRows;
uniform vec2 offset;
const float density = 0.0035;
const float gradient = 5.0;
void main(void){
    vec4 worldPosition = transformationMatrix * vec4(position,1.0);
    vec4 positionRelativeToCam = viewMatrix * worldPosition;
    gl_Position = projectionMatrix * positionRelativeToCam;
    pass_textureCoordinates = (textureCoordinates/numberOfRows) + offset;
    vec3 actualNormal = normal;
    if(useFakeLighting > 0.5) {
        actualNormal = vec3(0.0,1.0,0.0);
    }
    surfaceNormal = (transformationMatrix * vec4(actualNormal,0.0)).xyz;
    for(int i = 0; i < 4; i++) {
        toLightVector[i] = lightPosition[i] - worldPosition.xyz;
    }
    toCameraVector = (inverse(viewMatrix) * vec4(0.0,0.0,0.0,1.0)).xyz - worldPosition.xyz;
    float distance = length(positionRelativeToCam.xyz);
    visibility = exp(-pow((distance*density),gradient));
    visibility = clamp(visibility,0.0,0.9);
}
In your StaticShader class:
for(int i = 0; i < MAX_LIGHTS; i++) {
location_lightPosition[i] = super.getUniformLocation("lightPosition{" + i + "}");
location_lightColour[i] = super.getUniformLocation("lightColour{" + i + "}");
location_attenuation[i] = super.getUniformLocation("attenuation[" + i + "}");
}
You are using }/{ instead of ]/[; because of that, OpenGL can't find the uniforms and the brightness calculation doesn't work.
If you want to check whether a uniform is found, just change this code in your ShaderProgram class:
protected int getUniformLocation(String uniformName){
    int loc = GL20.glGetUniformLocation(programID, uniformName);
    if(loc == -1) System.err.println("Uniform with name \""+uniformName+"\" not found!");
    return loc;
}
From the OpenGL documentation:
glGetUniformLocation returns an integer that represents the location of a specific uniform variable within a program object. name must be a null terminated string that contains no white space. name must be an active uniform variable name in program that is not a structure, an array of structures, or a subcomponent of a vector or a matrix. This function returns -1 if name does not correspond to an active uniform variable in program, if name starts with the reserved prefix "gl_", or if name is associated with an atomic counter or a named uniform block.
If it still doesn't work, check the individual color values.
First, check the texture:
out_Color = textureColour;
Second, check the diffuse light:
out_Color = vec4(totalDiffuse,1.0);
Third, check the specular light:
out_Color = vec4(totalSpecular,1.0);
I hope this helps.

GLSL shaders won't compile, and there is no error message, on Windows Vista with an NVIDIA GeForce 9800 graphics card

This is the part of the source code where the problem resides:
GL.createCapabilities();
// Define the viewport dimensions
glViewport(0, 0, 300, 300);
int shaderProgram;
final String vertexShader = "#version 330 core\n in vec3 position; // The position variable has attribute position 0\n out vec4 vertexColor; // Specify a color output to the fragment shader\n void main()\n {\n gl_Position = vec4(position, 1.0); // See how we directly give a vec3 to vec4's constructor\n vertexColor = vec4(0.5f, 0.0f, 0.0f, 1.0f); // Set the output variable to a dark-red color\n }";
String fragmentShader = "#version 330 core\n in vec3 ourColor;\n"
+ "in vec2 TexCoord;\n"
+ "out vec4 color;\n"
+ "uniform sampler2D ourTexture1;\n"
+ "void main()\n"
+ "{\n"
+ "color = vec4(ourColor, 1.0f);\n"
+ "}";
int vertex, fragment;
// Vertex Shader
vertex = GL20.glCreateShader(GL20.GL_VERTEX_SHADER);
GL20.glShaderSource(vertex, vertexShader);
GL20.glCompileShader(vertex);
// Fragment Shader
fragment = GL20.glCreateShader(GL20.GL_FRAGMENT_SHADER);
GL20.glShaderSource(fragment, fragmentShader);
GL20.glCompileShader(fragment);
//create program and bind shaders to program
shaderProgram = GL20.glCreateProgram();
GL20.glAttachShader(shaderProgram, vertex);
GL20.glAttachShader(shaderProgram, fragment);
GL20.glLinkProgram(shaderProgram);
int vlength = GL20.GL_SHADER_SOURCE_LENGTH;
int iscompiled = GL20.glGetProgrami(fragment, GL20.GL_COMPILE_STATUS);
if(iscompiled == GL_FALSE)
{
    System.out.println(glGetString(GL_VERSION));
    System.out.println("not compiled");
    System.out.println(GL11.glGetString(GL20.GL_SHADING_LANGUAGE_VERSION));
    System.out.println(GL20.glGetShaderInfoLog(vertex));
    return;
}
int isLinked = GL20.glGetProgrami(shaderProgram, GL20.GL_LINK_STATUS);
if(isLinked == GL_FALSE)
{
    System.out.println("failed linking");
    return;
}
This is the vertex shader:
#version 330 core
layout (location = 0) in vec3 position; // The position variable has attribute position 0
out vec4 vertexColor; // Specify a color output to the fragment shader
void main()
{
gl_Position = vec4(position, 1.0); // See how we directly give a vec3 to vec4's constructor
vertexColor = vec4(0.5f, 0.0f, 0.0f, 1.0f); // Set the output variable to a dark-red color
}
This is the fragment shader:
#version 330 core
in vec4 vertexColor; // The input variable from the vertex shader (same name and same type)
out vec4 color;
void main()
{
color = vertexColor;
}
Neither the vertex shader nor the fragment shader compiles. What needs to be fixed?
I am using the core profile of OpenGL version 3.3.
The operating system is Windows Vista Home Premium 64-bit.
int iscompiled = GL20.glGetProgrami(fragment, GL20.GL_COMPILE_STATUS);
You cannot query a program for compilation status. That is a property of each shader object separately, and can be queried via glGetShaderiv(). Using GL_COMPILE_STATUS with glGetProgramiv() will just result in a GL_INVALID_ENUM error.
