I wish to blur my background with the following fragment shader:
varying vec4 vColor;
varying vec2 vTexCoord;
uniform vec2 screenSize;
uniform sampler2D u_texture;
uniform vec4 v_time;
const float RADIUS = 0.75;
const float SOFTNESS = 0.6;
const float blurSize = 1.0/1000.0;
void main() {
vec4 texColor = vec4(0.0); // texture2D(u_texture, vTexCoord)
texColor += texture2D(u_texture, vTexCoord - 4.0*blurSize) * 0.05;
texColor += texture2D(u_texture, vTexCoord - 3.0*blurSize) * 0.09;
texColor += texture2D(u_texture, vTexCoord - 2.0*blurSize) * 0.12;
texColor += texture2D(u_texture, vTexCoord - blurSize) * 0.15;
texColor += texture2D(u_texture, vTexCoord) * 0.16;
texColor += texture2D(u_texture, vTexCoord + blurSize) * 0.15;
texColor += texture2D(u_texture, vTexCoord + 2.0*blurSize) * 0.12;
texColor += texture2D(u_texture, vTexCoord + 3.0*blurSize) * 0.09;
texColor += texture2D(u_texture, vTexCoord + 4.0*blurSize) * 0.05;
vec4 timedColor = (vColor + v_time);
vec2 position = (gl_FragCoord.xy / screenSize.xy) - vec2(0.5);
float len = length(position);
float vignette = smoothstep(RADIUS, RADIUS-SOFTNESS, len);
texColor.rgb = mix(texColor.rgb, texColor.rgb * vignette, 0.5);
gl_FragColor = vec4(texColor.rgb * timedColor.rgb, texColor.a);
}
The problem is that the shader blurs the whole screen. What should I write to make it blur only my background texture instead of the entire screen?
A fragment shader applies to every pixel of every triangle that you draw with that shader.
Maybe you are applying the shader to the wrong triangles, and the problem is not the shader itself.
That shader also looks strange to me. In
vTexCoord - blurSize
vTexCoord is a vec2 while blurSize is a float, so the same offset is subtracted from both texture coordinates at once.
The code would look more correct if you wrote something like
vTexCoord - vec2(blurSize, 0.0)
OK, I am going to answer my own question.
I wanted my shader program to blur only the background texture instead of the whole screen,
so I created two shader programs, one for the background and one for everything else.
When the program draws the background it switches to the background shader, and afterwards it switches back to the default shader.
Simple as that.
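A minimal sketch of that approach, assuming a LibGDX-style SpriteBatch (the names backgroundShader, defaultShader, backgroundTexture and drawForeground are illustrative, not from the original code):
batch.begin();
// draw the background with the blur/vignette program
batch.setShader(backgroundShader);
batch.draw(backgroundTexture, 0, 0, screenWidth, screenHeight);
// switch back to the normal program for everything else
// (setShader flushes the batch, so the background keeps its own shader)
batch.setShader(defaultShader);
drawForeground(batch);
batch.end();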
I'm trying to add two textures to a 3D cube. I achieved my goal, but along the way I lost the background color.
I want to show the original color of the images and also the background color. I use mix, but the background comes out completely dark.
This is how my fragmentShaderCode looks:
private final String fragmentShaderCode =
"precision mediump float;" +
"uniform sampler2D u_Texture0;" +
"uniform sampler2D u_Texture1;" +
"uniform vec4 aColor;" +
"varying vec2 v_TexCoordinate0;" +
"varying vec2 v_TexCoordinate1;" +
"void main() {" +
" vec4 base = texture2D(u_Texture0, v_TexCoordinate0);" +
" vec4 overlay = texture2D(u_Texture1, v_TexCoordinate1);" +
" mediump float ra = (overlay.a) * overlay.r + (1.0 - overlay.a) * base.r;" +
" mediump float ga = (overlay.a) * overlay.g + (1.0 - overlay.a) * base.g;" +
" mediump float ba = (overlay.a) * overlay.b + (1.0 - overlay.a) * base.b;" +
" gl_FragColor = vec4(mix(aColor.rgb, vec4(ra, ga, ba, 1.0).rgb , vec4(ra, ga, ba, 1.0).a), 1.0);" +
"}";
The alpha channel of vec4(ra, ga, ba, 1.0) is 1.0. Therefore the result of vec4(ra, ga, ba, 1.0).a is always 1.0.
You need to use the texture's alpha channels. e.g.: max(base.a, overlay.a):
vec3 textureColor = vec3(ra, ga, ba);
float textureAlpha = max(base.a, overlay.a);
gl_FragColor = vec4(mix(aColor.rgb, textureColor, textureAlpha), 1.0);
Simplify the code by mixing the texture colors with the mix function:
void main() {
vec4 base = texture2D(u_Texture0, v_TexCoordinate0);
vec4 overlay = texture2D(u_Texture1, v_TexCoordinate1);
vec3 textureColor = mix(base.rgb, overlay.rgb, overlay.a);
float textureAlpha = max(base.a, overlay.a);
gl_FragColor = vec4(mix(aColor.rgb, textureColor, textureAlpha), 1.0);
}
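For completeness, this is roughly how the simplified shader would look dropped back into the fragmentShaderCode string constant from the question (a sketch, not tested):
private final String fragmentShaderCode =
    "precision mediump float;" +
    "uniform sampler2D u_Texture0;" +
    "uniform sampler2D u_Texture1;" +
    "uniform vec4 aColor;" +
    "varying vec2 v_TexCoordinate0;" +
    "varying vec2 v_TexCoordinate1;" +
    "void main() {" +
    "  vec4 base = texture2D(u_Texture0, v_TexCoordinate0);" +
    "  vec4 overlay = texture2D(u_Texture1, v_TexCoordinate1);" +
    "  vec3 textureColor = mix(base.rgb, overlay.rgb, overlay.a);" +
    "  float textureAlpha = max(base.a, overlay.a);" +
    "  gl_FragColor = vec4(mix(aColor.rgb, textureColor, textureAlpha), 1.0);" +
    "}";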
I am trying to implement a color grading shader using a LUT in Processing, as described here by Matt DesLauriers and here by Lev Zelensky, but I get a strange result when I apply the shader with a standard LUT:
[image: test, 690x345]
On the left you can see the result I get when applying the LUT shader vs the desired result on the right.
Here is my implementation in Processing:
PImage source;
PShader PP_LUT;
PGraphics buffer;
PGraphics lut;
PImage lutsrc;
void setup() {
size(512, 512, P2D);
source = loadImage("test.png");
lutsrc = loadImage("_LUT/lookup.png");
lut = createGraphics(lutsrc.width, lutsrc.height, P2D);
((PGraphicsOpenGL)lut).textureSampling(2);
lut.beginDraw();
lut.image(lutsrc, 0, 0);
lut.endDraw();
buffer = createGraphics(source.width, source.height, P3D);
PP_LUT = loadShader("PP_LUT.glsl");
PP_LUT.set("resolution", (float) buffer.width, (float) buffer.height);
PP_LUT.set("lut", lut);
}
void draw() {
buffer.beginDraw();
buffer.background(0);
buffer.shader(PP_LUT);
buffer.image(source, 0, 0);
buffer.endDraw();
image(buffer, 0, 0, width, height);
image(lut, 0, 0, width * 0.25, height * 0.25);
}
and the shader part:
#version 150
#ifdef GL_ES
#endif
uniform sampler2D texture;
uniform sampler2D lut;
in vec4 vertTexCoord;
out vec4 fragColor;
//https://github.com/mattdesl/glsl-lut
vec4 lookup(vec4 color_, sampler2D lut_){
color_ = clamp(color_, vec4(0), vec4(1));
//define blue
mediump float blue = color_.b * 63.0;
//define quad 1
mediump vec2 quaduv1;
quaduv1.y = floor(floor(blue) / 8.0); //divide blue by the number of columns in the LUT
quaduv1.x = floor(blue) - (quaduv1.y * 8.0);
//define quad 2
mediump vec2 quaduv2;
quaduv2.y = floor(ceil(blue) / 8.0); //divide blue by the number of columns in the LUT
quaduv2.x = ceil(blue) - (quaduv2.y * 8.0);
//define colorUV 1
highp vec2 coloruv1;
coloruv1.x = (quaduv1.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.r);
coloruv1.y = (quaduv1.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.g);
//define colorUV 2
highp vec2 coloruv2;
coloruv2.x = (quaduv2.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.r);
coloruv2.y = (quaduv2.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.g);
//PROCESSING NEED TO FLIP y uv
//coloruv1.y = 1.0 - coloruv1.y;
//coloruv2.y = 1.0 - coloruv2.y;
//define new color 1 & 2
lowp vec4 ncolor1 = texture2D(lut_, coloruv1);
lowp vec4 ncolor2 = texture2D(lut_, coloruv2);
//return looked up color
lowp vec4 lookedcolor = mix(ncolor1, ncolor2, fract(blue));
return vec4(lookedcolor.rgb, color_.w);
}
void main()
{
vec2 uv = vertTexCoord.xy;
vec4 color = texture2D(texture, uv);
vec4 lutColor = lookup(color, lut);
fragColor = lutColor;
}
As I understand it, this seems to be a problem with texture filtering, so I tried writing my LUT into an offscreen buffer and setting its texture filtering mode to nearest, as described on the Processing wiki, but the result is pretty much the same.
I don't know what I am missing here. Does anyone have an idea?
Thanks
So I'm following ThinMatrix's tutorials on multiple lights and point lights. I believe I've followed everything correctly.
I first followed the multiple lights tutorial, and none of the entities or the terrain were affected by the lights. I thought doing the next tutorial, which covers attenuation, would resolve the problem. Now all my objects are shaded black.
I'm unsure what could be wrong. Any help would be appreciated.
Code here
Thinmatrix tutorial 25 multiple lights
//FRAGMENT SHADER
#version 400 core
in vec2 pass_textureCoordinates;
in vec3 surfaceNormal;
in vec3 toLightVector[4];
in vec3 toCameraVector;
in float visibility;
out vec4 out_Color;
uniform sampler2D modelTexture;
uniform vec3 lightColour[4];
uniform vec3 attenuation[4];
uniform float shineDamper;
uniform float reflectivity;
uniform vec3 skyColour;
void main(void){
vec3 unitNormal = normalize(surfaceNormal);
vec3 unitVectorToCamera = normalize(toCameraVector);
vec3 totalDiffuse = vec3(0.0);
vec3 totalSpecular = vec3(0.0);
for(int i = 0; i < 4; i++) {
float distance = length(toLightVector[i]);
float attFactor = attenuation[i].x + (attenuation[i].y * distance) + (attenuation[i].z * distance * distance);
vec3 unitLightVector = normalize(toLightVector[i]);
float nDot1 = dot(unitNormal, unitLightVector);
float brightness = max(nDot1, 0.0);
vec3 lightDirection = -unitLightVector;
vec3 reflectedLightDirection = reflect(lightDirection, unitNormal);
float specularFactor = dot(reflectedLightDirection, unitVectorToCamera);
specularFactor = max(specularFactor, 0.0);
float dampedFactor = pow(specularFactor, shineDamper);
totalDiffuse = totalDiffuse + (brightness * lightColour[i])/attFactor;
totalSpecular = totalSpecular + (dampedFactor * reflectivity * lightColour[i])/attFactor;
}
totalDiffuse = max(totalDiffuse, 0.2);
vec4 textureColour = texture(modelTexture,pass_textureCoordinates);
if(textureColour.a<0.5) {
discard;
}
out_Color = vec4(totalDiffuse,1.0) * textureColour + vec4(totalSpecular,1.0);
out_Color = mix(vec4(skyColour,1.0),out_Color, visibility);
}
VERTEX SHADER:
#version 400 core
in vec3 position;
in vec2 textureCoordinates;
in vec3 normal;
out vec2 pass_textureCoordinates;
out vec3 surfaceNormal;
out vec3 toLightVector[4];
out vec3 toCameraVector;
out float visibility;
uniform mat4 transformationMatrix;
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
uniform vec3 lightPosition[4];
uniform float useFakeLighting;
uniform float numberOfRows;
uniform vec2 offset;
const float density = 0.0035;
const float gradient = 5.0;
void main(void){
vec4 worldPosition = transformationMatrix * vec4(position,1.0);
vec4 positionRelativeToCam = viewMatrix * worldPosition;
gl_Position = projectionMatrix * positionRelativeToCam;
pass_textureCoordinates = (textureCoordinates/numberOfRows) + offset;
vec3 actualNormal = normal;
if(useFakeLighting > 0.5) {
actualNormal = vec3(0.0,1.0,0.0);
}
surfaceNormal = (transformationMatrix * vec4(actualNormal,0.0)).xyz;
for(int i =0; i< 4;i++) {
toLightVector[i] = lightPosition[i] - worldPosition.xyz;
}
toCameraVector = (inverse(viewMatrix) * vec4(0.0,0.0,0.0,1.0)).xyz - worldPosition.xyz;
float distance = length(positionRelativeToCam.xyz);
visibility = exp(-pow((distance*density),gradient));
visibility = clamp(visibility,0.0,0.9);
}
In your StaticShader class:
for(int i = 0; i < MAX_LIGHTS; i++) {
location_lightPosition[i] = super.getUniformLocation("lightPosition{" + i + "}");
location_lightColour[i] = super.getUniformLocation("lightColour{" + i + "}");
location_attenuation[i] = super.getUniformLocation("attenuation[" + i + "}");
}
You are using { and } instead of [ and ]; because of that OpenGL can't find the uniforms and the brightness calculation doesn't work.
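For clarity, the corrected loop with square brackets in the uniform names looks like this:
for(int i = 0; i < MAX_LIGHTS; i++) {
    location_lightPosition[i] = super.getUniformLocation("lightPosition[" + i + "]");
    location_lightColour[i] = super.getUniformLocation("lightColour[" + i + "]");
    location_attenuation[i] = super.getUniformLocation("attenuation[" + i + "]");
}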
If you want to check whether a uniform is found, just change this code in your ShaderProgram class:
protected int getUniformLocation(String uniformName){
int loc = GL20.glGetUniformLocation(programID,uniformName);
if(loc==-1) System.err.println("Uniform with name \""+uniformName+"\" not found!");
return loc;
}
From the opengl documentation:
glGetUniformLocation returns an integer that represents the location
of a specific uniform variable within a program object. name must be a
null terminated string that contains no white space. name must be an
active uniform variable name in program that is not a structure, an
array of structures, or a subcomponent of a vector or a matrix. This
function returns -1 if name does not correspond to an active uniform
variable in program, if name starts with the reserved prefix "gl_", or
if name is associated with an atomic counter or a named uniform block.
If it still doesn't work, check the individual color values.
First check the texture:
out_Color = textureColour;
Second check the diffuse light:
out_Color = vec4(totalDiffuse,1.0);
Third check the specular light:
out_Color = vec4(totalSpecular,1.0);
I hope this helps.
I'm trying to implement depth testing for a 2D isometric game. To get something working, I started off with this sample, but I cannot get it to work correctly.
I'm trying to draw 2 images in a specific order.
first.png
second.png
first.png is drawn first, and second.png is drawn on top. In the fragment shader I give red a lower depth than green, so green fragments should be discarded when they are drawn on top of red fragments. The end result is that when second.png is drawn directly on top of first.png, the resulting square is colored only red.
At the end of the render function I read back the pixels of the depth buffer and loop over them to check whether the values have changed from the default ones. It seems that no matter what I do, the values in the depth buffer never change.
The depth test itself is working: if I set green fragments to depth=1.0, red fragments to depth=0.0 and my depth function is GL_LESS, only red fragments are drawn, but the depth buffer is still not changed.
The code is in Java, but OpenGL functions are the same.
private SpriteBatch mBatch;
private Texture mTexture1;
private Texture mTexture2;
@Override
public void create() {
mBatch = new SpriteBatch();
mBatch.setShader(new ShaderProgram(Gdx.files.internal("test.vsh"), Gdx.files.internal("test.fsh")));
mTexture1 = new Texture("first.png");
mTexture2 = new Texture("second.png");
Gdx.gl20.glEnable(GL20.GL_DEPTH_TEST);
Gdx.gl20.glDepthFunc(GL20.GL_LESS);
Gdx.gl20.glDepthMask(true);
}
@Override
public void render() {
Gdx.gl20.glClear(GL20.GL_COLOR_BUFFER_BIT | GL20.GL_DEPTH_BUFFER_BIT);
mBatch.begin();
float scale = 4.0f;
float x = Gdx.graphics.getWidth() / 2;
float y = Gdx.graphics.getHeight() / 2;
mBatch.draw(mTexture1, x - mTexture1.getWidth() / 2 * scale, y - mTexture1.getHeight() / 2 * scale,
mTexture1.getWidth() * scale, mTexture1.getHeight() * scale);
mBatch.flush();
mBatch.draw(mTexture2, x - mTexture2.getWidth() / 2 * scale, y - mTexture2.getHeight() / 2 * scale,
mTexture2.getWidth() * scale, mTexture2.getHeight() * scale);
mBatch.end();
int width = Gdx.graphics.getWidth();
int height = Gdx.graphics.getHeight();
FloatBuffer buffer = BufferUtils.newFloatBuffer(width * height);
Gdx.gl20.glReadPixels(0, 0, width, height, GL20.GL_DEPTH_COMPONENT, GL20.GL_FLOAT,
buffer);
for (int i = 0; i < width * height; i++) {
float pixel = buffer.get(i);
if (pixel != 1.0f && pixel != 0.0f) {
// why is this never thrown??
// it means depth buffer wasn't changed.
throw new IllegalStateException("OMG IT WORKS!! " + pixel);
}
}
if (Gdx.gl20.glGetError()!=0) {
throw new Error("OPENGL ERROR: " + Gdx.gl20.glGetError());
}
}
Vertex shader
#ifdef GL_ES
precision mediump float;
#endif
attribute vec3 a_position;
attribute vec4 a_color;
attribute vec2 a_texCoord0;
uniform mat4 u_projTrans;
varying vec4 v_color;
varying vec2 v_texCoord;
void main()
{
gl_Position = u_projTrans * vec4(a_position, 1);
v_color = a_color * 2.0;
v_texCoord = a_texCoord0;
}
Fragment shader
#ifdef GL_ES
precision mediump float;
#endif
uniform sampler2D u_texture;
varying vec4 v_color;
varying vec2 v_texCoord;
void main()
{
vec4 texel = v_color * texture2D(u_texture, v_texCoord);
if (texel.r > texel.g)
{
gl_FragDepth = 0.0;
}
else
{
gl_FragDepth = 0.5;
}
gl_FragColor = texel;
}
Ok, I found the problem.
SpriteBatch.begin() does
glDepthMask(false)
Setting glDepthMask to false prevents OpenGL from writing to the depth buffer.
The solution is to call glDepthMask(true) after SpriteBatch.begin().
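Applied to the render() method above, that means something like this (a sketch; only the glDepthMask call is new):
Gdx.gl20.glClear(GL20.GL_COLOR_BUFFER_BIT | GL20.GL_DEPTH_BUFFER_BIT);
mBatch.begin();
// SpriteBatch.begin() disables depth writes, so re-enable them here
Gdx.gl20.glDepthMask(true);
// ... draw mTexture1 and mTexture2 as before ...
mBatch.end();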
This is mostly a general question, since I can't get any shader to work at all. The usual sprites and textures render just fine; nothing at all happens with the shaders, and I'm not getting any error messages from the shader log either. As far as I understand, for a filter of the type below, one only needs to set the shader on the batch with batch.setShader(shader) and set any uniforms, and the batch will take care of the rest. If I am wrong, please tell me my errors.
Fragment shader, supposed to blur
//"in" attributes from our vertex shader
varying vec2 v_texCoord0;
//declare uniforms
uniform sampler2D uImage0;
uniform vec2 uResolution;
uniform float radius;
uniform float dirx;
uniform float diry;
void main()
{
//this will be our RGBA sum
vec4 sum = vec4(0.0);
//our original texcoord for this fragment
vec2 tc = v_texCoord0;
//the amount to blur, i.e. how far off center to sample from
//1.0 -> blur by one pixel
//2.0 -> blur by two pixels, etc.
float blur = radius / uResolution.x;
//the direction of our blur
//(1.0, 0.0) -> x-axis blur
//(0.0, 1.0) -> y-axis blur
float hstep = dirx;
float vstep = diry;
//apply blurring, using a 9-tap filter with predefined gaussian weights
sum += texture2D(uImage0, vec2(tc.x - 4.0*blur*hstep, tc.y - 4.0*blur*vstep)) * 0.0162162162;
sum += texture2D(uImage0, vec2(tc.x - 3.0*blur*hstep, tc.y - 3.0*blur*vstep)) * 0.0540540541;
sum += texture2D(uImage0, vec2(tc.x - 2.0*blur*hstep, tc.y - 2.0*blur*vstep)) * 0.1216216216;
sum += texture2D(uImage0, vec2(tc.x - 1.0*blur*hstep, tc.y - 1.0*blur*vstep)) * 0.1945945946;
sum += texture2D(uImage0, vec2(tc.x, tc.y)) * 0.2270270270;
sum += texture2D(uImage0, vec2(tc.x + 1.0*blur*hstep, tc.y + 1.0*blur*vstep)) * 0.1945945946;
sum += texture2D(uImage0, vec2(tc.x + 2.0*blur*hstep, tc.y + 2.0*blur*vstep)) * 0.1216216216;
sum += texture2D(uImage0, vec2(tc.x + 3.0*blur*hstep, tc.y + 3.0*blur*vstep)) * 0.0540540541;
sum += texture2D(uImage0, vec2(tc.x + 4.0*blur*hstep, tc.y + 4.0*blur*vstep)) * 0.0162162162;
//discard alpha for our simple demo, multiply by vertex color and return
gl_FragColor = vec4(sum.rgb, 1.0);
}
Vertex shader
attribute vec4 a_color;
attribute vec2 a_texCoord0;
attribute vec3 a_position;
uniform mat4 u_projTrans;
varying vec4 v_color;
varying vec2 v_texCoord0;
void main(){
v_color = a_color;
v_texCoord0 = a_texCoord0;
gl_Position = u_projTrans * vec4(a_position,1.0) ;
}
Setting up the shader. I tried different values here:
public void setupShader(){
ShaderProgram.pedantic=true;
shader = new ShaderProgram(Gdx.files.internal("shaders/pass.vert"),Gdx.files.internal("shaders/scanlines.frag"));
shader.begin();
shader.setUniformf("radius", 5f);
shader.setUniformf("dirx", 5f);
shader.setUniformf("diry", 5f);
shader.end();
if(shader.isCompiled())
batch.setShader(shader);
else
Settings.log(shader.getLog());
}
The render method. I've not put anything concerning shaders here.
@Override
public void render(float delta) {
Settings.clearScreen(); //usual clear screen calls from here
batch.setProjectionMatrix(cam.combined);
cam.update();
detectClicks();
checkBallScreenEdges();
batch.begin();
draw(delta);
batch.end();
}
Thanks to Tenfour04 I got a nice scanline shader to work (tested with a different shader than the one above):
@Override
public void render(float delta) {
Settings.clearScreen();
batch.setProjectionMatrix(cam.combined);
cam.update();
batch.setShader(SpriteBatch.createDefaultShader());
main.buffer.begin();
batch.begin();
draw(delta);
batch.end();
main.buffer.end();
//POST PROCESSING
Texture bufferedTexture = main.buffer.getColorBufferTexture();
batch.setShader(main.shader);
batch.begin();
batch.draw(bufferedTexture, 0, 0, Settings.WIDTH, Settings.HEIGHT, 0, 0, Settings.WIDTH, Settings.HEIGHT, false, true); //need to flip texture
batch.end();
}
and setting up the shader:
buffer = new FrameBuffer(Pixmap.Format.RGBA8888,Settings.WIDTH,Settings.HEIGHT,false);
ShaderProgram.pedantic=false;
shader = new ShaderProgram(Gdx.files.internal("shaders/pass.vert"),Gdx.files.internal("shaders/scanlines.frag"));
shader.begin();
shader.setUniformf("uResolution",(float)Settings.WIDTH,(float)Settings.HEIGHT);
shader.end();
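If you want to drive the blur shader from the question instead of the scanline shader, its uniforms can be set the same way while the program is bound. This is just a sketch reusing the uniform names declared in that fragment shader; the values are example values:
shader.begin();
shader.setUniformf("uResolution", (float) Settings.WIDTH, (float) Settings.HEIGHT);
shader.setUniformf("radius", 2f);  // example blur radius in pixels
shader.setUniformf("dirx", 1f);    // (1, 0) = horizontal blur pass
shader.setUniformf("diry", 0f);    // (0, 1) would be a vertical pass
shader.end();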
I wish the libgdx wiki had more examples.