I am trying to implement a color grading shader using a LUT in Processing, as described here by Matt DesLauriers and here by Lev Zelensky, but I get a strange result when I apply the shader with a standard LUT:
On the left you can see the result I get when applying the LUT shader vs the desired result on the right.
Here is my implementation in Processing:
PImage source;
PShader PP_LUT;
PGraphics buffer;
PGraphics lut;
PImage lutsrc;
void setup() {
size(512, 512, P2D);
source = loadImage("test.png");
lutsrc = loadImage("_LUT/lookup.png");
lut = createGraphics(lutsrc.width, lutsrc.height, P2D);
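// 2 corresponds to POINT (nearest-neighbour) sampling in Processing's OpenGL renderer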
((PGraphicsOpenGL)lut).textureSampling(2);
lut.beginDraw();
lut.image(lutsrc, 0, 0);
lut.endDraw();
buffer = createGraphics(source.width, source.height, P3D);
PP_LUT = loadShader("PP_LUT.glsl");
PP_LUT.set("resolution", (float) buffer.width, (float) buffer.height);
PP_LUT.set("lut", lut);
}
void draw() {
buffer.beginDraw();
buffer.background(0);
buffer.shader(PP_LUT);
buffer.image(source, 0, 0);
buffer.endDraw();
image(buffer, 0, 0, width, height);
image(lut, 0, 0, width * 0.25, height * 0.25);
}
And the shader part:
#version 150
#ifdef GL_ES
precision mediump float;
#endif
uniform sampler2D texture;
uniform sampler2D lut;
in vec4 vertTexCoord;
out vec4 fragColor;
//https://github.com/mattdesl/glsl-lut
vec4 lookup(vec4 color_, sampler2D lut_){
color_ = clamp(color_, vec4(0), vec4(1));
//define blue
mediump float blue = color_.b * 63.0;
//define quad 1
mediump vec2 quaduv1;
quaduv1.y = floor(floor(blue) / 8.0); //divide blue by the 8 columns of the LUT to get the row
quaduv1.x = floor(blue) - (quaduv1.y * 8.0);
//define quad 2
mediump vec2 quaduv2;
quaduv2.y = floor(ceil(blue) / 8.0); //divide blue by the 8 columns of the LUT to get the row
quaduv2.x = ceil(blue) - (quaduv2.y * 8.0);
//define colorUV 1
highp vec2 coloruv1;
coloruv1.x = (quaduv1.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.r);
coloruv1.y = (quaduv1.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.g);
//define colorUV 2
highp vec2 coloruv2;
coloruv2.x = (quaduv2.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.r);
coloruv2.y = (quaduv2.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color_.g);
//PROCESSING NEEDS TO FLIP THE Y UV
//coloruv1.y = 1.0 - coloruv1.y;
//coloruv2.y = 1.0 - coloruv2.y;
//define new color 1 & 2
lowp vec4 ncolor1 = texture2D(lut_, coloruv1);
lowp vec4 ncolor2 = texture2D(lut_, coloruv2);
//return looked up color
lowp vec4 lookedcolor = mix(ncolor1, ncolor2, fract(blue));
return vec4(lookedcolor.rgb, color_.w);
}
void main()
{
vec2 uv = vertTexCoord.xy;
vec4 color = texture2D(texture, uv);
vec4 lutColor = lookup(color, lut);
fragColor = lutColor;
}
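For reference, the lookup math above assumes a 512×512 LUT laid out as an 8×8 grid of 64×64 tiles, one tile per blue slice. As a worked example (numbers chosen for illustration, not from the original posts), an input with color_.b = 0.5 gives:
//blue  = 0.5 * 63.0 = 31.5
//quad1: floor(31.5) = 31.0 -> row floor(31.0 / 8.0) = 3.0, column 31.0 - 3.0 * 8.0 = 7.0
//quad2: ceil(31.5)  = 32.0 -> row floor(32.0 / 8.0) = 4.0, column 32.0 - 4.0 * 8.0 = 0.0
//the colors sampled from those two tiles are then blended with fract(31.5) = 0.5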
As I understand it, this seems to be a problem with texture filtering, so I tried writing my LUT into an offscreen buffer and setting its texture sampling mode to nearest, as described on the Processing wiki, but the result is much the same.
I don't know what I am missing here. Does anyone have an idea?
Thanks
I created this shader by following this tutorial on single-pass wireframe rendering: http://codeflow.org/entries/2012/aug/02/easy-wireframe-display-with-barycentric-coordinates/
Fragment:
#version 450
layout (location = 0) out vec4 outColor;
in vec3 vBC;
const float lineWidth = 0.5;
const vec3 color = vec3(0.7, 0.7, 0.7);
float edgeFactor(){
vec3 d = fwidth(vBC);
vec3 a3 = smoothstep(vec3(0.0), d*1.5, vBC);
return min(min(a3.x, a3.y), a3.z);
}
void main(){
outColor = vec4(min(vec3(edgeFactor()), color), 1.0);
}
Vertex:
#version 450
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 baryCentric;
out vec3 vBC;
uniform mat4 T_MVP;
void main() {
//texCoord0 = texCoord;
gl_Position = T_MVP * vec4(position, 1.0);
vBC = baryCentric;
}
And here is the GL prep before rendering:
wir.bind();
wir.updateUniforms(super.getTransform(), mat, engine);
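// alpha-to-coverage (effective only with MSAA) plus standard alpha blending for the soft line edges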
GL45.glEnable(GL45.GL_SAMPLE_ALPHA_TO_COVERAGE);
GL45.glEnable(GL45.GL_BLEND);
GL45.glBlendFunc(GL45.GL_SRC_ALPHA, GL45.GL_ONE_MINUS_SRC_ALPHA);
mesh.draw("baryCentric", GL15.GL_TRIANGLES);
And here is how I bind the vertex attribs:
The shader worked perfectly fine on my old AMD integrated graphics card. But it doesn't on my RTX 2060 Super.
Shader and GL versions
on old: OpenGL version: 4.5.13399 Compatibility Profile Context 15.200.1062.1004
on new: 4.6.0 NVIDIA 445.87
First of all, I don't know what causes this, but I think it's the model files.
How I solved this: instead of pre-processing the barycentric coords, I calculate them, or rather assign them, in a geometry shader like so:
vBC = vec3(1, 0, 0);
gl_Position = gl_in[0].gl_Position;
EmitVertex();
vBC = vec3(0, 1, 0);
gl_Position = gl_in[1].gl_Position;
EmitVertex();
vBC = vec3(0, 0, 1);
gl_Position = gl_in[2].gl_Position;
EmitVertex();
and nothing else; just pass them on to the fragment shader and it does the rest:
#version 400
precision mediump float;
layout (location = 0) out vec4 outColor;
in vec3 vBC;
const float lineWidth = 0.5;
const vec3 lineColor = vec3(0.7, 0.7, 0.7);
float edgeFactor() {
vec3 d = fwidth(vBC);
vec3 f = step(d * lineWidth, vBC);
return min(min(f.x, f.y), f.z);
}
void main(){
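// note: values written to a normalized color buffer are clamped to [0, 1], so 255 and 191 both end up as 1.0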
outColor = vec4(255, 191, 0.0, (1.0-edgeFactor())*0.95);
}
The vertex shader only transforms the positions, nothing else; it's as basic as it gets.
Here is the full geometry shader if anyone needs it:
#version 400
layout(triangles) in;
layout(triangle_strip, max_vertices = 3) out;
out vec3 vBC;
void main()
{
vBC = vec3(1, 0, 0);
gl_Position = gl_in[0].gl_Position;
EmitVertex();
vBC = vec3(0, 1, 0);
gl_Position = gl_in[1].gl_Position;
EmitVertex();
vBC = vec3(0, 0, 1);
gl_Position = gl_in[2].gl_Position;
EmitVertex();
}
Here are some pictures:
As you can see, it's working with transparency, which is done using the blend setup shown above.
Here are the articles I looked at:
https://tchayen.github.io/wireframes-with-barycentric-coordinates/
http://codeflow.org/entries/2012/aug/02/easy-wireframe-display-with-barycentric-coordinates/
And a cool book that helped me a lot:
https://people.inf.elte.hu/plisaai/pdf/David%20Wolff%20-%20OpenGL%204.0%20Shading%20Language%20Cookbook%20(2).pdf
Just in case, here is the vertex shader:
#version 400
precision mediump int;
precision mediump float;
layout (location = 0) in vec3 position;
uniform mat4 T_MVP;
void main() {
gl_Position = T_MVP * vec4(position, 1.0);
}
I'm trying to implement 2 spotlights at the top of the scene using Processing that change their respective directions over time. I tried using the default spotLight(r, g, b, x, y, z, nx, ny, nz, angle, concentration) method to create the spotlights and changing the nx, ny and nz variables to change the light direction. However, the method doesn't seem to take those 3 variables into account. This is the GLSL I'm using.
precision mediump float;
varying vec3 normalInterp;
varying vec3 vertPos;
uniform int lightCount;
uniform vec4 lightPosition[8];
uniform vec3 lightNormal[8];
//ambient
const vec3 ambientColor = vec3(0.1, 0, 0);
//diffuse
const vec3 diffuseColor = vec3(0.5, 0.0, 0.0);
//specular
const vec3 specColor = vec3(1.0, 1.0, 1.0);
//specular reflection parameter
const float n = 30.0;
//Depth cueing
//not implemented
void main() {
float lightR = 0.0;
float lightG = 0.0;
float lightB = 0.0;
for (int i = 0; i < lightCount; i++)
{
vec3 normal = normalize(normalInterp);
vec3 lightDir = normalize(lightPosition[i] - vertPos);
//diffuse
float diffuse = max(dot(lightDir,normal), 0.0);
//specular
float specular = 0.0;
if(diffuse > 0.0) {
vec3 viewDir = normalize(-vertPos);
vec3 reflectDir = reflect(-lightDir, normal);
float specAngle = max(dot(reflectDir, viewDir), 0.0);
specular = pow(specAngle, n);
}
//Note: can add in depth cueing here
vec3 colorLinear = ambientColor +
diffuse * diffuseColor +
specular * specColor;
lightR += colorLinear.x;
lightG += colorLinear.y;
lightB += colorLinear.z;
}
gl_FragColor = vec4(lightR,lightG,lightB, 1.0);
}
There is a simple issue in the shader program. First there is a typo: it has to be lightPosition rather than lightPostion. But that is not the only issue.
The type of lightPosition[i] is vec4 and the type of vertPos is vec3. That causes an error when vertPos is subtracted from lightPosition[i].
Either you have to construct a vec3 from lightPosition[i]:
vec3 lightDir = normalize(lightPostion[i] - vertPos);        // original (typo, vec4 - vec3)
vec3 lightDir = normalize(vec3(lightPosition[i]) - vertPos); // fixed
Or you have to get the x, y and z components from lightPosition[i] (see Swizzling):
vec3 lightDir = normalize(lightPosition[i].xyz - vertPos);
Both solutions lead to the same result.
Of course, the light position has to be set relative to the object. Note that when spotLight() is called, the light position and direction are transformed by the current model view matrix.
See the example:
Vertex shader
uniform mat4 modelview;
uniform mat4 transform;
uniform mat3 normalMatrix;
attribute vec4 position;
attribute vec4 color;
attribute vec3 normal;
varying vec3 normalInterp;
varying vec3 vertPos;
varying vec4 vertColor;
void main() {
gl_Position = transform * position;
vertPos = vec3(modelview * position);
normalInterp = normalize(normalMatrix * normal);
}
Fragment shader
precision mediump float;
varying vec3 normalInterp;
varying vec3 vertPos;
uniform int lightCount;
uniform vec4 lightPosition[8];
uniform vec3 lightNormal[8];
uniform vec3 lightDiffuse[8];
uniform vec3 lightSpecular[8];
uniform vec2 lightSpot[8];
const vec3 ambientColor = vec3(0.2);
const vec3 diffuseColor = vec3(1.0);
const vec3 specColor = vec3(1.0);
const float n = 30.0;
void main() {
vec3 lightColor = vec3(0.0, 0.0, 0.0);
for (int i = 0; i < lightCount; i++)
{
// ambient
lightColor += lightDiffuse[i] * ambientColor;
vec3 normal = normalize(normalInterp);
vec3 lightDir = normalize(lightPosition[i].xyz - vertPos);
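// lightSpot[i].x appears to hold the cosine of the spotLight() cone angle, so fragments outside the cone are skipped: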
float spot = dot(-lightNormal[i], lightDir);
if (spot < lightSpot[i].x)
continue;
//diffuse
float diffuse = max(dot(lightDir,normal), 0.0);
lightColor += diffuse * lightDiffuse[i] * diffuseColor;
//specular
if(diffuse > 0.0) {
vec3 viewDir = normalize(-vertPos);
vec3 reflectDir = reflect(-lightDir, normal);
float specAngle = max(dot(reflectDir, viewDir), 0.0);
float specular = pow(specAngle, n);
lightColor += specular * lightSpecular[i] * specColor;
}
}
gl_FragColor = vec4(lightColor.rgb, 1.0);
}
Code
PShader lightShader;
void setup() {
size(800, 600, P3D);
lightShader = loadShader("fragment.glsl","vertex.glsl");
}
float ry = 0.0;
void draw() {
background(0);
shader(lightShader);
translate(width/2.0, height/2.0);
spotLight(255, 0, 0, 0, 500, 500, 0, -1, -1, PI/25, 2);
spotLight(0, 0, 255, 500, 0, 500, -1, 0, -1, PI/25, 2);
rotateY(ry);
rotateX(-0.5);
ry += 0.02;
noStroke();
box(200);
}
So I'm following ThinMatrix's tutorials on multiple lights and point lights. I believe I've followed everything correctly.
I first followed the multiple-lights tutorial, and none of the entities or terrain were being affected. I thought doing the next tutorial, which deals with attenuation, would resolve this problem. Now all my objects are shaded black.
I'm unsure what could be wrong. Any help would be appreciated.
Code here
Thinmatrix tutorial 25 multiple lights
//FRAGMENT SHADER
#version 400 core
in vec2 pass_textureCoordinates;
in vec3 surfaceNormal;
in vec3 toLightVector[4];
in vec3 toCameraVector;
in float visibility;
out vec4 out_Color;
uniform sampler2D modelTexture;
uniform vec3 lightColour[4];
uniform vec3 attenuation[4];
uniform float shineDamper;
uniform float reflectivity;
uniform vec3 skyColour;
void main(void){
vec3 unitNormal = normalize(surfaceNormal);
vec3 unitVectorToCamera = normalize(toCameraVector);
vec3 totalDiffuse = vec3(0.0);
vec3 totalSpecular = vec3(0.0);
for(int i = 0; i < 4; i++) {
float distance = length(toLightVector[i]);
float attFactor = attenuation[i].x + (attenuation[i].y * distance) + (attenuation[i].z * distance * distance);
vec3 unitLightVector = normalize(toLightVector[i]);
float nDot1 = dot(unitNormal, unitLightVector);
float brightness = max(nDot1, 0.0);
vec3 lightDirection = -unitLightVector;
vec3 reflectedLightDirection = reflect(lightDirection, unitNormal);
float specularFactor = dot(reflectedLightDirection, unitVectorToCamera);
specularFactor = max(specularFactor, 0.0);
float dampedFactor = pow(specularFactor, shineDamper);
totalDiffuse = totalDiffuse + (brightness * lightColour[i])/attFactor;
totalSpecular = totalSpecular + (dampedFactor * reflectivity * lightColour[i])/attFactor;
}
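// keep a minimum diffuse of 0.2 so models are never completely black (a cheap ambient term)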
totalDiffuse = max(totalDiffuse, 0.2);
vec4 textureColour = texture(modelTexture,pass_textureCoordinates);
if(textureColour.a<0.5) {
discard;
}
out_Color = vec4(totalDiffuse,1.0) * textureColour + vec4(totalSpecular,1.0);
out_Color = mix(vec4(skyColour,1.0),out_Color, visibility);
}
VERTEX SHADER:
#version 400 core
in vec3 position;
in vec2 textureCoordinates;
in vec3 normal;
out vec2 pass_textureCoordinates;
out vec3 surfaceNormal;
out vec3 toLightVector[4];
out vec3 toCameraVector;
out float visibility;
uniform mat4 transformationMatrix;
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
uniform vec3 lightPosition[4];
uniform float useFakeLighting;
uniform float numberOfRows;
uniform vec2 offset;
const float density = 0.0035;
const float gradient = 5.0;
void main(void){
vec4 worldPosition = transformationMatrix * vec4(position,1.0);
vec4 positionRelativeToCam = viewMatrix * worldPosition;
gl_Position = projectionMatrix * positionRelativeToCam;
pass_textureCoordinates = (textureCoordinates/numberOfRows) + offset;
vec3 actualNormal = normal;
if(useFakeLighting > 0.5) {
actualNormal = vec3(0.0,1.0,0.0);
}
surfaceNormal = (transformationMatrix * vec4(actualNormal,0.0)).xyz;
for(int i =0; i< 4;i++) {
toLightVector[i] = lightPosition[i] - worldPosition.xyz;
}
toCameraVector = (inverse(viewMatrix) * vec4(0.0,0.0,0.0,1.0)).xyz - worldPosition.xyz;
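// exponential fog: visibility falls off with distance from the camera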
float distance = length(positionRelativeToCam.xyz);
visibility = exp(-pow((distance*density),gradient));
visibility = clamp(visibility,0.0,0.9);
}
In your StaticShader class:
for(int i = 0; i < MAX_LIGHTS; i++) {
location_lightPosition[i] = super.getUniformLocation("lightPosition{" + i + "}");
location_lightColour[i] = super.getUniformLocation("lightColour{" + i + "}");
location_attenuation[i] = super.getUniformLocation("attenuation[" + i + "}");
}
You are using }/{ instead of ]/[; because of that OpenGL can't find the uniforms and the brightness calculation doesn't work.
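With square brackets the lookup becomes (same loop, only the bracket characters changed):
for(int i = 0; i < MAX_LIGHTS; i++) {
    // build names like "lightPosition[0]", which is what GLSL exposes for array uniforms
    location_lightPosition[i] = super.getUniformLocation("lightPosition[" + i + "]");
    location_lightColour[i] = super.getUniformLocation("lightColour[" + i + "]");
    location_attenuation[i] = super.getUniformLocation("attenuation[" + i + "]");
}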
If you want to check whether a uniform is found, just change this code in your ShaderProgram class:
protected int getUniformLocation(String uniformName){
int loc = GL20.glGetUniformLocation(programID,uniformName);
if(loc==-1) System.err.println("Uniform with name \""+uniformName+"\" not found!");
return loc;
}
From the OpenGL documentation:
glGetUniformLocation returns an integer that represents the location of a specific uniform variable within a program object. name must be a null terminated string that contains no white space. name must be an active uniform variable name in program that is not a structure, an array of structures, or a subcomponent of a vector or a matrix. This function returns -1 if name does not correspond to an active uniform variable in program, if name starts with the reserved prefix "gl_", or if name is associated with an atomic counter or a named uniform block.
If it still doesn't work, check the individual color values.
First check the texture:
out_Color = textureColour;
Second check the diffuse light:
out_Color = vec4(totalDiffuse,1.0);
Third check the specular light:
out_Color = vec4(totalSpecular,1.0);
I hope this helps.
I'm trying to implement depth testing for a 2D isometric game. To get something working, I started off with this sample, but I cannot get it to work correctly.
I'm trying to draw 2 images in a specific order.
first.png
second.png
first.png is drawn first, and second.png is drawn on top. In the fragment shader, I give red fragments a lower depth than green fragments, so green fragments should be discarded when drawn on top of red ones. The end result should be that when second.png is drawn directly on top of first.png, the resulting square is colored only red.
At the end of the render function, I read back the pixels of the depth buffer and loop over them, checking whether the values have changed from the default ones. It seems that no matter what I do, the values in the depth buffer never change.
The depth test itself is working: if I set green fragments to depth = 1.0, red fragments to depth = 0.0 and my depth function is GL_LESS, only red fragments are drawn, but the depth buffer is not changed.
The code is in Java, but the OpenGL calls are the same.
private SpriteBatch mBatch;
private Texture mTexture1;
private Texture mTexture2;
@Override
public void create() {
mBatch = new SpriteBatch();
mBatch.setShader(new ShaderProgram(Gdx.files.internal("test.vsh"), Gdx.files.internal("test.fsh")));
mTexture1 = new Texture("first.png");
mTexture2 = new Texture("second.png");
Gdx.gl20.glEnable(GL20.GL_DEPTH_TEST);
Gdx.gl20.glDepthFunc(GL20.GL_LESS);
Gdx.gl20.glDepthMask(true);
}
@Override
public void render() {
Gdx.gl20.glClear(GL20.GL_COLOR_BUFFER_BIT | GL20.GL_DEPTH_BUFFER_BIT);
mBatch.begin();
float scale = 4.0f;
float x = Gdx.graphics.getWidth() / 2;
float y = Gdx.graphics.getHeight() / 2;
mBatch.draw(mTexture1, x - mTexture1.getWidth() / 2 * scale, y - mTexture1.getHeight() / 2 * scale,
mTexture1.getWidth() * scale, mTexture1.getHeight() * scale);
mBatch.flush();
mBatch.draw(mTexture2, x - mTexture2.getWidth() / 2 * scale, y - mTexture2.getHeight() / 2 * scale,
mTexture2.getWidth() * scale, mTexture2.getHeight() * scale);
mBatch.end();
int width = Gdx.graphics.getWidth();
int height = Gdx.graphics.getHeight();
FloatBuffer buffer = BufferUtils.newFloatBuffer(width * height);
Gdx.gl20.glReadPixels(0, 0, width, height, GL20.GL_DEPTH_COMPONENT, GL20.GL_FLOAT,
buffer);
for (int i = 0; i < width * height; i++) {
float pixel = buffer.get(i);
if (pixel != 1.0f && pixel != 0.0f) {
// why is this never thrown??
// it means depth buffer wasn't changed.
throw new IllegalStateException("OMG IT WORKS!! " + pixel);
}
}
if (Gdx.gl20.glGetError()!=0) {
throw new Error("OPENGL ERROR: " + Gdx.gl20.glGetError());
}
}
Vertex shader
#ifdef GL_ES
precision mediump float;
#endif
attribute vec3 a_position;
attribute vec4 a_color;
attribute vec2 a_texCoord0;
uniform mat4 u_projTrans;
varying vec4 v_color;
varying vec2 v_texCoord;
void main()
{
gl_Position = u_projTrans * vec4(a_position, 1);
v_color = a_color * 2.0;
v_texCoord = a_texCoord0;
}
Fragment shader
#ifdef GL_ES
precision mediump float;
#endif
uniform sampler2D u_texture;
varying vec4 v_color;
varying vec2 v_texCoord;
void main()
{
vec4 texel = v_color * texture2D(u_texture, v_texCoord);
if (texel.r > texel.g)
{
gl_FragDepth = 0.0;
}
else
{
gl_FragDepth = 0.5;
}
gl_FragColor = texel;
}
Ok, I found the problem.
SpriteBatch.begin() does
glDepthMask(false)
Setting glDepthMask to false prevents OpenGL from writing to the depth buffer.
The solution is to call glDepthMask(true) after SpriteBatch.begin()
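A minimal sketch of the fix, inside the render() method above:
mBatch.begin();
// SpriteBatch.begin() has just disabled depth writes, so turn them back on:
Gdx.gl20.glDepthMask(true);
// ...the draw calls stay the same...
mBatch.end();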
I need a shader that creates a diffuse light affected only by the angle between the normal and the light source. My current shader, which is affected by both distance and angle, looks like this:
private final String vertexShaderCode2 =
"uniform mat4 uMVPMatrix;" +
"uniform mat4 uMVMatrix;" +
"uniform vec3 uLightPos;" +
"attribute vec4 vPosition;" +
"attribute vec4 aColor;" +
"attribute vec3 aNormal;" +
"varying vec4 vColor;" +
"void main() {" +
" vec3 modelViewVertex = vec3(uMVMatrix * vPosition); " +
" vec3 modelViewNormal = vec3(uMVMatrix * vec4(aNormal, 0.0));" +
" float distance = length(uLightPos - modelViewVertex);" +
" vec3 lightVector = normalize(uLightPos - modelViewVertex);" +
" float diffuse = max(dot(modelViewNormal, lightVector), 0.1);" +
" diffuse = diffuse * (1.0 / (1.0 + (0.25 * distance * distance)));" +
" vColor = aColor * diffuse;" +
" gl_Position = uMVPMatrix * vPosition;" +
"}";
private final String fragmentShaderCode =
"precision mediump float;" +
"varying vec4 vColor;" +
"void main() {" +
" gl_FragColor = vColor;" +
"}";
Here is the code for populating the shaders:
// Calculate position of the light.
Matrix.setIdentityM(mLightModelMatrix, 0);
Matrix.rotateM(mLightModelMatrix, 0, LightAngleInDegrees, 0.0f, 1.0f, 1.0f);
Matrix.translateM(mLightModelMatrix, 0, 0.0f, 100.0f, -10.0f);
Matrix.multiplyMV(mLightPosInWorldSpace, 0, mLightModelMatrix, 0, mLightPosInModelSpace, 0);
Matrix.multiplyMV(mLightPosInEyeSpace, 0, mViewMatrix, 0, mLightPosInWorldSpace, 0);
// Player
Matrix.setIdentityM(mModelMatrix, 0);
Matrix.translateM(mModelMatrix, 0, py, px, pz);
Matrix.rotateM(mModelMatrix, 0, mAngle, 0, 0, 1.0f);
Matrix.scaleM(mModelMatrix, 0, scalef, scalef, scalef * 2);
Matrix.multiplyMM(mMVMatrix, 0, mViewMatrix, 0, mModelMatrix, 0);
Matrix.multiplyMM(mMVPMatrix, 0, mProjectionMatrix, 0, mMVMatrix, 0);
cube.draw(mMVPMatrix, mMVMatrix, mLightPosInWorldSpace);
And finally, the cube.draw method:
public void draw(float[] mvpMatrix, float[] mvMatrix, float[] mLightPosInWorldSpace) {
...
// Pass in the light position in eye space.
GLES20.glUniform3f(LightPosHandle, mLightPosInWorldSpace[0], mLightPosInWorldSpace[1], mLightPosInWorldSpace[2]);
...
}
If I leave the following line out of the shader:
diffuse = diffuse * (1.0 / (1.0 + (0.25 * distance * distance)));
then distance is not accounted for, but a side effect is that rotating the cube no longer affects the color of the cube's sides, as if the light source were following the cube around. LightAngleInDegrees (light) and mAngle (cube) are two separate variables.
The light position for the diffuse calculation needs to be given in world space. That means you need to pass in mLightPosInWorldSpace, not mLightPosInEyeSpace.
public void draw(float[] mvpMatrix, float[] mvMatrix, float[] mLightPosInEyeSpace) {
...
// Pass in the light position in world space.
GLES20.glUniform3f(LightPosHandle, mLightPosInWorldSpace[0], mLightPosInWorldSpace[1], mLightPosInWorldSpace[2]);
...
}
Good thing you named your variables so clearly, otherwise I would not have spotted that.