Camera rotation calculation? - java

I want to translate the camera in world space, but it doesn't move as I expected. My algorithm is:
First, I translate the camera in view space. In view space the camera is always located at (0,0,0). I calculate the new camera location in view space, then multiply that point by the inverse view matrix, which I thought would give me the camera location in world space.
Second, I calculate the new view matrix. I build it with OpenGL's setLookAtM function and set it as the new view matrix.
My problem is that the new locations cannot be calculated: their values are NaN.
Here is my code:
private void pivotRotation(double angle, int[] pivot, double distance) {
    float[] temp = new float[4];
    float[] temp2 = new float[4];
    float[] extObj = new float[16];
    float[] startPointsOnViewSpace = new float[4];
    float[] newEyeOrigin = new float[4];
    float[] newLookOrigin = new float[4];
    int[] viewport = new int[16];
    viewport[0] = 0;
    viewport[1] = 0;
    viewport[2] = (int) myRenderer.screenWidth;
    viewport[3] = (int) myRenderer.screenHeight;
    // Rotate the screen centre around the pivot by the given angle (2D rotation).
    int[] newEye = new int[2];
    newEye[0] = (int) (pivot[0] + ((viewport[2] / 2) - pivot[0]) * Math.cos(angle) - ((viewport[3] / 2) - pivot[1]) * Math.sin(angle));
    newEye[1] = (int) (pivot[1] + ((viewport[2] / 2) - pivot[0]) * Math.sin(angle) + ((viewport[3] / 2) - pivot[1]) * Math.cos(angle));
    // Unproject the rotated screen point to world space, then bring it into view space.
    GLU.gluUnProject((float) newEye[0], (float) (viewport[3] - newEye[1]), 0.0f, myRenderer.modelViewMatrix, 0, myRenderer.projectionMatrix, 0, viewport, 0, extObj, 0);
    Matrix.multiplyMV(startPointsOnViewSpace, 0, myRenderer.modelViewMatrix, 0, extObj, 0);
    // New camera location in view space (perspective divide).
    startPointsOnViewSpace[0] /= startPointsOnViewSpace[3];
    startPointsOnViewSpace[1] /= startPointsOnViewSpace[3];
    startPointsOnViewSpace[2] /= startPointsOnViewSpace[3];
    newEyeOrigin[0] = startPointsOnViewSpace[0];
    newEyeOrigin[1] = startPointsOnViewSpace[1];
    newEyeOrigin[2] += distance / pinchParameter;
    newEyeOrigin[3] = 1.0f;
    // The look target sits one unit in front of the eye along -Z in view space.
    newLookOrigin[0] = newEyeOrigin[0];
    newLookOrigin[1] = newEyeOrigin[1];
    newLookOrigin[2] = newEyeOrigin[2] - 1.0f;
    newLookOrigin[3] = 1.0f;
    // temp[0..2] should be the new camera location in world space (I think).
    Matrix.multiplyMV(temp, 0, myRenderer.inverseViewMatrix, 0, newEyeOrigin, 0);
    Matrix.multiplyMV(temp2, 0, myRenderer.inverseViewMatrix, 0, newLookOrigin, 0);
    for (int i = 0; i < 3; i++) {
        myRenderer.eyeLocation[i] = temp[i];
        myRenderer.lookPosition[i] = temp2[i];
    }
    myRenderer.eyeSettings();
}
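I suspect, though I have not confirmed it, that the NaN comes from the perspective divide: if gluUnProject fails, or the point brought back into view space ends up with w close to zero, the divisions by startPointsOnViewSpace[3] produce Infinity or NaN, which then propagates through every later multiply and into setLookAtM. A minimal guard might look like this (a sketch, assuming the Android GLU binding, which returns GL10.GL_TRUE on success):
// Hedged sketch: bail out when unprojection fails or w is (near) zero,
// so NaN can never reach the view matrix.
int ok = GLU.gluUnProject((float) newEye[0], (float) (viewport[3] - newEye[1]), 0.0f,
        myRenderer.modelViewMatrix, 0, myRenderer.projectionMatrix, 0, viewport, 0, extObj, 0);
Matrix.multiplyMV(startPointsOnViewSpace, 0, myRenderer.modelViewMatrix, 0, extObj, 0);
if (ok != GL10.GL_TRUE || Math.abs(startPointsOnViewSpace[3]) < 1e-6f) {
    return; // skip this update instead of feeding NaN into setLookAtM
}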

Related

LibGDX's Model made with ModelBuilder not rendering

I'm learning to use LibGDX, and my goal is to create a cube whose resolution (the number of vertices along each face) can be controlled. I already did that: I used a MeshBuilder to make the cube out of 6 different meshes and rendered the resulting Mesh successfully with basic shaders:
Cube Mesh
// Creates a square face with a normal vector and `resolution` vertices along each edge of the face.
public Mesh createFace(Vector3 normal, int resolution) {
    // Creates 2 vectors perpendicular to each other and to the normal vector.
    Vector3 axisA = new Vector3(normal.y, normal.z, normal.x);
    Vector3 axis = u.crs(normal, axisA);
    Vector3 axisB = new Vector3(u.sqrt(axis.x), u.sqrt(axis.y), u.sqrt(axis.z));
    // Creates the arrays to hold the vertices and triangles.
    Vector3[] vertices = new Vector3[resolution * resolution];
    short[] triangles = new short[(resolution - 1) * (resolution - 1) * 6];
    int triIndex = 0;
    // Loops over each vertex in the face.
    for (int y = 0; y < resolution; y++) {
        for (int x = 0; x < resolution; x++) {
            int vertexIndex = x + y * resolution;
            // Vector representing how close to the end of the x or y axis the loop is.
            Vector2 t = new Vector2(x / (resolution - 1f), y / (resolution - 1f));
            // Calculates the position of the vertex to place on the face.
            Vector3 mulA = u.mul(axisA, (2 * t.x - 1));
            Vector3 mulB = u.mul(axisB, (2 * t.y - 1));
            Vector3 point = u.add3(normal, mulA, mulB);
            //point = u.normalize(point);
            vertices[vertexIndex] = point;
            // Puts the vertices into triangles.
            if (x != resolution - 1 && y != resolution - 1) {
                triangles[triIndex + 0] = (short) vertexIndex;
                triangles[triIndex + 1] = (short) (vertexIndex + resolution + 1);
                triangles[triIndex + 2] = (short) (vertexIndex + resolution);
                triangles[triIndex + 3] = (short) vertexIndex;
                triangles[triIndex + 4] = (short) (vertexIndex + 1);
                triangles[triIndex + 5] = (short) (vertexIndex + resolution + 1);
                triIndex += 6;
            }
        }
    }
    float[] verticeList = u.vectorToList(vertices);
    Mesh m = new Mesh(true, resolution * resolution, triangles.length, new VertexAttribute(Usage.Position, 3, "a_Position"));
    m.setIndices(triangles);
    m.setVertices(verticeList);
    return m;
}
// Generates a cube Mesh with `resolution` vertices along each face.
public Mesh generateFaces(int resolution, float scale) {
    MeshBuilder meshBuilder = new MeshBuilder();
    meshBuilder.begin(new VertexAttributes(new VertexAttribute(Usage.Position, 3, "a_Position")));
    Vector3[] faceNormals = {
            new Vector3(0, 1 * scale, 0),  // up
            new Vector3(0, -1 * scale, 0), // down
            new Vector3(-1 * scale, 0, 0), // left
            new Vector3(1 * scale, 0, 0),  // right
            new Vector3(0, 0, 1 * scale),  // forward
            new Vector3(0, 0, -1 * scale)  // back
    };
    for (int i = 0; i < faceNormals.length; i++) {
        meshBuilder.part("part" + Integer.toString(i), GL20.GL_TRIANGLES);
        meshBuilder.addMesh(createFace(faceNormals[i], resolution));
    }
    Mesh mesh = meshBuilder.end();
    return mesh;
}
u is just a utilities class I created to store some math functions.
I then render it like so:
@Override
public void render() {
    camController.update();
    Gdx.gl.glViewport(0, 0, Gdx.graphics.getWidth(), Gdx.graphics.getHeight());
    Gdx.gl.glClearColor(0.5f, 0.5f, 0.5f, 0.5f);
    Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT | GL20.GL_DEPTH_BUFFER_BIT);
    shader.bind();
    shader.setUniformMatrix("matViewProj", cam.combined);
    // Rendering the mesh.
    mesh1.render(shader, GL20.GL_LINE_STRIP);
    [...]
}
I now want to make a model out of that mesh in which each of the 6 faces has a different color.
I tried to do it with a ModelBuilder, following the LibGDX wiki, like so:
public Model generateModel(int resolution, float scale, Color[] colors) {
    Vector3[] faceNormals = {
            new Vector3(0, 1 * scale, 0),  // up
            new Vector3(0, -1 * scale, 0), // down
            new Vector3(-1 * scale, 0, 0), // left
            new Vector3(1 * scale, 0, 0),  // right
            new Vector3(0, 0, 1 * scale),  // forward
            new Vector3(0, 0, -1 * scale)  // back
    };
    ModelBuilder modelBuilder = new ModelBuilder();
    modelBuilder.begin();
    for (int i = 0; i < faceNormals.length; i++) {
        Mesh mesh = createFace(faceNormals[i], resolution);
        MeshPart part = new MeshPart("part" + Integer.toString(i), mesh, 0, mesh.getNumVertices(), GL20.GL_TRIANGLES);
        modelBuilder.node().parts.add(new NodePart(part, new Material(ColorAttribute.createDiffuse(colors[i]))));
    }
    Model m = modelBuilder.end();
    return m;
}
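One thing I am not sure about in the loop above (an assumption on my part, not something I have verified): for an indexed mesh, the MeshPart size argument appears to be counted in indices rather than vertices, and my index count is larger than mesh.getNumVertices(), so part of each face could be missing. ModelBuilder also seems to have a part(id, mesh, primitiveType, material) convenience overload that sizes the part over the whole mesh; a sketch of the loop using it:
// Hedged sketch: let ModelBuilder create the node part and size it over the whole mesh.
for (int i = 0; i < faceNormals.length; i++) {
    Mesh mesh = createFace(faceNormals[i], resolution);
    modelBuilder.node(); // start a new node for this face
    modelBuilder.part("part" + i, mesh, GL20.GL_TRIANGLES,
            new Material(ColorAttribute.createDiffuse(colors[i])));
}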
I then rendered it using a ModelBatch and a ModelInstance:
@Override
public void create() {
    // Creates an environment to handle lighting and such.
    environment = new Environment();
    environment.set(new ColorAttribute(ColorAttribute.AmbientLight, 0.4f, 0.4f, 0.4f, 1f));
    environment.add(new DirectionalLight().set(0.8f, 0.8f, 0.8f, -1f, -0.8f, -0.2f));
    modelBatch = new ModelBatch();
    // Handles the input processors of the camera and stage (UI).
    multiplexer = new InputMultiplexer();
    stage = new Stage();
    multiplexer.addProcessor(stage);
    scroll = new ScrolledInputProcessor();
    multiplexer.addProcessor(scroll);
    // Camera (3D input processor).
    cam = new PerspectiveCamera(67, Gdx.graphics.getWidth(), Gdx.graphics.getHeight());
    cam.position.set(10f, 10f, 10f);
    cam.lookAt(0, 0, 0);
    cam.near = 1f;
    cam.far = 300f;
    cam.update();
    camController = new CameraInputController(cam);
    multiplexer.addProcessor(camController);
    // Shaders for every vertex and every pixel (fragment).
    shader = new ShaderProgram(Gdx.files.internal("shader/vertexshader.glsl").readString(), Gdx.files.internal("shader/fragmentshader.glsl").readString());
    shader2 = new ShaderProgram(Gdx.files.internal("shader/vertexshader.glsl").readString(), Gdx.files.internal("shader/fragmentshader2.glsl").readString());
    // The 2D box encompassing the screen (UI).
    table = new Table();
    table.setFillParent(true);
    stage.addActor(table);
    // Skins for the UI.
    skin = new Skin(Gdx.files.internal("uiskin.json"));
    // Makes a slider and dresses it in the skin.
    Drawable knobDown = skin.newDrawable("default-slider-knob", Color.GRAY);
    SliderStyle sliderStyle = skin.get("default-horizontal", SliderStyle.class);
    sliderStyle.knobDown = knobDown;
    slider = new Slider(3.0f, 70.0f, 1.0f, false, sliderStyle);
    table.right().top();
    table.add(slider).row();
    // Creates the unit cube and unit sphere.
    model = generateModel(res, 1, colors);
    instance = new ModelInstance(model);
    font = new BitmapFont(Gdx.files.internal("uiskin.fnt"));
    batch = new SpriteBatch();
    Gdx.input.setInputProcessor(multiplexer);
}

@Override
public void render() {
    camController.update();
    Gdx.gl.glViewport(0, 0, Gdx.graphics.getWidth(), Gdx.graphics.getHeight());
    Gdx.gl.glClearColor(0.5f, 0.5f, 0.5f, 0.5f);
    Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT | GL20.GL_DEPTH_BUFFER_BIT);
    shader.bind();
    shader.setUniformMatrix("matViewProj", cam.combined);
    modelBatch.begin(cam);
    modelBatch.render(instance, environment);
    modelBatch.end();
    batch.begin();
    font.draw(batch, "Zoom Level : " + zoomLevel, 1000f, 100f);
    batch.end();
    stage.act(Gdx.graphics.getDeltaTime());
    stage.draw();
}
However, when I run the program, nothing is rendered, just the gray void.
Gray void of nothingness
My question is: how do I get my model to render?

LibGdx: Intersect Ray with Mesh from ModelInstance

I want to place some objects (ModelInstance) on the floor (also a ModelInstance) of my game world. To get the position for these objects, I let a Ray intersect the floor. The point of intersection should then be the required position.
My plan is to set the origin of the ray below the floor, so that the direction of the ray goes straight up and hits the floor from below. Both ModelInstances are .g3db Models made in Blender.
Vector3 dir = new Vector3(0, 10, 0); // Vector points upwards
Ray ray = new Ray(new Vector3(), dir.cpy());
Mesh mesh = landscape.model.meshes.first(); // The floor ModelInstance, has only a single mesh
int fac = mesh.getVertexSize();
float[] verts = new float[mesh.getNumVertices() * fac];
short[] inds = new short[mesh.getNumIndices()];
mesh.getVertices(verts);
mesh.getIndices(inds);
for (int j = 0; j < 10; j++) { // Add 10 objects to the floor
    Vector3 out = new Vector3(-15, -50f, -j * 5);
    ray.origin.set(out.cpy()); // Set the origin of the ray below the floor
    if (Intersector.intersectRayTriangles(ray, verts, inds, fac, out)) {
        System.out.println(j + " out = " + out); // out should be the position for my objects
    }
}
The output of the intersectRayTriangles method is exactly the initial position below the floor, but that point is nowhere near the floor. How do I get the proper point of intersection?
I finally found a (semi-optimal) solution that works.
landscape is a ModelInstance, created with Blender.
ArrayList<Vector3> vertices = new ArrayList<>();
landscape.calculateTransforms();
Renderable rend = new Renderable();
Mesh mesh = landscape.getRenderable(rend).meshPart.mesh;
int vertexSize = mesh.getVertexSize() / 4; // Vertex size in floats, not bytes
float[] verts = new float[mesh.getNumVertices() * vertexSize];
short[] inds = new short[mesh.getNumIndices()];
mesh.getVertices(verts);
mesh.getIndices(inds);
// Project every indexed vertex into world space.
for (int i = 0; i < inds.length; i++) {
    int i1 = inds[i] * vertexSize;
    Vector3 v = new Vector3(verts[i1], verts[i1 + 1], verts[i1 + 2]);
    v.set(v.prj(rend.worldTransform));
    vertices.add(v);
}
Vector3 dir = new Vector3(0, 10, 0);
Vector3 pos = new Vector3(random.nextFloat(), random.nextFloat(), random.nextFloat());
Ray ray = new Ray(pos, dir.cpy());
// Note: the bound must be inclusive (<=), otherwise the last triangle is skipped.
for (int i = 0; i <= vertices.size() - 3; i += 3) {
    if (Intersector.intersectRayTriangle(ray, vertices.get(i), vertices.get(i + 1), vertices.get(i + 2), pos)) {
        // pos now contains the correct coordinates
        break;
    }
}
Note that the y-axis faces upwards.
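As a side note, a possible refinement, sketched under the assumption that the instance carries a single rigid world transform: instead of projecting every vertex into world space, transform the ray once into the mesh's local space with the inverse world transform, intersect there, and map the hit point back.
// Hedged sketch: move the ray into mesh-local space instead of moving all vertices.
Matrix4 inv = new Matrix4(rend.worldTransform).inv();
Ray localRay = ray.cpy().mul(inv); // the ray now lives in mesh space
if (Intersector.intersectRayTriangles(localRay, verts, inds, vertexSize, pos)) {
    pos.mul(rend.worldTransform); // map the hit point back to world space
}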

Merging multiple meshes with transformation matrix into a single mesh in OpenGL

I am trying to merge multiple meshes, each with a transformation matrix, into a single mesh.
Each mesh has 4 data sets:
Vertices
Indices
Texture Coordinates
Normals
The way I'm doing it is meant to be lazy and cheap on the CPU.
It is a three-step process:
Multiply each vertex and normal by the transformation matrix.
Merge the vertices, texture coordinates, and normals of each mesh into 3 big arrays.
Merge the indices of each mesh into a single array, using the sum of the previous meshes as an offset. For example: if mesh 1 has 800 indices, then 800 has to be added to all of the indices from mesh 2.
This method has two big problems:
Duplicate vertices are not shared
Parts that are invisible due to clipping are not removed
But that is OK, as this is meant to be a lazy method without heavy CPU usage. It is already optimal for creating meshes for grass and bushes.
I have attempted an implementation of this method which looks like this:
public static final MeshData mergeLazy(List<MeshData> meshes, List<Matrix4f> transformations) {
    // First pass: measure the total size of each merged array.
    int lengthVertices = 0;
    int lengthNormals = 0;
    int lengthTexCoords = 0;
    int lengthIndices = 0;
    ArrayList<Integer> indexLengths = new ArrayList<>();
    for (MeshData mesh : meshes) {
        lengthVertices += mesh.getVertices().length;
        lengthNormals += mesh.getNormals().length;
        lengthTexCoords += mesh.getTextureCoordinates().length;
        int length = mesh.getIndices().length;
        lengthIndices += length;
        indexLengths.add(length);
    }
    float[] vertices = new float[lengthVertices];
    float[] texCoords = new float[lengthTexCoords];
    float[] normals = new float[lengthNormals];
    int[] indices = new int[lengthIndices];
    int iv = 0;
    int ivt = 0;
    int ivn = 0;
    int i = 0;
    int indexLength = 0;
    // Second pass: transform and copy each mesh into the merged arrays.
    for (int im = 0; im < meshes.size(); im++) {
        MeshData mesh = meshes.get(im);
        float[] mVertices = mesh.getVertices();
        float[] mTexCoords = mesh.getTextureCoordinates();
        float[] mNormals = mesh.getNormals();
        int[] mIndices = mesh.getIndices();
        Matrix4f transformation = transformations.get(im);
        for (int index = 0; index < mVertices.length; index += 3) {
            Vector3f vertex = MatrixUtil.multiply(transformation, mVertices[index], mVertices[index + 1], mVertices[index + 2]);
            vertices[iv++] = vertex.x;
            vertices[iv++] = vertex.y;
            vertices[iv++] = vertex.z;
            Vector3f normal = MatrixUtil.multiply(transformation, mNormals[index], mNormals[index + 1], mNormals[index + 2]);
            normals[ivn++] = normal.x;
            normals[ivn++] = normal.y;
            normals[ivn++] = normal.z;
        }
        for (int index = 0; index < mTexCoords.length; index++) {
            texCoords[ivt++] = mTexCoords[index];
        }
        for (int index = 0; index < mIndices.length; index++) {
            indices[i++] = indexLength + mIndices[index];
        }
        indexLength += indexLengths.get(im);
    }
    MeshData data = new MeshData();
    data.setIndices(indices);
    data.setNormals(normals);
    data.setTextureCoordinates(texCoords);
    data.setVertices(vertices);
    return data;
}
In the end I actually get a single mesh, and multiplying by the transformation also works... for rotation and scaling. But here come the problems:
Multiplying by the transformation does NOT work for the translation.
My method for multiplying a matrix with a vector looks like this:
public static final Vector3f multiply(Matrix4f matrix, float x, float y, float z) {
    Vector3f result = new Vector3f();
    result.x = x * matrix.m00 + y * matrix.m01 + z * matrix.m02;
    result.y = x * matrix.m10 + y * matrix.m11 + z * matrix.m12;
    result.z = x * matrix.m20 + y * matrix.m21 + z * matrix.m22;
    return result;
}
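For reference, a full point transform also has to include the matrix's translation terms, which the 3×3 multiply above drops entirely; which fields hold the translation depends on the library's storage convention, and I have not verified which one my Matrix4f uses. A sketch assuming LWJGL-style fields, where mCR is column C, row R, the translation sits in m30/m31/m32, and a point has an implicit w = 1:
// Hedged sketch: point transform including the translation column (LWJGL-style layout assumed).
public static final Vector3f multiplyPoint(Matrix4f m, float x, float y, float z) {
    Vector3f result = new Vector3f();
    result.x = x * m.m00 + y * m.m10 + z * m.m20 + m.m30;
    result.y = x * m.m01 + y * m.m11 + z * m.m21 + m.m31;
    result.z = x * m.m02 + y * m.m12 + z * m.m22 + m.m32;
    return result;
}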
The second problem is that the textures of the second mesh are somewhat off.
Here is a picture:
As you can see, the second mesh only has about 1/4 of the actual texture.
The code I used to generate this mesh looks like this:
Material grassMaterial = new Material();
grassMaterial.setMinBrightness(0.1F);
grassMaterial.setColorMap(new Texture(new XImgTextureReader().read(new FileInputStream("res/textures/grass2.ximg"))));
grassMaterial.setAffectedByLight(true);
grassMaterial.setTransparent(true);
grassMaterial.setUpwardsNormals(true);
grassMaterial.setFog(fog);

MeshData quad = Quad.generateMeshData(
        new Vector3f(0.0F, 1F, 0.0F),
        new Vector3f(0.0F, 0.0F, 0.0F),
        new Vector3f(1F, 0.0F, 0.0F),
        new Vector3f(1F, 1F, 0.0F)
);
StaticMesh grassMesh = new StaticMesh(MeshUtil.mergeLazy(Arrays.asList(quad, quad), Arrays.asList(
        MatrixUtil.createTransformationMatrx(
                new Vector3f(0.0F, 0.0F, 0.0F),
                new Vector3f(0.0F, 0.0F, 0.0F),
                new Vector3f(1.0F, 1.0F, 1.0F)
        ),
        MatrixUtil.createTransformationMatrx(
                new Vector3f(0F, 0.0F, -0F),
                new Vector3f(0.0F, 90.0F, 0.0F),
                new Vector3f(1.0F, 1.0F, 1.0F)
        )
)));
grassMesh.setCullMode(StaticMesh.CULLING_DISABLED);

Entity grass = new Entity();
grass.setShaderPipeline(shaderPipeline);
grass.setMaterial(grassMaterial);
grass.setMesh(grassMesh);
grass.setTranslation(0, 0, 1);
My question now is: What did I do wrong? Why is the texture so weird and why does the multiplication with the transformation not work for the translation?
If you need more of the code, I have a GitHub Repo with the Eclipse Project here: https://github.com/RalleYTN/Heroica-Fabulis
Thanks to @Rabbid76 I came closer to my answer, and now I have finally found the problem.
The first problem, with the translation not working, was fixed by multiplying the transformation vertically instead of horizontally. Thanks again, @Rabbid76.
The reason the textures were so weird is that I merged the indices incorrectly. The offset should not be the sum of all indices of the previous meshes but the sum of their vertices (e.g., a single quad has 4 vertices but 6 indices, so the second mesh's indices must be offset by 4, not 6).
Here is the working method:
public static final MeshData mergeLazy(List<MeshData> meshes, List<Matrix4f> transformations) {
    ArrayList<Float> vertices = new ArrayList<>();
    ArrayList<Float> texCoords = new ArrayList<>();
    ArrayList<Float> normals = new ArrayList<>();
    ArrayList<Integer> indices = new ArrayList<>();
    int offset = 0;
    int m = 0;
    for (MeshData mesh : meshes) {
        Matrix4f transformation = transformations.get(m);
        float[] mVertices = mesh.getVertices();
        float[] mNormals = mesh.getNormals();
        for (int index = 0; index < mesh.getVertices().length; index += 3) {
            Vector3f vertex = MatrixUtil.multiply(transformation, mVertices[index], mVertices[index + 1], mVertices[index + 2]);
            vertices.add(vertex.x);
            vertices.add(vertex.y);
            vertices.add(vertex.z);
            Vector3f normal = MatrixUtil.multiply(transformation, mNormals[index], mNormals[index + 1], mNormals[index + 2]);
            normals.add(normal.x);
            normals.add(normal.y);
            normals.add(normal.z);
        }
        ListUtil.addFloatArray(texCoords, mesh.getTextureCoordinates());
        int[] mIndices = mesh.getIndices();
        for (int index : mIndices) {
            indices.add(index + offset);
        }
        // Offset by the number of vertices, not the number of indices.
        offset += mVertices.length / 3;
        m++;
    }
    MeshData mesh = new MeshData();
    mesh.setIndices(ListUtil.toPrimitiveIntArray(indices));
    mesh.setNormals(ListUtil.toPrimitiveFloatArray(normals));
    mesh.setTextureCoordinates(ListUtil.toPrimitiveFloatArray(texCoords));
    mesh.setVertices(ListUtil.toPrimitiveFloatArray(vertices));
    return mesh;
}

fill color inside triangle by three co-ordinates

I am using a point cloud and have 3D points.
Let's say we have points P(x,y,z), Q(x,y,z), and R(x,y,z); treating these points as the triangle PQR, we proceed further.
A triangle like this:
How can I fill the area enclosed by these plotted points, so that the triangle is filled with colour?
Like this:
Things I have studied that might help:
Convex hull
Java how to draw and fill a Polygon which has holes
Edit:
Some progress so far:
public void make_polygon(float[] points_x, float[] points_y, float[] points_z)
{
    Material mSphereMaterial_z = new Material();
    //mSphereMaterial_z.setColor(Color.BLUE);
    Bitmap p_z_bitty = getTriangleBitmap(BitmapFactory.decodeResource(mContext.getResources(), R.drawable.label_bg_sm), 5, points_x, points_y, points_z);
    Texture t = new Texture("text", p_z_bitty);
    try {
        mSphereMaterial_z.addTexture(t);
    } catch (Exception e) {
        e.printStackTrace();
    }
    Object3D p_z = new Plane();
    p_z.setPosition(points_x[0], points_y[1], points_z[2]);
    p_z.setMaterial(mSphereMaterial_z);
    p_z.setDoubleSided(true);
    getCurrentScene().addChild(p_z);
}
public static Bitmap getTriangleBitmap(Bitmap bitmap, int radius, float[] a, float[] b, float[] c) {
    Bitmap finalBitmap;
    if (bitmap.getWidth() != radius || bitmap.getHeight() != radius)
        finalBitmap = Bitmap.createScaledBitmap(bitmap, radius, radius, false);
    else
        finalBitmap = bitmap;
    Bitmap output = Bitmap.createBitmap(finalBitmap.getWidth(), finalBitmap.getHeight(), Bitmap.Config.ARGB_8888);
    Canvas canvas = new Canvas(output);
    Paint paint = new Paint();
    final Rect rect = new Rect(0, 0, finalBitmap.getWidth(), finalBitmap.getHeight());
    // Point point1_draw = new Point(75, 0);
    // Point point2_draw = new Point(0, 180);
    // Point point3_draw = new Point(180, 180);
    PointF point1_draw = new PointF(a[0], a[1]);
    PointF point2_draw = new PointF(b[0], b[1]);
    PointF point3_draw = new PointF(c[0], c[1]);
    Path path = new Path();
    path.moveTo(point1_draw.x, point1_draw.y);
    path.lineTo(point2_draw.x, point2_draw.y);
    path.lineTo(point3_draw.x, point3_draw.y);
    path.lineTo(point1_draw.x, point1_draw.y);
    path.close();
    canvas.drawARGB(0, 0, 0, 0);
    paint.setColor(Color.parseColor("#BAB399"));
    canvas.drawPath(path, paint);
    paint.setXfermode(new PorterDuffXfermode(PorterDuff.Mode.SRC_IN));
    canvas.drawBitmap(finalBitmap, rect, rect, paint);
    return output;
}
The result is now:
Can someone guide me or suggest an article about this for Android Java?
If any more information or code is needed, I will provide it.
You should be able to do this by simply passing the triangle's vertices to Rajawali and drawing the triangle as a primitive. That way you avoid the Canvas and bitmap drawing entirely and simplify the rendering.
// p, q, and r are triangle vertices and are of the form {x, y, z}.
public void make_triangle(float[] p, float[] q, float[] r)
{
    Object3D triangle = new Object3D();
    // Copy the three points into a flat array as our vertices.
    float[] vertices = new float[3 * 3];
    for (int i = 0; i < 3; i++) {
        vertices[i] = p[i];
        vertices[3 + i] = q[i];
        vertices[6 + i] = r[i];
    }
    // Set up an index buffer pointing at our three vertices, drawing one triangle.
    int[] indices = new int[3];
    for (int i = 0; i < 3; i++) {
        indices[i] = i;
    }
    // Set up the rendering data.
    triangle.setData(vertices, null, null, null, indices, true);
    // Render the triangle double-sided.
    triangle.setDoubleSided(true);
    // Use a blue material.
    Material material = new Material();
    material.setColor(Color.BLUE);
    triangle.setMaterial(material);
    // Add the triangle to the current scene.
    getCurrentScene().addChild(triangle);
}
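For completeness, a hypothetical call with made-up coordinates (the values are purely illustrative):
// Example usage: one blue, double-sided triangle standing upright near the origin.
make_triangle(new float[]{0f, 1f, 0f},
        new float[]{-1f, -1f, 0f},
        new float[]{1f, -1f, 0f});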

Corner detection not accurate

I'm trying to detect corners, but the coordinates I get are always off-center, and saddle points are detected multiple times.
I tried cornerHarris, cornerMinEigenVal, preCornerDetect, goodFeaturesToTrack, and cornerEigenValsAndVecs, but they all seem to lead to the same result. I haven't tried findChessboardCorners because my corners are not laid out in a nice n×m grid, are not all saddle-type, and for many more reasons.
What I have now:
Given the (pre-processed) camera image below with some positive, negative, and saddle corners:
After cornerHarris(img, energy, 20, 9, 0.1) (I increased blockSize to 20 for illustrative purposes, but small values don't work either) I get this image:
It seems to detect 10 corners, but the way they are positioned is odd. I superimposed this image on the original to show my problem:
The point of highest matching energy is offset towards the inside of the corner, and there is a plume pointing away from the corner. The saddle corners seem to generate four separate plumes, all superimposed.
Indeed, when I perform a corner search using this energy image, I get something like:
What am I doing wrong, and how can I detect corners accurately, like in this mock image?
Edit: MCVE:
public class CornerTest {
    static {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    }

    private static Mat energy = new Mat();
    private static Mat idx = new Mat();

    public static void main(String... args) {
        Mat byteImage = Highgui.imread("KXw7O.png");
        if (byteImage.channels() > 1)
            Imgproc.cvtColor(byteImage, byteImage, Imgproc.COLOR_BGR2GRAY);
        // Preprocess
        Mat floatImage = new Mat();
        byteImage.convertTo(floatImage, CvType.CV_32F);
        // Corner detection
        Mat imageToShow = findCorners(floatImage);
        // Show in GUI
        imageToShow.convertTo(byteImage, CvType.CV_8U);
        BufferedImage bufImage = new BufferedImage(byteImage.width(), byteImage.height(), BufferedImage.TYPE_BYTE_GRAY);
        byte[] imgArray = ((DataBufferByte) bufImage.getRaster().getDataBuffer()).getData();
        byteImage.get(0, 0, imgArray);
        JFrame frame = new JFrame();
        frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
        frame.getContentPane().add(new JLabel(new ImageIcon(bufImage)));
        frame.pack();
        frame.setVisible(true);
    }

    private static Mat findCorners(Mat image) {
        Imgproc.cornerHarris(image, energy, 20, 9, 0.1);
        // Corner search:
        int minDistance = 16;
        Core.MinMaxLocResult minMaxLoc = Core.minMaxLoc(
                energy.submat(20, energy.rows() - 20, 20, energy.rows() - 20));
        float thr = (float) minMaxLoc.maxVal / 4;
        Mat tmp = energy.reshape(1, 1);
        Core.sortIdx(tmp, idx, 16); // 16 = CV_SORT_EVERY_ROW | CV_SORT_DESCENDING
        int[] idxArray = new int[idx.cols()];
        idx.get(0, 0, idxArray);
        float[] energyArray = new float[idx.cols()];
        energy.get(0, 0, energyArray);
        int n = 0;
        for (int p : idxArray) {
            if (energyArray[p] == -1) continue;
            if (energyArray[p] < thr) break;
            n++;
            int x = p % image.cols();
            int y = p / image.cols();
            // Exclude a disk around this corner from potential future candidates
            int u0 = Math.max(x - minDistance, 0) - x;
            int u1 = Math.min(x + minDistance, image.cols() - 1) - x;
            int v0 = Math.max(y - minDistance, 0) - y;
            int v1 = Math.min(y + minDistance, image.rows() - 1) - y;
            for (int v = v0; v <= v1; v++)
                for (int u = u0; u <= u1; u++)
                    if (u * u + v * v <= minDistance * minDistance)
                        energyArray[p + u + v * image.cols()] = -1;
            // A corner is found!
            Core.circle(image, new Point(x, y), minDistance / 2, new Scalar(255, 255, 255), 1);
            Core.circle(energy, new Point(x, y), minDistance / 2, new Scalar(minMaxLoc.maxVal, minMaxLoc.maxVal, minMaxLoc.maxVal), 1);
        }
        System.out.println("nCorners: " + n);
        // Rescale the energy image for display purposes only
        Core.multiply(energy, new Scalar(255.0 / minMaxLoc.maxVal), energy);
        // return image;
        return energy;
    }
}
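One direction I am considering, as an assumption rather than a verified fix: refining the coarse Harris maxima with cornerSubPix, which iterates each point toward the sub-pixel corner location and should counteract the inward offset of the energy peaks. A sketch, where byteImage is the 8-bit grayscale input and coarseCorners is a hypothetical list of the (x, y) maxima found above:
// Hedged sketch: refine the coarse Harris maxima to sub-pixel accuracy.
MatOfPoint2f corners = new MatOfPoint2f(coarseCorners.toArray(new Point[0]));
TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 40, 0.001);
Imgproc.cornerSubPix(byteImage, corners, new Size(11, 11), new Size(-1, -1), criteria);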
