JavaCV Perspective Correction - java

I converted the OpenCV perspective correction code written in C++ at:
https://opencv-code.com/tutorials/automatic-perspective-correction-for-quadrilateral-objects/
into the following OpenCV code in Java:
public class project
{
static Point2f center;
public static void main(String args[])
{
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
center = new Point2f(0,0);
Mat src = new Mat();
src = Highgui.imread("image.jpg");
if(src == null)
{
System.out.println("Image not loaded");
System.exit(1);
}
Mat bw = new Mat();
Imgproc.cvtColor(src, bw, Imgproc.COLOR_BGR2GRAY);
Imgproc.blur(bw, bw, new Size(3,3));
Imgproc.Canny(bw, bw, 100, 100, 3,true);
Mat lines = new Mat();
int threshold = 70;
int minLineSize = 30;
int lineGap = 10;
Imgproc.HoughLinesP(bw, lines, 1, Math.PI / 180, threshold,
minLineSize, lineGap);
for (int x = 0; x < lines.cols(); x++)
{
double[] vec = lines.get(0, x);
double[] val = new double[4];
val[0] = 0;
val[1] = ((float) vec[1] - vec[3]) / (vec[0] - vec[2]) * -vec[0] + vec[1];
val[2] = src.cols();
val[3] = ((float) vec[1] - vec[3]) / (vec[0] - vec[2]) * (src.cols() - vec[2]) + vec[3];
lines.put(0, x, val);
}
List<Point2f> corners = new ArrayList<Point2f>();
for (int i = 0; i < lines.cols(); i++)
{
for (int j = i+1; j < lines.cols(); j++)
{
Mat m1 = null,m2 = null;
double[] d1 = lines.get(0,i);
double[] d2 = lines.get(0, j);
m1.put(0, i, d1);
m2.put(0, j, d2);
Point2f pt = computeIntersect(m1, m2);
if (pt.x >= 0 && pt.y >= 0)
corners.add(pt);
}
}
List<Point2f> approx = new ArrayList<Point2f>();
List<Point2f> curve;
MatOfPoint2f mat2f = new MatOfPoint2f();
for(int k=0;k<corners.size();++k)
{
Point2f rec = corners.get(k);
Point p = new Point(rec.x,rec.y);
mat2f.fromArray(p);
}
MatOfPoint2f mat2frec = new MatOfPoint2f();
Imgproc.approxPolyDP(mat2f, mat2frec, Imgproc.arcLength(mat2f, true) * 0.02,true);
if (approx.size() != 4)
{
System.out.println("The object is not quadrilateral!");
}
// Get mass center
for (int i = 0; i < corners.size(); i++)
{
center.x = center.x + corners.get(i).x;
center.y = center.y + corners.get(i).y;
}
center.x *= (1. / corners.size());
center.y *= (1. / corners.size());
sortCorners(corners, center);
Mat dst = src.clone();
// Draw lines
for (int i = 0; i < lines.cols(); i++)
{
double[] v = lines.get(0, i);
Scalar cc = new Scalar(0,255,0,0);
Core.line(dst, new Point(v[0], v[1]), new Point(v[2], v[3]), cc);
}
Scalar c1 = new Scalar(0,0,255,0);
Scalar c2 = new Scalar(0,255,0,0);
Scalar c3 = new Scalar(255,0,0,0);
Scalar c4 = new Scalar(255,255,255,0);
// Draw corner points
Core.circle(dst, new Point(corners.get(0).x,corners.get(0).y), 3, c1, 2);
Core.circle(dst, new Point(corners.get(1).x,corners.get(1).y), 3, c2, 2);
Core.circle(dst, new Point(corners.get(2).x,corners.get(2).y), 3, c3, 2);
Core.circle(dst, new Point(corners.get(3).x,corners.get(3).y), 3, c4, 2);
Scalar c5 = new Scalar(0,255,255,0);
// Draw mass center
Core.circle(dst, new Point(center.x,center.y), 3, c5, 2);
Mat quad = Mat.zeros(300, 220, CvType.CV_8UC3);
List<Point2f> quad_pts = new ArrayList<Point2f>();
quad_pts.add(new Point2f(0, 0));
quad_pts.add(new Point2f(quad.cols(), 0));
quad_pts.add(new Point2f(quad.cols(), quad.rows()));
quad_pts.add(new Point2f(0, quad.rows()));
Mat transmtx = Imgproc.getPerspectiveTransform((Mat) corners, (Mat) quad_pts);
Imgproc.warpPerspective(src, quad, transmtx, quad.size());
MatOfByte matOfByte = new MatOfByte();
Highgui.imencode(".jpg", dst, matOfByte);
byte[] byteArray = matOfByte.toArray();
BufferedImage bufImage = null;
try
{
InputStream in = new ByteArrayInputStream(byteArray);
bufImage = ImageIO.read(in);
File outputfile = new File("Image.jpg");
ImageIO.write(bufImage, "jpg", outputfile);
}
catch (Exception e) {
e.printStackTrace();
}
MatOfByte matOfByte2 = new MatOfByte();
Highgui.imencode(".jpg", dst, matOfByte2);
byte[] byteArray2 = matOfByte2.toArray();
BufferedImage bufImage2 = null;
try
{
InputStream in = new ByteArrayInputStream(byteArray2);
bufImage2 = ImageIO.read(in);
File outputfile2 = new File("Quadrilateral.jpg");
ImageIO.write(bufImage, "jpg", outputfile2);
}
catch (Exception e) {
e.printStackTrace();
}
}
static Point2f computeIntersect(Mat es, Mat es2)
{
int size = (int) es.total() * es.channels();
float[] buff = new float[size];
es.get(0, 0, buff);
int size1 = (int) es.total() * es.channels();
float[] buff1 = new float[size1];
es.get(0, 0, buff1);
float x1=buff[0], y1 = buff[1], x2 = buff[2], y2 = buff[3];
float x3 = buff1[0], y3 = buff1[1], x4 = buff1[2], y4 = buff1[3];
float denom;
float d;
d = (Float) null;
d = (float)((x1 - x2) * (y3 - y4)) - ((y1 - y2) * (x3 - x4));
if (d != (Float) null)
{
Point2f pt = new Point2f();
pt.x = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / d;
pt.y = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / d;
return pt;
}
else
return new Point2f(-1, -1);
}
static void sortCorners(List<Point2f> corners,Point2f center)
{
List<Point2f> top = null, bot = null;
for (int i = 0; i < corners.size(); i++)
{
if (corners.get(i).y < center.y)
top.add(corners.get(i));
else
bot.add(corners.get(i));
}
Point2f tl = top.get(0).x > top.get(1).x ? top.get(1) : top.get(0);
Point2f tr = top.get(0).x > top.get(1).x ? top.get(0) : top.get(1);
Point2f bl = bot.get(0).x > bot.get(1).x ? bot.get(1) : bot.get(0);
Point2f br = bot.get(0).x > bot.get(1).x ? bot.get(0) : bot.get(1);
corners.clear();
corners.add(tl);
corners.add(tr);
corners.add(br);
corners.add(bl);
}
}
I'm having trouble converting a List<Point2f> to a MatOfPoint2f. Because of that, the arcLength(..) call doesn't work and the code as a whole doesn't run. I'm hoping someone can help.
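For reference, a minimal sketch of the conversion being asked about, assuming the official OpenCV Java bindings, which represent 2D points with org.opencv.core.Point rather than Point2f; the variable names and coordinates are illustrative:

    List<Point> cornerList = new ArrayList<Point>();
    cornerList.add(new Point(10, 20));
    cornerList.add(new Point(200, 25));
    cornerList.add(new Point(210, 180));
    cornerList.add(new Point(5, 190));

    // Build the MatOfPoint2f directly from the list (or from a Point[] array)
    MatOfPoint2f mat2f = new MatOfPoint2f();
    mat2f.fromList(cornerList);            // alternative: new MatOfPoint2f(cornerList.toArray(new Point[0]))

    MatOfPoint2f approxCurve = new MatOfPoint2f();
    Imgproc.approxPolyDP(mat2f, approxCurve, Imgproc.arcLength(mat2f, true) * 0.02, true);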

This is part of the implementation I used in my project. I already had the exact corner points from an algorithm I developed, but the rest is given in this code. Don't use Point2f; use Point arrays and then convert them into MatOfPoint2f.
The JAR file containing Imshow can be downloaded from here; it is very handy for checking your output at any point. Add this package to your program: https://github.com/master-atul/ImShow-Java-OpenCV
Details regarding approxPolyDP:
http://docs.opencv.org/java/org/opencv/imgproc/Imgproc.html#approxPolyDP%28org.opencv.core.MatOfPoint2f,org.opencv.core.MatOfPoint2f,double,boolean%29
You also don't have to use arcLength; just give an approximate value for epsilon depending on how clean your input is (for example 2.0 or 3.0).
(sort is the function used to sort the corners.)
int a[][],imgarr[][];
Point p[];
BufferedImage img;
int w,h;
void sort()
{
int x = (a[0][0] + a[1][0] + a[2][0] + a[3][0])/4;
int y = (a[0][1] + a[1][1] + a[2][1] + a[3][1])/4;
int j = 0;
int order[] = new int[4];
double tans[] = new double[4];
double tans1[] = new double[4];
int tmpar[][] = new int[4][2];
p = new Point[4];
for(int i = 0;i<4;i++)
{
tans1[i] = tans[i] = Math.atan2(a[i][1] - y , a[i][0] - x);//finding angles for sorting corners
}
Arrays.sort(tans1);
for(int i = 0;i<2;i++)
{
double temp = tans1[i];
tans1[i]= tans1[3-i];
tans1[3-i] = temp;
}
for(int i=0;i<4;i++)
{
for(j = 0;j<4;j++)
{
if(tans1[i]==tans[j])
break;
}
order[i] = j;
}
for(int i = 0;i<4;i++)
{
for(j=0;j<2;j++)
{
tmpar[i][j] = a[i][j];
}
}
for(int i = 0;i<4;i++)
{
for(j = 0;j<2;j++)
{
a[i][j] = tmpar[order[i]][j];
}
}
p[0] = new Point(a[0][1],a[0][0]);
p[1] = new Point(a[1][1],a[1][0]);
p[2] = new Point(a[2][1],a[2][0]);
p[3] = new Point(a[3][1],a[3][0]);
}
void transform() throws Exception
{
Point farray[] = new Point[4];
try
{
img = ImageIO.read(new File("C:/Users/Documents/a.jpg"));
}
catch(Exception r)
{
System.out.println("no file");
}
PixelGrabber pg;
if(img==null)
{
return;
}
w = img.getWidth();
h = img.getHeight();
imgarr = new int[h][w];
try
{
for(int i = 0; i < h ; i++)
{
pg = new PixelGrabber(img,0,i,w,1,imgarr[i],0,w);
pg.grabPixels();
}
changeto256();
}
catch(Exception e)
{
System.out.println("here "+e);
}
int m=0;
byte array[] = new byte[w*h];
int iar[] = new int[w*h];
for(int i = 0 ; i < h ; i++)
{
for(int j = 0 ; j < w ; j++)
{
array[m++]= (byte)imgarr[i][j];
}
}
farray[3] = new Point(0,0);
farray[0] = new Point(w,0);
farray[1] = new Point(w,h);
farray[2] = new Point(0,h);
Mat mat = new Mat(h,w, CvType.CV_8U);
mat.put(0, 0, array);
Imshow is = new Imshow("try");
MatOfPoint2f quad = new MatOfPoint2f(p);
MatOfPoint2f rect = new MatOfPoint2f(farray);
Mat transmtx = Imgproc.getPerspectiveTransform(quad,rect);
Mat output = new Mat(w,h,CvType.CV_8U);
Imgproc.warpPerspective(mat, output, transmtx, new Size(w,h), Imgproc.INTER_CUBIC);
is.showImage(output);
MatOfByte matOfByte = new MatOfByte();
Highgui.imencode(".jpg", output, matOfByte);
byte[] byteArray = matOfByte.toArray();
File f = new File("retrieve1.jpg");
BufferedImage img1 =null;
InputStream in = new ByteArrayInputStream(byteArray);
img1 = ImageIO.read(in);
WritableRaster raster = (WritableRaster)img1.getData();
raster.setDataElements(0,0,byteArray);
img1.setData(raster);
try
{
ImageIO.write(img1,"jpg",f);
}
catch(Exception e)
{}
}
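Tying this back to the original question, here is a minimal sketch, under the assumption that the four corner points are already known, of building MatOfPoint2f objects from Point arrays and using a fixed epsilon with approxPolyDP, as suggested above; the corner values and names are illustrative:

    // Source corners found by your own detection step (illustrative values)
    Point[] srcCorners = {
        new Point(40, 60), new Point(400, 50), new Point(420, 300), new Point(30, 310)
    };
    MatOfPoint2f srcMat = new MatOfPoint2f(srcCorners);

    // Fixed epsilon instead of an arcLength-based one
    MatOfPoint2f approx = new MatOfPoint2f();
    Imgproc.approxPolyDP(srcMat, approx, 3.0, true);

    // Destination rectangle for the warp
    Point[] dstCorners = {
        new Point(0, 0), new Point(300, 0), new Point(300, 220), new Point(0, 220)
    };
    MatOfPoint2f dstMat = new MatOfPoint2f(dstCorners);

    // MatOfPoint2f extends Mat, so it can be passed straight in
    Mat transmtx = Imgproc.getPerspectiveTransform(srcMat, dstMat);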

Related

BufferedImage Kernel - Image Processing

I would like to create an image filter and have read the following Wikipedia article. I wanted to test the example from Wikipedia but got an incorrect result.
https://en.wikipedia.org/wiki/Kernel_(image_processing)
(For some reason I cannot upload images)
Result:
https://imgur.com/FiYFuZS
Expected result:
https://upload.wikimedia.org/wikipedia/commons/2/20/Vd-Rige1.png
I've also read the following source and still do not know how to fix it :/
Bluring a Java buffered image
URL url = new URL("https://upload.wikimedia.org/wikipedia/commons/5/50/Vd-Orig.png");
BufferedImage image = ImageIO.read(url);
float[][] kernel = {
{0, -1, 0},
{-1, 4, -1},
{0, -1, 0}
};
int w = image.getWidth();
int h = image.getHeight();
// Center point
int cx = kernel.length / 2;
int cy = kernel[0].length / 2;
BufferedImage cImage = new BufferedImage(w, h, image.getType());
for (int x = 0; x < w; x++) {
for (int y = 0; y < h; y++) {
float r = 0;
float g = 0;
float b = 0;
for (int dx = -cx; dx <= cx; dx++) {
for (int dy = -cy; dy <= cy; dy++) {
float e = kernel[dx + cx][dy + cy];
int xImage = x + dx;
int yImage = y + dy;
if (xImage < 0 || xImage >= w || yImage < 0 || yImage >= h) {
continue;
}
Color pixel = new Color(image.getRGB(xImage, yImage));
r += pixel.getRed() * e;
g += pixel.getGreen() * e;
b += pixel.getBlue() * e;
}
}
// Boundaries
r = Math.min(255, Math.max(0, r));
g = Math.min(255, Math.max(0, g));
b = Math.min(255, Math.max(0, b));
Color newPixel = new Color((int) r, (int) g, (int) b);
cImage.setRGB(x, y, newPixel.getRGB());
}
}
ImageIO.write(cImage, "png", Files.newOutputStream(Path.of("c.png")));

drawImage error; no method found for drawImage

/**
* The purpose of this program is to make an image and turn it into a kaleidoscope
*
* @author (Danny Meijo)
* @version (07/27/2017)
*/
import java.awt.*;
import java.awt.geom.AffineTransform;
import java.awt.geom.Ellipse2D;
public class KaleidoscopeImage
{
private Picture canvas = null;
private Picture canvas2 = null;
private Picture pictureObj = null;
private Picture scaledPicture = null;
private Picture clippedPicture = null;
private Graphics g = null;
private Graphics g2 = null;
private Graphics gV2 = null;
private Graphics g2V2 = null;
private Graphics gV3 = null;
private Graphics g2V3 = null;
KaleidoscopeImage(Picture Canvas, Picture image, Picture Canvas2)
{
canvas = Canvas;
canvas2 = Canvas2;
pictureObj = image;
g = canvas.getGraphics();
g2 = (Graphics2D)g;
}
public Picture firstPanel()
{
g2.drawImage(pictureObj.getImage(), 0, canvas.getHeight() / 2, null);
Pixel bottomLeftPixel = null;
Pixel topRightPixel = null;
Color sourceColor1 = null;
for(int ty = 0, by = canvas.getHeight(); ty < canvas.getHeight() / 2; ty++, by--)
{
for(int lx = 0, rx = canvas.getWidth(); lx < canvas.getWidth() / 2; lx++, rx--)
{
bottomLeftPixel = canvas.getPixel(lx, by - 1);
sourceColor1 = bottomLeftPixel.getColor();
topRightPixel = canvas.getPixel(rx - 1, ty);
topRightPixel.setColor(sourceColor1);
}
}
Pixel sourcePixel = null;
Pixel targetPixel = null;
Color sourceColor2 = null;
Color targetColor = null;
for(int y = 0; y < canvas.getHeight() / 2; y++)
{
for(int lx = 0, rx = canvas.getWidth(); lx < canvas.getWidth() / 2; lx++, rx--)
{
sourcePixel = canvas.getPixel(rx - 1,y);
sourceColor2 = sourcePixel.getColor();
targetPixel = canvas2.getPixel(lx,y);
targetPixel.setColor(sourceColor2);
}
}
return canvas2;
}
public Picture secondPanel()
{
Pixel leftPixel = null;
Pixel rightPixel = null;
Color sourceColor = null;
for(int y = 0; y < canvas2.getHeight() / 2; y++)
{
for(int lx = 0, rx = canvas2.getWidth(); lx < canvas2.getWidth() / 2; lx++, rx--)
{
leftPixel = canvas2.getPixel(lx,y);
sourceColor = leftPixel.getColor();
rightPixel = canvas2.getPixel(rx - 1, y);
rightPixel.setColor(sourceColor);
}
}
return canvas2;
}
public Picture thirdPanel()
{
Pixel topPixel = null;
Pixel bottomPixel = null;
Color sourceColor = null;
for(int lx = 0, rx = canvas2.getWidth(); lx < canvas2.getWidth() / 2; lx++, rx--)
{
for(int ty = 0, by = canvas2.getHeight(); ty < canvas2.getHeight() / 2; ty++, by--)
{
topPixel = canvas2.getPixel(rx - 1, ty);
sourceColor = topPixel.getColor();
bottomPixel = canvas2.getPixel(rx - 1, by - 1);
bottomPixel.setColor(sourceColor);
}
}
return canvas2;
}
public Picture fourthPanel()
{
Pixel leftPixel = null;
Pixel rightPixel = null;
Color sourceColor = null;
for(int lx = 0, rx = canvas2.getWidth(); lx < canvas2.getWidth() / 2; lx++, rx--)
{
for(int ty = 0, by = canvas2.getHeight(); ty < canvas2.getHeight() / 2; ty++, by--)
{
leftPixel = canvas2.getPixel(rx - 1, by - 1);
sourceColor = leftPixel.getColor();
rightPixel = canvas2.getPixel(lx, by - 1);
rightPixel.setColor(sourceColor);
}
}
return canvas2;
}
public Picture scalePicture(double xFactor, double yFactor)
{
AffineTransform scaleTransform = new AffineTransform();
scaleTransform.scale(xFactor, yFactor);
scaledPicture = new Picture((int)(canvas2.getWidth() * xFactor), (int)(canvas2.getHeight() * yFactor));
gV2 = scaledPicture.getGraphics();
g2V2 = (Graphics2D)gV2;
g2V2.drawImage(canvas2.getImage(), scaleTransform, null);
return scaledPicture;
}
public Picture clipPicture(Color color)
{
Picture canvas3 = new Picture(canvas2.getWidth(), canvas2.getHeight());
Pixel sourcePixel = null;
Pixel targetPixel = null;
Color sourceColor = null;
Color targetColor = null;
for(int y = 0; y < canvas2.getHeight(); y++)
{
for(int x = 0; x < canvas.getWidth(); x++)
{
sourcePixel = canvas2.getPixel(x,y);
sourceColor = sourcePixel.getColor();
targetPixel = canvas3.getPixel(x,y);
targetPixel.setColor(sourceColor);
}
}
gV3 = canvas3.getGraphics();
g2V3 = (Graphics2D)gV3;
canvas3.setAllPixelsToAColor(color);
Ellipse2D.Double clip = new Ellipse2D.Double(0,0, canvas3.getHeight(), canvas3.getWidth());
g2V3.setClip(clip);
g2V3.drawImage(canvas2.getImage(), 0, 0, canvas3.getHeight(), canvas3.getWidth(), null);
return canvas3;
}
}
Sorry, this is my first post, and I am also very new to Java, since I'm learning it over the summer. I was not sure how to cut this down to just the parts I need, but the problem I'm having is in the scalePicture method. I was copying what I saw in a demo program to scale the image down to 0.75 x 0.75, but in my program there is an error on the drawImage call, whereas the demo program had no error.
If you are curious, this is the demo that I was copying:
import java.awt.geom.AffineTransform;
import java.awt.Graphics;
import java.awt.Graphics2D;
class ScalingDemo
{
private Picture originalPicture = null;
private Picture newPicture = null;
private Graphics g = null;
private Graphics2D g2 = null;
ScalingDemo(Picture pic)
{
originalPicture = pic;
}
public Picture scalePicture(double xFactor, double yFactor)
{
AffineTransform scaleTransform = new AffineTransform();
scaleTransform.scale(xFactor, yFactor);
newPicture = new Picture((int)(originalPicture.getWidth()*xFactor), (int)(originalPicture.getHeight()*yFactor));
g = newPicture.getGraphics();
g2 = (Graphics2D)g;
g2.drawImage(originalPicture.getImage(), scaleTransform, null);
return newPicture;
}
}
Looks like the error is at this line:
g2V2.drawImage(canvas2.getImage(), scaleTransform, null); - there's no such method in the java.awt.Graphics interface.
You should use a method with another signature:
drawImage(Image img, int x, int y, ImageObserver observer) - see here
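Alternatively, a minimal sketch of why the demo compiles, assuming the same Picture class: ScalingDemo declares its field as Graphics2D, so the drawImage(Image, AffineTransform, ImageObserver) overload of Graphics2D is visible at compile time. Doing the same in KaleidoscopeImage would also make the call compile:

    // Change the declared type of the field used for drawing:
    private Graphics gV2 = null;
    private Graphics2D g2V2 = null;   // was: private Graphics g2V2 = null;

    public Picture scalePicture(double xFactor, double yFactor) {
        AffineTransform scaleTransform = new AffineTransform();
        scaleTransform.scale(xFactor, yFactor);
        scaledPicture = new Picture((int) (canvas2.getWidth() * xFactor),
                                    (int) (canvas2.getHeight() * yFactor));
        gV2 = scaledPicture.getGraphics();
        g2V2 = (Graphics2D) gV2;      // the cast was already there; the declared type is what matters
        g2V2.drawImage(canvas2.getImage(), scaleTransform, null);   // now resolves against Graphics2D
        return scaledPicture;
    }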

How can I set the value of c when generating a Mandelbrot fractal?

The equation for generating a Mandelbrot fractal is Z(n+1) = Zn^2 + C. The issue is that, in a computer program, C is derived from the zoom/resolution and the location on screen. My question is: how can I instead get a fractal like this one:
Wolfram (equation f(z) = sin(z/c), z0 = c)
My code (from Rosetta Code):
public class MandelbrotSet extends JFrame {
private static final long serialVersionUID = 5513426498262284949L;
private final int MAX_ITER = 570;
private final double ZOOM = 150;
private BufferedImage image;
private double zx, zy, cX, cY, tmp;
public MandelbrotSet() {
super("Mandelbrot Set");
setBounds(100, 100, 800, 600);
setResizable(false);
setDefaultCloseOperation(EXIT_ON_CLOSE);
image = new BufferedImage(getWidth(), getHeight(), BufferedImage.TYPE_INT_RGB);
for (int y = 0; y < getHeight(); y++) {
for (int x = 0; x < getWidth(); x++) {
zx = zy = 0;
cX = (x - 400) / ZOOM;
cY = (y - 300) / ZOOM;
int iter = MAX_ITER;
while (zx * zx + zy * zy < 4 && iter > 0) {
tmp = zx * zx - zy * zy + cX;
zy = 2.0 * zx * zy + cY;
zx = tmp;
iter--;
}
image.setRGB(x, y, iter | (iter << 8));
}
}
}
@Override
public void paint(Graphics g) {
g.drawImage(image, 0, 0, this);
}
public static void main(String[] args) {
new MandelbrotSet().setVisible(true);
}
}
By trigonometric identities,
sin(A + i*B) = sin(A)*cos(i*B) + cos(A)*sin(i*B)
             = sin(A)*cosh(B) + i*cos(A)*sinh(B)
and for the quotient, using z = x + i*y and c = a + i*b,
(x + i*y)/(a + i*b) = (x + i*y)*(a - i*b)/(a*a + b*b)
so that for the sine expression above
A = (a*x + b*y)/(a*a + b*b)
B = (a*y - b*x)/(a*a + b*b)
In JavaScript, a small script to generate this fractal can look like this:
function cosh(x) { return 0.5*(Math.exp(x)+Math.exp(-x)); }
function sinh(x) { return 0.5*(Math.exp(x)-Math.exp(-x)); }
function rgb(r,g,b) { return "rgb("+r+","+g+","+b+")"; }
var colors = new Array(24);
for(var k=0; k<8; k++) {
colors[ k] = rgb(k*64,(7-k)*64,(7-k)*64);
colors[ 8+k] = rgb((7-k)*64,k*64,(7-k)*64);
colors[16+k] = rgb((7-k)*64,(7-k)*64,k*64);
}
var cvs = document.getElementById('sine-fractal');
var ctx = cvs.getContext('2d');
var cx = 0.0, cy = 0.0;
var dx = 1.0;
var tiles = 100;
var scale = Math.min(cvs.width, cvs.height) / tiles;
ctx.scale(scale, scale);
function localx(i) { return cx-dx + 2*i*dx/tiles; }
function localy(j) { return cy-dx + 2*j*dx/tiles; }
for (var i = 0; i < tiles; i++) {
var a = localx(i);
for (var j = 0; j < tiles; j++) {
var b = localy(j);
var r2 = a*a + b*b;
var x = a, y = b;
var rounds = 0;
var max = 500;
while (x * x + y * y < 4 && rounds < max) {
var u = (a*x + b*y) / r2, v = (a*y - b*x) / r2;
x = Math.sin(u) * cosh(v);
y = Math.cos(u) * sinh(v);
rounds++;
}
ctx.fillStyle = colors[rounds % 24];
ctx.fillRect(i, j, 1, 1);
}
}
<canvas id='sine-fractal' width=200 height=200></canvas>
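For the Java program in the question, the same escape-time loop could look like the sketch below; the helper name is illustrative, and it is meant to replace the zx/zy loop inside the MandelbrotSet constructor, with cX/cY playing the role of a/b:

    // Escape-time count for f(z) = sin(z/c) with z0 = c, where c = a + i*b is the pixel.
    static int sineEscapeCount(double a, double b, int maxIter) {
        double r2 = a * a + b * b;              // |c|^2, denominator of z/c
        if (r2 == 0) return 0;                  // skip the origin to avoid division by zero
        double x = a, y = b;                    // z0 = c
        int rounds = 0;
        while (x * x + y * y < 4 && rounds < maxIter) {
            double u = (a * x + b * y) / r2;    // Re(z/c)
            double v = (a * y - b * x) / r2;    // Im(z/c)
            x = Math.sin(u) * Math.cosh(v);     // Re(sin(z/c))
            y = Math.cos(u) * Math.sinh(v);     // Im(sin(z/c))
            rounds++;
        }
        return rounds;                          // map this to a color, e.g. rounds % palette.length
    }

The returned count then takes the place of iter in the image.setRGB(x, y, ...) call.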

Detecting Hough circles JAVA OpenCV

I am confused about where the exception comes from.
This is the first time I am coding a detection program.
public static void chargement(Mat img) {
NByte = (int) (img.total() * img.elemSize());
buffer = new byte[NByte];
img.get(0, 0, buffer);
}
public static Point[] detectCercle(Mat img, int rayon) {
Mat circles = new Mat();
Point[] circlesList = null;
Imgproc.HoughCircles(img, circles, Imgproc.CV_HOUGH_GRADIENT, 1, 60, 200, 20, 30, 0);
System.out.println("#rows " + circles.rows() + " #cols " + circles.cols());
double x = 0.0;
double y = 0.0;
int r = 0;
for (int i = 0; i < circles.rows(); i++) {
double[] data = circles.get(i, 0);
for (int j = 0; j < data.length; j++) {
x = data[0];
y = data[1];
r = (int) data[2];
}
circlesList[i] = new Point(x, y);
}
return circlesList;
}
public static void main(String[] args) {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
Mat matimage1 = Imgcodecs.imread("src/imge1.jpg", Imgcodecs.CV_LOAD_IMAGE_COLOR);
Mat matimage2 = Imgcodecs.imread("src/image2.jpg", Imgcodecs.CV_LOAD_IMAGE_COLOR);
Mat matimage3 = Imgcodecs.imread("src/image3.jpg", Imgcodecs.CV_LOAD_IMAGE_COLOR);
detectCercle(matimage3,2);
}
Exception: (not shown in the post)
Test image: (not shown in the post)
HoughCircles requires an 8-bit, single-channel (grayscale) image, but the flag CV_LOAD_IMAGE_COLOR loads the image as a 3-channel color image. Try CV_LOAD_IMAGE_GRAYSCALE to keep the 8-bit, single-channel image that you are reading.
See the docs: imread docs
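As a rough sketch of that suggestion, reusing the constants from the posted code (the file path is illustrative):

    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    // Load as 8-bit, single-channel so HoughCircles accepts it directly
    Mat gray = Imgcodecs.imread("src/image3.jpg", Imgcodecs.CV_LOAD_IMAGE_GRAYSCALE);
    Mat circles = new Mat();
    Imgproc.HoughCircles(gray, circles, Imgproc.CV_HOUGH_GRADIENT, 1, 60, 200, 20, 30, 0);

    // Separately, detectCercle writes into circlesList while it is still null;
    // allocate it before filling it. circles is typically 1 x N with 3 channels.
    Point[] circlesList = new Point[circles.cols()];
    for (int i = 0; i < circles.cols(); i++) {
        double[] data = circles.get(0, i);   // data = {x, y, radius}
        circlesList[i] = new Point(data[0], data[1]);
    }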

Texture Mapping is reversed! Java3D

I could use some extra eyes to help me see why my texture map is reversed on my object. It prints backwards, as if seen in a mirror. I had suspected that reversing the indices would help, but that just changed where the texture was mapped without undoing the reversal.
public class MobiusBanner extends Applet {
public static void main(String[] args) {
new MainFrame(new MobiusBanner(), 800, 600);
}
@Override
public void init() {
GraphicsConfiguration gc = SimpleUniverse.getPreferredConfiguration();
Canvas3D canvas = new Canvas3D(gc);
this.setLayout(new BorderLayout());
this.add(canvas, BorderLayout.CENTER);
SimpleUniverse su = new SimpleUniverse(canvas);
su.getViewingPlatform().setNominalViewingTransform();
BranchGroup bg = createSceneGraph();
bg.compile();
su.addBranchGraph(bg);
}
private BranchGroup createSceneGraph() {
BranchGroup root = new BranchGroup();
Shape3D shape = new Shape3D();
shape.setGeometry(mobius().getIndexedGeometryArray());
//Scaling transform
Transform3D tr = new Transform3D();
tr.setScale(0.5);
//Spin transform group
TransformGroup spin = new TransformGroup();
spin.setCapability(TransformGroup.ALLOW_TRANSFORM_WRITE);
spin.setCapability(TransformGroup.ALLOW_TRANSFORM_READ);
root.addChild(spin);
//Set appearance
Appearance ap = createTextureAppearance();
ap.setPolygonAttributes(new PolygonAttributes(PolygonAttributes.POLYGON_FILL,
PolygonAttributes.CULL_BACK, 0));
//Set materials
Material mat = new Material();
mat.setLightingEnable(true);
mat.setShininess(30);
ap.setMaterial(mat);
//Overarching Transform group
TransformGroup tg = new TransformGroup(tr);
tg.addChild(shape);
spin.addChild(tg);
shape.setAppearance(ap);
//Set rotation
MouseRotate rotator = new MouseRotate(spin);
BoundingSphere bounds = new BoundingSphere();
rotator.setSchedulingBounds(bounds);
spin.addChild(rotator);
//Set translation
MouseTranslate translator = new MouseTranslate(spin);
translator.setSchedulingBounds(bounds);
spin.addChild(translator);
//Set zoom
MouseZoom zoom = new MouseZoom(spin);
zoom.setSchedulingBounds(bounds);
spin.addChild(zoom);
//Set background
Background background = new Background(1.0f, 1.0f, 1.0f);
background.setApplicationBounds(bounds);
root.addChild(background);
//Set lighting
AmbientLight light = new AmbientLight(true, new Color3f(Color.BLACK));
light.setInfluencingBounds(bounds);
root.addChild(light);
PointLight ptlight = new PointLight(new Color3f(Color.white),
new Point3f(0.5f, 0.5f, 1f),
new Point3f(1f, 0.2f, 0f));
ptlight.setInfluencingBounds(bounds);
root.addChild(ptlight);
return root;
}//Close branchgroup method
//Create the Mobius shape
private GeometryInfo mobius() {
int m = 100; //number of row points
int n = 100; //number of col points
int p = 4 * ((m - 1) * (n - 1)); //faces * points per face
IndexedQuadArray iqa = new IndexedQuadArray(m * n,
GeometryArray.COORDINATES | GeometryArray.TEXTURE_COORDINATE_2, p);
Point3d[] vertices = new Point3d[m * n];
int index = 0;
//Create vertices
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
double u = i * (4 * (Math.PI)) / (m - 1);
double v = -0.3 + (j * (0.6 / (n - 1)));
double x = (1 + v * Math.cos(u / 2)) * Math.cos(u);
double y = (1 + v * Math.cos(u / 2)) * Math.sin(u);
double z = v * Math.sin(u / 2);
vertices[index] = new Point3d(x, y, z);
index++;
}//close nested for loop
}//close for loop
index = 0;
//set texture coordinates
TexCoord2f[] tex = new TexCoord2f[m * n];
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
tex[index] = new TexCoord2f();
tex[index] = new TexCoord2f(i * 1f / m, j * 1f / n);
index++;
}
}
iqa.setCoordinates(0, vertices);
iqa.setTextureCoordinates(0, 0, tex);
index = 0;
//set index for coordinates
int[] texIndices = new int[p];
for (int i = 0; i < m - 1; i++) {
for (int j = 0; j < n - 1; j++) {
iqa.setCoordinateIndex(index, i * m + j);
texIndices[index] = i * m + j;
index++;
iqa.setCoordinateIndex(index, i * m + j + 1);
texIndices[index] = i * m + j + 1;
index++;
iqa.setCoordinateIndex(index, (i + 1) * m + j + 1);
texIndices[index] = (i + 1) * m + j + 1;
index++;
iqa.setCoordinateIndex(index, (i + 1) * m + j);
texIndices[index] = (i + 1) * m + j;
index++;
}//close nested for loop
}//close for loop
iqa.setTextureCoordinateIndices(0, 0, texIndices);
//create geometry info and generate normals for shape
GeometryInfo gi = new GeometryInfo(iqa);
NormalGenerator ng = new NormalGenerator();
ng.generateNormals(gi);
return gi;
}
Appearance createTextureAppearance() {
Appearance ap = new Appearance();
BufferedImage bi = new BufferedImage(1024, 128,
BufferedImage.TYPE_INT_ARGB);
Graphics2D g2 = (Graphics2D) bi.getGraphics();
g2.setColor(Color.white);
g2.fillRect(0, 0, 1024, 128);
g2.setFont(new Font("Serif", Font.BOLD, 36));
g2.setColor(new Color(200, 0, 0));
g2.drawString("Mobius Strip", 0, 100);
ImageComponent2D image = new ImageComponent2D(ImageComponent2D.FORMAT_RGBA, bi);
Texture2D texture = new Texture2D(Texture.BASE_LEVEL, Texture.RGBA,
image.getWidth(), image.getHeight());
texture.setImage(0, image);
texture.setMagFilter(Texture.BASE_LEVEL_LINEAR);
ap.setTexture(texture);
//Combine Texture and Lighting
TextureAttributes textatt = new TextureAttributes();
textatt.setTextureMode(TextureAttributes.COMBINE);
ap.setTextureAttributes(textatt);
ap.setMaterial(new Material());
return ap;
}
}
You want to reverse only one of the texture coordinates, not both. Try changing
tex[index] = new TexCoord2f(i * 1f / m, j * 1f / n);
to
tex[index] = new TexCoord2f((m-1-i) * 1f / m, j * 1f / n);
or
tex[index] = new TexCoord2f(i * 1f / m, (n-1-j) * 1f / n);
