Detecting Hough circles JAVA OpenCV - java

I am confused where the exception is.
This is the first time am coding detection program
public static void chargement(Mat img) {
NByte = (int) (img.total() * img.elemSize());
buffer = new byte[NByte];
img.get(0, 0, buffer);
}
public static Point[] detectCercle(Mat img, int rayon) {
Mat circles = new Mat();
Point[] circlesList = null;
Imgproc.HoughCircles(img, circles, Imgproc.CV_HOUGH_GRADIENT, 1, 60, 200, 20, 30, 0);
System.out.println("#rows " + circles.rows() + " #cols " + circles.cols());
double x = 0.0;
double y = 0.0;
int r = 0;
for (int i = 0; i < circles.rows(); i++) {
double[] data = circles.get(i, 0);
for (int j = 0; j < data.length; j++) {
x = data[0];
y = data[1];
r = (int) data[2];
}
circlesList[i] = new Point(x, y);
}
return circlesList;
}
public static void main(String[] args) {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
Mat matimage1 = Imgcodecs.imread("src/imge1.jpg", Imgcodecs.CV_LOAD_IMAGE_COLOR);
Mat matimage2 = Imgcodecs.imread("src/image2.jpg", Imgcodecs.CV_LOAD_IMAGE_COLOR);
Mat matimage3 = Imgcodecs.imread("src/image3.jpg", Imgcodecs.CV_LOAD_IMAGE_COLOR);
detectCercle(matimage3,2);
}
Exception :
emphasized text
Image Test :

HoughCircles requires an 8-bit image, but the flag CV_LOAD_IMAGE_COLOR converts the image to a color image. Try CV_LOAD_IMAGE_GRAYSCALE to maintain the 8-bit image that you are reading.
See the docs: imread docs

Related

Sobel edge detection creates noise

I tried to implement the Sobel edge detection in java.
It kind of works but I get a lot of seemingly random noise...
I loaded the image as BufferedImages and converted those to greyscaleimages first (via an algorithm i found online). After that I calculate the edges in x and y direction.
This is my code:
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import javax.imageio.ImageIO;
public class Sobel {
static int [] sobel_x = {1, 0, -1,
2, 0, -2,
1, 0, -1};
static int [] sobel_y = {1, 2, 1,
0, 0, 0,
-1, -2, -1};
public static void main(String argc[]) throws IOException {
BufferedImage imgIn = ImageIO.read(new File("test.jpeg"));
BufferedImage imgGrey = greyscale(imgIn);
ImageIO.write(imgGrey, "PNG", new File("greyscale.jpg"));
BufferedImage edgesX = edgeDetection(imgGrey, sobel_x);
ImageIO.write(edgesX, "PNG", new File("edgesX.jpg"));
BufferedImage edgesY = edgeDetection(imgGrey, sobel_y);
ImageIO.write(edgesY, "PNG", new File("edgesY.jpg"));
BufferedImage sobel = sobel(edgesX,edgesY);
ImageIO.write(sobel, "PNG", new File("sobel.jpg"));
}
private static BufferedImage sobel (BufferedImage edgesX, BufferedImage edgesY){
BufferedImage result = new BufferedImage(edgesX.getWidth(), edgesX.getHeight(), BufferedImage.TYPE_BYTE_GRAY);
int height = result.getHeight();
int width = result.getWidth();
for(int x = 0; x < width ; x++){
for(int y = 0; y < height; y++){
int tmp = Math.abs(edgesX.getRGB(x, y) + Math.abs(edgesY.getRGB(x, y)));
result.setRGB(x, y, tmp);
}
}
return result;
}
private static BufferedImage edgeDetection(BufferedImage img, int[] kernel){
int height = img.getHeight();
int width = img.getWidth();
BufferedImage result = new BufferedImage(width -1, height -1, BufferedImage.TYPE_BYTE_GRAY);
for(int x = 1; x < width -1 ; x++){
for(int y = 1; y < height - 1; y++){
int [] tmp = {img.getRGB(x-1, y-1),img.getRGB(x, y-1),img.getRGB(x+1, y-1),img.getRGB(x-1, y),img.getRGB(x, y),img.getRGB(x+1, y),img.getRGB(x-1, y+1),img.getRGB(x, y+1),img.getRGB(x+1, y+1)};
int value = convolution (kernel, tmp);
result.setRGB(x,y, value);
}
}
return result;
}
private static int convolution (int [] kernel, int [] pixel){
int result = 0;
for (int i = 0; i < pixel.length; i++){
result += kernel[i] * pixel[i];
}
return result / 9;
}
private static BufferedImage greyscale(BufferedImage img){
//get image width and height
int width = img.getWidth();
int height = img.getHeight();
//convert to grayscale
for(int y = 0; y < height; y++){
for(int x = 0; x < width; x++){
int p = img.getRGB(x,y);
int a = (p>>24)&0xff;
int r = (p>>16)&0xff;
int g = (p>>8)&0xff;
int b = p&0xff;
//calculate average
int avg = (r+g+b)/3;
//replace RGB value with avg
p = (a<<24) | (avg<<16) | (avg<<8) | avg;
img.setRGB(x, y, p);
}
}
return img;
}
}
And this is an example of the noise I'm talking about:
An image of Lena :
I don't know why I get all this noise.
Any advice is appreciated.
You have to make the following changes:
in convolution take the absolute value
private static int convolution (int [] kernel, int [] pixel){
int result = 0;
for (int i = 0; i < pixel.length; i++){
result += kernel[i] * pixel[i];
}
return (int)(Math.abs(result) / 9);
}
in edgeDetection apply the value to all three channels
private static BufferedImage edgeDetection(BufferedImage img, int[] kernel){
int height = img.getHeight();
int width = img.getWidth();
BufferedImage result = new BufferedImage(width -1, height -1, BufferedImage.TYPE_INT_RGB);
for(int x = 1; x < width -1 ; x++){
for(int y = 1; y < height - 1; y++){
int [] tmp = {img.getRGB(x-1, y-1)&0xff,img.getRGB(x, y-1)&0xff,img.getRGB(x+1, y-1)&0xff,
img.getRGB(x-1, y)&0xff,img.getRGB(x, y)&0xff,img.getRGB(x+1, y)&0xff,img.getRGB(x-1, y+1)&0xff,
img.getRGB(x, y+1)&0xff,img.getRGB(x+1, y+1)&0xff};
int value = convolution (kernel, tmp);
result.setRGB(x,y, 0xff000000|(value<<16)|(value<<8)|value);
}
}
return result;
}
And finally declare the images as INT_RGB type
BufferedImage result = new BufferedImage(edgesX.getWidth(), edgesX.getHeight(), BufferedImage.TYPE_INT_RGB);
BufferedImage result = new BufferedImage(width -1, height -1, BufferedImage.TYPE_INT_RGB);

Edge detection using sobel operator

So I am trying to write a program that uses sobel operator to detect edges in an image. Below is my method.
/**
* Detects edges.
* #param url - filepath to the iamge.
*/
private void detect(String url) {
BufferedImage orgImage = readImage(url);
int width = orgImage.getWidth();
int height = orgImage.getHeight();
BufferedImage resImage = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_BINARY);
WritableRaster inraster = orgImage.getRaster();
WritableRaster outraster = resImage.getRaster();
System.out.println("size: " + width + "X" + height);
// Loop through every pixel, ignores the edges as these will throw out of
//bounds.
for (int i = 1; i < width-2; i++) {
for (int j = 1; j < height-2; j++) {
// Compute filter result, loops over in a
// box pattern.
int sum = 0;
for (int x = -1; x <= 1; x++) {
for (int y = -1; y <= 1; y++) {
int sum1 = i+y;
int sum2 = j+x;
int p = inraster.getSample(sum1, sum2, 0);
sum = sum + p;
}
}
int q = (int) Math.round(sum / 9.0);
if(q<150){
q = 0;
}else{
q = 255;
}
outraster.setSample(i, j, 0, q);
}
}
writeImage(resImage, "jpg", "EdgeDetection " + url);
}
This mostly just gives me a black and white image:
Before
After
I am obviosly calculating the pixel value wrong somehow. I am also note sure what value to use when deciding if the pixel should be black or white.

Image to 2D Array then getImage from it after process

I am learn Image Processing Techniques and have some homework.
In my homework, which asked me to cover a RBG to gray image.
I've converted the images into 2D matrix, do somethings, and when i cover again from 2D matrix to image, some wrong happen.
This my code:
private static SampleModel samM;
public static int[][] imageToArrayPixel(File file) {
try {
BufferedImage img = ImageIO.read(file);
Raster raster = img.getData();
int w = raster.getWidth(), h = raster.getHeight();
int pixels[][] = new int[w][h];
for (int x = 0; x < w; x++) {
for (int y = 0; y < h; y++) {
pixels[x][y] = raster.getSample(x, y, 0);
System.out.print(" " + pixels[x][y]);
}
System.out.println("");
}
samM = raster.getSampleModel();
return pixels;
} catch (Exception e) {
e.printStackTrace();
}
return null;
}
public static java.awt.Image getImage(int pixels[][]) {
int w = pixels.length;
int h = pixels[0].length;
WritableRaster raster = Raster.createWritableRaster(samM, new Point(0, 0));
for (int i = 0; i < w; i++) {
for (int j = 0; j < pixels[i].length; j++) {
if (pixels[i][j] > 128) {
raster.setSample(i, j, 1, 255);
} else {
raster.setSample(i, j, 1, 0);
}
}
}
BufferedImage image = new BufferedImage(w, h, BufferedImage.TYPE_BYTE_GRAY);
image.setData(raster);
File output = new File("check.jpg");
try {
ImageIO.write(image, "jpg", output);
} catch (Exception e) {
e.printStackTrace();
}
return image;
}
public static java.awt.Image getImageWithRBG(Pixel pixels[][]) {
int w = pixels.length;
int h = pixels[0].length;
WritableRaster raster = Raster.createWritableRaster(samM, new Point(0, 0));
int[] pixelValue = new int[3];
for (int i = 0; i < w; i++) {
for (int j = 0; j < h; j++) {
pixelValue[0] = pixels[i][j].red;
pixelValue[1] = pixels[i][j].blue;
pixelValue[2] = pixels[i][j].green;
raster.setPixel(j, i, pixelValue);
}
}
BufferedImage image = new BufferedImage(h, w, BufferedImage.TYPE_CUSTOM);
image.setData(raster);
File output = new File("check.jpg");
try {
ImageIO.write(image, "jpg", output);
} catch (Exception e) {
e.printStackTrace();
}
return image;
}
public static void main(String[] args) throws IOException {
int pixel[][] = imageToArrayPixel(new File("C:\\Users\\KimEricko\\Pictures\\1402373904964_500.jpg"));
getImage(pixel);
}
This my image which i use to covert:
before
and here is the photo that I received after restoration:
after
I don't understand why the picture after restoring contains only 1/3 of the original photograph.
What can I do to fix this?
looks to me like there is a bug in getImageWithRBG, that
raster.setPixel(j, i, pixelValue);
should be
raster.setPixel(i, j, pixelValue);
setPixel and setSample have similar inputs: x then y
I don't know if there are other problems, that is just the first thing I noticed.

Finding pixel position

public static void sample(BufferedImage image) {
int width = image.getWidth();
int height = image.getHeight();
int value[][] = new int[width][height];
int valueR[][] = new int[width][height];
int valueG[][] = new int[width][height];
int valueB[][] = new int[width][height];
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
int pixel = image.getRGB(i, j);
value[i][j] = pixel;
Color c = new Color(pixel);
valueR[i][j]= c.getRed();
valueG[i][j] = c.getGreen();
valueB[i][j] = c.getBlue();
System.out.println("Red value = "+valueR[i][j]);
System.out.println("Green value ="+valueG[i][j]);
System.out.println("Blue value"+valueB[i][j]);
}
}
}
The above code is to store RGB values and pixel color values of an image in an array separately.
public static BigInteger modPow(BigInteger a1, BigInteger e, BigInteger n) {
BigInteger r = 1;
for (int i = e.bitLength() - 1; i >= 0; i--) {
r = (r.multiply(r)).mod(n);
if (e.testBit(i)) {
r = (r.multiply(a1)).mod(n);
}
}
System.out.println("C value = " + r);
int lng = 3;
BigInteger bi = BigInteger.valueOf(lng);
BigInteger a = r.divide(bi);
BigInteger b = r.mod(bi);
System.out.println("pixel position a = " + a);
System.out.println("modulus value b = " + b);
return r;
}
In the above code am finding pixel position where i need to embed the secret bit.so i need to go to that specific pixel to embed the message.But in the previous code am storing pixel color in array value[][].i need to search through the array value[][] to get the pixel position which i got in last code.
Note: a1 is the position of current bit of information file to embed
{e,n} is public key
My question is how to find the pixel positions?
To find the position of a pixel is a simple concept with a complex execution. I've written some code here that takes a BufferedImage and searches through it for a pixel of a specific color.
import java.awt.Color;
import java.awt.image.BufferedImage;
import java.io.File;
import javax.imageio.ImageIO;
import java.io.IOException;
public class pixelSearch {
public static void main(String[] args) {
//I don't know where you're getting your image but i'll get one from file
File image = new File("image.bmp");
try {
BufferedImage imageToSearch = ImageIO.read(image);
Color colorToFind = new Color(255,255,255); //define color to search for with RGB vals 255,255,255
//for more information on constructing colors look here: http://docs.oracle.com/javase/7/docs/api/java/awt/Color.html
int[] pixelCoordinates = pSearch( colorToFind, imageToSearch ); //search for the pixel
System.out.println("Found pixel at (" + pixelCoordinates[0] + "," + pixelCoordinates[1] + ")."); //display coordinates
} catch (IOException e) {
System.out.println(e.toString());
}
}
private static int[] pSearch ( Color c, BufferedImage pic ){
int cVal = c.getRGB(); //get integer value of color we are trying to find
int x1 = 0;
int y1 = 0;
int x2 = pic.getWidth();
int y2 = pic.getHeight();
int[] XArray = new int[x2-x1+1]; //create an array to hold all X coordinates in image
int iterator = 0;
while (iterator <= x2) {
XArray[iterator] = x1 + iterator;
iterator++;
}
int [] YArray = new int[y2-y1+1]; //create an array to hold all Y coordinates in image
iterator = 0;
while (iterator <= y2) {
YArray[iterator] = y1 + iterator;
iterator++;
}
//next we iterate throug all the possible coordinates to check each pixel
for (int yVal : YArray) {
for (int xVal : XArray) {
int color = pic.getRGB(xVal, yVal); //get the color of pixel at coords (xVal, yVal)
if (color == cVal) { //if the color is equal to the one we inputted to the function
int[] cPos = {xVal, yVal}; //store the coordinates
return cPos; //return the coordinates
}
}
}
int[] returnVal = {-1,-1}; //if we didn't find it return -1, -1
return returnVal;
}
}

How to flip an image horizontally

HiI was wondering how to flip and image horizontally, for a practce task I was given a code that reads an image, inverting it to an image indicating it's brightness from 0-5, I had to flip an image.
This is my code of my reading an image and drawing it
public int[][] readImage(String url) throws IOException
{
// fetch the image
BufferedImage img = ImageIO.read(new URL(url));
// create the array to match the dimensions of the image
int width = img.getWidth();
int height = img.getHeight();
int[][] imageArray = new int[width][height];
// convert the pixels of the image into brightness values
for (int x = 0; x < width; x++)
{
for (int y = 0; y < height; y++)
{
// get the pixel at (x,y)
int rgb = img.getRGB(x,y);
Color c = new Color(rgb);
int red = c.getRed();
int green = c.getGreen();
int blue = c.getBlue();
// convert to greyscale
float[] hsb = Color.RGBtoHSB(red, green, blue, null);
int brightness = (int)Math.round(hsb[2] * (PIXEL_CHARS.length - 1));
imageArray[x][y] = brightness;
}
}
return imageArray;
}
public void draw() throws IOException
{
int[][] array = readImage("http://sfpl.org/images/graphics/chicklets/google-small.png");
for(int i=0; i<array.length; i++)
{
for(int pic=0; pic<array[i].length; pic++)
{
if(array[pic][i] == 0)
{
System.out.print("X");
}
else if(array[pic][i] == 1)
{
System.out.print("8");
}
else if(array[pic][i] == 2)
{
System.out.print("0");
}
else if(array[pic][i] == 3)
{
System.out.print(":");
}
else if(array[pic][i] == 4)
{
System.out.print(".");
}
else if (array[pic][i] == 5)
{
System.out.print(" ");
}
else
{
System.out.print("error");
break;
}
}
System.out.println();
}
}
and this is the code I tried to create to horizontally flip it,
void mirrorUpDown()
{
int[][] array = readImage("http://sfpl.org/images/graphics/chicklets/google-small.png");
int i = 0;
for (int x = 0; x < array.length; x++)
{
for (int y = 0; y < array[i].length; y++)
{{
int temp = array[x][y];
array[x][y]= array[-x][y];
array[array[i].length-x][y]=temp;
}
}
}
}
I get an error
unreported exception java.io.IException;
must be caught or declared to be thrown
I'd actually do it by this way...
BufferedImage flip(BufferedImage sprite){
BufferedImage img = new BufferedImage(sprite.getWidth(),sprite.getHeight(),BufferedImage.TYPE_INT_ARGB);
for(int xx = sprite.getWidth()-1;xx>0;xx--){
for(int yy = 0;yy < sprite.getHeight();yy++){
img.setRGB(sprite.getWidth()-xx, yy, sprite.getRGB(xx, yy));
}
}
return img;
}
Just a loop whose x starts at the end of the first image and places its rgba value on the flipped position of the second image. Clean, easy code :)
The function mirrorUpDown() , add a throws IOException there.
Also the function from which you are calling these methods, does that handle exception, does that code enclosed in a try catch block or the function is also set to throw IOException (one of either should be there)
How is your image supposed to know it should get it's data from imageArray ?
instead, you should access the raster of your image and modify the data in it.
void flip(BufferedImage image) {
WritableRaster raster = image.getRaster();
int h = raster.getHeight();
int w = raster.getWidth();
int x0 = raster.getMinX();
int y0 = raster.getMinY();
for (int x = x0; x < x0 + w; x++){
for (int y = y0; y < y0 + h / 2; y++){
int[] pix1 = new int[3];
pix1 = raster.getPixel(x, y, pix1);
int[] pix2 = new int[3];
pix2 = raster.getPixel(x, y0 + h - 1 - (y - y0), pix2);
raster.setPixel(x, y, pix2);
raster.setPixel(x, y0 + h - 1 - (y - y0), pix1);
}
}
return;
}
Sorry about posting this here over a year later but it should aid someone at a stage
try{
java.awt.image.BufferedImage bi = javax.imageio.ImageIO.read(getClass().getResource("Your image bro.jpg")) ;
int[] h = bi.getRGB(0, 0, bi.getWidth(), bi.getHeight(), null, 0, bi.getWidth());
int [] h1 = new int[h.length];
System.out.println(""+h.length);
for(int j = 0;500>j;j++){
for(int i = 500;i>0;i--){
h1[j*500+(500-i)] = h[(j*500)+(i-1)];
}
}
bi.setRGB(0, 0, bi.getWidth(), bi.getHeight(), h1, 0, bi.getWidth());
}
catch(Exception e){e.printStackTrace();}
Lets break the code down
java.awt.image.BufferedImage bi =javax.imageio.ImageIO.read(getClass().getResource("Your image bro.jpg"));
Tries to read the image and stores the read image into the BufferedImage variable bi
int[] h = bi.getRGB(0, 0, bi.getWidth(), bi.getHeight(), null, 0, bi.getWidth());
int [] h1 = new int[h.length];
instantiate two arrays, h is the original RGB Array and h1 will be the horizontally flipped RGB array.
for(int j = 0;500>j;j++){
for(int i = 500;i>0;i--){
h1[j*500+(500-i)] = h[(j*500)+(i-1)];
}
}
Lets look at something in particular more closely
h1[j*500+(500-i)] = h[(j*500)+(i-1)];
Images are scanned from position 0;0 to x.length;y.length
but it is scanned in a coninual array. Thus we use a psuedo-array to manipulate the flipping of the image. j*500 references the Y values and (500-i) references the x values.
bi.setRGB(0, 0, bi.getWidth(), bi.getHeight(), h1, 0, bi.getWidth());
Finally, the image gets stored back into the BufferedImage variable.
Note that the 500 constant is referencing your x resolution of the image. For example, 1920 x 1080 sized image uses a max value of 1920. The logic is yours to decide.

Categories