Related
I am displaying different filtered views of an image. For example, sharpen is one of them, but I cannot get it to show the sharpened result and cannot figure out how to complete it. I used the professor's code as an example.
There are others:
blur 3 x 3
blur 5 x 5
edge detection
grayscale
RGB > GRB
Zoom in
Zoom out
I tried starting the for loops at 1 instead of 0.
Here is my code:
PImage source;
PImage destination;
int w = 80;
float[][] matrix_3_3_average = {
{1.0/9.0, 1.0/9.0, 1.0/9.0 },
{1.0/9.0, 1.0/9.0, 1.0/9.0 },
{1.0/9.0, 1.0/9.0, 1.0/9.0 }
};
float[][] matrix_3_3_sharpen =
{ { -1, -1, -1 },
{ -1, 9, -1 },
{ -1, -1, -1 } };
void setup() {
size(200, 200);
source = loadImage("sunflower.jpg");
destination = createImage(source.width, source.height, RGB);
}
void draw() {
destination.loadPixels(); // Tell Processing that we want to read the pixels of the output window
source.loadPixels();
int xStart = constrain(mouseX - w/2, 0, width);
int xEnd = constrain(mouseX + w/2, 0, width);
int yStart = constrain(mouseY - w/2, 0, height);
int yEnd = constrain(mouseY + w/2, 0, height);
if (keyPressed) {
if (key == '0') {
image(source, 0, 0);
for (int x = 1; x < source.width; x++) {
for (int y = 1; y < source.height; y++) {
int loc = x + y * source.width;
if ((x > xStart) && (x < xEnd) && (y > yStart) && (y < yEnd))
destination.pixels[loc] = convolution(x, y, matrix_3_3_average, 3, source);
else
// set the color of the corresponding pixel to the output window to the color of the pixel to the input image
destination.pixels[loc] = source.pixels[loc];
}
}
} else if (key == '1') {
for (int x = 1; x < source.width; x++) {
for (int y = 1; y < source.height; y++) {
int loc = x * y * source.width;
if ((x > xStart) && (x <xEnd) && (y > yStart) && (y < yEnd))
destination.pixels[loc] = convolution(x, y, matrix_3_3_sharpen, 3, source);
else
destination.pixels[loc] = source.pixels[loc];
}
}
}
}
}
color convolution(int x, int y, float[][] matrix, int matrixSize, PImage img) {
int offset = (matrixSize - 1)/2;
float r = 0;
float g = 0;
float b = 0;
for (int i = 0; i < matrixSize; i++) {
for (int j = 0; j < matrixSize; j++) {
int xLoc = x + i - offset;
int yLoc = y + j - offset;
int loc = xLoc * yLoc * img.width;
loc = constrain(loc, 0, img.pixels.length-1);
r = r + matrix[i][j] * red(img.pixels[loc]);
g = g + matrix[i][j] * green(img.pixels[loc]);
b = b + matrix[i][j] * blue(img.pixels[loc]);
}
}
return color(r, g, b);
}
There is an issue in the way the index of a pixel in the image plane is calculated in the function convolution. The index of a pixel is x + y * width rather than x * y * width:
int loc = xLoc * yLoc * img.width; // wrong
int loc = xLoc + yLoc * img.width; // correct
Copy the source image to the destination at startup and continuously draw the destination image:
void setup() {
size(200, 200);
source = loadImage("C:/temp/flower.jpg"); //source = loadImage("sunflower.jpg");
destination = createImage(source.width, source.height, RGB);
destination.copy(source, 0, 0, source.width, source.height, 0, 0, source.width, source.height);
}
void draw() {
// [...]
image(destination, 0, 0);
}
Use the keyPressed() event to detect when a key is pressed and start the corresponding image filter:
boolean average = key == '0';
boolean sharpen = key == '1';
void keyPressed() {
average = key == '0';
sharpen = key == '1';
}
When an image filter is performed, first copy the source image to the destination image, then update the pixels in the filtered region. Finally, copy the changed destination image back to the source image so it is ready for the next filter operation:
destination.copy(source, 0, 0, source.width, source.height, 0, 0, source.width, source.height);
for (int x = 0; x < source.width; x++) {
for (int y = 0; y < source.height; y++) {
int loc = x + y * source.width;
if (x > xStart && x < xEnd && y > yStart && y < yEnd) {
if ( average )
destination.pixels[loc] = convolution(x, y, matrix_3_3_average, 3, source);
else
destination.pixels[loc] = convolution(x, y, matrix_3_3_sharpen, 3, source);
}
}
}
source.copy(destination, 0, 0, source.width, source.height, 0, 0, source.width, source.height);
See the example below, where I applied the suggestions to the code from your question:
PImage source;
PImage destination;
int w = 80;
float[][] matrix_3_3_average = {
{1.0/9.0, 1.0/9.0, 1.0/9.0 },
{1.0/9.0, 1.0/9.0, 1.0/9.0 },
{1.0/9.0, 1.0/9.0, 1.0/9.0 }
};
float[][] matrix_3_3_sharpen =
{ { -1, -1, -1 },
{ -1, 9, -1 },
{ -1, -1, -1 } };
void setup() {
size(200, 200);
source = loadImage("C:/temp/flower.jpg"); //source = loadImage("sunflower.jpg");
destination = createImage(source.width, source.height, RGB);
destination.copy(source, 0, 0, source.width, source.height, 0, 0, source.width, source.height);
}
boolean average = key == '0';
boolean sharpen = key == '1';
void keyPressed() {
average = key == '0';
sharpen = key == '1';
}
void draw() {
int xStart = constrain(mouseX - w/2, 0, width);
int xEnd = constrain(mouseX + w/2, 0, width);
int yStart = constrain(mouseY - w/2, 0, height);
int yEnd = constrain(mouseY + w/2, 0, height);
println(xStart, xEnd, yStart, yEnd);
if (average || sharpen) {
destination.copy(source, 0, 0, source.width, source.height, 0, 0, source.width, source.height);
for (int x = 0; x < source.width; x++) {
for (int y = 0; y < source.height; y++) {
int loc = x + y * source.width;
if (x > xStart && x < xEnd && y > yStart && y < yEnd) {
if ( average )
destination.pixels[loc] = convolution(x, y, matrix_3_3_average, 3, source);
else
destination.pixels[loc] = convolution(x, y, matrix_3_3_sharpen, 3, source);
}
}
}
source.copy(destination, 0, 0, source.width, source.height, 0, 0, source.width, source.height);
average = sharpen = false;
}
image(destination, 0, 0);
}
color convolution(int x, int y, float[][] matrix, int matrixSize, PImage img) {
int offset = (matrixSize - 1)/2;
float r = 0;
float g = 0;
float b = 0;
for (int i = 0; i < matrixSize; i++) {
for (int j = 0; j < matrixSize; j++) {
int xLoc = x + i - offset;
int yLoc = y + j - offset;
int loc = xLoc + yLoc * img.width;
loc = constrain(loc, 0, img.pixels.length-1);
r = r + matrix[i][j] * red(img.pixels[loc]);
g = g + matrix[i][j] * green(img.pixels[loc]);
b = b + matrix[i][j] * blue(img.pixels[loc]);
}
}
return color(r, g, b);
}
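The same convolution() function can also drive the other kernels from your list. As a sketch (these kernel values are standard choices, not taken from the professor's code), a 5x5 box blur and a 3x3 Laplacian edge-detection kernel would look like this:
float[][] matrix_5_5_blur = {
  { 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0 },
  { 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0 },
  { 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0 },
  { 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0 },
  { 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0, 1.0/25.0 }
};
float[][] matrix_3_3_edge = {
  {  0, -1,  0 },
  { -1,  4, -1 },
  {  0, -1,  0 }
};
// e.g. in draw(), behind a hypothetical key '2':
// destination.pixels[loc] = convolution(x, y, matrix_5_5_blur, 5, source);
Grayscale and the RGB channel swap do not need a convolution at all; they can be done per pixel with Processing's red(), green() and blue() functions.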
The image rotates with the code below, but incorrectly: some black dots appear on the original image. I believe it's something in the rotation code. Any solution? Thanks. The image is 32x32 pixels, loaded at the center of a 320x240 screen.
public class RendPanel extends JPanel {
private static final long serialVersionUID = 1L;
int widthe = 320;
int heighte = 240;
double angle = Math.toRadians(220);
double sin = Math.sin(angle);
double cos = Math.cos(angle);
double x0 = 0.5 * (widthe - 1); // point to rotate about
double y0 = 0.5 * (heighte - 1); // center of image
public static BufferedImage fbuffer;
public RendPanel(int width, int height) {
fbuffer = new BufferedImage(320, 240, BufferedImage.TYPE_INT_RGB);
BufferedImage in = null;
try { in = ImageIO.read(new File("square.png")); } //32x32 square .png
catch (IOException e) { e.printStackTrace(); }
for (int i = 0; i < in.getWidth(); i++) {
for (int j = 0; j < in.getHeight(); j++) {
fbuffer.setRGB(i + (320 / 2) - 16, j + (240 / 2) - 16, in.getRGB(i, j));
}
}
BufferedImage neww = new BufferedImage(320, 240, BufferedImage.TYPE_INT_RGB);
for (int x = 0; x < widthe; x++) {
for (int y = 0; y < heighte; y++) {
if(x >= x0 - 32 && x <= x0 + 32 && y >= y0 - 32 && y <= y0 + 32){
double a = x - x0;
double b = y - y0;
int xx = (int) (+a * cos - b * sin + x0);
int yy = (int) (+a * sin + b * cos + y0);
// plot pixel (x, y) the same color as (xx, yy) if it's in bounds
if (xx >= 0 && xx < width && yy >= 0 && yy < height) {
neww.setRGB(xx, yy, fbuffer.getRGB(x, y));
}
}
}
}
fbuffer = neww;
repaint();
setPreferredSize(new Dimension(width, height));
}
protected void paintComponent(Graphics g) {
super.paintComponent(g);
g.drawImage(fbuffer, 0, 0, null);
}
}
A beginner's mistake (sorry).
Taking every source pixel in turn, transforming the coordinates to the destination and copying the pixel value is not the right way, because the regular input grid will not map to a regular grid in the destination, and there will be voids (and overlaps).
The correct way is to scan the destination image (so that every destination pixel is reached) and counter-transform the coordinates to fetch the pixel value from the source.
As a refinement, you can use the four neighboring pixels around the point where you land in the source and perform bilinear interpolation, to reduce aliasing.
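A minimal sketch of that refinement in plain Java (src and dst stand for your fbuffer and neww; x0, y0, sin and cos as in your code; the loop structure is mine, not your original):
for (int x = 0; x < dst.getWidth(); x++) {
    for (int y = 0; y < dst.getHeight(); y++) {
        // rotate the destination coordinate BACK into the source image (rotation by -angle)
        double sx =  (x - x0) * cos + (y - y0) * sin + x0;
        double sy = -(x - x0) * sin + (y - y0) * cos + y0;
        int ix = (int) Math.floor(sx);
        int iy = (int) Math.floor(sy);
        if (ix < 0 || iy < 0 || ix + 1 >= src.getWidth() || iy + 1 >= src.getHeight()) continue;
        double fx = sx - ix, fy = sy - iy;
        int rgb = 0;
        for (int shift = 0; shift <= 16; shift += 8) { // blend the B, G and R channels separately
            double c00 = (src.getRGB(ix,     iy    ) >> shift) & 0xFF;
            double c10 = (src.getRGB(ix + 1, iy    ) >> shift) & 0xFF;
            double c01 = (src.getRGB(ix,     iy + 1) >> shift) & 0xFF;
            double c11 = (src.getRGB(ix + 1, iy + 1) >> shift) & 0xFF;
            double c = c00 * (1 - fx) * (1 - fy) + c10 * fx * (1 - fy)
                     + c01 * (1 - fx) * fy       + c11 * fx * fy;
            rgb |= ((int) c) << shift;
        }
        dst.setRGB(x, y, rgb);
    }
}
The working code below uses the same scan-the-destination approach, but with a nearest-neighbor lookup (it truncates xx and yy instead of blending).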
Man, it's strange, because in this code it works properly!
Here's the working code:
public class RendPanel extends JPanel {
private static final long serialVersionUID = 1L;
int widthe = 320;
int heighte = 240;
int ang = 0;
double x0 = 0.5 * (widthe - 1); // point to rotate about
double y0 = 0.5 * (heighte - 1); // center of image
public static BufferedImage fbuffer;
public RendPanel(int width, int height) {
fbuffer = new BufferedImage(320, 240, BufferedImage.TYPE_INT_RGB);
BufferedImage in = null;
try { in = ImageIO.read(new File("square.png")); } //32x32 square .png
catch (IOException e) { e.printStackTrace(); }
for (int i = 0; i < in.getWidth(); i++) {
for (int j = 0; j < in.getHeight(); j++) {
fbuffer.setRGB(i + (320 / 2) - 16, j + (240 / 2) - 16, in.getRGB(i, j));
}
}
setPreferredSize(new Dimension(width, height));
}
BufferedImage neww;
public void r(){
neww = new BufferedImage(320, 240, BufferedImage.TYPE_INT_RGB);
double angle = Math.toRadians(ang);
double sin = Math.sin(angle);
double cos = Math.cos(angle);
for (int x = 0; x < widthe; x++) {
for (int y = 0; y < heighte; y++) {
if(x >= x0 - 32 && x <= x0 + 32 && y >= y0 - 32 && y <= y0 + 32){
double a = x - x0;
double b = y - y0;
int xx = (int) (+a * cos - b * sin + x0);
int yy = (int) (+a * sin + b * cos + y0);
// plot pixel (x, y) the same color as (xx, yy) if it's in bounds
if (xx >= 0 && xx < widthe && yy >= 0 && yy < heighte) {
neww.setRGB(x, y, fbuffer.getRGB(xx, yy));
}
}
}
}
ang++;
repaint();
}
protected void paintComponent(Graphics g) {
super.paintComponent(g);
g.drawImage(neww, 0, 0, null);
}
}
Thanks to:
https://introcs.cs.princeton.edu/java/31datatype/Rotation.java.html
EDIT:
You have to swap the variables in bf2.setRGB(x, y, fbuffer.getRGB(xx, yy)); so that the destination uses the original coordinates and the source lookup uses the rotated ones.
I have code that turns an RGB bitmap into a black-and-white bitmap:
public static Bitmap setDefaultValues(Bitmap bmp) {
Mat srcMat = new Mat();
org.opencv.android.Utils.bitmapToMat(bmp, srcMat, true);
final Bitmap bitmap = Bitmap.createBitmap(srcMat.clone().width(), srcMat.clone().height(), Bitmap.Config.ARGB_8888);
Imgproc.cvtColor(srcMat, srcMat, Imgproc.COLOR_BGR2GRAY, 0);
Mat srcMat1 = srcMat;
Imgproc.GaussianBlur(srcMat1, srcMat1, new Size(3, 3), 0);
//Mat srcMat1 = new Mat(srcMat.rows(), srcMat.cols(), CV_8UC1);
//int kernalsize = 3;
//Imgproc.bilateralFilter(srcMat, srcMat1, kernalsize, kernalsize * 2, kernalsize / 2);
srcMat1.convertTo(srcMat1, 0, 1.9, -120);
srcMat1.convertTo(srcMat1, CvType.CV_8U, 1.9, -120);
Imgproc.cvtColor(srcMat1, srcMat1, Imgproc.COLOR_GRAY2RGBA, 4);
org.opencv.android.Utils.matToBitmap(srcMat, bitmap, true);
return bitmap;
}
I have implemented this code to convert an RGB image into black and white.
It returns a result, but my problem is that I can't remove the shadow from the image.
I have also compared with another application that converts it perfectly, and I don't understand where I am going wrong.
This is the original image:
This is my application's output:
This is the other application's output:
Please help me understand how I can achieve this.
Please use the following code to convert your color image to black and white.
public static Bitmap createContrast(Bitmap src, double value) {
// image size
int width = src.getWidth();
int height = src.getHeight();
// create output bitmap
Bitmap bmOut = Bitmap.createBitmap(width, height, src.getConfig());
// color information
int A, R, G, B;
int pixel;
// get contrast value
double contrast = Math.pow((100 + value) / 100, 2);
// scan through all pixels
for(int x = 0; x < width; ++x) {
for(int y = 0; y < height; ++y) {
// get pixel color
pixel = src.getPixel(x, y);
A = Color.alpha(pixel);
// apply filter contrast for every channel R, G, B
R = Color.red(pixel);
R = (int)(((((R / 255.0) - 0.5) * contrast) + 0.5) * 255.0);
if(R < 0) { R = 0; }
else if(R > 255) { R = 255; }
G = Color.green(pixel); // green channel, not red
G = (int)(((((G / 255.0) - 0.5) * contrast) + 0.5) * 255.0);
if(G < 0) { G = 0; }
else if(G > 255) { G = 255; }
B = Color.blue(pixel); // blue channel, not red
B = (int)(((((B / 255.0) - 0.5) * contrast) + 0.5) * 255.0);
if(B < 0) { B = 0; }
else if(B > 255) { B = 255; }
// set new pixel color to output bitmap
bmOut.setPixel(x, y, Color.argb(A, R, G, B));
}
}
return bmOut;}
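For example (the value 50 below is just an illustration, not from the original post), createContrast(bitmap, 50) uses contrast = ((100 + 50) / 100)^2 = 2.25, so every channel is stretched around mid-gray by a factor of 2.25 and then clamped to the 0-255 range:
Bitmap highContrast = createContrast(sourceBitmap, 50); // sourceBitmap is your input Bitmap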
Please try this and see if it gives you the result you want:
public static Bitmap test(Bitmap src){
int width = src.getWidth();
int height = src.getHeight();
// create output bitmap
Bitmap bmOut = Bitmap.createBitmap(width, height, src.getConfig());
// color information
int A, R, G, B;
int pixel;
for (int x = 0; x < width; ++x) {
for (int y = 0; y < height; ++y) {
// get pixel color
pixel = src.getPixel(x, y);
A = Color.alpha(pixel);
R = Color.red(pixel);
G = Color.green(pixel);
B = Color.blue(pixel);
int gray = (int) (0.2989 * R + 0.5870 * G + 0.1140 * B);
// use 128 as threshold, above -> white, below -> black
if (gray > 128) {
gray = 255;
}
else{
gray = 0;
}
// set new pixel color to output bitmap
bmOut.setPixel(x, y, Color.argb(A, gray, gray, gray));
}
}
return bmOut;
}
Please see the answer in this thread; it explains the approach and produces a good result:
Threshold image using opencv (Java)
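The key idea in that answer is that a single global threshold cannot separate the page from its shadow, while an adaptive threshold computes a local threshold for each neighborhood. A minimal sketch using the same OpenCV Android bindings as in the question (the block size 15 and the constant 8 are tuning values I picked, not values from that answer):
public static Bitmap toBlackAndWhite(Bitmap bmp) {
    Mat rgba = new Mat();
    org.opencv.android.Utils.bitmapToMat(bmp, rgba, true);
    Mat gray = new Mat();
    Imgproc.cvtColor(rgba, gray, Imgproc.COLOR_RGBA2GRAY);
    Mat bw = new Mat();
    // threshold each pixel against the Gaussian-weighted mean of its 15x15 neighborhood
    Imgproc.adaptiveThreshold(gray, bw, 255,
            Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 15, 8);
    Bitmap out = Bitmap.createBitmap(bw.cols(), bw.rows(), Bitmap.Config.ARGB_8888);
    org.opencv.android.Utils.matToBitmap(bw, out, true);
    return out;
}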
I would like to take a picture in true black and white in my app. I searched for solutions (on this site too), but I only found solutions that put a photo in grayscale (for example in this topic), which is not what I am looking for ...
I also found a topic proposing this:
public static Bitmap createContrast(Bitmap src, double value) {
// image size
int width = src.getWidth();
int height = src.getHeight();
// create output bitmap
Bitmap bmOut = Bitmap.createBitmap(width, height, src.getConfig());
// color information
int A, R, G, B;
int pixel;
// get contrast value
double contrast = Math.pow((100 + value) / 100, 2);
// scan through all pixels
for (int x = 0; x < width; ++x) {
for (int y = 0; y < height; ++y) {
// get pixel color
pixel = src.getPixel(x, y);
A = Color.alpha(pixel);
// apply filter contrast for every channel R, G, B
R = Color.red(pixel);
R = (int) (((((R / 255.0) - 0.5) * contrast) + 0.5) * 255.0);
if (R < 0) {
R = 0;
} else if (R > 255) {
R = 255;
}
G = Color.red(pixel);
G = (int) (((((G / 255.0) - 0.5) * contrast) + 0.5) * 255.0);
if (G < 0) {
G = 0;
} else if (G > 255) {
G = 255;
}
B = Color.red(pixel);
B = (int) (((((B / 255.0) - 0.5) * contrast) + 0.5) * 255.0);
if (B < 0) {
B = 0;
} else if (B > 255) {
B = 255;
}
// set new pixel color to output bitmap
bmOut.setPixel(x, y, Color.argb(A, R, G, B));
}
}
return bmOut;
}
But the image quality is horrible ...
Does anyone have an idea?
Thank you
If you want the image to be 1-bit black/white, you can use a simple (and slow) threshold algorithm:
public static Bitmap createBlackAndWhite(Bitmap src) {
int width = src.getWidth();
int height = src.getHeight();
// create output bitmap
Bitmap bmOut = Bitmap.createBitmap(width, height, src.getConfig());
// color information
int A, R, G, B;
int pixel;
// scan through all pixels
for (int x = 0; x < width; ++x) {
for (int y = 0; y < height; ++y) {
// get pixel color
pixel = src.getPixel(x, y);
A = Color.alpha(pixel);
R = Color.red(pixel);
G = Color.green(pixel);
B = Color.blue(pixel);
int gray = (int) (0.2989 * R + 0.5870 * G + 0.1140 * B);
// use 128 as threshold, above -> white, below -> black
if (gray > 128)
gray = 255;
else
gray = 0;
// set new pixel color to output bitmap
bmOut.setPixel(x, y, Color.argb(A, gray, gray, gray));
}
}
return bmOut;
}
But depending on the image that will not look good; for better results you need a dithering algorithm (see Algorithm overview - the code above is the threshold method).
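For completeness, a rough sketch of Floyd-Steinberg dithering on an Android Bitmap (this is my own helper, not library code; it assumes the input is already grayscale, e.g. the output of the ColorMatrix method further down):
public static Bitmap floydSteinberg(Bitmap src) {
    int width = src.getWidth();
    int height = src.getHeight();
    // keep the gray values in a float array so the quantization error can be carried over
    float[] gray = new float[width * height];
    for (int y = 0; y < height; y++)
        for (int x = 0; x < width; x++)
            gray[y * width + x] = Color.red(src.getPixel(x, y)); // any channel, image is gray
    Bitmap bmOut = Bitmap.createBitmap(width, height, src.getConfig());
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            float old = gray[y * width + x];
            int bw = old > 128 ? 255 : 0;
            float err = old - bw;
            bmOut.setPixel(x, y, Color.argb(255, bw, bw, bw));
            // spread the error to the neighbors that have not been visited yet
            if (x + 1 < width)                   gray[y * width + x + 1]       += err * 7 / 16;
            if (x > 0 && y + 1 < height)         gray[(y + 1) * width + x - 1] += err * 3 / 16;
            if (y + 1 < height)                  gray[(y + 1) * width + x]     += err * 5 / 16;
            if (x + 1 < width && y + 1 < height) gray[(y + 1) * width + x + 1] += err * 1 / 16;
        }
    }
    return bmOut;
}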
For 256 levels of gray conversion:
according to http://www.mathworks.de/help/toolbox/images/ref/rgb2gray.html you calculate the gray value of each pixel as gray = 0.2989 * R + 0.5870 * G + 0.1140 * B which would translate to
public static Bitmap createGrayscale(Bitmap src) {
int width = src.getWidth();
int height = src.getHeight();
// create output bitmap
Bitmap bmOut = Bitmap.createBitmap(width, height, src.getConfig());
// color information
int A, R, G, B;
int pixel;
// scan through all pixels
for (int x = 0; x < width; ++x) {
for (int y = 0; y < height; ++y) {
// get pixel color
pixel = src.getPixel(x, y);
A = Color.alpha(pixel);
R = Color.red(pixel);
G = Color.green(pixel);
B = Color.blue(pixel);
int gray = (int) (0.2989 * R + 0.5870 * G + 0.1140 * B);
// set new pixel color to output bitmap
bmOut.setPixel(x, y, Color.argb(A, gray, gray, gray));
}
}
return bmOut;
}
But that is pretty slow since you have to do that for millions of pixels separately.
https://stackoverflow.com/a/9377943/995891 has a much nicer way of achieving the same.
// code from that answer put into method from above
public static Bitmap createGrayscale(Bitmap src) {
int width = src.getWidth();
int height = src.getHeight();
Bitmap bmOut = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
Canvas canvas = new Canvas(bmOut);
ColorMatrix ma = new ColorMatrix();
ma.setSaturation(0);
Paint paint = new Paint();
paint.setColorFilter(new ColorMatrixColorFilter(ma));
canvas.drawBitmap(src, 0, 0, paint);
return bmOut;
}
G = Color.red(pixel); should be G = Color.green(pixel);
B = Color.red(pixel); should be B = Color.blue(pixel);
See if these changes help.
Can anyone let me know how I can read pixels from the buffer in JOGL? Please illustrate with code.
After rendering is done, call this method:
public BufferedImage toImage(GL gl, int w, int h) {
gl.glReadBuffer(GL.GL_FRONT); // or GL.GL_BACK
ByteBuffer glBB = ByteBuffer.allocateDirect(3 * w * h); // JOGL expects a direct buffer
gl.glReadPixels(0, 0, w, h, GL.GL_BGR, GL.GL_UNSIGNED_BYTE, glBB);
BufferedImage bi = new BufferedImage(w, h, BufferedImage.TYPE_INT_ARGB);
int[] bd = ((DataBufferInt) bi.getRaster().getDataBuffer()).getData();
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int b = glBB.get() & 0xFF; // mask to read the byte as an unsigned value
int g = glBB.get() & 0xFF;
int r = glBB.get() & 0xFF;
bd[(h - y - 1) * w + x] = (r << 16) | (g << 8) | b | 0xFF000000;
}
}
return bi;
}
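A possible way to call it (names and output path are just an example; glReadPixels has to run on the thread that owns the GL context, e.g. inside a GLEventListener callback of the older JOGL API used above):
// inside display(GLAutoDrawable drawable), after the drawing calls
GL gl = drawable.getGL();
BufferedImage snapshot = toImage(gl, drawable.getWidth(), drawable.getHeight());
try {
    ImageIO.write(snapshot, "png", new File("snapshot.png"));
} catch (IOException e) {
    e.printStackTrace();
}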