I'm trying to create an array of BufferedImages that contain cropped pieces of a bigger image, but I keep getting this error:
Exception in thread "main" java.awt.image.RasterFormatException: (y + height) is outside of Raster
at sun.awt.image.ByteInterleavedRaster.createWritableChild(ByteInterleavedRaster.java:1233)
at java.awt.image.BufferedImage.getSubimage(BufferedImage.java:1156)
at com.search.visual.ImageChunks.createImageArrays(ImageChunks.java:95)
at com.search.visual.ImageChunks.calculateChunks(ImageChunks.java:122)
at com.search.visual.ImageChunks.<init>(ImageChunks.java:59)
at com.search.visual.Driver.main(Driver.java:10)
This is the code I'm using:
private BufferedImage[][] createImageArrays()
{
    BufferedImage[][] bitmapsArray = new BufferedImage[3][3];
    //Bitmap bmap = BitmapFactory.decodeResource(getResources(), image);
    //BufferedImage bmapScaled = BufferedImage.createScaledBitmap(bmp, 240, 240, true);
    //Image imageMap = scaledImage;
    bitmapsArray[0][0] = bmp.getSubimage(0, 0, 80, 80);
    bitmapsArray[0][1] = bmp.getSubimage(80, 0, 80, 80);
    bitmapsArray[0][2] = bmp.getSubimage(160, 0, 80, 80);
    bitmapsArray[1][0] = bmp.getSubimage(0, 80, 80, 80);
    bitmapsArray[1][1] = bmp.getSubimage(80, 80, 80, 80);
    bitmapsArray[1][2] = bmp.getSubimage(160, 80, 80, 80);
    bitmapsArray[2][0] = bmp.getSubimage(0, 160, 80, 80);
    bitmapsArray[2][1] = bmp.getSubimage(80, 160, 80, 80);
    bitmapsArray[2][2] = bmp.getSubimage(160, 160, 80, 80);
    return bitmapsArray;
}
/*
* 1. split the complete image/bitmaps into several smaller images/bitmaps
* 2. calculate the average RGB for each of these smaller images/bitmaps
* 3. fill up the RGB[][] rgbList
*
*/
private void calculateChunks()
{
    // 1. split the complete image/bitmap into several smaller images/bitmaps
    // this is a real con, as I've only implemented it as 3x3 of 80x80
    // 2. calculate the average RGB for each of these smaller images/bitmaps
    BufferedImage[][] bmpList = createImageArrays();
    System.out.print(bmpList);
    RGB rgb_temp = null;
    BufferedImage bmp_temp = null;
    int rgb = 0;
    int red = 0;
    int green = 0;
    int blue = 0;
    int running_total_red = 0;
    int running_total_blue = 0;
    int running_total_green = 0;
    RGB temp_rgb = null;
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++)
        {
            bmp_temp = bmpList[i][j];
            red = 0;
            green = 0;
            blue = 0;
            for (int k = 0; k < 80; k++)
                for (int l = 0; l < 80; l++)
                {
                    temp_rgb = getPixelRGBvalues(bmp_temp.getRGB(k, l));
                    /* rgb = bmp_temp.getPixel(k, l);    // gets the values of the pixel at point k and l
                       red += (rgb & 0x00ff0000) >> 16;  // stores only the red values of rgb to red
                       green += (rgb & 0x0000ff00) >> 8; // stores only the green values of rgb to green
                       blue += rgb & 0x000000ff;         // stores only the blue values of rgb to blue
                    */
                    running_total_red += temp_rgb.getR();
                    running_total_blue += temp_rgb.getB();
                    running_total_green += temp_rgb.getG();
                }
            // normalise these values
            red = running_total_red / (80 * 80);
            green = running_total_green / (80 * 80);
            blue = running_total_blue / (80 * 80);
            // 3. fill up the RGB[][] rgbList
            rgbList[i][j] = new RGB(red, green, blue);
        }
}
Why does this keep happening?
Your source image is smaller than 240x240 pixels, so the subimages you are trying to acquire extend outside the bounds of the original raster.
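A minimal sketch of one way around this (an illustration, not the asker's exact setup): scale the source to exactly 240x240 first, so every 80x80 getSubimage call is guaranteed to stay inside the raster.
import java.awt.Graphics2D;
import java.awt.image.BufferedImage;

public class SafeChunks
{
    static BufferedImage[][] createImageArrays(BufferedImage src)
    {
        // Scale the source to exactly 240x240 so the 3x3 grid of 80x80 tiles always fits.
        BufferedImage scaled = new BufferedImage(240, 240, BufferedImage.TYPE_INT_RGB);
        Graphics2D g = scaled.createGraphics();
        g.drawImage(src, 0, 0, 240, 240, null);
        g.dispose();
        BufferedImage[][] chunks = new BufferedImage[3][3];
        for (int row = 0; row < 3; row++)
            for (int col = 0; col < 3; col++)
                chunks[row][col] = scaled.getSubimage(col * 80, row * 80, 80, 80);
        return chunks;
    }
}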
I am writing a program which needs to detect red circle-alikes in this picture.
I have tried Canny edge detection and findContours, but neither of them finds these red "circles". I also tried converting the image to HSV and detecting by color, but I couldn't determine a good range for this color; maybe the background color confuses it?
I've put here a piece of my code with my final attempt:
Mat image = new Mat();
image = Imgcodecs.imread("image.jpg");
Mat hsvImage = new Mat();
Mat grayscaleImage = new Mat();
Mat binaryImage = new Mat();
Imgproc.blur(image, image, new Size(1, 1));
Imgproc.cvtColor(image, hsvImage, Imgproc.COLOR_BGR2HSV);
Imgproc.cvtColor(image, grayscaleImage, Imgproc.COLOR_BGR2GRAY);
Imgproc.equalizeHist(grayscaleImage, grayscaleImage);
Imgproc.Canny(grayscaleImage, grayscaleImage, 50, 150, 3,false);
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(grayscaleImage.clone(), contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
for (int id = 0; id < contours.size(); id++) {
    MatOfPoint2f mop2f = new MatOfPoint2f();
    contours.get(id).convertTo(mop2f, CvType.CV_32F);
    RotatedRect rectangle = Imgproc.minAreaRect(mop2f);
    if (rectangle.boundingRect().width > 80)
        Imgproc.drawContours(image, contours, id, new Scalar(0, 255, 0));
}
If you want to process that marked image, you really might want to detect colors. Typically this is done in HSV color-space.
Here is some C++ code to detect "red" color. The result isn't good enough to use findContours yet, but maybe it will be after some dilation. Maybe you can convert the code to Java.
If you want to detect a different color, change the line redMask = thresholdHue(hsv, 0, 20, 50, 50); to mask = thresholdHue(hsv, yourWantedHueColorValue, 20, 50, 50);
// for example to shift a circular hue-channel
cv::Mat shiftChannel(cv::Mat H, int shift, int maxVal = 180)
{
    // CV_8UC1 only!
    cv::Mat shiftedH = H.clone();
    //int shift = 25; // in openCV hue values go from 0 to 180 (so they have to be doubled to get to 0 .. 360) because of the byte range from 0 to 255
    for (int j = 0; j < shiftedH.rows; ++j)
        for (int i = 0; i < shiftedH.cols; ++i)
        {
            shiftedH.at<unsigned char>(j, i) = (shiftedH.at<unsigned char>(j, i) + shift) % maxVal;
        }
    return shiftedH;
}
cv::Mat thresholdHue(cv::Mat hsvImage, int hueVal, int range = 30, int minSat = 50, int minValue = 50)
{
    // hsvImage must be a CV_8UC3 HSV image.
    // hueVal and range are in openCV's hue range (0 .. 180)
    // range shouldn't be bigger than 90, because that's the max (all colors) after shifting the hue channel.
    // this function will
    // 1. shift the hue channel, so that even colors near the border (red color!) will be detectable with the same code.
    // 2. threshold the hue channel around the value 90 +/- range
    cv::Mat mask; // return-value
    std::vector<cv::Mat> channels;
    cv::split(hsvImage, channels);
    int targetHueVal = 180 / 2; // we'll shift the hue-space so that the target val will always be 90 afterwards, no matter which hue value was chosen. This can be important if the chosen hue lies near the circular border (red!).
    int shift = targetHueVal - hueVal;
    if (shift < 0) shift += 180;
    cv::Mat shiftedHue = shiftChannel(channels[0], shift, 180);
    // merge the channels back to an hsv image
    std::vector<cv::Mat> newChannels;
    newChannels.push_back(shiftedHue);
    newChannels.push_back(channels[1]);
    newChannels.push_back(channels[2]);
    cv::Mat shiftedHSV;
    cv::merge(newChannels, shiftedHSV);
    // threshold
    cv::inRange(shiftedHSV, cv::Vec3b(targetHueVal - range, minSat, minValue), cv::Vec3b(targetHueVal + range, 255, 255), mask);
    return mask;
}
int main(int argc, char* argv[])
{
    cv::Mat input = cv::imread("C:/StackOverflow/Input/redCircleLikeContours.jpg");
    cv::Mat redMask;
    cv::Mat hsv;
    cv::cvtColor(input, hsv, CV_BGR2HSV);
    redMask = thresholdHue(hsv, 0, 20, 50, 50);
    cv::imshow("red", redMask);
    cv::imshow("input", input);
    cv::imwrite("C:/StackOverflow/Output/redCircleLikeContoursMask.png", redMask);
    cv::waitKey(0);
    return 0;
}
Here's the result:
Here is my code if somebody wants to take a look :)
public static void main (String args[]){
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    Mat image = new Mat();
    image = Imgcodecs.imread("imageorg.jpg");
    // imread returns an empty Mat (not null) on failure
    if (image.empty()) System.out.println("Wrong path to image");
    else System.out.println("Image is fine");
    Mat hsvImage = new Mat();
    Imgproc.blur(image, image, new Size(3, 3));
    Imgproc.cvtColor(image, hsvImage, Imgproc.COLOR_BGR2HSV);
    Mat redMask = new Mat();
    redMask = thresholdHue(hsvImage, 0, 20, 50, 50);
    Mat kernel = new Mat();
    kernel = Imgproc.getStructuringElement(Imgproc.MORPH_DILATE, new Size(2, 2));
    Mat dilateMat = new Mat();
    Imgproc.dilate(redMask, dilateMat, kernel);
    Imgcodecs.imwrite("redCircleLikeContours.png", redMask);
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(dilateMat.clone(), contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    List<MatOfPoint> removedContoursList = new ArrayList<MatOfPoint>();
    for (int id = 0; id < contours.size(); id++){
        MatOfPoint2f mop2f = new MatOfPoint2f();
        contours.get(id).convertTo(mop2f, CvType.CV_32F);
        RotatedRect rectangle = Imgproc.minAreaRect(mop2f);
        if (rectangle.boundingRect().height < 10){
            removedContoursList.add(contours.get(id));
            System.out.println("removing: " + rectangle.boundingRect());
            contours.remove(id);
            id--;
        }
    }
}
public static Mat thresholdHue(Mat hsvImage, int hueVal, int range, int minSat, int minValue)
{
    Mat mask = new Mat();
    List<Mat> channels = new ArrayList<Mat>();
    Core.split(hsvImage, channels);
    int targetHueVal = 180 / 2;
    int shift = targetHueVal - hueVal;
    if (shift < 0) shift += 180;
    Mat shiftedHue = shiftChannel(channels.get(0), shift, 180);
    List<Mat> newChannels = new ArrayList<Mat>();
    newChannels.add(shiftedHue);
    newChannels.add(channels.get(1));
    newChannels.add(channels.get(2));
    Mat shiftedHSV = new Mat();
    Core.merge(newChannels, shiftedHSV);
    Core.inRange(shiftedHSV, new Scalar(targetHueVal - range, minSat, minValue), new Scalar(targetHueVal + range, 255, 255), mask);
    return mask;
}
private static Mat shiftChannel(Mat H, int shift, int maxVal)
{
    Mat shiftedH = H.clone();
    for (int j = 0; j < shiftedH.rows(); ++j)
        for (int i = 0; i < shiftedH.cols(); ++i)
        {
            shiftedH.put(j, i, (shiftedH.get(j, i)[0] + shift) % maxVal);
        }
    return shiftedH;
}
I am working with OpenCV 3.0 for Android. I have an image in which I want to detect the angle of the hands inside circular dials. For that, I am using HoughLinesP to detect the hands.
Here is the code.
Mat imgSource = new Mat(), imgCirclesOut = new Mat(), imgLinesOut = new Mat();
// convert to grayscale
Imgproc.cvtColor(Image, imgSource, Imgproc.COLOR_BGR2GRAY);
Imgproc.GaussianBlur( imgSource, imgSource, new Size(9, 9), 2, 2 );
int threshold = 0;
int minLineSize = 0;
int lineGap = 0;
Imgproc.HoughLinesP(imgSource, imgLinesOut, 1, Math.PI/180, threshold, minLineSize, lineGap);
for( int j = 0; j < imgLinesOut.cols(); j++ )
{
    double[] vec = imgLinesOut.get(0, j);
    Point pt1, pt2;
    pt1 = new Point(vec[0], vec[1]);
    pt2 = new Point(vec[2], vec[3]);
    Imgproc.line( Image, pt1, pt2, new Scalar(0, 0, 255), 3, Core.LINE_AA, 0);
}
But the result is
What I need is the angle of the hands in these circles. Any help regarding this issue is highly appreciated. Thanks in advance.
Edit
I have updated my code with this
Mat imgSource = new Mat(), imgCirclesOut = new Mat(), imgLinesOut = new Mat();
Imgproc.GaussianBlur( Image, imgSource, new Size(5, 5), 2, 2 );
int threshold = 20;
int minLineSize = 0;
int lineGap = 10;
Imgproc.Canny(imgSource, imgSource, 70, 100);
Imgproc.HoughLinesP(imgSource, imgLinesOut, 1, Math.PI/180, threshold, minLineSize, lineGap);
for( int j = 0; j < imgLinesOut.cols(); j++ )
{
    double[] vec = imgLinesOut.get(0, j);
    Point pt1, pt2;
    pt1 = new Point(vec[0], vec[1]);
    pt2 = new Point(vec[2], vec[3]);
    Imgproc.line( imgSource, pt1, pt2, new Scalar(0, 0, 255), 3, Core.LINE_AA, 0);
}
As suggested by @Micka, there is no need to gray the image (I removed cvtColor). I also decreased the GaussianBlur size to 5, and added Canny on the image for edges.
The resulting blurred image is
Detecting lines can be a problem in such small images, since you have too few points to fill the Hough accumulator properly.
I propose to use a different approach:
1. Segment each circle (dial)
2. Extract the largest dark blob (hand)
Below is a simple implementation of this idea. The code is in C++, but you can easily port it to Java, or at least use it as a reference.
#include "opencv2/opencv.hpp"
using namespace cv;
int main(int, char**)
{
Mat1b img = imread("path_to_image", IMREAD_GRAYSCALE);
Mat3b res;
cvtColor(img, res, COLOR_GRAY2BGR);
// Find dials
vector<Vec3f> circles;
HoughCircles(img, circles, CV_HOUGH_GRADIENT, 1, img.cols/10, 400, 40);
// For each dial
for (int i = 0; i < circles.size(); ++i)
{
// Segment the dial
Mat1b dial(img.size(), uchar(255));
Mat1b mask(img.size(), uchar(0));
circle(mask, Point(circles[i][0], circles[i][1]), circles[i][2], Scalar(255), CV_FILLED);
img.copyTo(dial, mask);
// Apply threshold and open
Mat1b bin;
threshold(dial, bin, 127, 255, THRESH_BINARY_INV);
Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(5,5));
morphologyEx(bin, bin, MORPH_OPEN, kernel);
// Get min area rect
vector<Point> points;
findNonZero(bin, points);
RotatedRect r = minAreaRect(points);
// Draw min area rect
Point2f pts[4];
r.points(pts);
for (int j = 0; j < 4; ++j) {
line(res, pts[j], pts[(j + 1) % 4], Scalar(0, 255, 0), 1);
}
}
imshow("Result", res);
waitKey();
return 0;
}
Starting from this image:
I find hands here:
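To get the actual hand angle from there, the RotatedRect already carries it. A minimal Java sketch of that last step (a hypothetical helper, assuming the hand blob's contour from the approach above):
import org.opencv.core.CvType;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.RotatedRect;
import org.opencv.imgproc.Imgproc;

public class HandAngle
{
    // Returns the hand's angle in degrees, measured along the long side
    // of the min-area rectangle fitted to the hand blob's contour.
    static double handAngle(MatOfPoint contour)
    {
        MatOfPoint2f pts = new MatOfPoint2f();
        contour.convertTo(pts, CvType.CV_32F);
        RotatedRect r = Imgproc.minAreaRect(pts);
        // minAreaRect reports angles in [-90, 0); shift by 90 when the rect
        // is taller than it is wide, so the angle follows the long side.
        return (r.size.width < r.size.height) ? r.angle + 90.0 : r.angle;
    }
}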
for( int j = 0; j < imgLinesOut.size(); j++ )
This will give the size of the vector, so the loop iterates through the whole vector.
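In the Java bindings the detected segments land in a Mat rather than a vector; here is a short sketch of iterating them and computing each segment's angle (an assumption about the OpenCV 3.x row-per-line layout, reusing the question's imgLinesOut):
// Sketch: OpenCV 3.x Java stores one detected segment per row of imgLinesOut.
for (int i = 0; i < imgLinesOut.rows(); i++)
{
    double[] v = imgLinesOut.get(i, 0); // {x1, y1, x2, y2}
    double angleDeg = Math.toDegrees(Math.atan2(v[3] - v[1], v[2] - v[0]));
    System.out.println("segment angle in degrees: " + angleDeg);
}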
I'm trying to implement the Floyd-Steinberg algorithm in Java, working with java.awt.image.BufferedImage.
I've used the algorithm described here
with a custom palette, and I was expecting to get more or less the same image as in the wikipedia example (or as generated by Gimp for example), but I get a very different version.
You can see what I get
I'm obviously missing something (the output image has colors which don't belong to my palette), but I can't figure out what.
What am I doing wrong?
Here's the code:
import javax.imageio.ImageIO;
import java.awt.*;
import java.awt.image.BufferedImage;
import java.awt.image.IndexColorModel;
import java.io.File;
import java.io.IOException;
public class FloydSteinbergTest {
private static final Color[] PALETTE = new Color[]{
        new Color(221, 221, 221),
        new Color(19, 125, 62),
        new Color(179, 80, 188),
        new Color(107, 138, 201),
        new Color(177, 166, 39),
        new Color(65, 174, 56),
        new Color(208, 132, 153),
        new Color(64, 64, 64),
        new Color(154, 161, 161),
        new Color(46, 110, 137),
        new Color(126, 61, 181),
        new Color(46, 56, 141),
        new Color(79, 50, 31),
        new Color(53, 70, 27),
        new Color(150, 52, 48),
        new Color(25, 22, 22)};
public static void main(String[] args) {
    String lImgFile = "/tmp/test.jpg";
    try {
        // Load image
        BufferedImage lImage = ImageIO.read(new File(lImgFile));
        BufferedImage lOutImage = applyDitheredPalette(lImage, PALETTE);
        ImageIO.write(lOutImage, "png", new File("/tmp/out.png"));
    } catch (IOException lEx) {
        System.out.println(lEx.getMessage());
    }
}
/**
 * @param pPalette Color palette to apply.
 * @param pImage   Image to apply palette on.
 * @return {@link java.awt.image.BufferedImage} corresponding to pPalette applied on pImage using naive Floyd-Steinberg implementation
 */
public static BufferedImage applyDitheredPalette(BufferedImage pImage, Color[] pPalette) {
    int lWidth = pImage.getWidth();
    int lHeight = pImage.getHeight();
    IndexColorModel lColorModel = paletteToColorModel(pPalette);
    BufferedImage lImageOut = new BufferedImage(lWidth, lHeight, BufferedImage.TYPE_BYTE_INDEXED, lColorModel);
    for (int y = (lHeight - 1); y >= 0; y--) {
        for (int x = 0; x < lWidth; x++) {
            // Get original pixel color channels
            int lInitialPixelColor = pImage.getRGB(x, y);
            // Finding nearest color in the palette
            Color lNearestColor = getNearestColor(lInitialPixelColor, pPalette);
            // Set quantized pixel
            lImageOut.setRGB(x, y, lNearestColor.getRGB());
            // Applying Floyd-Steinberg dithering
            int quantizationError = lInitialPixelColor - lNearestColor.getRGB();
            if ((x + 1) < lWidth) {
                int lPixel = pImage.getRGB(x + 1, y);
                lImageOut.setRGB(x + 1, y, lPixel + (quantizationError * (7 / 16)));
            }
            if ((x - 1) > 0 && (y + 1) < lHeight) {
                int lPixel = pImage.getRGB(x - 1, y + 1);
                lImageOut.setRGB(x - 1, y + 1, lPixel + (quantizationError * (3 / 16)));
            }
            if ((y + 1) < lHeight) {
                int lPixel = pImage.getRGB(x, y + 1);
                lImageOut.setRGB(x, y + 1, lPixel + (quantizationError * (5 / 16)));
            }
            if ((x + 1 < lWidth) && (y + 1 < lHeight)) {
                int lPixel = pImage.getRGB(x + 1, y + 1);
                lImageOut.setRGB(x + 1, y + 1, lPixel + (quantizationError * (1 / 16)));
            }
            // End of Floyd-Steinberg dithering
        }
    }
    return lImageOut;
}
/**
 * @param pPalette to load color model from
 * @return {@link java.awt.image.IndexColorModel} Color model initialized using pPalette colors
 */
private static IndexColorModel paletteToColorModel(Color[] pPalette) {
    int lSize = pPalette.length;
    // Getting color component for each palette color
    byte[] lReds = new byte[lSize];
    byte[] lGreens = new byte[lSize];
    byte[] lBlues = new byte[lSize];
    for (int i = 0; i < lSize; i++) {
        Color lColor = pPalette[i];
        lReds[i] = (byte) lColor.getRed();
        lGreens[i] = (byte) lColor.getGreen();
        lBlues[i] = (byte) lColor.getBlue();
    }
    return new IndexColorModel(4, lSize, lReds, lGreens, lBlues);
}
/**
 * @param pColor   Color to approximate
 * @param pPalette Color palette to use for quantization
 * @return {@link java.awt.Color} nearest to pColor, taken from pPalette
 */
private static Color getNearestColor(int pColor, Color[] pPalette) {
    Color lNearestColor = null;
    double lNearestDistance = Integer.MAX_VALUE;
    double lTempDist;
    for (Color lColor : pPalette) {
        Color lRgb = new Color(pColor);
        lTempDist = distance(lRgb.getRed(), lRgb.getGreen(), lRgb.getBlue(), lColor.getRed(), lColor.getGreen(), lColor.getBlue());
        if (lTempDist < lNearestDistance) {
            lNearestDistance = lTempDist;
            lNearestColor = lColor;
        }
    }
    return lNearestColor;
}

/**
 * @return Distance between 2 pixels' color channels.
 */
private static double distance(int pR1, int pG1, int pB1, int pR2, int pG2, int pB2) {
    double lDist = Math.pow(pR1 - pR2, 2) + Math.pow(pG1 - pG2, 2) + Math.pow(pB1 - pB2, 2);
    return Math.sqrt(lDist);
    }
}
This site is for questions, not for debugging. But as an attempt to at least answer the question "What am I doing wrong?":
The term (7 / 16) will perform an integer division, and the result will be 0. Use (7.0 / 16.0) instead.
You may not do arithmetic with packed RGB values! When you have an RGB value like 0x000000FF (blue) and you multiply it by 256, the result will be 0x0000FF00 (green). Computations like lPixel + (quantizationError * (3.0 / 16.0)) have to be done separately for the R, G and B channels.
You're processing the image from the bottom to the top. Distributing the error among the lower-right pixels (as described on the Wikipedia page) then no longer makes sense. Change your loops from
for (int y = (lHeight - 1); y >= 0; y--)
to
for (int y = 0; y < lHeight; y++)
You cannot store the quantization error directly in the pixels of the BufferedImage, because the error may also be negative. The image cannot handle this. (I also have doubts about your color model, but this is only a gut feeling.)
The image that you described as the "expected result" contains colors that are definitely not contained in your palette.
Finally: Have a look at https://stackoverflow.com/a/5940260/3182664
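Putting those fixes together, here is a minimal sketch of a correct per-channel pass (an illustration, not the asker's exact structure: it keeps the error in float buffers and writes a plain TYPE_INT_RGB image instead of an indexed one):
import java.awt.Color;
import java.awt.image.BufferedImage;

public class DitherSketch
{
    // Per-channel Floyd-Steinberg: errors live in float buffers (they can
    // be negative), never in the image itself, and we scan top to bottom.
    static BufferedImage dither(BufferedImage in, Color[] palette)
    {
        int w = in.getWidth(), h = in.getHeight();
        float[][] r = new float[h][w], g = new float[h][w], b = new float[h][w];
        for (int y = 0; y < h; y++)
            for (int x = 0; x < w; x++)
            {
                int p = in.getRGB(x, y);
                r[y][x] = (p >> 16) & 0xff;
                g[y][x] = (p >> 8) & 0xff;
                b[y][x] = p & 0xff;
            }
        BufferedImage out = new BufferedImage(w, h, BufferedImage.TYPE_INT_RGB);
        for (int y = 0; y < h; y++)
            for (int x = 0; x < w; x++)
            {
                Color c = nearest(r[y][x], g[y][x], b[y][x], palette);
                out.setRGB(x, y, c.getRGB());
                float er = r[y][x] - c.getRed();
                float eg = g[y][x] - c.getGreen();
                float eb = b[y][x] - c.getBlue();
                // distribute the error to the unvisited neighbours: 7/16, 3/16, 5/16, 1/16
                if (x + 1 < w) { r[y][x+1] += er * 7/16f; g[y][x+1] += eg * 7/16f; b[y][x+1] += eb * 7/16f; }
                if (y + 1 < h)
                {
                    if (x > 0) { r[y+1][x-1] += er * 3/16f; g[y+1][x-1] += eg * 3/16f; b[y+1][x-1] += eb * 3/16f; }
                    r[y+1][x] += er * 5/16f; g[y+1][x] += eg * 5/16f; b[y+1][x] += eb * 5/16f;
                    if (x + 1 < w) { r[y+1][x+1] += er * 1/16f; g[y+1][x+1] += eg * 1/16f; b[y+1][x+1] += eb * 1/16f; }
                }
            }
        return out;
    }

    static Color nearest(float r, float g, float b, Color[] palette)
    {
        Color best = palette[0];
        double bestD = Double.MAX_VALUE;
        for (Color c : palette)
        {
            // squared Euclidean distance in RGB space
            double d = (r - c.getRed()) * (r - c.getRed())
                     + (g - c.getGreen()) * (g - c.getGreen())
                     + (b - c.getBlue()) * (b - c.getBlue());
            if (d < bestD) { bestD = d; best = c; }
        }
        return best;
    }
}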
I'm trying to convert an RGB image to grayscale.
The method that does this task is the following:
public BufferedImage rgbToGrayscale(BufferedImage in)
{
    int width = in.getWidth();
    int height = in.getHeight();
    BufferedImage grayImage = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_GRAY);
    WritableRaster raster = grayImage.getRaster();
    int[] rgbArray = new int[width * height];
    in.getRGB(0, 0, width, height, rgbArray, 0, width);
    int[] outputArray = new int[width * height];
    int red, green, blue, gray;
    for (int i = 0; i < (height * width); i++)
    {
        red = (rgbArray[i] >> 16) & 0xff;
        green = (rgbArray[i] >> 8) & 0xff;
        blue = (rgbArray[i]) & 0xff;
        gray = (int) ((0.30 * red) + (0.59 * green) + (0.11 * blue));
        if (gray < 0)
            gray = 0;
        if (gray > 255)
            gray = 255;
        outputArray[i] = (gray & 0xff);
    }
    raster.setPixels(0, 0, width, height, outputArray);
    return grayImage;
}
I have a method that saves the pixel values to a file:
public void writeImageValueToFile(BufferedImage in, String fileName)
{
    int width = in.getWidth();
    int height = in.getHeight();
    try
    {
        FileWriter fstream = new FileWriter(fileName + ".txt");
        BufferedWriter out = new BufferedWriter(fstream);
        int[] grayArray = new int[width * height];
        in.getRGB(0, 0, width, height, grayArray, 0, width);
        for (int i = 0; i < (height * width); i++)
        {
            out.write((grayArray[i] & 0xff) + "\n");
        }
        out.close();
    } catch (Exception e)
    {
        System.err.println("Error: " + e.getMessage());
    }
}
The problem I have is that the RGB value I get from my method is always bigger than the expected one.
I created an image and filled it with the color (128, 128, 128). According to the first method, if I print the outputArray's data, I get:
r, g, b = 128, 128, 128. Final = 127 ---> correct :D
However, when I called the second method, I got the RGB value 187, which is incorrect.
Any suggestion?
Thanks!!!
Take a look at javax.swing.GrayFilter; it uses the RGBImageFilter class to accomplish the same thing and has a very similar implementation. It may make your life simpler.
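For instance, a one-line usage sketch (note that GrayFilter.createDisabledImage also brightens the result, since it is designed for disabled icons):
import java.awt.Image;
import javax.swing.GrayFilter;

public class GrayExample
{
    // Returns a grayed-out copy of the given image via Swing's built-in filter.
    static Image toGray(Image src)
    {
        return GrayFilter.createDisabledImage(src);
    }
}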
I'm not an expert at these things, but aren't RGB values stored as hex (base 16)? If so, the problem lies in your assumption that the operation & 0xff will cause your int to be stored/handled as base 16. It is just a notation, and default int usage in strings will always be base 10.
int a = 200;
a = a & 0xff;
System.out.println(a);
// output
200
You need to use an explicit base-16 toString() method.
System.out.println(Integer.toHexString(200));
// output
c8
I have an integer array of RGB pixels that looks something like:
pixels[0] = <rgb-value of pixel(0,0)>
pixels[1] = <rgb-value of pixel(1,0)>
pixels[2] = <rgb-value of pixel(2,0)>
pixels[3] = <rgb-value of pixel(0,1)>
...etc...
And I'm trying to create a BufferedImage from it. I tried the following:
BufferedImage img = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB);
img.getRaster().setPixels(0, 0, width, height, pixels);
But the resulting image has problems with the color bands. The image is unclear and there are diagonal and horizontal lines through it.
What is the proper way to initialize the image with the rgb values?
EDIT:
Here is what my image looks like
thanks,
Jeff
Try setDataElements instead of setPixels.
Another option is for the image to share the array instead of copying from it (see this answer for an example.)
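A minimal sketch of the first suggestion (assuming pixels holds packed 0xRRGGBB ints in row-major order, as described above):
import java.awt.image.BufferedImage;

public class FromPackedInts
{
    // setDataElements expects the raster's native transfer type; for
    // TYPE_INT_RGB that is one packed int per pixel -- exactly this layout.
    static BufferedImage fromPixels(int[] pixels, int width, int height)
    {
        BufferedImage img = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB);
        img.getRaster().setDataElements(0, 0, width, height, pixels);
        return img;
    }
}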
Not sure how to do it with a single array value. I believe you need three array values per pixel to specify the color when you use TYPE_INT_RGB:
import java.awt.*;
import java.awt.image.*;
import javax.swing.*;
public class ImageFromArray2 extends JFrame
{
    int width = 50;
    int height = 50;
    int imageSize = width * height;

    public ImageFromArray2()
    {
        JPanel panel = new JPanel();
        getContentPane().add( panel );
        int[] pixels = new int[imageSize * 3];

        // Create Red Image
        for (int i = 0; i < imageSize * 3; i += 3)
        {
            pixels[i] = 255;
            pixels[i+1] = 0;
            pixels[i+2] = 0;
        }
        panel.add( createImageLabel(pixels) );

        // Create Green Image
        for (int i = 0; i < imageSize * 3; i += 3)
        {
            pixels[i] = 0;
            pixels[i+1] = 255;
            pixels[i+2] = 0;
        }
        panel.add( createImageLabel(pixels) );

        // Create Blue Image
        for (int i = 0; i < imageSize * 3; i += 3)
        {
            pixels[i] = 0;
            pixels[i+1] = 0;
            pixels[i+2] = 255;
        }
        panel.add( createImageLabel(pixels) );

        // Create Cyan Image
        for (int i = 0; i < imageSize * 3; i += 3)
        {
            pixels[i] = 0;
            pixels[i+1] = 255;
            pixels[i+2] = 255;
        }
        panel.add( createImageLabel(pixels) );
    }

    private JLabel createImageLabel(int[] pixels)
    {
        BufferedImage image = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB);
        WritableRaster raster = image.getRaster();
        raster.setPixels(0, 0, width, height, pixels);
        JLabel label = new JLabel( new ImageIcon(image) );
        return label;
    }

    public static void main(String args[])
    {
        JFrame frame = new ImageFromArray2();
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        frame.pack();
        frame.setLocationRelativeTo( null );
        frame.setVisible( true );
    }
}
The reason you can't get the correct image is that each of those pixels packs all of the RGB components into a single int; to set each pixel properly you must unpack the channels first, like this:
double[] pixelsArr=new double[4];
pixelsArr[0]=(Integer.parseInt(string2.trim())>>16) & 0xFF;
pixelsArr[1]=(Integer.parseInt(string2.trim())>>8) & 0xFF;
pixelsArr[2]=(Integer.parseInt(string2.trim())) & 0xFF;
pixelsArr[3]=0xFF;
img.getRaster().setPixels(col,row,1,1, pixelsArr);
Here string2 holds the packed integer pixel value, col and row are the pixel's position, and 1, 1 is the width and height of the region being set.
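Generalizing that to a whole image, a short sketch (the packed pixels array and its dimensions are assumptions):
import java.awt.image.BufferedImage;

public class UnpackPixels
{
    // Unpacks each 0xRRGGBB int into separate samples before calling
    // setPixels, which expects one value per band rather than a packed int.
    static BufferedImage fromPacked(int[] pixels, int width, int height)
    {
        BufferedImage img = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB);
        int[] samples = new int[width * height * 3];
        for (int i = 0; i < width * height; i++)
        {
            samples[i * 3]     = (pixels[i] >> 16) & 0xFF; // red
            samples[i * 3 + 1] = (pixels[i] >> 8) & 0xFF;  // green
            samples[i * 3 + 2] = pixels[i] & 0xFF;         // blue
        }
        img.getRaster().setPixels(0, 0, width, height, samples);
        return img;
    }
}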