OpenCV-Android: Import .png preserving transparency of empty areas - java

I am currently trying to transform an input .png file into an OpenCV Mat, preserving the transparency of its empty areas. I am working in Eclipse, using OpenCV4Android.
I've tried this (using a Drawable):
Inputs:
icon.png is a resource file that is correctly loaded as RGBA (4 channels).
Mat mcSprite (global field).
// NOTE(review): pseudo-code fragment from the question — "onCameraFrame method{"
// is not valid Java; it sketches the body of the onCameraFrame callback.
onCameraFrame method{
try {
// Loads the drawable resource into the global Mat on every frame.
// Without an explicit flag, loadResource does not keep the alpha channel,
// which is why transparency is lost (see the flag table below).
mcSprite = Utils.loadResource(this, R.drawable.icon);
System.out.println(mcSprite.empty());
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
// Scales the sprite into the preview window each frame.
Imgproc.resize(mcSprite, mZoomWindow, mZoomWindow.size());
}
The resulting image:
PS: If i import using:
mcSprite = Utils.loadResource(this, R.drawable.icon , -1);
>0 Return a 3-channel color image.
=0 Return a grayscale image.
<0 Return the loaded image as is (with alpha channel).
no image is displayed.

A much faster solution, using masks:
/**
 * Pastes {@code foreground} over {@code background} wherever the cached mask
 * ({@code mCurrentMask}, built by createMask) is non-zero, and returns the
 * mutated background. Both the sprite and the mask are scaled to the
 * background's size first.
 */
public Mat overlayImage(Mat background, Mat foreground)//, Point location)
{
Mat scaledSprite = new Mat();
Mat scaledMask = new Mat();
Imgproc.resize(foreground, scaledSprite, background.size());
Imgproc.resize(mCurrentMask, scaledMask, background.size());
// Copy only the pixels where the mask is set; everywhere else the
// original background content is kept.
scaledSprite.copyTo(background, scaledMask);
scaledMask.release();
scaledSprite.release();
return background;
}
/**
 * Builds {@code mCurrentMask} from the sprite's alpha channel: any pixel with
 * non-zero opacity is marked (1,1,1,0), fully transparent pixels get (0,0,0,0).
 */
public void createMask (Mat sprite){
// 24 is the raw 4-channel 8-bit type code (presumably CvType.CV_8UC4 — verify).
mCurrentMask = new Mat(sprite.height(), sprite.width(), 24);
double opaque[] = {1, 1, 1, 0};
double transparent[] = {0, 0, 0, 0};
int rowCount = sprite.rows();
int colCount = sprite.cols();
for (int row = 0; row < rowCount; ++row) {
for (int col = 0; col < colCount; ++col) {
double pixel[] = sprite.get(row, col);
// Channel 3 is alpha; any non-zero alpha counts as visible.
if (pixel[3] > 0) {
mCurrentMask.put(row, col, opaque);
} else {
mCurrentMask.put(row, col, transparent);
}
}
}
}

Solved:
public void overlayImage(Mat background, Mat foreground,Mat output)//, Point location)
{
background.copyTo(output);
Mat dst = new Mat();
Imgproc.resize(foreground, dst, background.size());
double alpha;
// start at row 0/col 0
for(int y = 0; y < background.rows() ; ++y)
{
for(int x = 0; x < background.cols() ; ++x)
{
double info[] = dst.get(y, x);
alpha = info[3];
// and now combine the background and foreground pixel, using the opacity,but only if opacity > 0.
if(alpha>0) //rude but this is what I need
{
double infof[] = dst.get(y, x);
output.put(y, x, infof);
}
}
}
Final Result:

Related

Create image from double Array with color intensity

I've got a two-dimensional double array with colour intensity values of an image, looking similar to this (my array's size is 256x256, filled with values as below):
790.0 739.0 690.0 601.0 582.0 630.0 730.0 773.0
982.0 879.0 754.0 695.0 687.0 631.0 630.0 666.0
1046.0 1080.0 1070.0 990.0 872.0 730.0 647.0 657.0
1008.0 998.0 962.0 959.0 944.0 930.0 921.0 932.0
Is it possible to create an Image Object out of this file?
My current code:
// Fragment from the question: tries to dump a 2-D intensity array to a png.
Double imageLayer[][] = vtkLayers.get(layer);
int xLenght = imageLayer.length;
int yLength = imageLayer[0].length;
System.out.println(xLenght);
System.out.println(yLength);
// NOTE(review): image type 3 is BufferedImage.TYPE_INT_ARGB_PRE. The rgb
// values composed below leave the alpha byte at 0, so every pixel is fully
// transparent — presumably this is why the output png appears blank.
BufferedImage b = new BufferedImage(xLenght, yLength, 3);
for(int x = 0; x < xLenght; x++) {
for(int y = 0; y < yLength; y++) {
// NOTE(review): intensities like 790.0 exceed 255, so the shifted values
// overflow into the neighbouring colour channels; values must be scaled
// into 0..255 first (see the accepted solution below).
int rgb = (imageLayer[x][y]).intValue() << 16 | (imageLayer[x][y]).intValue() << 8 | (imageLayer[x][y]).intValue();
b.setRGB(x, y, rgb);
}
}
try {
File outputfile = new File("C:\\temp\\image.png");
ImageIO.write(b, "png", outputfile);
}
catch (IOException e){
System.out.println("Could not create picture");
}
To test it I tried to create a png file. But this code currently only produces a blank png File.
As I'm new to Java I've used this post as guide.
It would be the best, if I can create an Image object directly, without creating a png first.
I could solve the problem by myself:
// Working version: intensities are mapped into a valid colour range first.
Double imageLayer[][] = vtkLayers.get(layer);
// Initialize BufferedImage, assuming Color[][] is already properly populated.
// TYPE_INT_RGB has no alpha channel, so pixels are opaque (fixes the blank png).
BufferedImage bufferedImage = new BufferedImage(imageLayer.length, imageLayer[0].length,
BufferedImage.TYPE_INT_RGB);
// Set each pixel of the BufferedImage to the color from the Color[][].
for (int x = 0; x < imageLayer.length; x++) {
for (int y = 0; y < imageLayer[x].length; y++) {
// 4080 is used as the maximum expected intensity — presumably the data's
// full-scale value; verify against the actual data range.
Color test = doubleToColor(imageLayer[x][y], 4080);
bufferedImage.setRGB(x, y, test.getRGB());
}
}
try {
File imageFile = new File(path);
ImageIO.write(bufferedImage, "png", imageFile);
}
catch (Exception e){
System.out.println("Failed to create image");
}
The function doubleToColor converts my doubles to an RGB code:
/**
 * Maps a sample value {@code x} (expected in [-maxx, maxx]) to a colour on a
 * blue -&gt; grey -&gt; yellow gradient: -maxx gives blue (0,0,255), 0 gives
 * mid-grey (128,128,128), and +maxx gives yellow (255,255,0).
 *
 * Fix: each component is clamped to [0,1] so out-of-range samples no longer
 * make the {@link Color} float constructor throw IllegalArgumentException.
 *
 * @param x    sample value to convert
 * @param maxx full-scale magnitude of the data (must be non-zero)
 * @return the colour representing {@code x}
 */
public static Color doubleToColor(double x,double maxx){
    double t = x / (2 * maxx); // normalized offset; in [-0.5, 0.5] for in-range x
    float cR = clamp01((float)(0.5 + t));
    float cG = clamp01((float)(0.5 + t));
    float cB = clamp01((float)(0.5 - t));
    Color c = new Color(cR,cG,cB);
    return c;
}

/** Clamps a colour component into the [0,1] range accepted by Color. */
private static float clamp01(float v){
    return v < 0f ? 0f : (v > 1f ? 1f : v);
}

Implementing shadow-higlight invariant algorithm

I am new to image processing and openCV, i am trying to implement this
Algorithm
found in this paper Shadow And Highlight Invariant Colour
Segmentation Algorithm For Traffic Signs
Here is my code
public class rastara {
/** Maps a hue in [0,360) onto the paper's [0,255] scale. (Unused in main; kept for compatibility.) */
public static double normalizedH(double p) {
double scale=(p-0)/360;
double val = 255*scale;
return val;
}
/** Maps a saturation/value in [0,1] onto [0,255]. (Unused in main; kept for compatibility.) */
public static double normalizedSV(double p) {
double scale= p/1;
double val = 255*scale;
return val;
}
/**
 * Loads the traffic-sign image, converts it to HSV, and applies the paper's
 * shadow/highlight-invariant thresholds to the hue channel.
 */
public static void main (String[] args){
System.loadLibrary( Core.NATIVE_LIBRARY_NAME );
Mat img = Highgui.imread("/Users/speedSign.jpg");
Mat hsv = new Mat();
// Fix 1: imread loads BGR, not RGB — COLOR_RGB2HSV swapped the red and
// blue channels before segmentation.
// Fix 2: the *_FULL variant scales hue to 0..255 (plain BGR2HSV yields
// 0..179 for 8-bit images), matching the paper's 0<=H<=255 thresholds;
// with the 0..179 range the H>240 test below could never fire.
Imgproc.cvtColor(img, hsv, Imgproc.COLOR_BGR2HSV_FULL);
Mat hsvNew = hsv.clone();
for(int i= 0; i<hsv.rows(); i++){
for(int j= 0; j<hsv.cols(); j++){
double[] hsvPixel = hsv.get(i,j);
double H = hsvPixel[0];
double S = hsvPixel[1];
double V = hsvPixel[2];
// Red hue wraps around 0/255: keep both ends of the red band.
if((H>240 && H<=255) || (H>=0 && H<10)) {
hsvPixel[0] = 255;
}
// Too desaturated (washed out) -> treat as background.
if(S<40) {
hsvPixel[0] = 0;
}
// Too dark (shadow) or too bright (highlight) -> treat as background.
if((V<30) || (V>230)) {
hsvPixel[0] = 0;
}
hsvNew.put(i,j,hsvPixel);
}
}
}
}
What I did is convert RGB to HSV using an OpenCV method. I didn't normalize the pixels because, as I understand it, the range of HSV pixels converted by OpenCV matches what the paper requires, namely 0<H<255, 0<S<255, 0<V<255. However, when I ran my code it never changed a pixel's H value to 1 or 255.
I also don't know how to implement the last part of the algorithm.

Convolution Blur Filter doesn't work and brightens the image

I've read about Convolution Filters recently and decided to try it out. I wanted to make code that blurs the image but what it ends up doing is brightening it. I've been looking at my code for some time now and can't find any mistakes. Could anyone help?
Here is my code:
final static int filterHeight = 3;
final static int filterWidth = 3;
// Box-blur kernel (all ones). Its coefficient sum (9) is used below to
// normalize the weighted sum — without that division every output pixel is
// roughly nine times brighter than the input, which is exactly the
// "brightening" the original code produced.
static double filter[][] = new double[][]{
{1,1,1},
{1,1,1},
{1,1,1}
};
/**
 * Reads in.jpg, applies the kernel above with wrap-around edge handling,
 * and writes the blurred result to out.jpg.
 *
 * Fixes over the original: the clamp + setRGB now run once per output pixel
 * AFTER the whole kernel has been accumulated (they were inside the kernel
 * loop), the result is divided by the kernel sum, the missing semicolon on
 * the Color line is restored, and main is properly closed.
 */
public static void main(String[] args) {
    BufferedImage img;
    BufferedImage result;
    try {
        File in = new File("in.jpg");
        File out = new File("out.jpg");
        img = ImageIO.read(in);
        // Cache source pixels so the convolution reads unmodified data.
        Color[][] pixels = new Color[img.getWidth()][img.getHeight()];
        for (int i = 0; i < img.getWidth(); i++) {
            for (int j = 0; j < img.getHeight(); j++) {
                pixels[i][j] = new Color(img.getRGB(i, j), true);
            }
        }
        // Normalization factor: sum of all kernel coefficients.
        double filterSum = 0;
        for (int i = 0; i < filterWidth; i++) {
            for (int j = 0; j < filterHeight; j++) {
                filterSum += filter[i][j];
            }
        }
        if (filterSum == 0) filterSum = 1; // edge/sharpen kernels sum to 0
        result = new BufferedImage(img.getWidth(), img.getHeight(), img.getType());
        for (int x = 0; x < img.getWidth(); x++) {
            for (int y = 0; y < img.getHeight(); y++) {
                double r = 0, g = 0, b = 0;
                for (int i = 0; i < filterWidth; i++) {
                    for (int j = 0; j < filterHeight; j++) {
                        // Wrap around the edges so every tap hits a valid pixel.
                        int imageX = (x - filterWidth / 2 + i + img.getWidth()) % img.getWidth();
                        int imageY = (y - filterHeight / 2 + j + img.getHeight()) % img.getHeight();
                        r += pixels[imageX][imageY].getRed() * filter[i][j];
                        g += pixels[imageX][imageY].getGreen() * filter[i][j];
                        b += pixels[imageX][imageY].getBlue() * filter[i][j];
                    }
                }
                int rr = (int) Math.min(255, Math.max(0, Math.round(r / filterSum)));
                int gg = (int) Math.min(255, Math.max(0, Math.round(g / filterSum)));
                int bb = (int) Math.min(255, Math.max(0, Math.round(b / filterSum)));
                // Preserve the source pixel's alpha in the output.
                Color color = new Color(img.getRGB(x, y), true);
                Color colorBlur = new Color(rr, gg, bb, color.getAlpha());
                result.setRGB(x, y, colorBlur.getRGB());
            }
        }
        ImageIO.write(result, "JPG", out);
    }
    catch (IOException e)
    {
        e.printStackTrace();
    }
}
And here is the image before applying the filter:
And after:
There are two options in order to get the result you desire.
Either you create a filter matrix with a sum of 1 (Like #Spektre mentioned in his comment above) or multiply the pixel's new value with a factor of 1 / sum(filterMatrix).
For a nice beginner tutorial of the concept of blurring I would recommend:
Concept of Blurring - www.TutorialsPoint.com

Parsing Image in Java with different actions for different pixel colours

I want to colour every black pixel in my image with the color the previous pixel had. And if there are 2 or more consecutive black pixels, the colour of the last non-black pixel is taken. I figured out how to iterate through pixels but the pixels won't change their colour. I think I miss the "save changes" line.
Here the code:
/**
 * Replaces every black pixel in C:\blackDots.png with the colour of the most
 * recently seen non-black pixel (row-major scan order), then writes the
 * result to C:\blackDots_modified.png.
 *
 * Fixes over the original: the loops iterated x up to getHeight() and y up to
 * getWidth() — i.e. the coordinates were swapped, throwing out-of-bounds on
 * non-square images and silently skipping those pixels via a catch/continue
 * hack — and the modified image was never written back to disk, so the
 * changes were only ever visible in memory.
 *
 * @throws IOException if the image cannot be read or written
 */
public static void iteratePixels() throws IOException {
    File file = new File("C:\\blackDots.png");
    BufferedImage image = ImageIO.read(file);
    final int black = -16777216;  // 0xFF000000: opaque black
    int lastNotBlack = -1;        // seed: opaque white (0xFFFFFFFF)
    for (int y = 0; y < image.getHeight(); y++) {
        for (int x = 0; x < image.getWidth(); x++) {
            int actualColour = image.getRGB(x, y);
            if (actualColour == black) {
                image.setRGB(x, y, lastNotBlack);
            } else {
                lastNotBlack = actualColour;
            }
        }
    }
    // Persist the changes — setRGB only mutates the in-memory image.
    ImageIO.write(image, "png", new File("C:\\blackDots_modified.png"));
}
So how do I apply the changes? Or is there another mistake?
You're changing the pixels only in the in-memory image, but you need to write those pixels back to a file:
ImageIO.write(image, "png", new File("C:\\blackDots_modified.png"));
(to be called after all pixels have been modified)
See also: https://docs.oracle.com/javase/tutorial/2d/images/saveimage.html

Replace all occurrences of one image inside another image

In Java, I'm trying to replace one BufferedImage inside another BufferedImage.
For example, one image could be replaced with another image inside a larger containing image, so that a new combined image is produced as a result. (The example images from the original post are missing here.)
Would it be possible to write a function that would replace one BufferedImage inside another BufferedImage, and return the resulting BufferedImage?
// NOTE(review): question stub only — it has no return statement, so it does
// not compile as written; the answer below supplies the implementation.
public static BufferedImage replaceInsideBufferedImage(BufferedImage containingImage, BufferedImage toBeReplaced, BufferedImage replaceWithThis){
//In containingImage, replace all occurrences of toBeReplaced with replaceWithThis
}
The method below does the trick. Pseudo-code:
(1) For every pixel of containingImage :
Begin matching the toBeReplaced (pixel by pixel)
If it finds it (all pixels matched), it goes and replaces all of them with replaceWithThis
If not, goes back to (1)
As all patterns will be found, finally, it will return returnImage .
replaceInsideBufferedImage() code:
/**
 * Returns a copy of {@code containingImage} in which every exact
 * pixel-for-pixel occurrence of {@code toBeReplaced} is overwritten with
 * {@code replaceWithThis}. The original image is not modified.
 *
 * Fix: the scan bounds now use {@code <=} so occurrences touching the right
 * or bottom edge are found too — with {@code <} a pattern the size of the
 * whole image was never matched at all.
 *
 * NOTE(review): assumes replaceWithThis has the same dimensions as
 * toBeReplaced; a larger replacement could write out of bounds — verify.
 */
public static BufferedImage replaceInsideBufferedImage(BufferedImage containingImage, BufferedImage toBeReplaced, BufferedImage replaceWithThis) {
BufferedImage returnImage = deepCopyImage(containingImage);
for (int x = 0; x+toBeReplaced.getWidth() <= containingImage.getWidth(); x++) {
for (int y = 0; y+toBeReplaced.getHeight() <= containingImage.getHeight(); y++) {
BufferedImage subImg = containingImage.getSubimage(x, y, toBeReplaced.getWidth(), toBeReplaced.getHeight());
if (imageEquals(subImg,toBeReplaced)) {
// Stamp the replacement into the copy, pixel by pixel.
for (int sx = 0; sx < replaceWithThis.getWidth(); sx++) {
for (int sy = 0; sy < replaceWithThis.getHeight(); sy++) {
returnImage.setRGB(x+sx, y+sy, replaceWithThis.getRGB(sx, sy));
}
}
}
}
}
return returnImage;
}
Full working code:
import java.awt.image.BufferedImage;
import java.awt.image.ColorModel;
import java.awt.image.WritableRaster;
import java.io.File;
import javax.imageio.ImageIO;
/**
 * Finds every exact occurrence of one image inside another and replaces it.
 *
 * Fix: the scan loops in replaceInsideBufferedImage now use {@code <=} so
 * occurrences touching the right or bottom edge of the containing image are
 * found too — with {@code <} a pattern the size of the whole image was never
 * matched at all.
 */
public class ReplacePattern {
public static void main(String[] args) throws Exception {
BufferedImage containingImage = ImageIO.read(new File("fourWhites.png"));
BufferedImage toBeReplaced = ImageIO.read(new File("oneWhite.png"));
BufferedImage replaceWithThis = ImageIO.read(new File("oneRed.png"));
BufferedImage replaced = replaceInsideBufferedImage(containingImage, toBeReplaced, replaceWithThis);
ImageIO.write(replaced, "png", new File("fourReds.png"));
}
/**
 * Returns a copy of containingImage with every pixel-exact occurrence of
 * toBeReplaced overwritten by replaceWithThis. The input is not modified.
 * NOTE(review): assumes replaceWithThis has the same dimensions as
 * toBeReplaced; a larger replacement could write out of bounds — verify.
 */
public static BufferedImage replaceInsideBufferedImage(BufferedImage containingImage, BufferedImage toBeReplaced, BufferedImage replaceWithThis) {
BufferedImage returnImage = deepCopyImage(containingImage);
// <= (not <) so edge-touching occurrences are scanned as well.
for (int x = 0; x+toBeReplaced.getWidth() <= containingImage.getWidth(); x++) {
for (int y = 0; y+toBeReplaced.getHeight() <= containingImage.getHeight(); y++) {
BufferedImage subImg = containingImage.getSubimage(x, y, toBeReplaced.getWidth(), toBeReplaced.getHeight());
if (imageEquals(subImg,toBeReplaced)) {
// Stamp the replacement into the copy, pixel by pixel.
for (int sx = 0; sx < replaceWithThis.getWidth(); sx++) {
for (int sy = 0; sy < replaceWithThis.getHeight(); sy++) {
returnImage.setRGB(x+sx, y+sy, replaceWithThis.getRGB(sx, sy));
}
}
}
}
}
return returnImage;
}
// Deep copy so the caller's image is never mutated.
// http://stackoverflow.com/a/3514297/1850609
public static BufferedImage deepCopyImage(BufferedImage bi) {
ColorModel cm = bi.getColorModel();
boolean isAlphaPremultiplied = cm.isAlphaPremultiplied();
WritableRaster raster = bi.copyData(null);
return new BufferedImage(cm, raster, isAlphaPremultiplied, null);
}
// Pixel-by-pixel equality; images of different sizes are never equal.
// http://stackoverflow.com/a/11006474/1850609
private static boolean imageEquals(BufferedImage image1, BufferedImage image2) {
int width;
int height;
boolean imagesEqual = true;
if( image1.getWidth() == ( width = image2.getWidth() ) &&
image1.getHeight() == ( height = image2.getHeight() ) ){
for(int x = 0;imagesEqual == true && x < width; x++){
for(int y = 0;imagesEqual == true && y < height; y++){
if( image1.getRGB(x, y) != image2.getRGB(x, y) ){
imagesEqual = false;
}
}
}
}else{
imagesEqual = false;
}
return imagesEqual;
}
}
It would be possible, however I would not suggest doing that.
Detecting whether an image is present within another image will be very slow.
Moreover, due to possible encoding artifacts, it is possible that an image will never be detected in another image. In such case you would have to implement a more flexible detection function, which will take even longer and may lead to false positives.
Most likely, you have the data to rebuild the image from scratch. Instead of operating on images simply get the data used to generate the initial image and create a new one based on it.
But if you really need to do it, you will have to loop over both images and compare pixels. Use getRGB() function and compare the images pixel by pixel.

Categories