I am trying to determine a way to change the pixel color of my masks from black to a different color. Unfortunately, I have not been able to find a way to do this. Essentially, what I am trying to do is take this image:
and convert the black portions to a color with values (255, 160, 130). I have tried several methods to achieve this, including drawContours, setTo, and looping through the matrix. Unfortunately, all of these attempts have failed. I have included the code and the resulting outcomes below.
Draw Contours method
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
Mat img = Imgcodecs.imread(
"C:\\Users\\Hassan\\Documents\\School\\Me\\COMP5900 Y\\Project\\Project\\src\\resources\\face.jpg");
Mat img_grey = new Mat();
Mat grad = new Mat(), grad_x = new Mat(), grad_y = new Mat();
Mat abs_grad_x = new Mat(), abs_grad_y = new Mat();
int ddepth = CvType.CV_32F;
int scale = 1;
int delta = 0;
Imgproc.GaussianBlur(img, img, new Size(3, 3), 0, 0, Core.BORDER_CONSTANT);
Imgproc.cvtColor(img, img_grey, Imgproc.COLOR_BGR2GRAY);
// Apply Sobel
Imgproc.Sobel(img_grey, grad_x, ddepth, 1, 0, 3, scale, delta, Core.BORDER_DEFAULT);
Imgproc.Sobel(img_grey, grad_y, ddepth, 0, 1, 3, scale, delta, Core.BORDER_DEFAULT);
// converting back to CV_8U
Core.convertScaleAbs(grad_x, abs_grad_x);
Core.convertScaleAbs(grad_y, abs_grad_y);
// Total Gradient (approximate)
Core.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad);
Photo.fastNlMeansDenoising(grad, grad);
Imgproc.GaussianBlur(grad, grad, new Size(3, 3), 0, 0, Core.BORDER_CONSTANT);
// isolate background
Mat background = new Mat();
Imgproc.threshold(grad, background, 2, 255, Imgproc.THRESH_BINARY);
// draw contours
List<MatOfPoint> contours = new ArrayList<>();
Mat hierarchy = new Mat();
Imgproc.findContours(background, contours, hierarchy, Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_NONE);
Mat drawing = Mat.zeros(background.size(), CvType.CV_8UC3);
List<MatOfPoint> hullList = new ArrayList<>();
for (MatOfPoint contour : contours) {
    MatOfInt hull = new MatOfInt();
    Imgproc.convexHull(contour, hull);
    Point[] contourArray = contour.toArray();
    Point[] hullPoints = new Point[hull.rows()];
    List<Integer> hullContourIdxList = hull.toList();
    for (int i = 0; i < hullContourIdxList.size(); i++) {
        hullPoints[i] = contourArray[hullContourIdxList.get(i)];
    }
    hullList.add(new MatOfPoint(hullPoints));
}
for (int i = 0; i < contours.size(); i++) {
    Scalar color = new Scalar(255, 160, 130);
    Imgproc.drawContours(drawing, contours, i, color);
    //Imgproc.drawContours(drawing, hullList, i, color);
}
Note that I also tried Imgproc.RETR_EXTERNAL, but that produced a completely black image. Also, the HighGui window is named "flood fill"; I just forgot to update the name.
setTo
// replace find and draw contours portion of code above
Mat out = new Mat();
background.copyTo(out);
out.setTo(new Scalar(255, 160, 130), background);
Iterating through matrix
// replace draw contours portion of code above
for (int a = 0; a < background.rows(); a++) {
    for (int b = 0; b < background.cols(); b++) {
        if (background.get(a, b)[0] == 0) {
            //background.put(a, b, CvType.CV_16F, new Scalar(255, 160, 130));
            double[] data = {255, 160, 130};
            background.put(a, b, data);
        }
    }
}
The loop is promising, but I know it will not be efficient, as I have two other masks that I would like to update as well. Could you please suggest an efficient method that allows me to set the value for all three channels?
Thanks
I am not sure why you are doing so many operations on the image, but to me it looks like you simply want to apply a mask and replace a color efficiently. If there are other complexities, please let me know.
Below is the code you are looking for, in Java.
public static void main(String s[]) {
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    Mat matr = Imgcodecs.imread("/home/shariq/Desktop/test.png");
    Mat result = new Mat();
    // create a mask based on range
    Core.inRange(matr, new Scalar(0), new Scalar(50), result);
    Imgcodecs.imwrite("/home/shariq/Desktop/test_in.png", result);
    // apply the mask with the color you are looking for; note the Scalar is in BGR order
    matr.setTo(new Scalar(130, 160, 255), result);
    Imgcodecs.imwrite("/home/shariq/Desktop/result.png", matr);
}
We create a mask for pixel values between 0 and 50 (i.e. black) using the inRange method.
Core.inRange(matr, new Scalar(0), new Scalar(50), result);
The mask in result is then applied to the original matrix using the setTo method. The replacement color is provided through a Scalar object in BGR order (the channel order used by images loaded with Imgcodecs.imread), so new Scalar(a, b, c) corresponds to Blue = a, Green = b and Red = c.
matr.setTo(new Scalar(130,160,255),result);
It's quite fast compared to iterating over the pixels one by one.
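One detail worth double-checking (my assumption based on how OpenCV pads Scalars): a single-value Scalar such as new Scalar(50) becomes (50, 0, 0, 0), so on a 3-channel image inRange will also require the green and red channels to be exactly 0. If the black regions are not perfectly black (for example after JPEG compression), spelling out the bounds per channel should be safer. A minimal untested sketch, assuming the usual org.opencv imports and a BGR image loaded with Imgcodecs.imread:
Mat img = Imgcodecs.imread("/home/shariq/Desktop/test.png");
Mat mask = new Mat();
// every channel must lie in [0, 50] for a pixel to count as "black"
Core.inRange(img, new Scalar(0, 0, 0), new Scalar(50, 50, 50), mask);
// recolor the masked pixels; the Scalar is in BGR order, so this is RGB (255, 160, 130)
img.setTo(new Scalar(130, 160, 255), mask);
Imgcodecs.imwrite("/home/shariq/Desktop/result.png", img);
The same mask-and-setTo pattern can be reused with different colors for the other two masks mentioned in the question.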
I am trying to scan an MTG card using OpenCV on Android. I can detect the edges of the card and even draw an outline around it, but I am confused about how to extract just the card from the background and then exit the camera preview. Here is my code so far:
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
Mat result = new Mat();
Mat mask = new Mat();
List<MatOfPoint> contours = new ArrayList<>();
Mat hierarchy = new Mat();
mRgba = inputFrame.rgba();
Imgproc.Canny(mRgba, result, 40, 120);
Imgproc.GaussianBlur(result, result, new Size(9,9), 2, 2);
Imgproc.findContours(result, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE, new Point(0,0));
Imgproc.drawContours(mask, contours, -1, new Scalar(0, 255, 0), 1);
hierarchy.release();
for (int contourIdx = 0; contourIdx < contours.size(); contourIdx++) {
    // Minimum size allowed for consideration
    MatOfPoint2f approxCurve = new MatOfPoint2f();
    MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(contourIdx).toArray());
    // Processing on mMOP2f1 which is in type MatOfPoint2f
    double approxDistance = Imgproc.arcLength(contour2f, true) * 0.02;
    Imgproc.approxPolyDP(contour2f, approxCurve, approxDistance, true);
    // Convert back to MatOfPoint
    MatOfPoint points = new MatOfPoint(approxCurve.toArray());
    // Get bounding rect of contour
    Rect rect = Imgproc.boundingRect(points);
    Imgproc.rectangle(mRgba, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(255, 0, 0, 255), 3);
}
Bitmap card = Bitmap.createBitmap(result.cols(), result.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(result, card);
return mRgba;
}
}
Here is an example of what it looks like when I run the code. As you can see, the card is now outlined by a red rectangle, but how do I extract just what's in the rectangle, save it to a Mat or Bitmap, and then exit the camera?
screenshot of outlined card
I didn't test it, but can't you just use...
return new Mat(mRgba, rect);
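Note that rect only exists inside the contour loop, so you would first have to pick which rectangle you want, for example the largest one. A rough, untested sketch of that idea (mOpenCvCameraView is a placeholder for whatever CameraBridgeViewBase instance the activity holds):
Rect best = null;
for (int contourIdx = 0; contourIdx < contours.size(); contourIdx++) {
    MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(contourIdx).toArray());
    MatOfPoint2f approxCurve = new MatOfPoint2f();
    Imgproc.approxPolyDP(contour2f, approxCurve, 0.02 * Imgproc.arcLength(contour2f, true), true);
    Rect rect = Imgproc.boundingRect(new MatOfPoint(approxCurve.toArray()));
    if (best == null || rect.area() > best.area()) {
        best = rect; // keep the largest candidate rectangle
    }
}
if (best != null) {
    Mat card = new Mat(mRgba, best).clone();   // clone so the crop is its own continuous Mat
    Bitmap cardBitmap = Bitmap.createBitmap(card.cols(), card.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(card, cardBitmap);       // cardBitmap now holds just the card
    mOpenCvCameraView.disableView();           // stop the preview once a card has been captured
}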
I'm doing a project using OpenCV in Android Studio.
My project is about comparing the colour histogram of an input image captured by the camera with 6 images stored in the drawables. The OpenCV function I use is compareHist(). The purpose of the comparison is to know whether the input image matches the colour of any of the images in the drawables.
My problem is that I need to calculate the colour histogram with calcHist() for all 6 images every time there is an input image (captured by the camera) so that I can compare them. I was thinking of storing the colour histogram values of the 6 images in an array. Can I do so?
I searched for a solution but found none. Here is the code I have so far.
public int resultRecog() {
drawImg = BitmapFactory.decodeResource(getResources(), R.drawable.onefront);
Mat hsv_base = new Mat();
Mat hsv_test = new Mat();
Utils.bitmapToMat(drawImg, hsv_base);
Utils.bitmapToMat(pic, hsv_test);
/// Convert to HSV
Imgproc.cvtColor(hsv_base, hsv_base, Imgproc.COLOR_BGR2HSV);
Imgproc.cvtColor(hsv_test, hsv_test, Imgproc.COLOR_BGR2HSV);
/// h s bins value
MatOfInt histSize = new MatOfInt(40, 40);
//// ranges h=0-180, s=0-256
MatOfFloat ranges = new MatOfFloat(0f, 180f, 0f, 256f);
/// two channel
MatOfInt channels = new MatOfInt(0, 1);
Mat hist_base = new Mat();
Mat hist_test = new Mat();
ArrayList<Mat> histImages = new ArrayList<Mat>();
histImages.add(hsv_base);
Imgproc.calcHist(histImages, channels, new Mat(), hist_base, histSize, ranges, false);
Core.normalize(hist_base, hist_base, 0, 1, Core.NORM_MINMAX, -1, new Mat());
histImages = new ArrayList<Mat>();
histImages.add(hsv_test);
Imgproc.calcHist(histImages, channels, new Mat(), hist_test, histSize, ranges, false);
Core.normalize(hist_test, hist_test, 0, 1, Core.NORM_MINMAX, -1, new Mat());
double result = Imgproc.compareHist(hist_base, hist_test, 0);
drawImg.recycle();
drawImg = null;
int r;
if (result >= 0.15) {
    r = 1;
} else {
    drawImg = BitmapFactory.decodeResource(getResources(), R.drawable.fivefront);
    Mat hsv_base5 = new Mat();
    Utils.bitmapToMat(drawImg, hsv_base5);
    Imgproc.cvtColor(hsv_base5, hsv_base5, Imgproc.COLOR_BGR2HSV);
    Mat hist_base5 = new Mat();
    ArrayList<Mat> histImages5 = new ArrayList<Mat>();
    histImages5.add(hsv_base5);
    Imgproc.calcHist(histImages5, channels, new Mat(), hist_base5, histSize, ranges, false);
    Core.normalize(hist_base5, hist_base5, 0, 1, Core.NORM_MINMAX, -1, new Mat());
    result = Imgproc.compareHist(hist_base5, hist_test, 0);
    drawImg.recycle();
    drawImg = null;
    if (result >= 0.1) {
        r = 5;
    } else {
        drawImg = BitmapFactory.decodeResource(getResources(), R.drawable.f10);
        Mat hsv_base10 = new Mat();
        Utils.bitmapToMat(drawImg, hsv_base10);
        Imgproc.cvtColor(hsv_base10, hsv_base10, Imgproc.COLOR_BGR2HSV);
        Mat hist_base10 = new Mat();
        ArrayList<Mat> histImages10 = new ArrayList<Mat>();
        histImages10.add(hsv_base10);
        Imgproc.calcHist(histImages10, channels, new Mat(), hist_base10, histSize, ranges, false);
        Core.normalize(hist_base10, hist_base10, 0, 1, Core.NORM_MINMAX, -1, new Mat());
        result = Imgproc.compareHist(hist_base10, hist_test, 0);
        drawImg.recycle();
        drawImg = null;
        if (result >= 0.1) {
            r = 10;
        } else {
            drawImg = BitmapFactory.decodeResource(getResources(), R.drawable.fiftyfront);
            Mat hsv_base50 = new Mat();
            Utils.bitmapToMat(drawImg, hsv_base50);
            Imgproc.cvtColor(hsv_base50, hsv_base50, Imgproc.COLOR_BGR2HSV);
            Mat hist_base50 = new Mat();
            ArrayList<Mat> histImages50 = new ArrayList<Mat>();
            histImages50.add(hsv_base50);
            Imgproc.calcHist(histImages50, channels, new Mat(), hist_base50, histSize, ranges, false);
            Core.normalize(hist_base50, hist_base50, 0, 1, Core.NORM_MINMAX, -1, new Mat());
            result = Imgproc.compareHist(hist_base50, hist_test, 0);
            drawImg.recycle();
            drawImg = null;
            if (result >= 0.1) {
                r = 50;
            } else {
                r = 0;
            }
        } //rm10
    } //rm5
} //rm1
return r;
}
So my question is: can I store the colour histogram values of the 6 drawable images in an array, so that the application doesn't need to recalculate them every time an image is captured by the camera?
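Something like the following is what I have in mind: compute the six base histograms once (for example when the activity starts) and keep them in an array, since the Mat data lives in native memory and can be reused across frames. This is just an untested sketch with the same bins, ranges and channels as my code above, and with the drawable list shortened for brevity:
private Mat[] baseHists;
private final int[] baseIds = {R.drawable.onefront, R.drawable.fivefront, R.drawable.f10, R.drawable.fiftyfront};

private void buildBaseHistograms() {
    MatOfInt histSize = new MatOfInt(40, 40);
    MatOfFloat ranges = new MatOfFloat(0f, 180f, 0f, 256f);
    MatOfInt channels = new MatOfInt(0, 1);
    baseHists = new Mat[baseIds.length];
    for (int i = 0; i < baseIds.length; i++) {
        Bitmap bmp = BitmapFactory.decodeResource(getResources(), baseIds[i]);
        Mat hsv = new Mat();
        Utils.bitmapToMat(bmp, hsv);
        Imgproc.cvtColor(hsv, hsv, Imgproc.COLOR_BGR2HSV);
        ArrayList<Mat> planes = new ArrayList<Mat>();
        planes.add(hsv);
        Mat hist = new Mat();
        Imgproc.calcHist(planes, channels, new Mat(), hist, histSize, ranges, false);
        Core.normalize(hist, hist, 0, 1, Core.NORM_MINMAX, -1, new Mat());
        baseHists[i] = hist;   // cached; only the camera frame's histogram changes per capture
        bmp.recycle();
    }
}
resultRecog() would then only compute hist_test for the captured picture and loop over baseHists with compareHist.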
I've been trying to use OpenCV's findHomography and perspectiveTransform to get the transformation matrix from one set of points to another.
I generate 50 random points for each data set and draw them on the screen. I then ask findHomography to calculate a transformation matrix from the two sets of points and use it to transform the corners of the original set. The result of the perspective transform is drawn on the screen.
The problem is that the matrix findHomography produces is not correct, even when ignoring the issue of outliers. I want to use this example in a feature-matching problem.
Here is my code for testing.
//
Mat outImg = new Mat(500,1000,CvType.CV_32FC3,new Scalar(0,0,0));
//make obj test points
List<Point> p1 = new ArrayList<Point>();
for (int i = 0; i < 50; i++) {
    p1.add(new OpenCVTemplateMatcher().generateRandom2DPoint(50, 50, 450, 450));
}
//make scene test points
List<Point> p2 = new ArrayList<Point>();
for (int i = 0; i < 50; i++) {
    p2.add(new OpenCVTemplateMatcher().generateRandom2DPoint(550, 50, 950, 450));
}
System.out.println(p1.size());
//draw the points
for (Point p : p1) {
    Core.circle(outImg, p, 1, new Scalar(255, 0, 255), 2);
}
for (Point p : p2) {
    Core.circle(outImg, p, 1, new Scalar(0, 255, 0), 2);
}
//find bounding boxes on points and draw them
MatOfPoint2f mp1 = new MatOfPoint2f(); mp1.fromList(p1);
MatOfPoint2f mp2 = new MatOfPoint2f(); mp2.fromList(p2);
RotatedRect r1 = Imgproc.minAreaRect(mp1);
RotatedRect r2 = Imgproc.minAreaRect(mp2);
Point[] v1 = new Point[4]; r1.points(v1);
Point[] v2 = new Point[4]; r2.points(v2);
Core.line(outImg, v1[0], v1[1], new Scalar(0, 255, 0),1);
Core.line(outImg, v1[1], v1[2], new Scalar(0, 255, 0),1);
Core.line(outImg, v1[2], v1[3], new Scalar(0, 255, 0),1);
Core.line(outImg, v1[3], v1[0], new Scalar(0, 255, 0),1);
Core.line(outImg, v2[0], v2[1], new Scalar(255, 255, 0),1);
Core.line(outImg, v2[1], v2[2], new Scalar(255, 255, 0),1);
Core.line(outImg, v2[2], v2[3], new Scalar(255, 255, 0),1);
Core.line(outImg, v2[3], v2[0], new Scalar(255, 255, 0),1);
//show the corners
for (int i = 0; i < 4; i++) {
    Core.circle(outImg, v1[i], 3, new Scalar(200, 250, 50), 2);
    Core.circle(outImg, v2[i], 3, new Scalar(0, 238, 250), 2);
}
MatOfPoint2f p1Corners = new MatOfPoint2f(); p1Corners.fromArray(v1);
MatOfPoint2f p2Corners = new MatOfPoint2f(); p2Corners.fromArray(v2);
//find transform as H
//Mat H = Calib3d.findHomography(p1Corners, p2Corners,Calib3d.RANSAC, 5);
Mat H = Calib3d.findHomography(mp1, mp2,0, 5);
//H = findTransform(mp1,mp2);
//find the transform of H from 1'(original)s corners
Mat orig_corners = new Mat(4,1,CvType.CV_32FC2);
Mat transformed_corners = new Mat(4,1,CvType.CV_32FC2);
orig_corners.put(0, 0, new double[] {v1[0].x,v1[0].y});
orig_corners.put(1, 0, new double[] {v1[1].x,v1[1].y});
orig_corners.put(2, 0, new double[] {v1[2].x,v1[2].y});
orig_corners.put(3, 0, new double[] {v1[3].x,v1[3].y});
Core.perspectiveTransform(orig_corners,transformed_corners,H);
Core.line(outImg, new Point(transformed_corners.get(0,0)), new Point(transformed_corners.get(1,0)), new Scalar(0, 255, 255),4);
Core.line(outImg, new Point(transformed_corners.get(1,0)), new Point(transformed_corners.get(2,0)), new Scalar(0, 255, 255),4);
Core.line(outImg, new Point(transformed_corners.get(2,0)), new Point(transformed_corners.get(3,0)), new Scalar(0, 255, 255),4);
Core.line(outImg, new Point(transformed_corners.get(3,0)), new Point(transformed_corners.get(0,0)), new Scalar(0, 255, 255),4);
Highgui.imwrite("test.jpg", outImg);
And here is a result:
I found that findHomography doesn't work at all given any number of points other than the 4 corners of each set, but then the problem becomes trivial. Am I using it wrong? If I wanted to write my own function for finding a transformation between two point sets, how would I go about doing it?
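For reference, the only way I have been able to get a sensible matrix out of it is when the two arrays really are matched pairs, i.e. point i in the first array is the same physical point as point i in the second. A small untested sanity check along those lines (warp known points with a known matrix, then estimate it back):
// a known homography: pure translation by +500 px in x
Mat trueH = Mat.eye(3, 3, CvType.CV_64F);
trueH.put(0, 2, 500.0);

// source points and their images under trueH -- these ARE correspondences
MatOfPoint2f src = new MatOfPoint2f(new Point(50, 50), new Point(450, 50),
        new Point(450, 450), new Point(50, 450), new Point(250, 250));
MatOfPoint2f dst = new MatOfPoint2f();
Core.perspectiveTransform(src, dst, trueH);

// estimating from the matched pairs recovers (approximately) trueH
Mat H = Calib3d.findHomography(src, dst, 0, 5);
System.out.println(H.dump());
This makes me suspect the random clouds fail because they have no such pairing, but I would like confirmation.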
Actually, I want to find the dominant colour in the image, so I want to compute the HSV histogram of the image and then filter out the other colours. However, I don't know how to do this on the Java platform using OpenCV; I can only find the code in C++. Thank you.
Mat image = Highgui.imread("binary07.jpg");
//Mat src = new Mat(image.height(), image.width(), CvType.CV_8UC2);
Imgproc.cvtColor(image, image, Imgproc.COLOR_RGB2GRAY);
List<Mat> hsv_planes = new ArrayList<Mat>();
Core.split(image, hsv_planes);
MatOfInt histSize = new MatOfInt(256);
final MatOfFloat histRange = new MatOfFloat(0f, 256f);
boolean accumulate = false;
Mat h_hist = new Mat();
Mat s_hist = new Mat();
Mat v_hist = new Mat();
// the error appears in the following lines
Imgproc.calcHist((List<Mat>) hsv_planes.get(0), new MatOfInt(3), new Mat(), h_hist, histSize, histRange, accumulate);
Imgproc.calcHist((List<Mat>) hsv_planes.get(1), new MatOfInt(3), new Mat(), s_hist, histSize, histRange, accumulate);
Imgproc.calcHist((List<Mat>) hsv_planes.get(2), new MatOfInt(3), new Mat(), v_hist, histSize, histRange, accumulate);
int hist_w = 512;
int hist_h = 600;
long bin_w = Math.round((double) hist_w / 256);
//bin_w = Math.round((double) (hist_w / 256));
Mat histImage = new Mat(hist_h, hist_w, CvType.CV_8UC1);
Core.normalize(h_hist, h_hist, 3, histImage.rows(), Core.NORM_MINMAX);
Core.normalize(s_hist, s_hist, 3, histImage.rows(), Core.NORM_MINMAX);
Core.normalize(v_hist, v_hist, 3, histImage.rows(), Core.NORM_MINMAX);
for (int i = 1; i < 256; i++) {
    Point p1 = new Point(bin_w * (i - 1), hist_h - Math.round(h_hist.get(i - 1, 0)[0]));
    Point p2 = new Point(bin_w * (i), hist_h - Math.round(h_hist.get(i, 0)[0]));
    Core.line(histImage, p1, p2, new Scalar(255, 0, 0), 2, 8, 0);
    Point p3 = new Point(bin_w * (i - 1), hist_h - Math.round(s_hist.get(i - 1, 0)[0]));
    Point p4 = new Point(bin_w * (i), hist_h - Math.round(s_hist.get(i, 0)[0]));
    Core.line(histImage, p3, p4, new Scalar(0, 255, 0), 2, 8, 0);
    Point p5 = new Point(bin_w * (i - 1), hist_h - Math.round(v_hist.get(i - 1, 0)[0]));
    Point p6 = new Point(bin_w * (i), hist_h - Math.round(v_hist.get(i, 0)[0]));
    Core.line(histImage, p5, p6, new Scalar(0, 0, 255), 2, 8, 0);
}
Highgui.imwrite("histogram.jpg", histImage);
I don't know how to get the output after the split function.
References:
http://docs.opencv.org/java/
http://docs.opencv.org/doc/tutorials/imgproc/histograms/histogram_calculation/histogram_calculation.html
In the code, the color conversion:
Imgproc.cvtColor(image, image, Imgproc.COLOR_RGB2GRAY);
should be to HSV not gray:
Imgproc.cvtColor(image, image, Imgproc.COLOR_BGR2HSV);
In your example you will only have one (gray) plane instead of the 3 HSV channels, which will give errors when you access the 2nd and 3rd planes.
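Also, calcHist in the Java binding takes a List<Mat> as its first argument, so casting a single Mat to List<Mat> will not work; wrap each plane in a list and pass channel index 0, since each plane is single-channel. Untested, but something like this should work for the hue histogram:
Imgproc.cvtColor(image, image, Imgproc.COLOR_BGR2HSV);
List<Mat> hsv_planes = new ArrayList<Mat>();
Core.split(image, hsv_planes);                     // 0 = H, 1 = S, 2 = V

MatOfInt histSize = new MatOfInt(256);
MatOfFloat histRange = new MatOfFloat(0f, 256f);   // note: hue itself only spans 0-179
Mat h_hist = new Mat();

List<Mat> huePlane = new ArrayList<Mat>();
huePlane.add(hsv_planes.get(0));                   // wrap the single plane in a list
Imgproc.calcHist(huePlane, new MatOfInt(0), new Mat(), h_hist, histSize, histRange, false);
The same pattern with hsv_planes.get(1) and hsv_planes.get(2) gives the saturation and value histograms.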
Here is the code for comparing the histogram of a source image to a reference image, for OpenCV 2.4.11 Java (Android).
// Assume SourceImage is a Bitmap ARGB_8888
BitmapFactory.Options options = new BitmapFactory.Options();
options.inPreferredConfig = Bitmap.Config.ARGB_8888;
Bitmap refImage = BitmapFactory.decodeFile(mBaseDir + "some_reference.jpg", options);
Mat hsvRef = new Mat();
Mat hsvSource = new Mat();
Mat srcRef = new Mat(refImage.getHeight(), refImage.getWidth(), CvType.CV_8U, new Scalar(4));
Utils.bitmapToMat(refImage, srcRef);
Mat srcSource = new Mat(SourceImage.getHeight(), SourceImage.getWidth(), CvType.CV_8U, new Scalar(4));
Utils.bitmapToMat(SourceImage, srcSource);
/// Convert to HSV
Imgproc.cvtColor(srcRef, hsvRef, Imgproc.COLOR_BGR2HSV);
Imgproc.cvtColor(srcSource, hsvSource, Imgproc.COLOR_BGR2HSV);
/// Using 50 bins for hue and 60 for saturation
int hBins = 50;
int sBins = 60;
MatOfInt histSize = new MatOfInt( hBins, sBins);
// hue varies from 0 to 179, saturation from 0 to 255
MatOfFloat ranges = new MatOfFloat( 0f,180f,0f,256f );
// we compute the histogram from the 0-th and 1-st channels
MatOfInt channels = new MatOfInt(0, 1);
Mat histRef = new Mat();
Mat histSource = new Mat();
ArrayList<Mat> histImages=new ArrayList<Mat>();
histImages.add(hsvRef);
Imgproc.calcHist(histImages,
channels,
new Mat(),
histRef,
histSize,
ranges,
false);
Core.normalize(histRef,
histRef,
0,
1,
Core.NORM_MINMAX,
-1,
new Mat());
histImages=new ArrayList<Mat>();
histImages.add(hsvSource);
Imgproc.calcHist(histImages,
channels,
new Mat(),
histSource,
histSize,
ranges,
false);
Core.normalize(histSource,
histSource,
0,
1,
Core.NORM_MINMAX,
-1,
new Mat());
double resp1 = Imgproc.compareHist(histRef, histSource, 0); // CV_COMP_CORREL
double resp2 = Imgproc.compareHist(histRef, histSource, 1); // CV_COMP_CHISQR
double resp3 = Imgproc.compareHist(histRef, histSource, 2); // CV_COMP_INTERSECT
double resp4 = Imgproc.compareHist(histRef, histSource, 3); // CV_COMP_BHATTACHARYYA
The following code works fine for a single channel. You only have to make a few modifications to add the other two channels (see the sketch after the code).
//Calculate histogram
java.util.List<Mat> matList = new LinkedList<Mat>();
matList.add(imageIR_gray);
Mat histogram = new Mat();
MatOfFloat ranges=new MatOfFloat(0,256);
MatOfInt histSize = new MatOfInt(255);
Imgproc.calcHist(
matList,
new MatOfInt(0),
new Mat(),
histogram ,
histSize ,
ranges);
// Create space for histogram image
Mat histImage = Mat.zeros( 100, (int)histSize.get(0, 0)[0], CvType.CV_8UC1);
// Normalize histogram
Core.normalize(histogram, histogram, 1, histImage.rows() , Core.NORM_MINMAX, -1, new Mat() );
// Draw lines for histogram points
for (int i = 0; i < (int) histSize.get(0, 0)[0]; i++) {
    Core.line(
            histImage,
            new org.opencv.core.Point(i, histImage.rows()),
            new org.opencv.core.Point(i, histImage.rows() - Math.round(histogram.get(i, 0)[0])),
            new Scalar(255, 255, 255),
            1, 8, 0);
}