Good night. I'm using this code to find and crop the letters in my image. However, I get this error:
OpenCV Error: Assertion failed (0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols && 0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.rows) in cv::Mat::Mat
I do not know how to fix it. I've already searched around for this, but I haven't found a solution. Can anyone help me?
Mat image = Imgcodecs.imread("C:\\Users\\Me\\Desktop\\Programs\\Image2.png", Imgcodecs.CV_LOAD_IMAGE_GRAYSCALE);
// clone the image
Mat original = image.clone();
// thresholding the image to make a binary image
Imgproc.threshold(image, image, 220, 60, Imgproc.THRESH_BINARY_INV);
// find the center of the image
double[] centers = {(double)image.width()/2, (double)image.height()/2};
Point image_center = new Point(centers);
// finding the contours
ArrayList<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(image, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
// finding the bounding rectangle of the contour that is closest to the image center
double d_min = Double.MAX_VALUE;
Rect rect_min = new Rect();
for (MatOfPoint contour : contours) {
Rect rec = Imgproc.boundingRect(contour);
// find the best candidates
if (rec.height > image.height()/2 & rec.width > image.width()/2)
continue;
Point pt1 = new Point((double)rec.x, (double)rec.y);
Point center = new Point(rec.x+(double)(rec.width)/2, rec.y + (double)(rec.height)/2);
double d = Math.sqrt(Math.pow((double)(pt1.x-image_center.x),2) + Math.pow((double)(pt1.y -image_center.y), 2));
if (d < d_min)
{
d_min = d;
rect_min = rec;
}
}
// slicing the image for result region
int pad = 5;
rect_min.x = rect_min.x - pad;
rect_min.y = rect_min.y - pad;
rect_min.width = rect_min.width + 2*pad;
rect_min.height = rect_min.height + 2*pad;
Mat result = original.submat(rect_min);
Imgcodecs.imwrite("C:\\Users\\Me\\Desktop\\Programs\\result.png", result);
My IDE is pointing at this line:
Mat result = original.submat(rect_min);
It is most likely that rect_min ends up with coordinates that are either negative (because of rect_min.x = rect_min.x - pad;) or that extend past the original image (because rect_min.width = rect_min.width + 2*pad; can make rect_min.x + rect_min.width > original.cols()).
A possible fix is to crop the original image with the unmodified rect_min and then, if you want padding, use copyMakeBorder.
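For example, a minimal sketch of that idea, reusing the variables from the question (the border value of 255 is just a guess, assuming a white background on the grayscale image):
Mat cropped = original.submat(rect_min);   // rect_min straight from boundingRect, no padding applied
Mat result = new Mat();
Core.copyMakeBorder(cropped, result, pad, pad, pad, pad,
        Core.BORDER_CONSTANT, new Scalar(255));   // add the padding outside the crop instead
Imgcodecs.imwrite("C:\\Users\\Me\\Desktop\\Programs\\result.png", result);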
That means the boundaries of rect_min are going beyond the boundaries of the original image.
Maybe the padding is making it so?
You should print out the size of the original image and the size of rect_min to find out.
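If you do want to keep the padding, one option (my suggestion, not something from the question) is to clamp the padded rectangle to the image bounds before calling submat, with rect_min taken straight from the loop:
int x1 = Math.max(rect_min.x - pad, 0);
int y1 = Math.max(rect_min.y - pad, 0);
int x2 = Math.min(rect_min.x + rect_min.width + pad, original.cols());
int y2 = Math.min(rect_min.y + rect_min.height + pad, original.rows());
Mat result = original.submat(new Rect(x1, y1, x2 - x1, y2 - y1));   // the ROI can no longer leave the image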
Related
I am trying to detect and crop circular/elliptical shapes of different sizes.
This is an example of an image I am trying to do the detection and cropping on.
Input Image
The result I am trying to get in the aforementioned image is 3 cropped images
looking like this:
segmented part 1, segmented part 2, segmented part 3
Another image could look like this: different image
Just like the previous image, I am trying to do the same to this one.
The shapes are dramatically smaller than in the first one.
Can this be achieved algorithmically or should I look for a machine learning-like solution?
Note: The final image has had the following filters applied: Gaussian Blur, Grayscale, Threshold, Contour and Morphological Dilation.
[EDIT]
The code I have written (not working as intended):
findReference() finds a shape in the middle of the image and returns its rectangle.
private Rect findReference(Mat inputImage) {
// clone the image
Mat original = inputImage.clone();
// find the center of the image
double[] centers = {(double)inputImage.width()/2, (double)inputImage.height()/2};
Point image_center = new Point(centers);
// finding the contours
ArrayList<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(inputImage, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
// finding the bounding rectangle of the contour that is closest to the image center
double d_min = Double.MAX_VALUE;
Rect rect_min = new Rect();
for (MatOfPoint contour : contours) {
Rect rec = Imgproc.boundingRect(contour);
// find the best candidates
if (rec.height > inputImage.height()/2 & rec.width > inputImage.width()/2){
continue;
}
Point pt1 = new Point((double)rec.x, (double)rec.y);
Point center = new Point(rec.x+(double)(rec.width)/2, rec.y + (double)(rec.height)/2);
double d = Math.sqrt(Math.pow((double)(pt1.x-image_center.x),2) + Math.pow((double)(pt1.y -image_center.y), 2));
if (d < d_min)
{
d_min = d;
rect_min = rec;
}
}
// showReference( rect_min, original);
return rect_min;
}
I use the rectangle as reference and create a bigger one and a smaller one, so that similar shapes fit in the dimensions of the smaller and bigger rectangle.
findAllEllipses() tries to find similar shapes fitting in the smaller and bigger rectangles. After that it draws ellipses around the found shapes.
private Mat findAllEllipses(Rect referenceRect, Mat inputImage) {
float per = 0.5f;
float perSquare = 0.05f;
Rect biggerRect = new Rect();
Rect smallerRect = new Rect();
biggerRect.width = (int) (referenceRect.width / per);
biggerRect.height = (int) (referenceRect.height / per);
smallerRect.width = (int) (referenceRect.width * per);
smallerRect.height = (int) (referenceRect.height * per);
System.out.println("reference rectangle height: " + referenceRect.height + " width: " + referenceRect.width);
System.out.println("[" + 0 +"]: biggerRect.height: " + biggerRect.height + " biggerRect.width: " + biggerRect.width);
System.out.println("[" + 0 +"]: smallerRect.height: " + smallerRect.height + " smallerRect.width: " + smallerRect.width);
//Finding Contours
List<MatOfPoint> contours = new ArrayList<>();
Mat hierarchy = new Mat();
Imgproc.findContours(inputImage, contours, hierarchy, Imgproc.RETR_TREE,
Imgproc.CHAIN_APPROX_SIMPLE);
System.out.println("the numbers of found contours is: " + contours.size());
int sum = 0;
//Empty rectangle
RotatedRect[] rec = new RotatedRect[contours.size()];
for (int i = 0; i < contours.size(); i++) {
rec[i] = new RotatedRect();
if(contours.get(i).toArray().length >= 5 ){
Rect foundRect = Imgproc.boundingRect(contours.get(i));
// Rect foundBigger = new Rect();
// Rect foundSmaller = new Rect();
//
// foundBigger.width = (int) (foundBigger.width + foundBigger.width * per);
// foundBigger.height = (int) (foundBigger.height + foundBigger.height * per);
//
// foundSmaller.width = (int) (foundRect.width - foundRect.width * per);
// foundSmaller.height = (int) (foundRect.height - foundRect.height * per);
if (
(biggerRect.height >= foundRect.height && biggerRect.width >= foundRect.width)
&& (smallerRect.height <= foundRect.height && smallerRect.width <= foundRect.width)
&& (((foundRect.width - foundRect.width * perSquare) <= foundRect.height) && ((foundRect.width + foundRect.width * perSquare) >= foundRect.height))
&& (((foundRect.height - foundRect.height * perSquare) <= foundRect.width) && ((foundRect.height + foundRect.height * perSquare) >= foundRect.width))
) {
System.out.println("[" + i +"]: foundRect.width: " + foundRect.width + " foundRect.height: " + foundRect.height);
System.out.println("----------------");
rec[i] = Imgproc.fitEllipse(new MatOfPoint2f(contours.get(i).toArray()));
sum++;
}
}
Scalar color_elli = new Scalar(190, 0, 0);
Imgproc.ellipse(inputImage, rec[i], color_elli, 5);
}
System.out.println("found ellipses: " + sum);
// trytest(ImageUtils.doResizeMat(outputImage),0,0);
return inputImage;
}
Unfortunately, there are several variables hardcoded into the method.
This one is used to make the smaller and bigger rectangles (used as a percentage):
float per = 0.5f;
perSquare is used to keep only shapes that are close to a square (it allows the width and height to fluctuate a little):
float perSquare = 0.05f;
This code might work on some images, while on others it will not find a single shape; as I mentioned, the shapes are circular/elliptical and of different sizes.
I want to use OpenCV to find the first red pixel and cut off the rest of the picture to the right of it.
For the moment I have written this code, but it works very slowly:
int firstRedPixel = mat.Cols();
int len = 0;
for (int x = 0; x < mat.Rows(); x++)
{
for (int y = 0; y < mat.Cols(); y++)
{
double[] rgb = mat.Get(x, y);
double r = rgb[0];
double g = rgb[1];
double b = rgb[2];
if ((r > 175) && (r > 2 * g) && (r > 2 * b))
{
if (len == 3)
{
firstRedPixel = y - len;
break;
}
len++;
}
else
{
len = 0;
}
}
}
Any solutions?
You can:
1) find red pixels (see here)
2) get the bounding box of red pixels
3) crop your image
The code is in C++, but it's only OpenCV functions so it should not be difficult to port to Java:
#include <opencv2/opencv.hpp>
int main()
{
cv::Mat3b img = cv::imread("path/to/img");
// Find red pixels
// https://stackoverflow.com/a/32523532/5008845
cv::Mat3b bgr_inv = ~img;
cv::Mat3b hsv_inv;
cv::cvtColor(bgr_inv, hsv_inv, cv::COLOR_BGR2HSV);
cv::Mat1b red_mask;
inRange(hsv_inv, cv::Scalar(90 - 10, 70, 50), cv::Scalar(90 + 10, 255, 255), red_mask); // Cyan is 90
// Get the rect
std::vector<cv::Point> red_points;
cv::findNonZero(red_mask, red_points);
cv::Rect red_area = cv::boundingRect(red_points);
// Show green rectangle on red area
cv::Mat3b out = img.clone();
cv::rectangle(out, red_area, cv::Scalar(0, 255, 0));
// Define the non red area
cv::Rect not_red_area;
not_red_area.x = 0;
not_red_area.y = 0;
not_red_area.width = red_area.x - 1;
not_red_area.height = img.rows;
// Crop away red area
cv::Mat3b result = img(not_red_area);
return 0;
}
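For reference, here is a rough (untested) Java port of the above using the OpenCV Java bindings; the file paths are placeholders:
import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

public class CropRightOfRed {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat img = Imgcodecs.imread("path/to/img");

        // Find red pixels: invert BGR so red becomes cyan (hue ~90), then threshold in HSV
        Mat bgrInv = new Mat();
        Core.bitwise_not(img, bgrInv);
        Mat hsvInv = new Mat();
        Imgproc.cvtColor(bgrInv, hsvInv, Imgproc.COLOR_BGR2HSV);
        Mat redMask = new Mat();
        Core.inRange(hsvInv, new Scalar(90 - 10, 70, 50), new Scalar(90 + 10, 255, 255), redMask); // Cyan is 90

        // Bounding rect of all red pixels
        MatOfPoint redPoints = new MatOfPoint();
        Core.findNonZero(redMask, redPoints);
        Rect redArea = Imgproc.boundingRect(redPoints);

        // Keep only the part of the image to the left of the red area
        // (guard against a zero-width ROI if red starts at the left edge)
        Rect notRedArea = new Rect(0, 0, Math.max(redArea.x - 1, 1), img.rows());
        Mat result = img.submat(notRedArea);
        Imgcodecs.imwrite("path/to/result.png", result);
    }
}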
This is not the way to work with computer vision. I know this, because I did it the same way.
One way to achieve your goal would be to use template matching with a red bar that you cut out of your image, and thus locate the red border, and cut it away.
Another would be to convert to HSV space, filter for the red content, and use contour finding to locate a large red structure, as you need it.
There are plenty of ways to do this. Looping over pixel values yourself is rarely the right approach though, and you won't take advantage of vectorisation or more sophisticated algorithms that way.
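Here is a rough Java sketch of the template-matching idea from the first suggestion above (assuming img is your loaded image and template.png is a small red-bar crop you saved beforehand; both names are placeholders):
Mat templ = Imgcodecs.imread("template.png");
Mat scores = new Mat();
Imgproc.matchTemplate(img, templ, scores, Imgproc.TM_CCOEFF_NORMED);
Core.MinMaxLocResult best = Core.minMaxLoc(scores);               // best.maxLoc is the top-left corner of the match
Rect keep = new Rect(0, 0, Math.max((int) best.maxLoc.x, 1), img.rows());
Mat cropped = img.submat(keep);                                   // everything to the left of the red bar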
I have a problem with character segmentation from a water meter.
I have an image of a water meter.
Then I thresholded this image: after threshold
And I got the register meter after segmentation.
My problem is extracting each character from the register meter.
Here is my code:
System.out.println("Character Segmentation the image at " + path + "... ");
// get the jpeg image from the internal resource folder
Mat image = MatImageFromPath;
// convert the image in gray scale
Imgproc.cvtColor(image, image, Imgproc.COLOR_BGR2GRAY);
// thresholding the image to make a binary image
Imgproc.threshold(image, image, 100, 255, Imgproc.THRESH_BINARY_INV);
// finding the contours
ArrayList<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(image, contours, hierarchy, Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_SIMPLE);
Imgproc.drawContours(image, contours, 3, new Scalar(0, 255, 0),3);
// collect the bounding rectangle of each contour
Rect rect = new Rect();
ArrayList<Rect> contourRects = new ArrayList<Rect>();
Point p1 = new Point(rect.x,rect.y);
Point p2 = new Point((rect.x+rect.width),(rect.y+rect.height));
int i=0;
for (MatOfPoint contour : contours) {
rect = Imgproc.boundingRect(contour);
contourRects.add(rect);
MatOfPoint2f contour2f = new MatOfPoint2f( contours.get(i).toArray() );
i++;
}
Collections.sort(contourRects, new Comparator<Rect>(){
@Override
public int compare(Rect o1, Rect o2){
return o1.x-o2.x;
}
});
for (int j = 0; j <= contourRects.size()-1; j++) {
if ((contourRects.get(j).width >= 50 && contourRects.get(j).width <= 120) &&
(contourRects.get(j).height >= 150 && contourRects.get(j).height <= 210)) {
System.out.println(contourRects.get(j).width);
Mat result = image.submat(contourRects.get(j));
Imgcodecs.imwrite("BoundingBox/"+(j+1)+".png", result);
// write the new image on disk
path = "BoundingBox/"+(j+1)+".png";
} else {
continue;
}
}
Imgcodecs.imwrite("BoundingBox/draw.png", image);
label2.setIcon(ImageFromPath(path));
All images at: images
Can anyone help with my problem? Thanks in advance.
This is my first question on Stackoverflow.
I'm a software engineer by profession (Java, C#) and I have zero knowledge of image processing and Android-related technologies. I'm writing an Android application for my master's thesis to help visually impaired people in my country read a document from their Android smartphones, in our native language.
I have selected A4 as the sample document size, and the app should eventually focus on the document automatically once the whole A4 page is in the camera's view (an audible notification should be given to the user), and then capture that image.
Then I plan to run the document through the Tesseract engine to extract the text via OCR. (Someone else is doing the text-to-speech part of this application.)
I googled through a couple of applications and came across the OpenCV documentation. http://docs.opencv.org/opencv_tutorials.pdf explains something about "Creating Bounding boxes and circles for contours" and looks like it's going to be my lifesaver.
My MSc project is a 300-hour part-time project, so I fear I will end up with nothing after spending my time converting C++/Python examples to Java by myself just to learn OpenCV. I went through JavaCV as well, but it looks like it's still at a growing stage, so most probably I will have to convert the examples myself.
What I wanted to ask the experts is whether OpenCV can really do something like this.
Thanks in advance!
Edit: I had a look at the link in the comment and am trying to port the C++ example to Java. Here is what I have got so far. There are still a couple of things to do, though...
int thresh = 50, N = 11;
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
static double angle(Point pt1, Point pt2, Point pt0)
{
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/Math.sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
public void find_squares(Mat image, Vector<Vector<Point> > squares)
{
Imgproc img = new Imgproc();
// blur will enhance edge detection
org.opencv.core.Mat blurred = new org.opencv.core.Mat();
Imgproc.medianBlur(image, blurred, 9);
Mat gray0 = new Mat(blurred.size(), CvType.CV_8U);
Mat gray = new Mat();
// Vector<Vector<Point> > contours;
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
// find squares in every color plane of the image
for (int c = 0; c < 3; c++)
{
int ch[] = {c, 0};
// Core.mixChannels(blurred, 1, gray0, 1, ch, 1);
List<Mat> src = new ArrayList<Mat>();
src.add(blurred);
List<Mat> dest = new ArrayList<Mat>();
dest.add(gray0);
MatOfInt a = new MatOfInt(ch);
Core.mixChannels(src, dest, a);
// try several threshold levels
final int threshold_level = 2;
for (int l = 0; l < threshold_level; l++)
{
// Use Canny instead of zero threshold level!
// Canny helps to catch squares with gradient shading
if (l == 0)
{
Imgproc.Canny(gray0, gray, 10, 20, 3, false);
// Dilate helps to remove potential holes between edge segments
Point point = new Point(-1, -1);
Imgproc.dilate(gray, gray, new Mat(), point, 1);
}
else
{
// roughly the Java equivalent of the C++ line: gray = gray0 >= (l+1) * 255 / threshold_level
Imgproc.threshold(gray0, gray, (l + 1) * 255.0 / threshold_level, 255, Imgproc.THRESH_BINARY);
}
// Find contours and store them in a list. //TODO
Imgproc.findContours(gray, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
// Test contours
MatOfPoint2f approx = new MatOfPoint2f();
for (int i = 0; i < contours.size(); i++)
{
// approximate contour with accuracy proportional
// to the contour perimeter
// approxPolyDP works on MatOfPoint2f, so convert the contour points first
MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(i).toArray());
double epsilon = Imgproc.arcLength(contour2f, true) * 0.02;
Imgproc.approxPolyDP(contour2f, approx, epsilon, true);
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
// Mat mmm = new Mat();
// MatOfPoint ppp = new MatOfPoint();
// one possible completion of the TODOs from the C++ original
Point[] pts = approx.toArray();
if (pts.length == 4 &&
Math.abs(Imgproc.contourArea(approx)) > 1000 &&
Imgproc.isContourConvex(new MatOfPoint(pts)))
{
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = Math.abs(angle(pts[j % 4], pts[j - 2], pts[j - 1]));
maxCosine = Math.max(maxCosine, cosine);
}
if (maxCosine < 0.3)
{
Vector<Point> square = new Vector<Point>();
for (Point p : pts) square.add(p);
squares.add(square);
}
}
}
}
}
}
}
Just to answer the question: yes, this can be done in OpenCV (among many other things), and I have completed the project that I explained in the question. I also voted up Abid's answer for the link he provided. :)
I'm developing a shape-identification project using JavaCV, and I have found some OpenCV code that identifies convex shapes in a particular thresholded image. I have tried to convert it from OpenCV to JavaCV, but when I run the code it gets stuck in the while loop that checks the contours. Can anyone help me find the error? Thanks.
IplImage detectThrs = getThresholdImage(img);
System.err.println("Stucked!!");
CvMemStorage storage = CvMemStorage.create();
CvSeq contour = new CvSeq(null);
cvFindContours(detectThrs, storage, contour, Loader.sizeof(CvContour.class), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
System.err.println("Stucked 2 !!");
CvSeq approx = null;
CvRect bound = null;
CvRect boundMax = null;
CvRect pos = null;
while (contour != null && !contour.isNull()) {
System.err.println("Stucked in while!!");
bound = cvBoundingRect(contour, 0);
approx = cvApproxPoly(contour, Loader.sizeof(CvContour.class), storage, CV_POLY_APPROX_DP, cvContourPerimeter(contour) * 0.02, 0);
if(cvCheckContourConvexity(approx) != 0){
if(bound.width() > 4 || bound.height() > 4){
cvRectangle(
detectThrs,
cvPoint(bound.x(), bound.y()),
cvPoint(bound.x()+bound.width(), bound.y()+bound.height()),
CvScalar.RED,4,
CV_AA, 0
);//2 is thickness CV_AA is type of line 0 means no shift
//cvSaveImage(++ii + "colorMaskTest.jpg", detectThrs);
paint(img, bound.x(), bound.y());
}
}
}
contour = contour.h_next();
}