I have used findContours and boundingRect and displayed the results in my project. Now I want to find the largest contour and display it. Is this possible? I am a newbie to OpenCV in Java.
Here's my code so far:
@Override
public void onCameraViewStarted(int width, int height) {
mRgba = new Mat(height, width, CvType.CV_8UC4);
mHsv = new Mat(height,width,CvType.CV_8UC3);
hierarchy = new Mat();
mHsvMask = new Mat();
mDilated = new Mat();
mEroded = new Mat();
}
@Override
public void onCameraViewStopped() {
mRgba.release();
mHsv.release();
mHsvMask.release();
mDilated.release();
hierarchy.release();
}
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
contours = new ArrayList<MatOfPoint>();
hierarchy = new Mat();
mHsv = new Mat();
mHsvMask = new Mat();
Imgproc.cvtColor(mRgba, mHsv, Imgproc.COLOR_RGB2HSV);
Scalar lowerThreshold = new Scalar(0, 0, 0);      // lower HSV bound (note: this range matches very dark pixels, not blue)
Scalar upperThreshold = new Scalar(179, 255, 10); // upper HSV bound
Core.inRange(mHsv, lowerThreshold, upperThreshold, mHsvMask);
//just some filter
//Imgproc.dilate ( mHsvMask, mDilated, new Mat() );
//Imgproc.erode(mDilated,mEroded,new Mat());
Imgproc.findContours(mHsvMask, contours, hierarchy, Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_SIMPLE);
for ( int contourIdx=0; contourIdx < contours.size(); contourIdx++ )
{
//Minimum size allowed for consideration
MatOfPoint2f approxCurve = new MatOfPoint2f();
MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(contourIdx).toArray());
//approximate the contour (contour2f is of type MatOfPoint2f)
double approxDistance = Imgproc.arcLength(contour2f,true)*0.02;
Imgproc.approxPolyDP(contour2f,approxCurve,approxDistance,true);
//convert back to MatOfPoint
MatOfPoint point = new MatOfPoint(approxCurve.toArray());
//get boundingrect from contour
Rect rect = Imgproc.boundingRect(point);
Imgproc.rectangle(mRgba,new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(255, 0, 0, 255),3);
//alternatively: Imgproc.rectangle(mRgba, rect.tl(), rect.br(), new Scalar(255, 0, 0), 1, 8, 0);
//draw the contour
if(Imgproc.contourArea(contours.get(contourIdx))>100) {
Imgproc.drawContours(mRgba, contours, contourIdx, new Scalar(0,255,0), 5);
}
}
return mRgba;
}
Hopefully someone has some experience with this. Thanks.
With Imgproc.contourArea you can compute the area of each contour; the one with the largest area is your largest contour.
Code to draw the largest contour would look like this:
double maxVal = 0;
int maxValIdx = 0;
for (int contourIdx = 0; contourIdx < contours.size(); contourIdx++)
{
double contourArea = Imgproc.contourArea(contours.get(contourIdx));
if (maxVal < contourArea)
{
maxVal = contourArea;
maxValIdx = contourIdx;
}
}
if (!contours.isEmpty()) {
Imgproc.drawContours(mRgba, contours, maxValIdx, new Scalar(0, 255, 0), 5);
}
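As a side note, on Java 8+ you can get the same result without the manual loop. This is just a sketch, assuming the contours list and mRgba frame from above and the usual java.util imports; Collections.max throws NoSuchElementException on an empty list:

    // Select the contour with the largest area in one call.
    MatOfPoint largest = Collections.max(contours,
            Comparator.comparingDouble(Imgproc::contourArea));
    // drawContours expects a list, so wrap the single contour.
    Imgproc.drawContours(mRgba, Collections.singletonList(largest), 0,
            new Scalar(0, 255, 0), 5);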
Related
I am developing an Android application and I'm processing frames with OpenCV in real time.
The image processing is heavy: I detect the largest contour, rotate and crop it, and apply a form of segmentation to the cropped Mat object.
The problem is that the FPS starts at about 3 and drops to 1.
I'm not doing any heavy operations like matToBitmap, and I'm not making native calls yet.
It's my first big project and I don't have much experience with OpenCV. What can I do to increase my FPS?
Here is my code:
long e1 = Core.getTickCount();
Mat mGray = new Mat();
MatOfDouble mu = new MatOfDouble();
MatOfDouble stddev = new MatOfDouble();
Imgproc.cvtColor(origMat, origMat, Imgproc.COLOR_BGRA2BGR);
Imgproc.cvtColor(origMat, mGray, Imgproc.COLOR_BGR2GRAY);
Core.meanStdDev(mGray, mu, stddev);
Imgproc.GaussianBlur(mGray, mGray, new Size(5, 5), 5);
Imgproc.Canny(mGray, mGray, (mu.get(0, 0)[0]) * 0.66, (mu.get(0, 0)[0]) * 1.33, 3, false);
Mat kernell = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9, 9));
Imgproc.morphologyEx(mGray, mGray, Imgproc.MORPH_CLOSE, kernell);
Imgproc.dilate(mGray, mGray, Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3)));
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(mGray, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
double largest_area = 0;
Rect rect = new Rect();
int largest_idx = 0;
for (int idx = 0; idx < contours.size(); idx++) {
double a = Imgproc.contourArea(contours.get(idx)); //Find the area of contour
if (a > largest_area) {
largest_area = a;
largest_idx = idx;
}
}
if (contours.size() > 0) {
MatOfPoint2f new_mat = new MatOfPoint2f(contours.get(largest_idx).toArray());
RotatedRect rbox = Imgproc.minAreaRect(new_mat);
Point vertices[] = new Point[4];
rbox.points(vertices);
List<MatOfPoint> boxContours = new ArrayList<>();
boxContours.add(new MatOfPoint(vertices));
for (int i = 0; i < 4; ++i) {
Imgproc.line(origMat, vertices[i], vertices[(i + 1) % 4], new Scalar(255, 0, 0));
}
double rect_angle = rbox.angle - 90.0f;
Size rect_size = rbox.size;
double d = rect_size.width;
rect_size.width = rect_size.height;
rect_size.height = d;
M = Imgproc.getRotationMatrix2D(rbox.center, rect_angle, 1.0);
Imgproc.warpAffine(origMat, rotated, M, origMat.size());
if (rect_size.width > 70 && rect_size.height > 70)
Imgproc.getRectSubPix(rotated, new Size(rect_size.width - 70, rect_size.height - 70), rbox.center, rotated);
Imgproc.resize(rotated, rotated, origMat.size());
Mat orr = rotated.clone();
Imgproc.cvtColor(orr, orr, Imgproc.COLOR_RGB2HSV);
Core.split(orr, channels);
orr = channels.get(2);
CLAHE clahe = Imgproc.createCLAHE();
clahe.setClipLimit(1);
clahe.apply(orr, orr);
Imgproc.GaussianBlur(orr, orr, new Size(5, 5), 5);
Core.meanStdDev(mGray, mu, stddev);
Imgproc.Canny(orr, orr, (mu.get(0, 0)[0]) * 0.66, (mu.get(0, 0)[0]) * 1.33, 3, false);
Imgproc.morphologyEx(orr, orr, Imgproc.MORPH_CLOSE, kernell);
Imgproc.dilate(orr, orr, Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3)));
Imgproc.findContours(orr, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
for (int i = contours.size() - 1; i >= 0; i--) {
double area = Imgproc.contourArea(contours.get(i), false); // Find the area of contour
if (area < min_area)
contours.remove(i);
}
if (contours.size() > 0) {
MatOfPoint2f approxCurve = new MatOfPoint2f();
rectList.clear();
for (int idx = 0; idx < contours.size(); idx++) {
//Convert contours(i) from MatOfPoint to MatOfPoint2f
MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(idx).toArray());
//approximate the contour (contour2f is of type MatOfPoint2f)
double approxDistance = Imgproc.arcLength(contour2f, true) * 0.02;
Imgproc.approxPolyDP(contour2f, approxCurve, approxDistance, true);
//Convert back to MatOfPoint
MatOfPoint points = new MatOfPoint(approxCurve.toArray());
// Get bounding rect of contour
Rect rectt = Imgproc.boundingRect(points);
rectList.add(rectt);
Mat miniature = new Mat(orr, new Rect(rectt.tl(), rectt.br()));
mats.add(miniature);
// draw enclosing rectangle (all same color, but you could use variable i to make them unique)
Imgproc.rectangle(rotated, rectt.tl(), rectt.br(), new Scalar(255, 0, 0));
}
}
long e2 = Core.getTickCount();
long e = e2 - e1;
double time = e / Core.getTickFrequency();
Log.d("timeTAG", "" + time);
return orr;
}
return rotated;
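This question went unanswered above, but a common first optimization (my suggestion, not from the original thread) is to run the expensive detection on a downscaled copy of the frame and map the results back to full resolution:

    // Sketch: process a half-resolution copy; 'origMat' is the full-size frame.
    double scale = 0.5;
    Mat small = new Mat();
    Imgproc.resize(origMat, small, new Size(), scale, scale, Imgproc.INTER_AREA);
    // ...run cvtColor / Canny / findContours on 'small' instead of origMat...
    // When drawing results on the full frame, divide coordinates by 'scale':
    // Rect full = new Rect((int)(r.x / scale), (int)(r.y / scale),
    //         (int)(r.width / scale), (int)(r.height / scale));

Reusing preallocated Mat objects across frames instead of calling new Mat() inside the camera callback also reduces garbage-collection pauses.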
I am running a Canny edge detector with Android and OpenCV on an image to detect the largest contour, extract it using the warpPerspective method, then find all objects inside that contour. Everything works as expected, but only for images that aren't rotated.
I am using boundingRect to get the contour and use its coordinates to extract it.
Here is my code:
private Mat detectLargestContour(Mat origMat) {
// long e1 = Core.getTickCount();
Mat mGray = new Mat();
MatOfDouble mu = new MatOfDouble();
MatOfDouble stddev = new MatOfDouble();
Imgproc.cvtColor(origMat, mGray, Imgproc.COLOR_BGR2GRAY);
Core.meanStdDev(mGray, mu, stddev);
Imgproc.GaussianBlur(mGray, mGray, new Size(5, 5), 5);
//Imgproc.Canny(mGray, mGray, 30, 80, 3, false); //FOR HIGH BRIGHTNESS
//Imgproc.Canny(mGray, mGray, 50, 130, 3, false); // FOR LOW BRIGHTNESS
Imgproc.Canny(mGray, mGray, mu.get(0, 0)[0], stddev.get(0, 0)[0], 3, false);
Mat kernell = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9,9));
Imgproc.morphologyEx(mGray, mGray, Imgproc.MORPH_CLOSE, kernell);
Imgproc.dilate(mGray, mGray, Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3)));
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(mGray, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
//MatOfPoint2f approxCurve = new MatOfPoint2f();
double largest_area=0;
Rect rect = new Rect();
for (int idx = 0; idx < contours.size() ; idx++) {
double a = Imgproc.contourArea(contours.get(idx)); //Find the area of contour
if (a > largest_area) {
largest_area = a;
rect = Imgproc.boundingRect(contours.get(idx));
}
}
if (rect.area() > 100000) {
Imgproc.rectangle(origMat, rect.tl(), rect.br(), new Scalar(0, 255, 0));
p1 = new Point(rect.tl().x, rect.tl().y);
p2 = new Point(rect.tl().x + rect.width, rect.tl().y);
p3 = new Point(rect.tl().x, rect.tl().y + rect.height);
p4 = new Point(rect.tl().x + rect.width, rect.tl().y + rect.height);
card_corners = new ArrayList<>();
card_corners.add(p1);
card_corners.add(p3);
card_corners.add(p4);
card_corners.add(p2);
warpedCard = new Mat(origMat.rows(), origMat.cols(), CvType.CV_8UC3);
final Point p1 = new Point(warpedCard.cols() + marge, warpedCard.rows() + marge);
final Point p2 = new Point(0 - marge, warpedCard.rows() + marge);
final Point p3 = new Point(0 - marge, 0 - marge);
final Point p4 = new Point(warpedCard.cols() + marge, 0 - marge);
LinkedList<Point> sceneList = new LinkedList<Point>();
sceneList.addLast(p4);
sceneList.addLast(p3);
sceneList.addLast(p2);
sceneList.addLast(p1);
MatOfPoint2f scene = new MatOfPoint2f();
scene.fromList(sceneList);
MatOfPoint2f obj = new MatOfPoint2f();
obj.fromList(card_corners);
Mat homography = Calib3d.findHomography(obj, scene);
Imgproc.warpPerspective(origMat, warpedCard, homography, new Size(warpedCard.cols(), warpedCard.rows()));
return warpedCard;
}
return origMat;
}
Oddly, only boundingRect gave me a stable and performant result, but the drawn rectangle doesn't rotate with the found contour.
How can I resolve this issue? Any thoughts?
EDIT:
I replaced boundingRect with minAreaRect.
Here is the code:
int largest_idx = 0;
for (int idx = 0; idx < contours.size() ; idx++) {
double a = Imgproc.contourArea(contours.get(idx)); //Find the area of contour
if (a > largest_area) {
largest_area = a;
// rect = Imgproc.boundingRect(contours.get(idx));
largest_idx = idx;
}
}
MatOfPoint2f new_mat = new MatOfPoint2f( contours.get(largest_idx).toArray() );
RotatedRect rbox = Imgproc.minAreaRect(new_mat);
Log.d("rotatedrect_angle", "" + rbox.angle);
Point points[] = new Point[4];
rbox.points(points);
for(int i=0; i<4; ++i){
Imgproc.line(origMat, points[i], points[(i+1)%4], new Scalar(255,255,255));
}
And here is what I've got:
As you can see, the detection isn't as accurate as when I used boundingRect.
A Python demo to find and draw a rotatedRect:
# 2019/03/01
# https://stackoverflow.com/a/54942835/3547485
import numpy as np
import cv2
img = cv2.imread("tmp.png")                   # color image to draw on
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # grayscale copy for thresholding
th, threshed = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY_INV)
cnts = cv2.findContours(threshed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
cnt = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
rbox = cv2.minAreaRect(cnt)
pts = cv2.boxPoints(rbox).astype(np.int32)
cv2.drawContours(img, [pts], -1, (0, 255, 0), 1, cv2.LINE_AA)
cv2.imwrite("dst.png", img)
Useful OpenCV functions (in Python): cv2.minAreaRect, cv2.boxPoints, cv2.drawContours. You can find the corresponding functions in Java.
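For reference, a rough Java translation of the drawing part of this demo (a sketch only; it assumes you have already selected the largest contour cnt as in the earlier answer, and that img is the color Mat to draw on):

    RotatedRect rbox = Imgproc.minAreaRect(new MatOfPoint2f(cnt.toArray()));
    Point[] pts = new Point[4];
    rbox.points(pts); // Java counterpart of cv2.boxPoints
    Imgproc.drawContours(img, Collections.singletonList(new MatOfPoint(pts)), -1,
            new Scalar(0, 255, 0), 1, Imgproc.LINE_AA);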
I am trying to scan an MTG card using OpenCV on Android. I can detect the edges of the card and even draw an outline around it, but I am confused about how to extract just the card from the background and then exit the camera preview. Here is my code so far:
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
Mat result = new Mat();
Mat mask = new Mat();
List<MatOfPoint> contours = new ArrayList<>();
Mat hierarchy = new Mat();
mRgba = inputFrame.rgba();
Imgproc.Canny(mRgba, result, 40, 120);
Imgproc.GaussianBlur(result, result, new Size(9,9), 2, 2);
Imgproc.findContours(result, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE, new Point(0,0));
Imgproc.drawContours(mask, contours, -1, new Scalar(0, 255, 0), 1);
hierarchy.release();
for ( int contourIdx=0; contourIdx < contours.size(); contourIdx++ )
{
// Minimum size allowed for consideration
MatOfPoint2f approxCurve = new MatOfPoint2f();
MatOfPoint2f contour2f = new MatOfPoint2f( contours.get(contourIdx).toArray() );
//approximate the contour (contour2f is of type MatOfPoint2f)
double approxDistance = Imgproc.arcLength(contour2f, true)*0.02;
Imgproc.approxPolyDP(contour2f, approxCurve, approxDistance, true);
//Convert back to MatOfPoint
MatOfPoint points = new MatOfPoint( approxCurve.toArray() );
// Get bounding rect of contour
Rect rect = Imgproc.boundingRect(points);
Imgproc.rectangle(mRgba, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(255, 0, 0, 255), 3);
}
Bitmap card = Bitmap.createBitmap(result.cols(), result.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(result, card);
return mRgba;
}
}
Here is an example of what it looks like when I run the code. As you can see, the card is now outlined by a red rectangle, but how do I extract just what's in the rectangle, save it to a Mat or Bitmap, and then exit the camera?
screenshot of outlined card
I didn't test it, but can't you just use...
return new Mat(mRgba, rect);
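Building on that, here is a sketch of what the full extraction might look like (rect is the bounding rectangle you computed for the card; mOpenCvCameraView is assumed to be whatever CameraBridgeViewBase field your activity holds):

    // clone() copies the pixels so the card survives after mRgba is recycled.
    Mat card = new Mat(mRgba, rect).clone();
    Bitmap cardBitmap = Bitmap.createBitmap(card.cols(), card.rows(),
            Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(card, cardBitmap);
    // Stop the preview once the card has been captured.
    mOpenCvCameraView.disableView();

Note that onCameraFrame runs on a background thread, so you may need to post the disableView() call to the UI thread.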
I am able to detect contours in an image using C++. Now I want to port it to Android using Java, so I built it in Android Studio.
(partly because I am a Java newbie)
Then I ran into a problem. When I run it, I get an error on this line:
if(contours[contourIdx].size()>100)
in the main program below.
@Override
public void onCameraViewStarted(int width, int height) {
mRgba = new Mat(height, width, CvType.CV_8UC4);
mHsv = new Mat(height,width,CvType.CV_8UC3);
hierarchy = new Mat();
mHsvMask = new Mat();
mDilated = new Mat(height, width, CvType.CV_8UC4);
}
@Override
public void onCameraViewStopped() {
mRgba.release();
mHsv.release();
mHsvMask.release();
mDilated.release();
hierarchy.release();
}
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
mHsv = new Mat();
Imgproc.cvtColor(mRgba, mHsv, Imgproc.COLOR_RGB2HSV, 3);
hierarchy.release();
Scalar lowerThreshold = new Scalar(120, 100, 100);
Scalar upperThreshold = new Scalar(179, 255, 255);
Core.inRange(mHsv, lowerThreshold, upperThreshold, mHsvMask);
Imgproc.dilate(mHsvMask, mDilated, new Mat());
Imgproc.findContours(mDilated, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
for ( int contourIdx=0; contourIdx < contours.size(); contourIdx++ )
{
if(contours[contourIdx].size()>100) // Minimum size allowed for consideration
{
Imgproc.drawContours ( mRgba, contours, contourIdx, new Scalar(0,255,0) , -5);
}
}
return mRgba;
}
}
Hopefully someone has some experience with this. Thanks.
Use the contour area instead. In Java, contours is a List<MatOfPoint>, so the C++-style contours[contourIdx] is invalid; use contours.get(contourIdx) and filter by area with Imgproc.contourArea:
if(Imgproc.contourArea(contours.get(contourIdx))>100) {
Imgproc.drawContours(mRgba, contours, contourIdx, new Scalar(Math.random() * 255, Math.random() * 255, Math.random() * 255), 1, 8, hierarchy, 0, new Point());
}
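If you specifically want the point-count check from the C++ version rather than an area check, a minimal equivalent sketch (assuming the same contours list and mRgba frame) would be:

    for (int contourIdx = 0; contourIdx < contours.size(); contourIdx++) {
        // MatOfPoint stores one point per row, so total() is the number of
        // contour points, matching the C++ contours[contourIdx].size() check.
        if (contours.get(contourIdx).total() > 100) {
            Imgproc.drawContours(mRgba, contours, contourIdx, new Scalar(0, 255, 0), -1);
        }
    }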
I'm completely new to OpenCV, and for the past 2-3 days I've searched a lot about how to use OpenCV in Java and Android Studio to perform perspective correction and detect the biggest rectangle in bitmaps. Based on my searches I have done some work, but the resulting bitmap is not correct. I'm sure I've done a lot of things wrong, so it would be great if someone could help me.
Thanks in advance for your help.
public void onPictureTaken(byte[] data, Camera camera)
{
Bitmap myImage = BitmapFactory.decodeByteArray(data, 0, data.length);
Mat matImage = new Mat(myImage.getHeight(),myImage.getWidth(), CvType.CV_8UC3);
Bitmap myBitmap32 = myImage.copy(Bitmap.Config.ARGB_8888, true);
Utils.bitmapToMat(myBitmap32, matImage);
correctPerspective(matImage);
}
public static void correctPerspective(Mat imgSource)
{
// edge detection; the output is a single-channel (8-bit) binary image
Imgproc.Canny(imgSource.clone(), imgSource, 50, 50);
// apply Gaussian blur to smooth the edge lines
Imgproc.GaussianBlur(imgSource, imgSource, new org.opencv.core.Size(5, 5), 5);
// find the contours
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(imgSource, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
double maxArea = -1;
MatOfPoint temp_contour = contours.get(0); // starting point: index 0
MatOfPoint2f approxCurve = new MatOfPoint2f();
for (int idx = 0; idx < contours.size(); idx++) {
temp_contour = contours.get(idx);
double contourarea = Imgproc.contourArea(temp_contour);
// compare this contour to the previous largest contour found
if (contourarea > maxArea) {
// check if this contour is a square
MatOfPoint2f new_mat = new MatOfPoint2f(temp_contour.toArray());
int contourSize = (int) temp_contour.total();
MatOfPoint2f approxCurve_temp = new MatOfPoint2f();
Imgproc.approxPolyDP(new_mat, approxCurve_temp, contourSize * 0.05, true);
if (approxCurve_temp.total() == 4) {
maxArea = contourarea;
approxCurve = approxCurve_temp;
}
}
}
Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_GRAY2RGB); // the Canny output is single-channel, so GRAY2RGB (not BayerBG2RGB) is the conversion that makes sense here
double[] temp_double;
temp_double = approxCurve.get(0, 0);
Point p1 = new Point(temp_double[0], temp_double[1]);
temp_double = approxCurve.get(1, 0);
Point p2 = new Point(temp_double[0], temp_double[1]);
temp_double = approxCurve.get(2, 0);
Point p3 = new Point(temp_double[0], temp_double[1]);
temp_double = approxCurve.get(3, 0);
Point p4 = new Point(temp_double[0], temp_double[1]);
List<Point> source = new ArrayList<Point>();
source.add(p1);
source.add(p2);
source.add(p3);
source.add(p4);
Mat startM = Converters.vector_Point2f_to_Mat(source);
Mat result = warp(imgSource, startM);
//Saving into bitmap
Bitmap resultBitmap = Bitmap.createBitmap(result.cols(), result.rows(), Bitmap.Config.ARGB_8888);
Mat tmp = new Mat (result.cols(), result.rows(), CvType.CV_8U, new Scalar(4));
Imgproc.cvtColor(result, tmp, Imgproc.COLOR_RGB2BGRA);
Utils.matToBitmap(tmp, resultBitmap);
}
public static Mat warp(Mat inputMat, Mat startM)
{
int resultWidth = 1200;
int resultHeight = 680;
Point ocvPOut4 = new Point(0, 0);
Point ocvPOut1 = new Point(0, resultHeight);
Point ocvPOut2 = new Point(resultWidth, resultHeight);
Point ocvPOut3 = new Point(resultWidth, 0);
if (inputMat.height() > inputMat.width())
{
ocvPOut3 = new Point(0, 0);
ocvPOut4 = new Point(0, resultHeight);
ocvPOut1 = new Point(resultWidth, resultHeight);
ocvPOut2 = new Point(resultWidth, 0);
}
Mat outputMat = new Mat(resultWidth, resultHeight, CvType.CV_8UC4);
List<Point> dest = new ArrayList<Point>();
dest.add(ocvPOut1);
dest.add(ocvPOut2);
dest.add(ocvPOut3);
dest.add(ocvPOut4);
Mat endM = Converters.vector_Point2f_to_Mat(dest);
Mat perspectiveTransform = Imgproc.getPerspectiveTransform(startM, endM);
Imgproc.warpPerspective(inputMat, outputMat, perspectiveTransform, new Size(resultWidth, resultHeight), Imgproc.INTER_CUBIC);
return outputMat;
}
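One likely culprit here (my guess; the thread has no accepted answer) is that approxPolyDP returns the four corners in an arbitrary order, while warp() assumes a fixed order, so the warp can mirror or twist the output. A minimal sketch of the usual sum/difference trick for ordering corners as top-left, top-right, bottom-right, bottom-left, assuming the four Points p1..p4 from correctPerspective:

    // Order four corners deterministically: the top-left corner minimizes x + y,
    // the bottom-right maximizes it; the top-right minimizes y - x, the
    // bottom-left maximizes it.
    public static Point[] orderCorners(Point[] pts) {
        Point tl = pts[0], tr = pts[0], br = pts[0], bl = pts[0];
        for (Point p : pts) {
            if (p.x + p.y < tl.x + tl.y) tl = p;
            if (p.x + p.y > br.x + br.y) br = p;
            if (p.y - p.x < tr.y - tr.x) tr = p;
            if (p.y - p.x > bl.y - bl.x) bl = p;
        }
        return new Point[] { tl, tr, br, bl };
    }

With the corners in a known order you can map each source corner directly to its corresponding destination corner in warp().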