Auto perspective correction using OpenCV and Java

I'm looking at perspective correction for images taken from an Android camera, similar to what is described in this question posted earlier. However, I haven't been able to get my hands on properly working Java code.
If anyone has made it work in Java, please help.

I found an answer to my query here. I've put a slightly refined code snippet below:
public static void correctPerspective() {
    String fileName = "IMG_20141024_132131.jpg";
    Mat imgSource = Highgui.imread(fileName, Highgui.CV_LOAD_IMAGE_UNCHANGED);
    // detect edges (Canny outputs a single-channel 8-bit image)
    Imgproc.Canny(imgSource.clone(), imgSource, 50, 50);
    // apply a Gaussian blur to smooth the edge lines
    Imgproc.GaussianBlur(imgSource, imgSource, new org.opencv.core.Size(5, 5), 5);
    // find the contours
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(imgSource, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    double maxArea = -1;
    MatOfPoint temp_contour = contours.get(0); // start with the contour at index 0
    MatOfPoint2f approxCurve = new MatOfPoint2f();
    for (int idx = 0; idx < contours.size(); idx++) {
        temp_contour = contours.get(idx);
        double contourarea = Imgproc.contourArea(temp_contour);
        // compare this contour to the previous largest contour found
        if (contourarea > maxArea) {
            // check if this contour is a quadrilateral
            MatOfPoint2f new_mat = new MatOfPoint2f(temp_contour.toArray());
            int contourSize = (int) temp_contour.total();
            MatOfPoint2f approxCurve_temp = new MatOfPoint2f();
            Imgproc.approxPolyDP(new_mat, approxCurve_temp, contourSize * 0.05, true);
            if (approxCurve_temp.total() == 4) {
                maxArea = contourarea;
                approxCurve = approxCurve_temp;
            }
        }
    }
    Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BayerBG2RGB);
    // re-read the untouched source image for warping
    Mat sourceImage = Highgui.imread(fileName, Highgui.CV_LOAD_IMAGE_UNCHANGED);
    // read the four corners of the detected quadrilateral
    double[] temp_double;
    temp_double = approxCurve.get(0, 0);
    Point p1 = new Point(temp_double[0], temp_double[1]);
    // Core.circle(imgSource, p1, 55, new Scalar(0, 0, 255));
    temp_double = approxCurve.get(1, 0);
    Point p2 = new Point(temp_double[0], temp_double[1]);
    // Core.circle(imgSource, p2, 150, new Scalar(255, 255, 255));
    temp_double = approxCurve.get(2, 0);
    Point p3 = new Point(temp_double[0], temp_double[1]);
    // Core.circle(imgSource, p3, 200, new Scalar(255, 0, 0));
    temp_double = approxCurve.get(3, 0);
    Point p4 = new Point(temp_double[0], temp_double[1]);
    List<Point> source = new ArrayList<Point>();
    source.add(p1);
    source.add(p2);
    source.add(p3);
    source.add(p4);
    Mat startM = Converters.vector_Point2f_to_Mat(source);
    Mat result = warp(sourceImage, startM);
    Highgui.imwrite("corrected.jpg", result);
}
public static Mat warp(Mat inputMat, Mat startM) {
    int resultWidth = 1200;
    int resultHeight = 680;
    // destination corners for a landscape input
    Point ocvPOut4 = new Point(0, 0);
    Point ocvPOut1 = new Point(0, resultHeight);
    Point ocvPOut2 = new Point(resultWidth, resultHeight);
    Point ocvPOut3 = new Point(resultWidth, 0);
    if (inputMat.height() > inputMat.width()) {
        // rotate the destination corners for a portrait input
        ocvPOut3 = new Point(0, 0);
        ocvPOut4 = new Point(0, resultHeight);
        ocvPOut1 = new Point(resultWidth, resultHeight);
        ocvPOut2 = new Point(resultWidth, 0);
    }
    // note: Mat takes (rows, cols); warpPerspective reallocates it anyway
    Mat outputMat = new Mat(resultHeight, resultWidth, CvType.CV_8UC4);
    List<Point> dest = new ArrayList<Point>();
    dest.add(ocvPOut1);
    dest.add(ocvPOut2);
    dest.add(ocvPOut3);
    dest.add(ocvPOut4);
    Mat endM = Converters.vector_Point2f_to_Mat(dest);
    Mat perspectiveTransform = Imgproc.getPerspectiveTransform(startM, endM);
    Imgproc.warpPerspective(inputMat, outputMat, perspectiveTransform, new Size(resultWidth, resultHeight), Imgproc.INTER_CUBIC);
    return outputMat;
}
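One caveat with the snippet above: approxPolyDP returns the four corners in whatever order they occur along the contour, so the mapping onto the fixed destination corners in warp() can flip or mirror the output. A minimal sketch of a common fix, sorting the corners into top-left, top-right, bottom-right, bottom-left order by coordinate sums and differences (the sortCorners helper is my own, not part of the original answer):

// Orders four corners as top-left, top-right, bottom-right, bottom-left.
// Assumes a roughly upright quadrilateral; hypothetical helper for illustration.
public static List<Point> sortCorners(List<Point> corners) {
    Point tl = corners.get(0), tr = corners.get(0), br = corners.get(0), bl = corners.get(0);
    for (Point p : corners) {
        if (p.x + p.y < tl.x + tl.y) tl = p; // smallest x+y -> top-left
        if (p.x + p.y > br.x + br.y) br = p; // largest x+y  -> bottom-right
        if (p.x - p.y > tr.x - tr.y) tr = p; // largest x-y  -> top-right
        if (p.x - p.y < bl.x - bl.y) bl = p; // smallest x-y -> bottom-left
    }
    return new ArrayList<Point>(Arrays.asList(tl, tr, br, bl)); // needs java.util.Arrays
}

Calling sortCorners(source) before Converters.vector_Point2f_to_Mat keeps the source and destination corner orders consistent.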

Related

How to increase Android Opencv camera FPS?

I am developing an Android application and I'm processing frames using OpenCV in real time.
The image processing is heavy: I detect the largest contour, rotate and crop it, and apply a kind of segmentation to the cropped Mat object.
The problem is that the FPS starts at around 3 and drops to 1.
I'm not doing heavy operations like matToBitmap, and I'm not making native calls yet.
It's my first big project and I don't have much experience with OpenCV. What can I do to increase my FPS?
Here is my code:
// (M, rotated, channels, min_area, rectList and mats are fields defined elsewhere)
long e1 = Core.getTickCount();
Mat mGray = new Mat();
MatOfDouble mu = new MatOfDouble();
MatOfDouble stddev = new MatOfDouble();
Imgproc.cvtColor(origMat, origMat, Imgproc.COLOR_BGRA2BGR);
Imgproc.cvtColor(origMat, mGray, Imgproc.COLOR_BGR2GRAY);
Core.meanStdDev(mGray, mu, stddev);
Imgproc.GaussianBlur(mGray, mGray, new Size(5, 5), 5);
Imgproc.Canny(mGray, mGray, (mu.get(0, 0)[0]) * 0.66, (mu.get(0, 0)[0]) * 1.33, 3, false);
Mat kernell = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9, 9));
Imgproc.morphologyEx(mGray, mGray, Imgproc.MORPH_CLOSE, kernell);
Imgproc.dilate(mGray, mGray, Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3)));
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(mGray, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
double largest_area = 0;
Rect rect = new Rect();
int largest_idx = 0;
for (int idx = 0; idx < contours.size(); idx++) {
    double a = Imgproc.contourArea(contours.get(idx)); // find the area of the contour
    if (a > largest_area) {
        largest_area = a;
        largest_idx = idx;
    }
}
if (contours.size() > 0) {
    MatOfPoint2f new_mat = new MatOfPoint2f(contours.get(largest_idx).toArray());
    RotatedRect rbox = Imgproc.minAreaRect(new_mat);
    Point vertices[] = new Point[4];
    rbox.points(vertices);
    List<MatOfPoint> boxContours = new ArrayList<>();
    boxContours.add(new MatOfPoint(vertices));
    for (int i = 0; i < 4; ++i) {
        Imgproc.line(origMat, vertices[i], vertices[(i + 1) % 4], new Scalar(255, 0, 0));
    }
    double rect_angle = rbox.angle - 90.0f;
    Size rect_size = rbox.size;
    double d = rect_size.width;
    rect_size.width = rect_size.height;
    rect_size.height = d;
    M = Imgproc.getRotationMatrix2D(rbox.center, rect_angle, 1.0);
    Imgproc.warpAffine(origMat, rotated, M, origMat.size());
    if (rect_size.width > 70 && rect_size.height > 70)
        Imgproc.getRectSubPix(rotated, new Size(rect_size.width - 70, rect_size.height - 70), rbox.center, rotated);
    Imgproc.resize(rotated, rotated, origMat.size());
    Mat orr = rotated.clone();
    Imgproc.cvtColor(orr, orr, Imgproc.COLOR_RGB2HSV);
    Core.split(orr, channels);
    orr = channels.get(2);
    CLAHE clahe = Imgproc.createCLAHE();
    clahe.setClipLimit(1);
    clahe.apply(orr, orr);
    Imgproc.GaussianBlur(orr, orr, new Size(5, 5), 5);
    Core.meanStdDev(mGray, mu, stddev);
    Imgproc.Canny(orr, orr, (mu.get(0, 0)[0]) * 0.66, (mu.get(0, 0)[0]) * 1.33, 3, false);
    Imgproc.morphologyEx(orr, orr, Imgproc.MORPH_CLOSE, kernell);
    Imgproc.dilate(orr, orr, Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3)));
    Imgproc.findContours(orr, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    for (int i = contours.size() - 1; i >= 0; i--) {
        double area = Imgproc.contourArea(contours.get(i), false); // find the area of the contour
        if (area < min_area)
            contours.remove(i);
    }
    if (contours.size() > 0) {
        MatOfPoint2f approxCurve = new MatOfPoint2f();
        rectList.clear();
        for (int idx = 0; idx < contours.size(); idx++) {
            // convert contours.get(idx) from MatOfPoint to MatOfPoint2f
            MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(idx).toArray());
            // approximate the contour as a polygon
            double approxDistance = Imgproc.arcLength(contour2f, true) * 0.02;
            Imgproc.approxPolyDP(contour2f, approxCurve, approxDistance, true);
            // convert back to MatOfPoint
            MatOfPoint points = new MatOfPoint(approxCurve.toArray());
            // get the bounding rect of the contour
            Rect rectt = Imgproc.boundingRect(points);
            rectList.add(rect);
            Mat miniature = new Mat(orr, new Rect(rectt.tl(), rectt.br()));
            mats.add(miniature);
            // draw the enclosing rectangle (all the same color, but you could use i to make them unique)
            Imgproc.rectangle(rotated, rectt.tl(), rectt.br(), new Scalar(255, 0, 0));
        }
    }
    long e2 = Core.getTickCount();
    long e = e2 - e1;
    double time = e / Core.getTickFrequency();
    Log.d("timeTAG", "" + time);
    return orr;
}
return rotated;
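One standard way to raise FPS in a pipeline like this (a general suggestion, not from the original post) is to run the detection on a downscaled copy of the frame and map the results back. A minimal sketch, assuming the origMat and rect variables from the code above and a fixed scale factor of 0.5:

// Sketch: detect on a half-resolution copy, then map results back up.
// The 0.5 factor is an assumption for illustration.
double scale = 0.5;
Mat small = new Mat();
Imgproc.resize(origMat, small, new Size(), scale, scale, Imgproc.INTER_AREA);
// ... run the grayscale/Canny/findContours steps on 'small' ...
// map a Rect found on 'small' back to full-resolution coordinates:
Rect fullRes = new Rect((int) (rect.x / scale), (int) (rect.y / scale),
        (int) (rect.width / scale), (int) (rect.height / scale));

Since Canny, the morphology, and findContours all scale with pixel count, a half-resolution frame cuts their cost to roughly a quarter.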

How to draw a boundingRect with the right rotation angle by using OpenCV?

I am performing Canny edge detection with Android and OpenCV on an image to detect the largest contour, extract it using the warpPerspective method, and then find all objects inside that contour. Everything works as expected, but only for images that aren't rotated.
I am using boundingRect to get the contour and use its coordinates to extract it.
Here is my code:
private Mat detectLargestContour(Mat origMat) {
    // long e1 = Core.getTickCount();
    Mat mGray = new Mat();
    MatOfDouble mu = new MatOfDouble();
    MatOfDouble stddev = new MatOfDouble();
    Imgproc.cvtColor(origMat, mGray, Imgproc.COLOR_BGR2GRAY);
    Core.meanStdDev(mGray, mu, stddev);
    Imgproc.GaussianBlur(mGray, mGray, new Size(5, 5), 5);
    //Imgproc.Canny(mGray, mGray, 30, 80, 3, false); // FOR HIGH BRIGHTNESS
    //Imgproc.Canny(mGray, mGray, 50, 130, 3, false); // FOR LOW BRIGHTNESS
    Imgproc.Canny(mGray, mGray, mu.get(0, 0)[0], stddev.get(0, 0)[0], 3, false);
    Mat kernell = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9, 9));
    Imgproc.morphologyEx(mGray, mGray, Imgproc.MORPH_CLOSE, kernell);
    Imgproc.dilate(mGray, mGray, Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3)));
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Mat hierarchy = new Mat();
    Imgproc.findContours(mGray, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    //MatOfPoint2f approxCurve = new MatOfPoint2f();
    double largest_area = 0;
    Rect rect = new Rect();
    for (int idx = 0; idx < contours.size(); idx++) {
        double a = Imgproc.contourArea(contours.get(idx)); // find the area of the contour
        if (a > largest_area) {
            largest_area = a;
            rect = Imgproc.boundingRect(contours.get(idx));
        }
    }
    if (rect.area() > 100000) {
        Imgproc.rectangle(origMat, rect.tl(), rect.br(), new Scalar(0, 255, 0));
        p1 = new Point(rect.tl().x, rect.tl().y);
        p2 = new Point(rect.tl().x + rect.width, rect.tl().y);
        p3 = new Point(rect.tl().x, rect.tl().y + rect.height);
        p4 = new Point(rect.tl().x + rect.width, rect.tl().y + rect.height);
        card_corners = new ArrayList<>();
        card_corners.add(p1);
        card_corners.add(p3);
        card_corners.add(p4);
        card_corners.add(p2);
        warpedCard = new Mat(origMat.rows(), origMat.cols(), CvType.CV_8UC3);
        final Point p1 = new Point(warpedCard.cols() + marge, warpedCard.rows() + marge);
        final Point p2 = new Point(0 - marge, warpedCard.rows() + marge);
        final Point p3 = new Point(0 - marge, 0 - marge);
        final Point p4 = new Point(warpedCard.cols() + marge, 0 - marge);
        LinkedList<Point> sceneList = new LinkedList<Point>();
        sceneList.addLast(p4);
        sceneList.addLast(p3);
        sceneList.addLast(p2);
        sceneList.addLast(p1);
        MatOfPoint2f scene = new MatOfPoint2f();
        scene.fromList(sceneList);
        MatOfPoint2f obj = new MatOfPoint2f();
        obj.fromList(card_corners);
        Mat homography = Calib3d.findHomography(obj, scene);
        Imgproc.warpPerspective(origMat, warpedCard, homography, new Size(warpedCard.cols(), warpedCard.rows()));
        return warpedCard;
    }
    return origMat;
}
It's weird, but only boundingRect gave me a stable and performant result; the drawn rectangle just doesn't rotate with the found contour.
How can I resolve this issue? Any thoughts?
EDIT:
I replaced boundingRect with minAreaRect.
Here is the code:
int largest_idx = 0;
for (int idx = 0; idx < contours.size(); idx++) {
    double a = Imgproc.contourArea(contours.get(idx)); // find the area of the contour
    if (a > largest_area) {
        largest_area = a;
        // rect = Imgproc.boundingRect(contours.get(idx));
        largest_idx = idx;
    }
}
MatOfPoint2f new_mat = new MatOfPoint2f(contours.get(largest_idx).toArray());
RotatedRect rbox = Imgproc.minAreaRect(new_mat);
Log.d("rotatedrect_angle", "" + rbox.angle);
Point points[] = new Point[4];
rbox.points(points);
for (int i = 0; i < 4; ++i) {
    Imgproc.line(origMat, points[i], points[(i + 1) % 4], new Scalar(255, 255, 255));
}
And here is what I've got:
As you can see, the detection isn't as accurate as when I used boundingRect.
A Python demo to find and draw a rotatedRect:
# 2019/03/01
# https://stackoverflow.com/a/54942835/3547485
import numpy as np
import cv2
img = cv2.imread("tmp.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
th, threshed = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY_INV)
cnts = cv2.findContours(threshed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
cnt = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
rbox = cv2.minAreaRect(cnt)
pts = cv2.boxPoints(rbox).astype(np.int32)
cv2.drawContours(img, [pts], -1, (0, 255, 0), 1, cv2.LINE_AA)
cv2.imwrite("dst.png", img)
Useful OpenCV functions (in Python): cv2.minAreaRect, cv2.boxPoints, cv2.drawContours. You can find the corresponding functions in Java.
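For reference, a minimal Java sketch of the same steps; the variable names img and contours are my own, with contours assumed to come from Imgproc.findContours as in the code above:

// Find the rotated bounding box of the largest contour and draw it.
MatOfPoint largest = contours.get(0);
for (MatOfPoint c : contours) {
    if (Imgproc.contourArea(c) > Imgproc.contourArea(largest)) largest = c;
}
RotatedRect rbox = Imgproc.minAreaRect(new MatOfPoint2f(largest.toArray()));
Point[] pts = new Point[4];
rbox.points(pts); // Java counterpart of cv2.boxPoints
for (int i = 0; i < 4; i++) {
    Imgproc.line(img, pts[i], pts[(i + 1) % 4], new Scalar(0, 255, 0), 2);
}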

Cannot get warpPerspective to get correct transformation

I've already tried all the options I found on the net.
The coordinates are correct, and the point order is the same in both vectors.
What am I missing?
public void cropBtn(View view) {
    List<Point> corners = new ArrayList<>();
    for (int i = 0; i < 4; i++)
        corners.add(new Point(cropPanel.corners[i].x, cropPanel.corners[i].y));
    List<Point> target = new ArrayList<>();
    target.add(new Point(cropPanel.left, cropPanel.top));
    target.add(new Point(cropPanel.right, cropPanel.top));
    target.add(new Point(cropPanel.right, cropPanel.bottom));
    target.add(new Point(cropPanel.left, cropPanel.bottom));
    Mat trans = Imgproc.getPerspectiveTransform(Converters.vector_Point2f_to_Mat(corners), Converters.vector_Point2f_to_Mat(target));
    Mat src = new Mat(bitmap.getHeight(), bitmap.getWidth(), CvType.CV_8SC1);
    Utils.bitmapToMat(bitmap, src);
    Mat dst = new Mat(bitmap.getHeight(), bitmap.getWidth(), CvType.CV_8SC1);
    Imgproc.warpPerspective(src, dst, trans, dst.size());
    Utils.matToBitmap(dst, bitmap);
    image.setImageBitmap(bitmap);
    cropPanel.reset();
}
I think you are getting the wrong points, so I would suggest trying the code below:
MatOfPoint2f thisContour2f = new MatOfPoint2f();
MatOfPoint2f approxContour2f = new MatOfPoint2f();
// 'contours' is the List<MatOfPoint> you get from Imgproc.findContours()
contours.get(maxI).convertTo(thisContour2f, CvType.CV_32FC2);
// approximate the polygonal curve with the specified precision; since you are
// detecting a document, which is a rectangle, you need to find four points
Imgproc.approxPolyDP(thisContour2f, approxContour2f, Imgproc.arcLength(thisContour2f, true) * 0.01, true);
if (approxContour2f.size().height == 4) { // since we are detecting a rectangle, check whether the contour has 4 points
    MatOfPoint2f rotatedMat = new MatOfPoint2f(contours.get(maxI).toArray());
    RotatedRect boundingBox = Imgproc.minAreaRect(rotatedMat); // find the RotatedRect of the contour
    // read the four points
    double[] temp_double;
    temp_double = approxContour2f.get(0, 0);
    Point p1 = new Point(temp_double[0], temp_double[1]);
    temp_double = approxContour2f.get(1, 0);
    Point p2 = new Point(temp_double[0], temp_double[1]);
    temp_double = approxContour2f.get(2, 0);
    Point p3 = new Point(temp_double[0], temp_double[1]);
    temp_double = approxContour2f.get(3, 0);
    Point p4 = new Point(temp_double[0], temp_double[1]);
    List<Point> source = new ArrayList<>();
    source.add(p1);
    source.add(p2);
    source.add(p3);
    source.add(p4);
    Mat startM = Converters.vector_Point2f_to_Mat(source);
    Mat result = warp(mRgba, startM, boundingBox.boundingRect()); // mRgba is the original Mat
}
Now you can apply the perspective correction with the warp() function below:
public Mat warp(Mat inputMat, Mat startM, Rect rect) {
    int resultWidth = rect.width;
    int resultHeight = rect.height;
    Point ocvPOut1 = new Point(0, 0);
    Point ocvPOut2 = new Point(0, resultHeight);
    Point ocvPOut3 = new Point(resultWidth, resultHeight);
    Point ocvPOut4 = new Point(resultWidth, 0);
    // note: Mat takes (rows, cols); warpPerspective reallocates it anyway
    Mat outputMat = new Mat(resultHeight, resultWidth, CvType.CV_8UC4);
    List<Point> dest = new ArrayList<Point>();
    dest.add(ocvPOut1);
    dest.add(ocvPOut2);
    dest.add(ocvPOut3);
    dest.add(ocvPOut4);
    Mat endM = Converters.vector_Point2f_to_Mat(dest);
    Mat perspectiveTransform = Imgproc.getPerspectiveTransform(startM, endM);
    Imgproc.warpPerspective(inputMat, outputMat, perspectiveTransform, new Size(resultWidth, resultHeight), Imgproc.INTER_CUBIC);
    return outputMat;
}
Hope it helps!
After many hours of fighting with this, I finally found the problem.
Posting it here in case it helps someone in my situation.
The problem was that I used the coordinates of the view, when I needed the coordinates of the bitmap. That made the warp behave completely erratically. So I just translated from the View's coordinates to the Bitmap's. Now it's perfect:
float xRatio = (float) bitmap.getWidth() / cropPanel.getWidth();
float yRatio = (float) bitmap.getHeight() / cropPanel.getHeight();
for (int i = 0; i < 4; i++)
    corners.add(new Point(cropPanel.corners[i].x * xRatio, cropPanel.corners[i].y * yRatio));
List<Point> target = new ArrayList<>();
target.add(new Point(0, 0));
target.add(new Point(bitmap.getWidth(), 0));
target.add(new Point(bitmap.getWidth(), bitmap.getHeight()));
target.add(new Point(0, bitmap.getHeight()));

Detecting rectangles using OpenCV

So I'm completely new to OpenCV. For the past 2-3 days I've searched a lot about how to use OpenCV in Java and Android Studio to perform perspective correction and detect the biggest rectangle in bitmaps. Based on my searches I have done some work, but the result bitmap is not correct. I'm sure I've done a lot of things wrong, so it would be great if someone could help me.
Thanks in advance for your help.
public void onPictureTaken(byte[] data, Camera camera) {
    Bitmap myImage = BitmapFactory.decodeByteArray(data, 0, data.length);
    Mat matImage = new Mat(myImage.getHeight(), myImage.getWidth(), CvType.CV_8UC3);
    Bitmap myBitmap32 = myImage.copy(Bitmap.Config.ARGB_8888, true);
    Utils.bitmapToMat(myBitmap32, matImage);
    correctPerspective(matImage);
}

public static void correctPerspective(Mat imgSource) {
    // detect edges (Canny outputs a single-channel 8-bit image)
    Imgproc.Canny(imgSource.clone(), imgSource, 50, 50);
    // apply a Gaussian blur to smooth the edge lines
    Imgproc.GaussianBlur(imgSource, imgSource, new org.opencv.core.Size(5, 5), 5);
    // find the contours
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(imgSource, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    double maxArea = -1;
    MatOfPoint temp_contour = contours.get(0); // start with the contour at index 0
    MatOfPoint2f approxCurve = new MatOfPoint2f();
    for (int idx = 0; idx < contours.size(); idx++) {
        temp_contour = contours.get(idx);
        double contourarea = Imgproc.contourArea(temp_contour);
        // compare this contour to the previous largest contour found
        if (contourarea > maxArea) {
            // check if this contour is a quadrilateral
            MatOfPoint2f new_mat = new MatOfPoint2f(temp_contour.toArray());
            int contourSize = (int) temp_contour.total();
            MatOfPoint2f approxCurve_temp = new MatOfPoint2f();
            Imgproc.approxPolyDP(new_mat, approxCurve_temp, contourSize * 0.05, true);
            if (approxCurve_temp.total() == 4) {
                maxArea = contourarea;
                approxCurve = approxCurve_temp;
            }
        }
    }
    Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BayerBG2RGB);
    // read the four corners of the detected quadrilateral
    double[] temp_double;
    temp_double = approxCurve.get(0, 0);
    Point p1 = new Point(temp_double[0], temp_double[1]);
    temp_double = approxCurve.get(1, 0);
    Point p2 = new Point(temp_double[0], temp_double[1]);
    temp_double = approxCurve.get(2, 0);
    Point p3 = new Point(temp_double[0], temp_double[1]);
    temp_double = approxCurve.get(3, 0);
    Point p4 = new Point(temp_double[0], temp_double[1]);
    List<Point> source = new ArrayList<Point>();
    source.add(p1);
    source.add(p2);
    source.add(p3);
    source.add(p4);
    Mat startM = Converters.vector_Point2f_to_Mat(source);
    Mat result = warp(imgSource, startM);
    // save into a bitmap
    Bitmap resultBitmap = Bitmap.createBitmap(result.cols(), result.rows(), Bitmap.Config.ARGB_8888);
    Mat tmp = new Mat(result.rows(), result.cols(), CvType.CV_8U, new Scalar(4));
    Imgproc.cvtColor(result, tmp, Imgproc.COLOR_RGB2BGRA);
    Utils.matToBitmap(tmp, resultBitmap);
}
public static Mat warp(Mat inputMat, Mat startM) {
    int resultWidth = 1200;
    int resultHeight = 680;
    // destination corners for a landscape input
    Point ocvPOut4 = new Point(0, 0);
    Point ocvPOut1 = new Point(0, resultHeight);
    Point ocvPOut2 = new Point(resultWidth, resultHeight);
    Point ocvPOut3 = new Point(resultWidth, 0);
    if (inputMat.height() > inputMat.width()) {
        // rotate the destination corners for a portrait input
        ocvPOut3 = new Point(0, 0);
        ocvPOut4 = new Point(0, resultHeight);
        ocvPOut1 = new Point(resultWidth, resultHeight);
        ocvPOut2 = new Point(resultWidth, 0);
    }
    // note: Mat takes (rows, cols); warpPerspective reallocates it anyway
    Mat outputMat = new Mat(resultHeight, resultWidth, CvType.CV_8UC4);
    List<Point> dest = new ArrayList<Point>();
    dest.add(ocvPOut1);
    dest.add(ocvPOut2);
    dest.add(ocvPOut3);
    dest.add(ocvPOut4);
    Mat endM = Converters.vector_Point2f_to_Mat(dest);
    Mat perspectiveTransform = Imgproc.getPerspectiveTransform(startM, endM);
    Imgproc.warpPerspective(inputMat, outputMat, perspectiveTransform, new Size(resultWidth, resultHeight), Imgproc.INTER_CUBIC);
    return outputMat;
}
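One thing worth double-checking here (my own observation, not from the original post): Utils.bitmapToMat produces a 4-channel RGBA Mat, and running Canny directly on it tends to give poor edge maps; converting to grayscale first is the usual approach. A minimal sketch:

// Sketch: convert the RGBA Mat from bitmapToMat to grayscale before Canny.
// Variable names follow the code above; the 50/50 thresholds are unchanged.
Mat gray = new Mat();
Imgproc.cvtColor(imgSource, gray, Imgproc.COLOR_RGBA2GRAY);
Imgproc.Canny(gray, gray, 50, 50);
// ...then run GaussianBlur and findContours on 'gray' instead of imgSource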

Keypoint matching just works two times...? (java opencv)

I have a very strange problem. I'm using this code to detect an image inside another one (Java OpenCV):
UPDATED CODE:
public void startRecognition() {
    // load images; I want to find img_object in img_scene
    Mat img_scene = Highgui.imread("D:/opencvws/ImageRecognition/src/main/resources/ascene.jpg");
    Mat img_object = Highgui.imread("D:/opencvws/ImageRecognition/src/main/resources/aobj1.jpg");
    run++;
    System.out.println("RUN NO: " + run);
    // init detector
    FeatureDetector detector = FeatureDetector.create(FeatureDetector.SURF);
    // keypoint detection for both images (keypoints_scene for img_scene, keypoints_object for img_object)
    MatOfKeyPoint keypoints_object = new MatOfKeyPoint();
    MatOfKeyPoint keypoints_scene = new MatOfKeyPoint();
    detector.detect(img_object, keypoints_object);
    detector.detect(img_scene, keypoints_scene);
    System.out.println("OK: " + keypoints_object.total());
    System.out.println("SK: " + keypoints_scene.total());
    // init extractor
    DescriptorExtractor extractor = DescriptorExtractor.create(2); // 2 = SURF
    Mat descriptor_object = new Mat();
    Mat descriptor_scene = new Mat();
    // compute descriptors
    extractor.compute(img_object, keypoints_object, descriptor_object);
    extractor.compute(img_scene, keypoints_scene, descriptor_scene);
    // init matcher
    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED); // 1 = FLANNBASED
    matcher.clear();
    MatOfDMatch matches = new MatOfDMatch();
    // match both descriptor sets
    matcher.match(descriptor_object, descriptor_scene, matches);
    List<DMatch> matchesList = matches.toList();
    // calc min/max dist
    Double max_dist = 0.0;
    Double min_dist = 100.0;
    for (int i = 0; i < descriptor_object.rows(); i++) {
        Double dist = (double) matchesList.get(i).distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    // filter good matches
    LinkedList<DMatch> good_matches = new LinkedList<DMatch>();
    MatOfDMatch gm = new MatOfDMatch();
    // good match = distance < 2*min_dist ==> put them in a list
    for (int i = 0; i < descriptor_object.rows(); i++) {
        if (matchesList.get(i).distance < 2 * min_dist) {
            good_matches.addLast(matchesList.get(i));
        }
    }
    // List -> Mat
    gm.fromList(good_matches);
    // Mat for the resulting image
    Mat img_matches = new Mat();
    // filter keypoints (use only good matches); first in a List, iterate, afterwards ==> Mat
    LinkedList<Point> objList = new LinkedList<Point>();
    LinkedList<Point> sceneList = new LinkedList<Point>();
    List<KeyPoint> keypoints_objectList = keypoints_object.toList();
    List<KeyPoint> keypoints_sceneList = keypoints_scene.toList();
    for (int i = 0; i < good_matches.size(); i++) {
        objList.addLast(keypoints_objectList.get(good_matches.get(i).queryIdx).pt);
        sceneList.addLast(keypoints_sceneList.get(good_matches.get(i).trainIdx).pt);
    }
    MatOfPoint2f obj = new MatOfPoint2f();
    obj.fromList(objList);
    MatOfPoint2f scene = new MatOfPoint2f();
    scene.fromList(sceneList);
    // calc transformation matrix; method = 8 (RANSAC), ransacReprojThreshold = 3
    Mat hg = Calib3d.findHomography(obj, scene, 8, 3);
    // init corners
    Mat obj_corners = new Mat(4, 1, CvType.CV_32FC2);
    Mat scene_corners = new Mat(4, 1, CvType.CV_32FC2);
    // obj
    obj_corners.put(0, 0, new double[]{0, 0});
    obj_corners.put(1, 0, new double[]{img_object.cols(), 0});
    obj_corners.put(2, 0, new double[]{img_object.cols(), img_object.rows()});
    obj_corners.put(3, 0, new double[]{0, img_object.rows()});
    // transform the obj corners into img_scene (stored in scene_corners)
    Core.perspectiveTransform(obj_corners, scene_corners, hg);
    // shift the points right by img_object's width to fit the side-by-side matching image
    Point p1 = new Point(scene_corners.get(0, 0)[0] + img_object.cols(), scene_corners.get(0, 0)[1]);
    Point p2 = new Point(scene_corners.get(1, 0)[0] + img_object.cols(), scene_corners.get(1, 0)[1]);
    Point p3 = new Point(scene_corners.get(2, 0)[0] + img_object.cols(), scene_corners.get(2, 0)[1]);
    Point p4 = new Point(scene_corners.get(3, 0)[0] + img_object.cols(), scene_corners.get(3, 0)[1]);
    // create the matching image
    Features2d.drawMatches(img_object, keypoints_object, img_scene, keypoints_scene, gm, img_matches);
    // draw the projected object outline on the matching image
    Core.line(img_matches, p1, p2, new Scalar(0, 255, 0), 4);
    Core.line(img_matches, p2, p3, new Scalar(0, 255, 0), 4);
    Core.line(img_matches, p3, p4, new Scalar(0, 255, 0), 4);
    Core.line(img_matches, p4, p1, new Scalar(0, 255, 0), 4);
    // resizing...
    Mat resizeimage = new Mat();
    Size sz = new Size(1200, 1000);
    Imgproc.resize(img_matches, img_matches, sz);
    panel1.setimagewithMat(img_matches);
    frame1.repaint();
    // tried to prevent any old references from mixing up the new calculation
    matcher.clear();
    img_matches = new Mat();
    img_object = new Mat();
    img_scene = new Mat();
    keypoints_object = new MatOfKeyPoint();
    keypoints_scene = new MatOfKeyPoint();
    hg = new Mat();
}
If I run the startRecognition method twice in my running application (the OpenCV library is loaded at startup), I get the same result for both recognitions. On the third try it detects different keypoints and calculates a different transformation matrix (hg). Examples:
after the 2nd try:
after the 3rd:
Can anyone explain why, or tell me how to prevent it? When I restart the whole program, it will again detect correctly two times and then vary. After several tries it will again calculate the correct hg (from the first and second try). I can't figure out why this is happening.
Thanks in advance,
gemorra
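One avenue worth exploring (a suggestion, not a confirmed diagnosis): the FLANN-based matcher builds randomized KD-trees, so its matches can vary between runs, while a brute-force matcher is deterministic. A minimal sketch of the swap:

// Sketch: swap the FLANN matcher for a deterministic brute-force matcher.
// BRUTEFORCE uses L2 distance, which suits SURF's float descriptors.
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);
MatOfDMatch matches = new MatOfDMatch();
matcher.match(descriptor_object, descriptor_scene, matches);

If the results stabilize with the brute-force matcher, the varying homography came from the matcher rather than from stale state between calls.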
