I have two images and I compute ORB descriptors for each of them. Next I filter the matches by distance and then with RANSAC, but the result is wrong: for example, an ear in one image ends up matched to a logo in the other. What is the problem?
Below is the code:
public static void compareFeature(String filename1, String filename2) {
ORB orb = ORB.create();
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
Mat img1 = Imgcodecs.imread(filename1, Imgcodecs.CV_LOAD_IMAGE_GRAYSCALE);
Mat descriptors1 = new Mat();
MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
orb.detectAndCompute(img1, new Mat(), keypoints1, descriptors1);
// second image
Mat img2 = Imgcodecs.imread(filename2, Imgcodecs.CV_LOAD_IMAGE_GRAYSCALE);
Mat descriptors2 = new Mat();
MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
orb.detectAndCompute(img2, new Mat(), keypoints2, descriptors2);
// MATCHING
// match these two keypoints sets
MatOfDMatch matches = new MatOfDMatch();
matcher.match(descriptors1, descriptors2, matches);
List<DMatch> matchesList = matches.toList();
// filter matches by distance (note: this keeps matches near the minimum distance; it is not Lowe's ratio test)
LinkedList<DMatch> good_matches = new LinkedList<>();
Double max_dist = 0.0;
Double min_dist = 100.0;
for (DMatch aMatchesList1 : matchesList) {
Double dist = (double) aMatchesList1.distance;
if (dist < min_dist)
min_dist = dist;
if (dist > max_dist)
max_dist = dist;
}
for (DMatch aMatchesList : matchesList) {
if (aMatchesList.distance <= (1.5 * min_dist))
good_matches.addLast(aMatchesList);
}
// get keypoint coordinates of good matches to find homography and remove outliers using ransac
List<Point> pts1 = new ArrayList<>();
List<Point> pts2 = new ArrayList<>();
for (DMatch good_matche : good_matches) {
pts1.add(keypoints1.toList().get(good_matche.queryIdx).pt);
pts2.add(keypoints2.toList().get(good_matche.trainIdx).pt);
}
// conversion of data types - there may be a more elegant way
Mat outputMask = new Mat();
MatOfPoint2f pts1Mat = new MatOfPoint2f();
pts1Mat.fromList(pts1);
MatOfPoint2f pts2Mat = new MatOfPoint2f();
pts2Mat.fromList(pts2);
// Find homography - here just used to perform match filtering with RANSAC, but could be used to e.g. stitch images
// the smaller the allowed reprojection error (here 15), the more matches are filtered
System.out.println("pts1mat " + pts1Mat.size());
System.out.println("pts2mat " + pts2Mat.size());
Mat Homog = Calib3d.findHomography(pts1Mat, pts2Mat, Calib3d.RANSAC, 15, outputMask, 2000, 0.995);
System.out.println("pts1mat " + pts1Mat.size());
System.out.println("pts2mat " + pts2Mat.size());
// outputMask contains zeros and ones indicating which matches are filtered
LinkedList<DMatch> better_matches = new LinkedList<DMatch>();
for (int i = 0; i < good_matches.size(); i++) {
if (outputMask.get(i, 0)[0] != 0.0) {
better_matches.add(good_matches.get(i));
}
}
// DRAWING OUTPUT
Mat outputImg = new Mat();
// this will draw all matches, works fine
MatOfDMatch better_matches_mat = new MatOfDMatch();
better_matches_mat.fromList(better_matches);
Features2d.drawMatches(img1, keypoints1, img2, keypoints2, better_matches_mat, outputImg);
// save image
Imgcodecs.imwrite("result.jpg", outputImg);
System.out.println("better_matches_mat " + better_matches_mat.size());
It does not work correctly. What do I need to fix so that everything works? And how should descriptors be compared for similar images?
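One likely culprit is the match filtering: keeping only matches near the global minimum distance often leaves too few (or degenerate) correspondences for findHomography, and a RANSAC reprojection error of 15 px is quite permissive (3 to 5 is more common). A minimal sketch of a knnMatch-based Lowe's ratio test that could replace the distance filter above (the 0.75 threshold is a typical starting value, not a tuned one):
// retrieve the two best candidate matches for every descriptor in descriptors1
List<MatOfDMatch> knnMatches = new ArrayList<>();
matcher.knnMatch(descriptors1, descriptors2, knnMatches, 2);
LinkedList<DMatch> good_matches = new LinkedList<>();
for (MatOfDMatch pair : knnMatches) {
    DMatch[] m = pair.toArray();
    // keep a match only if it is clearly better than the runner-up
    if (m.length >= 2 && m[0].distance < 0.75f * m[1].distance) {
        good_matches.addLast(m[0]);
    }
}
// findHomography needs at least 4 well-spread correspondences to be meaningful
if (good_matches.size() < 4) {
    System.out.println("not enough good matches for a homography");
    return;
}
A common way to score how similar two images are is then the number (or fraction) of RANSAC inliers, i.e. better_matches.size(), rather than the raw descriptor distances.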
Related
I am running the Canny edge detector with Android and OpenCV on an image to detect the largest contour, extract it using warpPerspective, and then find all objects inside that contour. Everything works as expected, but only for images that aren't rotated.
I am using boundingRect to get the contour and use its coordinates to extract it.
Here is my code:
private Mat detectLargestContour(Mat origMat) {
// long e1 = Core.getTickCount();
Mat mGray = new Mat();
MatOfDouble mu = new MatOfDouble();
MatOfDouble stddev = new MatOfDouble();
Imgproc.cvtColor(origMat, mGray, Imgproc.COLOR_BGR2GRAY);
Core.meanStdDev(mGray, mu, stddev);
Imgproc.GaussianBlur(mGray, mGray, new Size(5, 5), 5);
//Imgproc.Canny(mGray, mGray, 30, 80, 3, false); //FOR HIGH BRIGHTNESS
//Imgproc.Canny(mGray, mGray, 50, 130, 3, false); // FOR LOW BRIGHTNESS
Imgproc.Canny(mGray, mGray, mu.get(0, 0)[0], stddev.get(0, 0)[0], 3, false);
Mat kernell = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9,9));
Imgproc.morphologyEx(mGray, mGray, Imgproc.MORPH_CLOSE, kernell);
Imgproc.dilate(mGray, mGray, Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3)));
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(mGray, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
//MatOfPoint2f approxCurve = new MatOfPoint2f();
double largest_area=0;
Rect rect = new Rect();
for (int idx = 0; idx < contours.size() ; idx++) {
double a = Imgproc.contourArea(contours.get(idx)); //Find the area of contour
if (a > largest_area) {
largest_area = a;
rect = Imgproc.boundingRect(contours.get(idx));
}
}
if (rect.area() > 100000) {
Imgproc.rectangle(origMat, rect.tl(), rect.br(), new Scalar(0, 255, 0));
p1 = new Point(rect.tl().x, rect.tl().y);
p2 = new Point(rect.tl().x + rect.width, rect.tl().y);
p3 = new Point(rect.tl().x, rect.tl().y + rect.height);
p4 = new Point(rect.tl().x + rect.width, rect.tl().y + rect.height);
card_corners = new ArrayList<>();
card_corners.add(p1);
card_corners.add(p3);
card_corners.add(p4);
card_corners.add(p2);
warpedCard = new Mat(origMat.rows(), origMat.cols(), CvType.CV_8UC3);
final Point p1 = new Point(warpedCard.cols() + marge, warpedCard.rows() + marge);
final Point p2 = new Point(0 - marge, warpedCard.rows() + marge);
final Point p3 = new Point(0 - marge, 0 - marge);
final Point p4 = new Point(warpedCard.cols() + marge, 0 - marge);
LinkedList<Point> sceneList = new LinkedList<Point>();
sceneList.addLast(p4);
sceneList.addLast(p3);
sceneList.addLast(p2);
sceneList.addLast(p1);
MatOfPoint2f scene = new MatOfPoint2f();
scene.fromList(sceneList);
MatOfPoint2f obj = new MatOfPoint2f();
obj.fromList(card_corners);
Mat homography = Calib3d.findHomography(obj, scene);
Imgproc.warpPerspective(origMat, warpedCard, homography, new Size(warpedCard.cols(), warpedCard.rows()));
return warpedCard;
}
return origMat;
}
It's strange: only boundingRect gave me a stable and fast result, but the drawn rectangle doesn't rotate with the found contour.
How can I manage to resolve this issue? Any thoughts?
EDIT:
I replaced boundingRect with minAreaRect.
Here is the code:
int largest_idx = 0;
for (int idx = 0; idx < contours.size() ; idx++) {
double a = Imgproc.contourArea(contours.get(idx)); //Find the area of contour
if (a > largest_area) {
largest_area = a;
// rect = Imgproc.boundingRect(contours.get(idx));
largest_idx = idx;
}
}
MatOfPoint2f new_mat = new MatOfPoint2f( contours.get(largest_idx).toArray() );
RotatedRect rbox = Imgproc.minAreaRect(new_mat);
Log.d("rotatedrect_angle", "" + rbox.angle);
Point points[] = new Point[4];
rbox.points(points);
for(int i=0; i<4; ++i){
Imgproc.line(origMat, points[i], points[(i+1)%4], new Scalar(255,255,255));
}
And here is what I've got:
As you can see the detection isn't as accurate as when I used boundingRect.
A Python demo to find and draw a rotatedRect:
# 2019/03/01
# https://stackoverflow.com/a/54942835/3547485
import numpy as np
import cv2
gray = cv2.imread("tmp.png", cv2.IMREAD_GRAYSCALE)
th, threshed = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY_INV)
cnts = cv2.findContours(threshed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
cnt = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
rbox = cv2.minAreaRect(cnt)
pts = cv2.boxPoints(rbox).astype(np.int32)
cv2.drawContours(img, [pts], -1, (0, 255, 0), 1, cv2.LINE_AA)
cv2.imwrite("dst.png", img)
Useful OpenCV functions (in Python): cv2.minAreaRect, cv2.boxPoints, cv2.drawContours. You can find the corresponding functions in Java.
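For reference, a rough Java sketch of the same steps (it assumes a binary Mat such as the mGray produced by the Canny/morphology step in the question, and draws on origMat; the values are illustrative only):
List<MatOfPoint> cnts = new ArrayList<>();
Imgproc.findContours(mGray, cnts, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);
// pick the largest contour by area
int largestIdx = 0;
double largestArea = 0;
for (int i = 0; i < cnts.size(); i++) {
    double a = Imgproc.contourArea(cnts.get(i));
    if (a > largestArea) { largestArea = a; largestIdx = i; }
}
RotatedRect rbox = Imgproc.minAreaRect(new MatOfPoint2f(cnts.get(largestIdx).toArray()));
Point[] pts = new Point[4];
rbox.points(pts); // the Java counterpart of cv2.boxPoints
for (int i = 0; i < 4; i++) {
    Imgproc.line(origMat, pts[i], pts[(i + 1) % 4], new Scalar(0, 255, 0), 1);
}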
I'd like to copy the recognized text to a clean image using createBitmap, but I don't know how to get it from the boxes on the original image.
This code finds maximally stable extremal regions (MSER) and highlights them on the bitmap. Further below is what I get with it on a sample image.
private void sgmnt(Mat mImg) {
Mat m2 = new Mat();
Imgproc.cvtColor(mImg, m2, Imgproc.COLOR_RGB2GRAY);
Mat mRgba = mImg;
Mat mGray = m2;
Scalar CONTOUR_COLOR = new Scalar(1, 1, 255, 1);
//Scalar CONTOUR_COLOR = new Scalar(255);
MatOfKeyPoint keyPoint = new MatOfKeyPoint();
List<KeyPoint> listPoint = new ArrayList<>();
KeyPoint kPoint = new KeyPoint();
Mat mask = Mat.zeros(mGray.size(), CvType.CV_8UC1);
int rectanx1;
int rectany1;
int rectanx2;
int rectany2;
int imgSize = mGray.height() * mGray.width();
Scalar zeros = new Scalar(255,1,1, 1);
List<MatOfPoint> contour2 = new ArrayList<MatOfPoint>();
Mat kernel = new Mat(1, 50, CvType.CV_8UC1, Scalar.all(255));
Mat morByte = new Mat();
Mat hierarchy = new Mat();
Rect rectan3 = new Rect();
FeatureDetector detector = FeatureDetector.create(FeatureDetector.MSER);
detector.detect(mGray, keyPoint);
listPoint = keyPoint.toList();
for(int ind = 0; ind < listPoint.size(); ++ind) {
kPoint = listPoint.get(ind);
rectanx1 = (int) (kPoint.pt.x - 0.5 * kPoint.size);
rectany1 = (int) (kPoint.pt.y - 0.5 * kPoint.size);
rectanx2 = (int) (kPoint.size);
rectany2 = (int) (kPoint.size);
if (rectanx1 <= 0) {
rectanx1 = 1;
}
if (rectany1 <= 0) {
rectany1 = 1;
}
if ((rectanx1 + rectanx2) > mGray.width()) {
rectanx2 = mGray.width() - rectanx1;
}
if ((rectany1 + rectany2) > mGray.height()) {
rectany2 = mGray.height() - rectany1;
}
Rect rectant = new Rect(rectanx1, rectany1, rectanx2, rectany2);
try{
Mat roi = new Mat(mask, rectant);
roi.setTo(CONTOUR_COLOR);
}
catch (Exception ex) {
Log.d("mylog", "mat roi error " + ex.getMessage());
}
}
Imgproc.morphologyEx(mask, morByte, Imgproc.MORPH_DILATE, kernel);
Imgproc.findContours(morByte, contour2, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);
for(int i = 0; i<contour2.size(); ++i){
rectan3 = Imgproc.boundingRect(contour2.get(i));
if(rectan3.area() > 0.5 * imgSize || rectan3.area()<100 || rectan3.width / rectan3.height < 2){
Mat roi = new Mat(morByte, rectan3);
roi.setTo(zeros);
}else{
Imgproc.rectangle(mRgba, rectan3.br(), rectan3.tl(), CONTOUR_COLOR);
}
}
}
Here's an example of what I get:
My problem is I want to somehow get the text inside the boxes.
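One way to do that (a sketch under the assumption that the accepted rectangles are collected in a list, here called textBoxes, instead of only being drawn in the else branch above) is to copy each box from the original Mat into a blank Mat of the same size and then convert the result to a Bitmap:
List<Rect> textBoxes = new ArrayList<>(); // hypothetical helper list, filled with rectan3 in the else branch above
Mat clean = new Mat(mRgba.size(), mRgba.type(), new Scalar(255, 255, 255, 255)); // white canvas
for (Rect box : textBoxes) {
    // copy only the pixels inside each detected text box
    mRgba.submat(box).copyTo(clean.submat(box));
}
Bitmap bmp = Bitmap.createBitmap(clean.cols(), clean.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(clean, bmp); // org.opencv.android.Utils
If you only need the cropped regions themselves, each mRgba.submat(box) can be converted to its own Bitmap the same way.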
I am working on a licence plate recognition application using OpenCV, Tesseract and Java, but I am running into issues. I can't seem to segment the text correctly: not all characters get detected and bounded with a bounding box (some of my outputs are shown below). Also, when the characters are detected I never know which character is in which box, so when I pass them through Tesseract they come out jumbled. How do I keep the resulting string in the right order?
This one fails despite the clearly visible characters:
Here Z and 6 fail to be detected even when 6 is clearly visible:
Below is my code:
originalFrame = image.clone();
roiColor = image.clone();
Imgproc.cvtColor(image, image, Imgproc.COLOR_BGR2GRAY, 0);
originalFrameGrayScale = image.clone();
Mat morph = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9, 9));
Imgproc.morphologyEx(image, image, Imgproc.MORPH_TOPHAT, morph);
Imgproc.Sobel(image, image, -1, 2, 0);
Imgproc.GaussianBlur(image, image, new Size(5,5), 3,3);
Imgproc.morphologyEx(image, image, Imgproc.MORPH_CLOSE, morph);
Imgproc.threshold(image, image, 200, 255, Imgproc.THRESH_OTSU);
Vector<Rect> rectangles = detectionContour(image);
Mat roi = originalFrameGrayScale.clone();
if(!rectangles.isEmpty()){
roi = originalFrameGrayScale.submat(rectangles.get(0));
roiBlack = roi.clone();
roiColor = roiColor.submat(rectangles.get(0));
Imgproc.rectangle(originalFrame, rectangles.get(0).br(), rectangles.get(0).tl(), new Scalar(0,0,255), 2);
}
Imgproc.medianBlur(roi, roi, 3);
Imgproc.adaptiveThreshold(roi, roi, 225, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 15, 3);
roiBinarize = roi.clone();
Mat erode = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(1, 1));
Mat dilate = Imgproc.getStructuringElement(Imgproc.MORPH_RECT,new Size(1, 1));
Imgproc.morphologyEx(roi, roi, Imgproc.MORPH_OPEN, dilate);
Imgproc.morphologyEx(roi, roi, Imgproc.MORPH_OPEN, erode);
Imgproc.Canny(roi, roi, 150, 150 * 3, 3, true);
Vector<Rect> letters = detectionPlateCharacterContour(roi);
doTesseractOCR(letters, roiBinarize);
private static void doTesseractOCR(Vector<Rect> letters, Mat plate){
Tesseract instance = new Tesseract(); //
instance.setLanguage(LANGUAGE);
String resultPlate = "";
for(int i= 0; i < letters.size(); i++){
BufferedImage letter = OpenCvUtils.Mat2bufferedImage(plate.submat(letters.get(i)));
try {
String result = instance.doOCR(letter);
resultPlate += result + " position "+i;
} catch (TesseractException e) {
System.err.println(e.getMessage());
}
System.out.println("Tesseract output: "+resultPlate);
}
}
private static Vector<Rect> detectionPlateCharacterContour(Mat roi) {
Mat contHierarchy = new Mat();
Mat imageMat = roi.clone();
Rect rect = null;
List<MatOfPoint> contours = new ArrayList<>();
Imgproc.findContours(imageMat, contours, contHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);
Vector<Rect> rect_array = new Vector<>();
for (int i = 0; i < contours.size(); i++) {
rect = Imgproc.boundingRect(contours.get(i));
double ratio = 0;
if (rect.height > rect.width) {
ratio = (double) rect.height / rect.width; // cast to avoid integer division
} else {
ratio = (double) rect.width / rect.height;
}
Logger.printMessage("Ratio of letter: "+ratio);
double contourarea = Imgproc.contourArea(contours.get(i));
if (contourarea >= 100 && contourarea <= 1000 && ( ratio >= 1 && ratio <= 2)) {
Imgproc.rectangle(roiColor, rect.br(), rect.tl(), new Scalar(255,0,0));
rect_array.add(rect);
}
}
contHierarchy.release();
return rect_array;
}
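Regarding the jumbled characters: findContours returns contours in no particular order, so the boxes in letters need to be sorted left to right before they are passed to doTesseractOCR. A minimal sketch:
Vector<Rect> letters = detectionPlateCharacterContour(roi);
// order the character boxes by their x coordinate so the OCR output matches the plate order
letters.sort(Comparator.comparingInt((Rect r) -> r.x));
doTesseractOCR(letters, roiBinarize);
It may also help to tell Tesseract that each crop is a single character and to restrict the recognizable alphabet; a sketch using Tess4J settings, assuming your Tess4J version exposes setPageSegMode and setTessVariable (the whitelist is an assumption about the plate font, adjust as needed):
Tesseract instance = new Tesseract();
instance.setLanguage(LANGUAGE);
instance.setPageSegMode(10); // treat each image as a single character
instance.setTessVariable("tessedit_char_whitelist", "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789");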
I have been having some issues getting the outline of the detected object into the correct place; it's as if the coordinates are in the wrong place. I have the Hessian threshold set to 2000 and I have filtered for matches whose distance is less than 3 times the minimum distance. Any help would be appreciated.
Results from running matching and homography:
Code sample below:
public static void findMatches()
{
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
//Load Image 1
Mat img_object = Highgui.imread("./resources/Database/box.png");
//Load Image 2
Mat img_scene = Highgui.imread("./resources/Database/box_in_scene.png");
//Check if either image failed to load; if so, exit the application (imread returns an empty Mat, never null)
if (img_object.empty() || img_scene.empty())
{
System.exit(0);
}
//Convert Image 1 to greyscale
Mat grayImageobject = new Mat(img_object.rows(), img_object.cols(), img_object.type());
Imgproc.cvtColor(img_object, grayImageobject, Imgproc.COLOR_BGRA2GRAY);
Core.normalize(grayImageobject, grayImageobject, 0, 255, Core.NORM_MINMAX);
//Convert image 2 to greyscale
Mat grayImageScene = new Mat(img_scene.rows(), img_scene.cols(), img_scene.type());
Imgproc.cvtColor(img_scene, grayImageScene, Imgproc.COLOR_BGRA2GRAY);
Core.normalize(grayImageScene, grayImageScene, 0, 255, Core.NORM_MINMAX);
//Create a SURF feature detector
FeatureDetector detector = FeatureDetector.create(4); //4 = SURF
//The hessian threshold cannot be set directly through this API, so we have to write the desired
//values into a file and then read them back with detector.read
try (Writer writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream("hessian.txt"), "utf-8"))) {
writer.write("%YAML:1.0\nhessianThreshold: 2000.\noctaves:3\noctaveLayers: 4\nupright: 0\n");
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
detector.read("hessian.txt");
//Mat of keypoints for object and scene
MatOfKeyPoint keypoints_object = new MatOfKeyPoint();
MatOfKeyPoint keypoints_scene = new MatOfKeyPoint();
//Detect keypoints in scene and object storing them in mat of keypoints
detector.detect(img_object, keypoints_object);
detector.detect(img_scene, keypoints_scene);
DescriptorExtractor extractor = DescriptorExtractor.create(2); //2 = SURF;
Mat descriptor_object = new Mat();
Mat descriptor_scene = new Mat() ;
extractor.compute(img_object, keypoints_object, descriptor_object);
extractor.compute(img_scene, keypoints_scene, descriptor_scene);
DescriptorMatcher matcher = DescriptorMatcher.create(1); // 1 = FLANNBASED
MatOfDMatch matches = new MatOfDMatch();
matcher.match(descriptor_object, descriptor_scene, matches);
List<DMatch> matchesList = matches.toList();
Double max_dist = 0.0;
Double min_dist = 100.0;
for(int i = 0; i < descriptor_object.rows(); i++){
Double dist = (double) matchesList.get(i).distance;
if(dist < min_dist) min_dist = dist;
if(dist > max_dist) max_dist = dist;
}
System.out.println("-- Max dist : " + max_dist);
System.out.println("-- Min dist : " + min_dist);
LinkedList<DMatch> good_matches = new LinkedList<DMatch>();
MatOfDMatch gm = new MatOfDMatch();
for(int i = 0; i < descriptor_object.rows(); i++){
if(matchesList.get(i).distance < 3*min_dist){
good_matches.addLast(matchesList.get(i));
}
}
gm.fromList(good_matches);
Mat img_matches = new Mat();
Features2d.drawMatches(img_object,keypoints_object,img_scene,keypoints_scene, gm, img_matches, new Scalar(255,0,0), new Scalar(0,0,255), new MatOfByte(), 2);
if(good_matches.size() >= 10){
LinkedList<Point> objList = new LinkedList<Point>();
LinkedList<Point> sceneList = new LinkedList<Point>();
List<KeyPoint> keypoints_objectList = keypoints_object.toList();
List<KeyPoint> keypoints_sceneList = keypoints_scene.toList();
for(int i = 0; i<good_matches.size(); i++){
objList.addLast(keypoints_objectList.get(good_matches.get(i).queryIdx).pt);
sceneList.addLast(keypoints_sceneList.get(good_matches.get(i).trainIdx).pt);
}
MatOfPoint2f obj = new MatOfPoint2f();
obj.fromList(objList);
MatOfPoint2f scene = new MatOfPoint2f();
scene.fromList(sceneList);
Mat homography = Calib3d.findHomography(obj, scene);
Mat obj_corners = new Mat(4,1,CvType.CV_32FC2);
Mat scene_corners = new Mat(4,1,CvType.CV_32FC2);
obj_corners.put(0, 0, new double[] {0,0});
obj_corners.put(1, 0, new double[] {img_object.cols(),0});
obj_corners.put(2, 0, new double[] {img_object.cols(),img_object.rows()});
obj_corners.put(3, 0, new double[] {0,img_object.rows()});
//Compute the most probable perspective transformation
//out of several pairs of corresponding points.
//Imgproc.getPerspectiveTransform(obj_corners, scene_corners);
Core.perspectiveTransform(obj_corners,scene_corners, homography);
Core.line(img_matches, new Point(scene_corners.get(0,0)), new Point(scene_corners.get(1,0)), new Scalar(0, 255, 0),4);
Core.line(img_matches, new Point(scene_corners.get(1,0)), new Point(scene_corners.get(2,0)), new Scalar(0, 255, 0),4);
Core.line(img_matches, new Point(scene_corners.get(2,0)), new Point(scene_corners.get(3,0)), new Scalar(0, 255, 0),4);
Core.line(img_matches, new Point(scene_corners.get(3,0)), new Point(scene_corners.get(0,0)), new Scalar(0, 255, 0),4);
Highgui.imwrite("./resources/ImageMatching" + ".jpg", img_matches);
createWindow("Image Matching", "resources/ImageMatching.jpg");
}
else
{
System.out.println("Not enough Matches");
System.exit(0);
}
}
The coordinates are in the correct place; you're simply drawing on the wrong image.
Your coordinates are relative to the second image, img_scene. So if you draw your lines only on that image, they'll be correct.
If you want to draw the lines on a composed image, where img_scene is translated to the right by the width of the first image img_object, you simply need to add img_object.cols() to the points' x coordinates.
For example:
Core.line(img_matches,
new Point(scene_corners.get(0,0)[0] + img_object.cols(), scene_corners.get(0,0)[1]),
new Point(scene_corners.get(1,0)[0] + img_object.cols(), scene_corners.get(1,0)[1]),
new Scalar(0, 255, 0),4);
for the first line, and the same for the next 3 lines.
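The same offset can be applied to all four sides with a small loop, for example (same OpenCV 2.4 Java API as the code above):
int offset = img_object.cols();
for (int i = 0; i < 4; i++) {
    Point from = new Point(scene_corners.get(i, 0)[0] + offset, scene_corners.get(i, 0)[1]);
    Point to = new Point(scene_corners.get((i + 1) % 4, 0)[0] + offset, scene_corners.get((i + 1) % 4, 0)[1]);
    Core.line(img_matches, from, to, new Scalar(0, 255, 0), 4);
}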
I have a very strange problem. I'm using this code to detect an image inside another one (Java OpenCV):
UPDATED CODE:
public void startRecognition() {
//load images, I want to find img_object in img_scene
Mat img_scene = Highgui.imread("D:/opencvws/ImageRecognition/src/main/resources/ascene.jpg");
Mat img_object = Highgui.imread("D:/opencvws/ImageRecognition/src/main/resources/aobj1.jpg");
run++;
System.out.println("RUN NO: " + run);
//init detector
FeatureDetector detector = FeatureDetector.create(FeatureDetector.SURF);
//keypoint detection for both images (keyponts_scene for img_scene, keypoint_object for img_object)
MatOfKeyPoint keypoints_object = new MatOfKeyPoint();
MatOfKeyPoint keypoints_scene = new MatOfKeyPoint();
detector.detect(img_object, keypoints_object);
detector.detect(img_scene, keypoints_scene);
System.out.println("OK: " + keypoints_object.total());
System.out.println("SK: " + keypoints_scene.total());
//extractor init
DescriptorExtractor extractor = DescriptorExtractor.create(2); //2 = SURF;
Mat descriptor_object = new Mat();
Mat descriptor_scene = new Mat() ;
//Compute descriptors
extractor.compute(img_object, keypoints_object, descriptor_object);
extractor.compute(img_scene, keypoints_scene, descriptor_scene);
//init matcher
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED); // 1 = FLANNBASED
matcher.clear();
MatOfDMatch matches = new MatOfDMatch();
//match both descriptors
matcher.match(descriptor_object, descriptor_scene, matches);
List<DMatch> matchesList = matches.toList();
//calc min/max dist
Double max_dist = 0.0;
Double min_dist = 100.0;
for(int i = 0; i < descriptor_object.rows(); i++){
Double dist = (double) matchesList.get(i).distance;
if(dist < min_dist) min_dist = dist;
if(dist > max_dist) max_dist = dist;
}
//filter good matches
LinkedList<DMatch> good_matches = new LinkedList<DMatch>();
MatOfDMatch gm = new MatOfDMatch();
//good match = distance < 2*min_dist ==> put them in a list
for(int i = 0; i < descriptor_object.rows(); i++){
if(matchesList.get(i).distance < 2*min_dist){
good_matches.addLast(matchesList.get(i));
}
}
//List -> Mat
gm.fromList(good_matches);
//mat for resulting image
Mat img_matches = new Mat();
//filter keypoints (use only good matches); First in a List, iterate, afterwards ==> Mat
LinkedList<Point> objList = new LinkedList<Point>();
LinkedList<Point> sceneList = new LinkedList<Point>();
List<KeyPoint> keypoints_objectList = keypoints_object.toList();
List<KeyPoint> keypoints_sceneList = keypoints_scene.toList();
for(int i = 0; i<good_matches.size(); i++){
objList.addLast(keypoints_objectList.get(good_matches.get(i).queryIdx).pt);
sceneList.addLast(keypoints_sceneList.get(good_matches.get(i).trainIdx).pt);
}
MatOfPoint2f obj = new MatOfPoint2f();
obj.fromList(objList);
MatOfPoint2f scene = new MatOfPoint2f();
scene.fromList(sceneList);
//calc transformation matrix; method = 8 (RANSAC) ransacReprojThreshold=3
Mat hg = Calib3d.findHomography(obj, scene, 8,3);
//init corners
Mat obj_corners = new Mat(4,1,CvType.CV_32FC2);
Mat scene_corners = new Mat(4,1,CvType.CV_32FC2);
//obj
obj_corners.put(0, 0, new double[] {0,0});
obj_corners.put(1, 0, new double[] {img_object.cols(),0});
obj_corners.put(2, 0, new double[] {img_object.cols(),img_object.rows()});
obj_corners.put(3, 0, new double[] {0,img_object.rows()});
//transform obj corners to scene_img (stored in scene_corners)
Core.perspectiveTransform(obj_corners,scene_corners, hg);
//shift the points right by the width of img_object so they line up with the scene half of the matching image
Point p1 = new Point(scene_corners.get(0,0)[0]+img_object.cols(), scene_corners.get(0,0)[1]);
Point p2 = new Point(scene_corners.get(1,0)[0]+img_object.cols(), scene_corners.get(1,0)[1]);
Point p3 = new Point(scene_corners.get(2,0)[0]+img_object.cols(), scene_corners.get(2,0)[1]);
Point p4 = new Point(scene_corners.get(3,0)[0]+img_object.cols(), scene_corners.get(3,0)[1]);
//create the matching image
Features2d.drawMatches(
img_object,
keypoints_object,
img_scene,
keypoints_scene,
gm,
img_matches);
//draw lines to the matching image
Core.line(img_matches, p1 , p2, new Scalar(0, 255, 0),4);
Core.line(img_matches, p2, p3, new Scalar(0, 255, 0),4);
Core.line(img_matches, p3, p4, new Scalar(0, 255, 0),4);
Core.line(img_matches, p4, p1, new Scalar(0, 255, 0),4);
// resizing...
Mat resizeimage = new Mat();
Size sz = new Size(1200, 1000);
Imgproc.resize(img_matches, img_matches, sz);
panel1.setimagewithMat(img_matches);
frame1.repaint();
//tried to prevent any old references to mix up new calculation
matcher.clear();
img_matches = new Mat();
img_object = new Mat();
img_scene = new Mat();
keypoints_object = new MatOfKeyPoint();
keypoints_scene = new MatOfKeyPoint();
hg = new Mat();
}
If I run the startRecognition method twice (the OpenCV library is loaded at startup) in my running application, I get the same result for both recognitions. On the third try it detects different keypoints and calculates a different transformation matrix (hg). Examples:
after 2nd try:
after 3rd:
Can anyone explain why, or tell me how to prevent it? When I restart the whole program, it again detects correctly twice and then starts varying. After several tries it again calculates the correct hg (the one from the first and second try). I can't figure out why this is happening.
Thanks in advance
gemorra
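One thing worth checking (an assumption, not a confirmed diagnosis): the FLANN-based matcher builds an approximate, randomized index, so repeated calls can return slightly different nearest neighbours, and findHomography with RANSAC also samples point subsets randomly, so some run-to-run variation in hg is expected. Switching to the exact brute-force matcher removes at least the matching side of the nondeterminism; a minimal sketch of the substitution (SURF descriptors are float vectors, so the default L2 brute-force matcher applies):
// exact, deterministic matching for float descriptors such as SURF
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);
MatOfDMatch matches = new MatOfDMatch();
matcher.match(descriptor_object, descriptor_scene, matches);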