How to increase Android OpenCV camera FPS? - java

I am developing an Android application that processes camera frames with OpenCV in real time.
The image processing is heavy: I detect the largest contour, rotate and crop it, and apply some segmentation to the cropped Mat object.
The problem is that the FPS starts at about 3 and then drops to 1.
I'm not doing heavy operations like matToBitmap, and I'm not making native calls yet.
It's my first big project and I don't have much experience with OpenCV. What can I do to increase my FPS?
Here is my code:
long e1 = Core.getTickCount();
Mat mGray = new Mat();
MatOfDouble mu = new MatOfDouble();
MatOfDouble stddev = new MatOfDouble();
Imgproc.cvtColor(origMat, origMat, Imgproc.COLOR_BGRA2BGR);
Imgproc.cvtColor(origMat, mGray, Imgproc.COLOR_BGR2GRAY);
Core.meanStdDev(mGray, mu, stddev);
Imgproc.GaussianBlur(mGray, mGray, new Size(5, 5), 5);
Imgproc.Canny(mGray, mGray, (mu.get(0, 0)[0]) * 0.66, (mu.get(0, 0)[0]) * 1.33, 3, false);
Mat kernell = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9, 9));
Imgproc.morphologyEx(mGray, mGray, Imgproc.MORPH_CLOSE, kernell);
Imgproc.dilate(mGray, mGray, Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3)));
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(mGray, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
double largest_area = 0;
Rect rect = new Rect();
int largest_idx = 0;
for (int idx = 0; idx < contours.size(); idx++) {
    double a = Imgproc.contourArea(contours.get(idx)); // find the area of the contour
    if (a > largest_area) {
        largest_area = a;
        largest_idx = idx;
    }
}
if (contours.size() > 0) {
    MatOfPoint2f new_mat = new MatOfPoint2f(contours.get(largest_idx).toArray());
    RotatedRect rbox = Imgproc.minAreaRect(new_mat);
    Point vertices[] = new Point[4];
    rbox.points(vertices);
    List<MatOfPoint> boxContours = new ArrayList<>();
    boxContours.add(new MatOfPoint(vertices));
    for (int i = 0; i < 4; ++i) {
        Imgproc.line(origMat, vertices[i], vertices[(i + 1) % 4], new Scalar(255, 0, 0));
    }
    double rect_angle = rbox.angle - 90.0f;
    Size rect_size = rbox.size;
    double d = rect_size.width;
    rect_size.width = rect_size.height;
    rect_size.height = d;
    M = Imgproc.getRotationMatrix2D(rbox.center, rect_angle, 1.0);
    Imgproc.warpAffine(origMat, rotated, M, origMat.size());
    if (rect_size.width > 70 && rect_size.height > 70)
        Imgproc.getRectSubPix(rotated, new Size(rect_size.width - 70, rect_size.height - 70), rbox.center, rotated);
    Imgproc.resize(rotated, rotated, origMat.size());
    Mat orr = rotated.clone();
    Imgproc.cvtColor(orr, orr, Imgproc.COLOR_RGB2HSV);
    Core.split(orr, channels);
    orr = channels.get(2);
    CLAHE clahe = Imgproc.createCLAHE();
    clahe.setClipLimit(1);
    clahe.apply(orr, orr);
    Imgproc.GaussianBlur(orr, orr, new Size(5, 5), 5);
    Core.meanStdDev(mGray, mu, stddev);
    Imgproc.Canny(orr, orr, (mu.get(0, 0)[0]) * 0.66, (mu.get(0, 0)[0]) * 1.33, 3, false);
    Imgproc.morphologyEx(orr, orr, Imgproc.MORPH_CLOSE, kernell);
    Imgproc.dilate(orr, orr, Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3)));
    Imgproc.findContours(orr, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    for (int i = contours.size() - 1; i >= 0; i--) {
        double area = Imgproc.contourArea(contours.get(i), false); // find the area of the contour
        if (area < min_area)
            contours.remove(i);
    }
    if (contours.size() > 0) {
        MatOfPoint2f approxCurve = new MatOfPoint2f();
        rectList.clear();
        for (int idx = 0; idx < contours.size(); idx++) {
            // Convert contours(idx) from MatOfPoint to MatOfPoint2f
            MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(idx).toArray());
            double approxDistance = Imgproc.arcLength(contour2f, true) * 0.02;
            Imgproc.approxPolyDP(contour2f, approxCurve, approxDistance, true);
            // Convert back to MatOfPoint
            MatOfPoint points = new MatOfPoint(approxCurve.toArray());
            // Get bounding rect of contour
            Rect rectt = Imgproc.boundingRect(points);
            rectList.add(rectt); // was rectList.add(rect), which added the empty Rect declared above
            Mat miniature = new Mat(orr, new Rect(rectt.tl(), rectt.br()));
            mats.add(miniature);
            // draw enclosing rectangle (all same color, but you could use variable idx to make them unique)
            Imgproc.rectangle(rotated, rectt.tl(), rectt.br(), new Scalar(255, 0, 0));
        }
    }
    long e2 = Core.getTickCount();
    long e = e2 - e1;
    double time = e / Core.getTickFrequency();
    Log.d("timeTAG", "" + time);
    return orr;
}
return rotated;
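There is no answer posted for this question here, but one common first optimization (a sketch based on general OpenCV practice, not on anything in this thread) is to run the heavy blur/Canny/contour work on a downscaled copy of the frame and keep the full-resolution Mat only for drawing and cropping, scaling the detected geometry back up afterwards. Reusing Mats and structuring elements across frames instead of reallocating them, and capping the preview size (for example with CameraBridgeViewBase.setMaxFrameSize, if your OpenCV Android SDK version provides it), also helps.

// Illustrative sketch only: detect on a half-resolution copy, then scale results back up.
// (Placed after the BGRA -> BGR conversion in the code above.)
Mat small = new Mat();
Imgproc.pyrDown(origMat, small);          // half width and height -> roughly 4x fewer pixels to process
Mat smallGray = new Mat();
Imgproc.cvtColor(small, smallGray, Imgproc.COLOR_BGR2GRAY);
// ... run GaussianBlur / Canny / findContours on smallGray instead of mGray ...
// Scale any detected Rect or RotatedRect back up by 2 before using it on origMat, e.g.
// rbox.center.x *= 2; rbox.center.y *= 2; rbox.size.width *= 2; rbox.size.height *= 2;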

Related

How to draw a boundingRect with the right rotation angle by using OpenCV?

I am running a Canny edge detector with Android and OpenCV on an image to detect the largest contour, extract it using warpPerspective, and then find all objects inside that contour. Everything works as expected, but only for images that aren't rotated.
I am using boundingRect to get the contour and use its coordinates to extract it.
Here is my code:
private Mat detectLargestContour(Mat origMat) {
// long e1 = Core.getTickCount();
Mat mGray = new Mat();
MatOfDouble mu = new MatOfDouble();
MatOfDouble stddev = new MatOfDouble();
Imgproc.cvtColor(origMat, mGray, Imgproc.COLOR_BGR2GRAY);
Core.meanStdDev(mGray, mu, stddev);
Imgproc.GaussianBlur(mGray, mGray, new Size(5, 5), 5);
//Imgproc.Canny(mGray, mGray, 30, 80, 3, false); //FOR HIGH BRIGHTNESS
//Imgproc.Canny(mGray, mGray, 50, 130, 3, false); // FOR LOW BRIGHTNESS
Imgproc.Canny(mGray, mGray, mu.get(0, 0)[0], stddev.get(0, 0)[0], 3, false);
Mat kernell = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9,9));
Imgproc.morphologyEx(mGray, mGray, Imgproc.MORPH_CLOSE, kernell);
Imgproc.dilate(mGray, mGray, Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3)));
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(mGray, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
//MatOfPoint2f approxCurve = new MatOfPoint2f();
double largest_area=0;
Rect rect = new Rect();
for (int idx = 0; idx < contours.size() ; idx++) {
double a = Imgproc.contourArea(contours.get(idx)); //Find the area of contour
if (a > largest_area) {
largest_area = a;
rect = Imgproc.boundingRect(contours.get(idx));
}
}
if (rect.area() > 100000) {
Imgproc.rectangle(origMat, rect.tl(), rect.br(), new Scalar(0, 255, 0));
p1 = new Point(rect.tl().x, rect.tl().y);
p2 = new Point(rect.tl().x + rect.width, rect.tl().y);
p3 = new Point(rect.tl().x, rect.tl().y + rect.height);
p4 = new Point(rect.tl().x + rect.width, rect.tl().y + rect.height);
card_corners = new ArrayList<>();
card_corners.add(p1);
card_corners.add(p3);
card_corners.add(p4);
card_corners.add(p2);
warpedCard = new Mat(origMat.rows(), origMat.cols(), CvType.CV_8UC3);
final Point p1 = new Point(warpedCard.cols() + marge, warpedCard.rows() + marge);
final Point p2 = new Point(0 - marge, warpedCard.rows() + marge);
final Point p3 = new Point(0 - marge, 0 - marge);
final Point p4 = new Point(warpedCard.cols() + marge, 0 - marge);
LinkedList<Point> sceneList = new LinkedList<Point>();
sceneList.addLast(p4);
sceneList.addLast(p3);
sceneList.addLast(p2);
sceneList.addLast(p1);
MatOfPoint2f scene = new MatOfPoint2f();
scene.fromList(sceneList);
MatOfPoint2f obj = new MatOfPoint2f();
obj.fromList(card_corners);
Mat homography = Calib3d.findHomography(obj, scene);
Imgproc.warpPerspective(origMat, warpedCard, homography, new Size(warpedCard.cols(), warpedCard.rows()));
return warpedCard;
}
return origMat;
}
Oddly, only boundingRect gives me a stable and performant result, but the drawn rectangle doesn't rotate with the found contour.
How can I resolve this issue? Any thoughts?
EDIT:
I replaced boundingRect with minAreaRect.
Here is the code:
int largest_idx = 0;
for (int idx = 0; idx < contours.size(); idx++) {
    double a = Imgproc.contourArea(contours.get(idx)); // find the area of the contour
    if (a > largest_area) {
        largest_area = a;
        // rect = Imgproc.boundingRect(contours.get(idx));
        largest_idx = idx;
    }
}
MatOfPoint2f new_mat = new MatOfPoint2f(contours.get(largest_idx).toArray());
RotatedRect rbox = Imgproc.minAreaRect(new_mat);
Log.d("rotatedrect_angle", "" + rbox.angle);
Point points[] = new Point[4];
rbox.points(points);
for (int i = 0; i < 4; ++i) {
    Imgproc.line(origMat, points[i], points[(i + 1) % 4], new Scalar(255, 255, 255));
}
And here is what I've got:
As you can see the detection isn't as accurate as when I used boundingRect.
A Python demo to find and draw a rotatedRect:
# 2019/03/01
# https://stackoverflow.com/a/54942835/3547485
import numpy as np
import cv2

img = cv2.imread("tmp.png")                   # read in color so we can draw on it (the original only read grayscale, leaving img undefined)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
th, threshed = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY_INV)
cnts = cv2.findContours(threshed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
cnt = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
rbox = cv2.minAreaRect(cnt)
pts = cv2.boxPoints(rbox).astype(np.int32)
cv2.drawContours(img, [pts], -1, (0, 255, 0), 1, cv2.LINE_AA)
cv2.imwrite("dst.png", img)
Useful OpenCV functions (in Python): cv2.minAreaRect, cv2.boxPoints, cv2.drawContours. You can find the corresponding functions in Java.
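For reference, a rough Java equivalent of the drawing step (a minimal sketch; it assumes the contours list and largest-contour index from the question's own code, plus the usual org.opencv imports):

// RotatedRect.points() plays the role of cv2.boxPoints() in Java.
MatOfPoint2f cnt2f = new MatOfPoint2f(contours.get(largest_idx).toArray());
RotatedRect rbox = Imgproc.minAreaRect(cnt2f);
Point[] pts = new Point[4];
rbox.points(pts);
List<MatOfPoint> box = new ArrayList<>();
box.add(new MatOfPoint(pts));
Imgproc.drawContours(origMat, box, -1, new Scalar(0, 255, 0), 1);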

OpenCV Java text segmentation

I am working on licence plate recognition software using OpenCV, Tesseract and Java, but I'm running into issues. I can't seem to segment my text correctly; not all characters are detected and given a bounding box (these are some of my outputs with my code). Also, when I do detect the characters, I never know which character is in which box, so when I pass them through Tesseract they come out jumbled. How do I put my string in the right order?
This one fails despite the clearly visible characters:
Here Z and 6 fail to be detected even when 6 is clearly visible:
Below is my code:
originalFrame = image.clone();
roiColor = image.clone();
Imgproc.cvtColor(image, image, Imgproc.COLOR_BGR2GRAY, 0);
originalFrameGrayScale = image.clone();
Mat morph = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9, 9));
Imgproc.morphologyEx(image, image, Imgproc.MORPH_TOPHAT, morph);
Imgproc.Sobel(image, image, -1, 2, 0);
Imgproc.GaussianBlur(image, image, new Size(5,5), 3,3);
Imgproc.morphologyEx(image, image, Imgproc.MORPH_CLOSE, morph);
Imgproc.threshold(image, image, 200, 255, Imgproc.THRESH_OTSU);
Vector<Rect> rectangles = detectionContour(image);
Mat roi = originalFrameGrayScale.clone();
if(!rectangles.isEmpty()){
roi = originalFrameGrayScale.submat(rectangles.get(0));
roiBlack = roi.clone();
roiColor = roiColor.submat(rectangles.get(0));
Imgproc.rectangle(originalFrame, rectangles.get(0).br(), rectangles.get(0).tl(), new Scalar(0,0,255), 2);
}
Imgproc.medianBlur(roi, roi, 3);
Imgproc.adaptiveThreshold(roi, roi, 225, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 15, 3);
roiBinarize = roi.clone();
Mat erode = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(1, 1));
Mat dilate = Imgproc.getStructuringElement(Imgproc.MORPH_RECT,new Size(1, 1));
Imgproc.morphologyEx(roi, roi, Imgproc.MORPH_OPEN, dilate);
Imgproc.morphologyEx(roi, roi, Imgproc.MORPH_OPEN, erode);
Imgproc.Canny(roi, roi, 150, 150 * 3, 3, true);
Vector<Rect> letters = detectionPlateCharacterContour(roi);
doTesseractOCR(letters, roiBinarize);
private static void doTesseractOCR(Vector<Rect> letters, Mat plate) {
    Tesseract instance = new Tesseract();
    instance.setLanguage(LANGUAGE);
    String resultPlate = "";
    for (int i = 0; i < letters.size(); i++) {
        BufferedImage letter = OpenCvUtils.Mat2bufferedImage(plate.submat(letters.get(i)));
        try {
            String result = instance.doOCR(letter);
            resultPlate += result + " position " + i;
        } catch (TesseractException e) {
            System.err.println(e.getMessage());
        }
        System.out.println("Tesseract output: " + resultPlate);
    }
}
private static Vector<Rect> detectionPlateCharacterContour(Mat roi) {
    Mat contHierarchy = new Mat();
    Mat imageMat = roi.clone();
    Rect rect = null;
    List<MatOfPoint> contours = new ArrayList<>();
    Imgproc.findContours(imageMat, contours, contHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);
    Vector<Rect> rect_array = new Vector<>();
    for (int i = 0; i < contours.size(); i++) {
        rect = Imgproc.boundingRect(contours.get(i));
        double ratio;
        if (rect.height > rect.width) {
            ratio = (double) rect.height / rect.width;  // cast to double, otherwise integer division truncates the ratio
        } else {
            ratio = (double) rect.width / rect.height;
        }
        Logger.printMessage("Ratio of letter: " + ratio);
        double contourarea = Imgproc.contourArea(contours.get(i));
        if (contourarea >= 100 && contourarea <= 1000 && (ratio >= 1 && ratio <= 2)) {
            Imgproc.rectangle(roiColor, rect.br(), rect.tl(), new Scalar(255, 0, 0));
            rect_array.add(rect);
        }
    }
    contHierarchy.release();
    return rect_array;
}
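Regarding the jumbled OCR output: detectionPlateCharacterContour returns the character boxes in whatever order findContours produced them. A minimal sketch (not from the original post; it assumes a single line of characters) is to sort the Rects left to right by their x coordinate before calling doTesseractOCR, so the recognized characters come out in reading order:

// Sort character boxes left-to-right before OCR (needs java.util.Collections and java.util.Comparator).
Collections.sort(letters, new Comparator<Rect>() {
    @Override
    public int compare(Rect a, Rect b) {
        return Integer.compare(a.x, b.x);
    }
});
doTesseractOCR(letters, roiBinarize);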

how to find largest contour in java opencv

I have used findContours and boundingRect and display them in my project. Now I want to find the largest contour and display it. Is this possible? I'm new to OpenCV in Java.
Here's my code so far:
@Override
public void onCameraViewStarted(int width, int height) {
mRgba = new Mat(height, width, CvType.CV_8UC4);
mHsv = new Mat(height,width,CvType.CV_8UC3);
hierarchy = new Mat();
mHsvMask = new Mat();
mDilated = new Mat();
mEroded = new Mat();
}
@Override
public void onCameraViewStopped() {
mRgba.release();
mHsv.release();
mHsvMask.release();
mDilated.release();
hierarchy.release();
}
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
mRgba =inputFrame.rgba();
contours = new ArrayList<MatOfPoint>();
hierarchy =new Mat();
mHsv = new Mat();
mHsvMask =new Mat();
Imgproc.cvtColor(mRgba, mHsv, Imgproc.COLOR_RGB2HSV);
Scalar lowerThreshold = new Scalar ( 0, 0, 0 ); // lower HSV bound (note: this range actually selects very dark pixels, not blue)
Scalar upperThreshold = new Scalar ( 179, 255, 10 ); // upper HSV bound
Core.inRange ( mHsv, lowerThreshold , upperThreshold, mHsvMask );
//just some filter
//Imgproc.dilate ( mHsvMask, mDilated, new Mat() );
//Imgproc.erode(mDilated,mEroded,new Mat());
Imgproc.findContours(mHsvMask, contours, hierarchy, Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_SIMPLE);
for ( int contourIdx=0; contourIdx < contours.size(); contourIdx++ )
{
//Minimun size allowed for consideration
MatOfPoint2f approxCurve = new MatOfPoint2f();
MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(contourIdx).toArray());
//Processing on mMOP2f1 which is in type MatOfPoint2f
double approxDistance = Imgproc.arcLength(contour2f,true)*0.02;
Imgproc.approxPolyDP(contour2f,approxCurve,approxDistance,true);
//convert to MatofPoint
MatOfPoint point = new MatOfPoint(approxCurve.toArray());
//get boundingrect from contour
Rect rect = Imgproc.boundingRect(point);
Imgproc.rectangle(mRgba,new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(255, 0, 0, 255),3);
//bisa Imgproc.rectangle(mRgba, rect.tl(), rect.br(), new Scalar(255, 0, 0),1, 8,0);
//show contour kontur
if(Imgproc.contourArea(contours.get(contourIdx))>100) {
Imgproc.drawContours(mRgba, contours, contourIdx, new Scalar(0,255,0), 5);
}
}
return mRgba;
Hopefully, someone has some experience in this. Thanks..
With Imgproc.contourArea you can simply compute the area of each of your contours; the contour with the largest area is the largest one.
Code to draw the largest contour would look like this:
double maxVal = 0;
int maxValIdx = 0;
for (int contourIdx = 0; contourIdx < contours.size(); contourIdx++) {
    double contourArea = Imgproc.contourArea(contours.get(contourIdx));
    if (maxVal < contourArea) {
        maxVal = contourArea;
        maxValIdx = contourIdx;
    }
}
Imgproc.drawContours(mRgba, contours, maxValIdx, new Scalar(0, 255, 0), 5);
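If you also want the bounding box of that largest contour (as in the per-contour code from the question), a small follow-up sketch:

// Optional follow-up: draw the bounding box of the largest contour (guard against an empty list).
if (!contours.isEmpty()) {
    Rect bound = Imgproc.boundingRect(contours.get(maxValIdx));
    Imgproc.rectangle(mRgba, bound.tl(), bound.br(), new Scalar(255, 0, 0, 255), 3);
}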

Detecting rectangles using OpenCV

I'm completely new to OpenCV, and for the past 2-3 days I've searched a lot about how to use OpenCV in Java and Android Studio to perform perspective correction and detect the biggest rectangle in bitmaps. Based on my searches I have done some work, but the resulting bitmap is not correct. I'm sure I've done a lot of things wrong, so it would be great if someone could help me.
Thanks in advance for your help.
public void onPictureTaken(byte[] data, Camera camera)
{
Bitmap myImage = BitmapFactory.decodeByteArray(data, 0, data.length);
Mat matImage = new Mat(myImage.getHeight(),myImage.getWidth(), CvType.CV_8UC3);
Bitmap myBitmap32 = myImage.copy(Bitmap.Config.ARGB_8888, true);
Utils.bitmapToMat(myBitmap32, matImage);
correctPerspective(matImage);
}
public static void correctPerspective(Mat imgSource)
{
// convert the image to a black-and-white edge map (8 bit)
Imgproc.Canny(imgSource.clone(), imgSource, 50, 50);
// apply gaussian blur to smoothen lines of dots
Imgproc.GaussianBlur(imgSource, imgSource, new org.opencv.core.Size(5, 5), 5);
// find the contours
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(imgSource, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
double maxArea = -1;
MatOfPoint temp_contour = contours.get(0);
// index 0 for starting
// point
MatOfPoint2f approxCurve = new MatOfPoint2f();
for (int idx = 0; idx < contours.size(); idx++) {
temp_contour = contours.get(idx);
double contourarea = Imgproc.contourArea(temp_contour);
// compare this contour to the previous largest contour found
if (contourarea > maxArea) {
// check if this contour is a square
MatOfPoint2f new_mat = new MatOfPoint2f(temp_contour.toArray());
int contourSize = (int) temp_contour.total();
MatOfPoint2f approxCurve_temp = new MatOfPoint2f();
Imgproc.approxPolyDP(new_mat, approxCurve_temp, contourSize * 0.05, true);
if (approxCurve_temp.total() == 4) {
maxArea = contourarea;
approxCurve = approxCurve_temp;
}
}
}
Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BayerBG2RGB);
double[] temp_double;
temp_double = approxCurve.get(0, 0);
Point p1 = new Point(temp_double[0], temp_double[1]);
temp_double = approxCurve.get(1, 0);
Point p2 = new Point(temp_double[0], temp_double[1]);
temp_double = approxCurve.get(2, 0);
Point p3 = new Point(temp_double[0], temp_double[1]);
temp_double = approxCurve.get(3, 0);
Point p4 = new Point(temp_double[0], temp_double[1]);
List<Point> source = new ArrayList<Point>();
source.add(p1);
source.add(p2);
source.add(p3);
source.add(p4);
Mat startM = Converters.vector_Point2f_to_Mat(source);
Mat result = warp(imgSource, startM);
//Saving into bitmap
Bitmap resultBitmap = Bitmap.createBitmap(result.cols(), result.rows(), Bitmap.Config.ARGB_8888);
Mat tmp = new Mat (result.cols(), result.rows(), CvType.CV_8U, new Scalar(4));
Imgproc.cvtColor(result, tmp, Imgproc.COLOR_RGB2BGRA);
Utils.matToBitmap(tmp, resultBitmap);
}
public static Mat warp(Mat inputMat, Mat startM)
{
int resultWidth = 1200;
int resultHeight = 680;
Point ocvPOut4 = new Point(0, 0);
Point ocvPOut1 = new Point(0, resultHeight);
Point ocvPOut2 = new Point(resultWidth, resultHeight);
Point ocvPOut3 = new Point(resultWidth, 0);
if (inputMat.height() > inputMat.width())
{
ocvPOut3 = new Point(0, 0);
ocvPOut4 = new Point(0, resultHeight);
ocvPOut1 = new Point(resultWidth, resultHeight);
ocvPOut2 = new Point(resultWidth, 0);
}
Mat outputMat = new Mat(resultWidth, resultHeight, CvType.CV_8UC4);
List<Point> dest = new ArrayList<Point>();
dest.add(ocvPOut1);
dest.add(ocvPOut2);
dest.add(ocvPOut3);
dest.add(ocvPOut4);
Mat endM = Converters.vector_Point2f_to_Mat(dest);
Mat perspectiveTransform = Imgproc.getPerspectiveTransform(startM, endM);
Imgproc.warpPerspective(inputMat, outputMat, perspectiveTransform, new Size(resultWidth, resultHeight), Imgproc.INTER_CUBIC);
return outputMat;
}
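One thing to check (an observation based on the answer this code appears to be adapted from, shown in the next question below): imgSource is overwritten by Canny and then passed to warp(), so the warped output is the edge map rather than the original photo. The answer below keeps an untouched copy of the source image and warps that instead. A minimal sketch of the same idea here:

// Inside correctPerspective: keep the original pixels before running Canny on imgSource.
Mat original = imgSource.clone();     // take the copy before Canny / GaussianBlur
// ... edge detection, findContours and approxPolyDP on imgSource as above ...
Mat result = warp(original, startM);  // warp the original pixels, not the edge map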

Auto perspective correction using OpenCV and Java

I'm looking at perspective correction for images taken from an Android camera, similar to what is described in this question posted earlier. However, I'm not able to get my hands on proper working Java code.
If anyone has made it work in Java, please help.
I found an answer to my query here. I've put a slightly refined code snippet below:
public static void correctPerspective() {
String fileName = "IMG_20141024_132131.jpg";
Mat imgSource = Highgui.imread(fileName, Highgui.CV_LOAD_IMAGE_UNCHANGED);
// convert the image to a black-and-white edge map (8 bit)
Imgproc.Canny(imgSource.clone(), imgSource, 50, 50);
// apply gaussian blur to smoothen lines of dots
Imgproc.GaussianBlur(imgSource, imgSource, new org.opencv.core.Size(5, 5), 5);
// find the contours
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(imgSource, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
double maxArea = -1;
MatOfPoint temp_contour = contours.get(0); // the largest is at the
// index 0 for starting
// point
MatOfPoint2f approxCurve = new MatOfPoint2f();
for (int idx = 0; idx < contours.size(); idx++) {
temp_contour = contours.get(idx);
double contourarea = Imgproc.contourArea(temp_contour);
// compare this contour to the previous largest contour found
if (contourarea > maxArea) {
// check if this contour is a square
MatOfPoint2f new_mat = new MatOfPoint2f(temp_contour.toArray());
int contourSize = (int) temp_contour.total();
MatOfPoint2f approxCurve_temp = new MatOfPoint2f();
Imgproc.approxPolyDP(new_mat, approxCurve_temp, contourSize * 0.05, true);
if (approxCurve_temp.total() == 4) {
maxArea = contourarea;
approxCurve = approxCurve_temp;
}
}
}
Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BayerBG2RGB);
Mat sourceImage = Highgui.imread(fileName, Highgui.CV_LOAD_IMAGE_UNCHANGED);
double[] temp_double;
temp_double = approxCurve.get(0, 0);
Point p1 = new Point(temp_double[0], temp_double[1]);
// Core.circle(imgSource,p1,55,new Scalar(0,0,255));
// Imgproc.warpAffine(sourceImage, dummy, rotImage,sourceImage.size());
temp_double = approxCurve.get(1, 0);
Point p2 = new Point(temp_double[0], temp_double[1]);
// Core.circle(imgSource,p2,150,new Scalar(255,255,255));
temp_double = approxCurve.get(2, 0);
Point p3 = new Point(temp_double[0], temp_double[1]);
// Core.circle(imgSource,p3,200,new Scalar(255,0,0));
temp_double = approxCurve.get(3, 0);
Point p4 = new Point(temp_double[0], temp_double[1]);
// Core.circle(imgSource,p4,100,new Scalar(0,0,255));
List<Point> source = new ArrayList<Point>();
source.add(p1);
source.add(p2);
source.add(p3);
source.add(p4);
Mat startM = Converters.vector_Point2f_to_Mat(source);
Mat result = warp(sourceImage, startM);
Highgui.imwrite("corrected.jpg", result);
}
public static Mat warp(Mat inputMat, Mat startM) {
int resultWidth = 1200;
int resultHeight = 680;
Point ocvPOut4 = new Point(0, 0);
Point ocvPOut1 = new Point(0, resultHeight);
Point ocvPOut2 = new Point(resultWidth, resultHeight);
Point ocvPOut3 = new Point(resultWidth, 0);
if (inputMat.height() > inputMat.width()) {
// int temp = resultWidth;
// resultWidth = resultHeight;
// resultHeight = temp;
ocvPOut3 = new Point(0, 0);
ocvPOut4 = new Point(0, resultHeight);
ocvPOut1 = new Point(resultWidth, resultHeight);
ocvPOut2 = new Point(resultWidth, 0);
}
Mat outputMat = new Mat(resultWidth, resultHeight, CvType.CV_8UC4);
List<Point> dest = new ArrayList<Point>();
dest.add(ocvPOut1);
dest.add(ocvPOut2);
dest.add(ocvPOut3);
dest.add(ocvPOut4);
Mat endM = Converters.vector_Point2f_to_Mat(dest);
Mat perspectiveTransform = Imgproc.getPerspectiveTransform(startM, endM);
Imgproc.warpPerspective(inputMat, outputMat, perspectiveTransform, new Size(resultWidth, resultHeight), Imgproc.INTER_CUBIC);
return outputMat;
}
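One caveat with this approach (an assumption based on how approxPolyDP behaves, not something stated in the answer): the four points in approxCurve are not guaranteed to come out in a fixed order, so the source corners may not line up with the hard-coded destination corners in warp(). A minimal helper sketch that orders the corners as top-left, top-right, bottom-right, bottom-left before building startM (the name orderCorners is illustrative; needs java.util.Arrays):

// Order 4 corners as top-left, top-right, bottom-right, bottom-left
// using coordinate sums and differences.
private static List<Point> orderCorners(List<Point> pts) {
    Point tl = pts.get(0), tr = pts.get(0), br = pts.get(0), bl = pts.get(0);
    for (Point p : pts) {
        if (p.x + p.y < tl.x + tl.y) tl = p;   // smallest x+y -> top-left
        if (p.x + p.y > br.x + br.y) br = p;   // largest x+y  -> bottom-right
        if (p.x - p.y > tr.x - tr.y) tr = p;   // largest x-y  -> top-right
        if (p.x - p.y < bl.x - bl.y) bl = p;   // smallest x-y -> bottom-left
    }
    return Arrays.asList(tl, tr, br, bl);
}

The destination points passed to warp() would then need to be listed in the same top-left, top-right, bottom-right, bottom-left order.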
