I have the following code:
Rect rect = Imgproc.boundingRect(screenCnt2f);
// do the perspective transform
Mat destImage = new Mat(new Size(rect.width, rect.height), orig.type());
Mat src = orderPointsClockwise(screenCnt2f);
MatOfPoint2f dst = new MatOfPoint2f(new Point(0, 0), new Point(rect.width - 1, 0),
new Point(rect.width - 1, rect.height - 1), new Point(0, rect.height - 1));
Mat transform = Imgproc.getPerspectiveTransform(src, dst);
Imgproc.warpPerspective(orig, destImage, transform, destImage.size());
If I have a pixel at location x,y, how can I calculate the location of the same pixel in destImage?
So if Point p contains the x,y values, I would like a method newPointLocation:
Point pp = newPointLocation(p, transform);
so that pp contains the location of the same pixel in destImage.
The same question, for code that uses warpAffine:
Point center = new Point(src.width() / 2, src.height() / 2);
Mat rotImage = Imgproc.getRotationMatrix2D(center, angle, 1.0);
// 1.0 means 100 % scale
Size size = new Size(src.width(), src.height());
Imgproc.warpAffine(src, src, rotImage, size, Imgproc.INTER_LINEAR + Imgproc.CV_WARP_FILL_OUTLIERS);
Point pp = newPointLocation(p, rotImage);
You can do it with Core.transform(matOfPoint, dst, rotImage)
Code:
Point center = new Point(source.width() / 2, source.height() / 2);
Mat rotImage = Imgproc.getRotationMatrix2D(center, angle, 1.0);
// 1.0 means 100 % scale
Size size = new Size(source.width(), source.height());
Imgproc.warpAffine(source, source, rotImage, size, Imgproc.INTER_LINEAR + Imgproc.CV_WARP_FILL_OUTLIERS);
// calculate new bounding box location
// zBoundingBox contains the matOfPoint that describes the bounding box in the original image
for (ZBoundingBox zBoundingBox : boundingBoxesOrig) {
MatOfPoint matOfPoint = zBoundingBox.matOfPoint, dst = new MatOfPoint();
Core.transform(matOfPoint, dst, rotImage);
boundingBoxes.add(dst); // Add to predefined collection
}
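For a single point, a minimal sketch of the newPointLocation helper asked about above could look like the following. The method name comes from the question; the sketch assumes the matrix passed in is either the 2x3 affine matrix from getRotationMatrix2D (handled by Core.transform) or the 3x3 matrix from getPerspectiveTransform, for which the analogous call is Core.perspectiveTransform:
// Minimal sketch, not the original poster's code.
// m: 2x3 matrix from getRotationMatrix2D, or 3x3 matrix from getPerspectiveTransform.
static Point newPointLocation(Point p, Mat m) {
    MatOfPoint2f src = new MatOfPoint2f(p);
    MatOfPoint2f dst = new MatOfPoint2f();
    if (m.rows() == 2) {
        Core.transform(src, dst, m);            // affine case (warpAffine)
    } else {
        Core.perspectiveTransform(src, dst, m); // perspective case (warpPerspective)
    }
    return dst.toArray()[0];
}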
I need to combine several cubes in ARCore into one ModelRenderable. I have code that takes the vertices and submeshes from each cube and creates one ModelRenderable; however, only the last cube added is rendered.
As said, only the last cube added shows up. The one strange thing I can see is that there are vertices with the same positions, so I'm not sure whether that's correct or not.
Here's my code that takes each cube and adds its submesh and vertices:
List<RenderableDefinition.Submesh> submeshes = new ArrayList<>();
List<Vertex> vertices = new ArrayList<>();
for (SubCube cube : cubes) {
submeshes.add(cube.getSubmesh());
vertices.addAll(cube.getVertices());
}
RenderableDefinition renderableDefinition = RenderableDefinition.builder().setVertices(vertices).setSubmeshes(submeshes).build();
CompletableFuture future = ModelRenderable.builder().setSource(renderableDefinition).build();
ModelRenderable result = (ModelRenderable) future.get();
Here's my code for creating a cube. It's basically identical to ShapeFactory.makeCube.
public SubCube makeCube(Vector3 size, Vector3 center, Material material) {
AndroidPreconditions.checkMinAndroidApiLevel();
Vector3 extents = size.scaled(0.5F);
Vector3 p0 = Vector3.add(center, new Vector3(-extents.x, -extents.y, extents.z));
Vector3 p1 = Vector3.add(center, new Vector3(extents.x, -extents.y, extents.z));
Vector3 p2 = Vector3.add(center, new Vector3(extents.x, -extents.y, -extents.z));
Vector3 p3 = Vector3.add(center, new Vector3(-extents.x, -extents.y, -extents.z));
Vector3 p4 = Vector3.add(center, new Vector3(-extents.x, extents.y, extents.z));
Vector3 p5 = Vector3.add(center, new Vector3(extents.x, extents.y, extents.z));
Vector3 p6 = Vector3.add(center, new Vector3(extents.x, extents.y, -extents.z));
Vector3 p7 = Vector3.add(center, new Vector3(-extents.x, extents.y, -extents.z));
Vector3 up = Vector3.up();
Vector3 down = Vector3.down();
Vector3 front = Vector3.forward();
Vector3 back = Vector3.back();
Vector3 left = Vector3.left();
Vector3 right = Vector3.right();
Vertex.UvCoordinate uv00 = new Vertex.UvCoordinate(0.0F, 0.0F);
Vertex.UvCoordinate uv10 = new Vertex.UvCoordinate(1.0F, 0.0F);
Vertex.UvCoordinate uv01 = new Vertex.UvCoordinate(0.0F, 1.0F);
Vertex.UvCoordinate uv11 = new Vertex.UvCoordinate(1.0F, 1.0F);
List<Vertex> vertices = Arrays.asList(
Vertex.builder().setPosition(p0).setNormal(down).setUvCoordinate(uv01).build(),
Vertex.builder().setPosition(p1).setNormal(down).setUvCoordinate(uv11).build(),
Vertex.builder().setPosition(p2).setNormal(down).setUvCoordinate(uv10).build(),
Vertex.builder().setPosition(p3).setNormal(down).setUvCoordinate(uv00).build(),
Vertex.builder().setPosition(p7).setNormal(left).setUvCoordinate(uv01).build(),
Vertex.builder().setPosition(p4).setNormal(left).setUvCoordinate(uv11).build(),
Vertex.builder().setPosition(p0).setNormal(left).setUvCoordinate(uv10).build(),
Vertex.builder().setPosition(p3).setNormal(left).setUvCoordinate(uv00).build(),
Vertex.builder().setPosition(p4).setNormal(front).setUvCoordinate(uv01).build(),
Vertex.builder().setPosition(p5).setNormal(front).setUvCoordinate(uv11).build(),
Vertex.builder().setPosition(p1).setNormal(front).setUvCoordinate(uv10).build(),
Vertex.builder().setPosition(p0).setNormal(front).setUvCoordinate(uv00).build(),
Vertex.builder().setPosition(p6).setNormal(back).setUvCoordinate(uv01).build(),
Vertex.builder().setPosition(p7).setNormal(back).setUvCoordinate(uv11).build(),
Vertex.builder().setPosition(p3).setNormal(back).setUvCoordinate(uv10).build(),
Vertex.builder().setPosition(p2).setNormal(back).setUvCoordinate(uv00).build(),
Vertex.builder().setPosition(p5).setNormal(right).setUvCoordinate(uv01).build(),
Vertex.builder().setPosition(p6).setNormal(right).setUvCoordinate(uv11).build(),
Vertex.builder().setPosition(p2).setNormal(right).setUvCoordinate(uv10).build(),
Vertex.builder().setPosition(p1).setNormal(right).setUvCoordinate(uv00).build(),
Vertex.builder().setPosition(p7).setNormal(up).setUvCoordinate(uv01).build(),
Vertex.builder().setPosition(p6).setNormal(up).setUvCoordinate(uv11).build(),
Vertex.builder().setPosition(p5).setNormal(up).setUvCoordinate(uv10).build(),
Vertex.builder().setPosition(p4).setNormal(up).setUvCoordinate(uv00).build());
ArrayList<Integer> triangleIndices = new ArrayList<>(36);
for(int i = 0; i < 6; ++i) {
triangleIndices.add(3 + 4 * i);
triangleIndices.add(1 + 4 * i);
triangleIndices.add(0 + 4 * i);
triangleIndices.add(3 + 4 * i);
triangleIndices.add(2 + 4 * i);
triangleIndices.add(1 + 4 * i);
}
RenderableDefinition.Submesh submesh = RenderableDefinition.Submesh.builder().setTriangleIndices(triangleIndices).setMaterial(material).build();
return new SubCube(submesh, vertices);
}
I'm not getting error messages or anything. I know the passed-in positions to makeCube are different, so that's not the issue. Expected behavior is that I'm able to render more than one cube in one ModelRenderable.
Since you merge all the vertices into a single list (named vertices), the triangle indices of each sub-cube must be offset accordingly.
For the first cube, index values go from 0 to 23 (as you have 24 vertices per cube)
For the second cube, index values go from 24 to 47
For the ith cube, index values go from (i-1)*24 to i*24-1
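A minimal sketch of that merge loop with the offset applied (assuming your SubCube can also hand back its raw, per-cube triangle indices and its material; getTriangleIndices() and getMaterial() are made-up accessor names):
List<RenderableDefinition.Submesh> submeshes = new ArrayList<>();
List<Vertex> vertices = new ArrayList<>();
for (SubCube cube : cubes) {
    int offset = vertices.size();                 // 0 for the 1st cube, 24 for the 2nd, ...
    vertices.addAll(cube.getVertices());
    List<Integer> shiftedIndices = new ArrayList<>();
    for (int index : cube.getTriangleIndices()) { // hypothetical accessor for the raw 0..23 indices
        shiftedIndices.add(index + offset);
    }
    submeshes.add(RenderableDefinition.Submesh.builder()
            .setTriangleIndices(shiftedIndices)
            .setMaterial(cube.getMaterial())      // hypothetical accessor
            .build());
}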
Alternatively, you can achieve the same result by creating only one cube geometry and building a node hierarchy in which each sub-node uses that renderable at a different position.
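A rough sketch of that alternative, assuming Sceneform's Node API; parentNode and cubePositions are placeholders for your scene's parent node (e.g. an AnchorNode) and the centers you currently pass to makeCube:
// One shared cube renderable, one Node per position.
void addCubes(ModelRenderable cubeRenderable, Node parentNode, List<Vector3> cubePositions) {
    for (Vector3 position : cubePositions) {
        Node node = new Node();
        node.setParent(parentNode);
        node.setRenderable(cubeRenderable);
        node.setLocalPosition(position);
    }
}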
I've been using Apache math for a while to do a multiple linear regression using OLSMultipleLinearRegression. Now I need to extend my solution to include a weighting factor for each data point.
I'm trying to replicate the MATLAB function fitlm.
I have a MATLAB call like:
table_data = table(points_scored, height, weight, age);
model = fitlm( table_data, 'points_scored ~ -1, height, weight, age', 'Weights', data_weights)
From 'model' I get the regression coefficients for height, weight, age.
In Java the code I have now is (roughly):
double[][] variables = new double[grades.length][3];
// Fill in variables for height, weight, age
...
OLSMultipleLinearRegression regression = new OLSMultipleLinearRegression();
regression.setNoIntercept(true);
regression.newSampleData(points_scored, variables);
There does not appear to be a way to add weightings to OLSMultipleLinearRegression. There does appear to be a way to add weights to the LeastSquaresBuilder. However I'm having trouble figuring out exactly how to use this. My biggest problem (I think) is creating the jacobians that are expected.
Here is most of what I tried:
double[] points_scored = //fill in points scored
double[] height = //fill in
double[] weight = //fill in
double[] age = // fill in
MultivariateJacobianFunction distToResidual= coeffs -> {
RealVector value = new ArrayRealVector(points_scored.length);
RealMatrix jacobian = new Array2DRowRealMatrix(points_scored.length, 3);
for (int i = 0; i < points_scored.length; ++i) {
double residual = points_scored[i];
residual -= coeffs.getEntry(0) * height[i];
residual -= coeffs.getEntry(1) * weight[i];
residual -= coeffs.getEntry(2) * age[i];
value.setEntry(i, residual);
//No idea how to set up the jacobian here
}
return new Pair<RealVector, RealMatrix>(value, jacobian);
};
double[] prescribedDistancesToLine = new double[points_scored.length];
Arrays.fill(prescribedDistancesToLine, 0);
double[] starts = new double[] {1, 1, 1};
LeastSquaresProblem problem = new LeastSquaresBuilder().
start(starts).
model(distToResidual).
target(prescribedDistancesToLine).
lazyEvaluation(false).
maxEvaluations(1000).
maxIterations(1000).
build();
LeastSquaresOptimizer.Optimum optimum = new LevenbergMarquardtOptimizer().optimize(problem);
Since I don't know how to fill in the Jacobian values, I've just been stabbing in the dark and getting coefficients nowhere near the MATLAB answers. Once I get this part working, I know that adding the weights should be a pretty straightforward extra line in the LeastSquaresBuilder.
Thanks for any help in advance!
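As for the Jacobian itself: for a purely linear model like this one, each row is simply the negated regressor values, since residual_i = points_scored_i - (c0*height_i + c1*weight_i + c2*age_i). A sketch of the missing loop body inside distToResidual, using the question's variable names (not verified against the MATLAB output):
for (int i = 0; i < points_scored.length; ++i) {
    double residual = points_scored[i]
            - coeffs.getEntry(0) * height[i]
            - coeffs.getEntry(1) * weight[i]
            - coeffs.getEntry(2) * age[i];
    value.setEntry(i, residual);
    // derivative of the residual w.r.t. each coefficient is the negated regressor
    jacobian.setEntry(i, 0, -height[i]);
    jacobian.setEntry(i, 1, -weight[i]);
    jacobian.setEntry(i, 2, -age[i]);
}
With that in place, the per-observation weights should indeed be a single extra line on the builder, something like weight(new DiagonalMatrix(data_weights)).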
You can use the GLSMultipleLinearRegression class from Apache Commons Math.
For example, let's find a linear regression for the three data points
(0, 0), (1, 2), (2, 0) in the plane, with weights 1, 2, 1:
import org.apache.commons.math3.stat.regression.GLSMultipleLinearRegression;
public class Main {
public static void main(String[] args) {
GLSMultipleLinearRegression regr = new GLSMultipleLinearRegression();
regr.setNoIntercept(false);
double[] y = new double[]{0.0, 2.0, 0.0};
double[][] x = new double[3][];
x[0] = new double[]{0.0};
x[1] = new double[]{1.0};
x[2] = new double[]{2.0};
double[][] omega = new double[3][];
omega[0] = new double[]{1.0, 0.0, 0.0};
omega[1] = new double[]{0.0, 0.5, 0.0};
omega[2] = new double[]{0.0, 0.0, 1.0};
regr.newSampleData(y, x, omega);
double[] params = regr.estimateRegressionParameters();
System.out.println("Slope: " + params[1] + ", intercept: " + params[0]);
}
}
Note that the omega matrix is diagonal, and its diagonal elements are the reciprocals of the weights.
See the documentation for the multi-variable case.
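Applied to the question's data, a sketch could look like the following; the weight-to-omega conversion is the only new part, variable names follow the question, and data_weights is assumed to hold one weight per observation:
double[][] x = new double[points_scored.length][3];
double[][] omega = new double[points_scored.length][points_scored.length];
for (int i = 0; i < points_scored.length; i++) {
    x[i][0] = height[i];
    x[i][1] = weight[i];
    x[i][2] = age[i];
    omega[i][i] = 1.0 / data_weights[i];   // diagonal of omega = reciprocal of the weight
}
GLSMultipleLinearRegression regression = new GLSMultipleLinearRegression();
regression.setNoIntercept(true);           // matches the no-intercept ('-1') fitlm call
regression.newSampleData(points_scored, x, omega);
double[] coefficients = regression.estimateRegressionParameters(); // height, weight, age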
I know this is a duplicate post, but I'm still stuck on the implementation.
I'm following some guides on the internet on how to detect a document in an image with OpenCV and Java.
The first approach I came up with uses findContours after some pre-processing (blur, edge detection). After getting all the contours, I find the largest one and assume it is the rectangle I'm looking for, but it fails in some cases, e.g. when the document is not fully captured and is missing a corner.
After several more attempts and some new processing steps that did not work at all, I found that the HoughLine transform makes it easier. Now I have all the lines in the image, but I still don't know what to do next to define the rectangle of interest that I want.
Here is the implementation code I have so far:
Approach 1: Using findContours
Mat grayImage = new Mat();
Mat detectedEdges = new Mat();
// convert to grayscale
Imgproc.cvtColor(frame, grayImage, Imgproc.COLOR_BGR2GRAY);
// reduce noise with a 3x3 kernel
// Imgproc.blur(grayImage, detectedEdges, new Size(3, 3));
Imgproc.medianBlur(grayImage, detectedEdges, 9);
// Imgproc.equalizeHist(detectedEdges, detectedEdges);
// Imgproc.GaussianBlur(detectedEdges, detectedEdges, new Size(5, 5), 0, 0, Core.BORDER_DEFAULT);
Mat edges = new Mat();
// canny detector, with ratio of lower:upper threshold of 3:1
Imgproc.Canny(detectedEdges, edges, this.threshold.getValue(), this.threshold.getValue() * 3, 3, true);
// makes the object in white bigger
Imgproc.dilate(edges, edges, new Mat(), new Point(-1, -1), 1); // 1
Image imageToShow = Utils.mat2Image(edges);
updateImageView(cannyFrame, imageToShow);
/// Find contours
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(edges, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
// loop over the contours
MatOfPoint2f approxCurve;
double maxArea = 0;
int maxId = -1;
for (MatOfPoint contour : contours) {
MatOfPoint2f temp = new MatOfPoint2f(contour.toArray());
double area = Imgproc.contourArea(contour);
approxCurve = new MatOfPoint2f();
Imgproc.approxPolyDP(temp, approxCurve, Imgproc.arcLength(temp, true) * 0.02, true);
if (approxCurve.total() == 4 && area >= maxArea) {
double maxCosine = 0;
List<Point> curves = approxCurve.toList();
for (int j = 2; j < 5; j++) {
double cosine = Math.abs(angle(curves.get(j % 4), curves.get(j - 2), curves.get(j - 1)));
maxCosine = Math.max(maxCosine, cosine);
}
if (maxCosine < 0.3) {
maxArea = area;
maxId = contours.indexOf(contour);
}
}
}
MatOfPoint maxMatOfPoint = contours.get(maxId);
MatOfPoint2f maxMatOfPoint2f = new MatOfPoint2f(maxMatOfPoint.toArray());
RotatedRect rect = Imgproc.minAreaRect(maxMatOfPoint2f);
System.out.println("Rect angle: " + rect.angle);
Point points[] = new Point[4];
rect.points(points);
for (int i = 0; i < 4; ++i) {
Imgproc.line(frame, points[i], points[(i + 1) % 4], new Scalar(255, 255, 25), 3);
}
Mat dest = new Mat();
frame.copyTo(dest, frame);
return dest;
Approach 2: Using the HoughLine transform
// STEP 1: Edge detection
Mat grayImage = new Mat();
Mat detectedEdges = new Mat();
Vector<Point> start = new Vector<Point>();
Vector<Point> end = new Vector<Point>();
// convert to grayscale
Imgproc.cvtColor(frame, grayImage, Imgproc.COLOR_BGR2GRAY);
// reduce noise with a 3x3 kernel
// Imgproc.blur(grayImage, detectedEdges, new Size(3, 3));
Imgproc.medianBlur(grayImage, detectedEdges, 9);
// Imgproc.equalizeHist(detectedEdges, detectedEdges);
// Imgproc.GaussianBlur(detectedEdges, detectedEdges, new Size(5, 5), 0, 0, Core.BORDER_DEFAULT);
// AdaptiveThreshold -> classify as either black or white
// Imgproc.adaptiveThreshold(detectedEdges, detectedEdges, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C, Imgproc.THRESH_BINARY, 5, 2);
// Imgproc.Sobel(detectedEdges, detectedEdges, -1, 1, 0);
Mat edges = new Mat();
// canny detector, with ratio of lower:upper threshold of 3:1
Imgproc.Canny(detectedEdges, edges, this.threshold.getValue(), this.threshold.getValue() * 3, 3, true);
// apply gaussian blur to smoothen lines of dots
Imgproc.GaussianBlur(edges, edges, new org.opencv.core.Size(5, 5), 5);
// makes the object in white bigger
Imgproc.dilate(edges, edges, new Mat(), new Point(-1, -1), 1); // 1
Image imageToShow = Utils.mat2Image(edges);
updateImageView(cannyFrame, imageToShow);
// STEP 2: Line detection
// Do Hough line
Mat lines = new Mat();
int minLineSize = 50;
int lineGap = 10;
Imgproc.HoughLinesP(edges, lines, 1, Math.PI / 720, (int) this.threshold.getValue(), this.minLineSize.getValue(), lineGap);
System.out.println("MinLineSize: " + this.minLineSize.getValue());
System.out.println(lines.rows());
for (int i = 0; i < lines.rows(); i++) {
double[] val = lines.get(i, 0);
Point tmpStartP = new Point(val[0], val[1]);
Point tmpEndP = new Point(val[2], val[3]);
start.add(tmpStartP);
end.add(tmpEndP);
Imgproc.line(frame, tmpStartP, tmpEndP, new Scalar(255, 255, 0), 2);
}
Mat dest = new Mat();
frame.copyTo(dest, frame);
return dest;
HoughLine result 1
HoughLine result 2
How can I detect the needed rectangle from the HoughLine result?
Can someone give me the next step to complete the HoughLine transform approach?
Any help is appreciated. I've been stuck with this for a while.
Thank you for reading this.
This answer is pretty much a mix of two other answers (here and here) I posted. But the pipeline I used for the other answers can be a little bit improved for your case. So I think it's worth posting a new answer.
There are many ways to achieve what you want. However, I don't think that line detection with HoughLinesP is needed here. So here is the pipeline I used on your samples:
Step 1: Detect edges
Resize the input image if it's too large (I noticed that this pipeline works better on a downscaled version of the input image)
Blur grayscale input and detect edges with Canny filter
Step 2: Find the card's corners
Compute the contours
Sort the contours by length and only keep the largest one
Generate the convex hull of this contour
Use approxPolyDP to simplify the convex hull (this should give a quadrilateral)
Create a mask out of the approximate polygon
Return the 4 points of the quadrilateral
Step 3: Homography
Use findHomography to find the perspective transformation of your paper sheet (with the 4 corner points found at Step 2)
Warp the input image using the computed homography matrix
NOTE: Of course, once you have found the corners of the paper sheet on the downscaled version of the input image, you can easily compute the position of the corners on the full-sized input image, in order to get the best resolution for the warped paper sheet.
And here is the result:
vector<Point> getQuadrilateral(Mat & grayscale, Mat& output)
{
Mat approxPoly_mask(grayscale.rows, grayscale.cols, CV_8UC1);
approxPoly_mask = Scalar(0);
vector<vector<Point>> contours;
findContours(grayscale, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
vector<int> indices(contours.size());
iota(indices.begin(), indices.end(), 0);
sort(indices.begin(), indices.end(), [&contours](int lhs, int rhs) {
return contours[lhs].size() > contours[rhs].size();
});
/// Find the convex hull object for each contour
vector<vector<Point> >hull(1);
convexHull(Mat(contours[indices[0]]), hull[0], false);
vector<vector<Point>> polygon(1);
approxPolyDP(hull[0], polygon[0], 20, true);
drawContours(approxPoly_mask, polygon, 0, Scalar(255));
imshow("approxPoly_mask", approxPoly_mask);
if (polygon[0].size() >= 4) // we found the 4 corners
{
return(polygon[0]);
}
return(vector<Point>());
}
int main(int argc, char** argv)
{
Mat input = imread("papersheet1.JPG");
resize(input, input, Size(), 0.1, 0.1);
Mat input_grey;
cvtColor(input, input_grey, CV_BGR2GRAY);
Mat threshold1;
Mat edges;
blur(input_grey, input_grey, Size(3, 3));
Canny(input_grey, edges, 30, 100);
vector<Point> card_corners = getQuadrilateral(edges, input);
Mat warpedCard(400, 300, CV_8UC3);
if (card_corners.size() == 4)
{
Mat homography = findHomography(card_corners, vector<Point>{Point(warpedCard.cols, warpedCard.rows), Point(0, warpedCard.rows), Point(0, 0), Point(warpedCard.cols, 0)});
warpPerspective(input, warpedCard, homography, Size(warpedCard.cols, warpedCard.rows));
}
imshow("warped card", warpedCard);
imshow("edges", edges);
imshow("input", input);
waitKey(0);
return 0;
}
This is C++ code, but it shouldn't be too hard to translate into Java.
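For reference, a partial Java sketch of the getQuadrilateral step (largest contour, convex hull, approxPolyDP, homography), assuming the OpenCV Java bindings and reusing the frame and edges Mats from Approach 1 in the question; as in the C++ code, the destination corner order has to match the order in which approxPolyDP returns the source corners:
// Hedged sketch, not the answer author's code.
List<MatOfPoint> contours = new ArrayList<>();
Imgproc.findContours(edges, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);
// keep the contour with the most points (same sort criterion as the C++ code)
MatOfPoint largest = Collections.max(contours, Comparator.comparingInt((MatOfPoint c) -> c.rows()));
// convex hull: the Java binding returns indices into the contour
MatOfInt hullIndices = new MatOfInt();
Imgproc.convexHull(largest, hullIndices);
Point[] contourPoints = largest.toArray();
List<Point> hullPoints = new ArrayList<>();
for (int idx : hullIndices.toArray()) {
    hullPoints.add(contourPoints[idx]);
}
// simplify the hull down to (ideally) 4 corners
MatOfPoint2f corners = new MatOfPoint2f();
Imgproc.approxPolyDP(new MatOfPoint2f(hullPoints.toArray(new Point[0])), corners, 20, true);
if (corners.total() == 4) {
    MatOfPoint2f target = new MatOfPoint2f(
            new Point(300, 400), new Point(0, 400), new Point(0, 0), new Point(300, 0));
    Mat homography = Calib3d.findHomography(corners, target);
    Mat warpedCard = new Mat();
    Imgproc.warpPerspective(frame, warpedCard, homography, new Size(300, 400));
}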
I'm working on an Android application and I need to estimate the camera rotation in 3D online, using images from the camera and the OpenCV library. I'd like to calculate Euler angles.
I have read this and this page, and I can estimate the homography matrix like here.
My first question is: do I really need to know the camera intrinsic matrix from camera calibration, or is the homography matrix (camera extrinsics) enough to estimate the Euler angles (pitch, roll, yaw)?
If the homography matrix is enough, how exactly can I do it?
Sorry, I am a real beginner with OpenCV and cannot decompose the homography "Mat" into a rotation matrix and a translation matrix as described here. How can I implement the Euler angles in Android?
Below is my code using solvePnPRansac() and decomposeProjectionMatrix to calculate the Euler angles.
But it just returns a zero vector (double[] eulerArray = {0,0,0})! Can somebody help me? What is wrong there?
Thank you very much for any response!
public double[] findEulerAngles(MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch matches){
KeyPoint[] k1 = keypoints1.toArray();
KeyPoint[] k2 = keypoints2.toArray();
List<DMatch> matchesList = matches.toList();
List<KeyPoint> referenceKeypointsList = keypoints2.toList();
List<KeyPoint> sceneKeypointsList = keypoints1.toList();
// Calculate the max and min distances between keypoints.
double maxDist = 0.0;
double minDist = Double.MAX_VALUE;
for(DMatch match : matchesList) {
double dist = match.distance;
if (dist < minDist) {
minDist = dist;
}
if (dist > maxDist) {
maxDist = dist;
}
}
// Identify "good" keypoints based on match distance.
List<Point3> goodReferencePointsList = new ArrayList<Point3>();
ArrayList<Point> goodScenePointsList = new ArrayList<Point>();
double maxGoodMatchDist = 1.75 * minDist;
for(DMatch match : matchesList) {
if (match.distance < maxGoodMatchDist) {
Point kk2 = k2[match.queryIdx].pt;
Point kk1 = k1[match.trainIdx].pt;
Point3 point3 = new Point3(kk1.x, kk1.y, 0.0);
goodReferencePointsList.add(point3);
goodScenePointsList.add(kk2);
}
}
if (goodReferencePointsList.size() < 4 || goodScenePointsList.size() < 4) {
// There are too few good points to find the pose.
return new double[]{0, 0, 0};
}
MatOfPoint3f goodReferencePoints = new MatOfPoint3f();
goodReferencePoints.fromList(goodReferencePointsList);
MatOfPoint2f goodScenePoints = new MatOfPoint2f();
goodScenePoints.fromList(goodScenePointsList);
MatOfDouble mRMat = new MatOfDouble(3, 3, CvType.CV_32F);
MatOfDouble mTVec = new MatOfDouble(3, 1, CvType.CV_32F);
//TODO: solve camera intrinsic matrix
Mat intrinsics = Mat.eye(3, 3, CvType.CV_32F); // dummy camera matrix
intrinsics.put(0, 0, 400);
intrinsics.put(1, 1, 400);
intrinsics.put(0, 2, 640 / 2);
intrinsics.put(1, 2, 480 / 2);
Calib3d.solvePnPRansac(goodReferencePoints, goodScenePoints, intrinsics, new MatOfDouble(), mRMat, mTVec);
MatOfDouble rotCameraMatrix1 = new MatOfDouble(3, 1, CvType.CV_32F);
double[] rVecArray = mRMat.toArray();
// Calib3d.Rodrigues(mRMat, rotCameraMatrix1);
double[] tVecArray = mTVec.toArray();
MatOfDouble projMatrix = new MatOfDouble(3, 4, CvType.CV_32F); //projMatrix 3x4 input projection matrix P.
projMatrix.put(0, 0, rVecArray[0]);
projMatrix.put(0, 1, rVecArray[1]);
projMatrix.put(0, 2, rVecArray[2]);
projMatrix.put(0, 3, 0);
projMatrix.put(1, 0, rVecArray[3]);
projMatrix.put(1, 1, rVecArray[4]);
projMatrix.put(1, 2, rVecArray[5]);
projMatrix.put(1, 3, 0);
projMatrix.put(2, 0, rVecArray[6]);
projMatrix.put(2, 1, rVecArray[7]);
projMatrix.put(2, 2, rVecArray[8]);
projMatrix.put(2, 3, 0);
MatOfDouble cameraMatrix = new MatOfDouble(3, 3, CvType.CV_32F); //cameraMatrix Output 3x3 camera matrix K.
MatOfDouble rotMatrix = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrix Output 3x3 external rotation matrix R.
MatOfDouble transVect = new MatOfDouble(4, 1, CvType.CV_32F); //transVect Output 4x1 translation vector T.
MatOfDouble rotMatrixX = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrixX a rotMatrixX
MatOfDouble rotMatrixY = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrixY a rotMatrixY
MatOfDouble rotMatrixZ = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrixZ a rotMatrixZ
MatOfDouble eulerAngles = new MatOfDouble(3, 1, CvType.CV_32F); //eulerAngles Optional three-element vector containing three Euler angles of rotation in degrees.
Calib3d.decomposeProjectionMatrix( projMatrix,
cameraMatrix,
rotMatrix,
transVect,
rotMatrixX,
rotMatrixY,
rotMatrixZ,
eulerAngles);
double[] eulerArray = eulerAngles.toArray();
return eulerArray;
}
Homography relates images of the same planar surface, so it works only if there is a dominant plane in the image and you can find enough feature points lying on the plane in both images and successfully match them. Minimum number of matches is four and the math will work under the assumption, that the matches are 100% correct. With the help of robust estimation like RANSAC, you can get the result even if some elements in your set of feature point matches are obvious mismatches or are not placed on a plane.
For a more general case of a set of matched features without the planarity assumption, you will need to find an essential matrix. The exact definition of the matrix can be found here. In short, it works more or less like homography - it relates corresponding points in two images. The minimum number of matches required to compute the essential matrix is five. To get the result from such a minimum sample, you need to make sure that the established matches are 100% correct. Again, robust estimation can help if there are outliers in your correspondence set -- and with automatic feature detection and matching there usually are.
OpenCV 3.0 has a function for essential matrix computation, conveniently integrated with RANSAC robust estimation. The essential matrix can be decomposed to the rotation matrix and translation vector as shown in the Wikipedia article I linked before. OpenCV 3.0 has a readily available function for this, too.
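A rough Java sketch of that OpenCV 3.x route (findEssentialMat with RANSAC, then recoverPose); points1 and points2 stand for the matched image points as MatOfPoint2f, and the focal length and principal point are placeholders just like the dummy intrinsics in the question:
double focal = 400;                        // placeholder focal length
Point pp = new Point(640 / 2, 480 / 2);    // placeholder principal point
Mat mask = new Mat();
Mat essential = Calib3d.findEssentialMat(points1, points2, focal, pp, Calib3d.RANSAC, 0.999, 1.0, mask);
Mat R = new Mat();
Mat t = new Mat();
Calib3d.recoverPose(essential, points1, points2, R, t, focal, pp, mask);
// one common ZYX (yaw-pitch-roll) read-out of the rotation matrix
double yaw   = Math.atan2(R.get(1, 0)[0], R.get(0, 0)[0]);
double pitch = Math.atan2(-R.get(2, 0)[0], Math.sqrt(R.get(2, 1)[0] * R.get(2, 1)[0] + R.get(2, 2)[0] * R.get(2, 2)[0]));
double roll  = Math.atan2(R.get(2, 1)[0], R.get(2, 2)[0]);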
The following code now works for me, and I have decomposed the Euler angles from the homography matrix! I get some values for pitch, roll and yaw, but I don't know whether they are correct. Does anybody have an idea how I can test this?
private static MatOfDMatch filterMatchesByHomography(MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch matches){
List<Point> lp1 = new ArrayList<Point>(500);
List<Point> lp2 = new ArrayList<Point>(500);
KeyPoint[] k1 = keypoints1.toArray();
KeyPoint[] k2 = keypoints2.toArray();
List<DMatch> matchesList = matches.toList();
if (matchesList.size() < 4){
MatOfDMatch mat = new MatOfDMatch();
return mat;
}
// Add matches keypoints to new list to apply homography
for(DMatch match : matchesList){
Point kk1 = k1[match.queryIdx].pt;
Point kk2 = k2[match.trainIdx].pt;
lp1.add(kk1);
lp2.add(kk2);
}
MatOfPoint2f srcPoints = new MatOfPoint2f(lp1.toArray(new Point[0]));
MatOfPoint2f dstPoints = new MatOfPoint2f(lp2.toArray(new Point[0]));
//---------------------------------------
Mat mask = new Mat();
Mat homography = Calib3d.findHomography(srcPoints, dstPoints, Calib3d.RANSAC, 0.2, mask); // Finds a perspective transformation between two planes. ---Calib3d.LMEDS
Mat pose = cameraPoseFromHomography(homography);
//Decomposing a rotation matrix to eulerangle
pitch = Math.atan2(pose.get(2, 1)[0], pose.get(2, 2)[0]); // arctan2(r32, r33)
roll = Math.atan2(-1*pose.get(2, 0)[0], Math.sqrt( Math.pow(pose.get(2, 1)[0], 2) + Math.pow(pose.get(2, 2)[0], 2)) ); // arctan2(-r31, sqrt(r32^2 + r33^2))
yaw = Math.atan2(pose.get(2, 0)[0], pose.get(0, 0)[0]);
List<DMatch> matches_homo = new ArrayList<DMatch>();
int size = (int) mask.size().height;
for(int i = 0; i < size; i++){
if ( mask.get(i, 0)[0] == 1){
DMatch d = matchesList.get(i);
matches_homo.add(d);
}
}
MatOfDMatch mat = new MatOfDMatch();
mat.fromList(matches_homo);
return mat;
}
This is my camera pose from the homography matrix (see this page too):
private static Mat cameraPoseFromHomography(Mat h) {
//Log.d("DEBUG", "cameraPoseFromHomography: homography " + matToString(h));
Mat pose = Mat.eye(3, 4, CvType.CV_32FC1); // 3x4 matrix, the camera pose
float norm1 = (float) Core.norm(h.col(0));
float norm2 = (float) Core.norm(h.col(1));
float tnorm = (norm1 + norm2) / 2.0f; // Normalization value
Mat normalizedTemp = new Mat();
Core.normalize(h.col(0), normalizedTemp);
normalizedTemp.convertTo(normalizedTemp, CvType.CV_32FC1);
normalizedTemp.copyTo(pose.col(0)); // Normalize the rotation, and copies the column to pose
Core.normalize(h.col(1), normalizedTemp);
normalizedTemp.convertTo(normalizedTemp, CvType.CV_32FC1);
normalizedTemp.copyTo(pose.col(1));// Normalize the rotation and copies the column to pose
Mat p3 = pose.col(0).cross(pose.col(1)); // Computes the cross-product of p1 and p2
p3.copyTo(pose.col(2));// Third column is the crossproduct of columns one and two
Mat temp = h.col(2);
double[] buffer = new double[3];
h.col(2).get(0, 0, buffer);
pose.put(0, 3, buffer[0] / tnorm); //vector t [R|t] is the last column of pose
pose.put(1, 3, buffer[1] / tnorm);
pose.put(2, 3, buffer[2] / tnorm);
return pose;
}