I am writing an OpenCV-based image processing algorithm for thresholding. The algorithm was originally written in C++ and I am rewriting it in Java for Android Studio. In one line, I have to add two Mat (OpenCV matrix) objects. In C++ it is res = Img + res;, in Java it is Core.add(imgMat, res, res);. At this line I get an error which I cannot solve:
CvException: /Volumes/./././././././arithm.cpp:639: error: (-209) The operation is neither 'array op array' (where arrays have the same size and the same number of channels), nor 'array op scalar', nor 'scalar op array' in function void cv::arithm_op(...)
In the code below you can see that both Mat objects have the same size and the same format (CvType) too. You can see how the code looks in C++ in this question.
My code (Java):
public Bitmap Thresholding(Bitmap bitmap)
{
    Mat imgMat = new Mat();
    Utils.bitmapToMat(bitmap, imgMat);
    imgMat.convertTo(imgMat, CvType.CV_32FC1, 1.0 / 255.0);
    Mat res = CalcBlockMeanVariance(imgMat, 21);
    Core.subtract(new MatOfDouble(1.0), res, res);
    Core.add(imgMat, res, res);
    Imgproc.threshold(res, res, 0.85, 1, Imgproc.THRESH_BINARY);
    //Imgproc.resize(res, res, new org.opencv.core.Size(res.cols() / 2, res.rows() / 2));
    res.convertTo(res, CvType.CV_8UC1, 255.0);
    Utils.matToBitmap(res, bitmap);
    return bitmap;
}
public Mat CalcBlockMeanVariance(Mat Img, int blockSide)
{
    Mat I = new Mat();
    Mat ResMat;
    Mat inpaintmask = new Mat();
    Mat patch;
    Mat smallImg = new Mat();
    MatOfDouble mean = new MatOfDouble();
    MatOfDouble stddev = new MatOfDouble();
    Img.convertTo(I, CvType.CV_32FC1);
    ResMat = Mat.zeros(Img.rows() / blockSide, Img.cols() / blockSide, CvType.CV_32FC1);
    for (int i = 0; i < Img.rows() - blockSide; i += blockSide)
    {
        for (int j = 0; j < Img.cols() - blockSide; j += blockSide)
        {
            patch = new Mat(I, new Rect(j, i, blockSide, blockSide));
            Core.meanStdDev(patch, mean, stddev);
            if (stddev.get(0, 0)[0] > 0.01)
                ResMat.put(i / blockSide, j / blockSide, mean.get(0, 0)[0]);
            else
                ResMat.put(i / blockSide, j / blockSide, 0);
        }
    }
    Imgproc.resize(I, smallImg, ResMat.size());
    Imgproc.threshold(ResMat, inpaintmask, 0.02, 1.0, Imgproc.THRESH_BINARY);
    Mat inpainted = new Mat();
    Imgproc.cvtColor(smallImg, smallImg, Imgproc.COLOR_RGBA2BGR);
    smallImg.convertTo(smallImg, CvType.CV_8UC1, 255.0);
    inpaintmask.convertTo(inpaintmask, CvType.CV_8UC1);
    Photo.inpaint(smallImg, inpaintmask, inpainted, 5, Photo.INPAINT_TELEA);
    Imgproc.resize(inpainted, ResMat, Img.size());
    ResMat.convertTo(ResMat, CvType.CV_32FC1, 1.0 / 255.0);
    return ResMat;
}
Thank you in advance.
While imgMat and res have the same size, they have a different number of channels: imgMat has 4 channels and res has 3.
Since you can add two matrices only if they have the same size and the same number of channels, you can convert imgMat to a 3-channel image before calling add, like:
Imgproc.cvtColor( imgMat, imgMat, Imgproc.COLOR_BGRA2BGR);
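Applied to the question's code, the relevant lines could then look like this (a minimal sketch, assuming imgMat comes from Utils.bitmapToMat and is therefore RGBA):
Mat imgMat = new Mat();
Utils.bitmapToMat(bitmap, imgMat);
imgMat.convertTo(imgMat, CvType.CV_32FC1, 1.0 / 255.0);
Mat res = CalcBlockMeanVariance(imgMat, 21);
Core.subtract(new MatOfDouble(1.0), res, res);
// res has 3 channels (CalcBlockMeanVariance converts to BGR internally),
// while imgMat still has 4, so drop the alpha channel first:
Imgproc.cvtColor(imgMat, imgMat, Imgproc.COLOR_BGRA2BGR);
Core.add(imgMat, res, res); // now 'array op array' with matching channels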
I am writing a program which needs to detect red circle-like shapes in this picture.
I have tried Canny edge detection and findContours, but neither of them finds these red "circles". I also tried converting to HSV and detecting by color, but I couldn't determine a good range for this color; maybe the background color confuses it?
I put here a piece of my code with my final attempt:
Mat image = Imgcodecs.imread("image.jpg");
Mat hsvImage = new Mat();
Mat grayscaleImage = new Mat();
Mat binaryImage = new Mat();
Imgproc.blur(image, image, new Size(1, 1));
Imgproc.cvtColor(image, hsvImage, Imgproc.COLOR_BGR2HSV);
Imgproc.cvtColor(image, grayscaleImage, Imgproc.COLOR_BGR2GRAY);
Imgproc.equalizeHist(grayscaleImage, grayscaleImage);
Imgproc.Canny(grayscaleImage, grayscaleImage, 50, 150, 3, false);
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(grayscaleImage.clone(), contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
for (int id = 0; id < contours.size(); id++) {
    MatOfPoint2f mop2f = new MatOfPoint2f();
    contours.get(id).convertTo(mop2f, CvType.CV_32F);
    RotatedRect rectangle = Imgproc.minAreaRect(mop2f);
    if (rectangle.boundingRect().width > 80)
        Imgproc.drawContours(image, contours, id, new Scalar(0, 255, 0));
}
If you want to process that marked image, you really might want to detect colors. Typically this is done in HSV color space.
Here is some C++ code to detect "red" color. The result isn't good enough to use findContours yet, but maybe it will be after some dilation. Maybe you can convert the code to Java.
If you want to detect a different color, change the line redMask = thresholdHue(hsv, 0, 20, 50, 50); to mask = thresholdHue(hsv, yourWantedHueColorValue, 20, 50, 50);
// for example to shift a circular hue channel
cv::Mat shiftChannel(cv::Mat H, int shift, int maxVal = 180)
{
    // CV_8UC1 only!
    cv::Mat shiftedH = H.clone();
    //int shift = 25; // in OpenCV hue values go from 0 to 180 (so they have to be doubled to get to 0 .. 360) because of the byte range from 0 to 255
    for (int j = 0; j < shiftedH.rows; ++j)
        for (int i = 0; i < shiftedH.cols; ++i)
        {
            shiftedH.at<unsigned char>(j, i) = (shiftedH.at<unsigned char>(j, i) + shift) % maxVal;
        }
    return shiftedH;
}
cv::Mat thresholdHue(cv::Mat hsvImage, int hueVal, int range = 30, int minSat = 50, int minValue = 50)
{
    // hsvImage must be a CV_8UC3 HSV image.
    // hueVal and range are in OpenCV's hue range (0 .. 180)
    // range shouldn't be bigger than 90, because that's the max (all colors) after shifting the hue channel.
    // this function will
    // 1. shift the hue channel, so that even colors near the border (red color!) will be detectable with the same code.
    // 2. threshold the hue channel around the value 90 +/- range
    cv::Mat mask; // return value
    std::vector<cv::Mat> channels;
    cv::split(hsvImage, channels);
    int targetHueVal = 180 / 2; // we'll shift the hue space so that the target val will always be 90 afterwards, no matter which hue value was chosen. This can be important if the chosen hue lies near the wrap-around point (like red).
    int shift = targetHueVal - hueVal;
    if (shift < 0) shift += 180;
    cv::Mat shiftedHue = shiftChannel(channels[0], shift, 180);
    // merge the channels back into an hsv image
    std::vector<cv::Mat> newChannels;
    newChannels.push_back(shiftedHue);
    newChannels.push_back(channels[1]);
    newChannels.push_back(channels[2]);
    cv::Mat shiftedHSV;
    cv::merge(newChannels, shiftedHSV);
    // threshold
    cv::inRange(shiftedHSV, cv::Vec3b(targetHueVal - range, minSat, minValue), cv::Vec3b(targetHueVal + range, 255, 255), mask);
    return mask;
}
int main(int argc, char* argv[])
{
    cv::Mat input = cv::imread("C:/StackOverflow/Input/redCircleLikeContours.jpg");
    cv::Mat redMask;
    cv::Mat hsv;
    cv::cvtColor(input, hsv, CV_BGR2HSV);
    redMask = thresholdHue(hsv, 0, 20, 50, 50);
    cv::imshow("red", redMask);
    cv::imshow("input", input);
    cv::imwrite("C:/StackOverflow/Output/redCircleLikeContoursMask.png", redMask);
    cv::waitKey(0);
    return 0;
}
Here's the result:
Here is my code if somebody wants to take a look :)
public static void main(String args[]) {
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    Mat image = Imgcodecs.imread("imageorg.jpg");
    // imread returns an empty Mat (never null) when loading fails
    if (image.empty()) System.out.println("Wrong path to image");
    else System.out.println("Image is fine");
    Mat hsvImage = new Mat();
    Imgproc.blur(image, image, new Size(3, 3));
    Imgproc.cvtColor(image, hsvImage, Imgproc.COLOR_BGR2HSV);
    Mat redMask = thresholdHue(hsvImage, 0, 20, 50, 50);
    // getStructuringElement expects a shape constant (MORPH_RECT/ELLIPSE/CROSS), not MORPH_DILATE
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(2, 2));
    Mat dilateMat = new Mat();
    Imgproc.dilate(redMask, dilateMat, kernel);
    Imgcodecs.imwrite("redCircleLikeContours.png", redMask);
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(dilateMat.clone(), contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    List<MatOfPoint> removedContoursList = new ArrayList<MatOfPoint>();
    for (int id = 0; id < contours.size(); id++) {
        MatOfPoint2f mop2f = new MatOfPoint2f();
        contours.get(id).convertTo(mop2f, CvType.CV_32F);
        RotatedRect rectangle = Imgproc.minAreaRect(mop2f);
        if (rectangle.boundingRect().height < 10) {
            removedContoursList.add(contours.get(id));
            System.out.println("removing: " + rectangle.boundingRect());
            contours.remove(id);
            id--;
        }
    }
}
public static Mat thresholdHue(Mat hsvImage, int hueVal, int range, int minSat, int minValue)
{
    Mat mask = new Mat();
    List<Mat> channels = new ArrayList<Mat>();
    Core.split(hsvImage, channels);
    int targetHueVal = 180 / 2;
    int shift = targetHueVal - hueVal;
    if (shift < 0) shift += 180;
    Mat shiftedHue = shiftChannel(channels.get(0), shift, 180);
    List<Mat> newChannels = new ArrayList<Mat>();
    newChannels.add(shiftedHue);
    newChannels.add(channels.get(1));
    newChannels.add(channels.get(2));
    Mat shiftedHSV = new Mat();
    Core.merge(newChannels, shiftedHSV);
    Core.inRange(shiftedHSV, new Scalar(targetHueVal - range, minSat, minValue), new Scalar(targetHueVal + range, 255, 255), mask);
    return mask;
}
private static Mat shiftChannel(Mat H, int shift, int maxVal)
{
    Mat shiftedH = H.clone();
    for (int j = 0; j < shiftedH.rows(); ++j)
        for (int i = 0; i < shiftedH.cols(); ++i)
        {
            shiftedH.put(j, i, (shiftedH.get(j, i)[0] + shift) % maxVal);
        }
    return shiftedH;
}
I know this is a duplicated post, but I am still stuck on the implementation.
I am following some guides on the internet on how to detect a document in an image with OpenCV and Java.
The first approach I came up with uses findContours after some pre-processing (blur, edge detection); after getting all the contours I can find the largest one and assume that is the rectangle I'm looking for. But it fails in some cases, e.g. when the document is not fully captured, like when a corner is missing.
After several more attempts with new processing steps that did not work at all, I found that the HoughLine transform makes it easier. From that point I have all the lines inside the image, but I still don't know what to do next to define the rectangle of interest that I want.
Here is the implementation code I have so far:
Approach 1: Using findContours
Mat grayImage = new Mat();
Mat detectedEdges = new Mat();
// convert to grayscale
Imgproc.cvtColor(frame, grayImage, Imgproc.COLOR_BGR2GRAY);
// reduce noise with a 3x3 kernel
// Imgproc.blur(grayImage, detectedEdges, new Size(3, 3));
Imgproc.medianBlur(grayImage, detectedEdges, 9);
// Imgproc.equalizeHist(detectedEdges, detectedEdges);
// Imgproc.GaussianBlur(detectedEdges, detectedEdges, new Size(5, 5), 0, 0, Core.BORDER_DEFAULT);
Mat edges = new Mat();
// Canny detector, with a lower:upper threshold ratio of 1:3
Imgproc.Canny(detectedEdges, edges, this.threshold.getValue(), this.threshold.getValue() * 3, 3, true);
// makes the white objects bigger
Imgproc.dilate(edges, edges, new Mat(), new Point(-1, -1), 1); // 1
Image imageToShow = Utils.mat2Image(edges);
updateImageView(cannyFrame, imageToShow);
/// Find contours
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(edges, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
// loop over the contours
MatOfPoint2f approxCurve;
double maxArea = 0;
int maxId = -1;
for (MatOfPoint contour : contours) {
    MatOfPoint2f temp = new MatOfPoint2f(contour.toArray());
    double area = Imgproc.contourArea(contour);
    approxCurve = new MatOfPoint2f();
    Imgproc.approxPolyDP(temp, approxCurve, Imgproc.arcLength(temp, true) * 0.02, true);
    if (approxCurve.total() == 4 && area >= maxArea) {
        double maxCosine = 0;
        List<Point> curves = approxCurve.toList();
        for (int j = 2; j < 5; j++) {
            double cosine = Math.abs(angle(curves.get(j % 4), curves.get(j - 2), curves.get(j - 1)));
            maxCosine = Math.max(maxCosine, cosine);
        }
        if (maxCosine < 0.3) {
            maxArea = area;
            maxId = contours.indexOf(contour);
        }
    }
}
MatOfPoint maxMatOfPoint = contours.get(maxId);
MatOfPoint2f maxMatOfPoint2f = new MatOfPoint2f(maxMatOfPoint.toArray());
RotatedRect rect = Imgproc.minAreaRect(maxMatOfPoint2f);
System.out.println("Rect angle: " + rect.angle);
Point points[] = new Point[4];
rect.points(points);
for (int i = 0; i < 4; ++i) {
    Imgproc.line(frame, points[i], points[(i + 1) % 4], new Scalar(255, 255, 25), 3);
}
Mat dest = new Mat();
frame.copyTo(dest, frame);
return dest;
Approach 2: Using HoughLine transform
// STEP 1: Edge detection
Mat grayImage = new Mat();
Mat detectedEdges = new Mat();
Vector<Point> start = new Vector<Point>();
Vector<Point> end = new Vector<Point>();
// convert to grayscale
Imgproc.cvtColor(frame, grayImage, Imgproc.COLOR_BGR2GRAY);
// reduce noise with a 3x3 kernel
// Imgproc.blur(grayImage, detectedEdges, new Size(3, 3));
Imgproc.medianBlur(grayImage, detectedEdges, 9);
// Imgproc.equalizeHist(detectedEdges, detectedEdges);
// Imgproc.GaussianBlur(detectedEdges, detectedEdges, new Size(5, 5), 0, 0, Core.BORDER_DEFAULT);
// AdaptiveThreshold -> classify as either black or white
// Imgproc.adaptiveThreshold(detectedEdges, detectedEdges, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C, Imgproc.THRESH_BINARY, 5, 2);
// Imgproc.Sobel(detectedEdges, detectedEdges, -1, 1, 0);
Mat edges = new Mat();
// Canny detector, with a lower:upper threshold ratio of 1:3
Imgproc.Canny(detectedEdges, edges, this.threshold.getValue(), this.threshold.getValue() * 3, 3, true);
// apply Gaussian blur to smooth the lines of dots
Imgproc.GaussianBlur(edges, edges, new org.opencv.core.Size(5, 5), 5);
// makes the white objects bigger
Imgproc.dilate(edges, edges, new Mat(), new Point(-1, -1), 1); // 1
Image imageToShow = Utils.mat2Image(edges);
updateImageView(cannyFrame, imageToShow);
// STEP 2: Line detection
// Do Hough line
Mat lines = new Mat();
int minLineSize = 50;
int lineGap = 10;
Imgproc.HoughLinesP(edges, lines, 1, Math.PI / 720, (int) this.threshold.getValue(), this.minLineSize.getValue(), lineGap);
System.out.println("MinLineSize: " + this.minLineSize.getValue());
System.out.println(lines.rows());
for (int i = 0; i < lines.rows(); i++) {
    double[] val = lines.get(i, 0);
    Point tmpStartP = new Point(val[0], val[1]);
    Point tmpEndP = new Point(val[2], val[3]);
    start.add(tmpStartP);
    end.add(tmpEndP);
    Imgproc.line(frame, tmpStartP, tmpEndP, new Scalar(255, 255, 0), 2);
}
Mat dest = new Mat();
frame.copyTo(dest, frame);
return dest;
HoughLine result 1
HoughLine result 2
How can I detect the needed rectangle from the HoughLine result?
Can someone give me the next step to complete the HoughLine transform approach?
Any help is appreciated. I've been stuck with this for a while.
Thank you for reading this.
This answer is pretty much a mix of two other answers (here and here) I posted. But the pipeline I used for the other answers can be a little bit improved for your case. So I think it's worth posting a new answer.
There are many ways to achieve what you want. However, I don't think that line detection with HoughLinesP is needed here. So here is the pipeline I used on your samples:
Step 1: Detect edges
Resize the input image if it's too large (I noticed that this pipeline works better on a downscaled version of a given input image)
Blur the grayscale input and detect edges with the Canny filter
Step 2: Find the card's corners
Compute the contours
Sort the contours by length and only keep the largest one
Generate the convex hull of this contour
Use approxPolyDP to simplify the convex hull (this should give a quadrilateral)
Create a mask out of the approximate polygon
Return the 4 points of the quadrilateral
Step 3: Homography
Use findHomography to find the perspective transformation of your paper sheet (with the 4 corner points found at Step 2)
Warp the input image using the computed homography matrix
NOTE: Of course, once you have found the corners of the paper sheet on the downscaled version of the input image, you can easily compute the position of the corners on the full-sized input image, in order to have the best resolution for the warped paper sheet.
And here is the result:
// includes assumed by this snippet
#include <opencv2/opencv.hpp>
#include <numeric>   // std::iota
#include <algorithm> // std::sort
using namespace cv;
using namespace std;

vector<Point> getQuadrilateral(Mat& grayscale, Mat& output)
{
    Mat approxPoly_mask(grayscale.rows, grayscale.cols, CV_8UC1);
    approxPoly_mask = Scalar(0);
    vector<vector<Point>> contours;
    findContours(grayscale, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
    vector<int> indices(contours.size());
    iota(indices.begin(), indices.end(), 0);
    sort(indices.begin(), indices.end(), [&contours](int lhs, int rhs) {
        return contours[lhs].size() > contours[rhs].size();
    });
    /// Find the convex hull object for each contour
    vector<vector<Point>> hull(1);
    convexHull(Mat(contours[indices[0]]), hull[0], false);
    vector<vector<Point>> polygon(1);
    approxPolyDP(hull[0], polygon[0], 20, true);
    drawContours(approxPoly_mask, polygon, 0, Scalar(255));
    imshow("approxPoly_mask", approxPoly_mask);
    if (polygon[0].size() >= 4) // we found the 4 corners
    {
        return polygon[0];
    }
    return vector<Point>();
}
int main(int argc, char** argv)
{
    Mat input = imread("papersheet1.JPG");
    resize(input, input, Size(), 0.1, 0.1);
    Mat input_grey;
    cvtColor(input, input_grey, COLOR_BGR2GRAY);
    Mat threshold1;
    Mat edges;
    blur(input_grey, input_grey, Size(3, 3));
    Canny(input_grey, edges, 30, 100);
    vector<Point> card_corners = getQuadrilateral(edges, input);
    Mat warpedCard(400, 300, CV_8UC3);
    if (card_corners.size() == 4)
    {
        Mat homography = findHomography(card_corners, vector<Point>{Point(warpedCard.cols, warpedCard.rows), Point(0, warpedCard.rows), Point(0, 0), Point(warpedCard.cols, 0)});
        warpPerspective(input, warpedCard, homography, Size(warpedCard.cols, warpedCard.rows));
    }
    imshow("warped card", warpedCard);
    imshow("edges", edges);
    imshow("input", input);
    waitKey(0);
    return 0;
}
This is C++ code, but it shouldn't be too hard to translate into Java.
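As a starting point, the homography step (Step 3) might look roughly like this in Java (a minimal sketch; warpCard is a hypothetical helper, and cardCorners is assumed to hold the four corners in the same order as the C++ vector above):
static Mat warpCard(Mat input, List<Point> cardCorners) {
    Mat warpedCard = new Mat(400, 300, CvType.CV_8UC3);
    // if the corners were found on a downscaled image, scale them back up
    // to the full-resolution input first (see the NOTE above)
    MatOfPoint2f srcCorners = new MatOfPoint2f(cardCorners.toArray(new Point[0]));
    MatOfPoint2f dstCorners = new MatOfPoint2f(
            new Point(warpedCard.cols(), warpedCard.rows()),
            new Point(0, warpedCard.rows()),
            new Point(0, 0),
            new Point(warpedCard.cols(), 0));
    Mat homography = Calib3d.findHomography(srcCorners, dstCorners);
    Imgproc.warpPerspective(input, warpedCard, homography, new Size(warpedCard.cols(), warpedCard.rows()));
    return warpedCard;
}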
I am trying to dilate three images of characters in Java OpenCV. I found that even for the same character with the same font and size, the result after dilation is different. So I tried with the same image, and the result is still different. Here is my test code.
for (int j = 0; j < 3; j++) {
    Mat InputSrc = openFile("src\\myOpencv\\ocr\\crop1.png");
    Mat tempImg = new Mat();
    Imgproc.cvtColor(InputSrc, tempImg, Imgproc.COLOR_BGR2GRAY);
    Imgproc.threshold(tempImg, tempImg, 0, 255, Imgproc.THRESH_OTSU);
    imageViewer.show(tempImg, "src");
    Mat kernal5 = new Mat(5, 5, CV_8U); // note: allocated but never initialized
    Point midPoint = new Point(-1, -1);
    Scalar scalarOne = new Scalar(1);
    Mat binImg2 = new Mat();
    Imgproc.dilate(tempImg, binImg2, kernal5, midPoint, 1, 1, scalarOne);
    imageViewer.show(binImg2, "dilate");
}
Thanks.
I'm working on an Android application and I need to estimate the camera rotation in 3D online, using images from the camera and the OpenCV library. I would like to calculate Euler angles.
I have read this and this page, and I can estimate the homography matrix like here.
My first question is: do I really need to know the camera intrinsic matrix from camera calibration, or is the homography matrix (camera extrinsics) enough to estimate the Euler angles (pitch, roll, yaw)?
If the homography matrix is enough, how can I do it exactly?
Sorry, I am really a beginner with OpenCV and cannot decompose the homography "Mat" into a rotation matrix and translation matrix as described here. How can I implement Euler angles on Android?
You can see my code below, using solvePnPRansac() and decomposeProjectionMatrix to calculate the Euler angles.
But it returns just a null vector as double[] eulerArray = {0,0,0}!!! Can somebody help me?! What is wrong there?
Thank you very much for any response!
public double[] findEulerAngles(MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch matches){
    KeyPoint[] k1 = keypoints1.toArray();
    KeyPoint[] k2 = keypoints2.toArray();
    List<DMatch> matchesList = matches.toList();
    List<KeyPoint> referenceKeypointsList = keypoints2.toList();
    List<KeyPoint> sceneKeypointsList = keypoints1.toList();
    // Calculate the max and min distances between keypoints.
    double maxDist = 0.0;
    double minDist = Double.MAX_VALUE;
    for(DMatch match : matchesList) {
        double dist = match.distance;
        if (dist < minDist) {
            minDist = dist;
        }
        if (dist > maxDist) {
            maxDist = dist;
        }
    }
    // Identify "good" keypoints based on match distance.
    List<Point3> goodReferencePointsList = new ArrayList<Point3>();
    ArrayList<Point> goodScenePointsList = new ArrayList<Point>();
    double maxGoodMatchDist = 1.75 * minDist;
    for(DMatch match : matchesList) {
        if (match.distance < maxGoodMatchDist) {
            Point kk2 = k2[match.queryIdx].pt;
            Point kk1 = k1[match.trainIdx].pt;
            Point3 point3 = new Point3(kk1.x, kk1.y, 0.0);
            goodReferencePointsList.add(point3);
            goodScenePointsList.add(kk2);
        }
    }
    if (goodReferencePointsList.size() < 4 || goodScenePointsList.size() < 4) {
        // There are too few good points to find the pose.
        return null;
    }
    MatOfPoint3f goodReferencePoints = new MatOfPoint3f();
    goodReferencePoints.fromList(goodReferencePointsList);
    MatOfPoint2f goodScenePoints = new MatOfPoint2f();
    goodScenePoints.fromList(goodScenePointsList);
    MatOfDouble mRMat = new MatOfDouble(3, 3, CvType.CV_32F);
    MatOfDouble mTVec = new MatOfDouble(3, 1, CvType.CV_32F);
    //TODO: solve camera intrinsic matrix
    Mat intrinsics = Mat.eye(3, 3, CvType.CV_32F); // dummy camera matrix
    intrinsics.put(0, 0, 400);
    intrinsics.put(1, 1, 400);
    intrinsics.put(0, 2, 640 / 2);
    intrinsics.put(1, 2, 480 / 2);
    Calib3d.solvePnPRansac(goodReferencePoints, goodScenePoints, intrinsics, new MatOfDouble(), mRMat, mTVec);
    MatOfDouble rotCameraMatrix1 = new MatOfDouble(3, 1, CvType.CV_32F);
    double[] rVecArray = mRMat.toArray();
    // Calib3d.Rodrigues(mRMat, rotCameraMatrix1);
    double[] tVecArray = mTVec.toArray();
    MatOfDouble projMatrix = new MatOfDouble(3, 4, CvType.CV_32F); //projMatrix 3x4 input projection matrix P.
    projMatrix.put(0, 0, rVecArray[0]);
    projMatrix.put(0, 1, rVecArray[1]);
    projMatrix.put(0, 2, rVecArray[2]);
    projMatrix.put(0, 3, 0);
    projMatrix.put(1, 0, rVecArray[3]);
    projMatrix.put(1, 1, rVecArray[4]);
    projMatrix.put(1, 2, rVecArray[5]);
    projMatrix.put(1, 3, 0);
    projMatrix.put(2, 0, rVecArray[6]);
    projMatrix.put(2, 1, rVecArray[7]);
    projMatrix.put(2, 2, rVecArray[8]);
    projMatrix.put(2, 3, 0);
    MatOfDouble cameraMatrix = new MatOfDouble(3, 3, CvType.CV_32F); //cameraMatrix Output 3x3 camera matrix K.
    MatOfDouble rotMatrix = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrix Output 3x3 external rotation matrix R.
    MatOfDouble transVect = new MatOfDouble(4, 1, CvType.CV_32F); //transVect Output 4x1 translation vector T.
    MatOfDouble rotMatrixX = new MatOfDouble(3, 3, CvType.CV_32F);
    MatOfDouble rotMatrixY = new MatOfDouble(3, 3, CvType.CV_32F);
    MatOfDouble rotMatrixZ = new MatOfDouble(3, 3, CvType.CV_32F);
    MatOfDouble eulerAngles = new MatOfDouble(3, 1, CvType.CV_32F); //eulerAngles Optional three-element vector containing the three Euler angles of rotation in degrees.
    Calib3d.decomposeProjectionMatrix(projMatrix,
            cameraMatrix,
            rotMatrix,
            transVect,
            rotMatrixX,
            rotMatrixY,
            rotMatrixZ,
            eulerAngles);
    double[] eulerArray = eulerAngles.toArray();
    return eulerArray;
}
Homography relates images of the same planar surface, so it works only if there is a dominant plane in the image and you can find enough feature points lying on the plane in both images and successfully match them. The minimum number of matches is four, and the math will work under the assumption that the matches are 100% correct. With the help of robust estimation like RANSAC, you can get the result even if some elements in your set of feature point matches are obvious mismatches or are not placed on a plane.
For a more general case of a set of matched features without the planarity assumption, you will need to find an essential matrix. The exact definition of the matrix can be found here. In short, it works more or less like homography - it relates corresponding points in two images. The minimum number of matches required to compute the essential matrix is five. To get the result from such a minimum sample, you need to make sure that the established matches are 100% correct. Again, robust estimation can help if there are outliers in your correspondence set -- and with automatic feature detection and matching there usually are.
OpenCV 3.0 has a function for essential matrix computation, conveniently integrated with RANSAC robust estimation. The essential matrix can be decomposed to the rotation matrix and translation vector as shown in the Wikipedia article I linked before. OpenCV 3.0 has a readily available function for this, too.
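For illustration, the essential-matrix route could look roughly like this with the OpenCV 3.x Java bindings (a minimal sketch; rotationFromMatches is a hypothetical helper, and the focal length and principal point are placeholder values that should come from your actual calibration):
// rotation between two views from matched points, without a planarity assumption
static Mat rotationFromMatches(MatOfPoint2f points1, MatOfPoint2f points2) {
    double focal = 400;             // placeholder focal length in pixels
    Point pp = new Point(320, 240); // placeholder principal point
    Mat mask = new Mat();
    Mat E = Calib3d.findEssentialMat(points1, points2, focal, pp, Calib3d.RANSAC, 0.999, 1.0, mask);
    Mat R = new Mat();
    Mat t = new Mat();
    // recoverPose picks the (R, t) decomposition of E that puts the points in front of both cameras
    Calib3d.recoverPose(E, points1, points2, R, t, focal, pp, mask);
    return R; // 3x3 rotation matrix; decompose into Euler angles as needed
}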
The following code now works for me, and I have decomposed the Euler angles from the homography matrix! I get some values for pitch, roll and yaw, but I don't know whether they are correct. Does anybody have an idea how I can test this?
private static MatOfDMatch filterMatchesByHomography(MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch matches){
    List<Point> lp1 = new ArrayList<Point>(500);
    List<Point> lp2 = new ArrayList<Point>(500);
    KeyPoint[] k1 = keypoints1.toArray();
    KeyPoint[] k2 = keypoints2.toArray();
    List<DMatch> matchesList = matches.toList();
    if (matchesList.size() < 4){
        MatOfDMatch mat = new MatOfDMatch();
        return mat;
    }
    // Add matched keypoints to new lists to apply the homography
    for(DMatch match : matchesList){
        Point kk1 = k1[match.queryIdx].pt;
        Point kk2 = k2[match.trainIdx].pt;
        lp1.add(kk1);
        lp2.add(kk2);
    }
    MatOfPoint2f srcPoints = new MatOfPoint2f(lp1.toArray(new Point[0]));
    MatOfPoint2f dstPoints = new MatOfPoint2f(lp2.toArray(new Point[0]));
    //---------------------------------------
    Mat mask = new Mat();
    Mat homography = Calib3d.findHomography(srcPoints, dstPoints, Calib3d.RANSAC, 0.2, mask); // Finds a perspective transformation between two planes. ---Calib3d.LMEDS
    Mat pose = cameraPoseFromHomography(homography);
    // Decompose the rotation matrix into Euler angles
    pitch = Math.atan2(pose.get(2, 1)[0], pose.get(2, 2)[0]); // atan2(r32, r33)
    roll = Math.atan2(-1 * pose.get(2, 0)[0], Math.sqrt(Math.pow(pose.get(2, 1)[0], 2) + Math.pow(pose.get(2, 2)[0], 2))); // atan2(-r31, sqrt(r32^2 + r33^2))
    yaw = Math.atan2(pose.get(1, 0)[0], pose.get(0, 0)[0]); // atan2(r21, r11)
    List<DMatch> matches_homo = new ArrayList<DMatch>();
    int size = (int) mask.size().height;
    for(int i = 0; i < size; i++){
        if (mask.get(i, 0)[0] == 1){
            DMatch d = matchesList.get(i);
            matches_homo.add(d);
        }
    }
    MatOfDMatch mat = new MatOfDMatch();
    mat.fromList(matches_homo);
    return mat;
}
This is my camera pose from the homography matrix (see this page too):
private static Mat cameraPoseFromHomography(Mat h) {
    //Log.d("DEBUG", "cameraPoseFromHomography: homography " + matToString(h));
    Mat pose = Mat.eye(3, 4, CvType.CV_32FC1); // 3x4 matrix, the camera pose
    float norm1 = (float) Core.norm(h.col(0));
    float norm2 = (float) Core.norm(h.col(1));
    float tnorm = (norm1 + norm2) / 2.0f; // Normalization value
    Mat normalizedTemp = new Mat();
    Core.normalize(h.col(0), normalizedTemp);
    normalizedTemp.convertTo(normalizedTemp, CvType.CV_32FC1);
    normalizedTemp.copyTo(pose.col(0)); // Normalize the rotation and copy the column into pose
    Core.normalize(h.col(1), normalizedTemp);
    normalizedTemp.convertTo(normalizedTemp, CvType.CV_32FC1);
    normalizedTemp.copyTo(pose.col(1)); // Normalize the rotation and copy the column into pose
    Mat p3 = pose.col(0).cross(pose.col(1)); // Compute the cross-product of p1 and p2
    p3.copyTo(pose.col(2)); // Third column is the cross-product of columns one and two
    double[] buffer = new double[3];
    h.col(2).get(0, 0, buffer);
    pose.put(0, 3, buffer[0] / tnorm); // vector t [R|t] is the last column of pose
    pose.put(1, 3, buffer[1] / tnorm);
    pose.put(2, 3, buffer[2] / tnorm);
    return pose;
}
Is there a way in Java or OpenCV, preferably Java, to get an HSV histogram from an RGB image?
I tried exploring JAI, but it creates a histogram for RGB images.
Thanks,
Harshit
First use cv::cvtColor to convert the RGB image to HSV,
then use cv::calcHist to compute the histogram.
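In the Java bindings, that could look roughly like this (a minimal sketch, assuming rgbImage is a 3-channel RGB Mat and binning only the hue channel):
Mat hsv = new Mat();
Imgproc.cvtColor(rgbImage, hsv, Imgproc.COLOR_RGB2HSV);
Mat hist = new Mat();
// histogram of channel 0 (hue): 180 bins over the range [0, 180)
Imgproc.calcHist(Arrays.asList(hsv), new MatOfInt(0), new Mat(), hist, new MatOfInt(180), new MatOfFloat(0f, 180f));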
Here is the pseudocode for a simple RGB-to-HSV converter. It will give an H of UNDEFINED if the color is a shade of gray; otherwise H is between 0 and 6.
x = min(R, G, B);
V = max(R, G, B);
if (V == x) {
    H = UNDEFINED
    S = 0
}
else {
    if (R == x) {
        f = G - B;
        i = 3;
    } else if (G == x) {
        f = B - R;
        i = 5;
    } else {
        f = R - G;
        i = 1;
    }
    H = i - f / (V - x);
    S = (V - x) / V;
}
Now you can either convert all your pixels and bin them to construct your HSV histogram, or you can convert each bin of your RGB histogram to an HSV bin.
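A direct Java transcription of that pseudocode might look like this (a sketch; 8-bit R, G, B inputs are assumed, and UNDEFINED is represented as -1):
// converts one 8-bit RGB pixel to HSV; H is in [0, 6) or -1 if undefined (gray)
static double[] rgbToHsv(int r, int g, int b) {
    double x = Math.min(Math.min(r, g), b);
    double v = Math.max(Math.max(r, g), b);
    if (v == x) {
        return new double[]{-1, 0, v}; // gray: hue undefined, saturation 0
    }
    double f = (r == x) ? g - b : ((g == x) ? b - r : r - g);
    int i = (r == x) ? 3 : ((g == x) ? 5 : 1);
    return new double[]{i - f / (v - x), (v - x) / v, v};
}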
You can use the "JavaCV" library to access OpenCV functions directly from Java:
http://code.google.com/p/javacv/
Then you can use my code for RGB to HSV that is better than OpenCV's cvConvert function:
http://www.shervinemami.co.cc/colorConversion.html
Cheers,
Shervin Emami.
Here is some code to do this:
// Assume SourceImage is a Bitmap ARGB_8888
BitmapFactory.Options options = new BitmapFactory.Options();
options.inPreferredConfig = Bitmap.Config.ARGB_8888;
Bitmap refImage = BitmapFactory.decodeFile(mBaseDir + "some_reference.jpg", options);
Mat hsvRef = new Mat();
Mat hsvSource = new Mat();
Mat srcRef = new Mat();
Utils.bitmapToMat(refImage, srcRef);
Mat srcSource = new Mat();
Utils.bitmapToMat(SourceImage, srcSource);
/// Convert to HSV (bitmapToMat yields 4-channel RGBA, so drop the alpha channel first)
Imgproc.cvtColor(srcRef, srcRef, Imgproc.COLOR_RGBA2RGB);
Imgproc.cvtColor(srcSource, srcSource, Imgproc.COLOR_RGBA2RGB);
Imgproc.cvtColor(srcRef, hsvRef, Imgproc.COLOR_RGB2HSV);
Imgproc.cvtColor(srcSource, hsvSource, Imgproc.COLOR_RGB2HSV);
/// Using 50 bins for hue and 60 for saturation
int hBins = 50;
int sBins = 60;
MatOfInt histSize = new MatOfInt(hBins, sBins);
// hue varies from 0 to 179, saturation from 0 to 255
MatOfFloat ranges = new MatOfFloat(0f, 180f, 0f, 256f);
// we compute the histogram from the 0-th and 1-st channels
MatOfInt channels = new MatOfInt(0, 1);
Mat histRef = new Mat();
Mat histSource = new Mat();
ArrayList<Mat> histImages = new ArrayList<Mat>();
histImages.add(hsvRef);
Imgproc.calcHist(histImages, channels, new Mat(), histRef, histSize, ranges, false);
Core.normalize(histRef, histRef, 0, 1, Core.NORM_MINMAX, -1, new Mat());
histImages = new ArrayList<Mat>();
histImages.add(hsvSource);
Imgproc.calcHist(histImages, channels, new Mat(), histSource, histSize, ranges, false);
Core.normalize(histSource, histSource, 0, 1, Core.NORM_MINMAX, -1, new Mat());
double resp1 = Imgproc.compareHist(histRef, histSource, 0); // 0 = correlation
double resp2 = Imgproc.compareHist(histRef, histSource, 1); // 1 = chi-square
double resp3 = Imgproc.compareHist(histRef, histSource, 2); // 2 = intersection
double resp4 = Imgproc.compareHist(histRef, histSource, 3); // 3 = Bhattacharyya distance
First, you have to convert the RGB image into an HSV image using cv::cvtColor, and then you can use cv::calcHist to compute the HSV histogram.