Combine lane detection and object detection in Android, Android Studio - Java

I want to make an application that performs lane (line) detection. First I made a really basic line detection step; the code below is the relevant part.
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    mat3 = inputFrame.rgba();

    // ROI (Region of Interest)
    double m_dWscale = (double) 5 / 6;
    double m_dHscale = (double) 1 / 2;
    double m_dWscale1 = (double) 4 / 6;
    int mRoiWidth = (int) (mRgba.size().width * m_dWscale);
    int mRoiHeight = (int) (mRgba.size().height * m_dHscale);
    int mRoiWidth1 = (int) (mRgba.size().width * m_dWscale1);
    int mRoiX = (int) (mRgba.size().width - mRoiWidth);
    int mRoiY = (int) (mRgba.size().height - mRoiHeight);
    roi_rect = new Rect(mRoiX, mRoiY, mRoiWidth1, mRoiHeight);
    m_matRoi = mRgba.submat(roi_rect);
    Imgproc.cvtColor(m_matRoi, m_matRoi, Imgproc.COLOR_BGR2GRAY, 1);
    m_matRoi.copyTo(mRgba.submat(roi_rect));

    // BGR to HSV and in-range masking of bright pixels
    scalarLow = new Scalar(0, 0, 200);
    scalarHigh = new Scalar(180, 255, 255);
    Imgproc.cvtColor(mRgba.submat(roi_rect), mat1, Imgproc.COLOR_BGR2HSV);
    Core.inRange(mat1, scalarLow, scalarHigh, mat2);
    Core.bitwise_and(mRgba.submat(roi_rect), mRgba.submat(roi_rect), mat1, mat2);
    mRgba = mat3;
    Imgproc.dilate(mat1, mat1, new Mat(), new Point(1, 2), 2);

    // Canny edge detection
    Mat edges = new Mat();
    Imgproc.Canny(mat1, edges, 90, 150);

    // Probabilistic Hough transform
    Mat lines = new Mat();
    Point p1 = new Point();
    Point p2 = new Point();
    double a, b;
    double x0, y0; // (these locals are currently unused)
    Imgproc.HoughLinesP(edges, lines, 1.0, Math.PI / 180.0, 50, 100.0, 10.0);
    for (int i = 0; i < lines.rows(); i++) {
        double[] l = lines.get(i, 0);
        Imgproc.line(mRgba.submat(roi_rect), new Point(l[0], l[1]), new Point(l[2], l[3]), new Scalar(0, 0, 255.0), 3);
    }

    // return the frame with the detected lines drawn on it
    return mRgba;
}
I want to combine the code above with the TensorFlow Lite object detection (see the code below).
Code for the TensorFlow Lite part:
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    Mat out = new Mat();
    // objectDetectorClass is a separate class that performs the object detection
    out = objectDetectorClass.recognizeImage(mRgba);
    return out;
}
So the line detection code returns mRgba and the object detection code returns out. I have no idea how to return both the detected lines and the detected objects in one frame.
Thank you!
PS: I forgot to add the GitHub link for the TensorFlow Lite object detection application code: https://github.com/bendahouwael/Vehicle-Detection-App-Android. I hope this repository helps.

I solved the issue.
The fix was to keep drawing the lines on mRgba as before and then, instead of returning mRgba directly, pass it through the object detector and return its output:
//return mRgba;
Mat out=new Mat();
out= objectDetectorClass.recognizeImage(mRgba);
return out;
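For completeness, here is a minimal sketch of how the combined onCameraFrame could look. It assumes that recognizeImage() from the linked project takes an RGBA Mat, draws its detection boxes on it, and returns the annotated Mat; drawLanes() is a hypothetical helper that wraps the line-detection code shown above.

    public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
        mRgba = inputFrame.rgba();

        // 1) run the lane detection (ROI, inRange, Canny, HoughLinesP) and draw the lines into mRgba
        drawLanes(mRgba); // hypothetical helper wrapping the line-detection code above

        // 2) run the TFLite detector on the frame that already contains the lines;
        //    it draws its bounding boxes on top and returns the annotated frame
        Mat out = objectDetectorClass.recognizeImage(mRgba);
        return out;
    }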

Related

How to detect glittering on a surface OpenCV

Please help me with this, as I am new to image processing.
I have the following images; there is glitter/powder on the surface of a hand. How do I go about detecting it on the hand?
I have tried thresholding at 70% of the maximum intensity in the image, but only the first image works; the rest do not. Could anyone suggest methods I could use to perform the detection? (Any available code to try would be good.) Thank you.
Input Image 1: The only image that works with the above code
Input Image 2
Input Image 3
Desired Outcome
// convert from Bitmap to Mat (note: the Mat constructor takes rows (height) first, then cols (width))
Mat mat = new Mat(bitmap1.getHeight(), bitmap1.getWidth(), CvType.CV_8UC3);
Utils.bitmapToMat(bitmap1, mat);

Mat grayMat = new Mat();
Imgproc.cvtColor(mat, grayMat, Imgproc.COLOR_BGR2GRAY);
//Log.d("TAGG", "intensity" + mat.dump());

int rows = mat.rows();
int cols = mat.cols();

// threshold at 70% of the maximum intensity in the image
double maxIntensity = Core.minMaxLoc(grayMat).maxVal;
double minIntensity = 0.7 * maxIntensity;
Log.d("TAGG", "intensity" + maxIntensity);

Mat thresholdMat = new Mat();
Imgproc.threshold(grayMat, thresholdMat, minIntensity, maxIntensity, Imgproc.THRESH_BINARY_INV);

Bitmap outputBitmap = Bitmap.createBitmap(thresholdMat.cols(), thresholdMat.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(thresholdMat, outputBitmap);
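One direction that may be more robust than a single global threshold is a morphological white top-hat, which keeps only small bright specks relative to their local neighbourhood. The following is just a sketch of that idea applied to grayMat from the snippet above; the kernel size is a guess that would need tuning, and Otsu is used so the cut-off adapts to each image.

    // white top-hat: original minus its morphological opening; small bright specks survive,
    // while large bright regions (skin highlights, background) are mostly suppressed
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(15, 15));
    Mat topHat = new Mat();
    Imgproc.morphologyEx(grayMat, topHat, Imgproc.MORPH_TOPHAT, kernel);

    // binarize the top-hat response; Otsu picks the threshold per image
    Mat glitterMask = new Mat();
    Imgproc.threshold(topHat, glitterMask, 0, 255, Imgproc.THRESH_BINARY + Imgproc.THRESH_OTSU);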

Reduce quality of image for faster SURF algorithm OpenCV Android

I am trying to match images with the SURF algorithm on Android. I have a JavaCameraView object which captures frames from the camera in my Java class. I pass each frame to my native class like this:
@Override
public void onCameraViewStarted(int width, int height) {
    _mRgba = new Mat(height, width, CvType.CV_8UC4);
}

@Override
public void onCameraViewStopped() {
    _mRgba.release();
}

@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    _mRgba = inputFrame.rgba();
    nativeClass.detectAndMatch(_mRgba.getNativeObjAddr());
    return _mRgba;
}
But the SURF part of the C++ code is very slow. This is my code:
Mat& frame = *(Mat*) addrRgba;
Mat grayFrame, desGrayFrame;
Mat object, desObject;
vector<KeyPoint> kpObject;
vector<KeyPoint> kpGrayFrame;
vector<vector<DMatch> > matches;
vector<DMatch> good_matches;
float thresholdMatchingNN = 0.7;
unsigned int tresholdGoodMatches = 4;
int minHess = 3000;
FlannBasedMatcher matcher;

object = imread("/sdcard/NoTouch/elli.png", CV_LOAD_IMAGE_GRAYSCALE);
if (!object.data) {
    __android_log_print(ANDROID_LOG_ERROR, "TRACKERS", "%s", "Can not load image from phone");
}

SurfFeatureDetector detector(minHess);
detector.detect(object, kpObject);
SurfDescriptorExtractor extractor;
extractor.compute(object, kpObject, desObject);

cvtColor(frame, grayFrame, CV_RGB2GRAY);
detector.detect(grayFrame, kpGrayFrame);
extractor.compute(grayFrame, kpGrayFrame, desGrayFrame);
matcher.knnMatch(desObject, desGrayFrame, matches, 2);
I decided to reduce the image resolution to speed this up, like this:
Size szSource = new Size(160, 120);
_mRgba = new Mat(szSource, CvType.CV_8UC3);
It is a little faster, but still slow. And I am confused by this line in Java:
Size szSource = new Size(160, 120);
because different devices support different resolutions. Can you explain clearly how to reduce the resolution of the picture? And do you have any other suggestions for speeding up this process?
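One way to shrink the frame regardless of the device's native resolution is to resize it relative to its own size just before handing it to the native code. This is only a sketch under that assumption; the 0.5 scale factor is an arbitrary example, and the rest of the pipeline is taken to stay as posted.

    @Override
    public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
        _mRgba = inputFrame.rgba();

        // downscale by a fixed factor of the frame's own size, so the same code
        // works whatever resolution the device delivers
        Mat small = new Mat();
        Imgproc.resize(_mRgba, small, new Size(), 0.5, 0.5, Imgproc.INTER_AREA);

        // run the native SURF matching on the smaller image
        nativeClass.detectAndMatch(small.getNativeObjAddr());

        // still return the full-resolution frame for display
        return _mRgba;
    }

Another saving worth considering: the posted C++ code detects and describes the reference object's keypoints on every frame, even though the object image never changes, so computing desObject and kpObject once (outside the per-frame path) should remove a large share of the work.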

Replicate Gimp Unsharp Mask with Java - OpenCv

I'm trying to replicate GIMP's unsharp mask using Java and OpenCV. I use a grayscale image as input and apply the unsharp mask, but the results are not even close.
I am trying to implement this C++ code:
Mat blurred; double sigma = 1, threshold = 5, amount = 1;
GaussianBlur(img, blurred, Size(), sigma, sigma);
Mat lowContrastMask = abs(img - blurred) < threshold;
Mat sharpened = img*(1+amount) + blurred*(-amount);
img.copyTo(sharpened, lowContrastMask);
And this is my Java implementation:
double sigma = 1, threshold = 5, amount = 1;
Mat source = Imgcodecs.imread(input.getName());
Mat destination = new Mat();
Imgproc.GaussianBlur(source, destination, new Size(), sigma, sigma);
Mat lowContrastMask = new Mat();
Core.absdiff(source, destination, lowContrastMask);
Imgproc.threshold(lowContrastMask, lowContrastMask, 0, threshold, Imgproc.THRESH_BINARY);
Mat sharpened = new Mat();
Core.multiply(source, new Scalar(0), sharpened, amount+1);
Mat sharpened2 = new Mat();
Core.multiply(destination, new Scalar(0), sharpened2, -amount);
Core.add(sharpened2, sharpened, sharpened);
source.copyTo(sharpened, lowContrastMask);
Alternative Unsharp Masking method:
Mat source = Imgcodecs.imread(input.getName());
Mat destination = new Mat();
Imgproc.GaussianBlur(source, destination, new Size(0,0), 60);
Core.addWeighted(source, 1.5, destination, -1, 0, destination);
So, both methods run, but the results are not as good as GIMP's. I'm open to any suggestion. I know it looks like a bad implementation; I'm a newbie and I appreciate any help.
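For what it's worth, here is a minimal sketch of a more literal translation of the C++ snippet above, assuming an 8-bit grayscale input. The main differences from the first Java attempt are that the mask is built by comparing the absolute difference against the threshold, and that img*(1+amount) + blurred*(-amount) is computed with a single addWeighted call.

    double sigma = 1, threshold = 5, amount = 1;
    Mat img = Imgcodecs.imread(input.getName(), Imgcodecs.IMREAD_GRAYSCALE);

    // blurred = GaussianBlur(img); kernel size (0,0) is derived from sigma, like Size() in C++
    Mat blurred = new Mat();
    Imgproc.GaussianBlur(img, blurred, new Size(), sigma, sigma);

    // lowContrastMask = abs(img - blurred) < threshold
    // (THRESH_BINARY_INV gives 255 where the difference is <= threshold, 0 elsewhere)
    Mat diff = new Mat();
    Core.absdiff(img, blurred, diff);
    Mat lowContrastMask = new Mat();
    Imgproc.threshold(diff, lowContrastMask, threshold, 255, Imgproc.THRESH_BINARY_INV);

    // sharpened = img*(1+amount) + blurred*(-amount)
    Mat sharpened = new Mat();
    Core.addWeighted(img, 1 + amount, blurred, -amount, 0, sharpened);

    // keep the original pixels wherever the local contrast is low
    img.copyTo(sharpened, lowContrastMask);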

How can I draw rectangle with MatOfKeyPoint for Text Detection | Java

I am working on real-time text detection and recognition with OpenCV4Android. The recognition part is completely finished. However, I have a question about the text detection. I'm using the MSER FeatureDetector to detect text.
This is the real-time part that calls the method:
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    carrierMat = inputFrame.gray();
    carrierMat = General.MSER(carrierMat);
    return carrierMat;
}
And this is the basic MSER implementation:
private static FeatureDetector fd = FeatureDetector.create(FeatureDetector.MSER);
private static MatOfKeyPoint mokp = new MatOfKeyPoint();
private static Mat edges = new Mat();

public static Mat MSER(Mat mat) {
    // Canny edges are used as a mask for the detector
    Imgproc.Canny(mat, edges, 400, 450);
    fd.detect(mat, mokp, edges);
    // draw the detected keypoints
    Features2d.drawKeypoints(mat, mokp, mat);
    return mat;
}
It works fine for finding text with the edges mask.
I would like to draw rectangles around the clusters like this:
or this:
You can assume that I have the right points.
As you can see, the fd.detect() method returns a MatOfKeyPoint, so I've tried this method for drawing a rectangle:
public static Mat MSER_(Mat mat) {
    fd.detect(mat, mokp);
    KeyPoint[] refKp = mokp.toArray();
    Point[] refPts = new Point[refKp.length];
    for (int i = 0; i < refKp.length; i++) {
        refPts[i] = refKp[i].pt;
    }
    MatOfPoint2f refMatPt = new MatOfPoint2f(refPts);
    MatOfPoint2f approxCurve = new MatOfPoint2f();

    // Processing on mMOP2f1 which is of type MatOfPoint2f
    double approxDistance = Imgproc.arcLength(refMatPt, true) * 0.02;
    Imgproc.approxPolyDP(refMatPt, approxCurve, approxDistance, true);

    // Convert back to MatOfPoint
    MatOfPoint points = new MatOfPoint(approxCurve.toArray());

    // Get bounding rect
    Rect rect = Imgproc.boundingRect(points);

    // Draw enclosing rectangle (all same color, but you could use variable i to make them unique)
    Imgproc.rectangle(mat, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height), Detect_Color_, 5);

    //Features2d.drawKeypoints(mat, mokp, mat);
    return mat;
}
But when the Imgproc.arcLength() call runs, the app suddenly stops. I also tried giving Imgproc.approxPolyDP() a fixed approxDistance value such as 0.1, but that doesn't work well either.
So how can I draw rectangles around the detected text?
I tested your code and had exactly the same problem. For now I still can't find the cause.
But I found a project that uses both MSER and morphological operations; you can find it here. The project has a very simple structure and the author put the text detection in the onCameraFrame method, just like you. I implemented the method from that project and it worked, but the result was still not very good.
If you are looking for better text detection tools, here are two of them:
Stroke Width Transform (SWT): a completely different method for finding text areas. It's fast and efficient, but it is only available in C++ or Python. You can find some examples here.
Class-specific Extremal Regions using the ERFilter class: an advanced version of MSER. Unfortunately, it is only available in OpenCV 3.0.0-dev, so you can't use it in the current version of OpenCV4Android. The documentation is here.
To be honest I am new to this area (2 months), but I hope this information helps you finish your project.
(Update: 2015/9/13)
I've translated a C++ method from a post. It works far better than the first GitHub project I mentioned.
Here is the code:
public void apply(Mat src, Mat dst) {
    if (dst != src) {
        src.copyTo(dst);
    }
    Mat img_gray, img_sobel, img_threshold, element;

    img_gray = new Mat();
    Imgproc.cvtColor(src, img_gray, Imgproc.COLOR_RGB2GRAY);

    img_sobel = new Mat();
    Imgproc.Sobel(img_gray, img_sobel, CvType.CV_8U, 1, 0, 3, 1, 0, Core.BORDER_DEFAULT);

    img_threshold = new Mat();
    Imgproc.threshold(img_sobel, img_threshold, 0, 255, Imgproc.THRESH_OTSU + Imgproc.THRESH_BINARY);

    element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(17, 3));
    Imgproc.morphologyEx(img_threshold, img_threshold, Imgproc.MORPH_CLOSE, element); // Does the trick

    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Mat hierarchy = new Mat();
    Imgproc.findContours(img_threshold, contours, hierarchy, 0, 1); // 0 = RETR_EXTERNAL, 1 = CHAIN_APPROX_NONE

    List<MatOfPoint> contours_poly = new ArrayList<MatOfPoint>(contours.size());
    contours_poly.addAll(contours);

    MatOfPoint2f mMOP2f1, mMOP2f2;
    mMOP2f1 = new MatOfPoint2f();
    mMOP2f2 = new MatOfPoint2f();

    for (int i = 0; i < contours.size(); i++)
        if (contours.get(i).toList().size() > 100) {
            contours.get(i).convertTo(mMOP2f1, CvType.CV_32FC2);
            Imgproc.approxPolyDP(mMOP2f1, mMOP2f2, 3, true);
            mMOP2f2.convertTo(contours_poly.get(i), CvType.CV_32S);
            Rect appRect = Imgproc.boundingRect(contours_poly.get(i));
            if (appRect.width > appRect.height) {
                Imgproc.rectangle(dst, new Point(appRect.x, appRect.y), new Point(appRect.x + appRect.width, appRect.y + appRect.height), new Scalar(255, 0, 0));
            }
        }
}
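If it helps, this is roughly how the method above could be wired into the camera callback from the question; the RGBA-to-RGB conversion is there because apply() starts with COLOR_RGB2GRAY, and the names are otherwise just illustrative.

    public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
        Mat rgba = inputFrame.rgba();

        // apply() expects an RGB image, while the camera delivers RGBA
        Mat rgb = new Mat();
        Imgproc.cvtColor(rgba, rgb, Imgproc.COLOR_RGBA2RGB);

        // draws a red box around each wide, contour-rich region (likely a text line)
        Mat annotated = new Mat();
        apply(rgb, annotated);
        return annotated;
    }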

Quick & fast template matching on screen. Coordinates needed too. Java

I need a way to find an image on the screen. I've searched for ways to do this on SO, but some take extremely long. I need it to be fast and efficient; it does not need to be accurate. Basically, I'm planning to compare or search for a small pixelated image, say 11x10 pixels, on the screen.
I also need a way to know the x and y coordinates of the small image on the screen.
Although I've looked through many tools out there like JavaCV and OpenCV, I just wanted to see if there are any other ways to do this.
TL;DR
I need a fast way to search for a small (e.g., 11x10) image on the screen and know its x,y coordinates.
I think you may find this answer relevant, but it is for Windows and in C++. I'm sure you can convert it to any language fairly easily.
This question is very old, but I'm trying to achieve the exact same thing here. I've found that combining these answers does the trick:
Convert BufferedImage TYPE_INT_RGB to OpenCV Mat Object
OpenCV Template Matching example in Android
The reason you need a conversion is that when you grab a screenshot with the awt.Robot class, it is in the INT_RGB format. The template matching example expects bytes, and you cannot grab byte data from this type of image directly.
Here's my implementation of those two answers. At first it was incomplete: the output was all screwed up, and I think it had something to do with the IntBuffer/ByteBuffers.
Edit: I've added a new helper method that converts an INT_RGB image to BYTE_BGR. I can now grab the coordinates of the template on the image using matchLoc. This seems to work pretty well; I was able to use it with a robot that clicks the Start menu for me based on the template.
private BufferedImage FindTemplate() {
    System.out.println("\nRunning Template Matching");
    int match_method = Imgproc.TM_SQDIFF;

    BufferedImage screenShot = null;
    try {
        Robot rob = new Robot();
        screenShot = rob.createScreenCapture(new Rectangle(Toolkit.getDefaultToolkit().getScreenSize()));
    } catch (AWTException ex) {
        Logger.getLogger(MainGUI.class.getName()).log(Level.SEVERE, null, ex);
    }
    if (screenShot == null) return null;

    Mat img = BufferedImageToMat(convertIntRGBTo3ByteBGR(screenShot));
    String templateFile = "C:\\Temp\\template1.JPG";
    Mat templ = Highgui.imread(templateFile);

    // Create the result matrix
    int result_cols = img.cols() - templ.cols() + 1;
    int result_rows = img.rows() - templ.rows() + 1;
    Mat result = new Mat(result_rows, result_cols, CvType.CV_32FC1);

    // Do the matching and normalize
    Imgproc.matchTemplate(img, templ, result, match_method);
    Core.normalize(result, result, 0, 1, Core.NORM_MINMAX, -1, new Mat());
    Highgui.imwrite("out2.png", result);

    // Localize the best match with minMaxLoc
    MinMaxLocResult mmr = Core.minMaxLoc(result);
    Point matchLoc;
    if (match_method == Imgproc.TM_SQDIFF
            || match_method == Imgproc.TM_SQDIFF_NORMED) {
        matchLoc = mmr.minLoc;
    } else {
        matchLoc = mmr.maxLoc;
    }

    // Draw the match location on the screenshot
    Graphics2D graphics = screenShot.createGraphics();
    graphics.setColor(Color.red);
    graphics.setStroke(new BasicStroke(3));
    graphics.drawRect((int) matchLoc.x, (int) matchLoc.y, templ.width(), templ.height());
    graphics.dispose();
    return screenShot;
}

private Mat BufferedImageToMat(BufferedImage img) {
    // img is expected to be TYPE_3BYTE_BGR, whose raster is backed by a byte buffer
    byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
    Mat mat = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    mat.put(0, 0, data);
    return mat;
}

private BufferedImage convertIntRGBTo3ByteBGR(BufferedImage img) {
    // redraw the INT_RGB screenshot into a byte-backed BGR image that OpenCV can read
    BufferedImage convertedImage = new BufferedImage(img.getWidth(), img.getHeight(), BufferedImage.TYPE_3BYTE_BGR);
    Graphics2D graphics = convertedImage.createGraphics();
    graphics.drawImage(img, 0, 0, null);
    graphics.dispose();
    return convertedImage;
}
Results:
Template:
