I am trying to use Mask_RCNN on Android, so I wrote code for it, but the Dnn.readNetFromTensorflow(MODEL_WEIGHTS, TEXT_GRAPH) function is not able to open the model weights and text graph files. It seems the app cannot find the path of the stored files.
I placed these files (Text_Graph and Model_Weights) in the java folder where MainActivity.java is present, but it shows the same error as mentioned below.
I also tried adding these files to the res folder and the assets folder, but I could not pass the model weight path to Dnn.readNetFromTensorflow(MODEL_WEIGHTS, TEXT_GRAPH) because its arguments are of String type, while AssetManager returns an InputStream.
I am using OpenCV 3.4.10 and Android Studio 4.0.
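For the asset-path problem specifically, a workaround I have seen suggested (a minimal sketch, not verified against this project; assetToFilePath is a hypothetical helper name) is to copy each asset to internal storage once and hand OpenCV the resulting absolute file path:

// Sketch: copy a bundled asset to internal storage so OpenCV gets a real file path.
// Assumes the .pb/.pbtxt files are placed under src/main/assets.
private String assetToFilePath(Context context, String assetName) throws IOException {
    File outFile = new File(context.getFilesDir(), assetName);
    if (!outFile.exists()) {
        try (InputStream in = context.getAssets().open(assetName);
             OutputStream out = new FileOutputStream(outFile)) {
            byte[] buf = new byte[4096];
            int len;
            while ((len = in.read(buf)) > 0) {
                out.write(buf, 0, len);
            }
        }
    }
    return outFile.getAbsolutePath();
}

// Usage (file names taken from the code below):
// String modelPath = assetToFilePath(this, "frozen_inference_graph.pb");
// String graphPath = assetToFilePath(this, "mask_rcnn_inception_v2_coco_2018_01_28.pbtxt");
// Net net = Dnn.readNetFromTensorflow(modelPath, graphPath);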
Please also help me with drawing contours over the image in Java. If anyone knows how to run this on Android using the model in C++, please suggest that as well; I already tried that approach too, but got a build error of undefined reference to 'cv::dnn::experimental_dnn_v4::Net::~Net()'.
Any help would be appreciated. Thanks in advance.
On running the app, it throws the following exception:
Caused by: CvException [org.opencv.core.CvException: cv::Exception:
OpenCV(3.4.10)
/build/3_4_pack-android/opencv/modules/dnn/src/caffe/caffe_io.cpp:1132:
error: (-2:Unspecified error) FAILED: fs.is_open(). Can't open
"./frozen_inference_graph.pb" in function 'bool
cv::dnn::ReadProtoFromBinaryFile(const char*,
google::protobuf::Message*)'
]
at org.opencv.dnn.Dnn.readNetFromTensorflow_0(Native Method)
at org.opencv.dnn.Dnn.readNetFromTensorflow(Dnn.java:659)
at com.example.imagecompressor.MainActivity.onActivityResult(MainActivity.java:189)
(Error at this line: Net net = Dnn.readNetFromTensorflow(MODEL_WEIGHTS, TEXT_GRAPH);)
at android.app.Activity.dispatchActivityResult(Activity.java:7454)
at android.app.ActivityThread.deliverResults(ActivityThread.java:4353)
Source code is given below:
MainActivity.java
final String TEXT_GRAPH = "./mask_rcnn_inception_v2_coco_2018_01_28.pbtxt";
final String MODEL_WEIGHTS = "./frozen_inference_graph.pb";
final String CLASSES_FILE ="./mscoco_labels";
Mat tmp = new Mat(bitmap.getHeight(), bitmap.getWidth(), CV_8UC1); // Mat takes (rows, cols): height first
Utils.bitmapToMat(bitmap, tmp);
Mat image = tmp; // use the Mat converted from the bitmap (the imread(img_path) result was immediately overwritten)
Size size = image.size();
int cols = image.cols();
int rows = image.rows();
double h = size.height;
double w = size.width;
int hh = (int)size.height;
int ww = (int)size.width;
if(!image.empty()) {
Mat blob = Dnn.blobFromImage(image, 1.0, new Size(w, h), new Scalar(0), true, false);
// Load the network
Net net = Dnn.readNetFromTensorflow(MODEL_WEIGHTS, TEXT_GRAPH);
net.setPreferableBackend(Dnn.DNN_BACKEND_OPENCV);
net.setPreferableTarget(Dnn.DNN_TARGET_CPU);
net.setInput(blob);
ArrayList<String> outputlayers = new ArrayList<String>();
ArrayList<Mat> outputMats = new ArrayList<Mat>();
outputlayers.add("detection_out_final");
outputlayers.add("detection_masks");
net.forward(outputMats, outputlayers);
Mat numClasses = outputMats.get(0);
Mat numMasks = outputMats.get(1);
numClasses = numClasses.reshape(1, (int) numClasses.total() / 7);
for (int i = 0; i < numClasses.rows(); ++i) {
double confidence = numClasses.get(i, 2)[0];
//System.out.println(confidence);
// Mat objectMask=outputMats.get(i);
if (confidence > 0.5) {
int classId = (int) numClasses.get(i, 1)[0];
String label = classes.get(classId) + ": " + confidence;
System.out.println(label);
int left = (int) (numClasses.get(i, 3)[0] * cols);
int top = (int) (numClasses.get(i, 4)[0] * rows);
int right = (int) (numClasses.get(i, 5)[0] * cols);
int bottom = (int) (numClasses.get(i, 6)[0] * rows);
System.out.println(left + " " + top + " " + right + " " + bottom);
left = max(0, min(left, cols - 1));
top = max(0, min(top, rows - 1));
right = max(0, min(right, cols - 1));
bottom = max(0, min(bottom, rows - 1));
final Rect box = new Rect(left, top, right - left + 1, bottom - top + 1);
//Mat objectMask(numMasks.rows(), numMasks.size[3],CV_32F, numMasks.ptr<float>(i,classId));
// NOTE: the line below only allocates an uninitialized Mat; the real mask for
// detection i and classId still has to be sliced out of numMasks (see the sketch after this code).
Mat objectMask = new Mat(numMasks.rows(), numMasks.cols(), CV_32F);
rectangle(image, new Point(box.x, box.y), new Point(box.x + box.width, box.y + box.height), new Scalar(255, 178, 50), 3);
/* String lab = format("%.2f", confidence);
if (!classes.isEmpty()){
//CV_Assert(classId < (int)classes.size());
if(classId<(int)classes.size()) {
lab = classes.get(classId) + ":" + lab;
}
}*/
Scalar color = new Scalar(rng.nextInt(256), rng.nextInt(256), rng.nextInt(256));
double maskThreshold = 0.3;
// Resize the mask, threshold, color and apply it on the image
resize(objectMask, objectMask, new Size(box.width, box.height));
Imgproc.threshold(objectMask, objectMask, 255 * maskThreshold, 255, Imgproc.THRESH_BINARY);
// Mat mask = (objectMask > maskThreshold);
Mat ili = new Mat();
multiply(image, new Scalar(0.7), ili);
Mat coloredRoi = new Mat();
add(ili, new Scalar(0.3).mul(color), coloredRoi);
coloredRoi.convertTo(coloredRoi, CV_8UC3);
List<MatOfPoint> contours = new ArrayList<>(); // must be initialized, not null
Mat hierarchy = new Mat();                      // must be initialized, not null
objectMask.convertTo(objectMask, CV_8U);
findContours(objectMask, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
drawContours(coloredRoi, contours, -1, color, 5, LINE_8, hierarchy, 100);
coloredRoi.copyTo(image, objectMask);
}
Mat detectedFrame = new Mat();
image.convertTo(detectedFrame, CV_8U);
Imgcodecs.imwrite("outputFile.jpg", detectedFrame);
}
}
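A note on the mask extraction above: the second output of the network is 4-D, shaped [detections, classes, maskHeight, maskWidth], so the per-detection mask has to be sliced out of numMasks rather than allocated as an empty Mat. A hedged sketch, assuming the standard 15x15 masks and 90 COCO classes of mask_rcnn_inception_v2_coco:

// Sketch: slice the mask for detection i and class classId out of the 4-D output.
final int NUM_CLASSES = 90;           // assumption: COCO class count of this model
final int MASK_H = 15, MASK_W = 15;   // assumption: mask size of this model
// Flatten [N, 90, 15, 15] into one row of 15*15 values per (detection, class) pair.
Mat masks2d = numMasks.reshape(1, (int) numMasks.total() / (MASK_H * MASK_W));
Mat objectMask = masks2d.row(i * NUM_CLASSES + classId).reshape(1, MASK_H); // 15x15 CV_32F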
I am developing an Android application and I'm processing frames using OpenCV in real time.
The image processing is heavy, as I'm detecting the largest contour, rotating and cropping it, and applying some segmentation to the cropped Mat object.
The problem is that the FPS starts at 3 and drops to 1.
I'm not doing heavy operations like matToBitmap, and I'm not making native calls yet.
It's my first big project and I don't have much experience with OpenCV. What can I do to increase my FPS?
Here is my code:
long e1 = Core.getTickCount();
Mat mGray = new Mat();
MatOfDouble mu = new MatOfDouble();
MatOfDouble stddev = new MatOfDouble();
Imgproc.cvtColor(origMat, origMat, Imgproc.COLOR_BGRA2BGR);
Imgproc.cvtColor(origMat, mGray, Imgproc.COLOR_BGR2GRAY);
Core.meanStdDev(mGray, mu, stddev);
Imgproc.GaussianBlur(mGray, mGray, new Size(5, 5), 5);
Imgproc.Canny(mGray, mGray, (mu.get(0, 0)[0]) * 0.66, (mu.get(0, 0)[0]) * 1.33, 3, false);
Mat kernell = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9, 9));
Imgproc.morphologyEx(mGray, mGray, Imgproc.MORPH_CLOSE, kernell);
Imgproc.dilate(mGray, mGray, Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3)));
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(mGray, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
double largest_area = 0;
Rect rect = new Rect();
int largest_idx = 0;
for (int idx = 0; idx < contours.size(); idx++) {
double a = Imgproc.contourArea(contours.get(idx)); //Find the area of contour
if (a > largest_area) {
largest_area = a;
largest_idx = idx;
}
}
if (contours.size() > 0) {
MatOfPoint2f new_mat = new MatOfPoint2f(contours.get(largest_idx).toArray());
RotatedRect rbox = Imgproc.minAreaRect(new_mat);
Point[] vertices = new Point[4];
rbox.points(vertices);
List<MatOfPoint> boxContours = new ArrayList<>();
boxContours.add(new MatOfPoint(vertices));
for (int i = 0; i < 4; ++i) {
Imgproc.line(origMat, vertices[i], vertices[(i + 1) % 4], new Scalar(255, 0, 0));
}
double rect_angle = rbox.angle - 90.0f;
Size rect_size = rbox.size;
double d = rect_size.width;
rect_size.width = rect_size.height;
rect_size.height = d;
M = Imgproc.getRotationMatrix2D(rbox.center, rect_angle, 1.0);
Imgproc.warpAffine(origMat, rotated, M, origMat.size());
if (rect_size.width > 70 && rect_size.height > 70)
Imgproc.getRectSubPix(rotated, new Size(rect_size.width - 70, rect_size.height - 70), rbox.center, rotated);
Imgproc.resize(rotated, rotated, origMat.size());
Mat orr = rotated.clone();
Imgproc.cvtColor(orr, orr, Imgproc.COLOR_RGB2HSV);
Core.split(orr, channels);
orr = channels.get(2);
CLAHE clahe = Imgproc.createCLAHE();
clahe.setClipLimit(1);
clahe.apply(orr, orr);
Imgproc.GaussianBlur(orr, orr, new Size(5, 5), 5);
Core.meanStdDev(orr, mu, stddev); // note: this was recomputing stats on mGray, but orr is the image Canny runs on next
Imgproc.Canny(orr, orr, (mu.get(0, 0)[0]) * 0.66, (mu.get(0, 0)[0]) * 1.33, 3, false);
Imgproc.morphologyEx(orr, orr, Imgproc.MORPH_CLOSE, kernell);
Imgproc.dilate(orr, orr, Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3)));
Imgproc.findContours(orr, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
for (int i = contours.size() - 1; i >= 0; i--) {
double area = Imgproc.contourArea(contours.get(i), false); // Find the area of contour
if (area < min_area)
contours.remove(i);
}
if (contours.size() > 0) {
MatOfPoint2f approxCurve = new MatOfPoint2f();
rectList.clear();
for (int idx = 0; idx < contours.size(); idx++) {
//Convert contours(i) from MatOfPoint to MatOfPoint2f
MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(idx).toArray());
//Processing on mMOP2f1 which is in type MatOfPoint2f
double approxDistance = Imgproc.arcLength(contour2f, true) * 0.02;
Imgproc.approxPolyDP(contour2f, approxCurve, approxDistance, true);
//Convert back to MatOfPoint
MatOfPoint points = new MatOfPoint(approxCurve.toArray());
// Get bounding rect of contour
Rect rectt = Imgproc.boundingRect(points);
rectList.add(rectt); // was adding the unused outer 'rect' by mistake
Mat miniature = new Mat(orr, new Rect(rectt.tl(), rectt.br()));
mats.add(miniature);
// draw enclosing rectangle (all same color, but you could use variable i to make them unique)
Imgproc.rectangle(rotated, rectt.tl(), rectt.br(), new Scalar(255, 0, 0));
}
}
long e2 = Core.getTickCount();
long e = e2 - e1;
double time = e / Core.getTickFrequency();
Log.d("timeTAG", "" + time);
return orr;
}
return rotated;
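One common way to attack this (a hedged sketch; the scale factor is an assumption to tune) is to run the whole contour pipeline on a downscaled copy of the frame, keep reusable Mats as fields allocated once instead of per frame, and only draw at full resolution:

// Sketch: process a downscaled frame to raise FPS, then map results back.
final double SCALE = 0.5;          // assumption: tune per device/camera
Mat small = new Mat();             // ideally a field, allocated once and reused
Imgproc.resize(origMat, small, new Size(), SCALE, SCALE, Imgproc.INTER_AREA);
// ... run the detection/contour pipeline on 'small' instead of origMat ...
// When mapping a result Rect r back to the full-resolution frame:
// Rect full = new Rect((int) (r.x / SCALE), (int) (r.y / SCALE),
//                      (int) (r.width / SCALE), (int) (r.height / SCALE));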
I am working on a licence plate recognition application using OpenCV, Tesseract and Java, but I am experiencing issues. I can't seem to segment the text correctly: it's not always the case that all characters are detected and given a bounding box (these are some of my outputs with my code). Also, when I do detect the characters I never know which character is in which box, so when I pass them through Tesseract they come out jumbled. How do I order/format the resulting string?
This one fails despite the clearly visible characters:
Here Z and 6 fail to be detected, even though the 6 is clearly visible:
Below is my code:
originalFrame = image.clone();
roiColor = image.clone();
Imgproc.cvtColor(image, image, Imgproc.COLOR_BGR2GRAY, 0);
originalFrameGrayScale = image.clone();
Mat morph = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9, 9));
Imgproc.morphologyEx(image, image, Imgproc.MORPH_TOPHAT, morph);
Imgproc.Sobel(image, image, -1, 2, 0);
Imgproc.GaussianBlur(image, image, new Size(5,5), 3,3);
Imgproc.morphologyEx(image, image, Imgproc.MORPH_CLOSE, morph);
Imgproc.threshold(image, image, 200, 255, Imgproc.THRESH_OTSU);
Vector<Rect> rectangles = detectionContour(image);
Mat roi = originalFrameGrayScale.clone();
if(!rectangles.isEmpty()){
roi = originalFrameGrayScale.submat(rectangles.get(0));
roiBlack = roi.clone();
roiColor = roiColor.submat(rectangles.get(0));
Imgproc.rectangle(originalFrame, rectangles.get(0).br(), rectangles.get(0).tl(), new Scalar(0,0,255), 2);
}
Imgproc.medianBlur(roi, roi, 3);
Imgproc.adaptiveThreshold(roi, roi, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 15, 3); // maxValue 225 looked like a typo for 255
roiBinarize = roi.clone();
Mat erode = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(1, 1));
Mat dilate = Imgproc.getStructuringElement(Imgproc.MORPH_RECT,new Size(1, 1));
Imgproc.morphologyEx(roi, roi, Imgproc.MORPH_OPEN, dilate);
Imgproc.morphologyEx(roi, roi, Imgproc.MORPH_OPEN, erode);
Imgproc.Canny(roi, roi, 150, 150 * 3, 3, true);
Vector<Rect> letters = detectionPlateCharacterContour(roi);
doTesseractOCR(letters, roiBinarize);
private static void doTesseractOCR(Vector<Rect> letters, Mat plate){
Tesseract instance = new Tesseract();
instance.setLanguage(LANGUAGE);
String resultPlate = "";
for(int i= 0; i < letters.size(); i++){
BufferedImage letter = OpenCvUtils.Mat2bufferedImage(plate.submat(letters.get(i)));
try {
String result = instance.doOCR(letter);
resultPlate += result + " position "+i;
} catch (TesseractException e) {
System.err.println(e.getMessage());
}
System.out.println("Tesseract output: "+resultPlate);
}
}
private static Vector<Rect> detectionPlateCharacterContour(Mat roi) {
Mat contHierarchy = new Mat();
Mat imageMat = roi.clone();
Rect rect = null;
List<MatOfPoint> contours = new ArrayList<>();
Imgproc.findContours(imageMat, contours, contHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);
Vector<Rect> rect_array = new Vector<>();
for (int i = 0; i < contours.size(); i++) {
rect = Imgproc.boundingRect(contours.get(i));
double ratio;
if (rect.height > rect.width) {
    ratio = (double) rect.height / rect.width; // cast to avoid integer division
} else {
    ratio = (double) rect.width / rect.height;
}
Logger.printMessage("Ratio of letter: "+ratio);
double contourarea = Imgproc.contourArea(contours.get(i));
if (contourarea >= 100 && contourarea <= 1000 && ( ratio >= 1 && ratio <= 2)) {
Imgproc.rectangle(roiColor, rect.br(), rect.tl(), new Scalar(255,0,0));
rect_array.add(rect);
}
}
contHierarchy.release();
return rect_array;
}
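Regarding the jumbled OCR output: findContours returns contours in no particular order, so one straightforward fix (a hedged sketch) is to sort the character boxes left-to-right before passing them to Tesseract:

// Sketch: sort detected character boxes by x so OCR follows reading order.
Collections.sort(letters, new Comparator<Rect>() {
    @Override
    public int compare(Rect a, Rect b) {
        return Integer.compare(a.x, b.x); // leftmost box first
    }
});
doTesseractOCR(letters, roiBinarize); // now position i matches reading order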
I read a list of images from a folder, then crop and segment the characters from each image; each file created is saved to another folder with a new name. For example, from the first source image (image1) I segmented 7 new character images, and it goes on like that with the next image.
My question is: how can I regroup these images according to their source image, e.g. assigning them so that im01-im07 come from source image1, and so on. This is the code:
for (int sf = 0; sf < listOfSrcFFiles.length; sf++) {
File imgFile = listOfSrcFFiles[sf];
String fileName = imgFile.getName();
if (fileName.equals("Thumbs.db")) {
imgFile.delete();
} else {
try{
Mat img_grayROI = Highgui.imread(imgFile.getAbsolutePath(), Highgui.CV_LOAD_IMAGE_GRAYSCALE);
Rect roi = new Rect(300, 200, 450, 200);
Mat cropped = new Mat(img_grayROI, roi);
Imgproc.threshold(cropped, cropped, 0, 255, Imgproc.THRESH_BINARY + Imgproc.THRESH_OTSU);
Point shift = new Point(0, 150);
Mat result = cropped;
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(result, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
Imgproc.floodFill(result, new Mat(), shift, new Scalar(255, 0, 0));
Core.bitwise_not(result, result);
for (int i = 0; i < contours.size(); i++) {
if (Imgproc.contourArea(contours.get(i)) > 50) {
Rect rect = Imgproc.boundingRect(contours.get(i));
/*if (rect.height > 25 && rect.height < 50 && rect.width > 10 && rect.width < 50) {*/
if (rect.height > 25 && rect.width < 25) {
Core.rectangle(result, rect.tl(), rect.br(), new Scalar(0, 0, 0));
Mat crop = new Mat(result, rect);
Highgui.imwrite(tmpFolder + fSprator + sf + "_" + i + ".bmp", crop); // separator added so e.g. (sf=1, i=23) and (sf=12, i=3) don't collide
}
}
}
} catch (Exception e) {
    e.printStackTrace(); // at least log the failure instead of swallowing it silently
}
}
}
This is so I can later save them into a text file, separated according to their source image name.
Thank you, any help is much appreciated.
The cleanest solution is to create a bean to hold the results.
E.g.:
public class TransformResult {
    private File input;
    private List<File> transformedFiles = new ArrayList<>();

    public TransformResult(File input) { // constructor used in the loop below
        this.input = input;
    }
    // getters / setters
}
public List<TransformResult> doTransform(File[] listOfSrcFFiles) { // takes an array so the for-each below compiles
List<TransformResult> results = new ArrayList<>();
for (File srcFile : listOfSrcFFiles) {
if (...) {
TransformResult result = new TransformResult(srcFile);
for (...) {
if (...) {
File transformed = doSomeTransformation(srcFile);
result.getTransformedFiles().add(transformed);
}
}
results.add(result);
}
}
return results;
}
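To connect this back to the original loop (a sketch; the output file name is an assumption), the collected results can then be written out grouped by source image:

// Sketch: one line per source image, listing its segmented character files.
try (PrintWriter out = new PrintWriter("groups.txt")) {
    for (TransformResult r : results) {
        StringBuilder line = new StringBuilder(r.getInput().getName()).append(": ");
        for (File f : r.getTransformedFiles()) {
            line.append(f.getName()).append(' ');
        }
        out.println(line.toString().trim());
    }
} catch (FileNotFoundException e) {
    e.printStackTrace();
}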
I have been having some issues getting the outline of the detected object in the correct place; it's as if the coordinates are in the wrong place. I have the Hessian threshold set to 2000, and I have filtered for matches that are less than 3 times the minimum distance. Any help would be appreciated.
Results from running matching and homography:
Code sample below:
public static void findMatches()
{
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
//Load Image 1
Mat img_object = Highgui.imread("./resources/Database/box.png");
//Load Image 2
Mat img_scene = Highgui.imread("./resources/Database/box_in_scene.png");
//Check if either image failed to load; if so, exit the application
//(imread returns an empty Mat rather than null on failure)
if (img_object.empty() || img_scene.empty())
{
    System.exit(0);
}
//Convert Image 1 to greyscale
Mat grayImageobject = new Mat(img_object.rows(), img_object.cols(), img_object.type());
Imgproc.cvtColor(img_object, grayImageobject, Imgproc.COLOR_BGRA2GRAY);
Core.normalize(grayImageobject, grayImageobject, 0, 255, Core.NORM_MINMAX);
//Convert image 2 to greyscale
Mat grayImageScene = new Mat(img_scene.rows(), img_scene.cols(), img_scene.type());
Imgproc.cvtColor(img_scene, grayImageScene, Imgproc.COLOR_BGRA2GRAY);
Core.normalize(grayImageScene, grayImageScene, 0, 255, Core.NORM_MINMAX);
//Create a SURF feature detector
FeatureDetector detector = FeatureDetector.create(4); //4 = SURF
//Cannot input hessian value as normal so we have to write the desired value into a
//file and then read value from file into detector.read
try (Writer writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream("hessian.txt"), "utf-8"))) {
writer.write("%YAML:1.0\nhessianThreshold: 2000.\noctaves: 3\noctaveLayers: 4\nupright: 0\n"); // YAML needs the space after "octaves:"
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
detector.read("hessian.txt");
//Mat of keypoints for object and scene
MatOfKeyPoint keypoints_object = new MatOfKeyPoint();
MatOfKeyPoint keypoints_scene = new MatOfKeyPoint();
//Detect keypoints in scene and object storing them in mat of keypoints
detector.detect(img_object, keypoints_object);
detector.detect(img_scene, keypoints_scene);
DescriptorExtractor extractor = DescriptorExtractor.create(2); //2 = SURF;
Mat descriptor_object = new Mat();
Mat descriptor_scene = new Mat() ;
extractor.compute(img_object, keypoints_object, descriptor_object);
extractor.compute(img_scene, keypoints_scene, descriptor_scene);
DescriptorMatcher matcher = DescriptorMatcher.create(1); // 1 = FLANNBASED
MatOfDMatch matches = new MatOfDMatch();
matcher.match(descriptor_object, descriptor_scene, matches);
List<DMatch> matchesList = matches.toList();
double max_dist = 0.0;
double min_dist = 100.0;
for(int i = 0; i < descriptor_object.rows(); i++){
Double dist = (double) matchesList.get(i).distance;
if(dist < min_dist) min_dist = dist;
if(dist > max_dist) max_dist = dist;
}
System.out.println("-- Max dist : " + max_dist);
System.out.println("-- Min dist : " + min_dist);
LinkedList<DMatch> good_matches = new LinkedList<DMatch>();
MatOfDMatch gm = new MatOfDMatch();
for(int i = 0; i < descriptor_object.rows(); i++){
if(matchesList.get(i).distance < 3*min_dist){
good_matches.addLast(matchesList.get(i));
}
}
gm.fromList(good_matches);
Mat img_matches = new Mat();
Features2d.drawMatches(img_object,keypoints_object,img_scene,keypoints_scene, gm, img_matches, new Scalar(255,0,0), new Scalar(0,0,255), new MatOfByte(), 2);
if(good_matches.size() >= 10){
LinkedList<Point> objList = new LinkedList<Point>();
LinkedList<Point> sceneList = new LinkedList<Point>();
List<KeyPoint> keypoints_objectList = keypoints_object.toList();
List<KeyPoint> keypoints_sceneList = keypoints_scene.toList();
for(int i = 0; i<good_matches.size(); i++){
objList.addLast(keypoints_objectList.get(good_matches.get(i).queryIdx).pt);
sceneList.addLast(keypoints_sceneList.get(good_matches.get(i).trainIdx).pt);
}
MatOfPoint2f obj = new MatOfPoint2f();
obj.fromList(objList);
MatOfPoint2f scene = new MatOfPoint2f();
scene.fromList(sceneList);
Mat homography = Calib3d.findHomography(obj, scene);
Mat obj_corners = new Mat(4,1,CvType.CV_32FC2);
Mat scene_corners = new Mat(4,1,CvType.CV_32FC2);
obj_corners.put(0, 0, new double[] {0,0});
obj_corners.put(1, 0, new double[] {img_object.cols(),0});
obj_corners.put(2, 0, new double[] {img_object.cols(),img_object.rows()});
obj_corners.put(3, 0, new double[] {0,img_object.rows()});
//Compute the most probable perspective transformation
//out of several pairs of corresponding points.
//Imgproc.getPerspectiveTransform(obj_corners, scene_corners);
Core.perspectiveTransform(obj_corners,scene_corners, homography);
Core.line(img_matches, new Point(scene_corners.get(0,0)), new Point(scene_corners.get(1,0)), new Scalar(0, 255, 0),4);
Core.line(img_matches, new Point(scene_corners.get(1,0)), new Point(scene_corners.get(2,0)), new Scalar(0, 255, 0),4);
Core.line(img_matches, new Point(scene_corners.get(2,0)), new Point(scene_corners.get(3,0)), new Scalar(0, 255, 0),4);
Core.line(img_matches, new Point(scene_corners.get(3,0)), new Point(scene_corners.get(0,0)), new Scalar(0, 255, 0),4);
Highgui.imwrite("./resources/ImageMatching" + ".jpg", img_matches);
createWindow("Image Matching", "resources/ImageMatching.jpg");
}
else
{
System.out.println("Not enough Matches");
System.exit(0);
}
}
The coordinates are in the correct place; you're simply drawing on the wrong image.
Your coordinates are relative to the second image, img_scene. So if you draw your lines only on that image, they'll be correct.
If you want to draw the lines on a composed image, where img_scene is translated to the right by the width of the first image img_object, you simply need to add img_object.cols() to the points' x coordinates.
For example:
Core.line(img_matches,
new Point(scene_corners.get(0,0)[0] + img_object.cols(), scene_corners.get(0,0)[1]),
new Point(scene_corners.get(1,0)[0] + img_object.cols(), scene_corners.get(1,0)[1]),
new Scalar(0, 255, 0),4);
for the first line, and the same for the next 3 lines.
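The same offset can be applied to all four edges in one loop, for example:

// Draw the projected box on the composed image, shifting x by the object's width.
int offset = img_object.cols();
for (int i = 0; i < 4; i++) {
    double[] p1 = scene_corners.get(i, 0);
    double[] p2 = scene_corners.get((i + 1) % 4, 0);
    Core.line(img_matches,
              new Point(p1[0] + offset, p1[1]),
              new Point(p2[0] + offset, p2[1]),
              new Scalar(0, 255, 0), 4);
}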