Recently I am trying to implement an image object detection tool based on YOLO. To start with, I have used the codes here. Things sounds fine except the fact the program doesnt pass the following line of code (line 72) and will not go into the loop. :
if (cap.read(frame))
In other words, if a break point is placed at that line, the program wont go to next step.. Any idea how to fix this?
package yoloexample;
import org.opencv.core.*;
import org.opencv.dnn.*;
import org.opencv.utils.*;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.objdetect.CascadeClassifier;
import org.opencv.videoio.VideoCapture;
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.awt.image.WritableRaster;
import java.io.ByteArrayInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import javax.imageio.ImageIO;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;
public class Yoloexample {
private static List<String> getOutputNames(Net net) {
List<String> names = new ArrayList<>();
List<Integer> outLayers = net.getUnconnectedOutLayers().toList();
List<String> layersNames = net.getLayerNames();
outLayers.forEach((item) -> names.add(layersNames.get(item - 1)));//unfold and create R-CNN layers from the loaded YOLO model//
System.out.println(names);
return names;
}
/**
* #param args the command line arguments
*/
public static void main(String[] args) {
// TODO code application logic here
System.load("\\opencv\\opencv\\build\\java\\x64\\opencv_java420.dll"); // Load the openCV 4.0 dll //
String modelWeights = "g:\\yolov3.weights"; //Download and load only wights for YOLO , this is obtained from official YOLO site//
String modelConfiguration = "g:\\yolov3.cfg";//Download and load cfg file for YOLO , can be obtained from official site//
String filePath = "test.mp4"; //My video file to be analysed//
VideoCapture cap = new VideoCapture(filePath);// Load video using the videocapture method//
Mat frame = new Mat(); // define a matrix to extract and store pixel info from video//
//cap.read(frame);
JFrame jframe = new JFrame("Video"); // the lines below create a frame to display the resultant video with object detection and localization//
JLabel vidpanel = new JLabel();
jframe.setContentPane(vidpanel);
jframe.setSize(600, 600);
jframe.setVisible(true);// we instantiate the frame here//
Net net = Dnn.readNetFromDarknet(modelConfiguration, modelWeights); //OpenCV DNN supports models trained from various frameworks like Caffe and TensorFlow. It also supports various networks architectures based on YOLO//
//Thread.sleep(5000);
//Mat image = Imgcodecs.imread("D:\\yolo-object-detection\\yolo-object-detection\\images\\soccer.jpg");
Size sz = new Size(288, 288);
List<Mat> result = new ArrayList<>();
List<String> outBlobNames = getOutputNames(net);
while (true) {
if (cap.read(frame)) {
Mat blob = Dnn.blobFromImage(frame, 0.00392, sz, new Scalar(0), true, false); // We feed one frame of video into the network at a time, we have to convert the image to a blob. A blob is a pre-processed image that serves as the input.//
net.setInput(blob);
net.forward(result, outBlobNames); //Feed forward the model to get output //
// outBlobNames.forEach(System.out::println);
// result.forEach(System.out::println);
float confThreshold = 0.6f; //Insert thresholding beyond which the model will detect objects//
List<Integer> clsIds = new ArrayList<>();
List<Float> confs = new ArrayList<>();
List<Rect> rects = new ArrayList<>();
for (int i = 0; i < result.size(); ++i) {
// each row is a candidate detection, the 1st 4 numbers are
// [center_x, center_y, width, height], followed by (N-4) class probabilities
Mat level = result.get(i);
for (int j = 0; j < level.rows(); ++j) {
Mat row = level.row(j);
Mat scores = row.colRange(5, level.cols());
Core.MinMaxLocResult mm = Core.minMaxLoc(scores);
float confidence = (float) mm.maxVal;
Point classIdPoint = mm.maxLoc;
if (confidence > confThreshold) {
int centerX = (int) (row.get(0, 0)[0] * frame.cols()); //scaling for drawing the bounding boxes//
int centerY = (int) (row.get(0, 1)[0] * frame.rows());
int width = (int) (row.get(0, 2)[0] * frame.cols());
int height = (int) (row.get(0, 3)[0] * frame.rows());
int left = centerX - width / 2;
int top = centerY - height / 2;
clsIds.add((int) classIdPoint.x);
confs.add((float) confidence);
rects.add(new Rect(left, top, width, height));
}
}
}
float nmsThresh = 0.5f;
MatOfFloat confidences = new MatOfFloat(Converters.vector_float_to_Mat(confs));
Rect[] boxesArray = rects.toArray(new Rect[0]);
MatOfRect boxes = new MatOfRect(boxesArray);
MatOfInt indices = new MatOfInt();
Dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThresh, indices); //We draw the bounding boxes for objects here//
int[] ind = indices.toArray();
int j = 0;
for (int i = 0; i < ind.length; ++i) {
int idx = ind[i];
Rect box = boxesArray[idx];
Imgproc.rectangle(frame, box.tl(), box.br(), new Scalar(0, 0, 255), 2);
//i=j;
System.out.println(idx);
}
// Imgcodecs.imwrite("D://out.png", image);
//System.out.println("Image Loaded");
ImageIcon image = new ImageIcon(Mat2bufferedImage(frame)); //setting the results into a frame and initializing it //
vidpanel.setIcon(image);
vidpanel.repaint();
System.out.println(j);
System.out.println("Done");
}
}
}
private static BufferedImage Mat2bufferedImage(Mat image) { // The class described here takes in matrix and renders the video to the frame //
MatOfByte bytemat = new MatOfByte();
Imgcodecs.imencode(".jpg", image, bytemat);
byte[] bytes = bytemat.toArray();
InputStream in = new ByteArrayInputStream(bytes);
BufferedImage img = null;
try {
img = ImageIO.read(in);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return img;
}
}
Related
I am doing some basic experimentation on picture filtering using convolution matrix, based on the Wikipedia page about kernels in image processing.
In order to compute the RGB transformations, I am reading the bitmap via a BufferedImage then get the pixels with getRgb(). While testing the simplest identity filter I noticed that for a specific picture I was getting some grey instead of the original black, while for some other picture, the black was OK.
After more testing, I found that without any transform, a simple BufferedImage -> int[] -> BufferedImage results in the greyed result.
What am I missing ? ImageMagick identify shows that both are 8-bit 256 colors pictures without alpha channels.
betty1.png PNG 339x600 339x600+0+0 8-bit Gray 256c 24526B 0.000u 0:00.000
betty2.jpg JPEG 603x797 603x797+0+0 8-bit Gray 256c 126773B 0.000u 0:00.001
With this picture the result is as expected.
With this one, the result is unexpectedly greyed.
Here is a simple sscce test class to show the problem:
import java.awt.BorderLayout;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import javax.imageio.ImageIO;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.WindowConstants;
/* simple test class for convolution matrix */
public class CopyPic {
public static void main(String args[]) throws FileNotFoundException, IOException {
if (args.length < 1) {
System.err.println("Usage: CopyPic <picture_file>");
System.exit(1);
}
String imgPath = args[0];
String inputName = imgPath.substring(0, imgPath.lastIndexOf("."));
File ifile = new File(imgPath);
InputStream fis_in = new FileInputStream(ifile);
BufferedImage bi_in = ImageIO.read(fis_in);
fis_in.close();
int width = bi_in.getWidth();
int height = bi_in.getHeight();
System.out.println(String.format("%s = %d x %d", imgPath, width, height));
int[] rgb_in = new int[width * height];
bi_in.getRGB(0, 0, width, height, rgb_in, 0, width);
BufferedImage bi_out = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB);
// for (int y = 0; y < height; y++) {
// for (int x = 0; x < width; x++) {
// bi_out.setRGB(x, y, rgb_out[y * width + x]);
// }
// }
bi_out.setRGB(0, 0, width, height, rgb_in, 0, width);
display(bi_in, bi_out);
String outputName = inputName + "-copy.png";
File ofile = new File(outputName);
OutputStream fos_out = new FileOutputStream(ofile);
ImageIO.write(bi_out, "PNG", fos_out);
fos_out.flush();
fos_out.close();
System.out.println("Wrote " + outputName);
}
// use that to have internal viewer
private static JFrame frame;
private static JLabel label1, label2;
private static void display(BufferedImage img1, BufferedImage img2) {
if (frame == null) {
frame = new JFrame();
frame.setTitle(String.format("%dx%d Original / Copy", img1.getWidth(), img1.getHeight()));
frame.setSize(img1.getWidth() + img2.getWidth(), img1.getHeight());
frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
label1 = new JLabel();
label1.setIcon(new ImageIcon(img1));
frame.getContentPane().add(label1, BorderLayout.WEST);
label2 = new JLabel();
label2.setIcon(new ImageIcon(img2));
frame.getContentPane().add(label2, BorderLayout.EAST);
frame.setLocationRelativeTo(null);
frame.pack();
frame.setVisible(true);
} else {
label1.setIcon(new ImageIcon(img1));
label2.setIcon(new ImageIcon(img2));
}
}
}
When the ImageIO.read function creates a BufferedImage it uses the type that it thinks is best suited. This type might not be what you expect. In particular, for a JPG image the type might not be TYPE_INT_ARGB.
This is the case for your second image and becomes evident when you print the type of that image:
System.out.println(bi_in.getType());
For that image, this prints 10 on my machine, which represents TYPE_BYTE_GRAY.
So, to fix your problem you should use:
BufferedImage bi_out = new BufferedImage(width, height, bi_in.getType());
I am new to itext7.
I saw an example about columndocumentrenderer. code below.
package com.itextpdf.highlevel.chapter02;
import com.itextpdf.io.font.FontConstants;
import com.itextpdf.kernel.font.PdfFont;
import com.itextpdf.kernel.font.PdfFontFactory;
import com.itextpdf.kernel.geom.PageSize;
import com.itextpdf.kernel.geom.Rectangle;
import com.itextpdf.kernel.pdf.PdfDocument;
import com.itextpdf.kernel.pdf.PdfWriter;
import com.itextpdf.layout.ColumnDocumentRenderer;
import com.itextpdf.layout.Document;
import com.itextpdf.layout.element.AreaBreak;
import com.itextpdf.layout.element.Paragraph;
import com.itextpdf.layout.hyphenation.HyphenationConfig;
import com.itextpdf.layout.property.AreaBreakType;
import com.itextpdf.layout.property.TextAlignment;
import com.itextpdf.test.annotations.WrapToTest;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
/**
*
* #author RNDPC-03
*/#WrapToTest
public class C02E08_JekyllHydeV4 {
public static final String SRC = "src/main/resources/txt/jekyll_hyde.txt";
public static final String DEST = "results/chapter02/jekyll_hyde_v4.pdf";
public static void main(String args[]) throws IOException {
File file = new File(DEST);
file.getParentFile().mkdirs();
new C02E08_JekyllHydeV4().createPdf(DEST);
}
public void createPdf(String dest) throws IOException {
//Initialize PDF document
PdfDocument pdf = new PdfDocument(new PdfWriter(dest));
// Initialize document
Document document = new Document(pdf);
//Set column parameters
float offSet = 36;
float gutter = 23;
float columnWidth = (PageSize.A4.getWidth() - offSet * 2) / 2 - gutter;
float columnHeight = PageSize.A4.getHeight() - offSet * 2;
//Define column areas
Rectangle[] columns = {
new Rectangle(offSet, offSet, columnWidth, columnHeight),
new Rectangle(offSet + columnWidth + gutter, offSet, columnWidth, columnHeight)};
document.setRenderer(new ColumnDocumentRenderer(document, columns));
PdfFont font = PdfFontFactory.createFont(FontConstants.TIMES_ROMAN);
PdfFont bold = PdfFontFactory.createFont(FontConstants.HELVETICA_BOLD);
document.setTextAlignment(TextAlignment.JUSTIFIED)
.setFont(font)
.setHyphenation(new HyphenationConfig("en", "uk", 3, 3));
BufferedReader br = new BufferedReader(new FileReader(SRC));
String line;
Paragraph p;
boolean title = true;
AreaBreak nextArea = new AreaBreak(AreaBreakType.NEXT_AREA);
while ((line = br.readLine()) != null) {
p = new Paragraph(line);
if (title) {
p.setFont(bold).setFontSize(12);
title = false;
}
else {
p.setFirstLineIndent(36);
}
if (line.isEmpty()) {
document.add(nextArea);
title = true;
}
document.add(p);
}
//Close document
document.close();
}
}
now this will have a result of this . picture below
now I want to make it into a three(3) column. how to do this? please anyone? or just guide me. thank you!
Here is where you define two columns:
float offSet = 36;
float gutter = 23;
float columnWidth = (PageSize.A4.getWidth() - offSet * 2) / 2 - gutter;
float columnHeight = PageSize.A4.getHeight() - offSet * 2;
//Define column areas
Rectangle[] columns = {
new Rectangle(offSet, offSet, columnWidth, columnHeight),
new Rectangle(offSet + columnWidth + gutter, offSet, columnWidth, columnHeight)};
Changing this to three columns is a no-brainer.
Just replace all of the above by something like this:
Rectangle[] columns = {
new Rectangle(20, 20, 180, 802),
new Rectangle(207, 15, 180, 802),
new Rectangle(394, 15, 180, 802) };
Now that columns has three Rectangle elements, three columns will be drawn. Feel free to adapt the values of x, y, width and height in the Rectangle constructor if the three columns don't have the desired size.
I need to rotate my BufferedImage on 3 axis (x, y and z), by the angles given in 3 integers. Is there any native methods in java? If not, how would I achieve that?
Update #1: I've done some of it with OpenCV... Will update when finished!
Update #2: Since this was just a part of my project, I realized that solving just a part of the problem wouldn't be good, so I used OpenCV getPerspectiveTransform() and then warpPerspective() methods from Imgproc class to transform image. I have basically just ported this code to java and it works fine :)
Also I have changed the thread name due the changes to make it fit the actual question/solution.
Code (I used OpenCV 3.1, since it's the latest version):
import java.awt.Graphics;
import java.awt.image.BufferedImage;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import javax.imageio.ImageIO;
import javax.swing.JFrame;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.imgproc.Imgproc;
import org.opencv.imgcodecs.Imgcodecs;
public class Main extends JFrame {
private static final long serialVersionUID = 1L;
BufferedImage transformed = null;
//These locations are just the corners of the 4 reference points. I am writing the auto recognition part right now :)
Point p4 = new Point(260, 215);
Point p1 = new Point(412, 221);
Point p2 = new Point(464, 444);
Point p3 = new Point(312, 435);
public Main() {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
File f = new File("FILEPATH ");
MatOfPoint2f corners = new MatOfPoint2f();
Mat src = Imgcodecs.imread(f.getAbsolutePath());
corners.push_back(new MatOfPoint2f(p1));
corners.push_back(new MatOfPoint2f(p2));
corners.push_back(new MatOfPoint2f(p3));
corners.push_back(new MatOfPoint2f(p4));
Point center = new Point(0, 0);
for (int i = 0; i < corners.toArray().length; i++) {
center.x += corners.toArray()[i].x;
center.y += corners.toArray()[i].y;
}
center.x /= corners.toArray().length;
center.y /= corners.toArray().length;
sortCorners(corners, center);
Mat quad = Mat.zeros(1000, 1900, CvType.CV_8U);
MatOfPoint2f quad_pts = new MatOfPoint2f();
quad_pts.push_back(new MatOfPoint2f(new Point(0, 0)));
quad_pts.push_back(new MatOfPoint2f(new Point(quad.width(), 0)));
quad_pts.push_back(new MatOfPoint2f(new Point(quad.width(), quad.height())));
quad_pts.push_back(new MatOfPoint2f(new Point(0, quad.height())));
Mat transmtx = Imgproc.getPerspectiveTransform(corners, quad_pts);
Imgproc.warpPerspective(src, quad, transmtx, quad.size());
transformed = matToBufferedImage(quad);
setSize(500, 500);
setLocationRelativeTo(null);
setDefaultCloseOperation(EXIT_ON_CLOSE);
setVisible(true);
}
public void paint(Graphics g) {
g.clearRect(0, 0, this.getWidth(), this.getHeight());
g.drawImage(transformed, 0, 22, null);
}
public MatOfPoint2f sortCorners(MatOfPoint2f corners, Point center) {
MatOfPoint2f top = new MatOfPoint2f();
MatOfPoint2f bot = new MatOfPoint2f();
for (int i = 0; i < corners.toArray().length; i++) {
if (corners.toArray()[i].y < center.y){
top.push_back(new MatOfPoint2f(corners.toArray()[i]));
}
else
bot.push_back(new MatOfPoint2f(corners.toArray()[i]));
}
Point tl = p4;
Point tr = p1;
Point bl = p2;
Point br = p3;
tl = top.toArray()[0].x > top.toArray()[1].x ? top.toArray()[1] : top.toArray()[0];
tr = top.toArray()[0].x > top.toArray()[1].x ? top.toArray()[0] : top.toArray()[1];
bl = bot.toArray()[0].x > bot.toArray()[1].x ? bot.toArray()[1] : bot.toArray()[0];
br = bot.toArray()[0].x > bot.toArray()[1].x ? bot.toArray()[0] : bot.toArray()[1];
corners.release();
corners.push_back(new MatOfPoint2f(tl));
corners.push_back(new MatOfPoint2f(tr));
corners.push_back(new MatOfPoint2f(br));
corners.push_back(new MatOfPoint2f(bl));
System.out.println(corners.toArray()[0] + ", " + corners.toArray()[1] + ", " + corners.toArray()[2] + ", " + corners.toArray()[3] + ", ");
return corners;
}
public BufferedImage matToBufferedImage(Mat image) {
Mat image_tmp = image;
MatOfByte matOfByte = new MatOfByte();
Imgcodecs.imencode(".jpg", image_tmp, matOfByte);
byte[] byteArray = matOfByte.toArray();
BufferedImage bufImage = null;
try {
InputStream in = new ByteArrayInputStream(byteArray);
bufImage = ImageIO.read(in);
} catch (Exception e) {
e.printStackTrace();
}
return bufImage;
}
}
I think that the TransformJ package does what you want, but I don't think it contains native code.
I am new to OpenCV, and working on a face detection project. I wanted to create a webcam interface for face detection and I referred to the tutorial: here
I made following changes to the code( modification to the CascadeClassifier position and HaarCascade file path only):
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfRect;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Size;
import org.opencv.core.Scalar;
import org.opencv.highgui.Highgui;
import org.opencv.highgui.VideoCapture;
import org.opencv.imgproc.Imgproc;
import org.opencv.objdetect.CascadeClassifier;
import static com.googlecode.javacv.cpp.opencv_core.cvFlip;
public class detedctAndDisplay {
/** Global variables */
private static String face_cascade_name = "haarcascade_frontalface_alt.xml";
private static String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
private static CascadeClassifier face_cascade;
private static CascadeClassifier eyes_cascade;
private static String window_name = "Capture - Face detection";
public detedctAndDisplay(){
face_cascade = new CascadeClassifier("C:\\Users\\Admin\\Desktop\\face detection\\FaceDetection\\src\\haarcascade_frontalface_alt.xml");
eyes_cascade = new CascadeClassifier("C:\\Users\\Admin\\Desktop\\face detection\\FaceDetection\\src\\"+eyes_cascade_name);
}
public static void detectAndDisplay(Mat frame)
{
CascadeClassifier face_cascade= new CascadeClassifier("C:\\Users\\Admin\\Desktop\\face detection\\FaceDetection\\src\\haarcascade_frontalface_alt.xml");
Mat frame_gray = new Mat();
MatOfRect faces = new MatOfRect();
Rect[] facesArray = faces.toArray();
// Imgproc.cvtColor(frame, frame_gray, Imgproc.COLOR_BGRA2GRAY);
Imgproc.equalizeHist(frame_gray, frame_gray);
//-- Detect faces
face_cascade.detectMultiScale(frame_gray,faces );
for (int i = 0; i < facesArray.length; i++)
{
Point center = new Point(facesArray[i].x + facesArray[i].width * 0.5, facesArray[i].y + facesArray[i].height * 0.5);
Core.ellipse(frame, center, new Size(facesArray[i].width * 0.5, facesArray[i].height * 0.5), 0, 0, 360, new Scalar(255, 0, 255), 4, 8, 0);
Mat faceROI = frame_gray.submat(facesArray[i]);
MatOfRect eyes = new MatOfRect();
Rect[] eyesArray = eyes.toArray();
//-- In each face, detect eyes
eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0,new Size(30, 30), new Size());
for (int j = 0; j < eyesArray.length; j++)
{
Point center1 = new Point(facesArray[i].x + eyesArray[i].x + eyesArray[i].width * 0.5, facesArray[i].y + eyesArray[i].y + eyesArray[i].height * 0.5);
int radius = (int) Math.round((eyesArray[i].width + eyesArray[i].height) * 0.25);
Core.circle(frame, center1, radius, new Scalar(255, 0, 0), 4, 8, 0);
}
}
//-- Show what you got
Highgui.imwrite(window_name, frame);
}
/**
* #param args
*/
public static void main(String[] args)
{
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
//face_cascade = new CascadeClassifier("C:\\Users\\Admin\\Desktop\\face detection\\FaceDetection\\src\\haarcascade_frontalface_alt.xml");
CascadeClassifier face_cascade1 = new CascadeClassifier("C:\\Users\\Admin\\Desktop\\face detection\\FaceDetection\\src\\haarcascade_frontalface_alt.xml");
//eyes_cascade = new CascadeClassifier("C:\\Users\\Admin\\Desktop\\face detection\\FaceDetection\\src\\"+eyes_cascade_name);
VideoCapture capture;
Mat frame = new Mat();
capture = new VideoCapture(0);
if(!capture.isOpened())
{
System.out.println("Did not connect to camera.");
}
else
{
capture.retrieve(frame);
detectAndDisplay(frame);
capture.release();
}
}
}
I am getting following error:
OpenCV Error: Bad flag (parameter or structure field) (Unrecognized or unsupported array type
in unknown function, file ..\..\..\..\opencv\modules\core\src\array.cpp, line 2482
Exception in thread "main" CvException [org.opencv.core.CvException:
cv::Exception: ..\..\..\..\opencv\modules\core\src\array.cpp:2482: error: (-206) Unrecognized or
unsupported array type
]
at org.opencv.objdetect.CascadeClassifier.detectMultiScale_1(Native Method)
at org.opencv.objdetect.CascadeClassifier.detectMultiScale(CascadeClassifier.java:127)
at detedctAndDisplay.detectAndDisplay(detedctAndDisplay.java:55)
at detedctAndDisplay.main(detedctAndDisplay.java:125)
I am unable to resolve the error.
Also, please tell which jar to import so that cvtColor could be used?
Thanks!
OpenCV is complaining that the "array" (matrix, or image) type is not known because you never initialized frame_gray. You need to actually convert the image to grayscale. You actually have a line that does it but it's commented out:
// Imgproc.cvtColor(frame, frame_gray, Imgproc.COLOR_BGRA2GRAY);
Uncomment that line.
I'm trying to write strings to images, so it's harder to copy the text and run it through a translator.
My code works fine, but I get always a really long image - I rather would like to have a more readable box in where the string is written. My method "StringDiver" does add "\n" but it does not help when writing the string to an image.
Right now I get this output.
Any hint what I could do?
import java.awt.Color;
import java.awt.Font;
import java.awt.Graphics2D;
import java.awt.font.FontRenderContext;
import java.awt.geom.Rectangle2D;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import javax.imageio.ImageIO;
public class writeToImage {
/**
* #param args
*/
public static void main(String[] args) {
// TODO Auto-generated method stub
String newString = "Mein Eindruck ist, dass die politische und öffentliche Meinung in Deutschland anfängt, die wirtschaftliche Zerstörung im Inland und in Europa zu erkennen, die auf einen eventuellen Zusammenbruch des Euro folgen würde.";
String sampleText = StringDivider(newString);
//Image file name
String fileName = "Image";
//create a File Object
File newFile = new File("./" + fileName + ".jpg");
//create the font you wish to use
Font font = new Font("Tahoma", Font.PLAIN, 15);
//create the FontRenderContext object which helps us to measure the text
FontRenderContext frc = new FontRenderContext(null, true, true);
//get the height and width of the text
Rectangle2D bounds = font.getStringBounds(sampleText, frc);
int w = (int) bounds.getWidth();
int h = (int) bounds.getHeight();
//create a BufferedImage object
BufferedImage image = new BufferedImage(w, h, BufferedImage.TYPE_INT_RGB);
//calling createGraphics() to get the Graphics2D
Graphics2D g = image.createGraphics();
//set color and other parameters
g.setColor(Color.WHITE);
g.fillRect(0, 0, w, h);
g.setColor(Color.BLACK);
g.setFont(font);
g.drawString(sampleText, (float) bounds.getX(), (float) -bounds.getY());
//releasing resources
g.dispose();
//creating the file
try {
ImageIO.write(image, "jpg", newFile);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
public static String StringDivider(String s){
StringBuilder sb = new StringBuilder(s);
int i = 0;
while ((i = sb.indexOf(" ", i + 30)) != -1) {
sb.replace(i, i + 1, "\n");
}
return sb.toString();
}
}
g.drawString(sampleText, (float) bounds.getX(), (float) -bounds.getY());
Split text and write every part to image.
Rectangle2D bounds = font.getStringBounds(sampleText, frc);
int w = (int) bounds.getWidth();
int h = (int) bounds.getHeight();
String[] parts = sampleText.split("\n");
//create a BufferedImage object
BufferedImage image = new BufferedImage(w, h * parts.length, BufferedImage.TYPE_INT_RGB);
int index = 0;
for(String part : parts){
g.drawString(part, 0, h * index++);
}
ex:
first part: x=0 ; y=0
second part: x=0 ; y=5
third part: x=0 ; y=10;
heightText = h
Take a look at LineBreakMeasurer. The first code example in the Javadoc is exactly what you're looking for.