OpenCV in JAVA face recognizer accuracy? - java

I tried to train the classifier with the sample from internet (from yale), but I keep getting wrong prediction (in fact it only output category "1") when I input one of the training picture as test sample. Could anyone give me some hint? I attached my code below.
(I think the problem might be the training samples as they are already grayscaled. I grayscaled them again because the error bad argument, size(1,30000), (10000,66).... thing shows up If I don't do so.)
import org.opencv.face.FaceRecognizer;
import org.opencv.core.Mat;
import org.opencv.core.MatOfInt;
import org.opencv.core.*;
import org.opencv.face.Face;
import org.opencv.imgcodecs.Imgcodecs;
import java.io.File;
import java.io.FilenameFilter;
import java.util.List;
import java.util.ArrayList;
import org.opencv.imgproc.Imgproc;
public class FaceRecognization {
public static void main(String[] args) {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
String trainingDir = "C:\\Users\\songli\\Desktop\\yale\\train";
String testImgPath = "C:\\Users\\songli\\Desktop\\yale\\5-6.bmp";
Mat testImg = Imgcodecs.imread(testImgPath);
Mat gray = new Mat();
File root = new File(trainingDir);
FilenameFilter bmpFilter = new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.toLowerCase().endsWith(".bmp");
}
};
File[] imageFiles = root.listFiles(bmpFilter);
List<Mat> list = new ArrayList<Mat>(imageFiles.length);
int[] labels = new int[imageFiles.length];
int counter = 0;
int label;
Mat grayImg = new Mat();
Mat grayTestImg = new Mat();
Mat img = new Mat();
for (File image : imageFiles) {
img = Imgcodecs.imread(image.getAbsolutePath());
// System.out.print(img.elemSize());
label = Integer.parseInt(image.getName().split("\\-")[0]);
grayImg.create(img.width(), img.height(), 1);
Imgproc.cvtColor(img, grayImg, Imgproc.COLOR_BGR2GRAY);
list.add(grayImg);
labels[counter] = label;
counter++;
}
// System.out.print(labels[11]);
MatOfInt labels1 = new MatOfInt();
labels1.fromArray(labels);
FaceRecognizer fr = Face.createEigenFaceRecognizer();
fr.train(list, labels1);
grayTestImg.create(testImg.width(), testImg.height(), 1);
Imgproc.cvtColor(testImg, grayTestImg, Imgproc.COLOR_BGR2GRAY);
int predictedlabel = fr.predict_label(grayTestImg);
// Imgcodecs.imwrite("C:\\Users\\songli\\Desktop\\testImg.jpg",
// testImg);
// int[] predLabel = new int[1];
// double[] confidence = new double[1];
// int result = -1;
// fr.predict(testImgGrey,predLabel,confidence);
// result = predLabel[0];
System.out.println("Predicted label: " + predictedlabel);
}
}

Alright guys I know the place I went wrong.
Imgproc.cvtColor(img, grayImg, Imgproc.COLOR_BGR2GRAY); ->
Imgproc.cvtColor(img,img,Imgproc.COLOR_BGR2GRAY);
Done!
A stupid mistake. Sorry guys.

Related

Label number 3 mismatch the shape on axis 1

I'm currently trying to create an image classification Android app using a TensorFlow Lite model. When I open the Android App and try to perform classification, I keep getting this error message
java.lang.IllegalArgumentException: Label number 3 mismatch the shape on axis 1
Here's the content inside my label file
0 A
1 B
2 C
And here's the code of my Classifier class:
package com.ukzn.signchat;
import android.annotation.SuppressLint;
import android.content.Context;
import android.graphics.Bitmap;
import android.media.Image;
import android.util.Log;
import androidx.camera.core.ImageProxy;
import org.tensorflow.lite.DataType;
import org.tensorflow.lite.Interpreter;
import org.tensorflow.lite.support.common.FileUtil;
import org.tensorflow.lite.support.common.TensorProcessor;
import org.tensorflow.lite.support.common.ops.NormalizeOp;
import org.tensorflow.lite.support.image.ImageProcessor;
import org.tensorflow.lite.support.image.TensorImage;
import org.tensorflow.lite.support.image.ops.ResizeOp;
import org.tensorflow.lite.support.image.ops.ResizeWithCropOrPadOp;
import org.tensorflow.lite.support.image.ops.Rot90Op;
import org.tensorflow.lite.support.label.TensorLabel;
import org.tensorflow.lite.support.tensorbuffer.TensorBuffer;
import java.io.IOException;
import java.nio.MappedByteBuffer;
import java.util.List;
import java.util.Map;
public class Classifier {
private Context context;
Interpreter tflite;
final String ASSOCIATED_AXIS_LABELS = "labels.txt";
List<String> associatedAxisLabels = null;
public Classifier(Context context) {
this.context = context;
// load labels to a List<String>
try {
associatedAxisLabels = FileUtil.loadLabels(context, ASSOCIATED_AXIS_LABELS);
} catch (IOException e) {
Log.e("tfliteSupport", "Error reading label file", e);
}
// load model to interpreter
try {
MappedByteBuffer tfliteModel = FileUtil.loadMappedFile(context, "model.tflite");
tflite = new Interpreter(tfliteModel);
} catch (IOException e) {
Log.e("tfliteSupport", "Error reading model", e);
}
}
public String classify(ImageProxy image) {
#SuppressLint("UnsafeExperimentalUsageError")
Image img = image.getImage();
Bitmap bitmap = Utils.toBitmap(img);
int rotation = Utils.getImageRotation(image);
int width = bitmap.getWidth();
int height = bitmap.getHeight();
int size = height > width ? width : height;
ImageProcessor imageProcessor = new ImageProcessor.Builder()
.add(new ResizeWithCropOrPadOp(size, size))
.add(new ResizeOp(224, 224, ResizeOp.ResizeMethod.BILINEAR)) // changed from 128x128
.add(new Rot90Op(rotation))
.build();
TensorImage tensorImage = new TensorImage(DataType.UINT8);
tensorImage.load(bitmap);
tensorImage = imageProcessor.process(tensorImage);
TensorBuffer probabilityBuffer = TensorBuffer.createFixedSize(new int[]{1, 224, 224, 3}, DataType.UINT8);
if (null != tflite) {
tflite.run(tensorImage.getBuffer(), probabilityBuffer.getBuffer());
}
TensorProcessor probabilityProcessor = new TensorProcessor.Builder().add(new NormalizeOp(0, 255)).build();
String result = "";
if (null != associatedAxisLabels) {
// Map of labels and their corresponding probability
TensorLabel labels = new TensorLabel(associatedAxisLabels, probabilityProcessor.process(probabilityBuffer));
// Create a map to access the result based on label
Map<String, Float> floatMap = labels.getMapWithFloatValue();
result = Utils.writeResults(floatMap);
}
return result;
}
}
The classifier is probably based on the MobileNet label format, which requires that labels start from 1. Since you have 0, 1, 2 & it ignores the 0, it doesn't find the 3.

How can i optimize batik svg so it can generate small size file

import java.awt.Color;
import java.awt.Dimension;
import java.awt.Insets;
import java.io.IOException;
import java.io.StringWriter;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.batik.dom.GenericDOMImplementation;
import org.apache.batik.svggen.SVGGeneratorContext;
import org.apache.batik.svggen.SVGGraphics2D;
import org.scilab.forge.jlatexmath.TeXConstants;
import org.scilab.forge.jlatexmath.TeXFormula;
import org.scilab.forge.jlatexmath.TeXFormula.TeXIconBuilder;
import org.scilab.forge.jlatexmath.TeXIcon;
import org.w3c.dom.DOMImplementation;
import org.w3c.dom.Document;
public class jlatexmath {
private final static String SVG_NS = "http://www.w3.org/2000/svg";
private final static String SVG_ROOT = "svg";
private final static float FONT_SIZE = 20;
private String renderLatex(String source) {
DOMImplementation DOMImpl = GenericDOMImplementation.getDOMImplementation();
Document document = DOMImpl.createDocument(SVG_NS, SVG_ROOT, null);
SVGGeneratorContext ctx = SVGGeneratorContext.createDefault(document);
SVGGraphics2D g = new SVGGraphics2D(ctx, true);
TeXFormula formula = new TeXFormula(source);
TeXFormula.TeXIconBuilder builder = formula.new TeXIconBuilder();
builder.setStyle(TeXConstants.STYLE_DISPLAY);
builder.setSize(FONT_SIZE);
TeXIcon icon = builder.build();
icon.setInsets(new Insets(0, 0, 0, 0));
g.setSVGCanvasSize(new Dimension(icon.getIconWidth(), icon.getIconHeight()));
g.setColor(new Color(0, 0, 0, 0));
g.fillRect(0, 0, icon.getIconWidth(), icon.getIconHeight());
icon.paintIcon(null, g, 0, 0);
StringWriter out = new StringWriter();
try {
g.stream(out, true);
out.flush();
out.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
return out.toString();
}
public static void main(String[] args) throws IOException {
String latex = "(a+b)^{2}=a^{2}+2ab+b^{2}";
jlatexmath j = new jlatexmath();
String svgString = j.renderLatex(latex);
Files.write(Paths.get("D:/latex.svg"), svgString.getBytes());
System.out.println(svgString);
}
}
I am using jlatexmath-1.0.7 for generating latex images that works fine. but i need svg out so i use apache batik for the same and above code is generating ~17KB file. Some online tool like codecogs.com generating svg of ~7KB for same latex input. how can i remove un-necessary information from svg in java so it can generate less size image.
Have you tried passing false in the constructor SVGGraphics2D(ctx, true)?
It uses text rather than graphics and so the resulting file size is also smaller.

How to run a storm topology with tensorflow model in hadoop cluster

The following is my code to load a tensorflow model in java for object detection and execute the same in storm:
package object_det.object_det;
import java.awt.BorderLayout;
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Base64;
import java.util.HashMap;
import java.util.List;
import java.util.Map.Entry;
import javax.swing.ImageIcon;
import javax.swing.JFrame;
import javax.swing.JLabel;
import org.apache.storm.LocalCluster;
import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.StringScheme;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Tuple;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.imgproc.Imgproc;
import org.tensorflow.SavedModelBundle;
import org.tensorflow.Tensor;
import org.tensorflow.framework.ConfigProto;
import org.tensorflow.framework.GPUOptions;
import org.tensorflow.framework.MetaGraphDef;
import org.tensorflow.framework.SignatureDef;
import org.tensorflow.framework.TensorInfo;
import org.tensorflow.types.UInt8;
import com.google.protobuf.TextFormat;
import object_detection.protos.StringIntLabelMapOuterClass.StringIntLabelMap;
import object_detection.protos.StringIntLabelMapOuterClass.StringIntLabelMapItem;
public class Objectdetect1 {
static int left = 0;
static int bot = 0;
static int top = 0;
static int right = 0;
public static class PrinterBolt extends BaseBasicBolt {
int ii = 1;
JFrame frame=new JFrame();
JLabel jLabel = new JLabel();
static {
nu.pattern.OpenCV.loadShared();
}
public void declareOutputFields(OutputFieldsDeclarer declarer) {
}
public void execute(Tuple tuple, BasicOutputCollector collector) {
String output = tuple.getString(0);
output = output.replaceAll("\\[", "").replaceAll("\\]","");
JSONParser parser = new JSONParser();
JSONObject json = null;
try {
json = (JSONObject) parser.parse(output);
} catch (ParseException e) {
e.printStackTrace();
}
long rows = (Long) json.get("rows");
int row=(int)rows;
long columns = (Long) json.get("cols");
int cols=(int)columns;
long type_data = (Long) json.get("type");
int typedata=(int)type_data;
String base64 = (String) json.get("data");
String cameraId = (String) json.get("cameraId");
String timestamp = (String) json.get("timestamp");
Mat mat1 = new Mat(row,cols, typedata);
mat1.put(0, 0, Base64.getDecoder().decode(base64));
String[] labels = null;
try {
labels = loadLabels("label.pbtxt");
} catch (Exception e) {
e.printStackTrace();
}
try (SavedModelBundle model = SavedModelBundle.load("model", "serve")) {
printSignature(model);
List<Tensor<?>> outputs = null;
try (Tensor<UInt8> input = makeImageTensor(mat1))
{
outputs =
model
.session()
.runner()
.feed("image_tensor", input)
.fetch("detection_scores")
.fetch("detection_classes")
.fetch("detection_boxes")
.run();
}
try (Tensor<Float> scoresT = outputs.get(0).expect(Float.class);
Tensor<Float> classesT = outputs.get(1).expect(Float.class);
Tensor<Float> boxesT = outputs.get(2).expect(Float.class)) {
int maxObjects = (int) scoresT.shape()[1];
float[] scores = scoresT.copyTo(new float[1][maxObjects])[0];
float[] classes = classesT.copyTo(new float[1][maxObjects])[0];
float[][] boxes = boxesT.copyTo(new float[1][maxObjects][4])[0];
boolean foundSomething = false;
int cnt = 0;
for (int i = 0; i < scores.length; ++i) {
if (scores[i] < 0.5) {
continue;
}
cnt ++;
foundSomething = true;
System.out.printf("\tFound %-20s (score: %.4f)\n", labels[(int) classes[i]], scores[i]);
left = (int) Math.round(boxes[i][1] * cols);
top = (int) Math.round(boxes[i][0] * row);
right = (int) Math.round(boxes[i][3] * cols);
bot = (int) Math.round(boxes[i][2] * row);
Imgproc.rectangle(mat1, new Point(right,bot), new Point(left,top),new Scalar(0,69,255),2);
Imgproc.putText(mat1,labels[(int) classes[i]] , new Point(left,top), Core.FONT_HERSHEY_PLAIN, 1.6, new Scalar(240,248,255),2);
BufferedImage bimg = bufferedImage(mat1);
ImageIcon imageIcon = new ImageIcon(bimg);
jLabel.setIcon(imageIcon);
frame.getContentPane().add(jLabel, BorderLayout.CENTER);
frame.pack();
frame.setLocationRelativeTo(null);
frame.setVisible(true);
}
if (!foundSomething) {
System.out.println("No objects detected with a high enough score.");
BufferedImage bimg = bufferedImage(mat1);
ImageIcon imageIcon = new ImageIcon(bimg);
jLabel.setIcon(imageIcon);
frame.getContentPane().add(jLabel, BorderLayout.CENTER);
frame.pack();
frame.setLocationRelativeTo(null);
frame.setVisible(true);
}
}
}
catch (Exception e) {
e.printStackTrace();
}
}
private static void printSignature(SavedModelBundle model) throws Exception {
MetaGraphDef m = MetaGraphDef.parseFrom(model.metaGraphDef());
SignatureDef sig = m.getSignatureDefOrThrow("serving_default");
int numInputs = sig.getInputsCount();
int i = 1;
System.out.println("MODEL SIGNATURE");
System.out.println("Inputs:");
for (Entry<String, TensorInfo> entry : sig.getInputsMap().entrySet()) {
TensorInfo t = entry.getValue();
System.out.printf(
"%d of %d: %-20s (Node name in graph: %-20s, type: %s)\n",
i++, numInputs, entry.getKey(), t.getName(), t.getDtype());
}
int numOutputs = sig.getOutputsCount();
i = 1;
System.out.println("Outputs:");
for (Entry<String, TensorInfo> entry : sig.getOutputsMap().entrySet()) {
TensorInfo t = entry.getValue();
System.out.printf(
"%d of %d: %-20s (Node name in graph: %-20s, type: %s)\n",
i++, numOutputs, entry.getKey(), t.getName(), t.getDtype());
}
System.out.println("-----------------------------------------------");
}
private static String[] loadLabels(String filename) throws Exception {
String text = new String(Files.readAllBytes(Paths.get(filename)), StandardCharsets.UTF_8);
StringIntLabelMap.Builder builder = StringIntLabelMap.newBuilder();
TextFormat.merge(text, builder);
StringIntLabelMap proto = builder.build();
int maxId = 0;
for (StringIntLabelMapItem item : proto.getItemList()) {
if (item.getId() > maxId) {
maxId = item.getId();
}
}
String[] ret = new String[maxId + 1];
for (StringIntLabelMapItem item : proto.getItemList()) {
ret[item.getId()] = item.getDisplayName();
}
return ret;
}
private static void bgr2rgb(byte[] data) {
for (int i = 0; i < data.length; i += 3) {
byte tmp = data[i];
data[i] = data[i + 2];
data[i + 2] = tmp;
}
}
public static BufferedImage bufferedImage(Mat m) {
int type = BufferedImage.TYPE_BYTE_GRAY;
if ( m.channels() > 1 ) {
type = BufferedImage.TYPE_3BYTE_BGR;
}
BufferedImage image = new BufferedImage(m.cols(),m.rows(), type);
m.get(0,0,((DataBufferByte)image.getRaster().getDataBuffer()).getData()); // get all the pixels
return image;
}
private static Tensor<UInt8> makeImageTensor(Mat m) throws IOException {
BufferedImage img = bufferedImage(m);
if (img.getType() != BufferedImage.TYPE_3BYTE_BGR) {
throw new IOException(
String.format(
"Expected 3-byte BGR encoding in BufferedImage, found %d (file: %s). This code could be made more robust",
img.getType()));
}
byte[] data = ((DataBufferByte) img.getData().getDataBuffer()).getData();
bgr2rgb(data);
final long BATCH_SIZE = 1;
final long CHANNELS = 3;
long[] shape = new long[] {BATCH_SIZE, img.getHeight(), img.getWidth(), CHANNELS};
return Tensor.create(UInt8.class, shape, ByteBuffer.wrap(data));
}
private static void printUsage(PrintStream s) {
s.println("USAGE: <model> <label_map> <image> [<image>] [<image>]");
s.println("");
s.println("Where");
s.println("<model> is the path to the SavedModel directory of the model to use.");
s.println(" For example, the saved_model directory in tarballs from ");
s.println(
" https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md)");
s.println("");
s.println(
"<label_map> is the path to a file containing information about the labels detected by the model.");
s.println(" For example, one of the .pbtxt files from ");
s.println(
" https://github.com/tensorflow/models/tree/master/research/object_detection/data");
s.println("");
s.println("<image> is the path to an image file.");
s.println(" Sample images can be found from the COCO, Kitti, or Open Images dataset.");
s.println(
" See: https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md");
}
}
public static void main(String[] args) {
final BrokerHosts zkrHosts = new ZkHosts(args[0]);
final String kafkaTopic = args[1];
final String zkRoot = args[2];
final String clientId = args[3];
final SpoutConfig kafkaConf = new SpoutConfig(zkrHosts, kafkaTopic, zkRoot, clientId);
kafkaConf.fetchSizeBytes = 30971520;
kafkaConf.scheme = new SchemeAsMultiScheme(new StringScheme());
final TopologyBuilder topologyBuilder = new TopologyBuilder();
topologyBuilder.setSpout("kafka-spout", new KafkaSpout(kafkaConf), 1);
topologyBuilder.setBolt("print-messages", new PrinterBolt()).shuffleGrouping("kafka-spout");
final LocalCluster localCluster = new LocalCluster();
localCluster.submitTopology("kafka-topology", new HashMap<Object, Object>(), topologyBuilder.createTopology());
}
}
The above code is converted to jar using
mvn clean install shade:shade
When this jar is submitted to single node cluster using
storm -jar file.jar object_det.object_det.Objectdetect1
zookeeperhost:2181 topicname /brokers test
the code gets successfully executed.
But when the same jar is submitted in multi-node hadoop cluster ,the following error is shown
Running: /usr/jdk64/jdk1.8.0_112/bin/java -server -Ddaemon.name=
-Dstorm.options= -Dstorm.home=/usr/hdp/3.1.0.0-78/storm -Dstorm.log.dir=/var/log/storm -Djava.library.path=/usr/local/lib:/opt/local/lib:/usr/lib -Dstorm.conf.file= -cp /usr/hdp/3.1.0.0-78/storm/*:/usr/hdp/3.1.0.0-78/storm/lib/*:/usr/hdp/3.1.0.0-78/storm/extlib/* org.apache.storm.daemon.ClientJarTransformerRunner org.apache.storm.hack.StormShadeTransformer strmjr2-0.0.1-SNAPSHOT.jar /tmp/011dcea098a811e9b8d1f9e5e43755af.jar Exception in thread "main" java.lang.IllegalArgumentException at org.apache.storm.hack.shade.org.objectweb.asm.ClassReader.<init>(Unknown Source) at org.apache.storm.hack.shade.org.objectweb.asm.ClassReader.<init>(Unknown Source) at org.apache.storm.hack.shade.org.objectweb.asm.ClassReader.<init>(Unknown Source) at org.apache.storm.hack.DefaultShader.addRemappedClass(DefaultShader.java:182) at org.apache.storm.hack.DefaultShader.shadeJarStream(DefaultShader.java:103) at org.apache.storm.hack.StormShadeTransformer.transform(StormShadeTransformer.java:35) at org.apache.storm.daemon.ClientJarTransformerRunner.main(ClientJarTransformerRunner.java:37) Running: /usr/jdk64/jdk1.8.0_112/bin/java -Ddaemon.name=
-Dstorm.options= -Dstorm.home=/usr/hdp/3.1.0.0-78/storm -Dstorm.log.dir=/var/log/storm -Djava.library.path=/usr/local/lib:/opt/local/lib:/usr/lib -Dstorm.conf.file= -cp /usr/hdp/3.1.0.0-78/storm/*:/usr/hdp/3.1.0.0-78/storm/lib/*:/usr/hdp/3.1.0.0-78/storm/extlib/*:/tmp/011dcea098a811e9b8d1f9e5e43755af.jar:/usr/hdp/current/storm-supervisor/conf:/usr/hdp/3.1.0.0-78/storm/bin
-Dstorm.jar=/tmp/011dcea098a811e9b8d1f9e5e43755af.jar -Dstorm.dependency.jars= -Dstorm.dependency.artifacts={} artifactid.groupid.main
Error: Could not find or load main class
Is it possible to run a storm topology with tensorflow model in hadoop cluster.If yes, please help.

OpenCV Error: Assertion failed (!empty()) in java and opencv 3.0 and what does it mean?

I am trying to run a code in java and when I run this code, it throws the following error:
OpenCV Error: Assertion failed (!empty()) in cv::CascadeClassifier::detectMultiScale, file C:\builds\master_PackSlaveAddon-win64-vc12-static\opencv\modules\objdetect\src\cascadedetect.cpp, line 1634
Exception in thread "main" CvException [org.opencv.core.CvException: cv::Exception: C:\builds\master_PackSlaveAddon-win64-vc12-static\opencv\modules\objdetect\src\cascadedetect.cpp:1634: error: (-215) !empty() in function cv::CascadeClassifier::detectMultiScale
]
at org.opencv.objdetect.CascadeClassifier.detectMultiScale_1(Native Method)
at org.opencv.objdetect.CascadeClassifier.detectMultiScale(CascadeClassifier.java:103)
at FaceDetector.main(FaceDetector.java:30)
My source code is the following:
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfRect;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.objdetect.CascadeClassifier;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
public class FaceDetector {
public static void main(String[] args) {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
System.out.println("\nRunning FaceDetector");
CascadeClassifier faceDetector = new CascadeClassifier(FaceDetector.class.getResource("haarcascade_frontalface_alt.xml").getPath());
//CascadeClassifier cascade1 = new CascadeClassifier("C:/OpenCV/opencv/sources/data/haarcascades/haarcascade_frontalface_alt.xml");
//CascadeClassifier cascade1 = new CascadeClassifier("C:/OpenCV/opencv/sources/data/lbpcascade/lbpcascade_frontalface.xml");
//CascadeClassifier cascade1=new CascadeClassifier();
//cascade1.load("C:/opencv2.4.9/sources/data/haarcascades/haarcascade_frontalface_alt.xml");
faceDetector.load("C:/opencv2.4.9/sources/data/haarcascades/haarcascade_frontalface_alt.xml");
System.out.println("step1");
Mat image = Imgcodecs.imread(FaceDetector.class.getResource("anuj.jpg").getPath());
System.out.println("step2");
MatOfRect faceDetections = new MatOfRect();
System.out.println("step3");
faceDetector.detectMultiScale(image, faceDetections);
System.out.println("step4");
try {
System.out.println(String.format("Detected %s faces", faceDetections.toArray().length));
} catch (Exception e) {
// TODO Auto-generated catch block
System.err.println("ERROR IS HERE");
//e.printStackTrace();
}
for (Rect rect : faceDetections.toArray()) {
Imgproc.rectangle(image, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height),
new Scalar(0, 255, 0));
}
String filename = "ouput.png";
System.out.println(String.format("Writing %s", filename));
Imgcodecs.imwrite(filename, image);
}
}
Please tell me what is my mistake. I am not able to solve this. I also tried many variations in the code but it does not work.
It seems that the classifier is not being loaded properly from file.
Please ensure that faceDetector.load() returns true, otherwise the file is not being read.
This was posted 5 months ago, but for the sake of people who are still going to be faced with this challenge after trying all proposed solutions, there is another possibility which I found out after facing same challenge. If there are spaces in the URL returned by getPath(), the spaces are returned as "%20".
For example:
/C:/Users/Ayomide.Johnson/Documents/NetBeansProjects/OpenCV%20Test%20Project/build/classes/haarcascade_frontalface_alt.xml
You need to change the "%20" back to spaces.
My tweek was:
FaceDetector.class.getResource("x.JPG").getPath().substring(1).replace("%20", " ") and it worked!
Note: The substring(1) is to remove the initial "/" in the path. If you do not need that call, you can remove it.
I was also struggling with the same problem. Indicating the directory of the haarcascade_frontalface_alt.xml file worked fine for me. You may try it too.
package faceDetection;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfRect;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.objdetect.CascadeClassifier;
public class FaceDetection
{
public static void main(String[] args)
{
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
CascadeClassifier faceDetector = new CascadeClassifier();
faceDetector.load("D:\\OpenCv\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml");
System.out.println ( "Working" );
// Input image
Mat image = Imgcodecs.imread("E:\\input.jpg");
// Detecting faces
MatOfRect faceDetections = new MatOfRect();
faceDetector.detectMultiScale(image, faceDetections);
// Creating a rectangular box showing faces detected
for (Rect rect : faceDetections.toArray())
{
Imgproc.rectangle(image, new Point(rect.x, rect.y),
new Point(rect.x + rect.width, rect.y + rect.height),
new Scalar(0, 255, 0));
}
// Saving the output image
String filename = "Ouput.jpg";
Imgcodecs.imwrite("E:\\"+filename, image);
}
}
"C:\\opencv2.4.9\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml"
Use double slash when you give path in windows.
Adding convert-buffered-image to mat type variable fixes the problem.
package facedetect;
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.io.File;
import java.io.IOException;
import javax.imageio.ImageIO;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfRect;
import org.opencv.core.Rect;
import org.opencv.objdetect.CascadeClassifier;
public class FaceDetector {
// https://blog.openshift.com/day-12-opencv-face-detection-for-java-developers/
// make user library and add it to project
public static void main(String[] args) throws IOException {
BufferedImage image = ImageIO.read(new File("/hayanchoi/scene1.png"));
detectFace(image);
}
private static Mat convertBufImg2Mat(BufferedImage image) {
DataBufferByte s;
byte[] data = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
Mat mat = new Mat(image.getHeight(), image.getWidth(), CvType.CV_8UC3);
mat.put(0, 0, data);
return mat;
}
private static int detectFace(BufferedImage image) {
System.out.println("step0: Running FaceDetector");
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
CascadeClassifier faceDetector = new CascadeClassifier(
FaceDetector.class.getResource("haarcascade_frontalface_alt.xml").getPath());
if (!faceDetector.load("E:/hayanchoi/FaceDetectionTest/bin/facedetect/haarcascade_frontalface_alt.xml")) {
return -1;
}
System.out.println("step1: convert bufferedimage to mat type");
Mat matImage = convertBufImg2Mat(image);
System.out.print("step2: detect face- ");
MatOfRect faceDetections = new MatOfRect();
faceDetector.detectMultiScale(matImage, faceDetections);
System.out.println(String.format(" %s faces", faceDetections.toArray().length));
System.out.println("step3: write faces");
String filename = "/0_research/" + "ouput.png";
for (Rect rect : faceDetections.toArray()) {
writeFrame(filename, matImage, rect);
}
return faceDetections.toArray().length;
}
private static BufferedImage cropImage(BufferedImage src, Rect rect) {
BufferedImage dest = src.getSubimage(rect.x, rect.y, rect.width, rect.height);
return dest;
}
public static void writeFrame(String filename, Mat mat, Rect rect) {
byte[] data = new byte[mat.rows() * mat.cols() * (int) (mat.elemSize())];
mat.get(0, 0, data);
if (mat.channels() == 3) {
for (int i = 0; i < data.length; i += 3) {
byte temp = data[i];
data[i] = data[i + 2];
data[i + 2] = temp;
}
}
BufferedImage image = new BufferedImage(mat.cols(), mat.rows(), BufferedImage.TYPE_3BYTE_BGR);
image.getRaster().setDataElements(0, 0, mat.cols(), mat.rows(), data);
BufferedImage frame = cropImage(image, rect);
try {
ImageIO.write(frame, "png", new File(filename + ".png"));
} catch (IOException e) {
e.printStackTrace();
}
}
}
Java didn't work with getResoure("...").getPath()
So change all lines have that function to absolute path , example :"C:/Users/USER/workspace/SmallTest/bin/face.jpg"
I just have resolved it.
Sorry for bad English
First of all in this case you should check if CascadeClassifier has properly loaded the specified XML resource.
There are 2 ways of doing this: either check if the load() method returns true. Another way (e.g. if you didn't use this method just specifying the necessary resource in the constructor) is to use empty() method to ensure classifier has been loaded properly.

Unstable face recognition using OpenCV

I’m developing an android application for face recognition, using JavaCV which is an unofficial wrapper of OpenCV. After importing com.googlecode.javacv.cpp.opencv_contrib.FaceRecognizer,
I apply and test the following known methods:
LBPH using createLBPHFaceRecognizer() method
FisherFace using createFisherFaceRecognizer() method
EigenFace using createEigenFaceRecognizer() method
Before I recognize the detected face, I correct the rotated face and crop the proper zone, inspiring from this method
In general when I pass on camera a face already exist in the database, the recognition is ok. But this is not always correct. Sometimes it recognizes the unknown face (not found in Database of trained samples) with a high probability. When we have in the DB two or more faces of similar features (beard, mustache, glasses...) the recognition may be highly mistaken between those faces!
To predict the result using the test face image, I apply the following code:
public String predict(Mat m) {
int n[] = new int[1];
double p[] = new double[1];
IplImage ipl = MatToIplImage(m,WIDTH, HEIGHT);
faceRecognizer.predict(ipl, n, p);
if (n[0]!=-1)
mProb=(int)p[0];
else
mProb=-1;
if (n[0] != -1)
return labelsFile.get(n[0]);
else
return "Unkown";
}
I can’t control the threshold of the probability p, because:
Small p < 50 could predict a correct result.
High p > 70 could predict a false result.
Middle p could predict a correct or false.
As well, I don’t understand why predict() function gives sometime a probability greater than 100 in case of using LBPH??? and in case of Fisher and Eigen it gives very big values (>2000) ??
Can someone help in finding a solution for these bizarre problems?
Is there any suggestion to improve robustness of recognition? especially in case of similarity of two different faces.
The following is the entire class using Facerecognizer:
package org.opencv.javacv.facerecognition;
import static com.googlecode.javacv.cpp.opencv_highgui.*;
import static com.googlecode.javacv.cpp.opencv_core.*;
import static com.googlecode.javacv.cpp.opencv_imgproc.*;
import static com.googlecode.javacv.cpp.opencv_contrib.*;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.util.ArrayList;
import org.opencv.android.Utils;
import org.opencv.core.Mat;
import com.googlecode.javacv.cpp.opencv_imgproc;
import com.googlecode.javacv.cpp.opencv_contrib.FaceRecognizer;
import com.googlecode.javacv.cpp.opencv_core.IplImage;
import com.googlecode.javacv.cpp.opencv_core.MatVector;
import android.graphics.Bitmap;
import android.os.Environment;
import android.util.Log;
import android.widget.Toast;
public class PersonRecognizer {
public final static int MAXIMG = 100;
FaceRecognizer faceRecognizer;
String mPath;
int count=0;
labels labelsFile;
static final int WIDTH= 128;
static final int HEIGHT= 128;;
private int mProb=999;
PersonRecognizer(String path)
{
faceRecognizer = com.googlecode.javacv.cpp.opencv_contrib.createLBPHFaceRecognizer(2,8,8,8,200);
// path=Environment.getExternalStorageDirectory()+"/facerecog/faces/";
mPath=path;
labelsFile= new labels(mPath);
}
void changeRecognizer(int nRec)
{
switch(nRec) {
case 0: faceRecognizer = com.googlecode.javacv.cpp.opencv_contrib.createLBPHFaceRecognizer(1,8,8,8,100);
break;
case 1: faceRecognizer = com.googlecode.javacv.cpp.opencv_contrib.createFisherFaceRecognizer();
break;
case 2: faceRecognizer = com.googlecode.javacv.cpp.opencv_contrib.createEigenFaceRecognizer();
break;
}
train();
}
void add(Mat m, String description) {
Bitmap bmp= Bitmap.createBitmap(m.width(), m.height(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(m,bmp);
bmp= Bitmap.createScaledBitmap(bmp, WIDTH, HEIGHT, false);
FileOutputStream f;
try {
f = new FileOutputStream(mPath+description+"-"+count+".jpg",true);
count++;
bmp.compress(Bitmap.CompressFormat.JPEG, 100, f);
f.close();
} catch (Exception e) {
Log.e("error",e.getCause()+" "+e.getMessage());
e.printStackTrace();
}
}
public boolean train() {
File root = new File(mPath);
Log.i("mPath",mPath);
FilenameFilter pngFilter = new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.toLowerCase().endsWith(".jpg");
};
};
File[] imageFiles = root.listFiles(pngFilter);
MatVector images = new MatVector(imageFiles.length);
int[] labels = new int[imageFiles.length];
int counter = 0;
int label;
IplImage img=null;
IplImage grayImg;
int i1=mPath.length();
for (File image : imageFiles) {
String p = image.getAbsolutePath();
img = cvLoadImage(p);
if (img==null)
Log.e("Error","Error cVLoadImage");
Log.i("image",p);
int i2=p.lastIndexOf("-");
int i3=p.lastIndexOf(".");
int icount=Integer.parseInt(p.substring(i2+1,i3));
if (count<icount) count++;
String description=p.substring(i1,i2);
if (labelsFile.get(description)<0)
labelsFile.add(description, labelsFile.max()+1);
label = labelsFile.get(description);
grayImg = IplImage.create(img.width(), img.height(), IPL_DEPTH_8U, 1);
cvCvtColor(img, grayImg, CV_BGR2GRAY);
images.put(counter, grayImg);
labels[counter] = label;
counter++;
}
if (counter>0)
if (labelsFile.max()>1)
faceRecognizer.train(images, labels);
labelsFile.Save();
return true;
}
public boolean canPredict()
{
if (labelsFile.max()>1)
return true;
else
return false;
}
public String predict(Mat m) {
if (!canPredict())
return "";
int n[] = new int[1];
double p[] = new double[1];
IplImage ipl = MatToIplImage(m,WIDTH, HEIGHT);
// IplImage ipl = MatToIplImage(m,-1, -1);
faceRecognizer.predict(ipl, n, p);
if (n[0]!=-1)
mProb=(int)p[0];
else
mProb=-1;
// if ((n[0] != -1)&&(p[0]<95))
if (n[0] != -1)
return labelsFile.get(n[0]);
else
return "Unkown";
}
IplImage MatToIplImage(Mat m,int width,int heigth)
{
Bitmap bmp=Bitmap.createBitmap(m.width(), m.height(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(m, bmp);
return BitmapToIplImage(bmp,width, heigth);
}
IplImage BitmapToIplImage(Bitmap bmp, int width, int height) {
if ((width != -1) || (height != -1)) {
Bitmap bmp2 = Bitmap.createScaledBitmap(bmp, width, height, false);
bmp = bmp2;
}
IplImage image = IplImage.create(bmp.getWidth(), bmp.getHeight(),
IPL_DEPTH_8U, 4);
bmp.copyPixelsToBuffer(image.getByteBuffer());
IplImage grayImg = IplImage.create(image.width(), image.height(),
IPL_DEPTH_8U, 1);
cvCvtColor(image, grayImg, opencv_imgproc.CV_BGR2GRAY);
return grayImg;
}
protected void SaveBmp(Bitmap bmp,String path)
{
FileOutputStream file;
try {
file = new FileOutputStream(path , true);
bmp.compress(Bitmap.CompressFormat.JPEG,100,file);
file.close();
}
catch (Exception e) {
// TODO Auto-generated catch block
Log.e("",e.getMessage()+e.getCause());
e.printStackTrace();
}
}
public void load() {
train();
}
public int getProb() {
// TODO Auto-generated method stub
return mProb;
}
}
I think you need to implement something to be more robust to illumination changes. see: Illumination normalization in OpenCV
Then, in order to manage similarity between images maybe you can use something like Principal component Analysis.

Categories