I've been asked to write CNN code in DL4J using the YOLOv2 architecture. The problem is that after training completes and I run a simple GUI for validation testing, the displayed image is too bright, and sometimes the image is not displayed at all. I'm not sure whether this problem comes from the earliest stage of training or somewhere else. Here is the code I have so far. For the iterator:
public class faceMaskIterator {
private static final Logger log = org.slf4j.LoggerFactory.getLogger(faceMaskIterator.class);
private static final int seed = 123;
private static Random rng = new Random(seed);
private static String dataDir;
private static Path pathDirectory;
private static InputSplit trainData, testData;
private static final String[] allowedFormats = NativeImageLoader.ALLOWED_FORMATS;
private static final double splitRatio = 0.8;
private static final int nChannels = 3;
public static final int gridWidth = 13;
public static final int gridHeight = 13;
public static final int yolowidth = 416;
public static final int yoloheight = 416;
private static RecordReaderDataSetIterator makeIterator(InputSplit split, Path dir, int batchSize) throws Exception {
ObjectDetectionRecordReader recordReader = new ObjectDetectionRecordReader(yoloheight, yolowidth, nChannels,
gridHeight, gridWidth, new VocLabelProvider(dir.toString()));
recordReader.initialize(split);
RecordReaderDataSetIterator iter = new RecordReaderDataSetIterator(recordReader, batchSize, 1, 1,true);
iter.setPreProcessor(new ImagePreProcessingScaler(0, 1));
return iter;
}
public static RecordReaderDataSetIterator trainIterator(int batchSize) throws Exception {
return makeIterator(trainData, pathDirectory, batchSize);
}
public static RecordReaderDataSetIterator testIterator(int batchSize) throws Exception {
return makeIterator(testData, pathDirectory, batchSize);
}
public static void setup() throws IOException {
log.info("Load data...");
dataDir = Paths.get(
System.getProperty("user.home"),
Helper.getPropValues("dl4j_home.data")
).toString();
pathDirectory = Paths.get(dataDir,"face_mask_dataset");
FileSplit fileSplit = new FileSplit(new File(pathDirectory.toString()),allowedFormats,rng);
PathFilter pathFilter = new RandomPathFilter(rng,allowedFormats);
InputSplit[] sample = fileSplit.sample(pathFilter, splitRatio,1-splitRatio);
trainData = sample[0];
testData = sample[1];
}}
For training:
public class faceMaskPreTrained {
private static final Logger log = LoggerFactory.getLogger(ai.certifai.groupProjek.faceMaskPreTrained.class);
private static int seed = 420;
private static double detectionThreshold = 0.9;
private static int nBoxes = 3;
private static double lambdaNoObj = 0.7;
private static double lambdaCoord = 1.0;
private static double[][] priorBoxes = {{1, 1}, {2, 1}, {1, 2}};
private static int batchSize = 3;
private static int nEpochs = 1;
private static double learningRate = 1e-4;
private static int nClasses = 3;
private static List<String> labels;
private static File modelFilename = new File(System.getProperty("user.dir"), "generated-models/facemask_detector.zip");
private static ComputationGraph model;
private static Frame frame = null;
private static final Scalar GREEN = RGB(0, 255.0, 0);
private static final Scalar YELLOW = RGB(255, 255, 0);
private static final Scalar RED = RGB(255, 0, 0);
private static Scalar[] colormap = {GREEN, YELLOW, RED};
private static String labeltext = null;
public static void main(String[] args) throws Exception {
faceMaskIterator.setup();
RecordReaderDataSetIterator trainIter = faceMaskIterator.trainIterator(batchSize);
RecordReaderDataSetIterator testIter = faceMaskIterator.testIterator(1);
labels = trainIter.getLabels();
if (modelFilename.exists()) {
Nd4j.getRandom().setSeed(seed);
log.info("Load model...");
model = ModelSerializer.restoreComputationGraph(modelFilename);
} else {
Nd4j.getRandom().setSeed(seed);
INDArray priors = Nd4j.create(priorBoxes);
log.info("Build model...");
ComputationGraph pretrained = (ComputationGraph) YOLO2.builder().build().initPretrained();
FineTuneConfiguration fineTuneConf = getFineTuneConfiguration();
model = getComputationGraph(pretrained, priors, fineTuneConf);
System.out.println(model.summary(InputType.convolutional(
faceMaskIterator.yoloheight,
faceMaskIterator.yolowidth,
3))); // InputType depth = number of image channels (3), which here only coincidentally equals nClasses
log.info("Train model...");
UIServer server = UIServer.getInstance();
StatsStorage storage = new InMemoryStatsStorage();
server.attach(storage);
model.setListeners(new ScoreIterationListener(5), new StatsListener(storage,5));
for (int i = 1; i < nEpochs + 1; i++) {
trainIter.reset();
while (trainIter.hasNext()) {
model.fit(trainIter.next());
}
log.info("*** Completed epoch {} ***", i);
}
ModelSerializer.writeModel(model, modelFilename, true);
System.out.println("Model saved.");
}
// Evaluate the model's accuracy by using the test iterator.
OfflineValidationWithTestDataset(testIter);
// Run inference on the webcam stream and draw predictions.
doInference();
}
private static ComputationGraph getComputationGraph(ComputationGraph pretrained, INDArray priors, FineTuneConfiguration fineTuneConf) {
return new TransferLearning.GraphBuilder(pretrained)
.fineTuneConfiguration(fineTuneConf)
.removeVertexKeepConnections("conv2d_23")
.removeVertexKeepConnections("outputs")
.addLayer("conv2d_23",
new ConvolutionLayer.Builder(1, 1)
.nIn(1024)
.nOut(nBoxes * (5 + nClasses))
.stride(1, 1)
.convolutionMode(ConvolutionMode.Same)
.weightInit(WeightInit.XAVIER)
.activation(Activation.IDENTITY)
.build(),
"leaky_re_lu_22")
.addLayer("outputs",
new Yolo2OutputLayer.Builder()
.lambdaNoObj(lambdaNoObj)
.lambdaCoord(lambdaCoord)
.boundingBoxPriors(priors.castTo(DataType.FLOAT))
.build(),
"conv2d_23")
.setOutputs("outputs")
.build();
}
private static FineTuneConfiguration getFineTuneConfiguration() {
return new FineTuneConfiguration.Builder()
.seed(seed)
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
.gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
.gradientNormalizationThreshold(1.0)
.updater(new Adam.Builder().learningRate(learningRate).build())
.l2(0.00001)
.activation(Activation.IDENTITY)
.trainingWorkspaceMode(WorkspaceMode.ENABLED)
.inferenceWorkspaceMode(WorkspaceMode.ENABLED)
.build();
}
// Evaluate visually the performance of the trained object detection model
private static void OfflineValidationWithTestDataset(RecordReaderDataSetIterator test) throws InterruptedException {
NativeImageLoader imageLoader = new NativeImageLoader();
CanvasFrame canvas = new CanvasFrame("Validate Test Dataset");
OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat();
org.deeplearning4j.nn.layers.objdetect.Yolo2OutputLayer yout = (org.deeplearning4j.nn.layers.objdetect.Yolo2OutputLayer) model.getOutputLayer(0);
Mat convertedMat = new Mat();
Mat convertedMat_big = new Mat();
while (test.hasNext() && canvas.isVisible()) {
org.nd4j.linalg.dataset.DataSet ds = test.next();
INDArray features = ds.getFeatures();
INDArray results = model.outputSingle(features);
List<DetectedObject> objs = yout.getPredictedObjects(results, detectionThreshold);
YoloUtils.nms(objs, 0.4);
Mat mat = imageLoader.asMat(features);
mat.convertTo(convertedMat, CV_8U, 255, 0);
int w = mat.cols() * 2;
int h = mat.rows() * 2;
resize(convertedMat, convertedMat_big, new Size(w, h));
convertedMat_big = drawResults(objs, convertedMat_big, w, h);
canvas.showImage(converter.convert(convertedMat_big));
canvas.waitKey();
}
canvas.dispose();
}
// Stream video frames from Webcam and run them through YOLOv2 model and get predictions
private static void doInference() {
String cameraPos = "front";
int cameraNum = 0;
Thread thread = null;
NativeImageLoader loader = new NativeImageLoader(
faceMaskIterator.yolowidth,
faceMaskIterator.yoloheight,
3,
new ColorConversionTransform(COLOR_BGR2RGB));
ImagePreProcessingScaler scaler = new ImagePreProcessingScaler(0, 1);
if (!cameraPos.equals("front") && !cameraPos.equals("back")) {
try {
throw new Exception("Unknown argument for camera position. Choose between front and back");
} catch (Exception e) {
e.printStackTrace();
}
}
FrameGrabber grabber = null;
try {
grabber = FrameGrabber.createDefault(cameraNum);
} catch (FrameGrabber.Exception e) {
e.printStackTrace();
}
OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat();
try {
grabber.start();
} catch (FrameGrabber.Exception e) {
e.printStackTrace();
}
CanvasFrame canvas = new CanvasFrame("Object Detection");
int w = grabber.getImageWidth();
int h = grabber.getImageHeight();
canvas.setCanvasSize(w, h);
while (true) {
try {
frame = grabber.grab();
} catch (FrameGrabber.Exception e) {
e.printStackTrace();
}
//if a thread is null, create new thread
if (thread == null) {
thread = new Thread(() ->
{
while (frame != null) {
try {
Mat rawImage = new Mat();
//Flip the camera if opening front camera
if (cameraPos.equals("front")) {
Mat inputImage = converter.convert(frame);
flip(inputImage, rawImage, 1);
} else {
rawImage = converter.convert(frame);
}
Mat resizeImage = new Mat();
resize(rawImage, resizeImage, new Size(faceMaskIterator.yolowidth, faceMaskIterator.yoloheight));
INDArray inputImage = loader.asMatrix(resizeImage);
scaler.transform(inputImage);
INDArray outputs = model.outputSingle(inputImage);
org.deeplearning4j.nn.layers.objdetect.Yolo2OutputLayer yout = (org.deeplearning4j.nn.layers.objdetect.Yolo2OutputLayer) model.getOutputLayer(0);
List<DetectedObject> objs = yout.getPredictedObjects(outputs, detectionThreshold);
YoloUtils.nms(objs, 0.4);
rawImage = drawResults(objs, rawImage, w, h);
canvas.showImage(converter.convert(rawImage));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
});
thread.start();
}
KeyEvent t = null;
try {
t = canvas.waitKey(33);
} catch (InterruptedException e) {
e.printStackTrace();
}
if ((t != null) && (t.getKeyCode() == KeyEvent.VK_Q)) {
break;
}
}
}
private static Mat drawResults(List<DetectedObject> objects, Mat mat, int w, int h) {
for (DetectedObject obj : objects) {
double[] xy1 = obj.getTopLeftXY();
double[] xy2 = obj.getBottomRightXY();
String label = labels.get(obj.getPredictedClass());
int x1 = (int) Math.round(w * xy1[0] / faceMaskIterator.gridWidth);
int y1 = (int) Math.round(h * xy1[1] / faceMaskIterator.gridHeight);
int x2 = (int) Math.round(w * xy2[0] / faceMaskIterator.gridWidth);
int y2 = (int) Math.round(h * xy2[1] / faceMaskIterator.gridHeight);
//Draw bounding box
rectangle(mat, new Point(x1, y1), new Point(x2, y2), colormap[obj.getPredictedClass()], 2, 0, 0);
//Display label text
labeltext = label + " " + String.format("%.2f", obj.getConfidence() * 100) + "%";
int[] baseline = {0};
Size textSize = getTextSize(labeltext, FONT_HERSHEY_DUPLEX, 1, 1, baseline);
rectangle(mat, new Point(x1 + 2, y2 - 2), new Point(x1 + 2 + textSize.get(0), y2 - 2 - textSize.get(1)), colormap[obj.getPredictedClass()], FILLED, 0, 0);
putText(mat, labeltext, new Point(x1 + 2, y2 - 2), FONT_HERSHEY_DUPLEX, 1, RGB(0, 0, 0));
}
return mat;
}
CanvasFrame applies gamma correction by default because it's typically needed by the cameras used for computer vision, but cheap webcams usually output already gamma-corrected images, so make sure to let CanvasFrame know about it this way:
// We should also specify the relative monitor/camera response for proper gamma correction.
CanvasFrame frame = new CanvasFrame("Some Title", CanvasFrame.getDefaultGamma()/grabber.getGamma());
https://github.com/bytedeco/javacv/
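Applied to the validation viewer above, this means passing an explicit gamma to the CanvasFrame constructor. A minimal sketch, assuming the dataset images on disk are already gamma-encoded with the usual value of roughly 2.2 (that value is an assumption to tune, not something the code above establishes):
// Hypothetical change in OfflineValidationWithTestDataset(): the image files are
// assumed to be gamma-encoded (~2.2), so cancel CanvasFrame's own correction.
CanvasFrame canvas = new CanvasFrame("Validate Test Dataset",
        CanvasFrame.getDefaultGamma() / 2.2);
For the webcam viewer in doInference(), the grabber's reported gamma can be used instead, exactly as in the snippet above: new CanvasFrame("Object Detection", CanvasFrame.getDefaultGamma() / grabber.getGamma()).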
I'm trying to reproduce an example from this question, importing an STL file with InteractiveMesh. As a model I've used a calibration cube for 3D printing.
This is my class code:
public class Example extends Application {
private Group root;
private PointLight pointLight;
private static final int VIEWPORT_SIZE_V1 = 720;
private static final double MODEL_SCALE_FACTOR = 400;
private static final double MODEL_X_OFFSET = 0; // standard
private static final double MODEL_Y_OFFSET = 0; // standard
private static final Color lightColor = Color.rgb(244, 255, 250);
private static final Color modelColor = Color.rgb(0, 190, 222);
private static final String MESH_FILENAME =
"/Users/user/Downloads/cube.stl";
static MeshView[] loadMeshViews(File file) {
StlMeshImporter importer = new StlMeshImporter();
try {
importer.read(file);
}
catch (ImportException e){
e.printStackTrace();
}
Mesh mesh = importer.getImport();
return new MeshView[] { new MeshView(mesh) };
}
private Group buildScene(MeshView[] meshViews) {
for (int i = 0; i < meshViews.length; i++) {
meshViews[i].setTranslateX(VIEWPORT_SIZE_V1 / 2 + MODEL_X_OFFSET);
meshViews[i].setTranslateY(VIEWPORT_SIZE_V1 / 2 + MODEL_Y_OFFSET);
meshViews[i].setTranslateZ(VIEWPORT_SIZE_V1 / 2);
meshViews[i].setScaleX(MODEL_SCALE_FACTOR);
meshViews[i].setScaleY(MODEL_SCALE_FACTOR);
meshViews[i].setScaleZ(MODEL_SCALE_FACTOR);
PhongMaterial sample = new PhongMaterial(modelColor);
sample.setSpecularColor(lightColor);
sample.setSpecularPower(16);
meshViews[i].setMaterial(sample);
meshViews[i].getTransforms().setAll(new Rotate(38, Rotate.Z_AXIS), new Rotate(20, Rotate.X_AXIS));
}
pointLight = new PointLight(lightColor);
pointLight.setTranslateX(VIEWPORT_SIZE_V1*3/4);
pointLight.setTranslateY(VIEWPORT_SIZE_V1/2);
pointLight.setTranslateZ(VIEWPORT_SIZE_V1/2);
PointLight pointLight2 = new PointLight(lightColor);
pointLight2.setTranslateX(VIEWPORT_SIZE_V1*1/4);
pointLight2.setTranslateY(VIEWPORT_SIZE_V1*3/4);
pointLight2.setTranslateZ(VIEWPORT_SIZE_V1*3/4);
PointLight pointLight3 = new PointLight(lightColor);
pointLight3.setTranslateX(VIEWPORT_SIZE_V1*5/8);
pointLight3.setTranslateY(VIEWPORT_SIZE_V1/2);
pointLight3.setTranslateZ(0);
Color ambientColor = Color.rgb(80, 80, 80, 0);
AmbientLight ambient = new AmbientLight(ambientColor);
root = new Group(meshViews);
root.getChildren().add(pointLight);
root.getChildren().add(pointLight2);
root.getChildren().add(pointLight3);
root.getChildren().add(ambient);
return root;
}
private PerspectiveCamera addCamera(Scene scene) {
PerspectiveCamera perspectiveCamera = new PerspectiveCamera();
System.out.println("Near Clip: " + perspectiveCamera.getNearClip());
System.out.println("Far Clip: " + perspectiveCamera.getFarClip());
System.out.println("FOV: " + perspectiveCamera.getFieldOfView());
scene.setCamera(perspectiveCamera);
return perspectiveCamera;
}
@Override
public void start(Stage stage) throws IOException {
File file = new File(MESH_FILENAME);
MeshView[] meshViews = loadMeshViews(file);
Group group = buildScene(meshViews);
group.setScaleX(2);
group.setScaleY(2);
group.setScaleZ(2);
group.setTranslateX(50);
group.setTranslateY(50);
Scene scene = new Scene(group, VIEWPORT_SIZE_V1, VIEWPORT_SIZE_V1,true);
scene.setFill(Color.rgb(10, 10, 40));
addCamera(scene);
stage.setScene(scene);
stage.setTitle("Example");
stage.show();
}
public static void main(String[] args) {
launch();
}
}
The resulting output is (screenshot omitted):
I've followed each row of the example but something is wrong. I've also tried to move the camera and change the scale...
Does anyone know how I can fix it?
Thanks
I want to make an imitative hacking system. There will be three windows displayed on screen, each constantly printing strings (like in a movie scene). However, only the third (the last) window works. How can I make every window show strings at the same time?
The frame class is as follows:
import java.awt.*;
import java.awt.event.*;
class sys extends Thread {
private static Frame frm;
private static TextArea txa;
private int fsx, fsy, flx, fly, tsx, tsy, tlx, tly;
private String strarr[] = new String[7];
private String frmName;
public sys(String str, String SAEC[]) {
frmName = str;
strarr = SAEC;
}
public void SettingFRM(int sx, int sy, int lx, int ly) {
//frame's location and size
fsx = sx;
fsy = sy;
flx = lx;
fly = ly;
}
public void SettingTXA(int sx, int sy, int lx, int ly) {
//textArea's location and size
tsx = sx;
tsy = sy;
tlx = lx;
tly = ly;
}
public void run() {
frm = new Frame(frmName);
//the exterior design
txa = new TextArea("", 100, 100, TextArea.SCROLLBARS_BOTH);
txa.setBounds(tlx, tly, tsx, tsy);
txa.setBackground(Color.darkGray);
txa.setFont(new Font("Arial", Font.PLAIN, 16));
txa.setForeground(Color.green);
frm.setLayout(null);
frm.setSize(fsx, fsy);
frm.setLocation(flx, fly);
frm.setVisible(true);
frm.setBackground(Color.darkGray);
frm.add(txa);
frm.addWindowListener(new WindowAdapter() {
public void windowClosing(WindowEvent e) {
System.exit(0);
}
});
while (true) {
txa.append(strarr[(int) (Math.random() * strarr.length)]); // append a random string
try {
sleep(200);
} catch (InterruptedException e) {
}
}
}
}
The main program is as follows:
public class ImitativeHackSystem {
public static void main(String[] args) throws InterruptedException {
//specific string
String strarr[] = new String[9];
strarr[0] = new String("Hacking... Time estimate 25 seconds...\n");
strarr[1] = new String("Retrying...\n");
strarr[2] = new String("Error! Code:e2130443523\n");
strarr[3] = new String("Success! IP:192.168.0.1\n");
strarr[4] = new String("picking datas...\n");
strarr[5] = new String("Anti-system started\n");
strarr[6] = new String("Has been discovering... fake random IP address\n");
strarr[7] = new String("01011010000011001000000011111101010");
strarr[8] = new String("111101010101001101101101010011010");
//object array
sys fhs[] = new sys[3];
for (int i = 0; i < 3; i++)
fhs[i] = new sys("Fake Hacking System", strarr);
fhs[0].SettingTXA(635, 690, 5, 30);
fhs[0].SettingFRM(640, 720, 0, 0);
fhs[1].SettingTXA(635, 330, 5, 30);
fhs[1].SettingFRM(640, 360, 645, 0);
fhs[2].SettingTXA(635, 330, 5, 30);
fhs[2].SettingFRM(640, 360, 645, 365);
//to execute
for (int i = 0; i < 3; i++) {
fhs[i].start();
Thread.sleep(500);
}
}
}
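(A hedged aside, since the code above hints at the cause: frm and txa are declared static, so all three sys threads share a single Frame and TextArea, and only the last window created stays wired to them. A minimal sketch of the change, keeping everything else as-is:
// In class sys: per-instance components instead of one shared pair.
private Frame frm;
private TextArea txa;
With instance fields, each thread builds and appends to its own window.)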
I have to build a GPS parser. I need to parse the NMEA strings in another thread, which should parse one NMEA string and update the chart at 1 Hz. So far I have built part of my code, but I parse the data in the main thread in a while loop; my teacher said that is wrong. I have done some Java programming, but not with multi-threading. How can I move the parsing and the chart refreshing to a background thread?
public class MainFrame extends JFrame {
private JButton btnWybPlik;
private JLabel jlDroga;
private JLabel jlPredkosc;
private JLabel jlCzas;
private JPanel mainjpanel;
private JPanel jpMenu;
private JPanel jpTablica;
//private String sciezkaPliku;
private SekwencjaGGA sekGGA = null;
private SekwencjaGGA popSekGGA = null;
private SekwencjaGSA sekGSA;
private SekwencjaGLL sekGLL;
private SekwencjaRMC sekRMC;
private double droga;
private double predkosc;
private XYSeries series1;
private XYSeriesCollection dataset;
public MainFrame() {
droga = 0;
btnWybPlik.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent e) {
JFileChooser fileChooser = new JFileChooser();
fileChooser.setCurrentDirectory(new File(System.getProperty("user.home")));
int result = fileChooser.showOpenDialog(mainjpanel);
if (result == JFileChooser.APPROVE_OPTION) {
File selectedFile = fileChooser.getSelectedFile();
//System.out.println("Selected file: " + selectedFile.getAbsolutePath());
String sciezkaPliku = selectedFile.getAbsolutePath();
wczytaniePliku(sciezkaPliku);
}
}
});
jpTablica = new JPanel();
mainjpanel.add(jpTablica);
this.series1 = new XYSeries("Trasa", false);
final XYSeriesCollection dataset = new XYSeriesCollection(this.series1);
final JFreeChart chart = createChart(dataset);
final ChartPanel chartPanel = new ChartPanel(chart);
jpTablica.add(chartPanel);
}
private void wczytaniePliku(String sciezkaDoPliku) {
try (BufferedReader br = new BufferedReader(new FileReader(sciezkaDoPliku))) {
String line;
//series1.add(53.448, 14.4907);
while ((line = br.readLine()) != null) {
parseLine(line);
}
//series1.add(53.4485, 14.4910);
} catch (IOException e) {
e.printStackTrace();
}
}
private void parseLine(String line) {
String bezSumKont = line.substring(0, line.length() - 3);
List<String> podzSekw = Arrays.asList(bezSumKont.split(","));
if (podzSekw.get(0).equalsIgnoreCase("$GPGGA")) {
if (check(line)) {
if (sekGGA != null)
popSekGGA = sekGGA;
sekGGA = new SekwencjaGGA(podzSekw);
if (popSekGGA != null) {
droga += obliczOdleglosc(popSekGGA, sekGGA);
jlDroga.setText(String.valueOf(droga));
}
series1.add(sekGGA.getWspolzedne().getLongitude(), sekGGA.getWspolzedne().getLatitude());
System.out.println(sekGGA.getWspolzedne().getLatitude() + " " + sekGGA.getWspolzedne().getLongitude());
//System.out.println(series1.getMaxY() + " " + series1.getMinY());
} else {
//TODO: Count the error
}
}
if (podzSekw.get(0).equalsIgnoreCase("$GPGSA")) {
if (check(line)) {
sekGSA = new SekwencjaGSA(podzSekw);
} else {
//TODO: Count the error
}
}
if (podzSekw.get(0).equalsIgnoreCase("$GPGLL")) {
if (check(line)) {
sekGLL = new SekwencjaGLL(podzSekw);
} else {
//TODO: Count the error
}
}
if (podzSekw.get(0).equalsIgnoreCase("$GPRMC")) {
//TODO: RMC sequence (recommended minimum of data)
if (check(line)) {
sekRMC = new SekwencjaRMC(podzSekw);
} else {
//TODO: Count the error
}
}
}
private double obliczOdleglosc(SekwencjaGGA pkt1, SekwencjaGGA pkt2) {
double odleglosc = 0;
double earthRadius = 6371000; //meters
double dLat = Math.toRadians(pkt2.getWspolzedne().getLatitude() - pkt1.getWspolzedne().getLatitude());
double dLng = Math.toRadians(pkt2.getWspolzedne().getLongitude() - pkt1.getWspolzedne().getLongitude());
double a = Math.sin(dLat / 2) * Math.sin(dLat / 2) +
Math.cos(Math.toRadians(pkt1.getWspolzedne().getLatitude())) * Math.cos(Math.toRadians(pkt1.getWspolzedne().getLatitude())) *
Math.sin(dLng / 2) * Math.sin(dLng / 2);
double c = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1 - a));
odleglosc = earthRadius * c;
return odleglosc;
}
/**
* Checks the NMEA checksum.
*
* @param tekst the entire NMEA line
* @return true if the checksum matches
*/
private boolean check(String tekst) {
String suma = tekst.substring(tekst.length() - 2, tekst.length());
tekst = tekst.substring(1, tekst.length() - 3);
int checksum = 0;
for (int i = 0; i < tekst.length(); i++) {
checksum = checksum ^ tekst.charAt(i);
}
if (Integer.parseInt(suma, 16) == checksum) {
return true;
}
return false;
}
private JFreeChart createChart(final XYDataset dataset) { ... }
private void customizeChart(JFreeChart chart) { ... }
public static void main(String[] args) {
JFrame frame = new JFrame("MainFrame");
frame.setContentPane(new MainFrame().mainjpanel);
frame.setPreferredSize(new Dimension(640, 480));
frame.pack();
frame.setVisible(true);
}
To avoid blocking the event dispatch thread, construct an instance of SwingWorker. Collect data in your implementation of doInBackground(), publish() intermediate results, and update the XYSeries in your implementation of process(). The listening chart will update itself in response. A related example that uses jfreechart is seen below and examined here.
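As a minimal sketch of that approach (the method name wczytaniePlikuWTle is hypothetical; parseLine(), series1 and the labels are the members defined above, and since parseLine() updates them, it is called on the EDT in process()):
private void wczytaniePlikuWTle(final String sciezkaDoPliku) {
    new SwingWorker<Void, String>() {
        @Override
        protected Void doInBackground() throws Exception {
            // Background thread: read the file without blocking the EDT.
            try (BufferedReader br = new BufferedReader(new FileReader(sciezkaDoPliku))) {
                String line;
                while ((line = br.readLine()) != null) {
                    publish(line);      // hand each raw NMEA line to process()
                    Thread.sleep(1000); // throttle to roughly 1 Hz
                }
            }
            return null;
        }
        @Override
        protected void process(java.util.List<String> lines) {
            // EDT: safe to touch series1, jlDroga and the chart here.
            for (String line : lines) {
                parseLine(line);
            }
        }
    }.execute();
}
Heavier parsing could instead live in doInBackground(), publishing already-parsed SekwencjaGGA objects and leaving only the chart update to process().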
public class signal_identifier {
private static final Log LOG = LogFactory.getLog(signal_identifier.class);
public static void main(String[] args) throws Exception {
long t = cvGetTickCount();
Configuration conf = new Configuration();
long milliSeconds = 1800000;
conf.setLong("mapred.task.timeout", milliSeconds);
Job job = new Job(conf, "TrafficSignalProcessing");
job.setJarByClass(signal_identifier.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setMapperClass(signal_mapper.class);
job.setReducerClass(signal_reducer.class);
job.setInputFormatClass(VideoInputFormat.class);
job.setOutputFormatClass(TextOutputFormat.class);
FileInputFormat.addInputPath(job, new Path("hdfs://localhost:9000/tmp/traffic_signal.mp4"));
FileOutputFormat.setOutputPath(job, new Path("hdfs://localhost:9000/tmp/ouputv"));
job.waitForCompletion(true);
}
}
Mapper Class:
public class signal_mapper extends Mapper<Text, VideoObject, Text, IntWritable> {
private static final Log LOG = LogFactory.getLog(signal_mapper.class);
private static OpenCVFrameGrabber grabber;
private static IplImage currentFrame;
private static IplImage frame;
private static IplImage imgHSV;
private static IplImage imgThresholdr;
private static IplImage imgThresholdg;
private static IplImage imgC;
static int LowerRed = 160;
static int UpperRed = 180;
static int LowerGreen = 40;
static int UpperGreen = 80;
CvArr mask;
//private static final int FOURCC = CV_FOURCC('X', 'V', 'I', 'D');
// New-API signature: with the extra OutputCollector/Reporter parameters this
// method never overrides Mapper.map and is never called by the framework.
@Override
public void map(Text key, VideoObject value, Context context) throws IOException, InterruptedException {
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(value.getVideoByteArray());
LOG.info("Log__VideoConverter__byteArray: "+ byteArrayInputStream.available());
String fileName = key.toString();
int id = value.getId();
long differencer = 0;
long differenceg = 0;
long lStartTime = 0;
String flag = "start";
//LocalFileSystem fs = FileSystem.getLocal(context.getConfiguration());
Path filePath = new Path("/tmp", fileName);
//Path resFile = new Path("/tmp", "res_"+fileName);
System.out.println("File to Process :"+filePath.toString());
//FSDataOutputStream out = fs.create(filePath, true);
//out.write(value.getVideoByteArray());
//out.close();
try{
grabber = new OpenCVFrameGrabber(filePath.toString());
grabber.start();
CvMemStorage storage = CvMemStorage.create();
CvSize frameSize = new CvSize(grabber.getImageWidth(), grabber.getImageHeight());
currentFrame = cvCreateImage(frameSize, 8, 3);
IplImage cropped;// = cvCreateImage(frameSize, 8, 3);
CvRect r = new CvRect(250, 40, 350, 350);
System.out.println("Video processing .........started");
while(queryFrame()) {
cvClearMemStorage(storage);
if(flag.equals("start")){
lStartTime = new Date().getTime();
}
cvSetImageROI(currentFrame, r);
cropped = cvCreateImage(cvGetSize(currentFrame), currentFrame.depth(),currentFrame.nChannels());
// Copy original image (only ROI) to the cropped image
cvCopy(currentFrame, cropped);
imgHSV = cvCreateImage(cvGetSize(cropped), 8, 3);
cvCvtColor(cropped, imgHSV, CV_BGR2HSV);
imgThresholdr = cvCreateImage(cvGetSize(cropped), 8, 1);
imgThresholdg = cvCreateImage(cvGetSize(cropped), 8, 1);
imgC = cvCreateImage(cvGetSize(cropped),8,1);
cvInRangeS(imgHSV, cvScalar(LowerRed,150,75,0), cvScalar(UpperRed, 255, 255, 0), imgThresholdr);
cvInRangeS(imgHSV, cvScalar(LowerGreen,150,75,0), cvScalar(UpperGreen, 255, 255, 0), imgThresholdg);
Dimension positionr = getCoordinates(imgThresholdr);
int posr = positionr.width+positionr.height;
Dimension positiong = getCoordinates(imgThresholdg);
int posg = positiong.width+positiong.height;
//&& !flag.equalsIgnoreCase("red") && !flag.equalsIgnoreCase("green")
if(posr > 255 && posr < 265 ){
flag = "red";
}else {
long lEndTime = new Date().getTime();
differenceg = (lEndTime - lStartTime) - differencer;
output.collect(new Text("Green Color found at second- => "),new IntWritable((int)differenceg/1000));
//System.out.println("Green Color found at second: " + differenceg/1000);
}
if(posg > 430 && posg < 440){
flag = "green";
}else{
long lEndTime = new Date().getTime();
differencer = (lEndTime - lStartTime) - differenceg;
output.collect(new Text("Red Color found at second- => "),new IntWritable((int)differencer/1000));
//System.out.println("Red Color found at second: " + differencer/1000);
}
}
grabber.stop();
System.out.println("Video processing .........Completed");
}catch(Exception e) {
e.printStackTrace();
}
}
private static boolean queryFrame() throws Exception {
try {
IplImage frame = grabber.grab();
if (frame != null) {
cvConvertImage(frame, currentFrame, 0);
return true;
} else {
return false;
}
}catch(com.googlecode.javacv.FrameGrabber.Exception fge) {
return false;
}
catch(Exception e) {
return false;
}
}
static Dimension getCoordinates(IplImage thresholdImage) {
int posX = 0;
int posY = 0;
CvMoments moments = new CvMoments();
cvMoments(thresholdImage, moments, 1);
double momX10 = cvGetSpatialMoment(moments, 1, 0);
double momY01 = cvGetSpatialMoment(moments, 0, 1);
double area = cvGetCentralMoment(moments, 0, 0);
posX = (int) (momX10 / area);
posY = (int) (momY01 / area);
return new Dimension(posX, posY);
}
}
Reducer Class:
public class signal_reducer extends Reducer<Text, IntWritable, Text, IntWritable> {
@Override
public void reduce(Text key, Iterable<IntWritable> values, Context context)
throws IOException, InterruptedException {
int sum = 0;
// New-API reduce: iterate the Iterable and write through the Context.
for (IntWritable value : values) {
sum += value.get();
}
context.write(key, new IntWritable(sum));
}
}
I'm trying to show a simple window in SWT. I want to divide the window into two equal parts: the user will provide input in the left area, and the right area will show the output.
Specifically, the user will type some characters in the left area (in a textbox), and these will be reflected in a label below the textbox. When the user clicks a 'Process' button, a screenshot of just the label control will be taken using java.awt.Robot.createScreenCapture. I want to display this screenshot in the right area.
Later on, I want to use optical character recognition to read the label and display to the user what he typed in the textbox. But for now, I'm simply having trouble getting this layout to show up correctly. If I press process, then the screenshot image is displayed in the right column, but if I resize or maximize the window, it disappears.
Here's the code of the relevant classes. First, this is the class which sets up the controls on a composite, and returns the composite using getControl(). The composite it returns is then inserted as a TabItem in a TabFolder:
public class ReadChars
{
//This is the root container to which all the other controls are added. This container is inserted
//in a TabFolder as a TabItem:
private Composite container;
//Left side input column:
private Group input;
//Right side output column:
private Group output;
private String fontName;
private int fontSize;
private int fontStyle;
private Font font;
private Text sourceChars;
private Label sourceCharsLbl;
private Button processBtn;
//Code for this is provided later:
private ImgCanvas sourceCharsImg;
public ReadChars(TabFolder parent)
{
fontName = "Courier New";
fontSize = 12;
fontStyle = SWT.NORMAL;
initComponents(parent);
}
private void initComponents(TabFolder parent)
{
container = new Composite(parent, SWT.NONE);
GridLayout layout = new GridLayout();
layout.numColumns = 2;
layout.makeColumnsEqualWidth = true;
container.setLayout(layout);
font = new Font(Display.getDefault(), fontName, fontSize, fontStyle);
container.setFont(font);
input = new Group(container, SWT.NONE);
input.setLayoutData(getGridData());
output = new Group(container, SWT.NONE);
output.setLayoutData(getGridData());
initInputGroup();
initOutputGroup();
}
private void initInputGroup()
{
GridLayout layout = new GridLayout();
layout.numColumns = 1;
layout.makeColumnsEqualWidth = true;
input.setLayout(layout);
getHeadingLabel(input).setText("Options");
new Label(input, SWT.NONE).setText("Type the characters to parse below (0-9 and .):");
String defaults = "012345689.";
sourceChars = getTextbox(input);
sourceChars.setText(defaults);
sourceCharsLbl = new Label(input, SWT.NONE);
sourceCharsLbl.setLayoutData( getGridData() );
sourceCharsLbl.setFont(font);
sourceCharsLbl.setBackground( new Color(Display.getDefault(), 224, 224,224) );
sourceCharsLbl.setText(defaults);
sourceChars.addKeyListener(new KeyAdapter()
{
public void keyReleased(KeyEvent e)
{
System.out.println("here: " + sourceChars.getText());
sourceCharsLbl.setText( sourceChars.getText() );
System.out.println("Text now: " + sourceCharsLbl.getText());
}
});
processBtn = new Button(input, SWT.NONE);
processBtn.setText("Process");
processBtn.addSelectionListener( getProcessHandler() );
}
private void initOutputGroup()
{
//output.setVisible(false);
GridLayout layout = new GridLayout();
layout.numColumns = 1;
layout.makeColumnsEqualWidth = true;
output.setLayout(layout);
getHeadingLabel(output).setText("Output");
new Label(output, SWT.NONE).setText("Source Image: ");
sourceCharsImg = new ImgCanvas(output );
}
protected Label getHeadingLabel(Group parent)
{
Font font = new Font(Display.getDefault(), fontName, 16, SWT.BOLD);
Label result = new Label(parent, SWT.NONE);
result.setFont(font);
return result;
}
protected Text getTextbox(Group parent)
{
Text box = new Text(parent, SWT.BORDER);
GridData data = getGridData();
data.widthHint = 200;
box.setLayoutData(data);
return box;
}
protected GridData getGridData()
{
GridData data = new GridData();
data.horizontalAlignment = SWT.FILL;
data.grabExcessHorizontalSpace = true;
return data;
}
protected void updateSourceImg()
{
Image screenshot = ImgUtility.getScreenShot(sourceCharsLbl);
GridData gd = new GridData();
gd.widthHint = screenshot.getBounds().width;
gd.heightHint = screenshot.getBounds().height;
sourceCharsImg.setImage(screenshot);
sourceCharsImg.getCanvas().setLayoutData(gd);
sourceCharsImg.redraw();
}
protected SelectionAdapter getProcessHandler()
{
return new SelectionAdapter()
{
public void widgetSelected(SelectionEvent e)
{
updateSourceImg();
}
};
}
public Composite getControl()
{
return container;
}
}
Here's the code to the ImgCanvas class referenced above:
public class ImgCanvas
{
private Composite container;
private Canvas canvas;
private Image img;
private Object layoutData;
public ImgCanvas(Composite parent)
{
container = new Composite(parent, SWT.NONE);
container.setLayout( new FillLayout() );
}
public ImgCanvas(Composite parent, Image img)
{
container = new Composite(parent, SWT.NONE);
container.setLayout( new FillLayout() );
setImage(img);
}
public void setCanvas(Canvas canvas)
{
if (this.canvas != null)
{
System.out.println("Calling dispose");
this.canvas.dispose();
}
else
System.out.println("Canvas is null");
this.canvas = canvas;
initCanvas();
}
public void setCanvas()
{
Canvas canvas = new Canvas(container, SWT.NONE);
if (layoutData != null)
canvas.setLayoutData(layoutData);
setCanvas(canvas);
}
public void setImage(Image img)
{
setCanvas();
this.img = img; //keep this below setCanvas() to avoid being disposed.
Composite parent = container.getParent();
parent.setSize(parent.getBounds().width + img.getBounds().width,
parent.getBounds().height + img.getBounds().height);
container.setSize(img.getBounds().width, img.getBounds().height);
canvas.setSize(img.getBounds().width, img.getBounds().height);
System.out.println("Set image: " + img.getBounds() + ", " + img.toString());
redraw();
}
public Canvas getCanvas()
{
return canvas;
}
public Composite getContainer()
{
return container;
}
public Image getImage()
{
return img;
}
public void redraw()
{
System.out.println("redrawing");
canvas.redraw();
}
public void setLayoutData( Object data )
{
container.setLayoutData(data);
canvas.setLayoutData(data);
this.layoutData = data;
}
protected void initCanvas()
{
System.out.println("Canvas started");
canvas.addPaintListener( getPaintListener() );
canvas.addDisposeListener(getDisposeListener());
}
protected PaintListener getPaintListener()
{
return new PaintListener()
{
public void paintControl(PaintEvent e)
{
System.out.println("Painting");
if (img != null )
{
System.out.println("Img:" + img.getBounds() );
e.gc.drawImage(img, 0, 0);
System.out.println("Canvas: " + canvas.getBounds() );
//canvas.setSize(img.getBounds().width, img.getBounds().width);
//canvas.pack();
}
else
System.out.println("Img is null: " + img);
}
};
}
protected DisposeListener getDisposeListener()
{
return new DisposeListener()
{
@Override
public void widgetDisposed(DisposeEvent e)
{
System.out.println("Disposing");
if (img != null)
{
Composite parent = container.getParent();
parent.setSize(parent.getBounds().width - img.getBounds().width,
parent.getBounds().height - img.getBounds().height);
img.dispose();
}
}
};
}
}
Here's the code for ImgUtility:
public class ImgUtility
{
private static Robot bot;
private static Display display;
public static BufferedImage convertToAWT(ImageData data)
{
ColorModel colorModel = null;
PaletteData palette = data.palette;
if (palette.isDirect)
{
colorModel = new DirectColorModel(data.depth, palette.redMask, palette.greenMask, palette.blueMask);
BufferedImage bufferedImage = new BufferedImage(colorModel, colorModel.createCompatibleWritableRaster(data.width, data.height), false, null);
for (int y = 0; y < data.height; y++)
{
for (int x = 0; x < data.width; x++)
{
int pixel = data.getPixel(x, y);
RGB rgb = palette.getRGB(pixel);
bufferedImage.setRGB(x, y, rgb.red << 16 | rgb.green << 8 | rgb.blue);
}
}
return bufferedImage;
}
else
{
RGB[] rgbs = palette.getRGBs();
byte[] red = new byte[rgbs.length];
byte[] green = new byte[rgbs.length];
byte[] blue = new byte[rgbs.length];
for (int i = 0; i < rgbs.length; i++) {
RGB rgb = rgbs[i];
red[i] = (byte)rgb.red;
green[i] = (byte)rgb.green;
blue[i] = (byte)rgb.blue;
}
if (data.transparentPixel != -1) {
colorModel = new IndexColorModel(data.depth, rgbs.length, red, green, blue, data.transparentPixel);
}
else
{
colorModel = new IndexColorModel(data.depth, rgbs.length, red, green, blue);
}
BufferedImage bufferedImage = new BufferedImage(colorModel, colorModel.createCompatibleWritableRaster(data.width, data.height), false, null);
WritableRaster raster = bufferedImage.getRaster();
int[] pixelArray = new int[1];
for (int y = 0; y < data.height; y++)
{
for (int x = 0; x < data.width; x++)
{
int pixel = data.getPixel(x, y);
pixelArray[0] = pixel;
raster.setPixel(x, y, pixelArray);
}
}
return bufferedImage;
}
}
public static ImageData convertToSWT(BufferedImage bufferedImage)
{
if (bufferedImage.getColorModel() instanceof DirectColorModel) {
DirectColorModel colorModel = (DirectColorModel)bufferedImage.getColorModel();
PaletteData palette = new PaletteData(colorModel.getRedMask(), colorModel.getGreenMask(), colorModel.getBlueMask());
ImageData data = new ImageData(bufferedImage.getWidth(), bufferedImage.getHeight(), colorModel.getPixelSize(), palette);
for (int y = 0; y < data.height; y++)
{
for (int x = 0; x < data.width; x++) {
int rgb = bufferedImage.getRGB(x, y);
int pixel = palette.getPixel(new RGB((rgb >> 16) & 0xFF, (rgb >> 8) & 0xFF, rgb & 0xFF));
data.setPixel(x, y, pixel);
if (colorModel.hasAlpha()) {
data.setAlpha(x, y, (rgb >> 24) & 0xFF);
}
}
}
return data;
}
else if (bufferedImage.getColorModel() instanceof IndexColorModel)
{
IndexColorModel colorModel = (IndexColorModel)bufferedImage.getColorModel();
int size = colorModel.getMapSize();
byte[] reds = new byte[size];
byte[] greens = new byte[size];
byte[] blues = new byte[size];
colorModel.getReds(reds);
colorModel.getGreens(greens);
colorModel.getBlues(blues);
RGB[] rgbs = new RGB[size];
for (int i = 0; i < rgbs.length; i++)
{
rgbs[i] = new RGB(reds[i] & 0xFF, greens[i] & 0xFF, blues[i] & 0xFF);
}
PaletteData palette = new PaletteData(rgbs);
ImageData data = new ImageData(bufferedImage.getWidth(), bufferedImage.getHeight(), colorModel.getPixelSize(), palette);
data.transparentPixel = colorModel.getTransparentPixel();
WritableRaster raster = bufferedImage.getRaster();
int[] pixelArray = new int[1];
for (int y = 0; y < data.height; y++)
{
for (int x = 0; x < data.width; x++)
{
raster.getPixel(x, y, pixelArray);
data.setPixel(x, y, pixelArray[0]);
}
}
return data;
}
return null;
}
public static Image getImage(ImageData data)
{
return new Image(display, data);
}
public static void setDisplay(Display newDisplay)
{
display = newDisplay;
}
public static BufferedImage getRawScreenShot(int x, int y, int width, int height)
{
java.awt.Rectangle region = new java.awt.Rectangle(x, y, width, height);
BufferedImage bim = getRobot().createScreenCapture(region);
return bim;
}
public static BufferedImage getRawScreenShot(Control ctrl)
{
Point loc = ctrl.getLocation();
loc = ctrl.toDisplay(1, 1);
//ctrl.toDisplay(1, y)
Point size = ctrl.getSize();
return getRawScreenShot(loc.x, loc.y, size.x, size.y);
}
public static Image getScreenShot(int x, int y, int width, int height)
{
BufferedImage bim = getRawScreenShot(x, y, width, height);
return getImage( convertToSWT(bim) );
}
public static Image getScreenShot(Control ctrl)
{
BufferedImage bim = getRawScreenShot(ctrl);
return getImage( convertToSWT(bim) );
}
public static Robot getRobot()
{
if (bot == null)
{
try
{
bot = new java.awt.Robot();
}
catch (Exception e)
{
e.printStackTrace();
}
}
return bot;
}
}
*Edit:* Here is the main class:
public class Main
{
public static void main(String[] args)
{
ImgUtility.setDisplay(Display.getDefault());
WinMgr main = WinMgr.getMain();
Shell win = main.getShell();
win.setLayout( new FillLayout() );
initComponents(win);
WinMgr.init(win);
}
private static void initComponents(Shell win)
{
win.setText("Optical Recognition Libraries");
Rectangle area = win.getClientArea();
TabFolder tabs = new TabFolder(win, SWT.FILL);
tabs.setLocation(area.x, area.y);
new TabItem(tabs, SWT.NONE).setText("Read Characters");
tabs.getItem(0).setControl( new ReadChars(tabs).getControl() );
}
}
And here is the WinMgr class:
public class WinMgr
{
private Shell shell;
private static WinMgr mainWinMgr;
public WinMgr()
{
shell = new Shell();
initShell();
}
public WinMgr(int style)
{
shell = new Shell(style);
initShell();
}
public WinMgr(Display parent, int style)
{
shell = new Shell(parent, style);
this.initShell();
}
public WinMgr(Display parent)
{
shell = new Shell(parent);
this.initShell();
}
public WinMgr(Shell parent, int style)
{
shell = new Shell(parent, style);
this.initShell();
}
public Shell getShell()
{
return shell;
}
public void setShell(Shell newShell)
{
shell = newShell;
}
public void center()
{
Monitor primary = Display.getDefault().getPrimaryMonitor();
}
protected void initShell()
{
shell.addListener(SWT.Close, this.getOnClose() );
}
protected Listener getOnClose()
{
return new Listener()
{
public void handleEvent(Event event)
{
System.out.println("closing");
shell.dispose();
}
};
}
public static void init(Shell mainWin, boolean open)
{
Display display = Display.getDefault();
if (open)
mainWin.open();
while (! mainWin.isDisposed())
{
try
{
if (! display.readAndDispatch())
display.sleep();
}
catch (Exception e)
{
e.printStackTrace();
}
}
display.dispose();
}
public static void init(Shell mainWin)
{
init(mainWin, true);
}
public static WinMgr getMain()
{
if (mainWinMgr == null)
mainWinMgr = new WinMgr( Display.getDefault() );
return mainWinMgr;
}
public static Shell getMainShell()
{
return mainWinMgr.getShell();
}
}
Any help on what I'm doing wrong or how to improve this would be appreciated.
Ok, here you go, just replace your ImgCanvas class with this:
public class ImgCanvas
{
private Composite container;
private Canvas canvas;
private Image img;
public ImgCanvas(Composite parent)
{
container = new Composite(parent, SWT.NONE);
container.setLayout(new GridLayout(1, false));
container.setLayoutData(new GridData(SWT.FILL, SWT.FILL, true, true));
canvas = new Canvas(container, SWT.BORDER);
canvas.setLayoutData(new GridData(SWT.FILL, SWT.FILL, true, true));
initCanvas();
}
public ImgCanvas(Composite parent, Image img)
{
this(parent);
setImage(img);
}
public void setImage(Image img)
{
this.img = img; // keep this below setCanvas() to avoid being disposed.
System.out.println("Set image: " + img.getBounds() + ", " + img.toString());
redraw();
}
public void redraw()
{
System.out.println("redrawing");
canvas.redraw();
}
protected void initCanvas()
{
System.out.println("Canvas started");
canvas.addPaintListener(new PaintListener()
{
public void paintControl(PaintEvent e)
{
System.out.println("Painting");
if (img != null)
{
System.out.println("Img:" + img.getBounds());
e.gc.drawImage(img, 0, 0);
System.out.println("Canvas: " + canvas.getBounds());
}
else
System.out.println("Img is null: " + img);
}
});
canvas.addDisposeListener(new DisposeListener()
{
@Override
public void widgetDisposed(DisposeEvent e)
{
System.out.println("Disposing");
if (img != null)
{
img.dispose();
}
}
});
}
}
Not sure why you created a new Canvas each time you change the image... You can just reuse the old one.
Note: Never use .setSize(int, int) unless absolutely necessary, that's what Layouts are for. Read this:
Understanding Layouts in SWT
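With layouts doing the sizing, updateSourceImg() in the ReadChars class above can shrink to a sketch like this (same member names as before; the GridData juggling and getCanvas() call are no longer needed):
protected void updateSourceImg()
{
    // Grab the label screenshot; the canvas paint listener draws it on redraw().
    Image screenshot = ImgUtility.getScreenShot(sourceCharsLbl);
    sourceCharsImg.setImage(screenshot);
    output.layout(true, true); // let the layout recompute the right-hand column
}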