This is the entire source code for the Java file.
package gephifyer;
import java.awt.Color;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.gephi.data.attributes.api.AttributeColumn;
import org.gephi.data.attributes.api.AttributeController;
import org.gephi.data.attributes.api.AttributeModel;
import org.gephi.graph.api.DirectedGraph;
import org.gephi.graph.api.GraphController;
import org.gephi.graph.api.GraphModel;
import org.gephi.io.exporter.api.ExportController;
import org.gephi.io.importer.api.Container;
import org.gephi.io.importer.api.EdgeDefault;
import org.gephi.io.importer.api.ImportController;
import org.gephi.io.importer.spi.FileImporter;
import org.gephi.io.processor.plugin.DefaultProcessor;
import org.gephi.partition.api.Partition;
import org.gephi.partition.api.PartitionController;
import org.gephi.partition.plugin.NodeColorTransformer;
import org.gephi.preview.api.PreviewController;
import org.gephi.preview.api.PreviewModel;
import org.gephi.preview.api.PreviewProperty;
import org.gephi.preview.types.DependantOriginalColor;
import org.gephi.project.api.ProjectController;
import org.gephi.project.api.Workspace;
import org.gephi.ranking.api.Ranking;
import org.gephi.ranking.api.RankingController;
import org.gephi.ranking.plugin.transformer.AbstractSizeTransformer;
import org.gephi.statistics.plugin.Modularity;
import org.openide.util.Lookup;
import org.gephi.layout.plugin.force.StepDisplacement;
import org.gephi.layout.plugin.force.yifanHu.YifanHu;
import org.gephi.layout.plugin.force.yifanHu.YifanHuLayout;
import org.gephi.layout.plugin.openord.*;
public class Gephifyer {

    public void doStuff(String[] args) {
        String filename = "";
        try {
            filename = args[0];
        } catch (ArrayIndexOutOfBoundsException ex) {
            System.out.println("Supply the subreddit name as the argument.");
            System.exit(0);
        }

        ProjectController pc = Lookup.getDefault().lookup(ProjectController.class);
        pc.newProject();
        Workspace workspace = pc.getCurrentWorkspace();

        ImportController importController = Lookup.getDefault().lookup(ImportController.class);
        Container container;
        try {
            File file = new File(filename + ".csv");
            //File file = new File(getClass().getResource("askscience.csv").toURI());
            container = importController.importFile(file);
            container.getLoader().setEdgeDefault(EdgeDefault.DIRECTED);
            container.setAllowAutoNode(false); // don't create missing nodes
        } catch (Exception ex) {
            ex.printStackTrace();
            return;
        }

        // Append imported data to the graph API
        importController.process(container, new DefaultProcessor(), workspace);
        GraphModel graphModel = Lookup.getDefault().lookup(GraphController.class).getModel();
        DirectedGraph directedGraph = graphModel.getDirectedGraph();

        // Now let's manipulate the graph API, which stores / serves graphs
        System.out.println("Nodes: " + directedGraph.getNodeCount() + "\nEdges: " + directedGraph.getEdgeCount());

        // Run the layout (OpenOrd is left commented out in favour of Yifan Hu)
        //OpenOrdLayout layout = new OpenOrdLayout(null);
        YifanHuLayout layout = new YifanHuLayout(null, new StepDisplacement(0.95f));
        layout.setGraphModel(graphModel);
        layout.resetPropertiesValues();
        layout.initAlgo();
        layout.goAlgo();
        while (layout.canAlgo()) { // loop until the algorithm reports completion (the demo comment referred to OpenOrd, but Yifan Hu also terminates)
            layout.goAlgo();
        }

        AttributeModel attributemodel = Lookup.getDefault().lookup(AttributeController.class).getModel();

        // Get modularity for coloring
        Modularity modularity = new Modularity();
        modularity.setUseWeight(true);
        modularity.setRandom(true);
        modularity.setResolution(1.0);
        modularity.execute(graphModel, attributemodel);

        // Partition with modularity
        AttributeColumn modcol = attributemodel.getNodeTable().getColumn(Modularity.MODULARITY_CLASS);
        PartitionController partitionController = Lookup.getDefault().lookup(PartitionController.class);
        Partition p = partitionController.buildPartition(modcol, directedGraph);
        NodeColorTransformer nodeColorTransformer = new NodeColorTransformer();
        nodeColorTransformer.randomizeColors(p);
        partitionController.transform(p, nodeColorTransformer);

        // Rank node size by in-degree
        RankingController rankingController = Lookup.getDefault().lookup(RankingController.class);
        Ranking degreeRanking = rankingController.getModel().getRanking(Ranking.NODE_ELEMENT, Ranking.INDEGREE_RANKING);
        AbstractSizeTransformer sizeTransformer = (AbstractSizeTransformer) rankingController.getModel().getTransformer(Ranking.NODE_ELEMENT, org.gephi.ranking.api.Transformer.RENDERABLE_SIZE);
        sizeTransformer.setMinSize(5.0f);
        sizeTransformer.setMaxSize(40.0f);
        rankingController.transform(degreeRanking, sizeTransformer);

        // Finally, the preview model
        PreviewController previewController = Lookup.getDefault().lookup(PreviewController.class);
        PreviewModel previewModel = previewController.getModel();
        previewModel.getProperties().putValue(PreviewProperty.SHOW_NODE_LABELS, Boolean.TRUE);
        previewModel.getProperties().putValue(PreviewProperty.NODE_LABEL_COLOR, new DependantOriginalColor(Color.BLACK));
        previewModel.getProperties().putValue(PreviewProperty.NODE_LABEL_FONT, previewModel.getProperties().getFontValue(PreviewProperty.NODE_LABEL_FONT).deriveFont(8f)); // 8f = point size; an int argument would be treated as a font style, not a size
        previewModel.getProperties().putValue(PreviewProperty.EDGE_CURVED, Boolean.FALSE);
        previewModel.getProperties().putValue(PreviewProperty.EDGE_OPACITY, 50);
        previewModel.getProperties().putValue(PreviewProperty.EDGE_RADIUS, 10f);
        previewModel.getProperties().putValue(PreviewProperty.BACKGROUND_COLOR, Color.TRANSLUCENT); // note: Color.TRANSLUCENT is an int constant, not a Color
        previewController.refreshPreview();

        System.out.println("starting export");
        ExportController ec = Lookup.getDefault().lookup(ExportController.class);
        try {
            ec.exportFile(new File(filename + ".svg"));
        } catch (IOException ex) {
            ex.printStackTrace();
            return;
        }
        System.out.println("Done.");
    }

    public static void main(String[] args) {
        Gephifyer g = new Gephifyer();
        g.doStuff(args);
    }
}
At its heart, it's the various demos' code cobbled together to do what I want it to do.
I expect a graph that looks like this SVG file, but the result is this SVG file. That is, the problem is that the above code yields a graph where the arrows aren't fully connected to the nodes, making it look a bit messy. I can't for the life of me tell where in the code that is happening, though I guess it would be in the preview model part.
previewModel.getProperties().putValue(PreviewProperty.EDGE_RADIUS, 10f); sets the distance of the arrows from the node.
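If that's the culprit, a minimal sketch of the fix is to shrink or zero out that value so the edges and their arrows are drawn right up to the node border:

previewModel.getProperties().putValue(PreviewProperty.EDGE_RADIUS, 0f); // edges now touch the nodes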
Related
I am using Eclipse Luna (64-bit), Maven, and the docx4j API for PDF conversion, with a template letter format onto which I want to place my HTML content. This template is saved in my database.
I want to include a hyperlink in the PDF, so my users can click this link and open it in their browser.
This is my main class:
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.faces.bean.ManagedBean;
import javax.faces.bean.ManagedProperty;
import javax.faces.bean.ViewScoped;
import javax.faces.model.SelectItem;
import org.apache.commons.lang.StringUtils;
import org.docx4j.Docx4J;
import org.docx4j.XmlUtils;
import org.docx4j.convert.in.xhtml.XHTMLImporterImpl;
import org.docx4j.jaxb.Context;
import org.docx4j.openpackaging.exceptions.Docx4JException;
import org.docx4j.openpackaging.packages.WordprocessingMLPackage;
import org.docx4j.openpackaging.parts.relationships.Namespaces;
import org.docx4j.openpackaging.parts.WordprocessingML.MainDocumentPart;
import org.docx4j.wml.Body;
import org.docx4j.wml.BooleanDefaultTrue;
import org.docx4j.wml.Document;
import org.docx4j.wml.P;
import org.docx4j.wml.PPrBase;
import org.docx4j.wml.R;
import org.docx4j.wml.Text;
import org.primefaces.context.RequestContext;
import org.primefaces.model.DefaultStreamedContent;
import org.primefaces.model.StreamedContent;
import org.primefaces.model.UploadedFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class LetterMaintenanceBean extends BaseManagedBean implements Serializable {

    public StreamedContent previewLetter() {
        String content = this.letter.getHtmlContent();
        // Replace each <a href="...">...</a> element with a styled span showing the URL //Digvijay
        String regex = "<a href=(\"[^\"]*\")[^<]*</a>"; //Digvijay
        Pattern p = Pattern.compile(regex); //Digvijay
        System.out.println("p: " + p);
        Matcher m = p.matcher(content); //Digvijay
        System.out.println("m: " + m);
        content = m.replaceAll("<strong><u><span style=\"color:#0099cc\">$1</span></u></strong>"); //Digvijay
        System.out.println("regex1: " + regex); //Digvijay
        Map<String, String> previewExamples = this.getPreviewExamples(this.letter.getMessageTypeCode());
        for (Entry<String, String> example : previewExamples.entrySet()) {
            if (StringUtils.isNotBlank(example.getKey()) && StringUtils.isNotBlank(example.getValue())) {
                content = content.replace(example.getKey(), example.getValue());
                System.out.println("content after map date");
            }
        }
        System.out.println("content1:: " + content);
        if (!content.startsWith("<div>")) {
            content = "<div>" + content + "</div>";
        }
        // Docx4j does not understand HTML entities for special characters, so replace them with literal characters.
        content = content.replace("&nbsp;", " ");
        content = content.replace("&rsquo;", "’");
        content = content.replaceAll("</p>", "</p><br/>");
        content = content.replaceAll("\"</span>", "</span>");
        InputStream stream = null;
        try {
            System.out.println("content:" + content);
            if (this.letter.getHtmlContent() != null && this.letter.getWfTemplateId() != null) {
                stream = new ByteArrayInputStream(this.HTMLToPDF(content.getBytes(), this.letter.getWfTemplateId()));
            } else {
                stream = new ByteArrayInputStream(this.HTMLToPDFWithoutTemplate(content.getBytes()));
            }
            StreamedContent file = new DefaultStreamedContent(stream, "application/pdf", this.letter.getLetterName() + ".pdf");
            return file;
        } catch (LetterMaintenanceException e) {
            this.processServiceException(e);
            StreamedContent file = new DefaultStreamedContent(
                    new ByteArrayInputStream(
                            "Unable to process your request. If the problem persists, please contact application support."
                                    .getBytes()), "application/pdf", "error" + ".pdf");
            return file;
        } catch (Exception e) {
            this.processGenericException(e);
            StreamedContent file = new DefaultStreamedContent(
                    new ByteArrayInputStream(
                            "Unable to process your request. If the problem persists, please contact application support."
                                    .getBytes()), "application/pdf", "error" + ".pdf");
            return file;
        }
    }
This is my HTMLToPDF() method:
    private byte[] HTMLToPDF(final byte[] htmlContent, final String templateId)
            throws Docx4JException, LetterMaintenanceException {
        LetterMaintenanceDelegate letterMaintenanceDelegate = new LetterMaintenanceDelegate();
        Template template = letterMaintenanceDelegate.retrieveTemplateById(templateId);
        if (template == null || template.getContent() == null) {
            throw new LetterMaintenanceException("Could not retrieve template");
        }
        InputStream is = new ByteArrayInputStream(template.getContent());
        WordprocessingMLPackage templatePackage = WordprocessingMLPackage.load(is);
        // Convert HTML to docx
        XHTMLImporterImpl XHTMLImporter = new XHTMLImporterImpl(templatePackage);
        XHTMLImporter.setHyperlinkStyle("Hyperlink");
        templatePackage
                .getMainDocumentPart()
                .getContent()
                .addAll(XHTMLImporter.convert(new ByteArrayInputStream(htmlContent), null));
        // Add content of content docx to template
        templatePackage.getMainDocumentPart().getContent().addAll(templatePackage.getMainDocumentPart().getContent());
        // Handle page breaks
        templatePackage = this.handlePagebreaksInDocx(templatePackage);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        Docx4J.toPDF(templatePackage, baos);
        return baos.toByteArray();
    }
}
In this code I am trying to convert HTML (with an href tag) to a PDF file, and in the PDF output the hyperlink must work.
The current output of this program is a PDF, but there are no working links in it.
How can I activate my links?
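For reference, a hedged sketch of input that docx4j's XHTML importer can turn into a live link, assuming the template's styles part really defines the "Hyperlink" style passed to setHyperlinkStyle, and with mainDocumentPart standing in for the part being filled; note the <a> element must still be intact by the time the content reaches XHTMLImporterImpl:

String xhtml = "<div><p>Visit <a href=\"https://example.com\">our website</a> for details.</p></div>";
mainDocumentPart.getContent().addAll(XHTMLImporter.convert(new ByteArrayInputStream(xhtml.getBytes()), null));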
I am trying to use GeoTools to load a shapefile into Java and then check whether a point is located within one of the polygons in the shapefile.
The problem is that I am not able to load the shapefile and therefore cannot move forward.
Here is my code so far:
public static void main(String[] args) {
    // create a sample coordinate
    double lon = -105.0;
    double lat = 40.0;
    GeometryFactory geometryFactory = new GeometryFactory(new PrecisionModel(PrecisionModel.maximumPreciseValue), 8307);
    Geometry point = geometryFactory.createPoint(new Coordinate(lon, lat));

    String path = System.getProperty("user.dir") + "/continent_shp/continent_shp.shp";
    File file = new File(path);
    try {
        Map<String, Serializable> connectParameters = new HashMap<String, Serializable>();
        // load shapefile ---- does not work !!!!!!!!
        connectParameters.put("url", file.toURI().toURL());
        connectParameters.put("create spatial index", true);
        DataStore dataStore = DataStoreFinder.getDataStore(connectParameters);

        FeatureSource featureSource = dataStore.getFeatureSource("POLYGON");
        FeatureCollection collection = (FeatureCollection) featureSource.getFeatures();
        FeatureIterator iterator = collection.features();
        while (iterator.hasNext()) {
            Feature feature = iterator.next();
            Geometry sourceGeometry = feature.getDefaultGeometry();
            boolean isContained = sourceGeometry.contains(point);
            System.out.println(isContained);
        }
    } catch (MalformedURLException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
The problem is that the dataStore variable is null after I try to load the shapefile.
Here are my imports:
import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.net.MalformedURLException;
import java.util.HashMap;
import java.util.Map;
import org.geotools.data.DataStore;
import org.geotools.data.DataStoreFinder;
import org.geotools.data.FeatureSource;
import org.geotools.feature.Feature;
import org.geotools.feature.FeatureCollection;
import org.geotools.feature.FeatureIterator;
import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.GeometryFactory;
import com.vividsolutions.jts.geom.PrecisionModel;
Can anyone shed some light on this issue?
Any help would be appreciated.
Thank you.
The most likely problem is that you don't have a shapefile DataStore implementation available on your classpath. Try the following method to check which stores are available:

public void fetchAvailableDataStores() {
    Iterator<DataStoreFactorySpi> it = DataStoreFinder.getAllDataStores();
    while (it.hasNext()) {
        DataStoreFactorySpi fac = it.next();
        System.out.println(fac.getDisplayName());
    }
}
Another thing that can go wrong is the File to URL conversion, especially if there are spaces in the filename or path. Try using DataUtilities.fileToURL(file) instead.
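A minimal sketch of that variant, reusing the question's connectParameters map:

URL url = DataUtilities.fileToURL(file); // org.geotools.data.DataUtilities handles spaces and escaping
connectParameters.put("url", url);
DataStore dataStore = DataStoreFinder.getDataStore(connectParameters);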
This worked for me:

connectParameters.put("url", file.toURI().toURL());
connectParameters.put("create spatial index", Boolean.TRUE);
ShapefileDataStoreFactory dataStoreFactory = new ShapefileDataStoreFactory();
ShapefileDataStore store = (ShapefileDataStore) dataStoreFactory.createNewDataStore(connectParameters);
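One caveat worth checking: createNewDataStore is aimed at creating a brand-new shapefile. For opening an existing one, the factory's createDataStore method is the more usual call, e.g. (a sketch with the same parameter map):

DataStore store = dataStoreFactory.createDataStore(connectParameters); // assumes continent_shp.shp already exists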
So I'm trying to use jsoup to scrape Reddit for images, but when I scrape certain subreddits such as /r/wallpaper, I get a 429 error and am wondering how to fix this. Totally understand that this code is horrible and this is a pretty noob question, but I'm completely new to this. Anyways:
import java.io.*;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.util.Scanner;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
public class javascraper {

    public static void main(String[] args) throws MalformedURLException {
        Scanner scan = new Scanner(System.in);
        System.out.println("Where do you want to store the files?");
        String folderpath = scan.next();
        System.out.println("What subreddit do you want to scrape?");
        String subreddit = scan.next();
        new File(folderpath + "/" + subreddit).mkdir(); // create the folder before the name is turned into a URL
        subreddit = "http://reddit.com/r/" + subreddit;
        try {
            // fetch the page over HTTP
            Document doc = Jsoup.connect(subreddit).timeout(0).get();
            // get page title
            String title = doc.title();
            System.out.println("title : " + title);
            // get all links
            Elements links = doc.select("a[href]");
            for (Element link : links) {
                // get value from href attribute
                String checkLink = link.attr("href");
                if (imgCheck(checkLink)) { // checks whether the link points to an image
                    System.out.println("link : " + link.attr("href"));
                    downloadImages(checkLink, folderpath);
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static boolean imgCheck(String http) {
        // "jpeg" has no leading period so links like "foo.jpeg" still match
        return http.contains(".png") || http.contains("gfycat") || http.contains(".jpg")
                || http.contains("jpeg") || http.contains(".gif");
    }

    private static void downloadImages(String src, String folderpath) throws IOException {
        // extract the name of the image from the src attribute
        int indexname = src.lastIndexOf("/");
        if (indexname == src.length() - 1) { // strip a trailing slash, if any
            src = src.substring(0, indexname);
            indexname = src.lastIndexOf("/");
        }
        String name = src.substring(indexname, src.length());
        System.out.println(name);
        // open a URL stream and copy it byte by byte into the target file
        URL url = new URL(src);
        InputStream in = url.openStream();
        OutputStream out = new BufferedOutputStream(new FileOutputStream(folderpath + name));
        for (int b; (b = in.read()) != -1;) {
            out.write(b);
        }
        out.close();
        in.close();
    }
}
Your issue is caused by the fact that your scraper is violating reddit's API rules. Error 429 means "Too many requests" – you're requesting too many pages too fast.
You can make one request every 2 seconds, and you also need to set a proper user agent (the format they recommend is <platform>:<app ID>:<version string> (by /u/<reddit username>)). As it currently stands, your code runs too fast and doesn't specify one, so it's going to be severely rate-limited.
To fix it, first off, add this to the start of your class, before the main method:
public static final String USER_AGENT = "<PUT YOUR USER AGENT HERE>";
(Make sure to specify an actual user agent).
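For example, a hypothetical value following reddit's recommended format might look like:

public static final String USER_AGENT = "desktop:com.example.javascraper:v0.1 (by /u/your_username)";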
Then, change this (in downloadImages)
URL url = new URL(src);
InputStream in = url.openStream();
to this:
URLConnection connection = (new URL(src)).openConnection();
try {
    Thread.sleep(2000); // delay to comply with rate limiting
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
}
connection.setRequestProperty("User-Agent", USER_AGENT);
InputStream in = connection.getInputStream();
You'll also want to change this (in main)
Document doc = Jsoup.connect(subreddit).timeout(0).get();
to this:
Document doc = Jsoup.connect(subreddit).userAgent(USER_AGENT).timeout(0).get();
Then your code should stop running into that error.
Note that using reddit's API (i.e., /r/subreddit.json instead of /r/subreddit) would probably make this project easier, but it isn't required and your current code will work.
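If you do go the API route, a hedged sketch of fetching the JSON listing with jsoup (parsing the JSON is left out) could look like:

String json = Jsoup.connect("https://www.reddit.com/r/wallpaper.json")
        .userAgent(USER_AGENT)
        .ignoreContentType(true) // the response is JSON, not HTML
        .execute()
        .body();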
As you can look up on Wikipedia, the 429 status code tells you that you have sent too many requests:
The user has sent too many requests in a given amount of time. Intended for use with rate limiting schemes.
A solution would be to slow down your scraper. There are several ways to do this; one would be to use Thread.sleep.
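For example, a minimal sketch that pauses between requests:

try {
    Thread.sleep(2000); // wait two seconds before the next request
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
}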
I'm trying to extract establishment data from the Google Places API. It used to work initially, but after moving the extraction into its own method (instead of doing it in the main method), the program crashes. When I debugged it, it gets stuck in a JDK method called park (in the LockSupport class). Reading about it, this apparently happens when there is more than one thread and there are synchronization problems. I'm very new at this and I don't know how to solve this in my code. In my mind there is only one thread in this code, but I'm pretty sure I'm wrong. Please help. It crashes in a "for" loop commented below. Thanks so much!
package laundry;
import java.util.ArrayList;
import java.util.List;
import se.walkercrou.places.GooglePlaces;
import se.walkercrou.places.Param;
import se.walkercrou.places.Place;
import java.io.FileWriter; //add to import list
import java.io.IOException;
import java.io.Writer;
import static java.lang.Math.sqrt;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.microedition.location.Coordinates;
import se.walkercrou.places.exception.GooglePlacesException;
import se.walkercrou.places.exception.RequestDeniedException;
public class Laundry {

    public static void main(String[] args) throws IOException {
        List<Place> detailedPlaces = new ArrayList<>();
        List<double[]> circlesPoint = new ArrayList<>();
        double radio = 10; // search radius in km
        Coordinates startingPoint = new Coordinates(-38.182476, 144.552079, 0); // Geelong, south-west corner of the grid
        Coordinates finalPoint = new Coordinates(-37.574381, 145.415879, 0); // north-east of Melbourne
        GooglePlaces cliente = new GooglePlaces("keyof googlplaces");
        MyResult result1 = exploreGrid(startingPoint, finalPoint, radio, detailedPlaces, circlesPoint, cliente);
        writeResultsCircles(result1.getPoints(), "c:\\outputCircles.txt");
        writeResultsPlaces(result1.getPlaces(), "c:\\outputPlaces.txt");
    }

    private static MyResult exploreGrid(Coordinates SWpoint, Coordinates NEpoint, double rad, List<Place> lugares, List<double[]> points, GooglePlaces client) {
        int iterationRow = 0;
        Coordinates workingPoint = new Coordinates(SWpoint.getLatitude(), SWpoint.getLongitude(), (float) 0.0);
        List<Place> places = new ArrayList<>();
        while (workingPoint.getLatitude() < NEpoint.getLatitude()) {
            while (workingPoint.getLongitude() < NEpoint.getLongitude()) {
                try {
                    places = client.getNearbyPlaces(workingPoint.getLatitude(), workingPoint.getLongitude(), rad * 1000, GooglePlaces.MAXIMUM_RESULTS, Param.name("types").value("laundry"));
                    if (places.size() == 60) { // the getNearbyPlaces result cap was hit
                        iterationRow = 1;
                    }
                    for (Place place : places) {
                        lugares.add(place.getDetails()); // here is where it crashes
                    }
                } catch (GooglePlacesException ex) {
                    System.out.println(ex.getCause());
                }
                double[] prePoint = {workingPoint.getLatitude(), workingPoint.getLongitude(), rad};
                points.add(prePoint);
                workingPoint.setLongitude(workingPoint.getLongitude() + rad * sqrt(3) * 0.01134787);
            }
            iterationRow++;
            if (isEven(iterationRow)) {
                workingPoint.setLongitude(SWpoint.getLongitude());
            } else {
                workingPoint.setLongitude(SWpoint.getLongitude() + rad * sqrt(3) * 0.01134787 / 2);
            }
            workingPoint.setLatitude(workingPoint.getLatitude() + rad * 3 / 2 * 0.00899416);
        }
        return new MyResult(lugares, points);
    }
}
I have images of codes that I want to decode. How can I use zxing so that I can specify the image location and get the decoded text back, and in case the decoding fails (it will for some images; that's the project), have it report an error?
How can I set up zxing on my Windows machine? I downloaded the jar file, but I don't know where to start. I understand I'll have to write code that reads the image and supplies it to the library's reader method, but a guide on how to do that would be very helpful.
I was able to do it. I downloaded the source and added the following code. A bit rustic, but it gets the job done.
import com.google.zxing.NotFoundException;
import com.google.zxing.ChecksumException;
import com.google.zxing.FormatException;
import com.google.zxing.BarcodeFormat;
import com.google.zxing.DecodeHintType;
import com.google.zxing.Reader;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.Result;
import com.google.zxing.LuminanceSource;
import com.google.zxing.client.j2se.BufferedImageLuminanceSource;
import com.google.zxing.common.HybridBinarizer;
import java.awt.image.BufferedImage;
import javax.imageio.ImageIO;
import java.io.File;
import java.io.IOException;
import java.util.*;
import com.google.zxing.qrcode.QRCodeReader;
class qr {

    public static void main(String[] args) {
        Reader xReader = new QRCodeReader();
        BufferedImage dest = null;
        try {
            dest = ImageIO.read(new File(args[0]));
        } catch (IOException e) {
            System.out.println("Cannot load input image");
            return; // without an image there is nothing to decode
        }
        LuminanceSource source = new BufferedImageLuminanceSource(dest);
        BinaryBitmap bitmap = new BinaryBitmap(new HybridBinarizer(source));
        // restrict decoding to QR codes and ask the reader to try harder
        Vector<BarcodeFormat> barcodeFormats = new Vector<BarcodeFormat>();
        barcodeFormats.add(BarcodeFormat.QR_CODE);
        HashMap<DecodeHintType, Object> decodeHints = new HashMap<DecodeHintType, Object>(3);
        decodeHints.put(DecodeHintType.POSSIBLE_FORMATS, barcodeFormats);
        decodeHints.put(DecodeHintType.TRY_HARDER, Boolean.TRUE);
        Result result = null;
        try {
            result = xReader.decode(bitmap, decodeHints);
            System.out.println("Code Decoded");
            String text = result.getText();
            System.out.println(text);
        } catch (NotFoundException e) {
            System.out.println("Decoding Failed");
        } catch (ChecksumException e) {
            System.out.println("Checksum error");
        } catch (FormatException e) {
            System.out.println("Wrong format");
        }
    }
}
The project includes a class called CommandLineRunner which you can simply call from the command line. You can also look at its source to see how it works and reuse it.
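For example, a sketch of an invocation on Windows (the exact jar names depend on the release you downloaded; use : as the separator instead of ; on Linux/macOS):

java -cp core.jar;javase.jar com.google.zxing.client.j2se.CommandLineRunner myimage.png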
There is nothing to install or set up. It's a library. Typically you don't download the jar but declare it as a dependency in your Maven-based project.
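For example, a sketch of the declaration (pick whatever release is current; the javase artifact pulls in core):

<dependency>
    <groupId>com.google.zxing</groupId>
    <artifactId>javase</artifactId>
    <version>3.3.0</version>
</dependency>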
If you just want to send an image to decode, use http://zxing.org/w/decode.jspx