JACOB Presentation.Export: PowerPoint can't save ^0 to ^1 (Java)

public boolean PptExport2Png(String filePath, String exportPath) {
    boolean flag = false;
    ActiveXComponent component = new ActiveXComponent("PowerPoint.Application");
    try {
        Dispatch presentations = component.getProperty("Presentations").toDispatch();
        Dispatch presentation = Dispatch.call(presentations, "Open", new Variant(filePath),
                new Variant(-1), new Variant(-1), new Variant(0))
                .toDispatch();
        Dispatch.call(presentation, "Export", new Variant(exportPath), new Variant(720), new Variant(540));
        flag = true;
    } catch (Exception e) {
        System.out.println("|||" + e.toString());
    } finally {
        // close PowerPoint so the COM instance does not linger
        component.invoke("Quit");
    }
    return flag;
}
public static void main(String[] strs) throws Exception {
    String filePath = "D://ppttest.ppt";
    String pngPath = "D://folder22";
    JacobPptUtils jac = new JacobPptUtils(filePath, true);
    jac.PptExport2Png(filePath, pngPath);
}
I hit this error when using JACOB to export the PPT, and now I have no idea how to fix it. Please give me some advice on how to deal with it.
Here is what I have tried:
1. Changing how the file path is expressed

public static void main(String args[]) throws Exception {
    String source = "D:/test.ppt";
    String dest = "D:/xxx.pdf";
    File file = new File(source);
    if (!file.exists()) {
        throw new Exception("error");
    }
    ppt2Pdf(source, dest);
}
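One thing worth checking, just an assumption since the question does not show where the error comes from: PowerPoint's COM method Presentation.Export takes a FilterName argument ("PNG", "JPG", ...) before the width and height, and the export folder must already exist. A minimal sketch of the export call with both points addressed, reusing the presentation, Dispatch and Variant objects from the code above:

// assumes `presentation` was obtained with Dispatch.call(presentations, "Open", ...) as above
File exportDir = new File(exportPath);
if (!exportDir.exists()) {
    exportDir.mkdirs(); // Export fails when the target folder does not exist
}
// Presentation.Export(Path, FilterName, ScaleWidth, ScaleHeight)
Dispatch.call(presentation, "Export",
        new Variant(exportPath), new Variant("PNG"),
        new Variant(720), new Variant(540));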

Related

Java: reset for loop so files can be read from the beginning in an infinite loop

I have methods to read files and check their extensions:
// using FileChooser to select the folder we want to get photos from
static final File dir = new File("D:\\Pictures\\NatGeoPaper2020\\3\\");
// array of supported extensions
static final String[] EXTENSIONS = new String[]{"jpg", "jpeg", "png"};
// filter to identify images by extension
static final FilenameFilter IMAGE_FILTER = new FilenameFilter() {
    @Override
    public boolean accept(final File dir, final String name) {
        for (final String ext : EXTENSIONS) {
            if (name.endsWith("." + ext)) {
                return true;
            }
        }
        return false;
    }
};
In main I invoke those methods: basically, read all the images and show them to the user as a slideshow.
if (dir.isDirectory()) {
    for (final File f : dir.listFiles(IMAGE_FILTER)) {
        BufferedImage img = null;
        try {
            img = ImageIO.read(f);
            SPI.INSTANCE.SystemParametersInfo(
                    new UINT_PTR(SPI.SPI_SETDESKWALLPAPER),
                    new UINT_PTR(0),
                    dir + "/" + f.getName(),
                    new UINT_PTR(SPI.SPIF_UPDATEINIFILE | SPI.SPIF_SENDWININICHANGE));
            Thread.sleep(6000);
        } catch (IOException | InterruptedException e) {
            throw new Error(e);
        }
    }
}
Now my problem is: how can I "reset" my for loop in main to get an infinite loop? I just want to change the wallpaper every 6 seconds indefinitely, without rerunning the program.
Put a while(true) loop around it.
You can do it as follows:
if (dir.isDirectory()) {
    while (true) {
        for (final File f : dir.listFiles(IMAGE_FILTER)) {
            BufferedImage img = null;
            try {
                img = ImageIO.read(f);
                SPI.INSTANCE.SystemParametersInfo(
                        new UINT_PTR(SPI.SPI_SETDESKWALLPAPER),
                        new UINT_PTR(0),
                        dir + "/" + f.getName(),
                        new UINT_PTR(SPI.SPIF_UPDATEINIFILE | SPI.SPIF_SENDWININICHANGE));
                Thread.sleep(6000);
            } catch (IOException | InterruptedException e) {
                throw new Error(e);
            }
        }
    }
}
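If you prefer to avoid the blocking Thread.sleep inside the loop, a ScheduledExecutorService gives the same 6-second cadence. This is only an alternative sketch, not part of the answer above; dir, IMAGE_FILTER, SPI and UINT_PTR are the same fields and JNA helpers as in the question, and it needs the java.util.concurrent imports (Executors, ScheduledExecutorService, TimeUnit, AtomicInteger):

// run the wallpaper change every 6 seconds without an explicit infinite loop
ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
File[] images = dir.listFiles(IMAGE_FILTER);   // assumed non-empty
AtomicInteger index = new AtomicInteger(0);
scheduler.scheduleAtFixedRate(() -> {
    // cycle through the images, wrapping around at the end
    File f = images[index.getAndIncrement() % images.length];
    SPI.INSTANCE.SystemParametersInfo(
            new UINT_PTR(SPI.SPI_SETDESKWALLPAPER),
            new UINT_PTR(0),
            dir + "/" + f.getName(),
            new UINT_PTR(SPI.SPIF_UPDATEINIFILE | SPI.SPIF_SENDWININICHANGE));
}, 0, 6, TimeUnit.SECONDS);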

Lucene IndexWriter.commit() doesn't finish on Ubuntu

Here is the initialization code:
public class Main {
public void index(String input_path, String index_dir, String separator, String extension, String field, DataHandler handler) {
Index index = new Index(handler);
index.initWriter(index_dir, new StandardAnalyzer());
index.run(input_path, field, extension, separator);
}
public List<?> search(String input_path, String index_dir, String separator, String extension, String field, DataHandler handler) {
Search search = new Search(handler);
search.initSearcher(index_dir, new StandardAnalyzer());
return search.runUsingFiles(input_path, field, extension, separator);
}
@SuppressWarnings("unchecked")
public static void main(String[] args) {
String lang = "en-US";
String dType = "data";
String train = "res/input/" +lang+ "/" +dType +"/train/";
String test = "res/input/"+ lang+ "/" +dType+ "/test/";
String separator = "\\|";
String extension = "csv";
String index_dir = "res/index/" +lang+ "." +dType+ ".index";
String output_file = "res/result/" +lang+ "." +dType+ ".output.json";
String searched_field = "utterance";
Main main = new Main();
DataHandler handler = new DataHandler();
main.index(train, index_dir, separator, extension, searched_field, handler);
//List<JSONObject> result = (List<JSONObject>) main.search(test, index_dir, separator, extension, searched_field, handler);
//handler.writeOutputJson(result, output_file);
}
}
It is my code
public class Index {
private IndexWriter writer;
private DataHandler handler;
public Index(DataHandler handler) {
this.handler = handler;
}
public Index() {
this(new DataHandler());
}
public void initWriter(String index_path, Directory store, Analyzer analyzer) {
IndexWriterConfig config = new IndexWriterConfig(analyzer);
try {
this.writer = new IndexWriter(store, config);
} catch (IOException e) {
e.printStackTrace();
}
}
public void initWriter(String index_path, Analyzer analyzer) {
try {
initWriter(index_path, FSDirectory.open(Paths.get(index_path)), analyzer);
} catch (IOException e) {
e.printStackTrace();
}
}
public void initWriter(String index_path) {
List<String> stopWords = Arrays.asList();
CharArraySet stopSet = new CharArraySet(stopWords, false);
initWriter(index_path, new StandardAnalyzer(stopSet));
}
@SuppressWarnings("unchecked")
public void indexDocs(List<?> datas, String field) throws IOException {
FieldType fieldType = new FieldType();
FieldType fieldType2 = new FieldType();
fieldType.setStored(true);
fieldType.setTokenized(true);
fieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
fieldType2.setStored(true);
fieldType2.setTokenized(false);
fieldType2.setIndexOptions(IndexOptions.DOCS);
for(int i = 0 ; i < datas.size() ; i++) {
Map<String,String> temp = (Map<String,String>) datas.get(i);
Document doc = new Document();
for(String key : temp.keySet()) {
if(key.equals(field))
continue;
doc.add(new Field(key, temp.get(key), fieldType2));
}
doc.add(new Field(field, temp.get(field), fieldType));
this.writer.addDocument(doc);
}
}
public void run(String path, String field, String extension, String separator) {
List<File> files = this.handler.getInputFiles(path, extension);
List<?> data = this.handler.readDocs(files, separator);
try {
System.out.println("start index");
indexDocs(data, field);
this.writer.commit();
this.writer.close();
System.out.println("done");
} catch (IOException e) {
e.printStackTrace();
}
}
public void run(String path) {
run(path, "search_field", "csv", "\t");
}
}
I made a simple search module using Java and Lucene.
The module consists of two phases: index and search.
In the index phase, it reads CSV files, converts each row to a Document, and adds it to the IndexWriter with IndexWriter.addDocument().
Finally, it calls IndexWriter.commit().
It works fine on my local PC (Windows),
but on an Ubuntu PC IndexWriter.commit() never finishes.
IndexWriter.flush() doesn't return either.
What is the problem?
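Not an answer from the thread, but a diagnostic sketch that usually narrows down a commit() that never returns: enable the writer's info stream so Lucene logs its flush and merge activity, then take a thread dump (jstack <pid>) while commit() is stuck. On Ubuntu, a leftover write.lock in the index directory or an index directory sitting on a network or shared/virtualized filesystem are common causes. Only the setInfoStream line is new; the rest is the initWriter from the Index class above:

public void initWriter(String index_path, Directory store, Analyzer analyzer) {
    IndexWriterConfig config = new IndexWriterConfig(analyzer);
    // log flush/merge/commit activity to stdout so a "hanging" commit shows what it is doing
    config.setInfoStream(System.out);
    try {
        this.writer = new IndexWriter(store, config);
    } catch (IOException e) {
        e.printStackTrace();
    }
}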

Sentiment Analysis with OpenNLP on a text file

I have 100 sentences of test data. I am trying to run sentiment analysis on them but no matter what input String I am using, I am only getting a positive estimation of the input string. Each sentence gets a return value of 1.0. Any idea why this might be happening? Even if I use negative example inputs from the .txt file, the result is a positive value.
public class StartSentiment
{
public static DoccatModel model = null;
public static String[] analyzedTexts = {"Good win"};
public static void main(String[] args) throws IOException {
// begin of sentiment analysis
trainModel();
for(int i=0; i<analyzedTexts.length;i++){
classifyNewText(analyzedTexts[i]);}
}
private static String readFile(String pathname) throws IOException {
File file = new File(pathname);
StringBuilder fileContents = new StringBuilder((int)file.length());
Scanner scanner = new Scanner(file);
String lineSeparator = System.getProperty("line.separator");
try {
while(scanner.hasNextLine()) {
fileContents.append(scanner.nextLine() + lineSeparator);
}
return fileContents.toString();
} finally {
scanner.close();
}
}
public static void trainModel() {
MarkableFileInputStreamFactory dataIn = null;
try {
dataIn = new MarkableFileInputStreamFactory(
new File("src\\sentiment\\Results.txt"));
ObjectStream<String> lineStream = null;
lineStream = new PlainTextByLineStream(dataIn, StandardCharsets.UTF_8);
ObjectStream<DocumentSample> sampleStream = new DocumentSampleStream(lineStream);
TrainingParameters tp = new TrainingParameters();
tp.put(TrainingParameters.CUTOFF_PARAM, "1");
tp.put(TrainingParameters.ITERATIONS_PARAM, "100");
DoccatFactory df = new DoccatFactory();
model = DocumentCategorizerME.train("en", sampleStream, tp, df);
} catch (Exception e) {
e.printStackTrace();
} finally {
if (dataIn != null) {
try {
} catch (Exception e2) {
e2.printStackTrace();
}
}
}
}
public static void classifyNewText(String text) throws IOException{
DocumentCategorizerME myCategorizer = new DocumentCategorizerME(model);
double[] outcomes = myCategorizer.categorize(text.split(" ") );
String category = myCategorizer.getBestCategory(outcomes);
if (category.equalsIgnoreCase("1")){
System.out.print("The text is positive");
} else {
System.out.print("The text is negative");
}
}
}
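A frequent cause of a categorizer that always answers the same label is training data where (almost) every line carries that label, or lines that are not in the format DocumentSampleStream expects: one document per line, the category first, then whitespace, then the text. That is an assumption about Results.txt, which is not shown here. Printing the whole score distribution instead of only the best category makes the problem visible; getAllResults is part of the DocumentCategorizer API, and the rest mirrors classifyNewText above:

public static void classifyNewText(String text) throws IOException {
    DocumentCategorizerME myCategorizer = new DocumentCategorizerME(model);
    double[] outcomes = myCategorizer.categorize(text.split(" "));
    // prints every category with its score, e.g. "1[0.97]  0[0.03]"
    System.out.println(myCategorizer.getAllResults(outcomes));
    System.out.println("best category: " + myCategorizer.getBestCategory(outcomes));
}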

OpenNLP train Thai language

I am experimenting with OpenNLP 1.7.2 and maxent-3.0.0.jar to train for the Thai language. Below is the code that reads the Thai training data and creates the .bin model.
public class TrainPerson {
public static void main(String[] args) throws IOException {
String trainFile = "/Documents/workspace/ThaiOpenNLP/bin/thaiPerson.train";
String modelFile = "/Documents/workspace/ThaiOpenNLP/bin/th-ner-person.bin";
writePersonModel(trainFile, modelFile);
}
private static void writePersonModel(String trainFile, String modelFile)
throws FileNotFoundException, IOException {
Charset charset = Charset.forName("UTF-8");
InputStreamFactory fileInputStream = new MarkableFileInputStreamFactory(new File(trainFile));
ObjectStream<String> lineStream = new PlainTextByLineStream(fileInputStream, charset);
ObjectStream<NameSample> sampleStream = new NameSampleDataStream(lineStream);
TokenNameFinderModel model;
try {
model = NameFinderME.train("th", "person", sampleStream , TrainingParameters.defaultParams(), new TokenNameFinderFactory());
} finally {
sampleStream.close();
}
BufferedOutputStream modelOut = null;
try {
modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
model.serialize(modelOut);
} finally {
if (modelOut != null) {
modelOut.close();
}
}
}}
The Thai data looks like the attached trainingData file.
I am using the output model to detect person names, as shown in the program below. It fails to identify the name.
public class ThaiPersonNameFinder {
static String modelFile = "/Users/avinashpaula/Documents/workspace/ThaiOpenNLP/bin/th-ner-person.bin";
public static void main(String[] args) {
try {
InputStream modelIn = new FileInputStream(new File(modelFile));
TokenNameFinderModel model = new TokenNameFinderModel(modelIn);
NameFinderME nameFinder = new NameFinderME(model);
String sentence[] = new String[]{
"จอห์น",
"30",
"ปี",
"จะ",
"เข้าร่วม",
"ก",
"เริ่มต้น",
"ขึ้น",
"บน",
"มกราคม",
"."
};
Span nameSpans[] = nameFinder.find(sentence);
for (int i = 0; i < nameSpans.length; i++) {
System.out.println(nameSpans[i]);
}
}
catch (IOException e) {
e.printStackTrace();
}
}
}
What am I doing wrong?
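Since the training file itself is not shown, this is only an assumption about its contents: NameFinderME expects whitespace-tokenized sentences, one per line, with every person marked by <START:person> ... <END> tags and an empty line between documents, and the OpenNLP manual recommends on the order of 15,000 sentences before the model tags reliably, so a handful of examples will rarely produce any spans. A training line in that format, built from the test sentence above, would look like this:

<START:person> จอห์น <END> 30 ปี จะ เข้าร่วม ก เริ่มต้น ขึ้น บน มกราคม .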

Running OpenOffice Macro from Java API

I'm trying to write a Java program that will run an OpenOffice Macro. I'm getting this error:
java.lang.RuntimeException:
com.sun.star.script.provider.ScriptFrameworkErrorException: Incorrect
format for Script URI: vnd.sun.star.script:Name of macro
I believe it has something to do with the way that I'm calling the macro (String cmd)
I've searched high and low but can't seem to find any information on this. There are a few posts on the OO forums but none of them seemed to help. Here is some of the code:
public static void main(String[] args) throws BootstrapException {
if(args.length == 0)
{
System.out.println("Must enter a filename");
System.exit(1);
}
try
{
String param = args[0];
//String cmd = "Standard.Conversion.ConvertHTMLToWord?langauge=Basic&location=application";
String cmd = "Name.Of.Macro?langauge=Basic&location=Document";
System.out.println("Running macro on " + param);
Macro macObj = new Macro();
macObj.executeMacro(cmd, new Object[]{param});
System.out.println("Completed");
}
catch(Exception e)
{
System.out.println(e.toString());
//e.printStackTrace();
}
}
Macro Class:
class Macro {
private static final String ooExecPath = "C:/Program Files/OpenOffice.org 3/program";
public Object executeMacro(String strMacroName, Object[] aParams) throws BootstrapException
{
try
{
com.sun.star.uno.XComponentContext xContext;
System.out.println("Connecting to OpenOffice");
xContext = BootstrapSocketConnector.bootstrap(ooExecPath);
System.out.println("Connected to a running instance of OpenOffice");
System.out.println("Trying to execute macro...");
com.sun.star.text.XTextDocument mxDoc = openWriter(xContext);
XScriptProviderSupplier xScriptPS = (XScriptProviderSupplier) UnoRuntime.queryInterface(XScriptProviderSupplier.class, mxDoc);
XScriptProvider xScriptProvider = xScriptPS.getScriptProvider();
XScript xScript = xScriptProvider.getScript("vnd.sun.star.script:"+strMacroName);
short[][] aOutParamIndex = new short[1][1];
Object[][] aOutParam = new Object[1][1];
return xScript.invoke(aParams, aOutParamIndex, aOutParam);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static com.sun.star.text.XTextDocument openWriter(com.sun.star.uno.XComponentContext xContext)
{
com.sun.star.frame.XComponentLoader xCLoader;
com.sun.star.text.XTextDocument xDoc = null;
com.sun.star.lang.XComponent xComp = null;
try {
// get the remote office service manager
com.sun.star.lang.XMultiComponentFactory xMCF =
xContext.getServiceManager();
Object oDesktop = xMCF.createInstanceWithContext(
"com.sun.star.frame.Desktop", xContext);
xCLoader = (com.sun.star.frame.XComponentLoader)
UnoRuntime.queryInterface(com.sun.star.frame.XComponentLoader.class,
oDesktop);
com.sun.star.beans.PropertyValue [] szEmptyArgs =
new com.sun.star.beans.PropertyValue [0];
/*
ArrayList<PropertyValue> props = new ArrayList<PropertyValue>();
PropertyValue p = new PropertyValue();
p.Name = "Hidden";
p.Value = new Boolean(true);
props.add(p);
PropertyValue[] properties = new PropertyValue[props.size()];
props.toArray(properties);
String strDoc = "private:factory/swriter";
xComp = xCLoader.loadComponentFromURL(strDoc, "_blank", 0, properties);
*/
String strDoc = "private:factory/swriter";
xComp = xCLoader.loadComponentFromURL(strDoc, "_blank", 0, szEmptyArgs);
xDoc = (com.sun.star.text.XTextDocument)
UnoRuntime.queryInterface(com.sun.star.text.XTextDocument.class,
xComp);
} catch(Exception e){
System.err.println(" Exception " + e);
e.printStackTrace(System.err);
}
return xDoc;
}
}
I suppose your problem is in "Name.Of.Macro": it must be Library.Module.NameOfMacro.
"language=Basic" sets the scripting language, and "location=application" means the macro library is searched in the global OO libraries; use "location=document" when the library lives inside the opened document.
As far as parameters are involved, I use:
XScriptProviderSupplier xScriptPS = (XScriptProviderSupplier) UnoRuntime.queryInterface(XScriptProviderSupplier.class, xComponent);
XScriptProvider xScriptProvider = xScriptPS.getScriptProvider();
XScript xScript = xScriptProvider.getScript("vnd.sun.star.script:"+macroName);
short[][] aOutParamIndex = new short[1][1];
Object[][] aOutParam = new Object[1][1];
Object[] aParams = new String[2];
aParams[0] = myFirstParameterName;
aParams[1] = mySecondParameterName;
@SuppressWarnings("unused")
Object result = xScript.invoke(aParams, aOutParamIndex, aOutParam);
System.out.println("xScript invoke macro " + macroName);
Hope it can be useful, after such a long time... :-(
XScriptProviderSupplier xScriptPS = (XScriptProviderSupplier) UnoRuntime.queryInterface(XScriptProviderSupplier.class, xComponent);
What is xComponent in the above code?
Compare ?langauge=Basic&location=Document
to ?language=Basic&location=Document
The wrong part is "langauge" :D, just swap "au" to "ua". :)
