Converting Docx4j to PDF via documents4j - Java

Java code to convert a Word document to PDF:
package com.sonakshi;

import com.documents4j.api.DocumentType;
import com.documents4j.api.IConverter;
import com.documents4j.job.LocalConverter;
import org.apache.commons.io.output.ByteArrayOutputStream;

import java.io.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class Hello {
    public static void main(String[] args) throws IOException, ExecutionException, InterruptedException {
        ByteArrayOutputStream bo = new ByteArrayOutputStream();
        InputStream in = new BufferedInputStream(new FileInputStream("/home/sonakshi_user/Documents/WordDocToConvert.docx"));
        // Build a local converter that drives an MS Office installation on this machine
        IConverter converter = LocalConverter.builder()
                .baseFolder(new File("/home/sonakshi_user/Documents/"))
                .workerPool(20, 25, 2, TimeUnit.SECONDS)
                .processTimeout(5, TimeUnit.SECONDS)
                .build();
        // Schedule the docx-to-PDF conversion and wait for it to finish
        Future<Boolean> conversion = converter
                .convert(in).as(DocumentType.MS_WORD)
                .to(bo).as(DocumentType.PDF)
                .prioritizeWith(1000) // optional
                .schedule();
        conversion.get();
        // Write the converted bytes to the output file
        try (OutputStream outputStream = new FileOutputStream("/home/sonakshi_user/Documents/Output.pdf")) {
            bo.writeTo(outputStream);
        } catch (IOException e) {
            e.printStackTrace();
        }
        in.close();
        bo.close();
    }
}
I am getting the exception below during the conversion:
log4j:WARN No appenders could be found for logger (org.zeroturnaround.exec.ProcessExecutor).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
Exception in thread "main" java.lang.IllegalStateException: class com.documents4j.conversion.msoffice.MicrosoftWordBridge could not be created by a (File, long, TimeUnit) constructor
at com.documents4j.conversion.ExternalConverterDiscovery.make(ExternalConverterDiscovery.java:33)
at com.documents4j.conversion.ExternalConverterDiscovery.makeAll(ExternalConverterDiscovery.java:43)
at com.documents4j.conversion.ExternalConverterDiscovery.loadConfiguration(ExternalConverterDiscovery.java:86)
at com.documents4j.conversion.DefaultConversionManager.<init>(DefaultConversionManager.java:22)
at com.documents4j.job.LocalConverter.makeConversionManager(LocalConverter.java:79)
at com.documents4j.job.LocalConverter.<init>(LocalConverter.java:51)
at com.documents4j.job.LocalConverter$Builder.build(LocalConverter.java:186)
at com.sonakshi.Hello.main(Hello.java:20)
Caused by: java.lang.reflect.InvocationTargetException
at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:490)
at com.documents4j.conversion.ExternalConverterDiscovery.make(ExternalConverterDiscovery.java:31)
... 7 more
Caused by: com.documents4j.throwables.ConverterAccessException: Unable to run script: /home/sonakshi_user/Documents/word_start1713780447.vbs
at com.documents4j.conversion.AbstractExternalConverter.runNoArgumentScript(AbstractExternalConverter.java:76)
at com.documents4j.conversion.msoffice.AbstractMicrosoftOfficeBridge.runNoArgumentScript(AbstractMicrosoftOfficeBridge.java:51)
at com.documents4j.conversion.msoffice.AbstractMicrosoftOfficeBridge.tryStart(AbstractMicrosoftOfficeBridge.java:34)
at com.documents4j.conversion.msoffice.MicrosoftWordBridge.startUp(MicrosoftWordBridge.java:44)
at com.documents4j.conversion.msoffice.MicrosoftWordBridge.<init>(MicrosoftWordBridge.java:39)
... 12 more
Caused by: org.zeroturnaround.exec.ProcessInitException: Could not execute [cmd, /S, /C, ""/home/sonakshi_user/Documents/word_start1713780447.vbs""] in /home/sonakshi_user/Documents. Error=2, No such file or directory
at org.zeroturnaround.exec.ProcessInitException.newInstance(ProcessInitException.java:80)
at org.zeroturnaround.exec.ProcessExecutor.invokeStart(ProcessExecutor.java:1002)
at org.zeroturnaround.exec.ProcessExecutor.startInternal(ProcessExecutor.java:970)
at org.zeroturnaround.exec.ProcessExecutor.execute(ProcessExecutor.java:906)
at com.documents4j.conversion.AbstractExternalConverter.runNoArgumentScript(AbstractExternalConverter.java:72)
... 16 more
Caused by: java.io.IOException: Cannot run program "cmd" (in directory "/home/sonakshi_user/Documents"): error=2, No such file or directory
at java.base/java.lang.ProcessBuilder.start(ProcessBuilder.java:1128)
at java.base/java.lang.ProcessBuilder.start(ProcessBuilder.java:1071)
at org.zeroturnaround.exec.ProcessExecutor.invokeStart(ProcessExecutor.java:997)
... 19 more
Caused by: java.io.IOException: error=2, No such file or directory
at java.base/java.lang.ProcessImpl.forkAndExec(Native Method)
at java.base/java.lang.ProcessImpl.<init>(ProcessImpl.java:340)
at java.base/java.lang.ProcessImpl.start(ProcessImpl.java:271)
at java.base/java.lang.ProcessBuilder.start(ProcessBuilder.java:1107)
... 21 more
Exception in thread "Shutdown hook: com.documents4j.job.LocalConverter" java.lang.NullPointerException
at com.documents4j.job.LocalConverter.shutDown(LocalConverter.java:100)
at com.documents4j.job.ConverterAdapter$ConverterShutdownHook.run(ConverterAdapter.java:134)
I have included many JAR files as well.
Please specify if I need to use a specific one.
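The stack trace itself points at the likely root cause: LocalConverter starts Microsoft Word through a VBS script launched via cmd (note "Could not execute [cmd, /S, /C, ...]" and "error=2, No such file or directory"), so it can only work on Windows with Microsoft Office installed, whereas the paths here are on Linux. A common approach is to run a documents4j conversion server on a Windows machine and talk to it with RemoteConverter. The sketch below is illustrative only; the server address http://localhost:9998, the scratch folder, and the timeout are placeholder assumptions.

import com.documents4j.api.DocumentType;
import com.documents4j.api.IConverter;
import com.documents4j.job.RemoteConverter;
import java.io.File;
import java.util.concurrent.TimeUnit;

public class RemoteConversionSketch {
    public static void main(String[] args) {
        // Placeholder assumptions: a documents4j conversion server is already
        // running at http://localhost:9998 on a Windows host with MS Office.
        IConverter converter = RemoteConverter.builder()
                .baseFolder(new File("/tmp/documents4j"))
                .requestTimeout(10, TimeUnit.SECONDS)
                .baseUri("http://localhost:9998")
                .build();
        boolean ok = converter
                .convert(new File("/home/sonakshi_user/Documents/WordDocToConvert.docx")).as(DocumentType.MS_WORD)
                .to(new File("/home/sonakshi_user/Documents/Output.pdf")).as(DocumentType.PDF)
                .execute();
        System.out.println("Converted: " + ok);
        converter.shutDown();
    }
}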

Related

FileNotFoundException from new CSVReaderHeaderAware

Referencing the quick start for OpenCSV, I am having trouble opening a file which the OS shows as present, and which exists() confirms.
code:
package net.bounceme.dur.basexfromjaxb.csv;

import com.opencsv.CSVReaderHeaderAware;
import java.io.File;
import java.io.FileReader;
import java.net.URI;
import java.util.Map;
import java.util.logging.Logger;

public class ReaderForCVS {

    private static final Logger LOG = Logger.getLogger(ReaderForCVS.class.getName());

    public ReaderForCVS() {
    }

    public void unmarshal(URI inputURI) throws Exception {
        LOG.info(inputURI.toString());
        File encryptFile = new File(inputURI);
        System.out.println(encryptFile.exists());
        Map<String, String> values = new CSVReaderHeaderAware(new FileReader("file:/home/thufir/jaxb/input.csv")).readMap();
    }
}
file not found:
thufir@dur:~/NetBeansProjects/BaseXFromJAXB$
thufir@dur:~/NetBeansProjects/BaseXFromJAXB$ gradle run
> Task :run FAILED
Jan 10, 2019 1:47:50 PM net.bounceme.dur.basexfromjaxb.csv.ReaderForCVS unmarshal
INFO: file:/home/thufir/jaxb/input.csv
true
Exception in thread "main" java.io.FileNotFoundException: file:/home/thufir/jaxb/input.csv (No such file or directory)
at java.io.FileInputStream.open0(Native Method)
at java.io.FileInputStream.open(FileInputStream.java:195)
at java.io.FileInputStream.<init>(FileInputStream.java:138)
at java.io.FileInputStream.<init>(FileInputStream.java:93)
at java.io.FileReader.<init>(FileReader.java:58)
at net.bounceme.dur.basexfromjaxb.csv.ReaderForCVS.unmarshal(ReaderForCVS.java:23)
at net.bounceme.dur.basexfromjaxb.App.marshalCSV(App.java:24)
at net.bounceme.dur.basexfromjaxb.App.main(App.java:16)
FAILURE: Build failed with an exception.
* What went wrong:
Execution failed for task ':run'.
> Process 'command '/usr/lib/jvm/java-8-openjdk-amd64/bin/java'' finished with non-zero exit value 1
* Try:
Run with --stacktrace option to get the stack trace. Run with --info or --debug option to get more log output. Run with --scan to get full insights.
* Get more help at https://help.gradle.org
BUILD FAILED in 1s
3 actionable tasks: 2 executed, 1 up-to-date
thufir@dur:~/NetBeansProjects/BaseXFromJAXB$
thanks to:
https://stackoverflow.com/a/18552188/262852
Why this works I don't know:
public void unmarshal(URI inputURI) throws Exception {
    FileReader f = new FileReader(new File(inputURI));
    Map<String, String> values = new CSVReaderHeaderAware(f).readMap();
}
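A likely explanation: the FileReader(String) constructor treats its argument as a literal file-system path, so the file: scheme becomes part of the file name and the open fails, while new File(URI) converts the file: URI into the real path /home/thufir/jaxb/input.csv before opening it. A small sketch contrasting the two (it assumes the input.csv from the question exists):

import java.io.File;
import java.io.FileReader;
import java.net.URI;

public class UriVsPathSketch {
    public static void main(String[] args) throws Exception {
        URI uri = URI.create("file:/home/thufir/jaxb/input.csv");

        // new File(URI) resolves the file: URI to /home/thufir/jaxb/input.csv,
        // so this reader opens the actual file (assuming it exists).
        FileReader works = new FileReader(new File(uri));
        works.close();

        // FileReader(String) treats its argument as a literal path, so it
        // looks for a file literally named "file:/home/thufir/jaxb/input.csv"
        // and throws FileNotFoundException.
        FileReader fails = new FileReader("file:/home/thufir/jaxb/input.csv");
    }
}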

Apache Spark eventLog configuration on Windows giving an error

I am using Spark 1.5 on Windows. I haven't installed any separate Hadoop binaries.
I am running a master and a single worker.
It's a simple HelloWorld program, shown below:
package com.java.spark;

import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;

public class HelloWorld implements Serializable {

    private static final long serialVersionUID = -7926281781224763077L;

    public static void main(String[] args) {
        // Local mode
        //SparkConf sparkConf = new SparkConf().setAppName("HelloWorld").setMaster("local");
        SparkConf sparkConf = new SparkConf().setAppName("HelloWorld").setMaster("spark://192.168.1.106:7077")
                .set("spark.eventLog.enabled", "true")
                .set("spark.eventLog.dir", "file:///D:/SparkEventLogsHistory");
                //.set("spark.eventLog.dir", "/work/");
                // tried many combinations above but all give errors
        JavaSparkContext ctx = new JavaSparkContext(sparkConf);
        String[] arr = new String[] { "John", "Paul", "Gavin", "Rahul", "Angel" };
        List<String> inputList = Arrays.asList(arr);
        JavaRDD<String> inputRDD = ctx.parallelize(inputList);
        inputRDD.foreach(new VoidFunction<String>() {
            public void call(String input) throws Exception {
                System.out.println(input);
            }
        });
    }
}
The exception I am getting is:
Exception in thread "main" java.io.IOException: Cannot run program "cygpath": CreateProcess error=2, The system cannot find the file specified
at java.lang.ProcessBuilder.start(Unknown Source)
at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
at org.apache.hadoop.util.Shell.run(Shell.java:188)
at org.apache.hadoop.fs.FileUtil$CygPathCommand.<init>(FileUtil.java:412)
at org.apache.hadoop.fs.FileUtil.makeShellPath(FileUtil.java:438)
at org.apache.hadoop.fs.FileUtil.makeShellPath(FileUtil.java:465)
at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:592)
at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:584)
at org.apache.hadoop.fs.FilterFileSystem.setPermission(FilterFileSystem.java:420)
at org.apache.spark.scheduler.EventLoggingListener.start(EventLoggingListener.scala:130)
at org.apache.spark.SparkContext.<init>(SparkContext.scala:541)
at org.apache.spark.api.java.JavaSparkContext.<init>(JavaSparkContext.scala:61)
at com.java.spark.HelloWorld.main(HelloWorld.java:28)
Caused by: java.io.IOException: CreateProcess error=2, The system cannot find the file specified
at java.lang.ProcessImpl.create(Native Method)
at java.lang.ProcessImpl.<init>(Unknown Source)
at java.lang.ProcessImpl.start(Unknown Source)
... 13 more
16/04/01 20:13:24 INFO ShutdownHookManager: Shutdown hook called
Does anyone have any idea how to resolve this exception, so that Spark can pick up the event logs from the local directory?
If I don't configure eventLog.dir, the exception changes to:
Exception in thread "main" java.io.FileNotFoundException: File file:/H:/tmp/spark-events does not exist
at org.apache.hadoop.fs.RawLocalFileSystem.getFileStatus(RawLocalFileSystem.java:468)
at org.apache.hadoop.fs.FilterFileSystem.getFileStatus(FilterFileSystem.java:373)
at org.apache.spark.scheduler.EventLoggingListener.start(EventLoggingListener.scala:100)
at org.apache.spark.SparkContext.<init>(SparkContext.scala:541)
at org.apache.spark.api.java.JavaSparkContext.<init>(JavaSparkContext.scala:61)
at com.java.spark.HelloWorld.main(HelloWorld.java:28)
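Two hedged observations rather than a definitive fix: the cygpath failure comes from the Hadoop classes on the classpath expecting Cygwin-style shell tools on Windows (Hadoop builds with native Windows support, i.e. winutils, avoid this), and the second stack trace shows that Spark does not create the event-log directory for you; it must already exist. A minimal sketch that creates the directory up front, reusing the D:/SparkEventLogsHistory path from the question:

import java.io.File;
import java.io.IOException;

public class EnsureEventLogDir {
    public static void main(String[] args) throws IOException {
        // Spark's EventLoggingListener expects this directory to exist already;
        // it will not create it. Path reused from the question's configuration.
        File eventLogDir = new File("D:/SparkEventLogsHistory");
        if (!eventLogDir.isDirectory() && !eventLogDir.mkdirs()) {
            throw new IOException("Could not create " + eventLogDir);
        }
        // ...then build the SparkConf and JavaSparkContext as in the question.
    }
}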

Java with HDFS file read/write

I am new to Hadoop and Java. I have to read from and write to a *.txt file stored on HDFS in my remote Cloudera distribution. For this, I have written this small Java program:
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadHadoopFileData {

    public static void main(String[] args) throws IOException, URISyntaxException {
        Configuration configuration = new Configuration();
        FileSystem hdfs = FileSystem.get(new URI("hdfs://admin:H4d00p@172.16.10.124:8888"), configuration);
        Path file = new Path("hdfs://admin:H4d00p@172.16.10.124:8888/user/admin/Data/Tlog.txt");
        try {
            BufferedReader br = new BufferedReader(new InputStreamReader(hdfs.open(file)));
            String line;
            line = br.readLine();
            while (line != null) {
                System.out.println(line);
                line = br.readLine();
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
But when the line BufferedReader br = new BufferedReader(new InputStreamReader(hdfs.open(file))); executes, I run into this error:
java.io.IOException: Failed on local exception: com.google.protobuf.InvalidProtocolBufferException: Protocol message tag had invalid wire type.; Host Details : local host is: "KWTLT02221/169.254.208.16"; destination host is: "172.16.104.124":8888;
at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:772)
at org.apache.hadoop.ipc.Client.call(Client.java:1472)
at org.apache.hadoop.ipc.Client.call(Client.java:1399)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:232)
at com.sun.proxy.$Proxy9.getBlockLocations(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getBlockLocations(ClientNamenodeProtocolTranslatorPB.java:254)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:483)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
at com.sun.proxy.$Proxy10.getBlockLocations(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient.callGetBlockLocations(DFSClient.java:1220)
at org.apache.hadoop.hdfs.DFSClient.getLocatedBlocks(DFSClient.java:1210)
at org.apache.hadoop.hdfs.DFSClient.getLocatedBlocks(DFSClient.java:1200)
at org.apache.hadoop.hdfs.DFSInputStream.fetchLocatedBlocksAndGetLastBlockLength(DFSInputStream.java:271)
at org.apache.hadoop.hdfs.DFSInputStream.openInfo(DFSInputStream.java:238)
at org.apache.hadoop.hdfs.DFSInputStream.<init>(DFSInputStream.java:231)
at org.apache.hadoop.hdfs.DFSClient.open(DFSClient.java:1498)
at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:302)
at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:298)
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
at org.apache.hadoop.hdfs.DistributedFileSystem.open(DistributedFileSystem.java:298)
at org.apache.hadoop.fs.FileSystem.open(FileSystem.java:766)
at ReadHadoopFileData.main(ReadHadoopFileData.java:26)
Caused by: com.google.protobuf.InvalidProtocolBufferException: Protocol message tag had invalid wire type.
at com.google.protobuf.InvalidProtocolBufferException.invalidWireType(InvalidProtocolBufferException.java:99)
at com.google.protobuf.UnknownFieldSet$Builder.mergeFieldFrom(UnknownFieldSet.java:498)
at com.google.protobuf.UnknownFieldSet$Builder.mergeFrom(UnknownFieldSet.java:461)
at com.google.protobuf.UnknownFieldSet$Builder.mergeFrom(UnknownFieldSet.java:579)
at com.google.protobuf.UnknownFieldSet$Builder.mergeFrom(UnknownFieldSet.java:280)
at com.google.protobuf.CodedInputStream.readGroup(CodedInputStream.java:240)
at com.google.protobuf.UnknownFieldSet$Builder.mergeFieldFrom(UnknownFieldSet.java:488)
at com.google.protobuf.GeneratedMessage.parseUnknownField(GeneratedMessage.java:193)
at org.apache.hadoop.ipc.protobuf.RpcHeaderProtos$RpcResponseHeaderProto.<init>(RpcHeaderProtos.java:2207)
at org.apache.hadoop.ipc.protobuf.RpcHeaderProtos$RpcResponseHeaderProto.<init>(RpcHeaderProtos.java:2165)
at org.apache.hadoop.ipc.protobuf.RpcHeaderProtos$RpcResponseHeaderProto$1.parsePartialFrom(RpcHeaderProtos.java:2295)
at org.apache.hadoop.ipc.protobuf.RpcHeaderProtos$RpcResponseHeaderProto$1.parsePartialFrom(RpcHeaderProtos.java:2290)
at com.google.protobuf.AbstractParser.parsePartialFrom(AbstractParser.java:200)
at com.google.protobuf.AbstractParser.parsePartialDelimitedFrom(AbstractParser.java:241)
at com.google.protobuf.AbstractParser.parseDelimitedFrom(AbstractParser.java:253)
at com.google.protobuf.AbstractParser.parseDelimitedFrom(AbstractParser.java:259)
at com.google.protobuf.AbstractParser.parseDelimitedFrom(AbstractParser.java:49)
at org.apache.hadoop.ipc.protobuf.RpcHeaderProtos$RpcResponseHeaderProto.parseDelimitedFrom(RpcHeaderProtos.java:3167)
at org.apache.hadoop.ipc.Client$Connection.receiveRpcResponse(Client.java:1072)
at org.apache.hadoop.ipc.Client$Connection.run(Client.java:966)
Could someone help me get this resolved, please? I have been at this for a day now.
I figured out the solution for this error. It looks like I was using the wrong port: I had taken the port number from the Hue URL (misled by different sources).
If I choose the port number defined by the configuration "NameNode Service RPC Port" (dfs.namenode.servicerpc-address) on the NameNode in Cloudera Manager, it works fine.
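As an illustrative sketch of that fix: the 8020 below is only a common default for the NameNode RPC port and is an assumption; the real value comes from dfs.namenode.servicerpc-address in Cloudera Manager. Passing the user name as a separate argument also avoids embedding credentials in the URI.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadHadoopFileDataFixed {
    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        // 8020 is only a placeholder: use the NameNode RPC port reported by
        // Cloudera Manager, not the Hue web UI port.
        FileSystem hdfs = FileSystem.get(new URI("hdfs://172.16.10.124:8020"), configuration, "admin");
        Path file = new Path("/user/admin/Data/Tlog.txt");
        // ...open and read the file as in the original program.
    }
}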

BIRT CSV emitter plugin error

I am trying to get a CSV from the report using this well-known plugin and the example from its documentation, but it throws an exception. I wonder why, because I just copied most of the code from the doc.
My code is:
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.util.logging.Level;
import org.eclipse.birt.core.exception.BirtException;
import org.eclipse.birt.core.framework.Platform;
import org.eclipse.birt.report.engine.api.EngineConfig;
import org.eclipse.birt.report.engine.emitter.csv.CSVRenderOption;
import org.eclipse.birt.report.engine.api.IReportEngine;
import org.eclipse.birt.report.engine.api.IReportEngineFactory;
import org.eclipse.birt.report.engine.api.IReportRunnable;
import org.eclipse.birt.report.engine.api.IRunAndRenderTask;

public class RunReport {

    static void runReport() throws FileNotFoundException, BirtException {
        String resourcePath = "C:\\Users\\hpsa\\workspace\\My Reports\\";
        FileInputStream fs = new FileInputStream(resourcePath + "new_report_1.rptdesign");
        IReportEngine engine = null;
        EngineConfig config = new EngineConfig();
        config.setEngineHome("C:\\birtre\\birt-runtime-4_3_2\\");
        config.setLogConfig("C:\\birtre\\", Level.FINE);
        config.setResourcePath(resourcePath);
        Platform.startup(config);
        IReportEngineFactory factory = (IReportEngineFactory) Platform.createFactoryObject(IReportEngineFactory.EXTENSION_REPORT_ENGINE_FACTORY);
        engine = factory.createReportEngine(config);
        engine.changeLogLevel(Level.FINE);
        IReportRunnable design = engine.openReportDesign(fs);
        IRunAndRenderTask task = engine.createRunAndRenderTask(design);
        CSVRenderOption csvOption = new CSVRenderOption();
        String format = CSVRenderOption.OUTPUT_FORMAT_CSV;
        csvOption.setOutputFormat(format);
        csvOption.setOutputFileName("newBIRTcsv.csv");
        csvOption.setShowDatatypeInSecondRow(true);
        csvOption.setExportTableByName("SecondTable");
        csvOption.setDelimiter("\t");
        csvOption.setReplaceDelimiterInsideTextWith("-");
        task.setRenderOption(csvOption);
        task.setEmitterID("org.eclipse.birt.report.engine.emitter.csv");
        task.run();
        task.close();
        Platform.shutdown();
        System.out.println("Report Generated Successfully!!");
    }

    public static void main(String[] args) {
        try {
            runReport();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
I am getting the exception:
Exception in thread "main" java.lang.NoClassDefFoundError: org/eclipse/core/runtime/CoreException
at org.eclipse.birt.core.framework.Platform.createPlatformLauncher(Platform.java:115)
at org.eclipse.birt.core.framework.Platform.startup(Platform.java:74)
at com.demshin.birttest.RunReport.runReport(RunReport.java:26)
at com.demshin.birttest.RunReport.main(RunReport.java:55)
Caused by: java.lang.ClassNotFoundException: org.eclipse.core.runtime.CoreException
at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
at java.security.AccessController.doPrivileged(Native Method)
at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
... 4 more
I found the org.eclipse.core.runtime package and registered it in the build path, but I am still getting the same exception. Indeed, there is no CoreException.class in the org.eclipse.core.runtime package. What am I doing wrong?
Setting the engine home is deprecated, and most of the time it will prevent the Platform from starting, as in your case. Remove this line:
config.setEngineHome("C:\\birtre\\birt-runtime-4_3_2\\");
You just have to ensure the BIRT runtime 4.3.2 is on the classpath of your context. Furthermore, I would recommend generating a native format such as PDF first, and then trying the CSV format.
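A minimal sketch of the startup with that line removed (same paths as in the question; it assumes the BIRT runtime 4.3.2 JARs are already on the application classpath):

import java.util.logging.Level;
import org.eclipse.birt.core.exception.BirtException;
import org.eclipse.birt.core.framework.Platform;
import org.eclipse.birt.report.engine.api.EngineConfig;

public class StartPlatformSketch {
    // Start the BIRT Platform without setEngineHome; the BIRT runtime 4.3.2
    // JARs are assumed to be on the application classpath instead.
    static void startPlatform(String resourcePath) throws BirtException {
        EngineConfig config = new EngineConfig();
        config.setLogConfig("C:\\birtre\\", Level.FINE); // same log folder as the question
        config.setResourcePath(resourcePath);
        Platform.startup(config);
    }
}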
I tried a lot of variants of importing different JARs, but finally I found this odd solution here:
go to the Project Explorer, right-click Properties for the project, go to
Java Build Path, click the Add External JARs button, then select all the
libraries under the C:\birt_runtime\birt-runtime-2_1_0\ReportEngine\lib\ folder
This made a mess there, but at least it starts.

calling an exe file inside jar

I am trying to call the "dspdf.exe" packaged inside the JAR file where this smartpdf class exists. I plan to extract it to a temporary location and delete it when the program ends. However, this doesn't seem to work; any help will be appreciated.
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.omg.CORBA.portable.InputStream;

public class smartpdf {

    static String url = "";
    static String output = "output.pdf";

    public static void main(String[] args) throws IOException {
        gui mygui = new gui(); // gui will call the generate function when the user selects
    }

    public static void generate() throws IOException {
        InputStream src = (InputStream) smartpdf.class.getResource("dspdf.exe").openStream();
        File exeTempFile = File.createTempFile("dspdf", ".exe");
        FileOutputStream out = new FileOutputStream(exeTempFile);
        byte[] temp = new byte[32768];
        int rc;
        while ((rc = src.read(temp)) > 0)
            out.write(temp, 0, rc);
        src.close();
        out.close();
        exeTempFile.deleteOnExit();
        Runtime.getRuntime().exec(exeTempFile.toString() + " " + url + " " + output);
        //Runtime.getRuntime().exec("dspdf " + url + " " + output);
    }
}
EDIT:
The error that I am getting:
Microsoft Windows XP [Version 5.1.2600]
(C) Copyright 1985-2001 Microsoft Corp.
Exception in thread "main" java.lang.reflect.InvocationTargetException
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source)
at java.lang.reflect.Method.invoke(Unknown Source)
at org.eclipse.jdt.internal.jarinjarloader.JarRsrcLoader.main(JarRsrcLoa
der.java:56)
Caused by: java.lang.ClassCastException: sun.net.www.protocol.jar.JarURLConnecti
on$JarURLInputStream cannot be cast to org.omg.CORBA.portable.InputStream
at smartpdf.generate(smartpdf.java:18)
at smartpdf.main(smartpdf.java:14)
... 5 more
You use the wrong InputStream. Change it to java.io.InputStream.
Why do you use org.omg.CORBA.portable.InputStream instead of a java.io.BufferedInputStream wrapped around the input stream from the resource? I mean this:
BufferedInputStream inputstream = new BufferedInputStream(smartpdf.class.getResourceAsStream(...));
Do the same for your file output stream: use a BufferedOutputStream.
Don't use
class.getResource(...).openStream();
but use
class.getResourceAsStream(...);
Note also (once you've resolved the InputStream issue) that you should be consuming your spawned process stdout and stderr, otherwise the spawned process may block. See this answer for more details.
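A hedged sketch of that advice, using the same exePath/url/output values as the question; ProcessBuilder with redirectErrorStream is just one common way to merge and drain the output:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

public class RunAndDrain {
    // Run the extracted executable and drain its output so the child process
    // cannot block on a full pipe buffer. exePath, url and output mirror the
    // variables in the question.
    static int run(String exePath, String url, String output) throws IOException, InterruptedException {
        ProcessBuilder pb = new ProcessBuilder(exePath, url, output);
        pb.redirectErrorStream(true); // merge stderr into stdout
        Process p = pb.start();
        BufferedReader reader = new BufferedReader(new InputStreamReader(p.getInputStream()));
        String line;
        while ((line = reader.readLine()) != null) {
            System.out.println(line); // consume (or log) everything the exe prints
        }
        return p.waitFor();
    }
}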
