How to persist changes with docx4j Java library to word document - java

I am using the docx4j Java library for the first time and have some difficulties finding a good reference. What i need to start is a simple Java class to enforce the protection on a Word document in read-only mode. I have com that far that i can read the protection mode and set it. But when saving the Word document the changes are not written to the Word document.
public class Doc4JPOC {
public static void main(String[] args) {
String docName = "/Users/petervannes/Desktop/Unprotected document.docx";
// String docName = "/Users/petervannes/Desktop/Protected document.docx" ;
Doc4JPOC d4j = new Doc4JPOC();
d4j.isProtected(docName);
d4j.protect(docName);
d4j.isProtected(docName);
}
private void protect(String filename) {
try {
WordprocessingMLPackage wordMLPackage = Docx4J.load(new java.io.File(filename));
MainDocumentPart mdp = wordMLPackage.getMainDocumentPart();
Relationship rs = mdp.getRelationshipsPart().getRelationshipByType(Namespaces.SETTINGS);
DocumentSettingsPart dsp = (DocumentSettingsPart) mdp.getRelationshipsPart().getPart(rs);
// Update settings.xml
List<Object> nodes = dsp.getJAXBNodesViaXPath("//w:documentProtection", true);
for (Object obj : nodes) {
CTDocProtect cdtP = ((CTDocProtect) obj);
cdtP.setEnforcement(Boolean.TRUE);
cdtP.setEdit(STDocProtect.READ_ONLY);
}
// Write updated settings.xml to document
wordMLPackage.addTargetPart(dsp);
// wordMLPackage.save(new java.io.File(filename));
Docx4J.save(wordMLPackage, new java.io.File(filename), 0);
System.out.println("Protected document " + filename) ;
} catch (Docx4JException ex) {
Logger.getLogger(Doc4JPOC.class.getName()).log(Level.SEVERE, null, ex);
} catch (JAXBException jex) {
Logger.getLogger(Doc4JPOC.class.getName()).log(Level.SEVERE, null, jex);
}
}
private void isProtected(String filename) {
Boolean isProtectionEnforced = false;
STDocProtect editMode = STDocProtect.NONE;
try {
WordprocessingMLPackage wordMLPackage = Docx4J.load(new java.io.File(filename));
MainDocumentPart mdp = wordMLPackage.getMainDocumentPart();
Relationship rs = mdp.getRelationshipsPart().getRelationshipByType(Namespaces.SETTINGS);
DocumentSettingsPart dsp = (DocumentSettingsPart) mdp.getRelationshipsPart().getPart(rs);
System.out.println("Partname : " + dsp.getPartName());
List<Object> nodes = dsp.getJAXBNodesViaXPath("//w:documentProtection", true);
for (Object obj : nodes) {
CTDocProtect cdtP = ((CTDocProtect) obj);
isProtectionEnforced = cdtP.isEnforcement();
editMode = cdtP.getEdit();
System.out.println("Enforced: " + cdtP.isEnforcement());
System.out.println("Edit: " + cdtP.getEdit());
}
if (isProtectionEnforced) {
System.out.println("Protection is enabled , protection mode is " + editMode.toString());
} else {
System.out.println("Protection is disabled");
}
} catch (Docx4JException ex) {
Logger.getLogger(Doc4JPOC.class.getName()).log(Level.SEVERE, null, ex);
} catch (JAXBException jex) {
Logger.getLogger(Doc4JPOC.class.getName()).log(Level.SEVERE, null, jex);
}
}
}
When executing this class i do get the following output;
Partname : /word/settings.xml
Protection is disabled
Protected document /Users/petervannes/Desktop/Unprotected document.docx
Partname : /word/settings.xml
Protection is disabled
So i suspect that i am not updating the WordprocessingMLPackage or DocumentSettingsPart correctly in the protect method, but have currently no clue where it goes wrong.

Resolved it. Instead of adding the DocumentSettingsPart to the loaded WordprocessingMLPackage. The CTDocProtect instance needs to be used to set the document protection on the content.
CTDocProtect cdtP = new CTDocProtect();
cdtP.setEnforcement(Boolean.TRUE);
cdtP.setEdit(STDocProtect.READ_ONLY);
dsp.getContents().setDocumentProtection(cdtP);
Docx4J.save(wordMLPackage, new java.io.File(filename), 0);

For docx4j v3.3.0, please see http://www.docx4java.org/forums/docx-java-f6/password-set-for-documentprotection-not-accepted-in-msword-t2427.html#p8290

Related

DCM4CHE, Network operations,Handling a C-Move call

Hi I'm trying to make a PACS server using Java. dcm4che appears to be quite popular. But I'm unable to find any good examples about it.
As a starting point I inspected dcmqrscp and it successfully stores a DICOM image. But I cannot manage to handle a C-MOVE call. Here's my CMove handler. It finds requested the DICOM file adds a URL and other stuff, it doesn't throw any exception yet client doesn't receive any files.
private final class CMoveSCPImpl extends BasicCMoveSCP {
private final String[] qrLevels;
private final QueryRetrieveLevel rootLevel;
public CMoveSCPImpl(String sopClass, String... qrLevels) {
super(sopClass);
this.qrLevels = qrLevels;
this.rootLevel = QueryRetrieveLevel.valueOf(qrLevels[0]);
}
#Override
protected RetrieveTask calculateMatches(Association as, PresentationContext pc, final Attributes rq, Attributes keys) throws DicomServiceException {
QueryRetrieveLevel level = QueryRetrieveLevel.valueOf(keys, qrLevels);
try {
level.validateRetrieveKeys(keys, rootLevel, relational(as, rq));
} catch (Exception e) {
e.printStackTrace();
}
String moveDest = rq.getString(Tag.MoveDestination);
final Connection remote = new Connection("reciverAE",as.getSocket().getInetAddress().getHostAddress(), 11113);
if (remote == null)
throw new DicomServiceException(Status.MoveDestinationUnknown, "Move Destination: " + moveDest + " unknown");
List<T> matches = DcmQRSCP.this.calculateMatches(keys);
if (matches.isEmpty())
return null;
AAssociateRQ aarq;
Association storeas = null;
try {
aarq = makeAAssociateRQ(as.getLocalAET(), moveDest, matches);
storeas = openStoreAssociation(as, remote, aarq);
} catch (Exception e) {
e.printStackTrace();
}
BasicRetrieveTask<T> retrieveTask = null;
retrieveTask = new BasicRetrieveTask<T>(Dimse.C_MOVE_RQ, as, pc, rq, matches, storeas, new BasicCStoreSCU<T>());
retrieveTask.setSendPendingRSPInterval(getSendPendingCMoveInterval());
return retrieveTask;
}
private Association openStoreAssociation(Association as, Connection remote, AAssociateRQ aarq)
throws DicomServiceException {
try {
return as.getApplicationEntity().connect(as.getConnection(),
remote, aarq);
} catch (Exception e) {
throw new DicomServiceException(
Status.UnableToPerformSubOperations, e);
}
}
private AAssociateRQ makeAAssociateRQ(String callingAET,
String calledAET, List<T> matches) {
AAssociateRQ aarq = new AAssociateRQ();
aarq.setCalledAET(calledAET);
aarq.setCallingAET(callingAET);
for (InstanceLocator match : matches) {
if (aarq.addPresentationContextFor(match.cuid, match.tsuid)) {
if (!UID.ExplicitVRLittleEndian.equals(match.tsuid))
aarq.addPresentationContextFor(match.cuid,
UID.ExplicitVRLittleEndian);
if (!UID.ImplicitVRLittleEndian.equals(match.tsuid))
aarq.addPresentationContextFor(match.cuid,
UID.ImplicitVRLittleEndian);
}
}
return aarq;
}
private boolean relational(Association as, Attributes rq) {
String cuid = rq.getString(Tag.AffectedSOPClassUID);
ExtendedNegotiation extNeg = as.getAAssociateAC().getExtNegotiationFor(cuid);
return QueryOption.toOptions(extNeg).contains(
QueryOption.RELATIONAL);
}
}
I added the code below to send a DICOM file as a response:
String cuid = rq.getString(Tag.AffectedSOPClassUID);
String iuid = rq.getString(Tag.AffectedSOPInstanceUID);
String tsuid = pc.getTransferSyntax();
try {
DcmQRSCP.this.as=as;
File f = new File("D:\\dcmqrscpTestDCMDir\\1.2.840.113619.2.30.1.1762295590.1623.978668949.886\\1.2.840.113619.2.30.1.1762295590.1623.978668949.887\\1.2.840.113619.2.30.1.1762295590.1623.978668949.888");
FileInputStream in = new FileInputStream(f);
InputStreamDataWriter data = new InputStreamDataWriter(in);
// !1! as.cmove(cuid,1,keys,tsuid,"STORESCU");
as.cstore(cuid,iuid,1,data,tsuid,rspHandlerFactory.createDimseRSPHandler(f));
} catch (Exception e) {
e.printStackTrace();
}
Throws this exception
org.dcm4che3.net.NoRoleSelectionException: No Role Selection for SOP Class 1.2.840.10008.5.1.4.1.2.2.2 - Study Root Query/Retrieve Information Model - MOVE as SCU negotiated
You should add a role to the application instance like:
applicationEntity.addTransferCapability(
new TransferCapability(null, "*", TransferCapability.Role.SCP, "*"));

Read Shapes (Rectangle,Square,Circle,Arrow etc), Clip Arts from MS Word File using java

I am able to get image from ms word table but unable to get shapes and clip-arts.
public static void main(String[] args) throws Exception {
// The path to the documents directory.
try {
String dataDir = "E://test//demo.docx";
generatePicturesAsImages(dataDir);
} catch (Exception e) {
e.printStackTrace();
}
}
public static void generatePicturesAsImages(String sourcePath) {
try {
Document doc = new Document(sourcePath);
ImageSaveOptions options = new ImageSaveOptions(SaveFormat.JPEG);
options.setJpegQuality(100);
options.setResolution(100);
// options.setUseHighQualityRendering(true);
List<ShapeRenderer> pictures = getAllPictures(doc);
if (pictures != null) {
for (int i = 0; i < pictures.size(); i++) {
ShapeRenderer picture = pictures.get(i);
String imageFilePath = sourcePath + "_output_" + i + ".jpeg";
picture.save(imageFilePath, options);
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
private static List<ShapeRenderer> getAllPictures(final Document document) throws Exception {
List<ShapeRenderer> images = null;
#SuppressWarnings("unchecked")
NodeCollection<DrawingML> nodeCollection = document.getChildNodes(NodeType.DRAWING_ML, Boolean.TRUE);
if (nodeCollection.getCount() > 0) {
images = new ArrayList<ShapeRenderer>();
for (DrawingML drawingML : nodeCollection) {
images.add(drawingML.getShapeRenderer());
}
}
return images;
}
Above program is getting images from table so what should i add more to get the shapes.. Please suggest me any help will be appreciate !
You are using an older version of Aspose.Words. If you want to use older version of Aspose.Words, please get the collection of Shape and DrawingML nodes using Document.getChildNodes in your getAllPictures method.
NodeCollection<DrawingML> drwingmlnodes = document.getChildNodes(NodeType.DRAWING_ML, Boolean.TRUE);
NodeCollection<Shape> shapenodes = document.getChildNodes(NodeType.SHAPE, Boolean.TRUE);
Note that we removed the DrawingML from our APIs in Aspose.Words 15.2.0. If you want to use latest version of Aspose.Words v16.5.0, please only use NodeType.SHAPE.
I work with Aspose as Developer evangelist.

How do I add an ICC to an existing PDF document

I have an existing PDF document that is using CMYK colors. It was created using a specific ICC profile, which I have obtained. The colors are obviously different if I open the document with the profile active than without. From what I can tell using a variety of tools, there is no ICC profile embedded in the document. What I would like to do is embed the ICC profile in the PDF so that it can be opened and viewed with the correct colors by third parties. My understanding is that this is possible to do with the PDF format, but nothing I have tried seems to work.
I wrote a small program using PDFBox based on looking at some examples, but it seems to have no effect. I feel like I am missing a step somewhere.
package com.mapsherpa.tools.addicc;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.InputStream;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDDocumentCatalog;
import org.apache.pdfbox.pdmodel.common.PDMetadata;
import org.apache.pdfbox.pdmodel.graphics.color.PDOutputIntent;
import java.io.FileInputStream;
import java.io.IOException;
public class AddICC {
public AddICC() {
// TODO Auto-generated constructor stub
}
public static void main(String[] args) {
AddICC app = new AddICC();
try {
if( args.length != 3) {
app.usage();
} else {
app.doIt(args[0], args[1], args[2]);
}
}
catch (Exception e) {
e.printStackTrace();
}
}
private void doIt(String input, String output, String icc) throws IOException {
// TODO Auto-generated method stub
System.out.printf("Adding %s to %s and saving as %s\n", icc, input, output);
PDDocument doc = null;
try
{
File file = new File(input);
doc = PDDocument.load(file);
PDDocumentCatalog cat = doc.getDocumentCatalog();
PDMetadata metadata = new PDMetadata(doc);
cat.setMetadata(metadata);
InputStream colorProfile = new FileInputStream(icc);
PDOutputIntent oi = new PDOutputIntent(doc, colorProfile);
oi.setInfo("SWOP (Coated), 20%, GCR, None");
oi.setOutputCondition("SWOP (Coated), 20%, GCR, None");
oi.setOutputConditionIdentifier("SWOP (Coated), 20%, GCR, None");
oi.setRegistryName("http://www.color.org");
cat.addOutputIntent(oi);
doc.save(output);
System.out.println("Finished adding color profile");
}
catch (Exception e)
{
System.out.println("Exception processing color profile");
e.printStackTrace();
}
finally
{
if (doc != null) {
doc.close();
}
}
}
private void usage() {
// TODO Auto-generated method stub
System.err.println("Usage: " + this.getClass().getName() + " <input-file> <output-file> <icc-file>");
}
}
I'm not a Java expert but I did manage to get this to run and it seems to do something but I still am not seeing the correct colors and there is no indication using imagemagick or pdfinfo that it has a color profile.
I feel like somehow I should be indicating that the document color space is ICCBased but I can't see any obvious way to do that using the PDFBox API.
Any help would be appreciated (even being told that it won't work!)
EDIT:
I believe that this is working as written in that it adds the required output intent to the document. However, I have also discovered that this is not what I need - I now believe that I need it to add an /ICCBased stream to the PDF - sigh. The updated code below is based on this stackoverflow question's updated createColorSpace function.
private static PDColorSpace createColorSpace( PDDocument doc, ColorSpace cs ) throws IOException
{
PDColorSpace retval = null;
if( cs.isCS_sRGB() )
{
retval = PDDeviceRGB.INSTANCE;
}
else if( cs instanceof ICC_ColorSpace )
{
ICC_ColorSpace ics = (ICC_ColorSpace)cs;
// CREATING MANUALLY THE COS ARR ****************************
COSArray cosArray = new COSArray();
cosArray.add(COSName.ICCBASED);
PDStream pdStream = new PDStream(doc);
cosArray.add(pdStream.getStream());
// USING DIFFERENT CONSTRUTOR *******************************
PDICCBased pdCS = new PDICCBased( cosArray );
retval = pdCS;
COSArray ranges = new COSArray();
for( int i=0; i<cs.getNumComponents(); i++ )
{
ranges.add( new COSFloat( ics.getMinValue( i ) ) );
ranges.add( new COSFloat( ics.getMaxValue( i ) ) );
}
PDStream iccData = pdCS.getPDStream();
OutputStream output = null;
try
{
output = ((COSStream)iccData.getCOSObject()).createFilteredStream();
output.write( ics.getProfile().getData() );
}
finally
{
if( output != null )
{
output.close();
}
}
pdCS.setNumberOfComponents( cs.getNumComponents() );
}
else
{
throw new IOException( "Not yet implemented:" + cs );
}
return retval;
}
private void doIt(String input, String output, String icc) throws IOException {
// TODO Auto-generated method stub
System.out.printf("Adding %s to %s and saving as %s\n", icc, input, output);
PDDocument doc = null;
try
{
File file = new File(input);
doc = PDDocument.load(file);
ICC_ColorSpace iccColorSpace = new ICC_ColorSpace(ICC_Profile.getInstance(icc));
PDColorSpace colorSpace = createColorSpace(doc, iccColorSpace);
doc.save(output);
System.out.println("Finished adding color profile");
}
catch (Exception e)
{
System.out.println("Exception processing color profile");
e.printStackTrace();
}
finally
{
if (doc != null) {
doc.close();
}
}
}
This code now has an exception:
java.io.IOException: Unknown color space number of components:-1
at org.apache.pdfbox.pdmodel.graphics.color.PDICCBased.getAlternateColorSpace(PDICCBased.java:269)
at org.apache.pdfbox.pdmodel.graphics.color.PDICCBased.loadICCProfile(PDICCBased.java:151)
at org.apache.pdfbox.pdmodel.graphics.color.PDICCBased.<init>(PDICCBased.java:89)
at com.mapsherpa.tools.addicc.AddICC.createColorSpace(AddICC.java:65)
at com.mapsherpa.tools.addicc.AddICC.doIt(AddICC.java:109)
at com.mapsherpa.tools.addicc.AddICC.main(AddICC.java:39)
at this line of code:
cosArray.add(pdStream.getStream());
The only difference I can see between this code and the other answer is that I am loading an existing PDF document rather than creating a new empty one.
For testing, I'm using the US Web (Coated) SWOP v2 icc profile from Adobe, but it is the same exception with any profile I test. From my understanding of reading the PDFBox source, it isn't a problem with the profile but with reading the stream from the document (which doesn't have an /ICCBased stream, the whole point of this question :))
EDIT 2: the code above does actually run without exceptions if used with PDFBox 1.8.10 - apparently I had linked in 2.0.0 RC2 without realizing it (total Java newbie).

Error while trying to classify new instance using Java with Weka-No output instance format defined

I am trying to use Weka in my project to classify text documents using a Naïve Bayes classifier. I found the two classes below on this site.
The first class MyFilteredLearner builds, trains, evaluates, and saves the classifier to disk, this all works fine.
The second class MyFilteredClassifier loads the single text string from a text file and makes it into an instance successfully. It also restores the classifier from disk. What it fails to do is to classify the instance with the method classify(), it instead returns the exception message ‘No output instance format defined’.
I have spend ages searching for an answer, have tried installing the developer and stable versions of Weka, but still get the same issue.
Does anybody know what is incorrect in the code or needs to be added/done differently? The file details and code are as follows:
ARFF file (spam.ARFF) used to train the classifier:
#relation sms_test
#attribute spamclass {spam,ham}
#attribute text String
#data
ham,'Go until jurong point, crazy.. Available only in bugis n great world la e buffet...Cine there got amore wat...'
etc……………………………………………………………………
Single line text file (toClassify.txt) for the new instance:
this is spam or not, who knows?
Code of MyFilteredLearner:
public class MyFilteredLearner {
Instances trainData;
StringToWordVector filter;
FilteredClassifier classifier;
public void loadDataset(String fileName) {
try {
BufferedReader reader = new BufferedReader(new FileReader(fileName));
ArffReader arff = new ArffReader(reader);
trainData = arff.getData();
System.out.println("===== Loaded dataset: " + fileName + " =====");
reader.close();
}
catch (IOException e) {
System.out.println("Problem found when reading: " + fileName);
}
}
public void learn() {
try {
trainData.setClassIndex(0);
classifier = new FilteredClassifier();
filter = new StringToWordVector();
filter.setAttributeIndices("last");
classifier.setFilter(filter);
classifier.setClassifier(new NaiveBayes());
classifier.buildClassifier(trainData);
System.out.println("===== Training on filtered (training) dataset done =====");
}
catch (Exception e) {
System.out.println("Problem found when training");
}
}
public void evaluate() {
try {
trainData.setClassIndex(0);
filter = new StringToWordVector();
filter.setAttributeIndices("last");
classifier = new FilteredClassifier();
classifier.setFilter(filter);
classifier.setClassifier(new NaiveBayes());
Evaluation eval = new Evaluation(trainData);
eval.crossValidateModel(classifier, trainData, 4, new Random(1));
System.out.println(eval.toSummaryString());
System.out.println(eval.toClassDetailsString());
System.out.println("===== Evaluating on filtered (training) dataset done =====");
}
catch (Exception e) {
System.out.println("Problem found when evaluating");
}
}
public void saveModel(String fileName) {
try {
ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(fileName));
out.writeObject(classifier);
System.out.println("Saved model: " + out.toString());
out.close();
System.out.println("===== Saved model: " + fileName + "=====");
}
catch (IOException e) {
System.out.println("Problem found when writing: " + fileName);
}
}
}
Code of MyFilteredClassifier:
public class MyFilteredClassifier {
String text;
Instances instances;
FilteredClassifier classifier;
StringToWordVector filter;
public void load(String fileName) {
try {
BufferedReader reader = new BufferedReader(new FileReader(fileName));
String line;
text = "";
while ((line = reader.readLine()) != null) {
text = text + " " + line;
}
System.out.println("===== Loaded text data: " + fileName + " =====");
reader.close();
System.out.println(text);
}
catch (IOException e) {
System.out.println("Problem found when reading: " + fileName);
}
}
public void makeInstance() {
FastVector fvNominalVal = new FastVector(2);
fvNominalVal.addElement("spam");
fvNominalVal.addElement("ham");
Attribute attribute1 = new Attribute("class", fvNominalVal);
Attribute attribute2 = new Attribute("text",(FastVector) null);
FastVector fvWekaAttributes = new FastVector(2);
fvWekaAttributes.addElement(attribute1);
fvWekaAttributes.addElement(attribute2);
instances = new Instances("Test relation", fvWekaAttributes,1);
instances.setClassIndex(0);
DenseInstance instance = new DenseInstance(2);
instance.setValue(attribute2, text);
instances.add(instance);
System.out.println("===== Instance created with reference dataset =====");
System.out.println(instances);
}
public void loadModel(String fileName) {
try {
ObjectInputStream in = new ObjectInputStream(new FileInputStream(fileName));
Object tmp = in.readObject();
classifier = (FilteredClassifier) tmp;
in.close();
System.out.println("===== Loaded model: " + fileName + "=====");
}
catch (Exception e) {
System.out.println("Problem found when reading: " + fileName);
}
}
public void classify() {
try {
double pred = classifier.classifyInstance(instances.instance(0));
System.out.println("===== Classified instance =====");
System.out.println("Class predicted: " + instances.classAttribute().value((int) pred));
}
catch (Exception e) {
System.out.println("Error: " + e.getMessage());
}
}
public static void main(String args[]) {
MyFilteredLearner c = new MyFilteredLearner();
c.loadDataset("spam.ARFF");
c.learn();
c.evaluate();
c.saveModel("spamClassifier.binary");
MyFilteredClassifier c1 = new MyFilteredClassifier();
c1.load("toClassify.txt");
c1.loadModel("spamClassifier.binary");
c1.makeInstance();
c1.classify();
}
}
It seems you change the code from the blog's GitHub repository in one detail and it is the cause of your error:
c.learn();
c.evaluate();
vs
c.evaluate();
c.learn();
The evaluate() method resets the classifier with the line:
classifier = new FilteredClassifier();
but doesn't build a model. The actual evaluation uses a copy of the passed classifier, so the original classifier (the one in your class) remains untrained.
// weka/classifiers/Evaluation.java (method: crossValidateModel)
Classifier copiedClassifier = Classifier.makeCopy(classifier);
copiedClassifier.buildClassifier(train);
So you first build your model, but then overwrite it when evaluating it and then save the uninitialized model. Switch them around so you train it directly before saving it to a file, then it works.

Running OpenOffice Macro from Java API

I'm trying to write a Java program that will run an OpenOffice Macro. I'm getting this error:
java.lang.RuntimeException:
com.sun.star.script.provider.ScriptFrameworkErrorException: Incorrect
format for Script URI: vnd.sun.star.script:Name of macro
I believe it has something to do with the way that I'm calling the macro (String cmd)
I've searched high and low but can't seem to find any information on this. There are a few posts on the OO forums but none of them seemed to help. Here is some of the code:
public static void main(String[] args) throws BootstrapException {
if(args.length == 0)
{
System.out.println("Must enter a filename");
System.exit(1);
}
try
{
String param = args[0];
//String cmd = "Standard.Conversion.ConvertHTMLToWord?langauge=Basic&location=application";
String cmd = "Name.Of.Macro?langauge=Basic&location=Document";
System.out.println("Running macro on " + param);
Macro macObj = new Macro();
macObj.executeMacro(cmd, new Object[]{param}]);
System.out.println("Completed");
}
catch(Exception e)
{
System.out.println(e.toString());
//e.printStackTrace();
}
Macro Class:
class Macro {
private static final String ooExecPath = "C:/Program Files/OpenOffice.org 3/program";
public Object executeMacro(String strMacroName, Object[] aParams) throws BootstrapException
{
try
{
com.sun.star.uno.XComponentContext xContext;
System.out.println("Connecting to OpenOffice");
xContext = BootstrapSocketConnector.bootstrap(ooExecPath);
System.out.println("Connected to a running instance of OpenOffice");
System.out.println("Trying to execute macro...");
com.sun.star.text.XTextDocument mxDoc = openWriter(xContext);
XScriptProviderSupplier xScriptPS = (XScriptProviderSupplier) UnoRuntime.queryInterface(XScriptProviderSupplier.class, mxDoc);
XScriptProvider xScriptProvider = xScriptPS.getScriptProvider();
XScript xScript = xScriptProvider.getScript("vnd.sun.star.script:"+strMacroName);
short[][] aOutParamIndex = new short[1][1];
Object[][] aOutParam = new Object[1][1];
return xScript.invoke(aParams, aOutParamIndex, aOutParam);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static com.sun.star.text.XTextDocument openWriter(com.sun.star.uno.XComponentContext xContext)
{
com.sun.star.frame.XComponentLoader xCLoader;
com.sun.star.text.XTextDocument xDoc = null;
com.sun.star.lang.XComponent xComp = null;
try {
// get the remote office service manager
com.sun.star.lang.XMultiComponentFactory xMCF =
xContext.getServiceManager();
Object oDesktop = xMCF.createInstanceWithContext(
"com.sun.star.frame.Desktop", xContext);
xCLoader = (com.sun.star.frame.XComponentLoader)
UnoRuntime.queryInterface(com.sun.star.frame.XComponentLoader.class,
oDesktop);
com.sun.star.beans.PropertyValue [] szEmptyArgs =
new com.sun.star.beans.PropertyValue [0];
/*
ArrayList<PropertyValue> props = new ArrayList<PropertyValue>();
PropertyValue p = new PropertyValue();
p.Name = "Hidden";
p.Value = new Boolean(true);
props.add(p);
PropertyValue[] properties = new PropertyValue[props.size()];
props.toArray(properties);
String strDoc = "private:factory/swriter";
xComp = xCLoader.loadComponentFromURL(strDoc, "_blank", 0, properties);
*/
String strDoc = "private:factory/swriter";
xComp = xCLoader.loadComponentFromURL(strDoc, "_blank", 0, szEmptyArgs);
xDoc = (com.sun.star.text.XTextDocument)
UnoRuntime.queryInterface(com.sun.star.text.XTextDocument.class,
xComp);
} catch(Exception e){
System.err.println(" Exception " + e);
e.printStackTrace(System.err);
}
return xDoc;
}
}
I suppose your problem is in the "Name.Of.Macro": it must be: Library.Module.NameOfMacro.
"langauge=Basic" of course sets the language name, and "location=application" means the macro library should be searched in the opened document, and not in global OO libraries.
As far as parameters are involved, I use:
XScriptProviderSupplier xScriptPS = (XScriptProviderSupplier) UnoRuntime.queryInterface(XScriptProviderSupplier.class, xComponent);
XScriptProvider xScriptProvider = xScriptPS.getScriptProvider();
XScript xScript = xScriptProvider.getScript("vnd.sun.star.script:"+macroName);
short[][] aOutParamIndex = new short[1][1];
Object[][] aOutParam = new Object[1][1];
Object[] aParams = new String[2];
aParams[0] = myFirstParameterName;
aParams[1] = mySecondParameterName;
#SuppressWarnings("unused")
Object result = xScript.invoke(aParams, aOutParamIndex, aOutParam);
System.out.println("xScript invoke macro " + macroName);
Hope it can be useful, after such long time... :-(
XScriptProviderSupplier xScriptPS = (XScriptProviderSupplier) UnoRuntime.queryInterface(XScriptProviderSupplier.class, xComponent);
What is xComponent in the above code?
Compare: ?langauge=Basic&location=Document"
to: ?language=Basic&location=Document"
wring is: "langauge" :D, swap "au" to "ua". :)

Categories