FileOutputStream - Java

public FileOutputStream(String name) throws FileNotFoundException {
this(name != null ? new File(name) : null, false);
}
public FileOutputStream(String name, boolean append)
throws FileNotFoundException
{
this(name != null ? new File(name) : null, append);
}
public FileOutputStream(File file) throws FileNotFoundException {
this(file, false);
}
public FileOutputStream(File file, boolean append)
throws FileNotFoundException
{
String name = (file != null ? file.getPath() : null);
SecurityManager security = System.getSecurityManager();
if (security != null) {
security.checkWrite(name);
}
if (name == null) {
throw new NullPointerException();
}
if (file.isInvalid()) {
throw new FileNotFoundException("Invalid file path");
}
this.fd = new FileDescriptor();
this.append = append;
this.path = name;
fd.incrementAndGetUseCount();
open(name, append);
}
public FileOutputStream(FileDescriptor fdObj) {
SecurityManager security = System.getSecurityManager();
if (fdObj == null) {
throw new NullPointerException();
}
if (security != null) {
security.checkWrite(fdObj);
}
this.fd = fdObj;
this.path = null;
this.append = false;
fd.incrementAndGetUseCount();
}
private native void open(String name, boolean append)
throws FileNotFoundException;
private native void write(int b, boolean append) throws IOException;
public void write(int b) throws IOException {
Object traceContext = IoTrace.fileWriteBegin(path);
int bytesWritten = 0;
try {
write(b, append);
bytesWritten = 1;
} finally {
IoTrace.fileWriteEnd(traceContext, bytesWritten);
}
}
private native void writeBytes(byte b[], int off, int len, boolean append)
throws IOException;
public void write(byte b[]) throws IOException {
Object traceContext = IoTrace.fileWriteBegin(path);
int bytesWritten = 0;
try {
writeBytes(b, 0, b.length, append);
bytesWritten = b.length;
} finally {
IoTrace.fileWriteEnd(traceContext, bytesWritten);
}
}
public void close() throws IOException {
synchronized (closeLock) {
if (closed) {
return;
}
closed = true;
}
if (channel != null) {
fd.decrementAndGetUseCount();
channel.close();
}
int useCount = fd.decrementAndGetUseCount();
if ((useCount <= 0) || !isRunningFinalize()) {
close0();
}
}
public final FileDescriptor getFD() throws IOException {
if (fd != null) return fd;
throw new IOException();
}
public FileChannel getChannel() {
synchronized (this) {
if (channel == null) {
channel = FileChannelImpl.open(fd, path, false, true, append, this);
fd.incrementAndGetUseCount();
}
return channel;
}
}
protected void finalize() throws IOException {
if (fd != null) {
if (fd == FileDescriptor.out || fd == FileDescriptor.err) {
flush();
} else {
runningFinalize.set(Boolean.TRUE);
try {
close();
} finally {
runningFinalize.set(Boolean.FALSE);
}
}
}
}
private native void close0() throws IOException;
private static native void initIDs();
static {
initIDs();
}
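
For orientation, a minimal usage sketch of the constructors shown above (not part of the JDK source; the file name is illustrative):

import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class FileOutputStreamDemo {
    public static void main(String[] args) throws IOException {
        // FileOutputStream(String, boolean) with append = true ends up in open(name, append) above
        try (FileOutputStream out = new FileOutputStream("demo.log", true)) {
            out.write("hello\n".getBytes(StandardCharsets.UTF_8));
        } // try-with-resources calls close(), which releases the FileDescriptor
    }
}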
Stack Trace
java.io.FileNotFoundException: D:\webs\Softparam2\upload\1\1429175417820\cfm\CFM_test.xlsx (Le périphérique n’est pas prêt)
at java.io.FileOutputStream.open(Native Method)
at java.io.FileOutputStream.<init>(FileOutputStream.java:221)
at java.io.FileOutputStream.<init>(FileOutput)

FileNotFoundException is typically thrown when a file cannot be opened; here the message ("Le périphérique n’est pas prêt", i.e. "the device is not ready") suggests that drive D: was not accessible when the stream was opened. I'd suggest you check the file path.
As a general rule, it is a bad idea to use absolute file paths. Instead, place the file in the current working folder (which can be found with System.getProperty("user.dir")) and simply call new FileOutputStream("CFM_test.xlsx").
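
If you want to verify the target before opening the stream, a small check such as the one below can help. This is only a sketch: the file name comes from the stack trace, while the class name and messages are illustrative.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

public class UploadTargetCheck {
    public static void main(String[] args) throws IOException {
        // Resolve against the working directory instead of a hard-coded absolute path
        File target = new File(System.getProperty("user.dir"), "CFM_test.xlsx");
        File parent = target.getParentFile();
        if (parent == null || !parent.isDirectory()) {
            throw new IOException("Target directory missing or drive not ready: " + parent);
        }
        try (FileOutputStream out = new FileOutputStream(target)) {
            out.write(new byte[0]); // replace with the real workbook bytes
        }
    }
}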

Related

Writing a Servlet Filter with Quarkus that modifies the response body

I am trying to write a servlet filter in Quarkus that modifies the response body.
Basically, I have followed the steps in the following article to make sure I am not missing anything: https://medium.com/@sportans300/fiddling-with-httpresponses-in-java-2a269cd5a474
So, the filter method looks like this:
public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain)
throws IOException, ServletException {
if (request instanceof HttpServletRequestImpl) {
String url = ((HttpServletRequestImpl) request).getRequestURL().toString();
HttpServletResponse httpResponse = (HttpServletResponse) response;
if (url.endsWith("/index.html")) {
ServletResponseWrapper wrappedResp = new ServletResponseWrapper((HttpServletResponse) response);
filterChain.doFilter(request, wrappedResp);
String respBody = wrappedResp.toString();
if (wrappedResp.getContentType().contains("text/html")) {
String msg = "<html><head></head><body>hello world</body></html>";
response.setContentLength(msg.length());
response.getWriter().write(msg);
}
else {
System.out.println("-- Just return original resonse");
response.getWriter().append(respBody);
}
return;
}
}
filterChain.doFilter(request, response);
}
And the ResponseWrapper like this:
public class ServletResponseWrapper extends HttpServletResponseWrapper {
private final ByteArrayOutputStream capture;
private ServletOutputStream output;
private PrintWriter writer;
public ServletResponseWrapper(HttpServletResponse response) throws IOException {
super(response);
capture = new ByteArrayOutputStream(response.getBufferSize());
}
@Override
public ServletOutputStream getOutputStream() {
if (writer != null) {
throw new IllegalStateException("getWriter() has already been called on this response.");
}
if (output == null) {
output = new ServletOutputStream() {
@Override
public void write(int b) throws IOException {
capture.write(b);
}
@Override
public void write(byte b[]) throws IOException {
capture.write(b);
}
@Override
public void write(byte b[], int off, int len) throws IOException {
capture.write(b, off, len);
}
@Override
public void flush() throws IOException {
capture.flush();
}
@Override
public void close() throws IOException {
capture.close();
}
@Override
public boolean isReady() {
return false;
}
@Override
public void setWriteListener(WriteListener arg0) {
}
};
}
return output;
}
@Override
public PrintWriter getWriter() throws IOException {
if (output != null) {
throw new IllegalStateException("getOutputStream() has already been called on this response.");
}
if (writer == null) {
writer = new PrintWriter(new OutputStreamWriter(capture, getCharacterEncoding()));
}
return writer;
}
public void close() throws IOException {
if (writer != null) {
writer.close();
}
if (output != null) {
output.close();
}
}
@Override
public void flushBuffer() throws IOException {
System.out.println("-- flush buffer");
if (writer != null) {
writer.flush();
} else if (output != null) {
output.flush();
}
}
public byte[] getResponseData() throws IOException {
if (writer != null) {
writer.close();
} else if (output != null) {
output.close();
}
return capture.toByteArray();
}
@Override
public String toString() {
try {
return new String(getResponseData());
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
What is happening is that the response seems to be already committed, and I am not able to change the content length or the body anymore. Undertow seems to have already started sending the headers during filterChain.doFilter(request, wrappedResp).
The browser receives Content-Length set to the length of the original response, and no body is received at all (the page keeps loading).
UPDATE: I tested the exact same web filter on Wildfly 19 and OpenLiberty; the filter works as expected there. So the issue seems to be specific to Quarkus.
UPDATE 2: Seems to be an issue in quarkus:dev only. I have opened up an issue: https://github.com/quarkusio/quarkus/issues/8546
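
For reference, the write-back step this filter performs can be made explicit. The sketch below is only the conventional servlet pattern (the helper name and charset handling are illustrative), not a fix for the quarkus:dev issue linked above:

// Replace the body on the real response after the chain has run against the wrapper,
// and set the new length before any bytes are written.
private void writeReplacementBody(HttpServletResponse response, String msg) throws IOException {
    byte[] newBody = msg.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    response.setContentType("text/html;charset=UTF-8");
    response.setContentLength(newBody.length); // must happen before the body is written
    response.getOutputStream().write(newBody);
    response.getOutputStream().flush();
}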

Modified HttpServletResponse in custom filter gets truncated to original response length

I have created a custom filter which modifies the content of an http response. The content replacement itself works ok, however when the content size is larger than the original response, it gets truncated to the same size as the original one whilst missing the remaining characters.
This is my custom filter:
public class MyCustomFilter extends OncePerRequestFilter{
@Override
protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain filterChain) throws ServletException, IOException {
String actionHeader = request.getHeader(RestApi.ACTION_HEADER_NAME);
if (actionHeader != null) {
String contentTypeValue = ContentTypeMap.getContentType(actionHeader);
if (contentTypeValue != null) {
try {
response.setHeader(RestApi.ACTION_HEADER_NAME, "TRUE");
log.debug("New Action - Action request detected: " + request.getRequestURL());
ActionRequestWrapper actionRequest = new ActionRequestWrapper(request, contentTypeValue);
ActionResponseWrapper actionResponse = new ActionResponseWrapper(response);
performActionRequest(actionRequest);
filterChain.doFilter(actionRequest, actionResponse);
byte[] originalResponseBody = actionResponse.getResponseData();
byte[] updatedBody = performAction(originalResponseBody);
OutputStream os = response.getOutputStream();
os.write(updatedBody);
os.flush();
os.close();
}
catch (Exception e) {
log.error("New Action - An error occurred while trying to perform action",e);
}
}
else {
filterChain.doFilter(request, response);
}
}
else {
filterChain.doFilter(request, response);
}
}
This is my ResponseWrapper:
public class ActionResponseWrapper extends HttpServletResponseWrapper {
private final ByteArrayOutputStream capture;
private ServletOutputStream output;
private PrintWriter writer;
public ActionResponseWrapper(HttpServletResponse response) throws IOException {
super(response);
capture = new ByteArrayOutputStream(response.getBufferSize());
}
@Override
public ServletOutputStream getOutputStream() {
if (writer != null) {
throw new IllegalStateException("getWriter() has already been called on this response.");
}
if (output == null) {
output = new ServletOutputStream() {
@Override
public void write(int b) throws IOException {
capture.write(b);
}
@Override
public void flush() throws IOException {
capture.flush();
}
@Override
public void close() throws IOException {
capture.close();
}
@Override
public boolean isReady() {
return false;
}
@Override
public void setWriteListener(WriteListener arg0) {
}
};
}
return output;
}
@Override
public PrintWriter getWriter() throws IOException {
if (output != null) {
throw new IllegalStateException("getOutputStream() has already been called on this response.");
}
if (writer == null) {
writer = new PrintWriter(new OutputStreamWriter(capture, getCharacterEncoding()));
}
return writer;
}
@Override
public void flushBuffer() throws IOException {
super.flushBuffer();
if (writer != null) {
writer.flush();
} else if (output != null) {
output.flush();
}
}
public byte[] getResponseData() throws IOException {
if (writer != null) {
writer.close();
} else if (output != null) {
output.close();
}
return capture.toByteArray();
}
}
This is my RequestWrapper:
public class ActionRequestWrapper extends HttpServletRequestWrapper {
private final String contentTypeValue;
private final String contentLengthValue;
private byte[] body;
public ActionRequestWrapper(HttpServletRequest request, String contentType) throws IOException{
super(request);
contentTypeValue = contentType;
StringBuilder _body = new StringBuilder();
try (BufferedReader bufferedReader = request.getReader()) {
String line;
while ((line = bufferedReader.readLine()) != null)
_body.append(line);
}
this.body = _body.toString().getBytes();
contentLengthValue = String.valueOf(body.length);
}
@Override
public String getHeader(String name) {
if (name.equalsIgnoreCase(CONTENT_TYPE_HEADER_NAME)) {
return contentTypeValue;
}
else if (name.equalsIgnoreCase(CONTENT_LENGTH_HEADER_NAME)) {
return contentLengthValue;
}
return super.getHeader(name);
}
@Override
public Enumeration<String> getHeaders(String headerName) {
if (headerName.equalsIgnoreCase(CONTENT_TYPE_HEADER_NAME)) {
return Collections.enumeration(Collections.singletonList(contentTypeValue));
}
else if (headerName.equalsIgnoreCase(CONTENT_LENGTH_HEADER_NAME)) {
return Collections.enumeration(Collections.singletonList(contentLengthValue));
}
return super.getHeaders(headerName);
}
@Override
public Enumeration<String> getHeaderNames(){
Enumeration<String> original = super.getHeaderNames();
List<String> newHeaders = new ArrayList<String>(){
@Override
public boolean contains(Object o){
String paramStr = (String)o;
for (String headerName : this){
if (headerName.equalsIgnoreCase(paramStr)) return true;
}
return false;
}
};
newHeaders.addAll(Collections.list(original));
if (!newHeaders.contains(CONTENT_TYPE_HEADER_NAME)){
newHeaders.add(CONTENT_TYPE_HEADER_NAME);
}
if (!newHeaders.contains(CONTENT_LENGTH_HEADER_NAME)){
newHeaders.add(CONTENT_LENGTH_HEADER_NAME);
}
return Collections.enumeration(newHeaders);
}
@Override
public ServletInputStream getInputStream() throws IOException {
final ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(body);
return new ServletInputStream() {
@Override
public boolean isFinished() {
return byteArrayInputStream.available() > 0;
}
@Override
public boolean isReady() {
return true;
}
@Override
public void setReadListener(ReadListener listener) {
}
@Override
public int read() throws IOException {
return byteArrayInputStream.read();
}
};
}
@Override
public BufferedReader getReader() throws IOException {
return new BufferedReader(new InputStreamReader(this.getInputStream()));
}
public byte[] getBody() {
return body;
}
public void setBody(byte[] body) {
this.body = body;
}
}
I have created a filter registration class as well where I have registered my filter beans and configured their order. Everything works fine except for the fact that the updated body of my response is truncated to the number of bytes my original response had.
I was able to make this work using ControllerAdvice and ResponseBodyAdvice, but since I prefer using the filter, what I want to know is: why is my updated response body getting truncated, and is it possible to fix this using filters? I have a feeling this has to do with my upgrade from org.apache.tomcat.embed:tomcat-embed-*-8.5.4 to org.apache.tomcat.embed:tomcat-embed-*-8.5.20, but if that is indeed the case I want to know what changed.
I am using Spring Boot Version 1.5.7.
Appreciate the help!
UPDATE:
Using ControllerAdvice and ResponseBodyAdvice makes my solution much more complicated, since my body is not necessarily a String, and defining my body as an Object would require too many modifications to my existing code. After digging a little deeper I found that the Content-Length header in the Http11Processor class (package org.apache.coyote.http11) is set to the size of the original response right before the response is committed, and that is why my new response is getting truncated:
Http11Processor.java:
@Override
protected final void prepareResponse() throws IOException {
boolean entityBody = true;
contentDelimitation = false;
OutputFilter[] outputFilters = outputBuffer.getFilters();
if (http09 == true) {
// HTTP/0.9
outputBuffer.addActiveFilter(outputFilters[Constants.IDENTITY_FILTER]);
outputBuffer.commit();
return;
}
int statusCode = response.getStatus();
if (statusCode < 200 || statusCode == 204 || statusCode == 205 ||
statusCode == 304) {
// No entity body
outputBuffer.addActiveFilter
(outputFilters[Constants.VOID_FILTER]);
entityBody = false;
contentDelimitation = true;
if (statusCode == 205) {
// RFC 7231 requires the server to explicitly signal an empty
// response in this case
response.setContentLength(0);
} else {
response.setContentLength(-1);
}
}
MessageBytes methodMB = request.method();
if (methodMB.equals("HEAD")) {
// No entity body
outputBuffer.addActiveFilter
(outputFilters[Constants.VOID_FILTER]);
contentDelimitation = true;
}
// Sendfile support
if (endpoint.getUseSendfile()) {
prepareSendfile(outputFilters);
}
// Check for compression
boolean isCompressible = false;
boolean useCompression = false;
if (entityBody && (compressionLevel > 0) && sendfileData == null) {
isCompressible = isCompressible();
if (isCompressible) {
useCompression = useCompression();
}
// Change content-length to -1 to force chunking
if (useCompression) {
response.setContentLength(-1);
}
}
MimeHeaders headers = response.getMimeHeaders();
// A SC_NO_CONTENT response may include entity headers
if (entityBody || statusCode == HttpServletResponse.SC_NO_CONTENT) {
String contentType = response.getContentType();
if (contentType != null) {
headers.setValue("Content-Type").setString(contentType);
}
String contentLanguage = response.getContentLanguage();
if (contentLanguage != null) {
headers.setValue("Content-Language")
.setString(contentLanguage);
}
}
long contentLength = response.getContentLengthLong();
boolean connectionClosePresent = false;
if (contentLength != -1) {
headers.setValue("Content-Length").setLong(contentLength);
outputBuffer.addActiveFilter
(outputFilters[Constants.IDENTITY_FILTER]);
contentDelimitation = true;
} else {
// If the response code supports an entity body and we're on
// HTTP 1.1 then we chunk unless we have a Connection: close header
connectionClosePresent = isConnectionClose(headers);
if (entityBody && http11 && !connectionClosePresent) {
outputBuffer.addActiveFilter
(outputFilters[Constants.CHUNKED_FILTER]);
contentDelimitation = true;
headers.addValue(Constants.TRANSFERENCODING).setString(Constants.CHUNKED);
} else {
outputBuffer.addActiveFilter
(outputFilters[Constants.IDENTITY_FILTER]);
}
}
if (useCompression) {
outputBuffer.addActiveFilter(outputFilters[Constants.GZIP_FILTER]);
headers.setValue("Content-Encoding").setString("gzip");
}
// If it might be compressed, set the Vary header
if (isCompressible) {
// Make Proxies happy via Vary (from mod_deflate)
MessageBytes vary = headers.getValue("Vary");
if (vary == null) {
// Add a new Vary header
headers.setValue("Vary").setString("Accept-Encoding");
} else if (vary.equals("*")) {
// No action required
} else {
// Merge into current header
headers.setValue("Vary").setString(
vary.getString() + ",Accept-Encoding");
}
}
// Add date header unless application has already set one (e.g. in a
// Caching Filter)
if (headers.getValue("Date") == null) {
headers.addValue("Date").setString(
FastHttpDateFormat.getCurrentDate());
}
// FIXME: Add transfer encoding header
if ((entityBody) && (!contentDelimitation)) {
// Mark as close the connection after the request, and add the
// connection: close header
keepAlive = false;
}
// This may disabled keep-alive to check before working out the
// Connection header.
checkExpectationAndResponseStatus();
// If we know that the request is bad this early, add the
// Connection: close header.
if (keepAlive && statusDropsConnection(statusCode)) {
keepAlive = false;
}
if (!keepAlive) {
// Avoid adding the close header twice
if (!connectionClosePresent) {
headers.addValue(Constants.CONNECTION).setString(
Constants.CLOSE);
}
} else if (!http11 && !getErrorState().isError()) {
headers.addValue(Constants.CONNECTION).setString(Constants.KEEPALIVE);
}
// Add server header
if (server == null) {
if (serverRemoveAppProvidedValues) {
headers.removeHeader("server");
}
} else {
// server always overrides anything the app might set
headers.setValue("Server").setString(server);
}
// Build the response header
try {
outputBuffer.sendStatus();
int size = headers.size();
for (int i = 0; i < size; i++) {
outputBuffer.sendHeader(headers.getName(i), headers.getValue(i));
}
outputBuffer.endHeaders();
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
// If something goes wrong, reset the header buffer so the error
// response can be written instead.
outputBuffer.resetHeaderBuffer();
throw t;
}
outputBuffer.commit();
}
Is there a way I can somehow change the value of the Content-Length header to the length of my new response?
In your ActionResponseWrapper change the following line
capture = new ByteArrayOutputStream(response.getBufferSize());
to
capture = new ByteArrayOutputStream();
By providing the size to ByteArrayOutputStream you are restricting it to the original response's content length. Because of this, ActionResponseWrapper.getResponseData() will return only up to the original response length.
Try:
actionResponse.reset();
os.flush();
os.write(updatedBody);
os.close();
Then it won't truncate anymore.
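
Putting the two suggestions together, the end of the filter could look roughly like this; a sketch only, reusing the names from the question:

// Unbounded capture (see the constructor change above), then reset the real response
// and advertise the new length before writing the replacement bytes.
byte[] originalResponseBody = actionResponse.getResponseData();
byte[] updatedBody = performAction(originalResponseBody);
if (!response.isCommitted()) {
    response.reset(); // clears buffered status and headers, including any stale Content-Length
}
response.setContentLength(updatedBody.length);
OutputStream os = response.getOutputStream();
os.write(updatedBody);
os.flush();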

Pipe Broken with PipedInputStream with kubernetes-client exec()

I'm using the kubernetes-client to try to copy a directory from a pod, but I'm doing something wrong with the input stream from stdout. I get a java.io.IOException: Pipe broken when it tries to read(). I'm pretty sure that no data flows at all. I'm half wondering if I need to read the InputStream on a separate thread or something?
The stream is created like this:
public InputStream copyFiles(String containerId,
String folderName) {
ExecWatch exec = client.pods().withName(containerId).redirectingOutput().exec("tar -C " + folderName + " -c");
// We need to wrap the InputStream so that when the stdout is closed, then the underlying ExecWatch is closed
// also. This will cleanup any Websockets connections.
ChainedCloseInputStreamWrapper inputStreamWrapper = new ChainedCloseInputStreamWrapper(exec.getOutput(), exec);
return inputStreamWrapper;
}
And the InputStream is processed in this function
void copyVideos(final String containerId) {
TarArchiveInputStream tarStream = new TarArchiveInputStream(containerClient.copyFiles(containerId, "/videos/"));
TarArchiveEntry entry;
boolean videoWasCopied = false;
try {
while ((entry = tarStream.getNextTarEntry()) != null) {
if (entry.isDirectory()) {
continue;
}
String fileExtension = entry.getName().substring(entry.getName().lastIndexOf('.'));
testInformation.setFileExtension(fileExtension);
File videoFile = new File(testInformation.getVideoFolderPath(), testInformation.getFileName());
File parent = videoFile.getParentFile();
if (!parent.exists()) {
parent.mkdirs();
}
OutputStream outputStream = new FileOutputStream(videoFile);
IOUtils.copy(tarStream, outputStream);
outputStream.close();
videoWasCopied = true;
LOGGER.log(Level.INFO, "{0} Video file copied to: {1}/{2}", new Object[]{getId(),
testInformation.getVideoFolderPath(), testInformation.getFileName()});
}
} catch (IOException e) {
LOGGER.log(Level.WARNING, getId() + " Error while copying the video", e);
ga.trackException(e);
} finally {
if (!videoWasCopied) {
testInformation.setVideoRecorded(false);
}
}
}
The InputStream Wrapper class is just there to close the ExecWatch at the end once the InputStream is closed, it looks like this:
private static class ChainedCloseInputStreamWrapper extends InputStream {
private InputStream delegate;
private Closeable resourceToClose;
public ChainedCloseInputStreamWrapper(InputStream delegate, Closeable resourceToClose) {
this.delegate = delegate;
this.resourceToClose = resourceToClose;
}
@Override
public int read() throws IOException {
return delegate.read();
}
public int available() throws IOException {
return delegate.available();
}
public void close() throws IOException {
logger.info("Shutdown called!");
delegate.close();
// Close our dependent resource
resourceToClose.close();
}
public boolean equals(Object o) {
return delegate.equals(o);
}
public int hashCode() {
return delegate.hashCode();
}
public int read(byte[] array) throws IOException {
return delegate.read(array);
}
public int read(byte[] array,
int n,
int n2) throws IOException {
return delegate.read(array, n, n2);
}
public long skip(long n) throws IOException {
return delegate.skip(n);
}
public void mark(int n) {
delegate.mark(n);
}
public void reset() throws IOException {
delegate.reset();
}
public boolean markSupported() {
return delegate.markSupported();
}
public String toString() {
return delegate.toString();
}
}
Turns out I had the tar command wrong, so it was causing a failure and the stdout PipedInputStream was deadlocking. I managed to find a workaround for the deadlock, but the main reason for the failure was that I forgot to tell tar to actually do something! I at least needed a "." to include the current directory.
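
For anyone hitting the same thing, the corrected call could look roughly like this. It reuses the question's client and wrapper; passing the command as separate arguments (plus the trailing ".") is an assumption about the exec API rather than a confirmed signature.

public InputStream copyFiles(String containerId, String folderName) {
    // Tell tar what to archive: "." means the contents of folderName
    ExecWatch exec = client.pods()
            .withName(containerId)
            .redirectingOutput()
            .exec("tar", "-C", folderName, "-c", ".");
    // Closing the returned stream also closes the ExecWatch (and its websocket)
    return new ChainedCloseInputStreamWrapper(exec.getOutput(), exec);
}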

Parquet Writer to buffer or byte stream

I have a Java application that converts JSON messages to Parquet format. Is there any Parquet writer which writes to a buffer or byte stream in Java? Most of the examples I have seen write to files.
TL;DR: you will need to implement OutputFile, e.g. something along the lines of:
import org.apache.parquet.io.OutputFile;
import org.apache.parquet.io.PositionOutputStream;
import java.io.BufferedOutputStream;
import java.io.IOException;
public class ParquetBufferedWriter implements OutputFile {
private final BufferedOutputStream out;
public ParquetBufferedWriter(BufferedOutputStream out) {
this.out = out;
}
@Override
public PositionOutputStream create(long blockSizeHint) throws IOException {
return createPositionOutputstream();
}
private PositionOutputStream createPositionOutputstream() {
return new PositionOutputStream() {
@Override
public long getPos() throws IOException {
return 0;
}
@Override
public void write(int b) throws IOException {
out.write(b);
}
};
}
@Override
public PositionOutputStream createOrOverwrite(long blockSizeHint) throws IOException {
return createPositionOutputstream();
}
@Override
public boolean supportsBlockSize() {
return false;
}
@Override
public long defaultBlockSize() {
return 0;
}
}
And your writer would be something like:
ParquetBufferedWriter out = new ParquetBufferedWriter(new BufferedOutputStream(targetStream)); // targetStream: whatever OutputStream you want the Parquet bytes written to
try (ParquetWriter<Record> writer = AvroParquetWriter.
<Record>builder(out)
.withRowGroupSize(DEFAULT_BLOCK_SIZE)
.withPageSize(DEFAULT_PAGE_SIZE)
.withSchema(SCHEMA)
.build()) {
for (Record record : records) {
writer.write(record);
}
} catch (IOException e) {
throw new IllegalStateException(e);
}
I just also needed to write to a stream, so I completed the example given by naimdjon. The following works perfectly fine for me.
class ParquetBufferedWriter implements OutputFile {
private final BufferedOutputStream out;
public ParquetBufferedWriter(BufferedOutputStream out) {
this.out = out;
}
@Override
public PositionOutputStream create(long blockSizeHint) throws IOException {
return createPositionOutputstream();
}
private PositionOutputStream createPositionOutputstream() {
return new PositionOutputStream() {
int pos = 0;
@Override
public long getPos() throws IOException {
return pos;
}
@Override
public void flush() throws IOException {
out.flush();
};
@Override
public void close() throws IOException {
out.close();
};
@Override
public void write(int b) throws IOException {
out.write(b);
pos++;
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
out.write(b, off, len);
pos += len;
}
};
}
@Override
public PositionOutputStream createOrOverwrite(long blockSizeHint) throws IOException {
return createPositionOutputstream();
}
@Override
public boolean supportsBlockSize() {
return false;
}
@Override
public long defaultBlockSize() {
return 0;
}
}
You need to write the data into a temp file and then convert the file data into an input stream or buffer.
Something like this: first read the temp file data
final InputStream targetStream = new DataInputStream(new FileInputStream(tmp1.getAbsoluteFile()));
StringWriter writer = new StringWriter();
String encoding = StandardCharsets.UTF_8.name();
IOUtils.copy(targetStream, writer, encoding);
System.out.println(writer);
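
If raw bytes are what you actually need (Parquet is a binary format, so a character-based copy may not round-trip), a simpler variant of the temp-file approach is to read the file straight into a byte array; tmp1 is the temp file from the snippet above:

byte[] parquetBytes = java.nio.file.Files.readAllBytes(tmp1.toPath());
InputStream targetStream = new java.io.ByteArrayInputStream(parquetBytes);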

Run JTape Library on Java (java library path)

I am trying to use the JTape Library to read some data from a DDS4 magnetic tape.
I want to use Eclipse to run my code under Linux 12.04 LTS.
The problem is that I cannot get Eclipse to reference the TapeLinux.c library in any way.
PROBLEM:
Exception in thread "main" java.lang.UnsatisfiedLinkError: no TapeLinux in java.library.path
at java.lang.ClassLoader.loadLibrary(ClassLoader.java:1886)
at java.lang.Runtime.loadLibrary0(Runtime.java:849)
at java.lang.System.loadLibrary(System.java:1088)
at BasicTapeDevice.<clinit>(BasicTapeDevice.java:169)
at TestEOD.main(TestEOD.java:12)
These are my classes:
/* TestEOD.java */
import java.io.*;
public class TestEOD {
public static void main(String[] args) throws IOException {
/* if (args.length != 1) {
System.err.println("Usage: java TestEOD <path to device>");
System.exit(1);
}*/
BasicTapeDevice d = new BasicTapeDevice("/dev/nst0");
System.out.print("Rewinding...");
System.out.flush();
d.rewind();
System.out.println("done!");
System.out.print("Spacing to end of data...");
System.out.flush();
d.spaceEOD();
System.out.println("done!");
}
}
/* BasicTapeDevice.java */
import java.io.*;
public class BasicTapeDevice {
private FileDescriptor fd;
private InputStream in;
private OutputStream out;
private boolean eof;
private boolean eom;
private boolean ignoreEOM;
public BasicTapeDevice(String pathName) throws IOException {
fd = new FileDescriptor();
tapeOpen(pathName);
in = new TapeInputStream();
out = new TapeOutputStream();
eof = false;
eom = false;
ignoreEOM = false;
}
public synchronized void close() throws IOException {
if (fd != null) {
try {
if (fd.valid()) {
tapeClose();
}
} finally {
fd = null;
}
}
}
public InputStream getInputStream() throws IOException {
ensureOpen();
return in;
}
public OutputStream getOutputStream() throws IOException {
ensureOpen();
return out;
}
public int getBlockSize() throws IOException {
ensureOpen();
return tapeGetBlockSize();
}
public void setBlockSize(int bs) throws IOException {
ensureOpen();
tapeSetBlockSize(bs);
}
public void rewind() throws IOException {
ensureOpen();
tapeRewind();
}
public void spaceEOD() throws IOException {
ensureOpen();
tapeSpaceEOD();
}
public void clearEOF() throws IOException {
ensureOpen();
if (eof) {
eof = false;
/* assume that the file mark has already been skipped */
} else {
throw new IOException("not at end of file");
}
}
public void clearEOM() throws IOException {
ensureOpen();
if (eom) {
ignoreEOM = true;
} else {
throw new IOException("not at logical end of media");
}
}
class TapeInputStream extends InputStream {
private byte[] temp = new byte[1];
public int read() throws IOException {
int n = read(temp, 0, 1);
if (n <= 0) {
return -1;
}
return temp[0] & 0xff;
}
public int read(byte[] b, int off, int len) throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off+len > b.length) {
throw new IndexOutOfBoundsException();
}
if (len == 0) {
return 0;
}
if (eof) {
return -1;
}
ensureOpen();
int n = tapeRead(b, off, len);
if (n <= 0) {
return -1;
}
return n;
}
public long skip(long numbytes) throws IOException {
return 0;
}
public void close() throws IOException {
BasicTapeDevice.this.close();
}
}
class TapeOutputStream extends OutputStream {
private byte[] temp = new byte[1];
public void write(int b) throws IOException {
temp[0] = (byte) b;
write(temp, 0, 1);
}
public void write(byte[] b) throws IOException {
write(b, 0, b.length);
}
public void write(byte[] b, int off, int len) throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off+len > b.length) {
throw new IndexOutOfBoundsException();
}
if (eom && !ignoreEOM) {
throw new LogicalEOMException("logical end-of-media");
}
int n = tapeWrite(b, off, len);
while (n < len) {
n += tapeWrite(b, off + n, len - n);
}
}
public void close() throws IOException {
BasicTapeDevice.this.close();
}
}
protected void finalize() {
try {
close();
} catch (IOException ex) {
}
}
private void ensureOpen() throws IOException {
if (fd == null || !fd.valid()) {
throw new IOException("tape device is not open");
}
}
private static native void initFields();
private native void tapeOpen(String pathName) throws IOException;
private native void tapeClose() throws IOException;
private native int tapeRead(byte[] b, int off, int len) throws IOException;
private native int tapeWrite(byte[] b, int off, int len) throws IOException;
private native int tapeGetBlockSize() throws IOException;
private native void tapeSetBlockSize(int bs) throws IOException;
private native void tapeRewind() throws IOException;
private native void tapeSpaceEOD() throws IOException;
/* load the JNI library specific for this platform */
static {
StringBuffer buf = new StringBuffer("Tape");
String osName = System.getProperty("os.name");
if (osName.equals("Windows NT") || osName.equals("Windows 2000")) {
buf.append("WinNT");
} else {
buf.append(osName);
}
System.loadLibrary(buf.toString());
initFields();
}
}
WHAT I HAVE TRIED
From what I have read, what I need is to include the path of the folder which contains the file TapeLinux.c.
I have tried all these answers and nothing changed.
Can you please help me figure out how I can use JNI in this situation and what I should configure to run my code?
Thanks
Since you are on Linux, the message ...main java.lang.UnsatisfiedLinkError: no TapeLinux in java... likely means the library named libTapeLinux.so could not be found.
It seems you are searching for a solution by trying to locate TapeLinux.c; you should be searching for libTapeLinux.so instead, and once you find it, make sure it is on the library load path (java.library.path).
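
A quick way to confirm where the JVM is actually looking is to print java.library.path from the same launch configuration:

public class PrintLibraryPath {
    public static void main(String[] args) {
        // Directories searched when System.loadLibrary("TapeLinux") runs
        System.out.println(System.getProperty("java.library.path"));
    }
}

Then add the directory containing libTapeLinux.so to that path, for example with a VM argument such as -Djava.library.path=/path/to/native/libs (in Eclipse: Run Configurations, VM arguments), or by exporting LD_LIBRARY_PATH before launching.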
