In this example file reader, the solution focuses on reading an arbitrary file and loading it into memory.
I've been working to improve it so that it processes a CSV file while keeping the header in each thread, so each thread can output a separate, correctly formatted CSV file.
Unfortunately I'm not able to do so, since it reads from arbitrary byte offsets rather than line boundaries; this means a chunk might start in the middle of a line and I'll get lines mixed up.
Is there a way to adapt this code and make it CSV-specific?
Here is the code I changed:
public static void main(String[] args) throws IOException {
    long start = System.currentTimeMillis();
    CSVReader reader = new CSVReader(new FileReader("file.csv"));
    String[] columnsNames = reader.readNext();
    reader.close();
    FileInputStream fileInputStream = new FileInputStream("file.csv");
    FileChannel channel = fileInputStream.getChannel();
    long remaining_size = channel.size(); //get the total number of bytes in the file
    long chunk_size = remaining_size / Integer.parseInt("4"); //file_size/threads
    //Max allocation size allowed is ~2GB
    if (chunk_size > (Integer.MAX_VALUE - 5)) {
        chunk_size = (Integer.MAX_VALUE - 5);
    }
    //thread pool
    ExecutorService executor = Executors.newFixedThreadPool(Integer.parseInt("4"));
    long start_loc = 0; //file pointer
    int i = 0;          //loop counter
    while (remaining_size >= chunk_size) {
        //launches a new thread
        executor.execute(new FileRead(start_loc, toIntExact(chunk_size), channel, i, String.join(",", columnsNames)));
        remaining_size = remaining_size - chunk_size;
        start_loc = start_loc + chunk_size;
        i++;
    }
    //load the last remaining piece
    executor.execute(new FileRead(start_loc, toIntExact(remaining_size), channel, i, String.join(",", columnsNames)));
    //Tear Down
    executor.shutdown();
    //Busy-wait until all threads finish
    while (!executor.isTerminated()) {
    }
    System.out.println("Finished all threads");
    fileInputStream.close();
    long finish = System.currentTimeMillis();
    System.out.println("Time elapsed: " + (finish - start));
}
class FileRead implements Runnable {
    private FileChannel _channel;
    private long _startLocation;
    private int _size;
    int _sequence_number;
    String _columns;

    public FileRead(long loc, int size, FileChannel chnl, int sequence, String header) {
        _startLocation = loc;
        _size = size;
        _channel = chnl;
        _sequence_number = sequence;
        _columns = header;
    }

    @Override
    public void run() {
        try {
            System.out.println("Reading the channel: " + _startLocation + ":" + _size);
            //allocate memory
            ByteBuffer buff = ByteBuffer.allocate(_size);
            //Read file chunk to RAM
            _channel.read(buff, _startLocation);
            //chunk to String
            String string_chunk = new String(buff.array(), Charset.forName("UTF-8"));
            string_chunk = _columns + System.getProperty("line.separator") + string_chunk;
            if (string_chunk.length() > 0) {
                BufferedWriter out = new BufferedWriter(new FileWriter("output_" + System.currentTimeMillis() + ".csv"));
                try {
                    out.write(string_chunk);
                } catch (IOException e) {
                    System.out.println("Exception ");
                } finally {
                    out.close();
                }
            }
            System.out.println("Done Reading the channel: " + _startLocation + ":" + _size);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
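One way to adapt this (a minimal sketch of my own, not a tested solution; it assumes '\n' line endings and no newlines embedded inside quoted CSV fields) is to treat the computed offsets as approximate and scan forward to the next line break, so every chunk starts and ends on a line boundary:

// Hypothetical helper: given an approximate byte offset, return the position
// just past the next '\n', so a chunk always begins at the start of a line.
static long alignToNextLine(FileChannel channel, long approxOffset) throws IOException {
    if (approxOffset == 0) {
        return 0; // the first chunk starts at the beginning of the file
    }
    ByteBuffer probe = ByteBuffer.allocate(1);
    long pos = approxOffset;
    while (pos < channel.size()) {
        probe.clear();
        if (channel.read(probe, pos) <= 0) {
            break; // end of file reached
        }
        pos++;
        if (probe.get(0) == '\n') {
            break; // the next line starts right after this newline
        }
    }
    return pos;
}

Each FileRead would then be given start = alignToNextLine(channel, i * chunk_size) and size = alignToNextLine(channel, (i + 1) * chunk_size) - start, so no line is ever split across two output files.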
Related
I'm trying to write software that sends a set of data (a portion of a video game) in different formats (chunked, compressed, raw) and measures the speed of each. However, I'm running into an issue with the CHUNKED method. I've found that, when sending byte arrays larger than about 140000 bytes, the client only reads up to around 131072 bytes per call, no matter how much bigger the array actually is. Is there a reason for this, or potentially a better way to do this? My code is shown below. I'm using the read() method of DataInputStream (and its return value).
SERVER
/**
 *
 * @return Time taken to complete the transfer, in milliseconds.
 */
public int start(String mode, int length) throws IOException, InterruptedException {
    if (mode.equals("RAW")) {
        byte[] all = new ByteCollector(ServerMain.FILES, length).collect();
        output.writeUTF("SENDING " + mode + " " + all.length);
        expect("RECEIVING " + mode);
        long start = System.currentTimeMillis();
        echoSend(all);
        return (int) (System.currentTimeMillis() - start);
    } else if (mode.equals("CHUNKED")) { /*the important part*/
        //split into chunks
        byte[] all = new ByteCollector(ServerMain.FILES, length).collect();
        int chunks = maxChunks(all);
        output.writeUTF("SENDING " + mode + " " + chunks);
        System.out.println("Expecting RECEIVING " + chunks + "...");
        expect("RECEIVING " + chunks);
        int ms = 0;
        for (int i = 0; i < chunks; i++) {
            byte[] currentChunk = getChunk(i, all);
            System.out.println("My chunk length is " + currentChunk.length);
            long start = System.currentTimeMillis();
            System.out.println("Sending...");
            echoSend(currentChunk);
            ms += System.currentTimeMillis() - start;
        }
        if (chunks == 0) expect("0"); //still need to confirm, even though no data was sent
        return ms;
    } else if (mode.equals("COMPRESSED")) {
        byte[] compressed = new ByteCollector(ServerMain.FILES, length).collect();
        compressed = ExperimentUtils.compress(compressed);
        output.writeUTF("SENDING " + mode + " " + compressed.length);
        expect("RECEIVING " + mode);
        long start = System.currentTimeMillis();
        echoSend(compressed, length);
        return (int) (System.currentTimeMillis() - start);
    }
    return -1;
}
public static void main(String[] args) throws IOException, InterruptedException {
    FILES = Files.walk(Paths.get(DIRECTORY)).filter(Files::isRegularFile).toArray(Path[]::new);
    SyncServer server = new SyncServer(new ServerSocket(12222).accept());
    System.out.println("--------[CH UNK ED]--------");
    short[] chunkedSpeeds = new short[FOLDER_SIZE_MB + 1 /*for "zero" or origin*/];
    for (int i = 0; i <= FOLDER_SIZE_MB; i++) {
        chunkedSpeeds[i] = (short) server.start("CHUNKED", i * MB);
        System.out.println(i + "MB, Chunked: " + chunkedSpeeds[i]);
    }
    short[] compressedSpeeds = new short[FOLDER_SIZE_MB + 1];
    for (int i = 0; i <= FOLDER_SIZE_MB; i++) {
        compressedSpeeds[i] = (short) server.start("COMPRESSED", i * MB);
    }
    short[] rawSpeeds = new short[FOLDER_SIZE_MB + 1];
    for (int i = 0; i <= FOLDER_SIZE_MB; i++) {
        rawSpeeds[i] = (short) server.start("RAW", i * MB);
    }
    System.out.println("Raw speeds: " + Arrays.toString(rawSpeeds));
    System.out.println("\n\nCompressed speeds: " + Arrays.toString(compressedSpeeds));
    System.out.println("\n\nChunked speeds: " + Arrays.toString(chunkedSpeeds));
}
CLIENT
public static void main(String[] args) throws IOException, InterruptedException {
    Socket socket = new Socket("localhost", 12222);
    DataInputStream input = new DataInputStream(socket.getInputStream());
    DataOutputStream output = new DataOutputStream(socket.getOutputStream());
    while (socket.isConnected()) {
        String response = input.readUTF();
        String[] content = response.split(" ");
        if (response.startsWith("SENDING CHUNKED")) {
            int chunks = Integer.parseInt(content[2]);
            System.out.println("Read chunk amount of " + chunks);
            output.writeUTF("RECEIVING " + chunks);
            for (int i = 0; i < chunks; i++) {
                byte[] chunk = new byte[32 * MB];
                System.out.println("Ready to receive...");
                int read = input.read(chunk);
                System.out.println("Echoing read length of " + read);
                output.writeUTF(String.valueOf(read));
            }
            if (chunks == 0) output.writeUTF("0");
        } else if (response.startsWith("SENDING COMPRESSED")) {
            byte[] compressed = new byte[Integer.parseInt(content[2])];
            output.writeUTF("RECEIVING " + compressed.length);
            input.read(compressed);
            decompress(compressed);
            output.writeInt(decompress(compressed).length);
        } else if (response.startsWith("SENDING RAW")) {
            int length = Integer.parseInt(content[2]);
            output.writeUTF("RECEIVING " + length);
            byte[] received = new byte[length];
            input.read(received);
            output.writeInt(received.length);
        }
    }
}
public static byte[] decompress(byte[] in) throws IOException {
    try {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        InflaterOutputStream infl = new InflaterOutputStream(out);
        infl.write(in);
        infl.flush();
        infl.close();
        return out.toByteArray();
    } catch (Exception e) {
        System.out.println("Error decompressing byte array with length " + in.length);
        throw e;
    }
}
Using SDK 17
I tried switching around the byte amount and found the cutoff was right where I stated above. I even replicated this in a test client/server project with no frills (find that here), and found that the cutoff was even lower! I really hope this isn't an actual issue with Java...
The read() method of DataInputStream doesn't directly correspond to the write() method of DataOutputStream. If you want to know how many bytes were sent in a single method call, the server has to inform the client explicitly.
This is because read() doesn't wait for a set length: it returns as soon as some bytes have been read, since it has no way of knowing how many you want.
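A minimal sketch of the usual fix (my own illustration, reusing the byte count that the SENDING header already carries): use readFully, which loops internally until the requested number of bytes has arrived.

// Read exactly `length` bytes from the stream. A plain read() may return
// after a single socket-buffer's worth of data (often around 131072 bytes),
// no matter how large the destination array is; readFully keeps reading.
static byte[] readExactly(DataInputStream in, int length) throws IOException {
    byte[] chunk = new byte[length];
    in.readFully(chunk); // blocks until all bytes are read, or throws EOFException
    return chunk;
}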
I want to read a large file in a very fast way. I am using MappedByteBuffer like this:
String line = "";
try (RandomAccessFile file2 = new RandomAccessFile(new File(filename), "r")) {
    FileChannel fileChannel = file2.getChannel();
    MappedByteBuffer buffer = fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, fileChannel.size());
    for (int i = 0; i < buffer.limit(); i++) {
        char a = (char) buffer.get();
        if (a == '\n') {
            System.out.println(line);
            line = "";
        } else {
            line += Character.toString(a);
        }
    }
}
This is not working correctly. It is changing the content of the file and printing the changed content. Is there a better way to read a line of a file with MappedByteBuffer?
Eventually I would like to split each line and extract certain content (since it's CSV), so this is just a minimal example that reproduces the problem.
I made some tests using a 21 GB file filled with random strings, each line had a length of 20-40 characters.
It seems like the built-in BufferedReader is still the fastest method.
File f = new File("sfs");
try (Stream<String> lines = Files.lines(f.toPath(), StandardCharsets.UTF_8)) {
    lines.forEach(line -> System.out.println(line));
} catch (IOException e) {}
Reading the lines to a stream ensures you read the lines as you need them instead of reading the entire file at once.
To improve speed even further, you can increase the buffer size of the BufferedReader by a moderate factor. In my tests it started to outperform the normal buffer size at about 10 million lines.
CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder();
int size = 8192 * 16;
try (BufferedReader br = new BufferedReader(new InputStreamReader(Files.newInputStream(f.toPath()), decoder), size)) {
    br.lines().limit(LINES_TO_READ).forEach(s -> {
    });
} catch (IOException e) {
    e.printStackTrace();
}
The code I used for testing:
private static long LINES_TO_READ = 10_000_000;

private static void java8Stream(File f) {
    long startTime = System.nanoTime();
    try (Stream<String> lines = Files.lines(f.toPath(), StandardCharsets.UTF_8).limit(LINES_TO_READ)) {
        lines.forEach(line -> {
        });
    } catch (IOException e) {
        e.printStackTrace();
    }
    long endTime = System.nanoTime();
    System.out.println("no buffer took " + (endTime - startTime) + " nanoseconds");
}

private static void streamWithLargeBuffer(File f) {
    long startTime = System.nanoTime();
    CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder();
    int size = 8192 * 16;
    try (BufferedReader br = new BufferedReader(new InputStreamReader(Files.newInputStream(f.toPath()), decoder), size)) {
        br.lines().limit(LINES_TO_READ).forEach(s -> {
        });
    } catch (IOException e) {
        e.printStackTrace();
    }
    long endTime = System.nanoTime();
    System.out.println("using large buffer took " + (endTime - startTime) + " nanoseconds");
}
private static void memoryMappedFile(File f) {
    CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder();
    long linesReadCount = 0;
    String line = "";
    long startTime = System.nanoTime();
    try (RandomAccessFile file2 = new RandomAccessFile(f, "r")) {
        FileChannel fileChannel = file2.getChannel();
        MappedByteBuffer buffer = fileChannel.map(FileChannel.MapMode.READ_ONLY, 0L, Integer.MAX_VALUE - 10_000_000);
        CharBuffer decodedBuffer = decoder.decode(buffer);
        for (int i = 0; i < decodedBuffer.limit(); i++) {
            char a = decodedBuffer.get();
            if (a == '\n') {
                line = "";
            } else {
                line += Character.toString(a);
            }
            if (linesReadCount++ >= LINES_TO_READ) {
                break;
            }
        }
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
    long endTime = System.nanoTime();
    System.out.println("using memory mapped files took " + (endTime - startTime) + " nanoseconds");
}
Btw I noticed that FileChannel.map throws an exception if the mapped file is larger than Integer.MAX_VALUE which makes the method impractical for reading very large files.
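A sketch of one workaround for that limit (my own assumption, not part of the tests above): map the file in successive windows of at most Integer.MAX_VALUE bytes and advance the offset as you go.

// Map a file of arbitrary size in read-only windows of at most ~2 GB each.
static void mapInWindows(Path file) throws IOException {
    try (FileChannel ch = FileChannel.open(file, StandardOpenOption.READ)) {
        long size = ch.size();
        long pos = 0;
        while (pos < size) {
            long window = Math.min(size - pos, Integer.MAX_VALUE);
            MappedByteBuffer map = ch.map(FileChannel.MapMode.READ_ONLY, pos, window);
            // ... decode and process this window; lines that straddle a
            // window boundary need to be carried over to the next one ...
            pos += window;
        }
    }
}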
Hello My Respected Seniors :)
My goal: download a URL resource, given a URL, using multithreading in Java, i.e. download a single file in multiple pieces (much like IDM does) and, at the end of the download, combine them all into one final file.
Technologies used: Java, RandomAccessFile, multithreading, InputStreams
Problem:
The file downloads with the exact size in KB, I've checked many times, but the final file is corrupted. For example, if I download an image, it will be somewhat blurry; if I download an .exe, it downloads fine, but when I run the .exe file it says "media is damaged, retry download".
This is my main code, from which I call the thread class with parameters such as fileName, the starting range and ending range for each connection, as well as a JProgressBar for every thread, which each thread updates respectively.
public void InitiateDownload() {
    HttpURLConnection uc = (HttpURLConnection) url.openConnection();
    uc.connect();
    long fileSize = uc.getContentLengthLong();
    System.out.println("File Size = " + fileSize);
    uc.disconnect();
    chunkSize = (long) Math.ceil(fileSize / 6);
    startFrom = 0;
    endRange = (startFrom + chunkSize) - 1;
    Thread t1 = new MyThread(url, fileName, startFrom, endRange, progressBar_1);
    t1.start();
    //-----------------------------------------
    startFrom += chunkSize;
    endRange = endRange + chunkSize;
    System.out.println("Part 2 :: Start = " + startFrom + "\tEnd To = " + endRange);
    Thread t2 = new MyThread(url, fileName, startFrom, endRange, progressBar_2);
    t2.start();
    //-----------------------------------------
    //..
    //..
    //..
    //-----------------------------------------
    startFrom += chunkSize;
    long temp = endRange + chunkSize;
    endRange = temp + (fileSize - temp); //add any remaining bytes that were rounded off in division
    Thread t6 = new MyThread(url, fileName, startFrom, endRange, progressBar_6);
    t6.start();
    //-----------------------------------------
}
Here is the run() function of the MyThread class:
public void run() {
    Thread.currentThread().setPriority(MAX_PRIORITY);
    System.setProperty("http.proxyHost", "192.168.10.50");
    System.setProperty("http.proxyPort", "8080");
    HttpURLConnection uc = null;
    try {
        uc = (HttpURLConnection) url.openConnection();
        uc.setRequestProperty("Range", "bytes=" + startFrom + "-" + range);
        uc.connect();
        fileSize = uc.getContentLengthLong();
        inStream = uc.getInputStream();
        int[] buffer = new int[(int) totalDownloadSize];
        file.seek(startFrom); //adjusted start of file
THIS IS WHERE I THINK THE PROBLEM IS,
run() continued...
        for (int i = 0; i < totalDownloadSize; i++) {
            buffer[i] = inStream.read();
            file.write(buffer[i]);
            //Updating progress bars
            totalDownloaded = totalDownloaded + 1;
            int downloaded = (int) (100 * (totalDownloaded / (float) totalDownloadSize));
            progressbar.setValue(downloaded);
        }
        System.err.println(Thread.currentThread().getName() + "'s download is Finished!");
        uc.disconnect();
    } catch (IOException e) {
        System.err.println("Exception in " + Thread.currentThread().getName() + "\t Exception = " + e);
    } finally {
        try {
            file.close();
            if (inStream != null)
                inStream.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
}
Now the file is downloaded with the complete size but, as I said, a little part of it is corrupt.
However, if I replace the for loop with the following while loop, the problem is completely solved.
int bytesRead = 0;
byte[] buffer = new byte[(int) totalDownloadSize];
file.seek(startFrom); //adjusted start of file
while ((bytesRead = inStream.read(buffer)) != -1) {
    file.write(buffer, 0, bytesRead);
}
But I need the for loop to measure how much of the file each thread has downloaded, so I can update the threads' respective JProgressBars.
Kindly help me out with the for loop logic,
OR
advise on how I can update the JProgressBars within the while loop. I can't seem to find a way to quantify how much of the file a thread has downloaded...
I've spent a lot of hours and I'm extremely tired now...
You can use the while loop that works, and then keep track of the total amount of bytes read like this:
int totalRead = 0;
int bytesRead;
while ((bytesRead = inStream.read(buffer)) != -1) {
    totalRead += bytesRead;
    file.write(buffer, 0, bytesRead);
    //multiply by 100 before truncating, so the value is a percentage (0-100)
    progressBar.setValue((int) (100 * (totalRead / (double) totalDownloadSize)));
}
just remember that for (a; b; c) { ... } is equivalent to a; while (b) { ...; c; }: the update step c runs at the end of each iteration.
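As a concrete illustration of that equivalence (a small example of my own):

// These two methods print exactly the same numbers:
static void forVersion(int n) {
    for (int i = 0; i < n; i++) {
        System.out.println(i);
    }
}

static void whileVersion(int n) {
    int i = 0;
    while (i < n) {
        System.out.println(i);
        i++; // the for loop's update step, moved to the end of the body
    }
}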
Can anybody see what is wrong with this code? It does not show the progress bar, but it uploads all the files.
I did check out the Sun tutorial and SwingWorkers too, but I couldn't fix it yet.
private static boolean putFile(String m_sLocalFile, FtpClient m_client) {
    boolean success = false;
    int BUFFER_SIZE = 10240;
    if (m_sLocalFile.length() == 0) {
        System.out.println("Please enter file name");
    }
    byte[] buffer = new byte[BUFFER_SIZE];
    try {
        File f = new File(m_sLocalFile);
        int size = (int) f.length();
        System.out.println("File " + m_sLocalFile + ": " + size + " bytes");
        System.out.println(size);
        FileInputStream in = new FileInputStream(m_sLocalFile);
        //test
        InputStream inputStream = new BufferedInputStream(
                new ProgressMonitorInputStream(null, "Uploading " + f.getName(), in));
        //test
        OutputStream out = m_client.put(f.getName());
        int counter = 0;
        while (true) {
            int bytes = inputStream.read(buffer); //in
            if (bytes < 0)
                break;
            out.write(buffer, 0, bytes);
            counter += bytes;
            System.out.println(counter);
        }
        out.close();
        in.close();
        inputStream.close();
        success = true;
    } catch (Exception ex) {
        System.out.println("Error: " + ex.toString());
    }
    return true;
}
I think your code is fine.
Maybe the task isn't taking long enough for the progress bar to be needed?
Here's a modified version of your code which reads from a local file and writes to another local file.
I have also added a delay to the write so that it gives the progress bar time to kick in.
This works fine on my system with a sample 12MB PDF file, and shows the progress bar.
If you have a smaller file then just increase the sleep from 5 milliseconds to 100 or something - you would need to experiment.
And I didn't even know that the ProgressMonitorInputStream class existed, so I've learnt something myself ;].
/**
 * main
 */
public static void main(String[] args) {
    try {
        System.out.println("start");
        final String inf = "d:/testfile.pdf";
        final String outf = "d:/testfile.tmp.pdf";
        final FileOutputStream out = new FileOutputStream(outf) {
            @Override
            public void write(byte[] b, int off, int len) throws IOException {
                super.write(b, off, len);
                try {
                    // We delay the write by a few millis to give the progress bar time to kick in
                    Thread.sleep(5);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        };
        putFile(inf, out);
        System.out.println("end");
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    }
}
private static boolean putFile(String m_sLocalFile, OutputStream out /*FtpClient m_client*/) {
    boolean success = false;
    int BUFFER_SIZE = 10240;
    if (m_sLocalFile.length() == 0) {
        System.out.println("Please enter file name");
    }
    byte[] buffer = new byte[BUFFER_SIZE];
    try {
        File f = new File(m_sLocalFile);
        int size = (int) f.length();
        System.out.println("File " + m_sLocalFile + ": " + size + " bytes");
        System.out.println(size);
        FileInputStream in = new FileInputStream(m_sLocalFile);
        //test
        InputStream inputStream = new BufferedInputStream(
                new ProgressMonitorInputStream(null, "Uploading " + f.getName(), in));
        //test
        //OutputStream out = m_client.put(f.getName());
        int counter = 0;
        while (true) {
            int bytes = inputStream.read(buffer); //in
            if (bytes < 0)
                break;
            out.write(buffer, 0, bytes);
            counter += bytes;
            System.out.println(counter);
        }
        out.close();
        in.close();
        inputStream.close();
        success = true;
    } catch (Exception ex) {
        System.out.println("Error: " + ex.toString());
    }
    return true;
}
I have a use case where I want to upload big gzipped text data files (~60 GB) to HDFS.
My code below is taking about 2 hours to upload these files in chunks of 500 MB. Following is the pseudo code; I was checking if somebody could help me reduce this time:
int fileFetchBuffer = 500000000;
System.out.println("file fetch buffer is: " + fileFetchBuffer);
int offset = 0;
int bytesRead = -1;
try {
    fileStream = new FileInputStream(file);
    if (fileName.endsWith(".gz")) {
        stream = new GZIPInputStream(fileStream);
        BufferedReader reader = new BufferedReader(new InputStreamReader(stream));
        String[] fileN = fileName.split("\\.");
        System.out.println("fil 0 : " + fileN[0]);
        System.out.println("fil 1 : " + fileN[1]);
        //logger.info("First line is: " + streamBuff.readLine());
        byte[] buffer = new byte[fileFetchBuffer];
        FileSystem fs = FileSystem.get(conf);
        int charsLeft = fileFetchBuffer;
        while (true) {
            charsLeft = fileFetchBuffer;
            logger.info("charsLeft outside while: " + charsLeft);
            FSDataOutputStream dos = null;
            while (charsLeft != 0) {
                bytesRead = stream.read(buffer, 0, charsLeft);
                if (bytesRead < 0) {
                    dos.flush();
                    dos.close();
                    break;
                }
                offset = offset + bytesRead;
                charsLeft = charsLeft - bytesRead;
                logger.info("offset in record: " + offset);
                logger.info("charsLeft: " + charsLeft);
                logger.info("bytesRead in record: " + bytesRead);
                //prettyPrintHex(buffer);
                String outFileStr = Utils.getOutputFileName(
                        stagingDir,
                        fileN[0],
                        outFileNum);
                if (dos == null) {
                    Path outFile = new Path(outFileStr);
                    if (fs.exists(outFile)) {
                        fs.delete(outFile, false);
                    }
                    dos = fs.create(outFile);
                }
                dos.write(buffer, 0, bytesRead);
            }
            logger.info("done writing: " + outFileNum);
            dos.flush();
            dos.close();
            if (bytesRead < 0) {
                dos.flush();
                dos.close();
                break;
            }
            outFileNum++;
        } // end of while
    } else {
        // Assume uncompressed file
        stream = fileStream;
    }
} catch (FileNotFoundException e) {
    logger.error("File not found" + e);
}
You should consider using the Apache Commons IO package.
It has a method,
IOUtils.copy( InputStream, OutputStream ),
that would tremendously reduce the time needed to copy your files.
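A minimal usage sketch (my own, assuming commons-io is on the classpath; the paths are placeholders):

// Copy an entire stream; IOUtils.copy loops over an internal buffer for you.
static void copyFile(String from, String to) throws IOException {
    try (InputStream in = Files.newInputStream(Paths.get(from));
         OutputStream out = Files.newOutputStream(Paths.get(to))) {
        IOUtils.copy(in, out);
    }
}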
I tried with a buffered input stream and saw no real difference.
I suppose a file channel implementation could be even more efficient. Tell me if it's not fast enough.
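For what it's worth, a channel-based copy might look like this (a hedged sketch of what I mean, using FileChannel.transferTo, which can let the OS do the copy):

// Copy one file to another through FileChannels.
static void channelCopy(String from, String to) throws IOException {
    try (FileChannel in = FileChannel.open(Paths.get(from), StandardOpenOption.READ);
         FileChannel out = FileChannel.open(Paths.get(to),
                 StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
        long pos = 0;
        long size = in.size();
        while (pos < size) {
            // transferTo may copy fewer bytes than requested, so loop until done
            pos += in.transferTo(pos, size - pos, out);
        }
    }
}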
package toto;

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;

public class Slicer {

    private static final int BUFFER_SIZE = 50000;

    public static void main(String[] args) {
        try
        {
            slice( args[ 0 ], args[ 1 ], Long.parseLong( args[2]) );
        }//try
        catch (IOException e)
        {
            e.printStackTrace();
        }//catch
        catch( Exception ex )
        {
            ex.printStackTrace();
            System.out.println( "Usage : toto.Slicer <big file> <chunk name radix> <chunk size>" );
        }//catch
    }//met

    /**
     * Slices a huge file into chunks.
     * @param inputFileName the big file to slice.
     * @param outputFileRadix the base name of slices generated by the slicer. All slices will then be numbered outputFileRadix0, outputFileRadix1, outputFileRadix2...
     * @param chunkSize the size of chunks in bytes
     * @return the number of slices.
     */
    public static int slice( String inputFileName, String outputFileRadix, long chunkSize ) throws IOException
    {
        //I would add some code to pretty print the output file names,
        //i.e. adding a couple of 0s before chunkNumber in the output file name
        //so that they all have the same number of chars
        //(use java.io.File for that: estimate the number of chunks, take the power of 10, get the number of leading 0s)

        //just to get some stats
        long timeStart = System.currentTimeMillis();
        long timeStartSlice = timeStart;
        long timeEnd = 0;

        //io streams and chunk counter
        int chunkNumber = 0;
        FileInputStream fis = null;
        FileOutputStream fos = null;

        try
        {
            //open files
            fis = new FileInputStream( inputFileName );
            fos = new FileOutputStream( outputFileRadix + chunkNumber );

            //declare state variables
            boolean finished = false;
            byte[] buffer = new byte[ BUFFER_SIZE ];
            int bytesRead = 0;
            long bytesInChunk = 0;

            while( !finished )
            {
                //System.out.println( "bytes to read " + (int)Math.min( BUFFER_SIZE, chunkSize - bytesInChunk ) );
                bytesRead = fis.read( buffer, 0, (int)Math.min( BUFFER_SIZE, chunkSize - bytesInChunk ) );
                if( bytesRead == -1 )
                    finished = true;
                else
                {
                    fos.write( buffer, 0, bytesRead );
                    bytesInChunk += bytesRead;
                    if( bytesInChunk == chunkSize )
                    {
                        if( fos != null )
                        {
                            fos.close();
                            timeEnd = System.currentTimeMillis();
                            System.out.println( "Chunk " + chunkNumber + " has been generated in " + (timeEnd - timeStartSlice) + " ms" );
                            chunkNumber++;
                            bytesInChunk = 0;
                            timeStartSlice = timeEnd;
                            System.out.println( "Creating slice number " + chunkNumber );
                            fos = new FileOutputStream( outputFileRadix + chunkNumber );
                        }//if
                    }//if
                }//else
            }//while
        }
        catch (Exception e)
        {
            System.out.println( "A problem occured during slicing : " );
            e.printStackTrace();
        }//catch
        finally
        {
            //whatever happens close all files
            System.out.println( "Closing all files." );
            if( fis != null )
                fis.close();
            if( fos != null )
                fos.close();
        }//fin

        timeEnd = System.currentTimeMillis();
        System.out.println( "Total slicing time : " + (timeEnd - timeStart) + " ms" );
        System.out.println( "Total number of slices " + (chunkNumber + 1) );
        return chunkNumber + 1;
    }//met
}//class
Greetings,
Stéphane