Android: Track progress of Azure CloudBlockBlob upload - Java

How can I print the number of bytes that have been uploaded after calling blob.upload(new FileInputStream(imageFile), imageFile.length())? I want to log something like "100/totalBytes bytes have been uploaded, 224/totalBytes bytes have been uploaded...", so I can create a progress bar for the upload.
This is the code:
// AzureBlobLoader extends AsyncTask
public class AzureBlobUploader extends AzureBlobLoader {
    private Activity act;
    private String userName;
    private TaggedImageObject img;
    private Fragment histFragment;

    public AzureBlobUploader(Fragment f, Activity act, String userName, TaggedImageObject img) {
        super();
        this.act = act;
        this.userName = userName;
        this.img = img;
        this.histFragment = f;
    }

    @Override
    protected Object doInBackground(Object[] params) {
        File imageFile = new File(this.img.getImgPath());
        try {
            // Define the path to a local file.
            final String filePath = imageFile.getPath();

            // Create or overwrite the blob with contents from the local file.
            String[] imagePathArray = filePath.split("/");
            String imageName = imagePathArray[imagePathArray.length - 1];
            System.out.println("Image Name: " + imageName);
            String containerName = userName + "/" + imageName;
            System.out.println("Container Name: " + containerName);
            CloudBlockBlob blob = this.getContainer().getBlockBlobReference(containerName);

            // UPLOAD!
            blob.upload(new FileInputStream(imageFile), imageFile.length());

            // -----DATABASE----- //
            // Create the client.
            this.setDBClient(
                    new MobileServiceClient(
                            "URL",
                            this.act.getApplicationContext()
                    )
            );
            this.setImageTable(this.getDBClient().getTable(Image.class));
            this.setIcavTable(this.getDBClient().getTable(ICAV.class));

            // IMG TABLE QUERY
            String validImageID = containerName.replace("/", "_");
            Log.d("Azure", "Valid Image ID: " + validImageID);
            Image img = new Image(validImageID, this.img.getUser(), this.img.getLat(), this.img.getLon());
            this.getImageTable().insert(img);

            for (String context : this.img.getContextAttributeMap().keySet()) {
                Map<String, String> attributeValueMap = this.img.getContextAttributeMap().get(context);
                for (String attribute : attributeValueMap.keySet()) {
                    String value = attributeValueMap.get(attribute);
                    ICAV icavRow = new ICAV();
                    icavRow.setImageID(validImageID);
                    icavRow.setContextID(context);
                    icavRow.setAttributeID(attribute);
                    icavRow.setValue(value);
                    this.getIcavTable().insert(icavRow);
                }
            }
        } catch (Exception e) {
            System.out.println(e.toString());
        }
        return null;
    }

    @Override
    protected void onProgressUpdate(Object... object) {
        super.onProgressUpdate(object);
        Log.d("progressUpdate", "progress: " + ((Integer) object[0] * 2) + "%");
    }

    @Override
    protected void onPostExecute(Object o) {
        // TODO
    }
}

As you can see, the Azure SDK doesn't directly allow for that, but it should be fairly easy to wrap your InputStream in another InputStream that issues callbacks for bytes read. Something like this:
public class ListenableInputStream extends InputStream {
    private final InputStream wrapped;
    private final ReadListener listener;
    private final long minimumBytesPerCall;
    private long bytesRead;

    public ListenableInputStream(InputStream wrapped, ReadListener listener, int minimumBytesPerCall) {
        this.wrapped = wrapped;
        this.listener = listener;
        this.minimumBytesPerCall = minimumBytesPerCall;
    }

    @Override
    public int read() throws IOException {
        int read = wrapped.read();
        if (read >= 0) {
            bytesRead++;
        }
        if (bytesRead > minimumBytesPerCall || read == -1) {
            listener.onRead(bytesRead);
            bytesRead = 0;
        }
        return read;
    }

    @Override
    public int available() throws IOException {
        return wrapped.available();
    }

    @Override
    public void close() throws IOException {
        wrapped.close();
    }

    @Override
    public synchronized void mark(int readlimit) {
        wrapped.mark(readlimit);
    }

    @Override
    public synchronized void reset() throws IOException {
        wrapped.reset();
    }

    @Override
    public boolean markSupported() {
        return wrapped.markSupported();
    }

    interface ReadListener {
        void onRead(long bytes);
    }
}
minimumBytesPerCall should be initialised to some sensible number, as you probably don't want to be called back on every single byte; maybe every half a megabyte is good.
And remember that this all gets called on the doInBackground thread, so act accordingly.
Edit:
I've edited the class above; there was a small error in computing the bytesRead value.
The official documentation answers your follow-up questions: https://developer.android.com/reference/java/io/InputStream.html#read()
Reads the next byte of data from the input stream
So read() reads 1 byte of data (or returns -1 if it has reached the end). So yes, it must be called many times to read a whole image.
The method onRead(long) then gets called every time at least minimumBytesPerCall bytes have been read (that's to avoid calling back for every single byte), and once more at the end of the stream (when read() returns -1).
The value passed to onRead(long) is the amount that has been read since the last call. So when implementing this in your AsyncTask, you have to accumulate this value and compare it with the total size of the file.
Something like the following code inside your AsyncTask should work fine (assuming the Progress generic parameter is a Long):
private long fileLength;
private long totalBytes;

private final ListenableInputStream.ReadListener readListener = new ListenableInputStream.ReadListener() {
    @Override
    public void onRead(long bytes) {
        totalBytes += bytes;
        publishProgress(totalBytes);
    }
};
and inside your upload part you replace the upload call with:
FileInputStream fis = new FileInputStream(imageFile);
fileLength = imageFile.length();
ListenableInputStream lis = new ListenableInputStream(fis, readListener, 256 * 1024); // this will call onRead(long) every 256 kB
blob.upload(lis, fileLength);
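To turn that into a percentage for the progress bar, divide by the file length in onProgressUpdate. A minimal sketch, assuming the task is declared as AsyncTask<Object, Long, Object> and that progressBar is a hypothetical reference to your UI widget:
@Override
protected void onProgressUpdate(Long... values) {
    long uploaded = values[0];
    int percent = (int) (uploaded * 100 / fileLength); // fileLength is set in doInBackground
    Log.d("progressUpdate", uploaded + "/" + fileLength + " bytes have been uploaded (" + percent + "%)");
    progressBar.setProgress(percent); // onProgressUpdate already runs on the UI thread
}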
As a last remark, remember that internally CloudBlockBlob might just be caching the file in its own memory for later upload, or doing other work that is out of your control. All this code verifies is that the complete file was read.
happy coding!

Just another way to meet your needs: there is an MS blog post introducing how to upload a blob to Azure Storage with a progress bar and a variable upload block size. That code is written in C#, but it is very readable for a Java/Android developer; I think you can easily rewrite it in Java for Android to compute the upload progress ratio and share it via some public variables.
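For reference, here is a rough Java sketch of that block-by-block idea, assuming the same com.microsoft.azure.storage SDK used in the question; BLOCK_SIZE and uploadWithProgress are my own names, not the blog's:
import com.microsoft.azure.storage.blob.BlockEntry;
import com.microsoft.azure.storage.blob.CloudBlockBlob;

private static final int BLOCK_SIZE = 1024 * 1024; // 1 MB per block (tunable)

void uploadWithProgress(CloudBlockBlob blob, File file) throws Exception {
    List<BlockEntry> blocks = new ArrayList<>();
    byte[] buffer = new byte[BLOCK_SIZE];
    long uploaded = 0;
    int n;
    int i = 0;
    try (FileInputStream in = new FileInputStream(file)) {
        while ((n = in.read(buffer)) != -1) {
            // Block IDs must be Base64 strings of equal length.
            String blockId = android.util.Base64.encodeToString(
                    String.format(Locale.US, "%06d", i++).getBytes("UTF-8"),
                    android.util.Base64.NO_WRAP);
            blob.uploadBlock(blockId, new ByteArrayInputStream(buffer, 0, n), n);
            blocks.add(new BlockEntry(blockId));
            uploaded += n;
            publishProgress(uploaded); // e.g. from inside the AsyncTask
        }
    }
    blob.commitBlockList(blocks); // assembles the uploaded blocks into the final blob
}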
Hope it helps.

Related

How to split InputStream into multiple BoundedInputStream without knowing total stream size?

I'm looking at an open source Swift library that can split an InputStream into multiple BoundedInputStream objects, given a total stream size (so it knows when to stop creating bounded input streams). I don't see why there shouldn't be an option to stop creating BoundedInputStreams automatically once the initial InputStream is exhausted.
The code looks something like this:
protected Long segmentationSize = 5368709120L; // 5 GB per segment
protected Long currentSegment = 0L;
private InputStream inputStream; // supplied externally
private long inputStreamSize;    // supplied externally

public void uploadSegmentedObjects() {
    InputStream segmentStream = getNextSegment();
    while (segmentStream != null) {
        // do something
        segmentStream = getNextSegment();
    }
}

public InputStream getNextSegment() {
    if (done()) {
        return null;
    }
    InputStream segment = createSegment();
    currentSegment++;
    return segment;
}

protected boolean done() {
    return currentSegment * segmentationSize > inputStreamSize;
}

@Override
protected InputStream createSegment() throws IOException {
    BoundedInputStream stream = new BoundedInputStream(inputStream, segmentationSize);
    stream.setPropagateClose(false);
    return stream;
}
Essentially, I need to know how to rewrite the done() method so that it does not rely on the inputStreamSize variable, and instead getNextSegment() returns null when the stream is exhausted.
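One way to do that (a sketch of the general technique, not the library's own API): probe one byte ahead with the JDK's PushbackInputStream and stop when the probe hits end-of-stream, instead of comparing counters against a known size:
import java.io.IOException;
import java.io.InputStream;
import java.io.PushbackInputStream;
import org.apache.commons.io.input.BoundedInputStream;

private PushbackInputStream pushbackStream; // wrap the source once: new PushbackInputStream(inputStream, 1)

public InputStream getNextSegment() throws IOException {
    int probe = pushbackStream.read();
    if (probe == -1) {
        return null; // the source is exhausted, so stop producing segments
    }
    pushbackStream.unread(probe); // put the probed byte back for the segment to read
    BoundedInputStream segment = new BoundedInputStream(pushbackStream, segmentationSize);
    segment.setPropagateClose(false); // keep the underlying stream open between segments
    currentSegment++;
    return segment;
}
Note that this detects exhaustion of the stream rather than it being closed, which is usually what the segmentation loop actually needs.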

How can I display list of recently opened files?

Can anyone provide me with the guidelines on how to retrieve a list of recently opened files in a folder?
In my application, I have a folder which contains .epub files.
I would like to display a sorted list of recently read/opened books by the user.
The problem is actually not so trivial.
There are two reasons: the history's persistence, and blocking IO operations.
To ensure persistence of the history, we can choose among several solutions:
database (seems most reasonable but requires the most effort)
internal file (should be the easiest way)
shared preferences
So, I used the second method. In memory I keep just an ArrayList<Uri>, and to save it to a file I convert it to a List<String>, as android.net.Uri doesn't support serialisation.
In internal memory I save the serialised object using ObjectOutputStream/ObjectInputStream.
So, see the code:
public class FileHistory {
    private static final String FILE_NAME = "file-history-v1";
    private static final int HISTORY_SIZE = 20;

    @NonNull
    private final Context mAppContext;

    // This is an executor where I can post any runnable
    // and all of them will be executed in one pipeline,
    // keeping posting order.
    @NonNull
    private final OneThreadExecutor mExecutor;

    @Nullable
    private ArrayList<Uri> mInternalFilesHistory;

    @NonNull
    private MutableLiveData<List<Uri>> mFilesHistory = new MutableLiveData<>();

    public FileHistory(@NonNull final Context appContext,
                       @NonNull final OneThreadExecutor executor) {
        this.mAppContext = appContext;
        this.mExecutor = executor;
        loadHistory();
    }

    public void addEntry(@NonNull final Uri entry) {
        if (mInternalFilesHistory == null) {
            // The file history is not ready yet.
            // Schedule adding the entry as the next task of the serial executor.
            mExecutor.execute(() -> addEntry(entry));
            return;
        }

        // Remove the entry if it exists and add it as the first element.
        CollectionUtils.removeFirst(mInternalFilesHistory, uri -> uri.equals(entry));
        mInternalFilesHistory.add(0, entry);
        if (mInternalFilesHistory.size() > HISTORY_SIZE) {
            ArrayList<Uri> trimmed = new ArrayList<>(HISTORY_SIZE + 1);
            trimmed.addAll(mInternalFilesHistory.subList(0, HISTORY_SIZE));
            mInternalFilesHistory = trimmed;
        }
        mExecutor.execute(this::rePostHistory);
        mExecutor.execute(this::saveToInternalStorage);
    }

    @NonNull
    public MutableLiveData<List<Uri>> getFilesHistory() {
        return mFilesHistory;
    }

    private void loadHistory() {
        mExecutor.execute(this::loadFromInternalStorage);
        mExecutor.execute(this::rePostHistory);
    }

    private void rePostHistory() {
        if (mInternalFilesHistory != null) {
            mFilesHistory.postValue(Collections.unmodifiableList(mInternalFilesHistory));
        }
    }

    @SuppressWarnings("unchecked")
    @WorkerThread
    private void loadFromInternalStorage() {
        try {
            FileInputStream fis = mAppContext.openFileInput(FILE_NAME);
            ObjectInputStream ois = new ObjectInputStream(fis);
            ArrayList<String> entries = (ArrayList<String>) ois.readObject();
            List<Uri> mapped = CollectionUtils.map(entries, Uri::parse);
            if (mInternalFilesHistory == null) {
                mInternalFilesHistory = new ArrayList<>(HISTORY_SIZE + 1);
            } else {
                mInternalFilesHistory.clear();
            }
            mInternalFilesHistory.addAll(mapped);
            ois.close();
            fis.close();
        } catch (Exception ex) {
            mInternalFilesHistory = new ArrayList<>(HISTORY_SIZE + 1);
        }
    }

    @WorkerThread
    private void saveToInternalStorage() {
        try {
            FileOutputStream fos = mAppContext.openFileOutput(FILE_NAME, Context.MODE_PRIVATE);
            ObjectOutputStream oos = new ObjectOutputStream(fos);
            if (mInternalFilesHistory == null) {
                mInternalFilesHistory = new ArrayList<>();
            }
            List<String> converted = CollectionUtils.map(mInternalFilesHistory, Uri::toString);
            oos.writeObject(converted);
            // Close the ObjectOutputStream first so its buffer is flushed to the file.
            oos.close();
            fos.close();
        } catch (IOException ignored) {
        }
    }
}
As you can see, internal storage is used to keep that file, so there is no need to add any additional permissions.
Synchronisation is ensured by the executor, which runs all requests one by one, so even if IO is slow the order of requests is preserved.
We do not block the main thread with IO operations, because everything touching IO runs on a worker thread; we are notified about the result via LiveData from android.arch.
In my opinion this is about the simplest solution. If we need to keep stats, dates, etc., we can save a List<MyHistoryEntry> instead, as long as MyHistoryEntry is serialisable.
As a better approach, I would suggest using a database (easier migration, etc.).
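For completeness, a minimal sketch of that database approach using Room (androidx.room); the entity and DAO names are illustrative assumptions:
@Entity(tableName = "file_history")
public class HistoryEntry {
    @PrimaryKey
    @NonNull
    public String uri;        // the file Uri, stored as a String
    public long lastOpenedAt; // epoch millis, used for ordering
}

@Dao
public interface HistoryDao {
    @Insert(onConflict = OnConflictStrategy.REPLACE)
    void upsert(HistoryEntry entry); // re-opening a file just bumps its timestamp

    @Query("SELECT * FROM file_history ORDER BY lastOpenedAt DESC LIMIT 20")
    LiveData<List<HistoryEntry>> mostRecent(); // observable, like the MutableLiveData above
}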

TrueZip compression taking too much time

I am using TrueZip for compression. Here is what my code looks like:
public String compress() throws IOException {
    if (logLocations.isEmpty()) {
        throw new IllegalStateException("no logs provided to compress");
    }
    removeDestinationIfExists(desiredArchive);
    final TFile destinationArchive = new TFile(desiredArchive + "/diagnostics");
    for (final String logLocation : logLocations) {
        final TFile log = new TFile(logLocation);
        if (!log.exists()) {
            LOGGER.debug("{} does not exist, ignoring.", logLocation);
            continue;
        }
        if (log.isDirectory()) {
            log.cp_r(destinationArchive);
        } else {
            final String newLogLocation =
                    new TFile(destinationArchive.getAbsolutePath()) + SLASH +
                            getLogNameFromPath(logLocation);
            log.cp(new TFile(newLogLocation));
        }
    }
    return destinationArchive.getEnclArchive().getAbsolutePath();
}
and my test:
@Test
public void testBenchMarkWithHprof() throws IOException {
    final FileWriter logLocations;
    String logLocationPath = "/Users/harit/Downloads/tmp/logLocations.txt";
    {
        logLocations = new FileWriter(logLocationPath);
        logLocations.write("Test3");
        logLocations.write("\n");
        logLocations.close();
    }
    final LPLogCompressor compressor = new LPLogCompressor("/Users/harit/Downloads/tmp",
            new File(logLocationPath),
            "/Users/harit/Downloads/tmp/TestOut");
    final long startTime = System.currentTimeMillis();
    compressor.compress();
    System.out.println("Time taken (msec): " + (System.currentTimeMillis() - startTime));
}
and my data directory Test3 looks like:
Test3/
    java_pid1748.hprof
The file size is 2.83 GB.
When I ran the test, it took over 22 minutes.
However, when I compress the same file using the native OSX compressor (right click -> Compress), it takes only 2 minutes.
Why is there so much difference?
Thanks
UPDATE
Based on @Satnam's recommendation, I attached a debugger to see what's going on, and this is what I found: none of the TrueZip threads are running. Really? Apologies, I am using a profiler for the first time.
The reason in this case was the default deflater level, which is Deflater.BEST_COMPRESSION.
I overrode the ZipDriver class to change the level:
import de.schlichtherle.truezip.fs.archive.zip.ZipDriver;
import de.schlichtherle.truezip.socket.IOPoolProvider;

import java.util.zip.Deflater;

public class OverrideZipDriver extends ZipDriver {
    public OverrideZipDriver(final IOPoolProvider ioPoolProvider) {
        super(ioPoolProvider);
    }

    @Override
    public int getLevel() {
        return Deflater.DEFAULT_COMPRESSION;
    }
}
and then in my Compressor class, I did:
public LPLogCompressor(final String logProcessorInstallPath, final File logLocationsSource,
                       final String desiredArchive) throws IOException {
    this.desiredArchive = desiredArchive + DOT + getDateTimeStampFormat() + ZIP;
    logLocations = getLogLocations(logProcessorInstallPath, logLocationsSource);
    enableLogCompression();
}

private static void enableLogCompression() {
    TConfig.get().setArchiveDetector(
            new TArchiveDetector(TArchiveDetector.NULL, new Object[][]{
                    {"zip", new OverrideZipDriver(IOPoolLocator.SINGLETON)},
            }));
    TConfig.push();
}
You can read the thread here
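The effect of the compression level is easy to demonstrate with plain java.util.zip, independently of TrueZip. A small self-contained benchmark sketch (how big the gap is depends heavily on how compressible the input is):
import java.util.Random;
import java.util.zip.Deflater;

public class DeflaterLevelBenchmark {
    public static void main(String[] args) {
        byte[] data = new byte[64 * 1024 * 1024]; // 64 MB of pseudo-random input
        new Random(42).nextBytes(data);

        for (int level : new int[]{Deflater.DEFAULT_COMPRESSION, Deflater.BEST_COMPRESSION}) {
            Deflater deflater = new Deflater(level);
            deflater.setInput(data);
            deflater.finish();
            byte[] out = new byte[8192];
            long start = System.currentTimeMillis();
            while (!deflater.finished()) {
                deflater.deflate(out);
            }
            deflater.end();
            System.out.println("level " + level + ": " + (System.currentTimeMillis() - start) + " ms");
        }
    }
}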

Setting a Unique ID at Application Install - Android

I'm trying to implement the code found below so that I can generate a random ID number for the user right when the app is installed. I just have a couple of questions.
If I create a new file for this (Install.java), how do I access the ID in another class?
How do I make sure that this part of the program is executed when the app is first installed? Right now, the program starts in my Main.java class (I'm new to Java). Will it just run when the app is installed?
public class Install {
    private static String sID = null;
    private static final String INSTALLATION = "INSTALLATION";

    public synchronized static String id(Context context) {
        if (sID == null) {
            File installation = new File(context.getFilesDir(), INSTALLATION);
            try {
                if (!installation.exists()) {
                    writeInstallationFile(installation);
                }
                sID = readInstallationFile(installation);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
        return sID;
    }

    private static String readInstallationFile(File installation) throws IOException {
        RandomAccessFile f = new RandomAccessFile(installation, "r");
        byte[] bytes = new byte[(int) f.length()];
        f.readFully(bytes);
        f.close();
        return new String(bytes);
    }

    private static void writeInstallationFile(File installation) throws IOException {
        FileOutputStream out = new FileOutputStream(installation);
        String id = UUID.randomUUID().toString();
        out.write(id.getBytes());
        out.close();
    }
}
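To answer the first question: since id() is static and takes a Context, any class that can reach a Context can read the same ID, and the file is created lazily on first access, so nothing has to run at install time. For example:
// From any Activity, Service, or other Context-aware class:
String installationId = Install.id(getApplicationContext());
Log.d("Install", "This installation's ID: " + installationId);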
Here's some code I use - feel free to adapt as you will...
public void onCreate(Bundle icicle) {
    super.onCreate(icicle);
    Log.d(Tag, "Yay onCreate!"); // sorry, sometimes I'm a bit verbose with my logs...
    createVerifierStrings();
    ...

private void createVerifierStrings() {
    SharedPreferences prefs = this.getSharedPreferences("Someprefstringreference", 0);
    String not_set = "NOTSET";
    String android_key;
    android_key = prefs.getString("id", not_set);
    if (android_key.equals(not_set)) {
        Log.d(Tag, "Creating keys for 1st time");
        android_key = generateRandomEnoughStuff();
        prefs.edit().putString("id", android_key).commit();
    }
    ...
As far as I know, you don't get a way to run arbitrary code right after installation completes.
I think the closest you can get is to make a check inside your MainActivity's onCreate() method that determines whether or not this is the first run (a good way to check might be to get a reference to your file and call file.exists(); the resulting boolean tells you whether you need to create your UID file).
Here is a blog post from Tim Bray that explains what you actually should be doing:
http://android-developers.blogspot.com/2011/03/identifying-app-installations.html

How to Cache InputStream for Multiple Use

I have an InputStream of a file, and I use Apache POI components to read from it like this:
POIFSFileSystem fileSystem = new POIFSFileSystem(inputStream);
The problem is that I need to use the same stream multiple times, and POIFSFileSystem closes the stream after use.
What is the best way to cache the data from the input stream and then serve more input streams to different POIFSFileSystem instances?
EDIT 1:
By cache I meant store for later use, not as a way to speed up the application. Also, is it better to just read the input stream into an array or string and then create input streams for each use?
EDIT 2:
Sorry to reopen the question, but the conditions are somewhat different when working in a desktop versus a web application.
First of all, the InputStream I get from org.apache.commons.fileupload.FileItem in my Tomcat web app doesn't support mark, and thus cannot be reset.
Second, I'd like to be able to keep the file in memory for faster access and fewer IO problems when dealing with files.
You can decorate the InputStream being passed to POIFSFileSystem with a version that responds to close() by calling reset():
class ResetOnCloseInputStream extends InputStream {
    private final InputStream decorated;

    public ResetOnCloseInputStream(InputStream anInputStream) {
        if (!anInputStream.markSupported()) {
            throw new IllegalArgumentException("marking not supported");
        }
        anInputStream.mark(1 << 24); // magic constant: BEWARE
        decorated = anInputStream;
    }

    @Override
    public void close() throws IOException {
        decorated.reset();
    }

    @Override
    public int read() throws IOException {
        return decorated.read();
    }
}
Test case:
static void closeAfterInputStreamIsConsumed(InputStream is) throws IOException {
    int r;
    while ((r = is.read()) != -1) {
        System.out.println(r);
    }
    is.close();
    System.out.println("=========");
}

public static void main(String[] args) throws IOException {
    InputStream is = new ByteArrayInputStream("sample".getBytes());
    ResetOnCloseInputStream decoratedIs = new ResetOnCloseInputStream(is);
    closeAfterInputStreamIsConsumed(decoratedIs);
    closeAfterInputStreamIsConsumed(decoratedIs);
    closeAfterInputStreamIsConsumed(is);
}
EDIT 2:
You can read the entire file into a byte[] (slurp mode), then pass it to a ByteArrayInputStream.
Try BufferedInputStream, which adds mark and reset functionality to another input stream, and just override its close method:
public class UnclosableBufferedInputStream extends BufferedInputStream {

    public UnclosableBufferedInputStream(InputStream in) {
        super(in);
        super.mark(Integer.MAX_VALUE);
    }

    @Override
    public void close() throws IOException {
        super.reset();
    }
}
So:
UnclosableBufferedInputStream bis = new UnclosableBufferedInputStream(inputStream);
and use bis wherever inputStream was used before.
This works correctly:
byte[] bytes = getBytes(inputStream);
POIFSFileSystem fileSystem = new POIFSFileSystem(new ByteArrayInputStream(bytes));
where getBytes is like this:
private static byte[] getBytes(InputStream is) throws IOException {
    byte[] buffer = new byte[8192];
    ByteArrayOutputStream baos = new ByteArrayOutputStream(2048);
    int n;
    baos.reset();
    while ((n = is.read(buffer, 0, buffer.length)) != -1) {
        baos.write(buffer, 0, n);
    }
    return baos.toByteArray();
}
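If you already have Commons IO on the classpath (it appears elsewhere in this thread), the same slurp is a one-liner:
byte[] bytes = IOUtils.toByteArray(inputStream); // reads the stream fully into memory
POIFSFileSystem fileSystem = new POIFSFileSystem(new ByteArrayInputStream(bytes));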
Use the implementation below for more customised use:
public class ReusableBufferedInputStream extends BufferedInputStream {

    private int totalUse;
    private int used;

    public ReusableBufferedInputStream(InputStream in, Integer totalUse) {
        super(in);
        if (totalUse > 1) {
            super.mark(Integer.MAX_VALUE);
            this.totalUse = totalUse;
            this.used = 1;
        } else {
            this.totalUse = 1;
            this.used = 1;
        }
    }

    @Override
    public void close() throws IOException {
        if (used < totalUse) {
            super.reset();
            ++used;
        } else {
            super.close();
        }
    }
}
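Usage sketch: you declare up front how many times the stream will be consumed, and only the last close() really closes it:
ReusableBufferedInputStream ris = new ReusableBufferedInputStream(inputStream, 2);
POIFSFileSystem first = new POIFSFileSystem(ris);  // close() here just resets
POIFSFileSystem second = new POIFSFileSystem(ris); // second and final use really closes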
What exactly do you mean by "cache"? Do you want the different POIFSFileSystems to start at the beginning of the stream? If so, there's absolutely no point caching anything in your Java code; it will be done by the OS. Just open a new stream.
Or do you want to continue reading at the point where the first POIFSFileSystem stopped? That's not caching, and it's very difficult to do. The only way I can think of, if you can't avoid the stream getting closed, would be to write a thin wrapper that counts how many bytes have been read, and then open a new stream and skip that many bytes. But that could fail when POIFSFileSystem internally uses something like a BufferedInputStream.
If the file is not that big, read it into a byte[] array and give POI a ByteArrayInputStream created from that array.
If the file is big, then you shouldn't care, since the OS will do the caching for you as best as it can.
[EDIT] Use Apache commons-io to read the File into a byte array in an efficient way. Do not use int read(), since it reads the file byte by byte, which is very slow!
If you want to do it yourself, use a File object to get the length, create the array, and then a loop which reads bytes from the file. You must loop, since read(byte[], int offset, int len) can read fewer than len bytes (and usually does).
This is how I would implement it, to be safely used with any InputStream:
write your own InputStream wrapper where you create a temporary file to mirror the original stream's content
dump everything read from the original input stream into this temporary file
when the stream has been completely read, you will have all the data mirrored in the temporary file
use InputStream.reset to switch (initialise) the internal stream to a FileInputStream(mirrored_content_file)
from now on you lose the reference to the original stream (it can be collected)
add a new method release() which removes the temporary file and releases any open stream
you can even call release() from finalize, to be sure the temporary file is released in case you forget to call release() (most of the time you should avoid using finalize; always call a method to release object resources). See: Why would you ever implement finalize()?
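A minimal sketch of that wrapper (illustrative only: the class name and the release() contract come from the steps above, and error handling is kept short):
import java.io.*;

public class TempFileCachingInputStream extends InputStream {
    private final File mirror;
    private InputStream in;         // current source: the original stream, later the mirror
    private OutputStream mirrorOut; // receives every byte read from the original

    public TempFileCachingInputStream(InputStream source) throws IOException {
        this.mirror = File.createTempFile("stream-cache", ".tmp");
        this.in = source;
        this.mirrorOut = new BufferedOutputStream(new FileOutputStream(mirror));
    }

    @Override
    public int read() throws IOException {
        int b = in.read();
        if (mirrorOut != null) {
            if (b >= 0) {
                mirrorOut.write(b); // mirror everything we hand out
            } else {
                mirrorOut.close();  // original exhausted: the mirror is now complete
                mirrorOut = null;
            }
        }
        return b;
    }

    @Override
    public synchronized void reset() throws IOException {
        if (mirrorOut != null) {
            throw new IOException("original stream has not been fully read yet");
        }
        in.close();
        in = new FileInputStream(mirror); // replay from the mirrored content
    }

    public void release() throws IOException {
        in.close();
        if (mirrorOut != null) {
            mirrorOut.close();
        }
        if (!mirror.delete()) {
            mirror.deleteOnExit();
        }
    }
}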
public static void main(String[] args) throws IOException {
    BufferedInputStream inputStream = new BufferedInputStream(IOUtils.toInputStream("Foobar"));
    inputStream.mark(Integer.MAX_VALUE);
    System.out.println(IOUtils.toString(inputStream));
    inputStream.reset();
    System.out.println(IOUtils.toString(inputStream));
}
This works. IOUtils is part of commons IO.
This answer iterates on previous ones (1, 2) based on BufferedInputStream. The main changes are that it allows infinite reuse, and it takes care of closing the original source input stream to free up system resources. Your OS defines a limit on those, and you don't want the program to run out of file handles (that's also why you should always 'consume' responses, e.g. with Apache's EntityUtils.consumeQuietly()). EDIT: Updated the code to handle greedy consumers that use read(buffer, offset, length); in that case BufferedInputStream may try hard to look at the source, and this code protects against that use.
public class CachingInputStream extends BufferedInputStream {

    public CachingInputStream(InputStream source) {
        super(new PostCloseProtection(source));
        super.mark(Integer.MAX_VALUE);
    }

    @Override
    public synchronized void close() throws IOException {
        if (!((PostCloseProtection) in).decoratedClosed) {
            in.close();
        }
        super.reset();
    }

    private static class PostCloseProtection extends InputStream {
        private volatile boolean decoratedClosed = false;
        private final InputStream source;

        public PostCloseProtection(InputStream source) {
            this.source = source;
        }

        @Override
        public int read() throws IOException {
            return decoratedClosed ? -1 : source.read();
        }

        @Override
        public int read(byte[] b) throws IOException {
            return decoratedClosed ? -1 : source.read(b);
        }

        @Override
        public int read(byte[] b, int off, int len) throws IOException {
            return decoratedClosed ? -1 : source.read(b, off, len);
        }

        @Override
        public long skip(long n) throws IOException {
            return decoratedClosed ? 0 : source.skip(n);
        }

        @Override
        public int available() throws IOException {
            return source.available();
        }

        @Override
        public void close() throws IOException {
            decoratedClosed = true;
            source.close();
        }

        @Override
        public void mark(int readLimit) {
            source.mark(readLimit);
        }

        @Override
        public void reset() throws IOException {
            source.reset();
        }

        @Override
        public boolean markSupported() {
            return source.markSupported();
        }
    }
}
To reuse it, just close it first if it wasn't closed already.
One limitation, though, is that if the stream is closed before the whole content of the original stream has been read, then this decorator will have incomplete data, so make sure the whole stream is read before closing.
I'll just add my solution here, as it works for me. It is basically a combination of the top two answers :)
private String convertStreamToString(InputStream is) {
    Writer w = new StringWriter();
    char[] buf = new char[1024];
    Reader r;
    is.mark(1 << 24);
    try {
        r = new BufferedReader(new InputStreamReader(is, "UTF-8"));
        int n;
        while ((n = r.read(buf)) != -1) {
            w.write(buf, 0, n);
        }
        is.reset();
    } catch (UnsupportedEncodingException e) {
        Logger.debug(this.getClass(), "Cannot convert stream to string.", e);
    } catch (IOException e) {
        Logger.debug(this.getClass(), "Cannot convert stream to string.", e);
    }
    return w.toString();
}
