ImageSwitcher takes several seconds when "Grow heap" happens (image already downscaled) - java

I've already read a lot of posts, but I still can't solve this.
I have an app with a problem related to OOM. It shows a picture stored on a server (PNG, approximately 1.33 MB, 3072*1728 pixels). My heap limit is 125 MB,
obtained by: long maxMemory = rt.maxMemory();
I decode the image on a background thread (an AsyncTask) and manage it through a handler.
I also downscale the picture when it is too big, and the image does display on the device.
But the log still shows: Grow heap (frag case) to 10.389MB for 2654224-byte allocation
and collecting the garbage then takes 2~10 seconds.
That's a bad user experience. Has anyone met the same problem? Any suggestions?
public class DownloadImageTask extends AsyncTask<String, Void, Bitmap> {
public static String TAG = "DownloadImageTask";
public enum DOWNLOAD_STATE {
eIdle, eProgressing, eError, eSuccess,
}
private DOWNLOAD_STATE state = DOWNLOAD_STATE.eIdle;
private Bitmap dlBitmap;
public DownloadImageTask(String imageUrl) {
execute(imageUrl);
}
public DOWNLOAD_STATE getDownloadState() {
return state;
}
public Bitmap getDownloadBitmap() {
return dlBitmap;
}
public Drawable getDownloadDrawable() {
@SuppressWarnings("deprecation")
Drawable dlDrawable = new BitmapDrawable(dlBitmap);
return dlDrawable;
}
protected Bitmap doInBackground(String... urls) {
String urldisplay = urls[0];
try {
dlBitmap = getBitmap(urldisplay);
if(dlBitmap==null)
state = DOWNLOAD_STATE.eError;
else
state = DOWNLOAD_STATE.eSuccess;
} catch (Exception e) {
dlBitmap = null;
state = DOWNLOAD_STATE.eError;
}
return dlBitmap;
}
protected void onPostExecute(Bitmap result) {
result = null;
}
public Bitmap getBitmap(String urldisplay) {
try {
Log.d(TAG,"getBitmap File");
InputStream in = new java.net.URL(urldisplay).openStream();
if(in == null)
Log.d(TAG,"in null");
// first decode: read only the width & height of the picture, without loading the pixels into memory
BitmapFactory.Options opts = new BitmapFactory.Options();
opts.inJustDecodeBounds = true;
BitmapFactory.decodeStream(in, null, opts);
//in.close();
//compute the SampleSize (power of 2 is great)
int sampleSize = computeSampleSize(opts, -1, 1920 * 1080);//monitor limitation
Log.d(TAG,"samplesize ="+sampleSize);
// second decode: set the sample size and generate the thumbnail
in = new java.net.URL(urldisplay).openStream();
opts = new BitmapFactory.Options();
opts.inPreferredConfig = Bitmap.Config.RGB_565;
opts.inSampleSize = sampleSize;
opts.inInputShareable = true;
opts.inPurgeable = true;
Bitmap bmp = BitmapFactory.decodeStream(in, null, opts);
in.close();
return bmp;
} catch (Exception err) {
Log.e(TAG, "error: " + err.toString());
return null;
}
}
public static int computeSampleSize(BitmapFactory.Options options,
int minSideLength, int maxNumOfPixels) {
Log.d(TAG,"computeSampleSize");
int initialSize = computeInitialSampleSize(options, minSideLength,
maxNumOfPixels);
int roundedSize;
if (initialSize <= 8) {
roundedSize = 1;
while (roundedSize < initialSize) {
roundedSize <<= 1;
}
} else {
roundedSize = (initialSize + 7) / 8 * 8;
}
return roundedSize;
}
private static int computeInitialSampleSize(BitmapFactory.Options options,
int minSideLength, int maxNumOfPixels) {
Log.d(TAG,"computeInitialSampleSize");
try{
double w = options.outWidth;
double h = options.outHeight;
int lowerBound = (maxNumOfPixels == -1) ? 1 : (int) Math.ceil(Math
.sqrt(w * h / maxNumOfPixels));
int upperBound = (minSideLength == -1) ? 128 : (int) Math.min(
Math.floor(w / minSideLength), Math.floor(h / minSideLength));
if (upperBound < lowerBound) {
// return the larger one when there is no overlapping zone.
return lowerBound;
}
if ((maxNumOfPixels == -1) && (minSideLength == -1)) {
return 1;
} else if (minSideLength == -1) {
return lowerBound;
} else {
return upperBound;
}
}catch (Exception e){
Log.d(TAG,"computeInitialSampleSize err:"+e.toString());
return 1;
}
}
}
Then I show the picture in the ImageSwitcher:
myImageSwitcher.setImageDrawable(task.getDownloadDrawable());
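For what it's worth, here is a minimal sketch (not the original code) of how the decoded bitmap could be handed to the ImageSwitcher from onPostExecute, which already runs on the UI thread, using the non-deprecated BitmapDrawable(Resources, Bitmap) constructor; myImageSwitcher is assumed to be reachable from the task, e.g. passed into its constructor:
@Override
protected void onPostExecute(Bitmap result) {
    // Runs on the UI thread after doInBackground() finishes.
    if (result != null) {
        myImageSwitcher.setImageDrawable(
                new BitmapDrawable(myImageSwitcher.getResources(), result));
    }
}
This keeps all decoding off the UI thread and avoids the deprecated single-argument BitmapDrawable constructor.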

Related

Merge/Mux multiple mp4 video files on Android

I have a series of mp4 files saved on the device that need to be merged together to make a single mp4 file.
video_p1.mp4 video_p2.mp4 video_p3.mp4 > video.mp4
The solutions I have researched such as the mp4parser framework use deprecated code.
The best solution I could find is using a MediaMuxer and MediaExtractor.
The code runs but my videos are not merged (only the content in video_p1.mp4 is displayed and it is in landscape orientation, not portrait).
Can anyone help me sort this out?
public static boolean concatenateFiles(File dst, File... sources) {
if ((sources == null) || (sources.length == 0)) {
return false;
}
boolean result;
MediaExtractor extractor = null;
MediaMuxer muxer = null;
try {
// Set up MediaMuxer for the destination.
muxer = new MediaMuxer(dst.getPath(), MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
// Copy the samples from MediaExtractor to MediaMuxer.
boolean sawEOS = false;
//int bufferSize = MAX_SAMPLE_SIZE;
int bufferSize = 1 * 1024 * 1024;
int frameCount = 0;
int offset = 100;
ByteBuffer dstBuf = ByteBuffer.allocate(bufferSize);
MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
long timeOffsetUs = 0;
int dstTrackIndex = -1;
for (int fileIndex = 0; fileIndex < sources.length; fileIndex++) {
int numberOfSamplesInSource = getNumberOfSamples(sources[fileIndex]);
// Set up MediaExtractor to read from the source.
extractor = new MediaExtractor();
extractor.setDataSource(sources[fileIndex].getPath());
// Set up the tracks.
SparseIntArray indexMap = new SparseIntArray(extractor.getTrackCount());
for (int i = 0; i < extractor.getTrackCount(); i++) {
extractor.selectTrack(i);
MediaFormat format = extractor.getTrackFormat(i);
if (dstTrackIndex < 0) {
dstTrackIndex = muxer.addTrack(format);
muxer.start();
}
indexMap.put(i, dstTrackIndex);
}
long lastPresentationTimeUs = 0;
int currentSample = 0;
while (!sawEOS) {
bufferInfo.offset = offset;
bufferInfo.size = extractor.readSampleData(dstBuf, offset);
if (bufferInfo.size < 0) {
sawEOS = true;
bufferInfo.size = 0;
timeOffsetUs += (lastPresentationTimeUs + 0);
}
else {
lastPresentationTimeUs = extractor.getSampleTime();
bufferInfo.presentationTimeUs = extractor.getSampleTime() + timeOffsetUs;
bufferInfo.flags = extractor.getSampleFlags();
int trackIndex = extractor.getSampleTrackIndex();
if ((currentSample < numberOfSamplesInSource) || (fileIndex == sources.length - 1)) {
muxer.writeSampleData(indexMap.get(trackIndex), dstBuf, bufferInfo);
}
extractor.advance();
frameCount++;
currentSample++;
Log.d("tag2", "Frame (" + frameCount + ") " +
"PresentationTimeUs:" + bufferInfo.presentationTimeUs +
" Flags:" + bufferInfo.flags +
" TrackIndex:" + trackIndex +
" Size(KB) " + bufferInfo.size / 1024);
}
}
extractor.release();
extractor = null;
}
result = true;
}
catch (IOException e) {
result = false;
}
finally {
if (extractor != null) {
extractor.release();
}
if (muxer != null) {
muxer.stop();
muxer.release();
}
}
return result;
}
public static int getNumberOfSamples(File src) {
MediaExtractor extractor = new MediaExtractor();
int result;
try {
extractor.setDataSource(src.getPath());
extractor.selectTrack(0);
result = 0;
while (extractor.advance()) {
result ++;
}
}
catch(IOException e) {
result = -1;
}
finally {
extractor.release();
}
return result;
}
I'm using this library for muxing videos: ffmpeg-android-java
gradle dependency:
implementation 'com.writingminds:FFmpegAndroid:0.3.2'
Here's how I use it in my project to mux video and audio in kotlin: VideoAudioMuxer
So basically it works like ffmpeg in a terminal, but you pass your command to a method as an array of strings, along with a listener.
ffmpeg.execute(arrayOf("-i", videoPath, "-i", audioPath, "$targetPath.mp4"), object : ExecuteBinaryResponseHandler() {
You'll have to look up how to merge videos with ffmpeg and convert the command into an array of strings for the arguments you need.
You could probably do almost anything, since ffmpeg is a very powerful tool.
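As an illustration only (not from the original answer), a concat command could look roughly like this in Java; listPath and targetPath are placeholder variables, and the list file is assumed to contain one line per clip in the form file 'video_p1.mp4':
// Hedged sketch: concatenate the clips listed in a text file using ffmpeg's concat demuxer.
String[] cmd = {"-f", "concat", "-safe", "0", "-i", listPath, "-c", "copy", targetPath + ".mp4"};
ffmpeg.execute(cmd, new ExecuteBinaryResponseHandler() {
    @Override
    public void onSuccess(String message) {
        Log.d("mux", "concat finished: " + message);
    }
    @Override
    public void onFailure(String message) {
        Log.e("mux", "concat failed: " + message);
    }
});
Note that stream copy (-c copy) only works when all clips share the same codecs and parameters; otherwise the clips have to be re-encoded.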

The orientation of the rear camera is inverse portrait

I'm trying to switch the camera with a button, and I'm also successful in that, but the problem is that the preview of the rear camera is in inverse portrait. I've tried setDisplayOrientation, but nothing changes... Maybe I've put it on the wrong line. This is the code; with the button I call the startCamera method:
public class MainRenderer implements GLSurfaceView.Renderer, SurfaceTexture.OnFrameAvailableListener {
public static final String TAG = "MainRenderer";
public static final int DEVICE_ORIENTATION_PORTRAIT = 0;
public static final int DEVICE_ORIENTATION_INVERSE_PORTRAIT = 1;
public static final int DEVICE_ORIENTATION_LANDSCAPE = 2;
public static final int DEVICE_ORIENTATION_INVERSE_LANDSCAPE = 3;
private Camera.CameraInfo cameraInfo;
public volatile int deviceOrientation = DEVICE_ORIENTATION_PORTRAIT;
private FSDK.HTracker tracker;
private int[] textures;
private Camera camera;
private SurfaceTexture surfaceTexture;
private boolean updateSurfaceTexture = false;
private FSDK.FSDK_Features[] trackingFeatures;
private MR.MaskFeatures maskCoords;
private int[] isMaskTexture1Created = new int[]{0};
private int[] isMaskTexture2Created = new int[]{0};
private int width;
private int height;
private ByteBuffer pixelBuffer;
private FSDK.HImage cameraImage = new FSDK.HImage();
private FSDK.FSDK_IMAGEMODE cameraImageMode = new FSDK.FSDK_IMAGEMODE();
private FSDK.HImage snapshotImage = new FSDK.HImage();
private FSDK.FSDK_IMAGEMODE snapshotImageMode = new FSDK.FSDK_IMAGEMODE();
private MainView mainView;
private MainActivity mainActivity;
private volatile boolean isResizeCalled = false;
private volatile boolean isResized = false;
public long IDs[] = new long[MR.MAX_FACES];
public long face_count[] = new long[1];
private long frameCount = 0;
private long startTime = 0;
private AtomicBoolean isTakingSnapshot = new AtomicBoolean(false);
public static final int[][] MASKS = new int[][]{
{R.raw.lips_pink, R.drawable.lips_pink, R.drawable.lips_pink_normal, MR.SHIFT_TYPE_NO},
{R.raw.lips_purple, R.drawable.lips_purple, R.drawable.lips_purple_normal, MR.SHIFT_TYPE_NO},
{R.raw.lips_red, R.drawable.lips_red, R.drawable.lips_red_normal, MR.SHIFT_TYPE_NO},
};
private int mask = 0;
private int maskLoaded = 0;
private volatile boolean isMaskChanged = false;
private boolean inPreview = false;
public void changeMask(int i) {
mask += i;
isMaskChanged = true;
}
public MainRenderer(MainView view) {
tracker = Application.tracker;
mainView = view;
mainActivity = (MainActivity) mainView.getContext();
trackingFeatures = new FSDK.FSDK_Features[MR.MAX_FACES];
for (int i = 0; i < MR.MAX_FACES; ++i) {
trackingFeatures[i] = new FSDK.FSDK_Features();
for (int j = 0; j < FSDK.FSDK_FACIAL_FEATURE_COUNT; ++j) {
trackingFeatures[i].features[j] = new FSDK.TPoint();
}
}
maskCoords = new MR.MaskFeatures();
}
public void close() {
updateSurfaceTexture = false;
surfaceTexture.release();
camera.stopPreview();
camera.release();
camera = null;
deleteTex();
}
public void startCamera() {
if (inPreview) {
camera.stopPreview();
inPreview = false;
}
//NB: if you don't release the current camera before switching, your app will crash
camera.release();
//swap the id of the camera to be used
if (cameraInfo.facing == Camera.CameraInfo.CAMERA_FACING_BACK) {
cameraInfo.facing = Camera.CameraInfo.CAMERA_FACING_FRONT;
} else {
cameraInfo.facing = Camera.CameraInfo.CAMERA_FACING_BACK;
}
camera = Camera.open(cameraInfo.facing);
//Code snippet for this method from somewhere on android developers, i forget where
//setCameraDisplayOrientation(mainActivity, cameraInfo.facing, camera);
try {
//this step is critical or the preview on the new camera will not know where to render to
camera.setPreviewTexture(surfaceTexture);
} catch (IOException e) {
e.printStackTrace();
}
camera.startPreview();
inPreview = true;
}
public void onSurfaceCreated(GL10 unused, EGLConfig config) { //call opengl functions only inside these functions!
Log.d(TAG, "surfaceCreated");
isResizeCalled = false;
isResized = false;
initTex();
loadMask(mask);
surfaceTexture = new SurfaceTexture(textures[0]);
surfaceTexture.setOnFrameAvailableListener(this);
// Find the ID of the camera
int cameraId = 0;
boolean frontCameraFound = false;
cameraInfo = new Camera.CameraInfo();
for (int i = 0; i < Camera.getNumberOfCameras(); i++) {
Camera.getCameraInfo(i, cameraInfo);
if (cameraInfo.facing == Camera.CameraInfo.CAMERA_FACING_BACK) {
}
if (cameraInfo.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
cameraId = i;
frontCameraFound = true;
}
}
if (frontCameraFound) {
camera = Camera.open(cameraId);
} else {
camera = Camera.open();
}
try {
camera.setPreviewTexture(surfaceTexture);
} catch (IOException e) {
e.printStackTrace();
}
GLES11.glClearColor(0.0f, 0.0f, 0.0f, 1.0f); //background color
}
private byte[] readBytes(InputStream inputStream) throws IOException {
ByteArrayOutputStream byteBuffer = new ByteArrayOutputStream();
int bufferSize = 16384;
byte[] buffer = new byte[bufferSize];
int len;
while ((len = inputStream.read(buffer)) != -1) {
byteBuffer.write(buffer, 0, len);
}
return byteBuffer.toByteArray();
}
// must be called from the thread with OpenGL context!
@TargetApi(Build.VERSION_CODES.ICE_CREAM_SANDWICH_MR1)
public void loadMask(int maskNumber) {
GLES11.glDisable(GLES11Ext.GL_TEXTURE_EXTERNAL_OES);
Log.d(TAG, "Loading mask...");
int[] mask = MASKS[maskNumber];
if (isMaskTexture1Created[0] > 0) {
GLES11.glDeleteTextures(1, textures, 1);
}
if (isMaskTexture2Created[0] > 0) {
GLES11.glDeleteTextures(1, textures, 2);
}
isMaskTexture1Created[0] = 0;
isMaskTexture2Created[0] = 0;
InputStream stream = mainView.getResources().openRawResource(mask[0]);
int res = MR.LoadMaskCoordsFromStream(stream, maskCoords);
try {
stream.close();
} catch (IOException e) {
e.printStackTrace();
}
if (res != FSDK.FSDKE_OK) {
Log.e(TAG, "Error loading mask coords from stream: " + res);
GLES11.glEnable(GLES11Ext.GL_TEXTURE_EXTERNAL_OES);
return;
}
BitmapFactory.Options bitmapDecodingOptions = new BitmapFactory.Options();
bitmapDecodingOptions.inScaled = false; // to load original image without scaling
FSDK.HImage img1 = new FSDK.HImage();
if (mask[1] == -1) { // if no image
FSDK.CreateEmptyImage(img1);
} else {
stream = mainView.getResources().openRawResource(mask[1]);
byte[] data = null;
try {
data = readBytes(stream);
stream.close();
} catch (IOException e) {
e.printStackTrace();
}
if (data != null) {
res = FSDK.LoadImageFromPngBufferWithAlpha(img1, data, data.length);
Log.d(TAG, "Load mask image of size " + data.length + " with result " + res);
int[] w = new int[]{0};
int[] h = new int[]{0};
FSDK.GetImageWidth(img1, w);
FSDK.GetImageHeight(img1, h);
Log.d(TAG, "Mask image size: " + w[0] + " x " + h[0]);
} else {
Log.w(TAG, "Error loading mask image, using empty image");
FSDK.CreateEmptyImage(img1);
}
}
FSDK.HImage img2 = new FSDK.HImage();
if (mask[2] == -1) { // if no normal image
FSDK.CreateEmptyImage(img2);
} else {
stream = mainView.getResources().openRawResource(mask[2]);
byte[] data = null;
try {
data = readBytes(stream);
stream.close();
} catch (IOException e) {
e.printStackTrace();
}
if (data != null) {
res = FSDK.LoadImageFromPngBufferWithAlpha(img2, data, data.length);
Log.d(TAG, "Load mask normal image of size " + data.length + " with result " + res);
int[] w = new int[]{0};
int[] h = new int[]{0};
FSDK.GetImageWidth(img2, w);
FSDK.GetImageHeight(img2, h);
Log.d(TAG, "Mask normal image size: " + w[0] + " x " + h[0]);
} else {
Log.w(TAG, "Error loading mask normal image, using empty image");
FSDK.CreateEmptyImage(img2);
}
}
res = MR.LoadMask(img1, img2, textures[1], textures[2], isMaskTexture1Created, isMaskTexture2Created);
FSDK.FreeImage(img1);
FSDK.FreeImage(img2);
Log.d(TAG, "Mask loaded with result " + res + " texture1Created:" + isMaskTexture1Created[0] + " texture2Created:" + isMaskTexture2Created[0]);
Log.d(TAG, "Mask textures: " + textures[1] + " " + textures[2]);
GLES11.glEnable(GLES11Ext.GL_TEXTURE_EXTERNAL_OES);
}
public void onDrawFrame(GL10 unused) { //call opengl functions only inside these functions!
GLES11.glClear(GLES11.GL_COLOR_BUFFER_BIT);
if (!isResized) {
return;
}
synchronized (this) {
if (updateSurfaceTexture) {
surfaceTexture.updateTexImage();
updateSurfaceTexture = false;
}
}
if (isMaskChanged) {
maskLoaded = mask;
loadMask(mask);
isMaskChanged = false;
}
int rotation = 1;
// First, drawing without mask to get image buffer
int res = MR.DrawGLScene(textures[0], 0, trackingFeatures, rotation, MR.SHIFT_TYPE_NO, textures[1], textures[2], maskCoords, 0, 0, width, height);
if (FSDK.FSDKE_OK != res) {
Log.e(TAG, "Error in the first MR.DrawGLScene call: " + res);
}
GLES11.glReadPixels(0, 0, width, height, GLES11.GL_RGBA, GLES11.GL_UNSIGNED_BYTE, pixelBuffer);
face_count[0] = 0;
processCameraImage();
// Second, drawing with mask atop of image
res = MR.DrawGLScene(textures[0], (int) face_count[0], trackingFeatures, rotation, MASKS[maskLoaded][3], textures[1], textures[2], maskCoords, isMaskTexture1Created[0], isMaskTexture2Created[0], width, height);
if (FSDK.FSDKE_OK != res) {
Log.e(TAG, "Error in the second MR.DrawGLScene call: " + res);
}
// Save snapshot if needed
if (isTakingSnapshot.compareAndSet(true, false)) {
GLES11.glReadPixels(0, 0, width, height, GLES11.GL_RGBA, GLES11.GL_UNSIGNED_BYTE, pixelBuffer);
snapshotImageMode.mode = FSDK.FSDK_IMAGEMODE.FSDK_IMAGE_COLOR_32BIT;
res = FSDK.LoadImageFromBuffer(snapshotImage, pixelBuffer.array(), width, height, width * 4, snapshotImageMode);
if (FSDK.FSDKE_OK != res) {
Log.e(TAG, "Error loading snapshot image to FaceSDK: " + res);
} else {
FSDK.MirrorImage(snapshotImage, false);
String galleryPath = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_PICTURES).getAbsolutePath();
final String filename = galleryPath + "/MirrorRealityDemo" + System.currentTimeMillis() + ".png";
res = FSDK.SaveImageToFile(snapshotImage, filename);
Log.d(TAG, "saving snapshot to " + filename);
FSDK.FreeImage(snapshotImage);
if (FSDK.FSDKE_OK == res) {
mainActivity.runOnUiThread(new Runnable() {
@Override
public void run() {
Intent mediaScanIntent = new Intent(Intent.ACTION_MEDIA_SCANNER_SCAN_FILE);
File f = new File(filename);
Uri contentUri = Uri.fromFile(f);
mediaScanIntent.setData(contentUri);
mainActivity.sendBroadcast(mediaScanIntent);
Toast.makeText(mainActivity, "Saved successfully", Toast.LENGTH_SHORT).show();
}
});
}
}
}
// Show fps
++frameCount;
long timeCurrent = System.currentTimeMillis();
if (startTime == 0) startTime = timeCurrent;
long diff = timeCurrent - startTime;
if (diff >= 3000) {
final float fps = frameCount / (diff / 1000.0f);
frameCount = 0;
startTime = 0;
final TextView fpsTextView = mainActivity.fpsTextView();
mainActivity.fpsTextView().post(new Runnable() {
@Override
public void run() {
if (!mainActivity.isFinishing()) {
fpsTextView.setText(fps + " FPS");
}
}
});
}
}
private void processCameraImage() {
//clear previous features
for (int i = 0; i < MR.MAX_FACES; ++i) {
for (int j = 0; j < FSDK.FSDK_FACIAL_FEATURE_COUNT; ++j) {
trackingFeatures[i].features[j].x = 0;
trackingFeatures[i].features[j].y = 0;
}
}
cameraImageMode.mode = FSDK.FSDK_IMAGEMODE.FSDK_IMAGE_COLOR_32BIT;
int res = FSDK.LoadImageFromBuffer(cameraImage, pixelBuffer.array(), width, height, width * 4, cameraImageMode);
if (FSDK.FSDKE_OK != res) {
Log.e(TAG, "Error loading camera image to FaceSDK: " + res);
return;
}
FSDK.MirrorImage(cameraImage, false);
int[] widthByReference = new int[1];
int[] heightByReference = new int[1];
FSDK.GetImageWidth(cameraImage, widthByReference);
FSDK.GetImageHeight(cameraImage, heightByReference);
int width = widthByReference[0];
int height = heightByReference[0];
int rotation = 0;
if (deviceOrientation == DEVICE_ORIENTATION_INVERSE_PORTRAIT) {
rotation = 2;
} else if (deviceOrientation == DEVICE_ORIENTATION_LANDSCAPE) {
rotation = 3;
} else if (deviceOrientation == DEVICE_ORIENTATION_INVERSE_LANDSCAPE) {
rotation = 1;
}
if (rotation > 0) {
FSDK.HImage rotated = new FSDK.HImage();
FSDK.CreateEmptyImage(rotated);
FSDK.RotateImage90(cameraImage, rotation, rotated);
FSDK.FeedFrame(tracker, 0, rotated, face_count, IDs);
FSDK.FreeImage(rotated);
} else {
FSDK.FeedFrame(tracker, 0, cameraImage, face_count, IDs);
}
for (int i = 0; i < (int) face_count[0]; ++i) {
FSDK.GetTrackerFacialFeatures(tracker, 0, IDs[i], trackingFeatures[i]);
if (rotation > 0) {
if (rotation == 1) {
for (int j = 0; j < FSDK.FSDK_FACIAL_FEATURE_COUNT; ++j) {
int x = trackingFeatures[i].features[j].x;
trackingFeatures[i].features[j].x = trackingFeatures[i].features[j].y;
trackingFeatures[i].features[j].y = height - 1 - x;
}
} else if (rotation == 2) {
for (int j = 0; j < FSDK.FSDK_FACIAL_FEATURE_COUNT; ++j) {
trackingFeatures[i].features[j].x = width - 1 - trackingFeatures[i].features[j].x;
trackingFeatures[i].features[j].y = height - 1 - trackingFeatures[i].features[j].y;
}
} else {
for (int j = 0; j < FSDK.FSDK_FACIAL_FEATURE_COUNT; ++j) {
int x = trackingFeatures[i].features[j].x;
trackingFeatures[i].features[j].x = width - 1 - trackingFeatures[i].features[j].y;
trackingFeatures[i].features[j].y = x;
}
}
}
}
FSDK.FreeImage(cameraImage);
}
public void onSurfaceChanged(GL10 unused, int width, int height) { //call opengl functions only inside these functions!
Log.d(TAG, "surfaceChanged");
if (!isResizeCalled) {
isResizeCalled = true;
mainView.resizeForPerformance(width, height);
return;
}
GLES11.glViewport(0, 0, width, height);
Camera.Parameters param = camera.getParameters();
List<Camera.Size> psize = param.getSupportedPreviewSizes();
if (psize.size() > 0) {
int i = 0;
int optDistance = Integer.MAX_VALUE;
Log.d(TAG, "Choosing preview resolution closer to " + width + " x " + height);
double neededScale = height / (double) width;
for (int j = 0; j < psize.size(); ++j) {
double scale = psize.get(j).width / (double) psize.get(j).height;
int distance = (int) (10000 * Math.abs(scale - neededScale));
Log.d(TAG, "Choosing preview resolution, probing " + psize.get(j).width + " x " + psize.get(j).height + " distance: " + distance);
if (distance < optDistance) {
i = j;
optDistance = distance;
} else if (distance == optDistance) {
// try to avoid too low resolution
if ((psize.get(i).width < 300 || psize.get(i).height < 300)
&& psize.get(j).width > psize.get(i).width && psize.get(j).height > psize.get(i).height) {
i = j;
}
}
}
Log.d(TAG, "Using optimal preview size: " + psize.get(i).width + " x " + psize.get(i).height);
param.setPreviewSize(psize.get(i).width, psize.get(i).height);
// adjusting viewport to camera aspect ratio
int viewportHeight = (int) (width * (psize.get(i).width * 1.0f / psize.get(i).height));
GLES11.glViewport(0, 0, width, viewportHeight);
this.width = width;
this.height = viewportHeight;
pixelBuffer = ByteBuffer.allocateDirect(this.width * this.height * 4).order(ByteOrder.nativeOrder());
}
param.set("orientation", "landscape");
camera.setParameters(param);
camera.startPreview();
inPreview = true;
isResized = true;
}
@TargetApi(Build.VERSION_CODES.ICE_CREAM_SANDWICH_MR1)
private void initTex() {
textures = new int[3];
GLES11.glEnable(GLES11Ext.GL_TEXTURE_EXTERNAL_OES);
GLES11.glEnable(GL10.GL_TEXTURE_2D);
GLES11.glGenTextures(3, textures, 0);
GLES11.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, textures[0]);
GLES11.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES11.GL_TEXTURE_WRAP_S, GLES11.GL_CLAMP_TO_EDGE);
GLES11.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES11.GL_TEXTURE_WRAP_T, GLES11.GL_CLAMP_TO_EDGE);
GLES11.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES11.GL_TEXTURE_MIN_FILTER, GLES11.GL_NEAREST);
GLES11.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES11.GL_TEXTURE_MAG_FILTER, GLES11.GL_NEAREST);
}
private void deleteTex() {
GLES11.glDeleteTextures(3, textures, 0);
}
public synchronized void onFrameAvailable(SurfaceTexture st) {
updateSurfaceTexture = true;
mainView.requestRender();
}
public synchronized void snapshot() {
isTakingSnapshot.set(true);
}
}
Any type of help is appreciated ... thank you so much
Here is the preview that I see:
I have tried to make sense of this, but there is little information about which API level you're targeting; that may also be the problem, or it may be the test device.
But let's rewrite the camera display code:
1)
public void setCameraDisplayOrientation(android.hardware.Camera camera) {
Camera.Parameters parameters = camera.getParameters();
android.hardware.Camera.CameraInfo camInfo =
new android.hardware.Camera.CameraInfo();
android.hardware.Camera.getCameraInfo(getBackFacingCameraId(), camInfo);
Display display = ((WindowManager) context.getSystemService(Context.WINDOW_SERVICE)).getDefaultDisplay();
int rotation = display.getRotation();
int degrees = 0;
switch (rotation) {
case Surface.ROTATION_0:
degrees = 0;
break;
case Surface.ROTATION_90:
degrees = 90;
break;
case Surface.ROTATION_180:
degrees = 180;
break;
case Surface.ROTATION_270:
degrees = 270;
break;
}
int result;
if (camInfo.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
result = (camInfo.orientation + degrees) % 360;
result = (360 - result) % 360; // compensate the mirror
} else { // back-facing
result = (camInfo.orientation - degrees + 360) % 360;
}
camera.setDisplayOrientation(result);
}
Call the setCameraDisplayOrientation() method in the onSurfaceCreated callback as follows:
@Override
public void onSurfaceCreated(GL10 unused, EGLConfig config) {
camera = Camera.open();
setCameraDisplayOrientation(camera);
}
Method 2 is to try:
Get the phone's orientation by using a sensor; this makes life much easier because the orientation will be handled properly (a sketch follows below).
You can find more about this within this link: Is Android's CameraInfo.orientation correctly documented? Incorrectly implemented?
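A minimal sketch of the sensor approach (not from the original answer), assuming the MainRenderer from the question and a hypothetical mainRenderer reference held by the activity; the degree-to-constant mapping may need adjusting for a particular device:
// Feed the renderer's public deviceOrientation field from a sensor-based listener.
OrientationEventListener orientationListener =
        new OrientationEventListener(this, SensorManager.SENSOR_DELAY_NORMAL) {
    @Override
    public void onOrientationChanged(int degrees) {
        if (degrees == ORIENTATION_UNKNOWN) return;
        if (degrees >= 315 || degrees < 45) {
            mainRenderer.deviceOrientation = MainRenderer.DEVICE_ORIENTATION_PORTRAIT;
        } else if (degrees < 135) {
            mainRenderer.deviceOrientation = MainRenderer.DEVICE_ORIENTATION_INVERSE_LANDSCAPE;
        } else if (degrees < 225) {
            mainRenderer.deviceOrientation = MainRenderer.DEVICE_ORIENTATION_INVERSE_PORTRAIT;
        } else {
            mainRenderer.deviceOrientation = MainRenderer.DEVICE_ORIENTATION_LANDSCAPE;
        }
    }
};
if (orientationListener.canDetectOrientation()) orientationListener.enable();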
Method 3:
Edit your code like this:
public class MainRenderer implements GLSurfaceView.Renderer, SurfaceTexture.OnFrameAvailableListener {
public static final String TAG = "MainRenderer";
public static final int DEVICE_ORIENTATION_PORTRAIT = 0;
public static final int DEVICE_ORIENTATION_INVERSE_PORTRAIT = 1;
public static final int DEVICE_ORIENTATION_LANDSCAPE = 2;
public static final int DEVICE_ORIENTATION_INVERSE_LANDSCAPE = 3;
private Camera.CameraInfo cameraInfo;
public volatile int deviceOrientation = DEVICE_ORIENTATION_PORTRAIT;
private FSDK.HTracker tracker;
private int[] textures;
private Camera camera;
private SurfaceTexture surfaceTexture;
private boolean updateSurfaceTexture = false;
private FSDK.FSDK_Features[] trackingFeatures;
private MR.MaskFeatures maskCoords;
private int[] isMaskTexture1Created = new int[]{0};
private int[] isMaskTexture2Created = new int[]{0};
private int width;
private int height;
private ByteBuffer pixelBuffer;
private FSDK.HImage cameraImage = new FSDK.HImage();
private FSDK.FSDK_IMAGEMODE cameraImageMode = new FSDK.FSDK_IMAGEMODE();
private FSDK.HImage snapshotImage = new FSDK.HImage();
private FSDK.FSDK_IMAGEMODE snapshotImageMode = new FSDK.FSDK_IMAGEMODE();
private MainView mainView;
private MainActivity mainActivity;
private volatile boolean isResizeCalled = false;
private volatile boolean isResized = false;
public long IDs[] = new long[MR.MAX_FACES];
public long face_count[] = new long[1];
private long frameCount = 0;
private long startTime = 0;
private AtomicBoolean isTakingSnapshot = new AtomicBoolean(false);
public static final int[][] MASKS = new int[][]{
{R.raw.lips_pink, R.drawable.lips_pink, R.drawable.lips_pink_normal, MR.SHIFT_TYPE_NO},
{R.raw.lips_purple, R.drawable.lips_purple, R.drawable.lips_purple_normal, MR.SHIFT_TYPE_NO},
{R.raw.lips_red, R.drawable.lips_red, R.drawable.lips_red_normal, MR.SHIFT_TYPE_NO},
};
private int mask = 0;
private int maskLoaded = 0;
private volatile boolean isMaskChanged = false;
private boolean inPreview = false;
public void changeMask(int i) {
mask += i;
isMaskChanged = true;
}
public MainRenderer(MainView view) {
tracker = Application.tracker;
mainView = view;
mainActivity = (MainActivity) mainView.getContext();
trackingFeatures = new FSDK.FSDK_Features[MR.MAX_FACES];
for (int i = 0; i < MR.MAX_FACES; ++i) {
trackingFeatures[i] = new FSDK.FSDK_Features();
for (int j = 0; j < FSDK.FSDK_FACIAL_FEATURE_COUNT; ++j) {
trackingFeatures[i].features[j] = new FSDK.TPoint();
}
}
maskCoords = new MR.MaskFeatures();
}
public void close() {
updateSurfaceTexture = false;
surfaceTexture.release();
camera.stopPreview();
camera.release();
camera = null;
deleteTex();
}
public void startCamera() {
if (inPreview) {
camera.stopPreview();
inPreview = false;
}
//NB: if you don't release the current camera before switching, your app will crash
camera.release();
//swap the id of the camera to be used
//Below I have tried to change the camera facing
if (cameraInfo.facing == Camera.CameraInfo.CAMERA_FACING_BACK) {
cameraInfo.facing = Camera.CameraInfo.CAMERA_FACING_BACK;
} else {
cameraInfo.facing = Camera.CameraInfo.CAMERA_FACING_FRONT;
}
camera = Camera.open(cameraInfo.facing);
//Code snippet for this method from somewhere on android developers, i forget where
//setCameraDisplayOrientation(mainActivity, cameraInfo.facing, camera);
try {
//this step is critical or the preview on the new camera will not know where to render to
camera.setPreviewTexture(surfaceTexture);
} catch (IOException e) {
e.printStackTrace();
}
camera.startPreview();
inPreview = true;
}
public void onSurfaceCreated(GL10 unused, EGLConfig config) { //call opengl functions only inside these functions!
Log.d(TAG, "surfaceCreated");
isResizeCalled = false;
isResized = false;
initTex();
loadMask(mask);
surfaceTexture = new SurfaceTexture(textures[0]);
surfaceTexture.setOnFrameAvailableListener(this);
// Find the ID of the camera
int cameraId = 0;
boolean frontCameraFound = false;
cameraInfo = new Camera.CameraInfo();
for (int i = 0; i < Camera.getNumberOfCameras(); i++) {
Camera.getCameraInfo(i, cameraInfo);
if (cameraInfo.facing == Camera.CameraInfo.CAMERA_FACING_BACK) {
}
if (cameraInfo.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
cameraId = i;
frontCameraFound = true;
}
}
if (frontCameraFound) {
camera = Camera.open(cameraId);
} else {
camera = Camera.open();
}
try {
camera.setPreviewTexture(surfaceTexture);
} catch (IOException e) {
e.printStackTrace();
}
GLES11.glClearColor(0.0f, 0.0f, 0.0f, 1.0f); //background color
}
private byte[] readBytes(InputStream inputStream) throws IOException {
ByteArrayOutputStream byteBuffer = new ByteArrayOutputStream();
int bufferSize = 16384;
byte[] buffer = new byte[bufferSize];
int len;
while ((len = inputStream.read(buffer)) != -1) {
byteBuffer.write(buffer, 0, len);
}
return byteBuffer.toByteArray();
}
// must be called from the thread with OpenGL context!
@TargetApi(Build.VERSION_CODES.ICE_CREAM_SANDWICH_MR1)
public void loadMask(int maskNumber) {
GLES11.glDisable(GLES11Ext.GL_TEXTURE_EXTERNAL_OES);
Log.d(TAG, "Loading mask...");
int[] mask = MASKS[maskNumber];
if (isMaskTexture1Created[0] > 0) {
GLES11.glDeleteTextures(1, textures, 1);
}
if (isMaskTexture2Created[0] > 0) {
GLES11.glDeleteTextures(1, textures, 2);
}
isMaskTexture1Created[0] = 0;
isMaskTexture2Created[0] = 0;
InputStream stream = mainView.getResources().openRawResource(mask[0]);
int res = MR.LoadMaskCoordsFromStream(stream, maskCoords);
try {
stream.close();
} catch (IOException e) {
e.printStackTrace();
}
if (res != FSDK.FSDKE_OK) {
Log.e(TAG, "Error loading mask coords from stream: " + res);
GLES11.glEnable(GLES11Ext.GL_TEXTURE_EXTERNAL_OES);
return;
}
BitmapFactory.Options bitmapDecodingOptions = new BitmapFactory.Options();
bitmapDecodingOptions.inScaled = false; // to load original image without scaling
FSDK.HImage img1 = new FSDK.HImage();
if (mask[1] == -1) { // if no image
FSDK.CreateEmptyImage(img1);
} else {
stream = mainView.getResources().openRawResource(mask[1]);
byte[] data = null;
try {
data = readBytes(stream);
stream.close();
} catch (IOException e) {
e.printStackTrace();
}
if (data != null) {
res = FSDK.LoadImageFromPngBufferWithAlpha(img1, data, data.length);
Log.d(TAG, "Load mask image of size " + data.length + " with result " + res);
int[] w = new int[]{0};
int[] h = new int[]{0};
FSDK.GetImageWidth(img1, w);
FSDK.GetImageHeight(img1, h);
Log.d(TAG, "Mask image size: " + w[0] + " x " + h[0]);
} else {
Log.w(TAG, "Error loading mask image, using empty image");
FSDK.CreateEmptyImage(img1);
}
}
FSDK.HImage img2 = new FSDK.HImage();
if (mask[2] == -1) { // if no normal image
FSDK.CreateEmptyImage(img2);
} else {
stream = mainView.getResources().openRawResource(mask[2]);
byte[] data = null;
try {
data = readBytes(stream);
stream.close();
} catch (IOException e) {
e.printStackTrace();
}
if (data != null) {
res = FSDK.LoadImageFromPngBufferWithAlpha(img2, data, data.length);
Log.d(TAG, "Load mask normal image of size " + data.length + " with result " + res);
int[] w = new int[]{0};
int[] h = new int[]{0};
FSDK.GetImageWidth(img2, w);
FSDK.GetImageHeight(img2, h);
Log.d(TAG, "Mask normal image size: " + w[0] + " x " + h[0]);
} else {
Log.w(TAG, "Error loading mask normal image, using empty image");
FSDK.CreateEmptyImage(img2);
}
}
res = MR.LoadMask(img1, img2, textures[1], textures[2], isMaskTexture1Created, isMaskTexture2Created);
FSDK.FreeImage(img1);
FSDK.FreeImage(img2);
Log.d(TAG, "Mask loaded with result " + res + " texture1Created:" + isMaskTexture1Created[0] + " texture2Created:" + isMaskTexture2Created[0]);
Log.d(TAG, "Mask textures: " + textures[1] + " " + textures[2]);
GLES11.glEnable(GLES11Ext.GL_TEXTURE_EXTERNAL_OES);
}
public void onDrawFrame(GL10 unused) { //call opengl functions only inside these functions!
GLES11.glClear(GLES11.GL_COLOR_BUFFER_BIT);
if (!isResized) {
return;
}
synchronized (this) {
if (updateSurfaceTexture) {
surfaceTexture.updateTexImage();
updateSurfaceTexture = false;
}
}
if (isMaskChanged) {
maskLoaded = mask;
loadMask(mask);
isMaskChanged = false;
}
int rotation = 1;
// First, drawing without mask to get image buffer
int res = MR.DrawGLScene(textures[0], 0, trackingFeatures, rotation, MR.SHIFT_TYPE_NO, textures[1], textures[2], maskCoords, 0, 0, width, height);
if (FSDK.FSDKE_OK != res) {
Log.e(TAG, "Error in the first MR.DrawGLScene call: " + res);
}
GLES11.glReadPixels(0, 0, width, height, GLES11.GL_RGBA, GLES11.GL_UNSIGNED_BYTE, pixelBuffer);
face_count[0] = 0;
processCameraImage();
// Second, drawing with mask atop of image
res = MR.DrawGLScene(textures[0], (int) face_count[0], trackingFeatures, rotation, MASKS[maskLoaded][3], textures[1], textures[2], maskCoords, isMaskTexture1Created[0], isMaskTexture2Created[0], width, height);
if (FSDK.FSDKE_OK != res) {
Log.e(TAG, "Error in the second MR.DrawGLScene call: " + res);
}
// Save snapshot if needed
if (isTakingSnapshot.compareAndSet(true, false)) {
GLES11.glReadPixels(0, 0, width, height, GLES11.GL_RGBA, GLES11.GL_UNSIGNED_BYTE, pixelBuffer);
snapshotImageMode.mode = FSDK.FSDK_IMAGEMODE.FSDK_IMAGE_COLOR_32BIT;
res = FSDK.LoadImageFromBuffer(snapshotImage, pixelBuffer.array(), width, height, width * 4, snapshotImageMode);
if (FSDK.FSDKE_OK != res) {
Log.e(TAG, "Error loading snapshot image to FaceSDK: " + res);
} else {
FSDK.MirrorImage(snapshotImage, false);
String galleryPath = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_PICTURES).getAbsolutePath();
final String filename = galleryPath + "/MirrorRealityDemo" + System.currentTimeMillis() + ".png";
res = FSDK.SaveImageToFile(snapshotImage, filename);
Log.d(TAG, "saving snapshot to " + filename);
FSDK.FreeImage(snapshotImage);
if (FSDK.FSDKE_OK == res) {
mainActivity.runOnUiThread(new Runnable() {
@Override
public void run() {
Intent mediaScanIntent = new Intent(Intent.ACTION_MEDIA_SCANNER_SCAN_FILE);
File f = new File(filename);
Uri contentUri = Uri.fromFile(f);
mediaScanIntent.setData(contentUri);
mainActivity.sendBroadcast(mediaScanIntent);
Toast.makeText(mainActivity, "Saved successfully", Toast.LENGTH_SHORT).show();
}
});
}
}
}
// Show fps
++frameCount;
long timeCurrent = System.currentTimeMillis();
if (startTime == 0) startTime = timeCurrent;
long diff = timeCurrent - startTime;
if (diff >= 3000) {
final float fps = frameCount / (diff / 1000.0f);
frameCount = 0;
startTime = 0;
final TextView fpsTextView = mainActivity.fpsTextView();
mainActivity.fpsTextView().post(new Runnable() {
@Override
public void run() {
if (!mainActivity.isFinishing()) {
fpsTextView.setText(fps + " FPS");
}
}
});
}
}
private void processCameraImage() {
//clear previous features
for (int i = 0; i < MR.MAX_FACES; ++i) {
for (int j = 0; j < FSDK.FSDK_FACIAL_FEATURE_COUNT; ++j) {
trackingFeatures[i].features[j].x = 0;
trackingFeatures[i].features[j].y = 0;
}
}
cameraImageMode.mode = FSDK.FSDK_IMAGEMODE.FSDK_IMAGE_COLOR_32BIT;
int res = FSDK.LoadImageFromBuffer(cameraImage, pixelBuffer.array(), width, height, width * 4, cameraImageMode);
if (FSDK.FSDKE_OK != res) {
Log.e(TAG, "Error loading camera image to FaceSDK: " + res);
return;
}
FSDK.MirrorImage(cameraImage, false);
int[] widthByReference = new int[1];
int[] heightByReference = new int[1];
FSDK.GetImageWidth(cameraImage, widthByReference);
FSDK.GetImageHeight(cameraImage, heightByReference);
int width = widthByReference[0];
int height = heightByReference[0];
int rotation = 0;
if (deviceOrientation == DEVICE_ORIENTATION_INVERSE_PORTRAIT) {
rotation = 2;
} else if (deviceOrientation == DEVICE_ORIENTATION_LANDSCAPE) {
rotation = 3;
} else if (deviceOrientation == DEVICE_ORIENTATION_INVERSE_LANDSCAPE) {
rotation = 1;
}
if (rotation > 0) {
FSDK.HImage rotated = new FSDK.HImage();
FSDK.CreateEmptyImage(rotated);
FSDK.RotateImage90(cameraImage, rotation, rotated);
FSDK.FeedFrame(tracker, 0, rotated, face_count, IDs);
FSDK.FreeImage(rotated);
} else {
FSDK.FeedFrame(tracker, 0, cameraImage, face_count, IDs);
}
for (int i = 0; i < (int) face_count[0]; ++i) {
FSDK.GetTrackerFacialFeatures(tracker, 0, IDs[i], trackingFeatures[i]);
if (rotation > 0) {
if (rotation == 1) {
for (int j = 0; j < FSDK.FSDK_FACIAL_FEATURE_COUNT; ++j) {
int x = trackingFeatures[i].features[j].x;
trackingFeatures[i].features[j].x = trackingFeatures[i].features[j].y;
trackingFeatures[i].features[j].y = height - 1 - x;
}
} else if (rotation == 2) {
for (int j = 0; j < FSDK.FSDK_FACIAL_FEATURE_COUNT; ++j) {
trackingFeatures[i].features[j].x = width - 1 - trackingFeatures[i].features[j].x;
trackingFeatures[i].features[j].y = height - 1 - trackingFeatures[i].features[j].y;
}
} else {
for (int j = 0; j < FSDK.FSDK_FACIAL_FEATURE_COUNT; ++j) {
int x = trackingFeatures[i].features[j].x;
trackingFeatures[i].features[j].x = width - 1 - trackingFeatures[i].features[j].y;
trackingFeatures[i].features[j].y = x;
}
}
}
}
FSDK.FreeImage(cameraImage);
}
public void onSurfaceChanged(GL10 unused, int width, int height) { //call opengl functions only inside these functions!
Log.d(TAG, "surfaceChanged");
if (!isResizeCalled) {
isResizeCalled = true;
mainView.resizeForPerformance(width, height);
return;
}
GLES11.glViewport(0, 0, width, height);
Camera.Parameters param = camera.getParameters();
List<Camera.Size> psize = param.getSupportedPreviewSizes();
if (psize.size() > 0) {
int i = 0;
int optDistance = Integer.MAX_VALUE;
Log.d(TAG, "Choosing preview resolution closer to " + width + " x " + height);
double neededScale = height / (double) width;
for (int j = 0; j < psize.size(); ++j) {
double scale = psize.get(j).width / (double) psize.get(j).height;
int distance = (int) (10000 * Math.abs(scale - neededScale));
Log.d(TAG, "Choosing preview resolution, probing " + psize.get(j).width + " x " + psize.get(j).height + " distance: " + distance);
if (distance < optDistance) {
i = j;
optDistance = distance;
} else if (distance == optDistance) {
// try to avoid too low resolution
if ((psize.get(i).width < 300 || psize.get(i).height < 300)
&& psize.get(j).width > psize.get(i).width && psize.get(j).height > psize.get(i).height) {
i = j;
}
}
}
Log.d(TAG, "Using optimal preview size: " + psize.get(i).width + " x " + psize.get(i).height);
param.setPreviewSize(psize.get(i).width, psize.get(i).height);
// adjusting viewport to camera aspect ratio
int viewportHeight = (int) (width * (psize.get(i).width * 1.0f / psize.get(i).height));
GLES11.glViewport(0, 0, width, viewportHeight);
this.width = width;
this.height = viewportHeight;
pixelBuffer = ByteBuffer.allocateDirect(this.width * this.height * 4).order(ByteOrder.nativeOrder());
}
param.set("orientation", "landscape");
camera.setParameters(param);
camera.startPreview();
inPreview = true;
isResized = true;
}
@TargetApi(Build.VERSION_CODES.ICE_CREAM_SANDWICH_MR1)
private void initTex() {
textures = new int[3];
GLES11.glEnable(GLES11Ext.GL_TEXTURE_EXTERNAL_OES);
GLES11.glEnable(GL10.GL_TEXTURE_2D);
GLES11.glGenTextures(3, textures, 0);
GLES11.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, textures[0]);
GLES11.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES11.GL_TEXTURE_WRAP_S, GLES11.GL_CLAMP_TO_EDGE);
GLES11.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES11.GL_TEXTURE_WRAP_T, GLES11.GL_CLAMP_TO_EDGE);
GLES11.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES11.GL_TEXTURE_MIN_FILTER, GLES11.GL_NEAREST);
GLES11.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, GLES11.GL_TEXTURE_MAG_FILTER, GLES11.GL_NEAREST);
}
private void deleteTex() {
GLES11.glDeleteTextures(3, textures, 0);
}
public synchronized void onFrameAvailable(SurfaceTexture st) {
updateSurfaceTexture = true;
mainView.requestRender();
}
public synchronized void snapshot() {
isTakingSnapshot.set(true);
}
}
This is sometimes due to Android not knowing which orientation the previous activity is coming from; try setting the orientation of the activity with
android:screenOrientation="nosensor"
or try the following:
Add the orientation attribute in the manifest
android:screenOrientation=["unspecified" | "behind" |
"landscape" | "portrait" |
"reverseLandscape" | "reversePortrait" |
"sensorLandscape" | "sensorPortrait" |
"userLandscape" | "userPortrait" |
"sensor" | "fullSensor" | "nosensor" |
"user" | "fullUser" | "locked"]
So in your case it will be
<activity android:name=".yourCameractivity"
....
android:screenOrientation="sensorPortrait"/>

loadPixels() throws NullPointerException

I was using Processing in NetBeans to play a movie on an array of LED strips, and I am using OPC.class for the LED strip Fadecandy mapping. This code works as a Processing sketch, but when I tried to use it in NetBeans, the loadPixels() call in the draw() method of OPC.java throws a NullPointerException.
Stacktrace:
Exception in thread "Animation Thread" java.lang.NullPointerException
at processing.core.PApplet.loadPixels(PApplet.java:10625)
at com.processing.OPC.draw(OPC.java:139)
at com.processing.Video.draw(Video.java:62)
at processing.core.PApplet.handleDraw(PApplet.java:2402)
at processing.awt.PSurfaceAWT$12.callDraw(PSurfaceAWT.java:1527)
at processing.core.PSurfaceNone$AnimationThread.run(PSurfaceNone.java:316)
Video.java
public class Video extends PApplet
{
OPC opc;
Movie movie;
public static void main(String args[])
{
PApplet.main(new String[] { "--present", "com.processing.Video" });
}
public void settings()
{
size(600, 240);
}
public void setup()
{
opc = new OPC(this, "192.168.15.10", 7890);
for(int i=0; i<24; i++) {
opc.ledStrip(i * 60, 60,
300, i * 240 / 24 + 240 / 48, 240 / 24, PI, false);
}
movie = new Movie(this, "waffle.mp4");
movie.loop();
}
public void movieEvent(Movie m)
{
m.read();
}
@Override
public void draw()
{
if (movie.available() == true) {
movie.read();
}
image(movie, 0, 0, width, height);
}
}
OPC.java
public class OPC extends PApplet implements Runnable
{
Thread thread;
Socket socket;
OutputStream output, pending;
String host;
int port;
int height = 240;
int width = 600;
int[] pixelLocations;
byte[] packetData;
byte firmwareConfig;
String colorCorrection;
boolean enableShowLocations;
PApplet parent;
OPC(PApplet parent, String host, int port)
{
this.host = host;
this.port = port;
thread = new Thread(this);
thread.start();
this.enableShowLocations = true;
registerMethod("draw", this);
}
public void led(int index, int x, int y){
if (pixelLocations == null) {
pixelLocations = new int[index + 1];
} else if (index >= pixelLocations.length) {
pixelLocations = Arrays.copyOf(pixelLocations, index + 1);
}
pixelLocations[index] = x + 600 * y;
}
public void ledStrip(int index, int count, float x, float y, float spacing, float angle, boolean reversed)
{
float s = sin(angle);
float c = cos(angle);
for (int i = 0; i < count; i++) {
led(reversed ? (index + count - 1 - i) : (index + i),
(int)(x + (i - (count-1)/2.0) * spacing * c + 0.5),
(int)(y + (i - (count-1)/2.0) * spacing * s + 0.5));
}
}
void showLocations(boolean enabled)
{
enableShowLocations = enabled;
}
void setColorCorrection(String s)
{
colorCorrection = s;
sendColorCorrectionPacket();
}
void sendFirmwareConfigPacket()
{
if (pending == null) {
return;
}
byte[] packet = new byte[9];
packet[0] = (byte)0x00; // Channel (reserved)
packet[1] = (byte)0xFF; // Command (System Exclusive)
packet[2] = (byte)0x00; // Length high byte
packet[3] = (byte)0x05; // Length low byte
packet[4] = (byte)0x00; // System ID high byte
packet[5] = (byte)0x01; // System ID low byte
packet[6] = (byte)0x00; // Command ID high byte
packet[7] = (byte)0x02; // Command ID low byte
packet[8] = (byte)firmwareConfig;
try {
pending.write(packet);
} catch (Exception e) {
dispose();
}
}
void sendColorCorrectionPacket()
{
if (colorCorrection == null) {
return;
}
if (pending == null) {
return;
}
byte[] content = colorCorrection.getBytes();
int packetLen = content.length + 4;
byte[] header = new byte[8];
header[0] = (byte)0x00; // Channel (reserved)
header[1] = (byte)0xFF; // Command (System Exclusive)
header[2] = (byte)(packetLen >> 8); // Length high byte
header[3] = (byte)(packetLen & 0xFF); // Length low byte
header[4] = (byte)0x00; // System ID high byte
header[5] = (byte)0x01; // System ID low byte
header[6] = (byte)0x00; // Command ID high byte
header[7] = (byte)0x01; // Command ID low byte
try {
pending.write(header);
pending.write(content);
} catch (Exception e) {
dispose();
}
}
public void draw()
{
if (pixelLocations == null) {
return;
}
if (output == null) {
return;
}
int numPixels = pixelLocations.length;
int ledAddress = 4;
setPixelCount(numPixels);
println("pixel loading");
loadPixels();
println("pixel loaded123");
for (int i = 0; i < numPixels; i++) {
int pixelLocation = pixelLocations[i];
int pixel = pixels[pixelLocation];
packetData[ledAddress] = (byte)(pixel >> 16);
packetData[ledAddress + 1] = (byte)(pixel >> 8);
packetData[ledAddress + 2] = (byte)pixel;
ledAddress += 3;
if (true) {
pixels[pixelLocation] = 0xFFFFFF ^ pixel;
}
}
writePixels();
if (enableShowLocations) {
updatePixels();
print("a");
}
}
void setPixelCount(int numPixels)
{
int numBytes = 3 * numPixels;
int packetLen = 4 + numBytes;
if (packetData == null || packetData.length != packetLen) {
// Set up our packet buffer
packetData = new byte[packetLen];
packetData[0] = (byte)0x00;
packetData[1] = (byte)0x00;
packetData[2] = (byte)(numBytes >> 8);
packetData[3] = (byte)(numBytes & 0xFF);
}
}
void setPixel(int number, int c)
{
println("set");
int offset = 4 + number * 3;
if (packetData == null || packetData.length < offset + 3) {
setPixelCount(number + 1);
}
packetData[offset] = (byte) (c >> 16);
packetData[offset + 1] = (byte) (c >> 8);
packetData[offset + 2] = (byte) c;
}
int getPixel(int number)
{
println("get");
int offset = 4 + number * 3;
if (packetData == null || packetData.length < offset + 3) {
return 0;
}
return (packetData[offset] << 16) | (packetData[offset + 1] << 8) | packetData[offset + 2];
}
void writePixels()
{
println("write");
if (packetData == null || packetData.length == 0) {
return;
}
if (output == null) {
return;
}
try {
output.write(packetData);
} catch (Exception e) {
dispose();
}
}
public void dispose()
{
if (output != null) {
println("Disconnected from OPC server");
}
socket = null;
output = pending = null;
}
public void run()
{
println("?");
if(output == null) { // No OPC connection?
try { // Make one!
socket = new Socket(host, port);
socket.setTcpNoDelay(true);
pending = socket.getOutputStream();
println("Connected to OPC server");
sendColorCorrectionPacket();
sendFirmwareConfigPacket();
output = pending;
} catch (ConnectException e) {
dispose();
} catch (IOException e) {
dispose();
}
}
try {
Thread.sleep(500);
}
catch(InterruptedException e) {
}
}
}
You should only have one class that extends PApplet.
Think of that class as your "main" class. Any other classes that need to use Processing functions will need an instance of that main class in order to access Processing functions. You can pass it in using the this keyword.
In fact, you're already doing that- notice that you pass this into your OPC class, and then store it in the parent parameter. You just never do anything with that parameter.
So step 1 is to remove the extends PApplet from your OPC class:
public class OPC implements Runnable
This will cause some compilation errors. That's okay, we'll fix them later.
Step 2 is to actually store the PApplet parent parameter in a class-level variable.
OPC(PApplet parent, String host, int port){
this.parent = parent;
//rest of constructor
Now that you have that, step 3 is to fix any compilation errors by using the parent variable to access Processing functions.
parent.println("pixel loading");
parent.loadPixels();
More info can be found in the Processing in Eclipse tutorial. It's for Eclipse, but the same principles apply in NetBeans.
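To make step 3 concrete, here is a rough sketch (under the assumptions above, not the full class) of how the reworked OPC.draw() might route all pixel access through the stored parent reference; the packet-building code stays as in the question:
public void draw() {
    if (pixelLocations == null || output == null) {
        return;
    }
    parent.loadPixels();                  // load the sketch's pixel array
    for (int i = 0; i < pixelLocations.length; i++) {
        int pixel = parent.pixels[pixelLocations[i]];
        // ...pack the pixel into packetData exactly as in the original loop...
    }
    writePixels();
    if (enableShowLocations) {
        parent.updatePixels();            // push any debug changes back to the sketch
    }
}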

Tapping android screen solves media skipping, how to fix?

I wrote an Android app that plays multi-track audio files, and it works completely in the emulator. On the device, it plays for a few seconds and then starts skipping and popping every few seconds. If I continuously tap the screen in the dead space of the app, the skipping doesn't occur, and then it recurs about 5 seconds after the screen tapping ceases. I presume this has something to do with thread priority, but I log the thread priority in the play loop and it never changes.
I'm hoping that somebody can tell me either:
a hack where I can simulate a screen tap every second, so that I can run a beta test without the app skipping, or
a way to debug activity/thread/etc. priority, since my logged thread priority never changes even though the behavior suggests it does.
Here is how the player code is executed:
private class DecodeOperation extends AsyncTask<Void, Void, Void> {
@Override
protected Void doInBackground(Void... values) {
AudioTrackPlayer.this.decodeLoop();
return null;
}
@Override
protected void onPreExecute() {
}
@Override
protected void onProgressUpdate(Void... values) {
}
}
Here is the relevant player code:
private void decodeLoop()
{
ByteBuffer[] codecInputBuffers;
ByteBuffer[] codecOutputBuffers;
// extractor gets information about the stream
extractor = new MediaExtractor();
try {
extractor.setDataSource(this.mUrlString);
} catch (Exception e) {
mDelegateHandler.onRadioPlayerError(AudioTrackPlayer.this);
return;
}
MediaFormat format = extractor.getTrackFormat(0);
String mime = format.getString(MediaFormat.KEY_MIME);
// the actual decoder
codec = MediaCodec.createDecoderByType(mime);
codec.configure(format, null /* surface */, null /* crypto */, 0 /* flags */);
codec.start();
codecInputBuffers = codec.getInputBuffers();
codecOutputBuffers = codec.getOutputBuffers();
// get the sample rate to configure AudioTrack
int sampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);
Log.i(LOG_TAG,"mime "+mime);
Log.i(LOG_TAG,"sampleRate "+sampleRate);
// create our AudioTrack instance
audioTrack = new AudioTrack(
AudioManager.STREAM_MUSIC,
sampleRate,
AudioFormat.CHANNEL_OUT_5POINT1,
AudioFormat.ENCODING_PCM_16BIT,
AudioTrack.getMinBufferSize (
sampleRate,
AudioFormat.CHANNEL_OUT_5POINT1,
AudioFormat.ENCODING_PCM_16BIT
),
AudioTrack.MODE_STREAM
);
// start playing, we will feed you later
audioTrack.play();
extractor.selectTrack(0);
// start decoding
final long kTimeOutUs = 10000;
MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
boolean sawInputEOS = false;
boolean sawOutputEOS = false;
int noOutputCounter = 0;
int noOutputCounterLimit = 50;
while (!sawOutputEOS && noOutputCounter < noOutputCounterLimit && !doStop) {
//Log.i(LOG_TAG, "loop ");
noOutputCounter++;
if (!sawInputEOS) {
inputBufIndex = codec.dequeueInputBuffer(kTimeOutUs);
bufIndexCheck++;
// Log.d(LOG_TAG, " bufIndexCheck " + bufIndexCheck);
if (inputBufIndex >= 0) {
ByteBuffer dstBuf = codecInputBuffers[inputBufIndex];
int sampleSize =
extractor.readSampleData(dstBuf, 0 /* offset */);
//Log.d(LOG_TAG, "SampleLength = " + String.valueOf(sampleSize));
long presentationTimeUs = 0;
if (sampleSize < 0) {
Log.d(LOG_TAG, "saw input EOS.");
sawInputEOS = true;
sampleSize = 0;
} else {
presentationTimeUs = extractor.getSampleTime();
}
// can throw illegal state exception (???)
codec.queueInputBuffer(
inputBufIndex,
0 /* offset */,
sampleSize,
presentationTimeUs,
sawInputEOS ? MediaCodec.BUFFER_FLAG_END_OF_STREAM : 0);
if (!sawInputEOS) {
extractor.advance();
}
}
else
{
Log.e(LOG_TAG, "inputBufIndex " +inputBufIndex);
}
}
int res = codec.dequeueOutputBuffer(info, kTimeOutUs);
if (res >= 0) {
//Log.d(LOG_TAG, "got frame, size " + info.size + "/" + info.presentationTimeUs);
if (info.size > 0) {
noOutputCounter = 0;
}
int outputBufIndex = res;
ByteBuffer buf = codecOutputBuffers[outputBufIndex];
final byte[] chunk = new byte[info.size];
buf.get(chunk);
buf.clear();
audioTrack.write(chunk,0,chunk.length);
if(this.mState != State.Playing)
{
mDelegateHandler.onRadioPlayerPlaybackStarted(AudioTrackPlayer.this);
}
this.mState = State.Playing;
codec.releaseOutputBuffer(outputBufIndex, false /* render */);
if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
Log.d(LOG_TAG, "saw output EOS.");
sawOutputEOS = true;
}
} else if (res == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
codecOutputBuffers = codec.getOutputBuffers();
Log.d(LOG_TAG, "output buffers have changed.");
} else if (res == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
MediaFormat oformat = codec.getOutputFormat();
Log.d(LOG_TAG, "output format has changed to " + oformat);
} else {
Log.d(LOG_TAG, "dequeueOutputBuffer returned " + res);
}
}
Log.d(LOG_TAG, "stopping...");
relaxResources(true);
this.mState = State.Stopped;
doStop = true;
// attempt reconnect
if(sawOutputEOS)
{
try {
AudioTrackPlayer.this.play();
return;
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
if(noOutputCounter >= noOutputCounterLimit)
{
mDelegateHandler.onRadioPlayerError(AudioTrackPlayer.this);
}
else
{
mDelegateHandler.onRadioPlayerStopped(AudioTrackPlayer.this);
}
}
Have you monitored the CPU frequency while your application is running? The CPU governor is probably scaling the CPU up on touch and scaling it back down on a timer. Increasing the priority of your background thread to THREAD_PRIORITY_DEFAULT will probably fix the issue; the default priority for AsyncTask is quite low and not appropriate for audio.
You could also increase the size of the AudioTrack's buffer to some multiple of the value returned by getMinBufferSize; that method only returns the minimum possible buffer for the class to operate, and it does not guarantee smooth playback.
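A minimal sketch of both suggestions, based on the asker's DecodeOperation and decodeLoop(); the 4x buffer multiplier is an illustrative value, not a figure from the original answer:
@Override
protected Void doInBackground(Void... values) {
    // Raise the priority of this worker thread before decoding starts.
    android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_DEFAULT);
    AudioTrackPlayer.this.decodeLoop();
    return null;
}
// Inside decodeLoop(): give AudioTrack more headroom than the bare minimum.
int minBuf = AudioTrack.getMinBufferSize(sampleRate,
        AudioFormat.CHANNEL_OUT_5POINT1, AudioFormat.ENCODING_PCM_16BIT);
audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
        AudioFormat.CHANNEL_OUT_5POINT1, AudioFormat.ENCODING_PCM_16BIT,
        4 * minBuf, AudioTrack.MODE_STREAM);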

Java video output can only be opened using Quicktime but no other media player can open it

I have a program that captures the screen and then turns those images into a movie (using JpegImagesToMovie.java, customized to work with .png). I have tried multiple extensions: .mov, .mp4, .avi (I am open to trying others). However, regardless of which extension I use, when I try to open the file with Windows Media Player I get the following error:
Windows Media Player encountered a problem while playing the file.
Error code C00D11B1
I've also tried opening the file using VLC but that produces the following error:
No suitable decoder module:
VLC does not support the audio or video format "twos". Unfortunately there is no way for you to fix this.
Opening the file using QuickTime does work.
So the question is: how can I produce a video file that can be opened with most, if not all, media players?
Here is my JpegImagesToMovie.java
package maple;
/*
* #(#)JpegImagesToMovie.java 1.3 01/03/13
*
* Copyright (c) 1999-2001 Sun Microsystems, Inc. All Rights Reserved.
*
* Sun grants you ("Licensee") a non-exclusive, royalty free, license to use,
* modify and redistribute this software in source and binary code form,
* provided that i) this copyright notice and license appear on all copies of
* the software; and ii) Licensee does not utilize the software in a manner
* which is disparaging to Sun.
*
* This software is provided "AS IS," without a warranty of any kind. ALL
* EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
* IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
* NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN AND ITS LICENSORS SHALL NOT BE
* LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING
* OR DISTRIBUTING THE SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN OR ITS
* LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT,
* INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER
* CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF THE USE OF
* OR INABILITY TO USE SOFTWARE, EVEN IF SUN HAS BEEN ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* This software is not designed or intended for use in on-line control of
* aircraft, air traffic, aircraft navigation or aircraft communications; or in
* the design, construction, operation or maintenance of any nuclear
* facility. Licensee represents and warrants that it will not use or
* redistribute the Software for such purposes.
*/
import java.io.*;
import java.util.*;
import java.awt.Dimension;
import java.awt.image.BufferedImage;
import javax.imageio.ImageIO;
import javax.media.*;
import javax.media.control.*;
import javax.media.protocol.*;
import javax.media.datasink.*;
import javax.media.format.RGBFormat;
import javax.media.format.VideoFormat;
/**
* This program takes a list of JPEG image files and convert them into a
* QuickTime movie.
*/
public class JpegImagesToMovie implements ControllerListener, DataSinkListener {
static private Vector<String> getImageFilesPathsVector(
String imagesFolderPath) {
File imagesFolder = new File(imagesFolderPath);
String[] imageFilesArray = imagesFolder.list();
Vector<String> imageFilesPathsVector = new Vector<String>();
for (String imageFileName : imageFilesArray) {
if (!imageFileName.toLowerCase().endsWith("png"))
continue;
imageFilesPathsVector.add(imagesFolder.getAbsolutePath()
+ File.separator + imageFileName);
}
return imageFilesPathsVector;
}
public boolean doIt(int width, int height, int frameRate,
Vector<String> inFiles, MediaLocator outML) {
ImageDataSource ids = new ImageDataSource(width, height, frameRate,
inFiles);
Processor p;
try {
System.err
.println("- create processor for the image datasource ...");
p = Manager.createProcessor(ids);
} catch (Exception e) {
System.err
.println("Yikes! Cannot create a processor from the data source.");
return false;
}
p.addControllerListener(this);
// Put the Processor into configured state so we can set
// some processing options on the processor.
p.configure();
if (!waitForState(p, Processor.Configured)) {
System.err.println("Failed to configure the processor.");
return false;
}
// Set the output content descriptor to QuickTime.
p.setContentDescriptor(new ContentDescriptor(
FileTypeDescriptor.QUICKTIME));// FileTypeDescriptor.MSVIDEO
// Query for the processor for supported formats.
// Then set it on the processor.
TrackControl tcs[] = p.getTrackControls();
Format f[] = tcs[0].getSupportedFormats();
if (f == null || f.length <= 0) {
System.err.println("The mux does not support the input format: "
+ tcs[0].getFormat());
return false;
}
tcs[0].setFormat(f[0]);
System.err.println("Setting the track format to: " + f[0]);
// We are done with programming the processor. Let's just
// realize it.
p.realize();
if (!waitForState(p, Controller.Realized)) {
System.err.println("Failed to realize the processor.");
return false;
}
// Now, we'll need to create a DataSink.
DataSink dsink;
if ((dsink = createDataSink(p, outML)) == null) {
System.err
.println("Failed to create a DataSink for the given output MediaLocator: "
+ outML);
return false;
}
dsink.addDataSinkListener(this);
fileDone = false;
System.err.println("start processing...");
// OK, we can now start the actual transcoding.
try {
p.start();
dsink.start();
} catch (IOException e) {
System.err.println("IO error during processing");
return false;
}
// Wait for EndOfStream event.
waitForFileDone();
// Cleanup.
try {
dsink.close();
} catch (Exception e) {
}
p.removeControllerListener(this);
System.err.println("...done processing.");
return true;
}
/**
* Create the DataSink.
*/
DataSink createDataSink(Processor p, MediaLocator outML) {
DataSource ds;
if ((ds = p.getDataOutput()) == null) {
System.err
.println("Something is really wrong: the processor does not have an output DataSource");
return null;
}
DataSink dsink;
try {
System.err.println("- create DataSink for: " + outML);
dsink = Manager.createDataSink(ds, outML);
dsink.open();
} catch (Exception e) {
System.err.println("Cannot create the DataSink: " + e);
return null;
}
return dsink;
}
Object waitSync = new Object();
boolean stateTransitionOK = true;
/**
* Block until the processor has transitioned to the given state. Return
* false if the transition failed.
*/
boolean waitForState(Processor p, int state) {
synchronized (waitSync) {
try {
while (p.getState() < state && stateTransitionOK)
waitSync.wait();
} catch (Exception e) {
}
}
return stateTransitionOK;
}
/**
* Controller Listener.
*/
public void controllerUpdate(ControllerEvent evt) {
if (evt instanceof ConfigureCompleteEvent
|| evt instanceof RealizeCompleteEvent
|| evt instanceof PrefetchCompleteEvent) {
synchronized (waitSync) {
stateTransitionOK = true;
waitSync.notifyAll();
}
} else if (evt instanceof ResourceUnavailableEvent) {
synchronized (waitSync) {
stateTransitionOK = false;
waitSync.notifyAll();
}
} else if (evt instanceof EndOfMediaEvent) {
evt.getSourceController().stop();
evt.getSourceController().close();
}
}
Object waitFileSync = new Object();
boolean fileDone = false;
boolean fileSuccess = true;
/**
* Block until file writing is done.
*/
boolean waitForFileDone() {
synchronized (waitFileSync) {
try {
while (!fileDone)
waitFileSync.wait();
} catch (Exception e) {
}
}
return fileSuccess;
}
/**
* Event handler for the file writer.
*/
public void dataSinkUpdate(DataSinkEvent evt) {
if (evt instanceof EndOfStreamEvent) {
synchronized (waitFileSync) {
fileDone = true;
waitFileSync.notifyAll();
}
} else if (evt instanceof DataSinkErrorEvent) {
synchronized (waitFileSync) {
fileDone = true;
fileSuccess = false;
waitFileSync.notifyAll();
}
}
}
public static void main(String args[]) {
// changed this method a bit
if (args.length == 0)
prUsage();
// Parse the arguments.
int i = 0;
int width = -1, height = -1, frameRate = -1;
Vector<String> inputFiles = new Vector<String>();
String rootDir = null;
String outputURL = null;
while (i < args.length) {
if (args[i].equals("-w")) {
i++;
if (i >= args.length)
prUsage();
width = Integer.parseInt(args[i]);
} else if (args[i].equals("-h")) {
i++;
if (i >= args.length)
prUsage();
height = Integer.parseInt(args[i]);
} else if (args[i].equals("-f")) {
i++;
if (i >= args.length)
prUsage();
frameRate = Integer.parseInt(args[i]);
} else if (args[i].equals("-o")) {
i++;
if (i >= args.length)
prUsage();
outputURL = args[i];
} else if (args[i].equals("-i")) {
i++;
if (i >= args.length)
prUsage();
rootDir = args[i];
} else {
System.out.println(".");
prUsage();
}
i++;
}
if (rootDir == null) {
System.out
.println("Since no input (-i) folder was provided, assuming this JAR is inside the JPEGs folder.");
rootDir = (new File(".")).getAbsolutePath();
}
inputFiles = getImageFilesPathsVector(rootDir);
if (inputFiles.size() == 0)
prUsage();
if (outputURL == null) {
outputURL = (new File(rootDir)).getAbsolutePath() + File.separator
+ "pngs2movie.mov";
}
if (!outputURL.toLowerCase().startsWith("file:///")) {
outputURL = "file:///" + outputURL;
}
// Check for output file extension.
if (!outputURL.toLowerCase().endsWith(".mov")) {
prUsage();
outputURL += ".mov";
System.out
.println("outputURL should end with .mov. Appending it.\nNow outputURL is: "
+ outputURL);
}
if (width < 0 || height < 0) {
prUsage();
System.out.println("Trying to guess movie size from first image");
BufferedImage firstImageInFolder = getFirstImageInFolder(rootDir);
width = firstImageInFolder.getWidth();
height = firstImageInFolder.getHeight();
System.out.println("width = " + width);
System.out.println("height = " + height);
}
// Check the frame rate.
if (frameRate < 1)
frameRate = 30;
// Generate the output media locators.
MediaLocator oml;
if ((oml = createMediaLocator(outputURL)) == null) {
System.err.println("Cannot build media locator from: " + outputURL);
System.exit(0);
}
JpegImagesToMovie imageToMovie = new JpegImagesToMovie();
imageToMovie.doIt(width, height, frameRate, inputFiles, oml);
System.exit(0);
}
private static BufferedImage getFirstImageInFolder(String rootDir) {
File rootFile = new File(rootDir);
String[] list = (rootFile).list();
BufferedImage bufferedImage = null;
for (String filePath : list) {
if (!filePath.toLowerCase().endsWith(".png")) {
continue;
}
try {
bufferedImage = ImageIO.read(new File(rootFile
.getAbsoluteFile() + File.separator + filePath));
break;
} catch (IOException e) {
e.printStackTrace();
}
}
return bufferedImage;
}
static void prUsage() {
System.err
.println("Usage: java JpegImagesToMovie [-w <width>] [-h <height>] [-f <frame rate>] [-o <output URL>] -i <input JPEG files dir Path>");
// System.exit(-1);
}
/**
* Create a media locator from the given string.
*/
@SuppressWarnings("unused")
public static MediaLocator createMediaLocator(String url) {
MediaLocator ml;
if (url.indexOf(":") > 0 && (ml = new MediaLocator(url)) != null)
return ml;
if (url.startsWith(File.separator)) {
if ((ml = new MediaLocator("file:" + url)) != null)
return ml;
} else {
String file = "file:" + System.getProperty("user.dir")
+ File.separator + url;
if ((ml = new MediaLocator(file)) != null)
return ml;
}
return null;
}
// /////////////////////////////////////////////
//
// Inner classes.
// /////////////////////////////////////////////
/**
* A DataSource to read from a list of JPEG image files and turn that into a
* stream of JMF buffers. The DataSource is not seekable or positionable.
*/
class ImageDataSource extends PullBufferDataSource {
ImageSourceStream streams[];
ImageDataSource(int width, int height, int frameRate,
Vector<String> images) {
streams = new ImageSourceStream[1];
streams[0] = new PngImageSourceStream(width, height, frameRate, images);
}
public void setLocator(MediaLocator source) {
}
public MediaLocator getLocator() {
return null;
}
/**
* Content type is of RAW since we are sending buffers of video frames
* without a container format.
*/
public String getContentType() {
return ContentDescriptor.RAW;
}
public void connect() {
}
public void disconnect() {
}
public void start() {
}
public void stop() {
}
/**
* Return the ImageSourceStreams.
*/
public PullBufferStream[] getStreams() {
return streams;
}
/**
* We could have derived the duration from the number of frames and
* frame rate. But for the purpose of this program, it's not necessary.
*/
public Time getDuration() {
return DURATION_UNKNOWN;
}
public Object[] getControls() {
return new Object[0];
}
public Object getControl(String type) {
return null;
}
}
/**
* The source stream to go along with ImageDataSource.
*/
class ImageSourceStream implements PullBufferStream {
Vector<String> images;
int width, height;
VideoFormat format;
int nextImage = 0; // index of the next image to be read.
boolean ended = false;
public ImageSourceStream(int width, int height, int frameRate,
Vector<String> images) {
this.width = width;
this.height = height;
this.images = images;
format = new VideoFormat(VideoFormat.JPEG, new Dimension(width,
height), Format.NOT_SPECIFIED, Format.byteArray,
(float) frameRate);
}
/**
* We should never need to block assuming data are read from files.
*/
public boolean willReadBlock() {
return false;
}
/**
* This is called from the Processor to read a frame worth of video
* data.
*/
public void read(Buffer buf) throws IOException {
// Check if we've finished all the frames.
if (nextImage >= images.size()) {
// We are done. Set EndOfMedia.
System.err.println("Done reading all images.");
buf.setEOM(true);
buf.setOffset(0);
buf.setLength(0);
ended = true;
return;
}
String imageFile = (String) images.elementAt(nextImage);
nextImage++;
System.err.println(" - reading image file: " + imageFile);
// Open a random access file for the next image.
RandomAccessFile raFile;
raFile = new RandomAccessFile(imageFile, "r");
byte data[] = null;
// Check the input buffer type & size.
if (buf.getData() instanceof byte[])
data = (byte[]) buf.getData();
// Check to see the given buffer is big enough for the frame.
if (data == null || data.length < raFile.length()) {
data = new byte[(int) raFile.length()];
buf.setData(data);
}
// Read the entire JPEG image from the file.
raFile.readFully(data, 0, (int) raFile.length());
System.err.println(" read " + raFile.length() + " bytes.");
buf.setOffset(0);
buf.setLength((int) raFile.length());
buf.setFormat(format);
buf.setFlags(buf.getFlags() | Buffer.FLAG_KEY_FRAME);
// Close the random access file.
raFile.close();
}
/**
* Return the format of each video frame. That will be JPEG.
*/
public Format getFormat() {
return format;
}
public ContentDescriptor getContentDescriptor() {
return new ContentDescriptor(ContentDescriptor.RAW);
}
public long getContentLength() {
return 0;
}
public boolean endOfStream() {
return ended;
}
public Object[] getControls() {
return new Object[0];
}
public Object getControl(String type) {
return null;
}
}
class PngImageSourceStream extends ImageSourceStream {
public PngImageSourceStream(int width, int height, int frameRate,
Vector<String> images) {
super(width, height, frameRate, images);
// configure the new format as RGB format
format = new RGBFormat(new Dimension(width, height),
Format.NOT_SPECIFIED, Format.byteArray, frameRate,
24, // 24 bits per pixel
1, 2, 3); // red, green and blue masks when data are in the form of byte[]
}
public void read(Buffer buf) throws IOException {
// Check if we've finished all the frames.
if (nextImage >= images.size()) {
// We are done. Set EndOfMedia.
System.err.println("Done reading all images.");
buf.setEOM(true);
buf.setOffset(0);
buf.setLength(0);
ended = true;
return;
}
String imageFile = (String) images.elementAt(nextImage);
nextImage++;
System.err.println(" - reading image file: " + imageFile);
// read the PNG image
BufferedImage image = ImageIO.read(new File(imageFile));
boolean hasAlpha = image.getColorModel().hasAlpha();
Dimension size = format.getSize();
// convert 32-bit RGBA to 24-bit RGB
byte[] imageData = convertTo24Bit(hasAlpha, image.getRaster().getPixels(0, 0, size.width, size.height, (int[]) null));
buf.setData(imageData);
System.err.println(" read " + imageData.length + " bytes.");
buf.setOffset(0);
buf.setLength(imageData.length);
buf.setFormat(format);
buf.setFlags(buf.getFlags() | Buffer.FLAG_KEY_FRAME);
}
private void convertIntByteToByte(int[] src, int srcIndex, byte[] out, int outIndex) {
// Note: the int[] returned by bufferedImage.getRaster().getPixels()
// is an int[]
// where each int is the value for one color i.e. the first 4 ints
// contain the RGBA values for the first pixel
int r = src[srcIndex];
int g = src[srcIndex + 1];
int b = src[srcIndex + 2];
out[outIndex] = (byte) (r & 0xFF);
out[outIndex + 1] = (byte) (g & 0xFF);
out[outIndex + 2] = (byte) (b & 0xFF);
}
private byte[] convertTo24Bit(boolean hasAlpha, int[] input) {
int dataLength = input.length;
int newSize = (hasAlpha ? dataLength * 3 / 4 : dataLength);
byte[] convertedData = new byte[newSize];
// for every 4 int values of the original array (RGBA) write 3
// bytes (RGB) to the output array
// if there is no alpha (i.e. RGB image) then just convert int to byte
for (int i = 0, j = 0; i < dataLength; i += 3, j += 3) {
convertIntByteToByte(input, i, convertedData, j);
if (hasAlpha) {
i++; // skip an extra byte if the original image has an
// extra int for transparency
}
}
return convertedData;
}
}
}
And I make the video as follows:
public void makeVideo (String movFile) throws MalformedURLException {
JpegImagesToMovie imageToMovie = new JpegImagesToMovie();
Vector<String> imgList = new Vector <String>();
File f = new File(JavCapture.tmpLocation + "\\tmp\\");
File[] fileList = f.listFiles();
for (int i = 0; i < fileList.length; i++) {
imgList.add(fileList[i].getAbsolutePath());
}
MediaLocator ml;
if ((ml = imageToMovie.createMediaLocator(movFile)) == null) {
System.exit(0);
}
setWidth();
setHeight();
imageToMovie.doIt(width, height, (1000/125), imgList, ml);
}
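For reference, a hypothetical call site for the method above might look like this (the ScreenRecorder class name and the output file name are assumptions for illustration, not part of the original code):
// Assumed usage: frames have already been written under JavCapture.tmpLocation + "\tmp\",
// and makeVideo() assembles them into a QuickTime .mov next to them.
ScreenRecorder recorder = new ScreenRecorder(); // hypothetical class that owns makeVideo()
recorder.makeVideo(JavCapture.tmpLocation + "\\capture.mov");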
