Encode video using ffmpeg from javacv on Android causes native code crash - java

NOTE: I have updated this since originally asking the question to reflect some of what I have learned about loading live camera images into the ffmpeg libraries.
I am using ffmpeg from javacv compiled for Android to encode/decode video for my application. (Note that originally, I was trying to use ffmpeg-java, but it has some incompatible libraries)
Original problem: The problem that I've run into is that I am currently getting each frame as a Bitmap (just a plain android.graphics.Bitmap) and I can't figure out how to stuff that into the encoder.
Solution in javacv's ffmpeg: Use avpicture_fill(), the format from Android is supposedly YUV420P, though I can't verify this until my encoder issues (below) are fixed.
avcodec.avpicture_fill((AVPicture)mFrame, picPointer, avutil.PIX_FMT_YUV420P, VIDEO_WIDTH, VIDEO_HEIGHT)
Problem Now: The line that is supposed to actually encode the data crashes the thread. I get a big native code stack trace that I'm unable to understand. Does anybody have a suggestion?
Here is the code that I am using to instantiate all the ffmpeg libraries:
// --- H.263 encoder initialization (javacv / FFmpeg) ---
// Registration must happen before any other libavcodec/libavformat call.
avcodec.avcodec_register_all();
avcodec.avcodec_init();
avformat.av_register_all();

mCodec = avcodec.avcodec_find_encoder(avcodec.CODEC_ID_H263);
if (mCodec == null)
{
    Logging.Log("Unable to find encoder.");
    return;
}
Logging.Log("Found encoder.");

mCodecCtx = avcodec.avcodec_alloc_context();
mCodecCtx.bit_rate(300000);
mCodecCtx.codec(mCodec);
// NOTE(review): H.263 only accepts a fixed set of frame sizes; 352x288 (CIF) is one of them.
mCodecCtx.width(VIDEO_WIDTH);
mCodecCtx.height(VIDEO_HEIGHT);
mCodecCtx.pix_fmt(avutil.PIX_FMT_YUV420P);
mCodecCtx.codec_id(avcodec.CODEC_ID_H263);
mCodecCtx.codec_type(avutil.AVMEDIA_TYPE_VIDEO);
// time_base is the inverse of the frame rate: 1/30 => 30 fps.
AVRational ratio = new AVRational();
ratio.num(1);
ratio.den(30);
mCodecCtx.time_base(ratio);
// Rate-control / motion-estimation tuning (values kept as in the original).
mCodecCtx.coder_type(1);
mCodecCtx.flags(mCodecCtx.flags() | avcodec.CODEC_FLAG_LOOP_FILTER);
mCodecCtx.me_cmp(avcodec.FF_LOSS_CHROMA);
mCodecCtx.me_method(avcodec.ME_HEX);
mCodecCtx.me_subpel_quality(6);
mCodecCtx.me_range(16);
mCodecCtx.gop_size(30);
mCodecCtx.keyint_min(10);
mCodecCtx.scenechange_threshold(40);
mCodecCtx.i_quant_factor((float) 0.71);
mCodecCtx.b_frame_strategy(1);
mCodecCtx.qcompress((float) 0.6);
mCodecCtx.qmin(10);
mCodecCtx.qmax(51);
mCodecCtx.max_qdiff(4);
mCodecCtx.max_b_frames(1);
mCodecCtx.refs(2);
mCodecCtx.directpred(3);
mCodecCtx.trellis(1);
mCodecCtx.flags2(mCodecCtx.flags2() | avcodec.CODEC_FLAG2_BPYRAMID | avcodec.CODEC_FLAG2_WPRED | avcodec.CODEC_FLAG2_8X8DCT | avcodec.CODEC_FLAG2_FASTPSKIP);
// BUG FIX: avcodec_open() returns 0 on SUCCESS and a negative value on
// failure. The original test (== 0) treated success as an error and bailed
// out whenever the encoder opened correctly. Test for < 0 instead (the
// working sample later in this file does exactly that).
if (avcodec.avcodec_open(mCodecCtx, mCodec) < 0)
{
    Logging.Log("Unable to open encoder.");
    return;
}
Logging.Log("Encoder opened.");

// Size in bytes of one raw YUV420P frame at the configured dimensions.
mFrameSize = avcodec.avpicture_get_size(avutil.PIX_FMT_YUV420P, VIDEO_WIDTH, VIDEO_HEIGHT);
Logging.Log("Frame size - '" + mFrameSize + "'.");
//mPic = new AVPicture(mPicSize);
mFrame = avcodec.avcodec_alloc_frame();
if (mFrame == null)
{
    Logging.Log("Unable to alloc frame.");
}
This is what I want to be able to execute next:
// Wrap the raw preview bytes so the native side can read them.
BytePointer picPointer = new BytePointer(data);
int bBuffSize = mFrameSize;
BytePointer bBuffer = new BytePointer(bBuffSize);
int picSize = 0;
// avpicture_fill() points the AVPicture's plane pointers/linesizes into
// picPointer; it returns the byte size required, or a negative error.
if ((picSize = avcodec.avpicture_fill((AVPicture)mFrame, picPointer, avutil.PIX_FMT_YUV420P, VIDEO_WIDTH, VIDEO_HEIGHT)) <= 0)
{
    Logging.Log("Couldn't convert preview to AVPicture (" + picSize + ")");
    return;
}
Logging.Log("Converted preview to AVPicture (" + picSize + ")");
VCAP_Package vPackage = new VCAP_Package();
if (mCodecCtx.isNull())
{
    Logging.Log("Codec Context is null!");
}
//encode the image
int size = avcodec.avcodec_encode_video(mCodecCtx, bBuffer, bBuffSize, mFrame);
int totalSize = 0;
// BUG FIX: avcodec_encode_video() returns the number of bytes written,
// 0 when the frame was buffered (no output produced yet) and a negative
// value on error. The original 'while (size >= 0)' therefore never
// terminated once the encoder started returning 0 while draining delayed
// frames. Loop only while bytes are actually produced, matching the
// working sample further down ('for(; out_size > 0; ...)').
while (size > 0)
{
    totalSize += size;
    Logging.Log("Encoded '" + size + "' bytes.");
    //Get any delayed frames by passing a null picture until drained.
    size = avcodec.avcodec_encode_video(mCodecCtx, bBuffer, bBuffSize, null);
}
Logging.Log("Finished encoding. (" + totalSize + ")");
But, as of now, I don't know how to put the Bitmap into the right piece or if I have that setup correctly.
A few notes about the code:
- VIDEO_WIDTH = 352
- VIDEO_HEIGHT = 288
- VIDEO_FPS = 30;

After a lot of searching, I figured out that you have to load the pointers in a fairly strict and awkward manner. This is how I got everything working:
Codec setup:
// --- Working encoder setup (javacv / FFmpeg) ---
// Registration must run before any other libavcodec/libavformat call.
avcodec.avcodec_register_all();
avcodec.avcodec_init();
avformat.av_register_all();
/* find the H263 video encoder */
mCodec = avcodec.avcodec_find_encoder(avcodec.CODEC_ID_H263);
if (mCodec == null) {
Log.d("TEST_VIDEO", "avcodec_find_encoder() run fail.");
}
mCodecCtx = avcodec.avcodec_alloc_context();
picture = avcodec.avcodec_alloc_frame();
/* put sample parameters */
mCodecCtx.bit_rate(400000);
/* resolution must be a multiple of two */
mCodecCtx.width(VIDEO_WIDTH);
mCodecCtx.height(VIDEO_HEIGHT);
/* frames per second: time_base is the inverse of the frame rate (1/VIDEO_FPS) */
AVRational avFPS = new AVRational();
avFPS.num(1);
avFPS.den(VIDEO_FPS);
mCodecCtx.time_base(avFPS);
mCodecCtx.pix_fmt(avutil.PIX_FMT_YUV420P);
mCodecCtx.codec_id(avcodec.CODEC_ID_H263);
mCodecCtx.codec_type(avutil.AVMEDIA_TYPE_VIDEO);
/* open it: avcodec_open returns 0 on success, negative on failure */
if (avcodec.avcodec_open(mCodecCtx, mCodec) < 0) {
Log.d("TEST_VIDEO", "avcodec_open() run fail.");
}
/* alloc image and output buffer */
output_buffer_size = 100000;
output_buffer = avutil.av_malloc(output_buffer_size);
// One raw YUV420P frame = Y plane (w*h) + U plane (w*h/4) + V plane (w*h/4).
size = mCodecCtx.width() * mCodecCtx.height();
picture_buffer = avutil.av_malloc((size * 3) / 2); /* size for YUV 420 */
// The three plane pointers are carved out of the single native buffer.
// NOTE(review): this relies on JavaCPP's Pointer.position() to offset the
// pointer in place -- placing U at offset `size` and V after the U plane.
// The exact offset produced by the chained position(size / 4) call should
// be verified against JavaCPP Pointer semantics (TODO confirm).
picture.data(0, new BytePointer(picture_buffer));
picture.data(1, picture.data(0).position(size));
picture.data(2, picture.data(1).position(size / 4));
// linesize = bytes per row: full width for Y, half width for the
// 2x2-subsampled chroma planes.
picture.linesize(0, mCodecCtx.width());
picture.linesize(1, mCodecCtx.width() / 2);
picture.linesize(2, mCodecCtx.width() / 2);
Handling the preview data:
//(1) Convert the raw camera preview byte[] to planar YUV420 first.
// NOTE(review): assumes convert_yuv422_to_yuv420 writes the Y plane, then
// Cb, then Cr into data420 -- verify against its implementation.
byte[] data420 = new byte[data.length];
convert_yuv422_to_yuv420(data, data420, VIDEO_WIDTH, VIDEO_HEIGHT);
//(2) Fill picture buffer
// Plane offsets inside data420: Y occupies [0, w*h), Cb starts at w*h,
// Cr starts at w*h * 5/4.
int data1_offset = VIDEO_HEIGHT * VIDEO_WIDTH;
int data2_offset = data1_offset * 5 / 4;
// Cache linesizes outside the loops -- each accessor is a JNI call.
int pic_linesize_0 = picture.linesize(0);
int pic_linesize_1 = picture.linesize(1);
int pic_linesize_2 = picture.linesize(2);
//Y plane: copy row by row, honoring the destination stride (linesize).
for(y = 0; y < VIDEO_HEIGHT; y++)
{
for(x = 0; x < VIDEO_WIDTH; x++)
{
picture.data(0).put((y * pic_linesize_0 + x), data420[y * VIDEO_WIDTH + x]);
}
}
//Cb and Cr planes: half resolution in both dimensions (4:2:0 subsampling).
for(y = 0; y < VIDEO_HEIGHT / 2; y++) {
for(x = 0; x < VIDEO_WIDTH / 2; x++) {
picture.data(1).put((y * pic_linesize_1 + x), data420[data1_offset + y * VIDEO_WIDTH / 2 + x]);
picture.data(2).put((y * pic_linesize_2 + x), data420[data2_offset + y * VIDEO_WIDTH / 2 + x]);
}
}
//(3) Encode
//Encode the image into output_buffer; returns the number of bytes written.
out_size = avcodec.avcodec_encode_video(mCodecCtx, new BytePointer(output_buffer), output_buffer_size, picture);
Log.d("TEST_VIDEO", "Encoded '" + out_size + "' bytes");
//Delayed frames: keep passing a null picture until the encoder is drained
//(returns 0 bytes).
for(; out_size > 0; i++) {
out_size = avcodec.avcodec_encode_video(mCodecCtx, new BytePointer(output_buffer), output_buffer_size, null);
Log.d("TEST_VIDEO", "Encoded '" + out_size + "' bytes");
//fwrite(output_buffer, 1, out_size, file);
}
I am still working to packetize the data, but the ongoing test project can be found here: http://code.google.com/p/test-video-encode/

Does android graphics library support the YUV format:
codecCtx.pix_fmt = AVCodecLibrary.PIX_FMT_YUV420P;
See if you can set it to ARGB or RGB32. I know the android graphics library supports this pixel format.
PS: I don't know anything about ffmpeg

Related

ghostscript 9.53.3 auto rotating images for some reason

I am using ghostsript version 9.53.3, I have an eps image. but when I try to convert it to png, it rotates it for some reason I don't understand.
here is my command which I execute in java by executing process and sending commandline args:
["C:\Program Files\gs\gs9.53.3\bin\gswin64.exe", -dNOPAUSE, -dEPSFitPage, -g88x88, -r300, -sDEVICE=png16m, -dGraphicsAlphaBits=4, -dBATCH, -sOutputFile=C:\ProgramData\InBetween\Temp\InBetweenClient\IBTemp\eps_12257004264254001602758322946eac07d9c-2366-49f1-bd2e-0181c1bef7ea.png, "D:\Trunkcheckout\IBClientServer\bin\..\Projects\myview_2356_VerpackungenPNG\Images\piktogramme/eps/2260_Erf_360.eps"]
My code:
List<String> shellCommand = new ArrayList<String>();
if (isWindowOS) {
shellCommand.add("\"" + getGSPath() + "\"");
shellCommand.add("-dNOPAUSE");
shellCommand.add("-dEPSFitPage");
width = Unit.getPixelsFromPoints(width, dpi);
height = Unit.getPixelsFromPoints(height, dpi);
shellCommand.add("-g" + width + "x" + height);
shellCommand.add("-r" + dpi);
shellCommand.add("-sDEVICE=" + device);
if (GSUtilsInterface.DEVICE_PNG.equals(device)) {
shellCommand.add("-dGraphicsAlphaBits=4");
}
shellCommand.add("-dBATCH");
shellCommand.add("-sOutputFile=" +outputFile.toString());
shellCommand.add("\"" + imagePath + "\"");
} else {
shellCommand.add("pstopdf");
shellCommand.add(imagePath);
shellCommand.add("-o");
shellCommand.add(outputFile.toString());
}
log.debug("shellCommand:" + shellCommand);
InputStream in = null;
try {
Process p;
// Process p1 = null;
log.eventLow("Executing: " + shellCommand.toString());
if (isWindowOS) {
p = new ProcessBuilder(shellCommand).start();
Try adding -dAutoRotatePages=/None

google cloud vision api OCR bounding box

I used the Google Cloud Vision API.
I want to recognize only certain parts of the image,
running OCR analysis on a region specified by coordinates
(assuming I have already found the coordinates in the image).
This is not covered in the Google example.
Is it possible?
Yes it can be possible, here i leave a part of code that you can try,
The main thing is to find the vertex "x" and "y" of the fields you are trying to work with.
try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
List<AnnotateImageResponse> responses = response.getResponsesList();
for (AnnotateImageResponse res : responses) {
if (res.hasError()) {
System.out.format("Error: %s%n", res.getError().getMessage());
return;
}
for (int i = 0; i < res.getTextAnnotationsCount(); i++) {
EntityAnnotation annotation = res.getTextAnnotations(i);
if (i > 0) {
descriptionText = annotation.getDescription().replaceAll("\\s+", "").trim();
System.out.println("Text--> " + descriptionText);
for (int x = 0; x < annotation.getBoundingPoly().getVerticesCount(); x++) {
xvertice = annotation.getBoundingPoly().getVertices(x).getX();
yvertice = annotation.getBoundingPoly().getVertices(x).getY();
System.out.println("X--> " + xvertice);
System.out.println("Y--> " + yvertice);
System.out.println("<---------------->");
}
/*
* for (int xx = 0; xx <
* annotation.getBoundingPoly().getNormalizedVerticesCount(); xx++) { xp =
* annotation.getBoundingPoly().getNormalizedVertices(xx).getX(); yy =
* annotation.getBoundingPoly().getNormalizedVertices(xx).getY();
*
* System.out.println("X--> " + xp); System.out.println("Y--> " + yy);
* System.out.println("<---------------->"); }
*/
}
}
}

Performing Feature Matching of Images in Java

I'm trying to perform feature matching on images in Java. The code example for this I found is this one. I fixed the FeatureDetector deprecation via this answer.
However, when I use this code, it does not detect a sub image being part of the bigger image when they clearly are. Here is my adapted code example:
/**
 * Checks whether {@code smallBufferedImage} appears to be part of
 * {@code largeBufferedImage} using FAST keypoints, BRISK binary descriptors
 * and brute-force Hamming matching.
 *
 * @param largeBufferedImage the image to search in
 * @param smallBufferedImage the (sub-)image expected inside the large one
 * @return {@code true} if at least 4 descriptor matches survive distance filtering
 * @throws IOException if an image cannot be converted to a {@code Mat}
 */
public static boolean performFeatureMatching(BufferedImage largeBufferedImage, BufferedImage smallBufferedImage) throws IOException
{
    FastFeatureDetector fd = FastFeatureDetector.create();
    final MatOfKeyPoint keyPointsLarge = new MatOfKeyPoint();
    final MatOfKeyPoint keyPointsSmall = new MatOfKeyPoint();
    Mat largeImage = bufferedImage2Mat(largeBufferedImage);
    Mat smallImage = bufferedImage2Mat(smallBufferedImage);
    fd.detect(largeImage, keyPointsLarge);
    fd.detect(smallImage, keyPointsSmall);
    System.out.println("keyPoints.size() : " + keyPointsLarge.size());
    System.out.println("keyPoints2.size() : " + keyPointsSmall.size());
    Mat descriptorsLarge = new Mat();
    Mat descriptorsSmall = new Mat();
    // FIX (deprecation): DescriptorExtractor.create(DescriptorExtractor.BRISK)
    // is deprecated. BRISK is a Feature2D in modern OpenCV and computes the
    // same binary descriptors via compute().
    BRISK extractor = BRISK.create();
    extractor.compute(largeImage, keyPointsLarge, descriptorsLarge);
    extractor.compute(smallImage, keyPointsSmall, descriptorsSmall);
    System.out.println("descriptorsA.size() : " + descriptorsLarge.size());
    System.out.println("descriptorsB.size() : " + descriptorsSmall.size());
    MatOfDMatch matches = new MatOfDMatch();
    // Hamming distance is the correct metric for binary (BRISK) descriptors.
    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMINGLUT);
    matcher.match(descriptorsLarge, descriptorsSmall, matches);
    System.out.println("matches.size() : " + matches.size());
    MatOfDMatch matchesFiltered = new MatOfDMatch();
    List<DMatch> matchesList = matches.toList();
    List<DMatch> bestMatches = new ArrayList<>();
    // Track the spread of match distances (primitives -- no need to box).
    double max_dist = 0.0;
    double min_dist = 100.0;
    for (DMatch aMatchesList : matchesList)
    {
        double dist = aMatchesList.distance;
        // dist == 0 is skipped for the minimum: identical descriptors would
        // collapse the threshold computation below.
        if (dist < min_dist && dist != 0)
        {
            min_dist = dist;
        }
        if (dist > max_dist)
        {
            max_dist = dist;
        }
    }
    System.out.println("max_dist : " + max_dist);
    System.out.println("min_dist : " + min_dist);
    // If even the best match is this far away, the template is not present.
    if (min_dist > 50)
    {
        System.out.println("No match found");
        System.out.println("Just return ");
        return false;
    }
    // Adaptive distance threshold derived from the observed min/max spread.
    double threshold = 3 * min_dist;
    double threshold2 = 2 * min_dist;
    if (threshold > 75)
    {
        threshold = 75;
    } else if (threshold2 >= max_dist)
    {
        threshold = min_dist * 1.1;
    } else if (threshold >= max_dist)
    {
        threshold = threshold2 * 1.4;
    }
    System.out.println("Threshold : " + threshold);
    for (int i = 0; i < matchesList.size(); i++)
    {
        double dist = (double) matchesList.get(i).distance;
        if (dist < threshold)
        {
            // FIX (perf): reuse the already-materialized matchesList instead
            // of re-converting the Mat with matches.toList() on every hit.
            bestMatches.add(matchesList.get(i));
            //System.out.println(String.format(i + " best match added : %s", dist));
        }
    }
    matchesFiltered.fromList(bestMatches);
    System.out.println("matchesFiltered.size() : " + matchesFiltered.size());
    // 4 is the minimum correspondence count needed to fit a homography.
    if (matchesFiltered.rows() >= 4)
    {
        System.out.println("match found");
        return true;
    } else
    {
        return false;
    }
}
My test is the following:
#Test
public void testFeatureMatching() throws IOException
{
BufferedImage completeImage = getBufferedImageFromClasspath("lena.png");
BufferedImage subImage = getBufferedImageFromClasspath("lips.png");
boolean matches = performFeatureMatching(completeImage, subImage);
assertTrue(matches);
}
The example images are the following:
Since the lower image is cut out of the upper one it should definitely be found but the match returns false.
Furthermore there are still deprecations left in the code (related question):
Warning:(7, 29) java: org.opencv.features2d.DescriptorExtractor in org.opencv.features2d has been deprecated
Warning:(37, 17) java: org.opencv.features2d.DescriptorExtractor in org.opencv.features2d has been deprecated
Warning:(37, 76) java: org.opencv.features2d.DescriptorExtractor in org.opencv.features2d has been deprecated
Warning:(37, 49) java: org.opencv.features2d.DescriptorExtractor in org.opencv.features2d has been deprecated
Can someone fix the deprecations and/or the general code itself to make feature detection work properly because I can't get it to detect anything correctly?

Compare Camera captured images with computer generated images

I am making an app which compare camera captured image with computer generated images and these images are stored in database.
I am using OpenCV for image comparison, but it only compares computer-generated images correctly. When I capture the same image with the camera and compare it with the computer-generated image, it does not match them accurately. Is there any way I can compare camera images as well?
Here is my code
/**
 * Entry point: compares a computer-generated image against a camera capture
 * and reports whether they match.
 */
public static void main(String[] args) {
    // Resolve both test images relative to the working directory.
    final String baseDir = System.getProperty("user.dir");
    final String generatedImage = baseDir + "/images/266.jpg";
    final String capturedImage = baseDir + "/images/20160805_121529.jpg";

    // A positive count of good feature matches is treated as "same image".
    final int matchCount = compareFeature(generatedImage, capturedImage);
    System.out.println(matchCount > 0 ? "Two images are same." : "Two images are different.");
}
/**
 * Compare that two images is similar using feature mapping
 * (ORB keypoints + brute-force Hamming matching).
 * @author minikim
 * @param filename1 - the first image
 * @param filename2 - the second image
 * @return integer - count that has the similarity within images
 */
public static int compareFeature(String filename1, String filename2) {
    int retVal = 0;
    long startTime = System.currentTimeMillis();
    // NOTE(review): better placed in a static initializer so the native lib
    // loads once; kept here to preserve the original call pattern (repeat
    // loads of an already-loaded library in the same classloader are no-ops).
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    // Load images to compare
    Mat img1 = Imgcodecs.imread(filename1, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    Mat img2 = Imgcodecs.imread(filename2, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    // Declare key point of images
    MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
    MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
    Mat descriptors1 = new Mat();
    Mat descriptors2 = new Mat();
    // FIX (deprecation): FeatureDetector/DescriptorExtractor are deprecated.
    // ORB.create() returns a Feature2D that both detects keypoints and
    // computes the same ORB descriptors.
    ORB orb = ORB.create();
    // Detect key points
    orb.detect(img1, keypoints1);
    orb.detect(img2, keypoints2);
    // Extract descriptors
    orb.compute(img1, keypoints1, descriptors1);
    orb.compute(img2, keypoints2, descriptors2);
    // Definition of descriptor matcher: Hamming distance suits binary ORB descriptors.
    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
    // Match points of two images
    MatOfDMatch matches = new MatOfDMatch();
    // Avoid to assertion failed
    // Assertion failed (type == src2.type() && src1.cols == src2.cols && (type == CV_32F || type == CV_8U)
    if (descriptors2.cols() == descriptors1.cols()) {
        matcher.match(descriptors1, descriptors2, matches);
        // Check matches of key points
        DMatch[] match = matches.toArray();
        double max_dist = 0; double min_dist = 100;
        // FIX (robustness): iterate over the actual match array; indexing it
        // by descriptors1.rows() risks ArrayIndexOutOfBoundsException if the
        // matcher returns fewer matches than query descriptors.
        for (int i = 0; i < match.length; i++) {
            double dist = match[i].distance;
            if (dist < min_dist) min_dist = dist;
            if (dist > max_dist) max_dist = dist;
        }
        System.out.println("max_dist=" + max_dist + ", min_dist=" + min_dist);
        // Extract good images (distances are under 10)
        for (int i = 0; i < match.length; i++) {
            if (match[i].distance <= 10) {
                retVal++;
            }
        }
        System.out.println("matching count=" + retVal);
    }
    long estimatedTime = System.currentTimeMillis() - startTime;
    System.out.println("estimatedTime=" + estimatedTime + "ms");
    return retVal;
}
Here is pictures that i am comparing
And here is camera captured image
Thanks in Advance

CR4E Crystal Reports save modified ChartObject

I am attempting to programatically modify a chart within a Crystal Report using Java. The java then feeds the report to the viewer. Removing an item works, modifying does not.
//Experiment : Can we programatically modify a chart?
ReportDefController rdc = reportClientDocument.getReportDefController();
ReportObjectController roc = rdc.getReportObjectController();
ReportObjects ros = roc.getReportObjectsByKind(ReportObjectKind.chart);
logger.debug("There are " + ros.size() + " chart items");
IChartObject ro = null;
ISection iSection = null;
for (int i = 0; i < ros.size(); i++){
    // FIX: work on a CLONE of the chart object. Mutating the live instance
    // and then passing it as both the "old" and "new" object gives the SDK
    // nothing to diff, so no change is ever persisted. Clone first, mutate
    // the clone, then pass (original, clone) to modifyChartObject.
    ro = (IChartObject) ros.get(i).clone(false);
    String rn = ro.getName();
    ChartStyle cs = (ChartStyle) ro.getChartStyle();
    cs.setEnableDataAxisAutoRange(false);
    cs.setEnableShowLegend(false);
    cs.setEnableDepthEffect(true);
    cs.setIsVertical(true);
    cs.setDataAxisMinValue(-2.0);
    cs.setDataAxisMaxValue(100.0);
    // Read the values back purely for the debug log below.
    Double minVal = (Double) cs.getDataAxisMinValue();
    Double maxVal = (Double) cs.getDataAxisMaxValue();
    boolean d = cs.getEnableDepthEffect();
    boolean l = cs.getEnableShowLegend();
    boolean a = cs.getEnableDataAxisAutoRange();
    boolean v = cs.getIsVertical();
    ro.setChartStyle(cs);
    int sectionCode = ro.getSectionCode();
    iSection = rdc.getReportDefinition().getDetailArea().getSections().getSection(0);
    try
    {
        // Original (un-cloned) object vs. the modified clone.
        rdc.modifyChartObject((IChartObject) ros.get(i), ro);
        reportClientDocument.refreshReportDocument();
        reportClientDocument.save();
    } catch (ReportSDKException e){
        writer.println("Couldn't modify graph");
        e.printStackTrace();
    }
    logger.debug("Chart named "+rn + " With Min Val " + minVal + " and Max Val " + maxVal +" with depth " + d + " and legend " + l + " autorange " + a + " Vertical " + v);
}
I've tried the modify method of ReportObjectController and the modifychartobject method of ReportDefController, and have tried refreshReportDocument and save to attempt to get something to update, but nothing's happening. Logger is showing that the values are updating as you'd expect. Any ideas?
My mistake was in not cloning the object at...
ro = (IChartObject)ros.get(i)
...so it should read...
ro = (IChartObject)ros.get(i).clone(false)
..so that..
roc.modify((IChartObject)ros.get(i), ro)
.. will now work. Hope this helps someone else having similar fun and games.

Categories