gdx-bullet hello world first use - java

I am trying to use gdx-bullet standalone and run the Hello World example like the C++ one, but I am getting position values from 0 to 2 and up.
Why is that, when the same example works fine in jBullet and in C++ Bullet?
Any idea what the cause might be? Maybe it is the setToTranslation call or the string output.
Here is the code:
import java.util.ArrayList;

import com.badlogic.gdx.math.Matrix4;
import com.badlogic.gdx.math.Vector3;
import com.badlogic.gdx.physics.bullet.Bullet;
import com.badlogic.gdx.physics.bullet.collision.*;
import com.badlogic.gdx.physics.bullet.dynamics.*;
import com.badlogic.gdx.physics.bullet.linearmath.btDefaultMotionState;

public static void main(String[] args) {
    Bullet.init();
    ArrayList<btCollisionShape> collisionShapes = new ArrayList<btCollisionShape>();
    btCollisionConfiguration collisionConfig = new btDefaultCollisionConfiguration();
    btCollisionDispatcher dispatcher = new btCollisionDispatcher(collisionConfig);
    btBroadphaseInterface overlappingPairCache = new btDbvtBroadphase();
    btSequentialImpulseConstraintSolver solver = new btSequentialImpulseConstraintSolver();
    btDiscreteDynamicsWorld dynamicWorld = new btDiscreteDynamicsWorld(dispatcher, overlappingPairCache, solver, collisionConfig);
    dynamicWorld.setGravity(new Vector3(0, -10f, 0));

    { // static ground box
        btCollisionShape groundShape = new btBoxShape(new Vector3(50.f, 50.f, 50.f));
        collisionShapes.add(groundShape);
        Matrix4 groundTransform = new Matrix4();
        groundTransform.setToTranslation(new Vector3(0, -56, 0));
        float mass = 0f;
        boolean isDynamic = (mass != 0f);
        Vector3 localInertia = new Vector3(0, 0, 0);
        if (isDynamic)
            groundShape.calculateLocalInertia(mass, localInertia);
        btDefaultMotionState myMotionState = new btDefaultMotionState(groundTransform);
        btRigidBody.btRigidBodyConstructionInfo rbInfo =
                new btRigidBody.btRigidBodyConstructionInfo(mass, myMotionState, groundShape, localInertia);
        btRigidBody body = new btRigidBody(rbInfo);
        dynamicWorld.addRigidBody(body);
    }

    { // dynamic falling sphere
        btCollisionShape colShape = new btSphereShape(1.f);
        collisionShapes.add(colShape);
        Matrix4 startTransform = new Matrix4();
        startTransform.setToTranslation(new Vector3(2, 10, 0));
        float mass = 1;
        boolean isDynamic = (mass != 0f);
        Vector3 localInertia = new Vector3(0, 0, 0);
        if (isDynamic)
            colShape.calculateLocalInertia(mass, localInertia);
        btDefaultMotionState myMotionState = new btDefaultMotionState(startTransform);
        btRigidBody.btRigidBodyConstructionInfo rbInfo =
                new btRigidBody.btRigidBodyConstructionInfo(mass, myMotionState, colShape, localInertia);
        btRigidBody body = new btRigidBody(rbInfo);
        dynamicWorld.addRigidBody(body);
    }

    for (int i = 0; i < 150; i++) {
        dynamicWorld.stepSimulation(1.f / 60.f, 10);
        for (int j = dynamicWorld.getNumCollisionObjects() - 1; j >= 0; j--) {
            btCollisionObject obj = dynamicWorld.getCollisionObjectArray().atConst(j);
            btRigidBody body = (btRigidBody) obj;
            if (body != null && body.getMotionState() != null) {
                Matrix4 trans = new Matrix4();
                body.getMotionState().getWorldTransform(trans);
                // val[12..14] hold the translation column of the column-major matrix
                System.out.println(
                        "world pos " + j + " = " + trans.val[12] + ", " + trans.val[13] + ", " + trans.val[14]);
            }
        }
    }

    collisionShapes.clear();
    dynamicWorld.dispose();
    overlappingPairCache.dispose();
    dispatcher.dispose();
    collisionConfig.dispose();
}
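For reference, an equivalent way to read the position is through Matrix4.getTranslation, which names the components explicitly instead of relying on raw val[] indices; a minimal sketch of just the print loop (it should print the same numbers as the val[12..14] version):
Matrix4 trans = new Matrix4();
Vector3 position = new Vector3();
body.getMotionState().getWorldTransform(trans);
// getTranslation copies val[12], val[13], val[14] into x, y, z
trans.getTranslation(position);
System.out.println("world pos " + j + " = " + position.x + ", " + position.y + ", " + position.z);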

Related

Tensorflow lite Interpreter crash when run after resizeInput()

I have a TensorFlow Lite model whose input shape is {1,320,320,3} and whose output data types are {FLOAT32, FLOAT32, INT32}.
Interpreter#runForMultipleInputsOutputs crashes after I resize the input to {1,224,320,3}.
My TensorFlow Lite version:
implementation('org.tensorflow:tensorflow-lite:0.0.0-nightly') { changing = true }
implementation('org.tensorflow:tensorflow-lite-gpu:0.0.0-nightly') { changing = true }
implementation('org.tensorflow:tensorflow-lite-support:0.0.0-nightly') { changing = true }
Creating the input tensor and preparing the output tensors:
inputImageBuffer = new TensorImage(imageDataType);
outputArr = new TensorBuffer[tflite.getOutputTensorCount()];
resizeOutputArr = new TensorBuffer[tflite.getOutputTensorCount()];
for (int i = 0, count = tflite.getOutputTensorCount(); i < count; i++) {
    DataType pDataType = tflite.getOutputTensor(i).dataType();
    if (pDataType == DataType.INT32) pDataType = DataType.FLOAT32;
    outputArr[i] = TensorBuffer.createFixedSize(i == 0 ? new int[]{imageSizeY, imageSizeX} : new int[]{imageSizeX}, pDataType);
    resizeOutputArr[i] = TensorBuffer.createDynamic(pDataType);
}
Resizing the input image buffer:
// Loads bitmap into a TensorImage.
inputImageBuffer.load(bitmap);
float sc = bitmap.getWidth() / 320.0f;
int scaleWid = Math.round(bitmap.getWidth() / sc);
int scaleHei = Math.round(bitmap.getHeight() / sc);
imageSizeY = scaleHei / 32 * 32; // round down to a multiple of 32
// Creates processor for the TensorImage.
int numRotation = sensorOrientation / 90;
this.processor =
        new ImageProcessor.Builder()
                .add(new ResizeOp(scaleHei, scaleWid, ResizeOp.ResizeMethod.NEAREST_NEIGHBOR))
                .add(new ResizeWithCropOrPadOp(imageSizeY, imageSizeX))
                .add(new Rot90Op(numRotation))
                .add(new GrayOp(imageSizeY, imageSizeX)) // GrayOp is a custom op
                .add(getPreprocessNormalizeOp())
                .build();
processor.process(inputImageBuffer);
Run:
int imageTensorIndex = 0;
int[] imageShape = tflite.getInputTensor(imageTensorIndex).shape(); // {1, height, width, 3}
if (imageSizeY != imageShape[1] || imageShape[1] == 0) { // imageSizeY changed
    tflite.resizeInput(imageTensorIndex, new int[]{1, imageSizeY, imageSizeX, 3}); // resize input tensor
    for (int i = 0, count = tflite.getOutputTensorCount(); i < count; i++) { // recreate output buffers
        DataType pDataType = tflite.getOutputTensor(i).dataType();
        if (pDataType == DataType.INT32) pDataType = DataType.FLOAT32;
        outputArr[i] = TensorBuffer.createFixedSize(i == 0 ? new int[]{imageSizeY, imageSizeX} : new int[]{imageSizeX}, pDataType);
    }
}
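One variant that may be worth trying here (an assumption on my part, not a confirmed fix): Interpreter.allocateTensors() can be called explicitly right after resizeInput, so the tensors are reallocated before the run rather than lazily during it, and the output shapes queried afterwards reflect the new input size. A minimal sketch:
// Sketch (untested assumption): force reallocation immediately after the resize,
// so shapes are valid before runForMultipleInputsOutputs is called.
tflite.resizeInput(imageTensorIndex, new int[]{1, imageSizeY, imageSizeX, 3});
tflite.allocateTensors(); // reallocates all tensors for the new input shape
int[] resizedOutShape = tflite.getOutputTensor(0).shape(); // now reflects the resize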
Map<Integer, Object> outputMap = new HashMap<>();
for (int i = 0, cnt = outputArr.length; i < cnt; i++) { // rewind output buffers
    outputMap.put(i, outputArr[i].getBuffer().rewind());
    resizeOutputArr[i].getBuffer().rewind();
}
try {
    tflite.runForMultipleInputsOutputs(new Object[]{inputImageBuffer.getBuffer()}, outputMap);
} catch (IllegalArgumentException e) {
    e.printStackTrace();
    return null;
}
for (int i = 0, count = tflite.getOutputTensorCount(); i < count; i++) { // reload outputArr with the new shape
    int[] shape = tflite.getOutputTensor(i).shape();
    outputArr[i].getBuffer().limit(computeFlatSize(shape) * DataType.FLOAT32.byteSize());
    resizeOutputArr[i].loadBuffer(outputArr[i].getBuffer(), shape);
    outputArr[i].getBuffer().clear();
    if (i == count - 1 && shape != null && shape[0] <= 0) return null;
}
return resizeOutputArr;
protected static int computeFlatSize(@NonNull int[] shape) {
    SupportPreconditions.checkNotNull(shape, "Shape cannot be null.");
    int prod = 1;
    for (int i = 0; i < shape.length; ++i) {
        prod *= shape[i];
    }
    return prod;
}
I get the exception below when there is no recognized object in the bitmap:
W: java.lang.IllegalArgumentException: Internal error: Tensor hasn't been allocated.
W: at org.tensorflow.lite.Tensor.buffer(Native Method)
W: at org.tensorflow.lite.Tensor.buffer(Tensor.java:493)
W: at org.tensorflow.lite.Tensor.copyTo(Tensor.java:264)
W: at org.tensorflow.lite.Tensor.copyTo(Tensor.java:254)
W: at org.tensorflow.lite.NativeInterpreterWrapper.run(NativeInterpreterWrapper.java:170)
W: at org.tensorflow.lite.Interpreter.runForMultipleInputsOutputs(Interpreter.java:343)
W: at com.fotric.irdetector.qf.tflite.Classifier.recognizeImage(Classifier.java:283)
This exception reduces recognition performance. Can someone help me? Thanks!

How to adjust the threshold for template matching in openCV (java)?

I am running template matching using the OpenCV 3.4.7 Android SDK (Java).
The code works almost perfectly: when the template matches, it draws a rectangle over the matching area. The problem is that even when there is no match, it draws a random rectangle. I think that happens because the threshold is not set correctly. If so, can someone please help me out?
Here's the code:
public static void run(String inFile, String templateFile, String outFile,
        int match_method) {
    Mat img = Imgcodecs.imread(inFile);
    Mat templ = Imgcodecs.imread(templateFile);

    // Create the result matrix
    int result_cols = img.cols() - templ.cols() + 1;
    int result_rows = img.rows() - templ.rows() + 1;
    Mat result = new Mat(result_rows, result_cols, CvType.CV_32FC1);

    // Do the Matching and Normalize
    Imgproc.matchTemplate(img, templ, result, match_method);
    Core.normalize(result, result, 0, 1, Core.NORM_MINMAX, -1, new Mat());

    // Localizing the best match with minMaxLoc
    Core.MinMaxLocResult mmr = Core.minMaxLoc(result);
    Point matchLoc;
    if (match_method == Imgproc.TM_SQDIFF
            || match_method == Imgproc.TM_SQDIFF_NORMED) {
        matchLoc = mmr.minLoc;
    } else {
        matchLoc = mmr.maxLoc;
    }

    // Show me what you got
    Imgproc.rectangle(img, matchLoc, new Point(matchLoc.x + templ.cols(),
            matchLoc.y + templ.rows()), new Scalar(0, 0, 128));

    // Save the visualized detection.
    System.out.println("Writing " + outFile);
    Imgcodecs.imwrite(outFile, img);
}
You can use Imgproc.TM_CCOEFF_NORMED or Imgproc.TM_CCORR_NORMED with mmr.maxVal >= 0.8. That should take care of most of your false positives.
Sample Code:
import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

import java.io.File;

public class templateMatchingTester {

    static {
        nu.pattern.OpenCV.loadShared();
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    }

    private static Mat createMatrixFromImage(String imagePath) {
        Mat imageMatrix = Imgcodecs.imread(imagePath);
        Mat greyImage = new Mat();
        Imgproc.cvtColor(imageMatrix, greyImage, Imgproc.COLOR_BGR2GRAY);
        return greyImage;
    }

    private static boolean matchTemplate(String pathToInputImage, String pathToTemplate) {
        Mat inputImage = createMatrixFromImage(pathToInputImage);
        Mat templateImage = createMatrixFromImage(pathToTemplate);

        // Create the result matrix (matchTemplate output is single-channel float)
        int result_cols = inputImage.cols() - templateImage.cols() + 1;
        int result_rows = inputImage.rows() - templateImage.rows() + 1;
        Mat result = new Mat(result_rows, result_cols, CvType.CV_32FC1);

        int match_method = Imgproc.TM_CCOEFF_NORMED; // or Imgproc.TM_CCORR_NORMED
        Imgproc.matchTemplate(inputImage, templateImage, result, match_method);
        Core.MinMaxLocResult mmr = Core.minMaxLoc(result);

        double minMatchQuality = 0.85;
        System.out.println(mmr.maxVal);
        return mmr.maxVal >= minMatchQuality;
    }

    public static void main(String args[]) {
        String template = "path/to/your/templateImage";
        final File folder = new File("path/to/your/testImagesFolder/");
        int matchCount = 0;
        for (final File fileEntry : folder.listFiles()) {
            if (matchTemplate(fileEntry.getPath(), template)) {
                matchCount += 1;
            } else {
                System.out.println(fileEntry.getPath());
            }
        }
        System.out.println(matchCount);
    }
}
Use a normed match method to ensure your match value is in [0..1].
Replace this line
Core.normalize(result, result, 0, 1, Core.NORM_MINMAX, -1, new Mat());
with a thresholding operation. Otherwise a best match of 0.9 would become 1 after the second normalization and you would lose the actual match "quality" information.
Normalizing the result of the template matching will always push your best match to 1, making it impossible to discard a bad match.
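A minimal sketch of that replacement, reusing the names from the question's code and assuming TM_CCOEFF_NORMED with an example cutoff of 0.8 (the cutoff is something you tune per template):
// Threshold the raw normed score instead of re-normalizing the result matrix.
Imgproc.matchTemplate(img, templ, result, Imgproc.TM_CCOEFF_NORMED);
Core.MinMaxLocResult mmr = Core.minMaxLoc(result);
double threshold = 0.8; // assumed cutoff; tune per template
if (mmr.maxVal >= threshold) {
    // genuine match: draw the rectangle at mmr.maxLoc as before
    Imgproc.rectangle(img, mmr.maxLoc,
            new Point(mmr.maxLoc.x + templ.cols(), mmr.maxLoc.y + templ.rows()),
            new Scalar(0, 0, 128));
}
// below the threshold: report "no match" and draw nothing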
I wrote an app that takes a screenshot of the game Overwatch and attempts to tell who is on each team, using template matching and OpenCV. The project needs to iterate over the result image and check the values:
OpenCVUtils.getPointsFromMatAboveThreshold(result, 0.90f)
public static void scaleAndCheckAll(String guid) {
    Mat source = imread(IMG_PROC_PATH + guid); // load the source image
    Mat scaledSrc = new Mat(defaultScreenshotSize, source.type());
    resize(source, scaledSrc, defaultScreenshotSize);
    Mat sourceGrey = new Mat(scaledSrc.size(), CV_8UC1);
    cvtColor(scaledSrc, sourceGrey, COLOR_BGR2GRAY);

    for (String hero : getCharacters()) {
        Mat template = OpenCVUtils.matFromJar(TEMPLATES_FOLDER + hero + ".png", 0); // load a template
        Size size = new Size(sourceGrey.cols() - template.cols() + 1, sourceGrey.rows() - template.rows() + 1);
        Mat result = new Mat(size, CV_32FC1);
        matchTemplate(sourceGrey, template, result, TM_CCORR_NORMED); // get results
        Scalar color = OpenCVUtils.randColor();
        List<Point> points = OpenCVUtils.getPointsFromMatAboveThreshold(result, 0.90f);
        for (Point point : points) {
            //rectangle(scaledSrc, new Rect(point.x(), point.y(), template.cols(), template.rows()), color, -2, 0, 0);
            putText(scaledSrc, hero, point, FONT_HERSHEY_PLAIN, 2, color);
        }
    }

    String withExt = IMG_PROC_PATH + guid + ".png";
    imwrite(withExt, scaledSrc);
    File noExt = new File(IMG_PROC_PATH + guid);
    File ext = new File(withExt);
    noExt.delete();
    ext.renameTo(noExt);
}
The other method:
public static List<Point> getPointsFromMatAboveThreshold(Mat m, float t) {
    List<Point> matches = new ArrayList<Point>();
    FloatIndexer indexer = m.createIndexer();
    for (int y = 0; y < m.rows(); y++) {
        for (int x = 0; x < m.cols(); x++) {
            if (indexer.get(y, x) > t) {
                System.out.println("(" + x + "," + y + ") = " + indexer.get(y, x));
                matches.add(new Point(x, y));
            }
        }
    }
    return matches;
}
You can just take the first point from the list, or check how close together the points are if you expect multiple matches.
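If you do get clusters of adjacent hits, a crude way to collapse them is to drop any point that lies within some minimum distance of one you already kept. A sketch, assuming the same JavaCV Point type as above; minDist is an assumed parameter to tune (the template width is a reasonable start):
// Keep a match point only if no already-kept point is within minDist pixels.
public static List<Point> dedupePoints(List<Point> raw, double minDist) {
    List<Point> kept = new ArrayList<Point>();
    for (Point candidate : raw) {
        boolean tooClose = false;
        for (Point existing : kept) {
            double dx = candidate.x() - existing.x();
            double dy = candidate.y() - existing.y();
            if (dx * dx + dy * dy < minDist * minDist) {
                tooClose = true;
                break;
            }
        }
        if (!tooClose) kept.add(candidate);
    }
    return kept;
}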

Mouth detection using openCv returns a lot of circles in only one face

I'm using OpenCV with Android Studio to detect faces in an image, along with the eyes and the mouth in each face. The problem is that whenever I try to detect the mouth, it returns multiple circles on a single face, which is wrong.
Here is the code I added for mouth detection:
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
    @Override
    public void onManagerConnected(int status) {
        switch (status) {
            ...
            InputStream iserM = getResources().openRawResource(
                    R.raw.haarcascade_mcs_mouth);
            File cascadeDirERM = getDir("cascadeERM",
                    Context.MODE_PRIVATE);
            File cascadeFileERM = new File(cascadeDirERM,
                    "haarcascade_mcs_mouth.xml");
            FileOutputStream oserM = new FileOutputStream(cascadeFileERM);
            byte[] bufferERM = new byte[4096];
            int bytesReadERM;
            while ((bytesReadERM = iserM.read(bufferERM)) != -1) {
                oserM.write(bufferERM, 0, bytesReadERM);
            }
            iserM.close();
            oserM.close();
            ...
            // here begins
            mJavaDetectorMouth = new CascadeClassifier(
                    cascadeFileERM.getAbsolutePath());
            if (mJavaDetectorMouth.empty()) {
                Log.e(TAG, "Failed to load cascade classifier");
                mJavaDetectorMouth = null;
            } else
                Log.i(TAG, "Loaded cascade classifier from "
                        + mCascadeFile.getAbsolutePath());
            // here ends
            ...
    }
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    ...
    Rect r = facesArray[i];
    MatOfRect mouths = new MatOfRect();
    Mat faceROI = mRgba.submat(facesArray[i]);
    mJavaDetectorMouth.detectMultiScale(faceROI, mouths, 1.1, 1, 1, new org.opencv.core.Size(30, 30), new org.opencv.core.Size());
    Rect[] mouthArray = mouths.toArray();
    for (int j = 0; j < mouthArray.length; j++) {
        Point center1 = new Point(facesArray[i].x + mouthArray[j].x + mouthArray[j].width * 0.5,
                facesArray[i].y + mouthArray[j].y + mouthArray[j].height * 0.5);
        int radius = (int) Math.round(mouthArray[j].width / 2);
        Imgproc.circle(mRgba, center1, radius, new Scalar(255, 0, 0), 4, 8, 0);
    }
    ...
}
I've been looking around since I posted the question and tried a lot of things, but I finally found the solution.
I changed:
mJavaDetectorMouth.detectMultiScale(faceROI, mouths, 1.1, 1, 1, new org.opencv.core.Size(30, 30), new org.opencv.core.Size());
to:
mJavaDetectorMouth.detectMultiScale(faceROI, mouths, 1.1, 2,
        Objdetect.CASCADE_FIND_BIGGEST_OBJECT
                | Objdetect.CASCADE_SCALE_IMAGE, new org.opencv.core.Size(30, 30), new org.opencv.core.Size());
and that solved the issue: raising minNeighbors from 1 to 2 discards weakly supported detections, and CASCADE_FIND_BIGGEST_OBJECT makes the classifier return only the largest candidate, so a face no longer gets multiple mouth circles.

Libgdx - TextureAtlas has stopped my application

I'm loading Assets from the main game class, which extends the libGDX class Game. In the create() method of that class I'm calling Assets.loadAtlas(), which runs:
public static void loadAtlas() {
    String textureFile = "data/rubenSprite.txt";
    myTextures = new TextureAtlas(Gdx.files.internal(textureFile));

    // five walk frames: wframe1..wframe5
    TextureRegion[] walkRightFrames = new TextureRegion[5];
    for (int i = 0; i < 5; i++) {
        walkRightFrames[i] = myTextures.findRegion("wframe" + (i + 1));
    }
    rubenWalkRight = new Animation(0.2f, walkRightFrames[0], walkRightFrames[1],
            walkRightFrames[2], walkRightFrames[3], walkRightFrames[4]);

    TextureRegion[] walkLeftFrames = new TextureRegion[5];
    for (int i = 0; i < 5; i++) {
        walkLeftFrames[i] = new TextureRegion(walkRightFrames[i]);
        walkLeftFrames[i].flip(true, false);
    }
    rubenWalkLeft = new Animation(0.2f, walkLeftFrames[0], walkLeftFrames[1],
            walkLeftFrames[2], walkLeftFrames[3], walkLeftFrames[4]);

    // four idle frames: iframe1..iframe4
    TextureRegion[] idleRightFrames = new TextureRegion[4];
    for (int i = 0; i < 4; i++) {
        idleRightFrames[i] = myTextures.findRegion("iframe" + (i + 1));
    }
    rubenIdleRight = new Animation(0.2f, idleRightFrames[0], idleRightFrames[1],
            idleRightFrames[2], idleRightFrames[3]);

    TextureRegion[] idleLeftFrames = new TextureRegion[4];
    for (int i = 0; i < 4; i++) {
        idleLeftFrames[i] = new TextureRegion(idleRightFrames[i]);
        idleLeftFrames[i].flip(true, false);
    }
    rubenIdleLeft = new Animation(0.2f, idleLeftFrames[0], idleLeftFrames[1],
            idleLeftFrames[2], idleLeftFrames[3]);

    // five jump frames: jframe1..jframe5
    TextureRegion[] jumpRightFrames = new TextureRegion[5];
    for (int i = 0; i < 5; i++) {
        jumpRightFrames[i] = myTextures.findRegion("jframe" + (i + 1));
    }
    rubenJumpRight = new Animation(0.2f, jumpRightFrames[0], jumpRightFrames[1],
            jumpRightFrames[2], jumpRightFrames[3], jumpRightFrames[4]);

    TextureRegion[] jumpLeftFrames = new TextureRegion[5];
    for (int i = 0; i < 5; i++) {
        jumpLeftFrames[i] = new TextureRegion(jumpRightFrames[i]);
        jumpLeftFrames[i].flip(true, false);
    }
    rubenJumpLeft = new Animation(0.2f, jumpLeftFrames[0], jumpLeftFrames[1],
            jumpLeftFrames[2], jumpLeftFrames[3], jumpLeftFrames[4]);

    TextureRegion fallRight = myTextures.findRegion("fall");
    rubenFallRight = new Animation(0f, fallRight);

    TextureRegion fallLeft = new TextureRegion(fallRight);
    fallLeft.flip(true, false);
    rubenFallLeft = new Animation(0f, fallLeft);
}
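As an aside, those hand-unrolled frame arrays are easy to get wrong (each array size must match its loop count and the number of regions in the atlas). A small helper like the following sketch avoids repeating the counts; it assumes the Animation constructor used above accepts a frame duration plus varargs key frames:
// Sketch: build an Animation from numbered atlas regions ("wframe1" .. "wframeN").
private static Animation loadAnimation(TextureAtlas atlas, String prefix,
                                       int frameCount, float frameDuration) {
    TextureRegion[] frames = new TextureRegion[frameCount];
    for (int i = 0; i < frameCount; i++) {
        frames[i] = atlas.findRegion(prefix + (i + 1)); // regions are 1-indexed
    }
    return new Animation(frameDuration, frames);
}
// usage: rubenWalkRight = loadAnimation(myTextures, "wframe", 5, 0.2f);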
From the tutorials I've looked at, it seems correct to me. I've given the player states, and I draw the animations inside the main renderer depending on the state the player is in. This code seems fine as well:
private void renderRuben() {
    TextureRegion keyFrame;
    switch (world.ruben.getState()) {
        case Ruben.RUBEN_STATE_HIT:
            keyFrame = Assets.bobHit;
            break;
        case Ruben.RUBEN_STATE_WALKING:
            if (Ruben.facingRight) {
                keyFrame = Assets.rubenWalkRight.getKeyFrame(world.ruben.getStateTime(), Animation.ANIMATION_NONLOOPING);
            } else {
                keyFrame = Assets.rubenWalkLeft.getKeyFrame(world.ruben.getStateTime(), Animation.ANIMATION_NONLOOPING);
            }
            break;
        case Ruben.RUBEN_STATE_FALL:
            if (Ruben.facingRight) {
                keyFrame = Assets.rubenFallRight.getKeyFrame(world.ruben.getStateTime(), Animation.ANIMATION_NONLOOPING);
            } else {
                keyFrame = Assets.rubenFallLeft.getKeyFrame(world.ruben.getStateTime(), Animation.ANIMATION_NONLOOPING);
            }
            break;
        case Ruben.RUBEN_STATE_JUMP:
            if (Ruben.facingRight) {
                keyFrame = Assets.rubenJumpRight.getKeyFrame(world.ruben.getStateTime(), Animation.ANIMATION_NONLOOPING);
            } else {
                keyFrame = Assets.rubenJumpLeft.getKeyFrame(world.ruben.getStateTime(), Animation.ANIMATION_NONLOOPING);
            }
            break;
        case Ruben.RUBEN_STATE_IDLE:
        default:
            if (Ruben.facingRight) {
                keyFrame = Assets.rubenIdleRight.getKeyFrame(world.ruben.getStateTime(), Animation.ANIMATION_LOOPING);
            } else {
                keyFrame = Assets.rubenIdleLeft.getKeyFrame(world.ruben.getStateTime(), Animation.ANIMATION_LOOPING);
            }
    }

    float side = world.ruben.getVelocity().x < 0 ? -1 : 1;
    if (side < 0)
        batch.draw(keyFrame, world.ruben.position.x + 0.5f, world.ruben.position.y - 0.5f, side * Ruben.RUBEN_WIDTH, Ruben.RUBEN_HEIGHT);
    else
        batch.draw(keyFrame, world.ruben.position.x - 0.5f, world.ruben.position.y - 0.5f, side * Ruben.RUBEN_WIDTH, Ruben.RUBEN_HEIGHT);
}
I packed the atlases using the TexturePacker GUI and saved the png and txt files into the /assets/data folder.
The overall project has no errors; it just says 'application not responding' when it opens up in the emulator. What am I doing wrong?
The create() method of the game class:
public class GameView extends Game {
    // used by all screens
    public SpriteBatch batcher;

    @Override
    public void create() {
        batcher = new SpriteBatch();
        Settings.load();
        Assets.loadAtlas();
        setScreen(new MainMenuScreen(this));
    }
}
Overall it was something to do with the emulator, not the application. The emulator wasn't compatible with OpenGL ES 2.0, so I downloaded Genymotion and now it works fine.
http://www.genymobile.com/en/

Use different color for path segment in the map view overlay

My application can draw a path/direction using JSON. How can I use different colors for different segments of the path given by JSON?
For example, from point A to point B is red, B to C is yellow, C to D is green, and so on.
The code is:
Getting the points in JSON:
private void parsing(GeoPoint start, GeoPoint end) throws ClientProtocolException, IOException, JSONException, URISyntaxException {
    HttpClient httpclient = new DefaultHttpClient();
    StringBuilder urlstring = new StringBuilder();
    urlstring.append("https://maps.googleapis.com/maps/api/directions/json?origin=")
            .append(Double.toString((double) start.getLatitudeE6() / 1E6)).append(",")
            .append(Double.toString((double) start.getLongitudeE6() / 1E6)).append("&destination=")
            .append(Double.toString((double) end.getLatitudeE6() / 1E6)).append(",")
            .append(Double.toString((double) end.getLongitudeE6() / 1E6))
            .append("&sensor=false");
    //urlstring.append("http://maps.googleapis.com/maps/api/directions/json?origin=Toronto&destination=Montreal&sensor=true");
    url = new URI(urlstring.toString());
    HttpPost httppost = new HttpPost(url);

    HttpResponse response = httpclient.execute(httppost);
    HttpEntity entity = response.getEntity();
    InputStream is = entity.getContent();

    BufferedReader reader = new BufferedReader(new InputStreamReader(is, "iso-8859-1"), 8);
    StringBuilder sb = new StringBuilder();
    sb.append(reader.readLine() + "\n");
    String line;
    while ((line = reader.readLine()) != null) {
        sb.append(line + "\n");
    }
    is.close();
    reader.close();
    String result = sb.toString();

    JSONObject jsonObject = new JSONObject(result);
    JSONArray routeArray = jsonObject.getJSONArray("routes");
    JSONObject routes = routeArray.getJSONObject(0);
    JSONObject overviewPolylines = routes.getJSONObject("overview_polyline");
    String encodedString = overviewPolylines.getString("points");
    List<GeoPoint> pointToDraw = decodePoly(encodedString);

    // Added line:
    mv_mapview.getOverlays().add(new RoutePathOverlay(pointToDraw));
}
Decoding the list of GeoPoints given by JSON:
private List<GeoPoint> decodePoly(String encoded) {
    List<GeoPoint> poly = new ArrayList<GeoPoint>();
    int index = 0, len = encoded.length();
    int lat = 0, lng = 0;
    while (index < len) {
        int b, shift = 0, result = 0;
        do {
            b = encoded.charAt(index++) - 63;
            result |= (b & 0x1f) << shift;
            shift += 5;
        } while (b >= 0x20);
        int dlat = ((result & 1) != 0 ? ~(result >> 1) : (result >> 1));
        lat += dlat;

        shift = 0;
        result = 0;
        do {
            b = encoded.charAt(index++) - 63;
            result |= (b & 0x1f) << shift;
            shift += 5;
        } while (b >= 0x20);
        int dlng = ((result & 1) != 0 ? ~(result >> 1) : (result >> 1));
        lng += dlng;

        GeoPoint p = new GeoPoint((int) (((double) lat / 1E5) * 1E6), (int) (((double) lng / 1E5) * 1E6));
        poly.add(p);
    }
    return poly;
}
And the Overlay class:
public class RoutePathOverlay extends Overlay {
    private int _pathColor;
    private final List<GeoPoint> _points;
    private boolean _drawStartEnd;

    public RoutePathOverlay(List<GeoPoint> points) {
        this(points, Color.GREEN, true);
    }

    public RoutePathOverlay(List<GeoPoint> points, int pathColor, boolean drawStartEnd) {
        _points = points;
        _pathColor = pathColor;
        _drawStartEnd = drawStartEnd;
    }

    private void drawOval(Canvas canvas, Paint paint, Point point) {
        Paint ovalPaint = new Paint(paint);
        ovalPaint.setStyle(Paint.Style.FILL_AND_STROKE);
        ovalPaint.setStrokeWidth(2);
        int _radius = 6;
        RectF oval = new RectF(point.x - _radius, point.y - _radius, point.x + _radius, point.y + _radius);
        canvas.drawOval(oval, ovalPaint);
    }

    public boolean draw(Canvas canvas, MapView mapView, boolean shadow, long when) {
        Projection projection = mapView.getProjection();
        if (shadow == false && _points != null) {
            Point startPoint = null, endPoint = null;
            Path path = new Path();
            // We are creating the path
            for (int i = 0; i < _points.size(); i++) {
                GeoPoint gPointA = _points.get(i);
                Point pointA = new Point();
                projection.toPixels(gPointA, pointA);
                if (i == 0) { // This is the start point
                    startPoint = pointA;
                    path.moveTo(pointA.x, pointA.y);
                } else {
                    if (i == _points.size() - 1) // This is the end point
                        endPoint = pointA;
                    path.lineTo(pointA.x, pointA.y);
                }
            }

            Paint paint = new Paint();
            paint.setAntiAlias(true);
            paint.setColor(_pathColor);
            paint.setStyle(Paint.Style.STROKE);
            paint.setStrokeWidth(5);
            paint.setAlpha(90);
            if (getDrawStartEnd()) {
                if (startPoint != null) {
                    drawOval(canvas, paint, startPoint);
                }
                if (endPoint != null) {
                    drawOval(canvas, paint, endPoint);
                }
            }
            if (!path.isEmpty())
                canvas.drawPath(path, paint);
        }
        return super.draw(canvas, mapView, shadow, when);
    }

    public boolean getDrawStartEnd() {
        return _drawStartEnd;
    }

    public void setDrawStartEnd(boolean markStartEnd) {
        _drawStartEnd = markStartEnd;
    }
}
You are decoding only the overview_polyline.
Each part of the trip (the steps array in the response JSON) is defined as:
{
    distance: {
        text: "89 m",
        value: 89
    },
    duration: {
        text: "1 min",
        value: 8
    },
    end_location: {
        lat: 45.51101000000001,
        lng: -73.5545
    },
    html_instructions: "Enter <b>Rue Saint Antoine E</b>",
    polyline: {
        points: "cvwtG~d}_MaBs#s#W"
    },
    start_location: {
        lat: 45.51026,
        lng: -73.55488000000001
    },
    travel_mode: "DRIVING"
},
As you can see, there is a polyline for each step. Have you tried decoding the single steps, storing them in an array, and then visualizing each section (using a different color for each part of the trip)?
This will give you a resolution of one "sector" of the trip, but it could be a good starting point.
Or you could generate an array for every section of overview_polyline (since it's defined by points, you could create single lines: if your polyline passes through A, B, C and D, you can define 3 segments A-B, B-C, C-D).
I don't know how much this (generating and visualizing an overlay with a lot of pieces in it) will affect performance, since I never used the Maps API in this way (I used it to get the ETA for some trips), so I'm just guessing :D
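A minimal sketch of the per-step idea, reusing the parsing code and the RoutePathOverlay(points, pathColor, drawStartEnd) constructor from the question (the three-color palette is just an example to cycle through):
// Decode each step's polyline separately and add one colored overlay per step.
int[] colors = {Color.RED, Color.YELLOW, Color.GREEN};
JSONArray legs = routes.getJSONArray("legs");
JSONArray steps = legs.getJSONObject(0).getJSONArray("steps");
for (int i = 0; i < steps.length(); i++) {
    String stepPoints = steps.getJSONObject(i)
            .getJSONObject("polyline").getString("points");
    List<GeoPoint> stepPath = decodePoly(stepPoints);
    // cycle through the palette; skip start/end ovals for individual segments
    mv_mapview.getOverlays().add(
            new RoutePathOverlay(stepPath, colors[i % colors.length], false));
}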
