Coordinate Rotation - java

I need to rotate the coordinates of the pixels of an image (or a matrix).
My aim is to calculate, for each point, where it will be positioned after a rotation, and vice versa.
I'm currently using this procedure:
private void setRot(int i) {
    this.rot = -i / (180.0 / Math.PI);
    this.coseno = Math.cos(rot);
    this.seno = Math.sin(rot);
}

private Point ruota(int x, int y) {
    int centerY = h / 2;
    int centerX = w / 2;
    int j = (int) (coseno * (x - centerX) - seno * (y - centerY) + centerX);
    int i = (int) (seno * (x - centerX) + coseno * (y - centerY) + centerY);
    return new Point(j, i);
}
However, when I test it by applying the method to every pixel, the result is not rotated about the center.
private void esegui() {
    for (int i = 0; i < w; i++)
        for (int j = 0; j < h; j++) {
            Point p = ruota(i, j); // ip: original image | ip2: destination image
            ip2.putPixel(p.x, p.y, ip.getPixel(i, j));
        }
    nu.setProcessor(ip); // destination image
    nu.repaintWindow();
}
Could someone help me implement a proper rotation in one direction and in the other?
This is my test class:
private int getH() { // height of destination image
    return (int) (this.h * Math.abs(coseno) + this.w * Math.abs(seno));
}

private int getW() {
    return (int) (this.plus.getHeight() * Math.abs(seno) + this.plus.getWidth() * Math.abs(coseno));
}

public Test() { // class test
    IJ.open();
    this.plus = IJ.getImage();
    ip = plus.getProcessor();
    this.h = this.plus.getHeight(); // height of start image
    this.w = this.plus.getWidth();
    setRot(14); // rotation of 14°
    dHeight = this.getH(); // dimension of the target image
    dWidth = this.getW();
    System.out.println("h " + h + " gh " + getH());
    System.out.println("w " + w + " gw " + getW());
    // create an image target
    this.nu = NewImage.createRGBImage("rotate", getW(), getH(), 1, NewImage.FILL_BLACK);
    ip2 = nu.getProcessor();
    esegui();
    nu.setProcessor(ip2);
    nu.show();
}
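One likely cause (hedged, since I can only infer the rest of the setup): ruota() rotates about the centre of the source image (w/2, h/2) but writes into a larger target whose centre is (getW()/2, getH()/2), so the result ends up off-centre; the forward mapping also leaves unfilled "holes" in the destination. A minimal sketch of an alternative, assuming ImageJ's ij.process.ImageProcessor as in the code above (the method name is illustrative): iterate over the destination and rotate each pixel backwards into the source; for the opposite rotation direction, negate the angle.

private void ruotaInversa(ImageProcessor src, ImageProcessor dst, double angleDeg) {
    double rad = Math.toRadians(angleDeg);
    double cos = Math.cos(rad), sin = Math.sin(rad);
    double srcCx = src.getWidth() / 2.0, srcCy = src.getHeight() / 2.0;
    double dstCx = dst.getWidth() / 2.0, dstCy = dst.getHeight() / 2.0;
    for (int y = 0; y < dst.getHeight(); y++) {
        for (int x = 0; x < dst.getWidth(); x++) {
            // inverse rotation about the destination centre, then shift into source coordinates
            double dx = x - dstCx, dy = y - dstCy;
            int sx = (int) Math.round( cos * dx + sin * dy + srcCx);
            int sy = (int) Math.round(-sin * dx + cos * dy + srcCy);
            if (sx >= 0 && sx < src.getWidth() && sy >= 0 && sy < src.getHeight()) {
                dst.putPixel(x, y, src.getPixel(sx, sy));
            }
        }
    }
}

With this sketch, esegui() would reduce to something like ruotaInversa(ip, ip2, 14) followed by nu.setProcessor(ip2), and every destination pixel gets exactly one value.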

Related

SUNFLOW: Paint only contour edges of 3D mesh

This is a literal copy of the WireframeShader from a really old and long-abandoned Java app called Sunflow:
package org.sunflow.core.shader;
import org.sunflow.SunflowAPI;
import org.sunflow.core.ParameterList;
import org.sunflow.core.Shader;
import org.sunflow.core.ShadingState;
import org.sunflow.image.Color;
import org.sunflow.math.Matrix4;
import org.sunflow.math.Point3;
public class WireframeShader implements Shader {
private Color lineColor;
private Color fillColor;
private float width;
private float cosWidth;
public WireframeShader() {
lineColor = Color.BLACK;
fillColor = Color.WHITE;
// pick a very small angle - should be roughly the half the angular width of a pixel
width = (float) (Math.PI * 0.5 / 4096);
cosWidth = (float) Math.cos(width);
}
public boolean update(ParameterList pl, SunflowAPI api) {
lineColor = pl.getColor("line", lineColor);
fillColor = pl.getColor("fill", fillColor);
width = pl.getFloat("width", width);
cosWidth = (float) Math.cos(width);
return true;
}
public Color getMaterialColor() {
return lineColor;
}
public Color getFillColor(ShadingState state) {
return fillColor;
}
public Color getLineColor(ShadingState state) {
return lineColor;
}
public Color getRadiance(ShadingState state) {
Point3[] p = new Point3[3];
if (!state.getTrianglePoints(p)) {
return getFillColor(state);
}
// transform points into camera space
Point3 center = state.getPoint();
Matrix4 w2c = state.getWorldToCamera();
center = w2c.transformP(center);
for (int i = 0; i < 3; i++) {
p[i] = w2c.transformP(state.getInstance().transformObjectToWorld(p[i]));
}
float cn = 1.0f / (float) Math.sqrt(center.x * center.x + center.y * center.y + center.z * center.z);
for (int i = 0, i2 = 2; i < 3; i2 = i, i++) {
// compute orthogonal projection of the shading point onto each triangle edge as in:
// http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
float t = (center.x - p[i].x) * (p[i2].x - p[i].x);
t += (center.y - p[i].y) * (p[i2].y - p[i].y);
t += (center.z - p[i].z) * (p[i2].z - p[i].z);
t /= p[i].distanceToSquared(p[i2]);
float projx = (1 - t) * p[i].x + t * p[i2].x;
float projy = (1 - t) * p[i].y + t * p[i2].y;
float projz = (1 - t) * p[i].z + t * p[i2].z;
float n = 1.0f / (float) Math.sqrt(projx * projx + projy * projy + projz * projz);
// check angular width
float dot = projx * center.x + projy * center.y + projz * center.z;
if (dot * n * cn >= cosWidth) {
return getLineColor(state);
}
}
return getFillColor(state);
}
public void scatterPhoton(ShadingState state, Color power) {
}
@Override
public float getReflectionValue() {
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
}
It renders any 3D mesh so that every edge of every mesh triangle is painted, creating a wireframe-like visual (see pic below).
My question is: does anybody know how to change/update the code (specifically the getRadiance() method) so that it only paints the contour edges of the mesh, so it would look like the pic below?
This is harder than you think, because it cannot be done using information from just a single triangle. You need to check all edges in the mesh and, for each edge, take the two faces that contain it. You draw the edge if and only if the normals of these two faces are not the same (i.e. differ enough).
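A rough illustration of that adjacency test, as a generic pre-processing sketch rather than Sunflow API (the mesh is assumed to be a plain vertex array plus a triangle index list, and ContourEdges/findContourEdges are invented names): getRadiance() would then only return the line colour when the shading point lies near an edge contained in this set, using the same point-to-edge distance test the shader already performs, instead of near any triangle edge.

import java.util.*;

class ContourEdges {
    // an edge key is "minIndex:maxIndex" so both winding orders map to the same edge
    static Set<String> findContourEdges(float[][] verts, int[][] tris, double minAngleDeg) {
        Map<String, List<Integer>> edgeToFaces = new HashMap<>();
        for (int f = 0; f < tris.length; f++) {
            for (int e = 0; e < 3; e++) {
                int a = tris[f][e], b = tris[f][(e + 1) % 3];
                String key = Math.min(a, b) + ":" + Math.max(a, b);
                edgeToFaces.computeIfAbsent(key, k -> new ArrayList<>()).add(f);
            }
        }
        double cosThreshold = Math.cos(Math.toRadians(minAngleDeg));
        Set<String> contour = new HashSet<>();
        for (Map.Entry<String, List<Integer>> entry : edgeToFaces.entrySet()) {
            List<Integer> faces = entry.getValue();
            if (faces.size() == 1) {                 // open boundary edge: always draw it
                contour.add(entry.getKey());
            } else if (faces.size() == 2) {
                double[] n1 = normal(verts, tris[faces.get(0)]);
                double[] n2 = normal(verts, tris[faces.get(1)]);
                double dot = n1[0] * n2[0] + n1[1] * n2[1] + n1[2] * n2[2];
                if (dot < cosThreshold) {            // normals differ enough -> crease/contour edge
                    contour.add(entry.getKey());
                }
            }
        }
        return contour;
    }

    // unit normal of a triangle given by three vertex indices
    static double[] normal(float[][] v, int[] tri) {
        double[] u = { v[tri[1]][0] - v[tri[0]][0], v[tri[1]][1] - v[tri[0]][1], v[tri[1]][2] - v[tri[0]][2] };
        double[] w = { v[tri[2]][0] - v[tri[0]][0], v[tri[2]][1] - v[tri[0]][1], v[tri[2]][2] - v[tri[0]][2] };
        double[] n = { u[1] * w[2] - u[2] * w[1], u[2] * w[0] - u[0] * w[2], u[0] * w[1] - u[1] * w[0] };
        double len = Math.sqrt(n[0] * n[0] + n[1] * n[1] + n[2] * n[2]);
        return len == 0 ? n : new double[] { n[0] / len, n[1] / len, n[2] / len };
    }
}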

Why can't I rotate 2 points to draw them on the same line

I have this code to draw some vertices and edges. I have tried almost all the possibilities within my reach, but I believe the bug is in the rotate method or in the constructor; I'm not sure.
public static int CONFIG_NODE_DIAMETER = 20; // pixels

// constructor
public GraphDraw(Graph<V, E> graph) {
    // count of vertices
    int N = graph.numVertex();
    double width = this.getWidth();
    double height = this.getHeight();
    Point2D center = new Point2D(width / 2, height / 2);
    double angleIncrement = 360f / N;
    // get all vertices from the graph
    ArrayList<Vertex<V>> vertex = graph.getVertex();
    // draw the line and point
    boolean first = true;
    Point2D p = null;
    for (Vertex<V> v : vertex) { {
        if (first) {
            if (width > height) {
                p = new Point2D(center.getX(),
                        center.getY() - height / 2 + CONFIG_NODE_DIAMETER * 2);
            } else {
                p = new Point2D(center.getX(),
                        center.getY() - width / 2 + CONFIG_NODE_DIAMETER * 2);
            }
            first = false;
        } else {
            p = rotate(p, center, angleIncrement);
        }
    }
    }
}
The method that performs the rotation between the 2 points:
private static Point2D rotate(Point2D point, Point2D pivot, double angle_degrees) {
    double angle = Math.toRadians(angle_degrees);
    double sin = Math.sin(angle);
    double cos = Math.cos(angle);
    // translate to origin
    Point2D result = point.subtract(pivot);
    // rotate point
    Point2D rotatedOrigin = new Point2D(
            result.getX() * cos - result.getY() * sin,
            result.getX() * sin + result.getY() * cos);
    // translate point back
    result = rotatedOrigin.add(pivot);
    return result;
}
I want to do it like the image below, and I tried to rotate but it is not working. Any suggestions?
In this link you can check all the methods in the GraphDraw class; I did not include them here because the post would get too long.
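As a hedged aside (the rest of GraphDraw is not shown here): rotate() itself looks like a standard rotation about a pivot, so it may be worth checking the values it is fed, for example whether getWidth()/getHeight() are still 0 when the constructor runs, which would collapse every point onto the centre. Below is a minimal sketch of the same circular layout computed directly from the angle, assuming JavaFX's javafx.geometry.Point2D (which the add/subtract calls suggest) and reusing the width/height/center/graph variables from the constructor above; it is meant as a drop-in for the constructor body, not as the original code.

int n = graph.numVertex();
double radius = Math.min(width, height) / 2 - CONFIG_NODE_DIAMETER * 2;
List<Point2D> positions = new ArrayList<>();
for (int i = 0; i < n; i++) {
    // start at the top of the circle; with screen y pointing down this walks clockwise
    double angle = Math.toRadians(i * 360.0 / n) - Math.PI / 2;
    positions.add(new Point2D(center.getX() + radius * Math.cos(angle),
                              center.getY() + radius * Math.sin(angle)));
}

Computing each position directly avoids chaining rotations, so one bad initial point cannot throw off all the following ones.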

Java OpenCV Layer small image onto larger image with transparency

I am trying to write a function that overlays an image, with transparency, onto a rectangle of another image. However, it doesn't layer the images; it just erases the section I overlay, and the transparency cuts through the entire image. Here is my code.
public static void overlayImage(String imagePath, String overlayPath, int x, int y, int width, int height) {
    Mat overlay = Imgcodecs.imread(overlayPath, Imgcodecs.IMREAD_UNCHANGED);
    Mat image = Imgcodecs.imread(imagePath, Imgcodecs.IMREAD_UNCHANGED);
    Rectangle rect = new Rectangle(x, y, width, height);
    Imgproc.resize(overlay, overlay, rect.size());
    Mat submat = image.submat(new Rect(rect.x, rect.y, overlay.cols(), overlay.rows()));
    overlay.copyTo(submat);
    Imgcodecs.imwrite(imagePath, image);
}
EDIT: example before and after pictures were attached to the original post (images not reproduced here).
I found this function, which does exactly what I needed:
public static void overlayImage(Mat background, Mat foreground, Mat output, Point location) {
    background.copyTo(output);
    for (int y = (int) Math.max(location.y, 0); y < background.rows(); ++y) {
        int fY = (int) (y - location.y);
        if (fY >= foreground.rows())
            break;
        for (int x = (int) Math.max(location.x, 0); x < background.cols(); ++x) {
            int fX = (int) (x - location.x);
            if (fX >= foreground.cols()) {
                break;
            }
            double opacity;
            double[] finalPixelValue = new double[4];
            opacity = foreground.get(fY, fX)[3];
            finalPixelValue[0] = background.get(y, x)[0];
            finalPixelValue[1] = background.get(y, x)[1];
            finalPixelValue[2] = background.get(y, x)[2];
            finalPixelValue[3] = background.get(y, x)[3];
            for (int c = 0; c < output.channels(); ++c) {
                if (opacity > 0) {
                    double foregroundPx = foreground.get(fY, fX)[c];
                    double backgroundPx = background.get(y, x)[c];
                    float fOpacity = (float) (opacity / 255);
                    finalPixelValue[c] = ((backgroundPx * (1.0 - fOpacity)) + (foregroundPx * fOpacity));
                    if (c == 3) {
                        finalPixelValue[c] = foreground.get(fY, fX)[3];
                    }
                }
            }
            output.put(y, x, finalPixelValue);
        }
    }
}
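For context: the original overlayImage erased the region because Mat.copyTo copies all channels verbatim, including alpha, rather than blending them. Below is a hedged usage sketch of the function above; the file names and the 200x200 size are placeholders, and note that the function also reads channel 3 of the background, so the background needs an alpha channel too (or that line needs guarding).

// org.opencv.core.{Mat, Size, Point}, org.opencv.imgcodecs.Imgcodecs, org.opencv.imgproc.Imgproc
Mat background = Imgcodecs.imread("background.png", Imgcodecs.IMREAD_UNCHANGED); // keep alpha
Mat foreground = Imgcodecs.imread("overlay.png", Imgcodecs.IMREAD_UNCHANGED);    // PNG with alpha
Imgproc.resize(foreground, foreground, new Size(200, 200)); // size of the target region
Mat output = new Mat();
overlayImage(background, foreground, output, new Point(50, 50));
Imgcodecs.imwrite("result.png", output);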

Translations between screen co-ordinates and image co-ordinates after scaling and zooming

I have a class that manages translating from screen co-ordinates to image co-ordinates.
However, I have an "off-by-one" error.
The following gives 318, 198, instead of 319, 199:
@Test
public void test6rightScreenCornerToImageCoOrdAfterZoomingAndScaling() {
    PointTranslation pointTranslation = new PointTranslation();
    pointTranslation.setOriginalSize(0, 0, 320, 200); // original image
    pointTranslation.zoomIn(9, 9, 310, 190); // zoomed image starting at 9,9
    pointTranslation.scale(0, 0, 800, 800);
    Point translatedPoint = pointTranslation.transformPoint(799, 799);
    System.out.println(testName.getMethodName() + " : " + translatedPoint.toString());
    assertTrue(translatedPoint.x == 319);
    assertTrue(translatedPoint.y == 199);
}
=============================================================
Full Listing:
package gtx;
import org.eclipse.swt.graphics.Point;
import org.eclipse.swt.internal.gdip.PointF;
import org.eclipse.swt.internal.gdip.RectF;
@SuppressWarnings("restriction")
public class PointTranslation {
RectF originalSize = null;
RectF currentSize = null;
RectF scaledSize = null;
public void setOriginalSize(int originX, int originY, int width, int height) {
originalSize = getRectangle(originX, originY, width, height);
currentSize = originalSize;
}
public void zoomIn(int originX, int originY, int width, int height) {
// System.out.println("addTranslation: " + originX + " " + originY + " "
// + width + " " + height);
currentSize = getRectangle(originX, originY, width, height);
}
public void scale(int originX, int originY, int width, int height) {
// System.out.println("addTranslation: " + originX + " " + originY + " "
// + width + " " + height);
scaledSize = getRectangle(originX, originY, width, height);
}
public boolean isPointWithinBounds(Point point) {
return isPointWithinBounds(point.x, point.y);
}
public boolean isPointWithinBounds(int xPos, int yPos) {
boolean ret = false;
if (scaledSize != null) {
RectF sourceRec = scaledSize;
int xBounds = (int) (sourceRec.Width + sourceRec.X);
int yBounds = (int) (sourceRec.Height + sourceRec.Y);
ret = (xPos < xBounds) && (yPos < yBounds) && (xPos > sourceRec.X) && (yPos > sourceRec.Y);
}
return ret;
}
public Point transformPoint(Point point) {
return transformPoint(point.x, point.y);
}
public Point transformPoint(int xPos, int yPos) {
Point sourcePoint = new Point((int) xPos, (int) yPos);
Point retPoint = sourcePoint;
if (this.scaledSize != null) {
retPoint = transformPoint(this.scaledSize, this.currentSize, sourcePoint);
}
return retPoint;
}
/*
* Rectangle 1 has (x1, y1) origin and (w1, h1) for width and height, and
* Rectangle 2 has (x2, y2) origin and (w2, h2) for width and height, then
*
* Given point (x, y) in terms of Rectangle 1 co-ords, to convert it to
* Rectangle 2 co-ords: xNew = ((x-x1)/w1)*w2 + x2; yNew = ((y-y1)/h1)*h2 +
* y2;
*/
private Point transformPoint(RectF source, RectF destination, Point intPoint) {
PointF point = new PointF();
point.X = intPoint.x;
point.Y = intPoint.y;
return transformPoint(source, destination, point);
}
private Point transformPoint(RectF source, RectF destination, PointF point) {
return new Point((int) ((((point.X - source.X) / source.Width) * destination.Width + destination.X)),
(int) ((((point.Y - source.Y) / source.Height) * destination.Height + destination.Y)));
}
private RectF getRectangle(int x, int y, int width, int height) {
RectF rect = new RectF();
rect.X = x;
rect.Y = y;
rect.Height = height;
rect.Width = width;
return rect;
}
private PointF getPoint(int x, int y) {
PointF retPoint = new PointF();
retPoint.X = x;
retPoint.Y = y;
return retPoint;
}
public void reset() {
this.originalSize = null;
this.currentSize = null;
this.scaledSize = null;
}
}
Update:
My issue definitely seems to be with rounding. It's strange: for some test cases I need to round up to get the correct Point, and for others I need to round down. I am missing something, like a scaling factor. Any suggestions on how to do the translation between the two rectangles correctly?
Update 2:
I tried the following method, but still no joy:
private Point transformPoint(RectF source, RectF destination, PointF point) {
    float xPercent = normalize(point.X, source.X, source.Width);
    float destX = xPercent * (Math.abs(destination.Width - destination.X)) + destination.X;
    float yPercent = normalize(point.Y, source.Y, source.Height);
    float destY = yPercent * (Math.abs(destination.Height - destination.Y)) + destination.Y;
    System.out.println("Float x,y: " + destX + ", " + destY);
    System.out.println("Ceil Float x,y: " + Math.floor(destX) + ", " + Math.floor(destY));
    return new Point((int) Math.floor(destX), (int) Math.floor(destY));
}

private float normalize(float value, float min, float max) {
    return Math.abs((value - min) / (max - min));
}
When running the test case and stepping through the code, my debugging shows the following substitutions:
transformPoint(RectF source, RectF destination, PointF point) {
    return new Point(
        (int) ((((799 - 0) / 800) * 310) + 9),
        (int) ((((799 - 0) / 800) * 190) + 9)
    );
}
The first expression evaluates to 318.6125; the second evaluates to 198.7625.
You need to either:
- modify the equation so that the int conversion truncates to the desired value (such as adding + 1 at the end),
- round up before the conversion to int, or
- live with the result.
And as Mathew noted, multiple translations in a row distort and magnify the problem, much like averaging averages.
As stated by Reenactor, you need to round points before conversion to int:
private Point transformPoint(RectF source, RectF destination, PointF point) {
    final int ptx = Math.round(((point.X - source.X) / source.Width) * destination.Width + destination.X);
    final int pty = Math.round(((point.Y - source.Y) / source.Height) * destination.Height + destination.Y);
    return new Point(ptx, pty);
}
Is it possible you are converting the interim results after each translation to integer points?
Remember, org.eclipse.swt.graphics.Point stores x and y as int, so any fractions in your calculations are dropped.
That is:
1) First translation:
x1 = ((799 - 0) / 800) * 301 + 9 = 309.62375
y1 = ((799 - 0) / 800) * 181 + 9 = 189.77375
If you were storing those in a Point object before going into the next translation, they would be truncated to (309, 189) and you would get
2) Second translation
x2 = ((309 - 9) / 301) * 320 + 0 = 318.93...
y2 = ((189 - 9) / 181) * 200 + 0 = 198.89...
Which would, in turn, be truncated to (318, 198).
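Building on that observation, here is a minimal sketch of keeping the whole chain in float and rounding only once, at the very end, using the same SWT PointF/RectF types as the listing. transformPointChained and transformPointF are illustrative names, not part of the original class, and the three-rectangle chain mirrors the two translations hypothesised above.

private Point transformPointChained(RectF scaled, RectF zoomed, RectF original, PointF p) {
    PointF inZoomed = transformPointF(scaled, zoomed, p);        // stay in float here
    PointF inOriginal = transformPointF(zoomed, original, inZoomed);
    return new Point(Math.round(inOriginal.X), Math.round(inOriginal.Y)); // round exactly once
}

private PointF transformPointF(RectF source, RectF destination, PointF point) {
    PointF out = new PointF();
    out.X = ((point.X - source.X) / source.Width) * destination.Width + destination.X;
    out.Y = ((point.Y - source.Y) / source.Height) * destination.Height + destination.Y;
    return out;
}

Rounding once at the end avoids the compounding truncation described above.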

Collision detection between circles

I got this working class that creates a DarkHole object on the screen. It's a filled circle with a random color, position and radius. When the screen is touched, it is supposed to draw a ball avoiding the ones that have already been drawn.
I used this method:
Collision by radius
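(For reference, the radius-based test boils down to checking whether the distance between the two centres is at most the sum of the radii; a minimal generic sketch, not taken from the class below:)

static boolean circlesOverlap(double x1, double y1, double r1,
                              double x2, double y2, double r2) {
    // circles touch or overlap when the centre distance is <= r1 + r2
    return Math.hypot(x2 - x1, y2 - y1) <= r1 + r2;
}

The full class: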
public class DarkHole extends View {
public static final int maxDiameter = 250;
public static final int minDiameter = 240;
/**
* This class log tag.
*/
private static final String LOG_TAG = DarkHole.class.getSimpleName();
private static List<ShapeDrawable> mHoles;
private int mWindowWidth;
private int mWindowHeight;
private Random random;
/**
* The constructor;
*
* @param context application context.
*/
public DarkHole(Context context) {
super(context);
mHoles = new ArrayList<ShapeDrawable>();
random = new Random();
//Get Screen Size
Point point = new Point();
WindowManager windowManager = (WindowManager) context.getSystemService(Context.WINDOW_SERVICE);
Display display = windowManager.getDefaultDisplay();
display.getSize(point);
// Get screen max x and y.
mWindowWidth = point.x;
mWindowHeight = point.y;
}
/**
* Draw random hole.
*/
private void generateRandomHole() {
while(true) {
ShapeDrawable hole = new ShapeDrawable(new OvalShape());
// Generate random color.
int r = random.nextInt(255);
int g = random.nextInt(255);
int b = random.nextInt(255);
int randomColor = Color.rgb(r, g, b);
// Generate random position.
int diameter = random.nextInt(maxDiameter - minDiameter + 1) + minDiameter;
int x = random.nextInt((mWindowWidth - diameter) + 1);
int y = random.nextInt((mWindowHeight - diameter) + 1);
hole.getPaint().setColor(randomColor);
hole.setBounds(x, y, x + diameter, y + diameter);
if (checkDrawContains(hole)) {
Log.d(LOG_TAG, "[generateRandomHole] contains!");
hole = null;
random = null;
} else {
Log.d(LOG_TAG, "[generateRandomHole] not contains!");
mHoles.add(hole);
break;
}
}
}
/**
* Draw informative text.
*
* @param canvas canvas object.
*/
private void generateText(Canvas canvas) {
Paint color = new Paint();
color.setColor(Color.BLACK);
color.setTextSize(70);
color.setAntiAlias(true);
color.setDither(true);
canvas.drawText("Bolas: " + mHoles.size(), 10, 50, color);
}
private boolean checkDrawContains(ShapeDrawable newHole) {
long newCenterX = newHole.getBounds().left + (newHole.getBounds().width()/2);
long newCenterY = newHole.getBounds().top + (newHole.getBounds().height()/2);
Log.d(LOG_TAG, "[checkDrawContains] newCenterX: " + newCenterX);
for(ShapeDrawable hole: mHoles) {
long centerX = hole.getBounds().left + (hole.getBounds().width()/2);
long centerY = hole.getBounds().top + (hole.getBounds().height()/2);
long x = centerX - newCenterX;
long y = centerY - newCenterY;
long aux = (long) ((Math.pow(Math.abs(x),2)) + (Math.pow(Math.abs(y),2)));
long distance = (long) Math.sqrt(aux);
Log.d(LOG_TAG, "[checkDrawContains] x: " + x);
Log.d(LOG_TAG, "[checkDrawContains] y: " + y);
Log.d(LOG_TAG, "[checkDrawContains] distance: " + distance);
Log.d(LOG_TAG, "[checkDrawContains] sum radius: " + (newCenterX + centerX));
long sRads = (newHole.getBounds().width()/2) + (hole.getBounds().width()/2);
if(distance <= sRads ) {
return true;
}
}
return false;
}
/** {@inheritDoc} */
protected void onDraw(Canvas canvas) {
super.onDraw(canvas);
generateText(canvas);
for (ShapeDrawable hole : mHoles) {
hole.draw(canvas);
}
invalidate();
}
/** {@inheritDoc} */
@Override
public boolean onTouchEvent(MotionEvent event) {
switch (event.getAction()) {
case MotionEvent.ACTION_DOWN:
generateRandomHole();
return true;
}
return super.onTouchEvent(event);
}
}
It should work, but it crashes when the "recursion" starts to recalculate all the random parameters. Is it a stack overflow? Why is that happening? How should it be solved?
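A hedged reading of the code above (treat this as a sketch, not a verified fix): generateRandomHole() is not recursive, it is an unbounded while(true) loop, so a StackOverflowError is unlikely. What does look like the crash is that on a failed attempt the code sets random = null and then calls random.nextInt(...) again on the next pass, which throws a NullPointerException; and if the screen ever fills up, the loop would never terminate. A minimal reworked sketch that keeps random, caps the retries, and uses nextInt(256) so the full 0-255 color range is possible:

private void generateRandomHole() {
    final int maxAttempts = 100;                      // give up instead of looping forever
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
        ShapeDrawable hole = new ShapeDrawable(new OvalShape());
        int randomColor = Color.rgb(random.nextInt(256), random.nextInt(256), random.nextInt(256));
        int diameter = random.nextInt(maxDiameter - minDiameter + 1) + minDiameter;
        int x = random.nextInt((mWindowWidth - diameter) + 1);
        int y = random.nextInt((mWindowHeight - diameter) + 1);
        hole.getPaint().setColor(randomColor);
        hole.setBounds(x, y, x + diameter, y + diameter);
        if (!checkDrawContains(hole)) {               // keep `random`; just retry on overlap
            mHoles.add(hole);
            return;
        }
    }
    Log.d(LOG_TAG, "[generateRandomHole] no free spot found after " + maxAttempts + " attempts");
}

checkDrawContains() already implements the distance vs. sum-of-radii test, so it should not need changes.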
