Use of georegression.struct.point.Vector3D_F64 in project BoofCV by lessthanoptimal: class Polygon3DSequenceViewer, method keyTyped.
@Override
public void keyTyped(KeyEvent e) {
	Vector3D_F64 T = worldToCamera.getT();

	// 'w'/'s' move the camera along the z-axis, 'a'/'d' along the x-axis,
	// and 'q'/'e' along the y-axis
	if (e.getKeyChar() == 'w') {
		T.z -= stepSize;
	} else if (e.getKeyChar() == 's') {
		T.z += stepSize;
	} else if (e.getKeyChar() == 'a') {
		T.x += stepSize;
	} else if (e.getKeyChar() == 'd') {
		T.x -= stepSize;
	} else if (e.getKeyChar() == 'q') {
		T.y -= stepSize;
	} else if (e.getKeyChar() == 'e') {
		T.y += stepSize;
	} else if (e.getKeyChar() == 'h') {
		// 'h' resets the camera back to its initial pose
		synchronized (polygons) {
			worldToCamera.reset();
		}
	}

	repaint();
}
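For context, a Swing component only receives these key events if it is registered as a key listener and holds keyboard focus. The wiring below is a minimal illustrative sketch of that assumption, not code taken from the Polygon3DSequenceViewer source:

// Assumed wiring (illustrative only): the panel listens to its own key events,
// so pressing w/a/s/d/q/e/h adjusts worldToCamera and triggers repaint().
addKeyListener(this);
setFocusable(true);
requestFocus();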
Use of georegression.struct.point.Vector3D_F64 in project BoofCV by lessthanoptimal: class Zhang99DecomposeHomography, method decompose.
/**
 * Computes the rigid body motion encoded in the homography matrix H. It is assumed
 * that H was computed using {@link Zhang99ComputeTargetHomography}.
 *
 * @param H homography matrix.
 * @return Found camera motion.
 */
public Se3_F64 decompose(DMatrixRMaj H) {
	// split H into its column vectors h1, h2, h3
	DMatrixRMaj[] h = SpecializedOps_DDRM.splitIntoVectors(H, true);

	// lambda = 1/norm(inv(K)*h1) or 1/norm(inv(K)*h2)
	// use the average to attempt to reduce error
	CommonOps_DDRM.mult(K_inv, h[0], temp);
	double lambda = NormOps_DDRM.normF(temp);
	CommonOps_DDRM.mult(K_inv, h[1], temp);
	lambda += NormOps_DDRM.normF(temp);
	lambda = 2.0 / lambda;

	// compute the first two columns of the rotation matrix and the translation
	CommonOps_DDRM.mult(lambda, K_inv, h[0], r1);
	CommonOps_DDRM.mult(lambda, K_inv, h[1], r2);
	CommonOps_DDRM.mult(lambda, K_inv, h[2], t);

	// the third rotation column is the cross product of the first two
	Vector3D_F64 v1 = UtilVector3D_F64.convert(r1);
	Vector3D_F64 v2 = UtilVector3D_F64.convert(r2);
	Vector3D_F64 v3 = v1.cross(v2);
	UtilVector3D_F64.createMatrix(R, v1, v2, v3);

	Se3_F64 ret = new Se3_F64();

	// the R matrix is probably not a real rotation matrix, so find
	// the closest real rotation matrix
	ConvertRotation3D_F64.approximateRotationMatrix(R, ret.getR());
	ret.getT().set(t.data[0], t.data[1], t.data[2]);

	return ret;
}
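For reference, the relation this method relies on is the standard one from Zhang's planar calibration, restated here rather than quoted from the BoofCV documentation. A view of a planar target gives

$$ H \simeq K\,[\,r_1 \;\; r_2 \;\; t\,], \qquad r_1 = \lambda K^{-1}h_1,\quad r_2 = \lambda K^{-1}h_2,\quad t = \lambda K^{-1}h_3, \qquad \lambda = \frac{1}{\lVert K^{-1}h_1 \rVert} \approx \frac{1}{\lVert K^{-1}h_2 \rVert}, $$

so the code averages the two norm-based estimates of lambda, completes the rotation with r3 = r1 x r2, and then projects the result onto the nearest true rotation matrix.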
Use of georegression.struct.point.Vector3D_F64 in project BoofCV by lessthanoptimal: class CalibPoseAndPointRodriguesCodec, method decode.
@Override
public void decode(double[] input, CalibratedPoseAndPoint outputModel) {
	int paramIndex = 0;

	// first decode the transformation
	for (int i = 0; i < numViews; i++) {
		// don't decode if it is already known
		if (knownView[i])
			continue;

		Se3_F64 se = outputModel.getWorldToCamera(i);

		rotation.setParamVector(input[paramIndex++], input[paramIndex++], input[paramIndex++]);
		ConvertRotation3D_F64.rodriguesToMatrix(rotation, se.getR());

		Vector3D_F64 T = se.getT();
		T.x = input[paramIndex++];
		T.y = input[paramIndex++];
		T.z = input[paramIndex++];
	}

	// now decode the points
	for (int i = 0; i < numPoints; i++) {
		Point3D_F64 p = outputModel.getPoint(i);
		p.x = input[paramIndex++];
		p.y = input[paramIndex++];
		p.z = input[paramIndex++];
	}
}
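As a small illustration, the layout decode() walks through is three Rodrigues axis-angle values plus three translation components for every view whose pose is unknown, followed by x, y, z for every point. The helper below is hypothetical (not part of the BoofCV source) and only shows how long the input array must be under that layout:

// Hypothetical helper: length the input[] array must have for decode() above.
public static int expectedParamLength(boolean[] knownView, int numPoints) {
	int unknownViews = 0;
	for (boolean known : knownView) {
		if (!known)
			unknownViews++;
	}
	// 6 values per unknown view (3 rotation + 3 translation), then 3 per point
	return unknownViews * 6 + numPoints * 3;
}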
Use of georegression.struct.point.Vector3D_F64 in project BoofCV by lessthanoptimal: class MultiViewOps, method createTrifocal.
/**
 * <p>
 * Creates a trifocal tensor from two rigid body motions. This is for the calibrated camera case.
 * </p>
 *
 * <p>
 * NOTE: View 1 is the world coordinate system.
 * </p>
 *
 * @param P2 Transform from view 1 to view 2.
 * @param P3 Transform from view 1 to view 3.
 * @param ret Storage for trifocal tensor. If null a new instance will be created.
 * @return The trifocal tensor
 */
public static TrifocalTensor createTrifocal(Se3_F64 P2, Se3_F64 P3, TrifocalTensor ret) {
	if (ret == null)
		ret = new TrifocalTensor();

	DMatrixRMaj R2 = P2.getR();
	DMatrixRMaj R3 = P3.getR();
	Vector3D_F64 T2 = P2.getT();
	Vector3D_F64 T3 = P3.getT();

	for (int col = 0; col < 3; col++) {
		DMatrixRMaj T = ret.getT(col);

		int index = 0;
		for (int i = 0; i < 3; i++) {
			double a_left = R2.unsafe_get(i, col);
			double a_right = T2.getIndex(i);

			for (int j = 0; j < 3; j++) {
				T.data[index++] = a_left * T3.getIndex(j) - a_right * R3.unsafe_get(j, col);
			}
		}
	}

	return ret;
}
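Written out, the inner loops fill each 3x3 slice with the standard construction of a trifocal tensor from the two camera matrices P2 = [R2 | T2] and P3 = [R3 | T3], with view 1 taken as [I | 0]. The formula below restates what the loop computes and is not quoted from the BoofCV javadoc:

$$ T_q(i,j) \;=\; R_2(i,q)\,T_3(j) \;-\; T_2(i)\,R_3(j,q), \qquad q, i, j \in \{1,2,3\}. $$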
Use of georegression.struct.point.Vector3D_F64 in project BoofCV by lessthanoptimal: class MultiViewOps, method constraint.
/**
 * <p>
 * Trifocal tensor with line-line-line correspondence:<br>
 * (l2<sup>T</sup>*[T1,T2,T3]*l3)*[l1]<sub>x</sub> = 0
 * </p>
 *
 * @param tensor Trifocal tensor
 * @param l1 A line in the first view.
 * @param l2 A line in the second view.
 * @param l3 A line in the third view.
 * @param ret Storage for output. If null a new instance will be created.
 * @return Result of applying the constraint. With perfect inputs it will be zero.
 */
public static Vector3D_F64 constraint(TrifocalTensor tensor, Vector3D_F64 l1, Vector3D_F64 l2, Vector3D_F64 l3, Vector3D_F64 ret) {
	if (ret == null)
		ret = new Vector3D_F64();

	double x = GeometryMath_F64.innerProd(l2, tensor.T1, l3);
	double y = GeometryMath_F64.innerProd(l2, tensor.T2, l3);
	double z = GeometryMath_F64.innerProd(l2, tensor.T3, l3);

	GeometryMath_F64.cross(new Vector3D_F64(x, y, z), l1, ret);

	return ret;
}
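The reason the output vanishes for perfect data: the tensor transfers the lines observed in views two and three to a line in the first view, and the cross product of parallel vectors is zero. Restated for reference (standard trifocal line transfer, not quoted from the BoofCV documentation):

$$ l_1 \simeq \big(\, l_2^{\mathsf T} T_1\, l_3,\;\; l_2^{\mathsf T} T_2\, l_3,\;\; l_2^{\mathsf T} T_3\, l_3 \,\big)^{\mathsf T}, \qquad\text{hence}\qquad \big( l_2^{\mathsf T} T_1 l_3,\; l_2^{\mathsf T} T_2 l_3,\; l_2^{\mathsf T} T_3 l_3 \big) \times l_1 = 0. $$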