Use of georegression.struct.point.Point4D_F64 in project BoofCV by lessthanoptimal.
The class ChecksMotionNPointHomogenous, method checkMotion:
private void checkMotion(int N, DMatrixRMaj cameraMatrix, boolean planar) {
    generateScene(N, cameraMatrix, planar);

    // extract the motion
    DMatrixRMaj foundP = compute(assocPairs, worldPts);

    // see if the found motion produces the same output as the original motion
    for (int i = 0; i < worldPts.size(); i++) {
        Point4D_F64 X = worldPts.get(i);
        Point2D_F64 x = assocPairs.get(i).p2;

        Point2D_F64 foundPt = PerspectiveOps.renderPixel(foundP, X, (Point2D_F64) null);

        assertEquals(x.x, foundPt.x, 1e-6);
        assertEquals(x.y, foundPt.y, 1e-6);
    }
}
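For reference, rendering a pixel from a homogeneous Point4D_F64 with a 3x4 camera matrix amounts to x = P*X followed by dehomogenization by the third component. Below is a minimal standalone sketch of that projection using EJML directly; the class and method names are illustrative, and it ignores the degenerate case where the projected depth is zero.

import georegression.struct.point.Point2D_F64;
import georegression.struct.point.Point4D_F64;
import org.ejml.data.DMatrixRMaj;

public class ProjectHomogeneousSketch {
    // Equivalent in spirit to PerspectiveOps.renderPixel(P, X, null) for a 3x4 P
    public static Point2D_F64 projectHomogeneous(DMatrixRMaj P, Point4D_F64 X) {
        // x = P * X, where X = (x, y, z, w)
        double px = P.get(0,0)*X.x + P.get(0,1)*X.y + P.get(0,2)*X.z + P.get(0,3)*X.w;
        double py = P.get(1,0)*X.x + P.get(1,1)*X.y + P.get(1,2)*X.z + P.get(1,3)*X.w;
        double pz = P.get(2,0)*X.x + P.get(2,1)*X.y + P.get(2,2)*X.z + P.get(2,3)*X.w;
        // dehomogenize by the third component to get pixel coordinates
        return new Point2D_F64(px/pz, py/pz);
    }
}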
The class CommonMotionNPointHomogenous, method generateScene:
protected void generateScene(int N, DMatrixRMaj P, boolean planar) {
    this.projection = P;

    // randomly generate points in space
    if (planar) {
        worldPts = CommonHomographyChecks.createRandomPlaneH(rand, 3, N);
    } else {
        worldPts = GeoTestingOps.randomPointsH_F64(-1, 1, N, rand);
    }

    DMatrixRMaj P0 = new DMatrixRMaj(3, 4);
    CommonOps_DDRM.setIdentity(P0);

    // transform points into second camera's reference frame
    assocPairs = new ArrayList<>();
    pixelsView2 = new ArrayList<>();
    for (Point4D_F64 X : worldPts) {
        Point2D_F64 p1 = PerspectiveOps.renderPixel(P0, X, (Point2D_F64) null);
        Point2D_F64 p2 = PerspectiveOps.renderPixel(P, X, (Point2D_F64) null);

        AssociatedPair pair = new AssociatedPair();
        pair.p1.setTo(p1.x, p1.y);
        pair.p2.setTo(p2.x, p2.y);
        assocPairs.add(pair);
        pixelsView2.add(p2);
    }
}
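Because the first camera is the identity matrix P0 = [I|0], rendering view one reduces to dividing the homogeneous point by its z component, which yields normalized image coordinates. A tiny standalone sketch of that special case (the helper name is illustrative, not a BoofCV API):

import georegression.struct.point.Point2D_F64;
import georegression.struct.point.Point4D_F64;

public class IdentityCameraSketch {
    // For P0 = [I|0], the projection of X = (x, y, z, w) is simply (x/z, y/z)
    public static Point2D_F64 renderWithIdentity(Point4D_F64 X) {
        return new Point2D_F64(X.x / X.z, X.y / X.z);
    }
}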
The class TestDistanceSe3SymmetricSq, method intrinsicParameters:
/**
 * Manually compute the error using a calibration matrix and see if it matches
 */
@Test
void intrinsicParameters() {
    // intrinsic camera calibration matrices
    DMatrixRMaj K1 = new DMatrixRMaj(3, 3, true, 100, 0.01, 200, 0, 150, 200, 0, 0, 1);
    DMatrixRMaj K2 = new DMatrixRMaj(3, 3, true, 105, 0.021, 180, 0, 155, 210, 0, 0, 1);
    DMatrixRMaj K1_inv = new DMatrixRMaj(3, 3);
    DMatrixRMaj K2_inv = new DMatrixRMaj(3, 3);
    CommonOps_DDRM.invert(K1, K1_inv);
    CommonOps_DDRM.invert(K2, K2_inv);

    Se3_F64 keyToCurr = new Se3_F64();
    keyToCurr.getT().setTo(0.1, -0.1, 0.01);

    Point4D_F64 X = new Point4D_F64(0.02, -0.05, 3, 1.0);

    AssociatedPair obs = new AssociatedPair();
    AssociatedPair obsP = new AssociatedPair();

    obs.p1.x = X.x / X.z;
    obs.p1.y = X.y / X.z;

    SePointOps_F64.transform(keyToCurr, X, X);

    obs.p2.x = X.x / X.z;
    obs.p2.y = X.y / X.z;

    // translate into pixels
    GeometryMath_F64.mult(K1, obs.p1, obsP.p1);
    GeometryMath_F64.mult(K2, obs.p2, obsP.p2);

    // add some noise
    obsP.p1.x += 0.25;
    obsP.p1.y += 0.25;
    obsP.p2.x -= 0.25;
    obsP.p2.y -= 0.25;

    // convert noisy observations into normalized coordinates
    GeometryMath_F64.mult(K1_inv, obsP.p1, obsP.p1);
    GeometryMath_F64.mult(K2_inv, obsP.p2, obsP.p2);

    // triangulate the point's position given noisy data
    LineParametric3D_F64 rayA = new LineParametric3D_F64();
    LineParametric3D_F64 rayB = new LineParametric3D_F64();
    rayA.slope.setTo(obsP.p1.x, obsP.p1.y, 1);
    rayB.p.setTo(-0.1, 0.1, -0.01);
    rayB.slope.setTo(obsP.p2.x, obsP.p2.y, 1);

    ClosestPoint3D_F64.closestPoint(rayA, rayB, X);

    // compute predicted observations given the noisy triangulation
    AssociatedPair ugh = new AssociatedPair();
    ugh.p1.x = X.x / X.z;
    ugh.p1.y = X.y / X.z;

    SePointOps_F64.transform(keyToCurr, X, X);

    ugh.p2.x = X.x / X.z;
    ugh.p2.y = X.y / X.z;

    // convert everything into pixels
    GeometryMath_F64.mult(K1, ugh.p1, ugh.p1);
    GeometryMath_F64.mult(K2, ugh.p2, ugh.p2);
    GeometryMath_F64.mult(K1, obsP.p1, obsP.p1);
    GeometryMath_F64.mult(K2, obsP.p2, obsP.p2);

    double dx1 = ugh.p1.x - obsP.p1.x;
    double dy1 = ugh.p1.y - obsP.p1.y;
    double dx2 = ugh.p2.x - obsP.p2.x;
    double dy2 = ugh.p2.y - obsP.p2.y;

    double error = dx1 * dx1 + dy1 * dy1 + dx2 * dx2 + dy2 * dy2;

    // convert noisy observations back into normalized coordinates
    GeometryMath_F64.mult(K1_inv, obsP.p1, obsP.p1);
    GeometryMath_F64.mult(K2_inv, obsP.p2, obsP.p2);

    DistanceSe3SymmetricSq alg = new DistanceSe3SymmetricSq(triangulate);
    alg.setIntrinsic(0, PerspectiveOps.matrixToPinhole(K1, 0, 0, null));
    alg.setIntrinsic(1, PerspectiveOps.matrixToPinhole(K2, 0, 0, null));
    alg.setModel(keyToCurr);

    assertEquals(error, alg.distance(obsP), 1e-8);
}
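The quantity this test reproduces by hand is the symmetric squared reprojection error: the squared pixel residual in the first view plus the squared pixel residual in the second view. A minimal sketch of that computation, pulled out of the test above into a standalone helper (the class and method names are illustrative):

import georegression.struct.point.Point2D_F64;

public class SymmetricErrorSketch {
    // Sum of squared pixel residuals over both views, matching the manual computation above
    public static double symmetricSq(Point2D_F64 predicted1, Point2D_F64 observed1,
                                     Point2D_F64 predicted2, Point2D_F64 observed2) {
        double dx1 = predicted1.x - observed1.x;
        double dy1 = predicted1.y - observed1.y;
        double dx2 = predicted2.x - observed2.x;
        double dy2 = predicted2.y - observed2.y;
        return dx1*dx1 + dy1*dy1 + dx2*dx2 + dy2*dy2;
    }
}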
The class TestProjectiveReconstructionByFactorization, method perfect_input_badDepths:
/**
 * All data is perfect, but 1 is used for the depth estimate
 */
@Test
void perfect_input_badDepths() {
    int numViews = 8;
    int numFeatures = 10;
    simulate(numViews, numFeatures, false);

    ProjectiveStructureByFactorization alg = new ProjectiveStructureByFactorization();
    alg.initialize(features3D.size(), projections.size());

    // depth isn't known so just set it to 1. it could easily converge to a poor local optimum
    alg.setAllDepths(1);

    for (int viewIdx = 0; viewIdx < projections.size(); viewIdx++) {
        alg.setPixels(viewIdx, observations.get(viewIdx));
    }

    assertTrue(alg.process());

    DMatrixRMaj P = new DMatrixRMaj(3, 4);
    Point4D_F64 X = new Point4D_F64();
    int total = 0;
    for (int viewIdx = 0; viewIdx < numViews; viewIdx++) {
        alg.getCameraMatrix(viewIdx, P);
        for (int featureIdx = 0; featureIdx < numFeatures; featureIdx++) {
            alg.getFeature3D(featureIdx, X);
            Point2D_F64 expected = observations.get(viewIdx).get(featureIdx);
            Point3D_F64 xh = PerspectiveOps.renderPixel(P, X, (Point3D_F64) null);
            Point2D_F64 found = new Point2D_F64(xh.x / xh.z, xh.y / xh.z);
            // System.out.println(expected+" "+found);
            if (expected.distance(found) <= 2)
                total++;
        }
    }

    // see if a large number of solutions are within 2 pixels
    assertTrue(total >= 0.95 * numViews * numFeatures);
}
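The acceptance check above can be read as: render each recovered homogeneous feature through the recovered camera, dehomogenize, and count how many reprojections land within a pixel tolerance of the observation. A compact standalone sketch of that check follows; the class and method names are illustrative, while PerspectiveOps.renderPixel is the same BoofCV call used in the test.

import boofcv.alg.geo.PerspectiveOps;
import georegression.struct.point.Point2D_F64;
import georegression.struct.point.Point3D_F64;
import georegression.struct.point.Point4D_F64;
import org.ejml.data.DMatrixRMaj;
import java.util.List;

public class ReprojectionCheckSketch {
    // Fraction of features whose reprojection lies within 'tol' pixels of its observation
    public static double fractionWithinTolerance(DMatrixRMaj P, List<Point4D_F64> features,
                                                 List<Point2D_F64> observations, double tol) {
        int good = 0;
        for (int i = 0; i < features.size(); i++) {
            Point3D_F64 xh = PerspectiveOps.renderPixel(P, features.get(i), (Point3D_F64) null);
            Point2D_F64 found = new Point2D_F64(xh.x / xh.z, xh.y / xh.z);
            if (observations.get(i).distance(found) <= tol)
                good++;
        }
        return good / (double) features.size();
    }
}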
The class CommonTriangulationChecks, method createScene:
public void createScene(Point3D_F64 worldPoint) {
    Point4D_F64 X = new Point4D_F64(worldPoint.x, worldPoint.y, worldPoint.z, 1.0);
    createScene(X);
}
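Going the other way, a homogeneous Point4D_F64 with nonzero w maps back to a Euclidean Point3D_F64 by dividing through by w; points with w = 0 represent points at infinity and have no Euclidean equivalent. A minimal sketch of that conversion (the helper name is illustrative, not a BoofCV or georegression API):

import georegression.struct.point.Point3D_F64;
import georegression.struct.point.Point4D_F64;

public class HomogeneousSketch {
    // Convert (x, y, z, w) with w != 0 back to Euclidean coordinates
    public static Point3D_F64 toEuclidean(Point4D_F64 X) {
        if (X.w == 0.0)
            throw new IllegalArgumentException("Point at infinity has no Euclidean representation");
        return new Point3D_F64(X.x / X.w, X.y / X.w, X.z / X.w);
    }
}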