Use of boofcv.struct.geo.Point2D3D in project BoofCV by lessthanoptimal.
Class CheckEstimate1ofPnP, method perfectObservations.
private void perfectObservations(Se3_F64 worldToCamera, int numSample) {
    List<Point2D3D> inputs = createObservations(worldToCamera, numSample);
    Se3_F64 found = new Se3_F64();
    assertTrue(alg.process(inputs, found));
    // the estimated pose must match the true pose to within numerical tolerance
    assertTrue(MatrixFeatures_DDRM.isIdentical(worldToCamera.getR(), found.getR(), 1e-8));
    assertTrue(found.getT().isIdentical(worldToCamera.getT(), 1e-8));
}
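The createObservations helper is not shown in this listing. A minimal sketch of what it might do, assuming perfect observations in normalized image coordinates and a hypothetical rand field for sampling; points are generated in front of the camera, then expressed in world coordinates:

private List<Point2D3D> createObservations(Se3_F64 worldToCamera, int total) {
    // world frame as seen from the camera
    Se3_F64 cameraToWorld = worldToCamera.invert(null);
    List<Point2D3D> observations = new ArrayList<>();
    for (int i = 0; i < total; i++) {
        // random point in front of the camera, expressed in the camera frame
        Point3D_F64 cameraPt = new Point3D_F64(
                rand.nextGaussian() * 0.5, rand.nextGaussian() * 0.5, 2 + rand.nextGaussian() * 0.5);
        // noise-free observation in normalized image coordinates
        Point2D_F64 normalized = new Point2D_F64(cameraPt.x / cameraPt.z, cameraPt.y / cameraPt.z);
        // express the same point in world coordinates
        Point3D_F64 worldPt = new Point3D_F64();
        SePointOps_F64.transform(cameraToWorld, cameraPt, worldPt);
        observations.add(new Point2D3D(normalized, worldPt));
    }
    return observations;
}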
Use of boofcv.struct.geo.Point2D3D in project BoofCV by lessthanoptimal.
Class CheckEstimateNofPnP, method perfectObservations.
private void perfectObservations(Se3_F64 worldToCamera, int numSample) {
    List<Point2D3D> inputs = createObservations(worldToCamera, numSample);
    assertTrue(alg.process(inputs, solutions));
    // at least one of the returned hypotheses must match the true motion
    int numMatched = 0;
    for (Se3_F64 found : solutions.toList()) {
        if (!MatrixFeatures_DDRM.isIdentical(worldToCamera.getR(), found.getR(), 1e-8))
            continue;
        if (!found.getT().isIdentical(worldToCamera.getT(), 1e-8))
            continue;
        numMatched++;
    }
    assertTrue(numMatched > 0);
}
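Estimators behind this interface, such as the minimal three-point P3P solvers, can return several pose hypotheses, which is why the test only requires that at least one matches. In practice a caller disambiguates with an additional observation; a hedged sketch, where extra is a hypothetical Point2D3D that was not given to the solver:

// score each hypothesis by the reprojection error of one extra observation
Se3_F64 best = null;
double bestError = Double.MAX_VALUE;
Point3D_F64 cameraPt = new Point3D_F64();
for (Se3_F64 hypothesis : solutions.toList()) {
    // project the extra 3D point into the camera using this hypothesis
    SePointOps_F64.transform(hypothesis, extra.location, cameraPt);
    double dx = extra.observation.x - cameraPt.x / cameraPt.z;
    double dy = extra.observation.y - cameraPt.y / cameraPt.z;
    double error = dx * dx + dy * dy;
    if (error < bestError) {
        bestError = error;
        best = hypothesis;
    }
}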
Use of boofcv.struct.geo.Point2D3D in project BoofCV by lessthanoptimal.
Class CheckEstimateNofPnP, method checkMultipleCalls.
/**
 * Call it multiple times and make sure the same solutions are returned.
 */
@Test
public void checkMultipleCalls() {
    List<Point2D3D> inputs = createObservations(worldToCamera0, alg.getMinimumPoints());
    assertTrue(alg.process(inputs, solutions));
    assertTrue(solutions.size() > 0);
    // save copies of the solutions since the output queue may recycle its elements
    List<Se3_F64> orig = new ArrayList<>();
    for (Se3_F64 m : solutions.toList()) {
        orig.add(m.copy());
    }
    // run it a few times and see if the output is identical
    for (int i = 0; i < 2; i++) {
        assertTrue(alg.process(inputs, solutions));
        assertEquals(orig.size(), solutions.size());
        for (int j = 0; j < orig.size(); j++) {
            Se3_F64 o = orig.get(j);
            Se3_F64 f = solutions.get(j);
            assertTrue(MatrixFeatures_DDRM.isIdentical(o.getR(), f.getR(), 1e-8));
            assertTrue(f.getT().isIdentical(o.getT(), 1e-8));
        }
    }
}
Use of boofcv.struct.geo.Point2D3D in project BoofCV by lessthanoptimal.
Class TestPnPRefineRodrigues, method perfect.
@Test
public void perfect() {
    Se3_F64 motion = new Se3_F64();
    motion.getR().set(ConvertRotation3D_F64.eulerToMatrix(EulerType.XYZ, 0.05, -0.03, 0.02, null));
    motion.getT().set(0.1, -0.1, 0.01);
    generateScene(10, motion, false);
    ModelFitter<Se3_F64, Point2D3D> alg = new PnPRefineRodrigues(1e-8, 200);
    assertTrue(alg.fitModel(pointPose, motion, found));
    // the refined translation should match the true motion
    assertEquals(motion.getT().getX(), found.getT().getX(), 1e-8);
    assertEquals(motion.getT().getY(), found.getT().getY(), 1e-8);
    assertEquals(motion.getT().getZ(), found.getT().getZ(), 1e-8);
}
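The assertions above only cover the translation. Assuming found is the refined Se3_F64, the rotation can be verified the same way the other tests in this listing do:

// the refined rotation should also match the true motion
assertTrue(MatrixFeatures_DDRM.isIdentical(motion.getR(), found.getR(), 1e-8));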
Use of boofcv.struct.geo.Point2D3D in project BoofCV by lessthanoptimal.
Class ExampleMultiviewSceneReconstruction, method estimateMotionPnP.
/**
 * Estimates the motion between two images. Image A is assumed to already have features with known 3D coordinates,
 * while image B is an unprocessed image with no 3D features yet.
 */
private void estimateMotionPnP(int imageA, int imageB) {
    // Mark image B as processed so that it isn't processed a second time.
    processedImage[imageB] = true;
    System.out.println("Estimating PnP motion between " + imageA + " and " + imageB);
    // initially prune features using the essential matrix
    Se3_F64 dummy = new Se3_F64();
    List<AssociatedIndex> inliers = new ArrayList<>();
    if (!estimateStereoPose(imageA, imageB, dummy, inliers))
        throw new RuntimeException("The first image pair is a bad keyframe!");
    FastQueue<Point2D_F64> pixelsA = imagePixels.get(imageA);
    FastQueue<Point2D_F64> pixelsB = imagePixels.get(imageB);
    List<Feature3D> featuresA = imageFeature3D.get(imageA);
    // this list should initially be empty
    List<Feature3D> featuresB = imageFeature3D.get(imageB);
    // create the associated pairs for motion estimation
    List<Point2D3D> features = new ArrayList<>();
    List<AssociatedIndex> inputRansac = new ArrayList<>();
    List<AssociatedIndex> unmatched = new ArrayList<>();
    for (int i = 0; i < inliers.size(); i++) {
        AssociatedIndex a = inliers.get(i);
        Feature3D t = lookupFeature(featuresA, imageA, pixelsA.get(a.src));
        if (t != null) {
            Point2D_F64 p = pixelsB.get(a.dst);
            features.add(new Point2D3D(p, t.worldPt));
            inputRansac.add(a);
        } else {
            unmatched.add(a);
        }
    }
    // make sure there are enough features to estimate the motion
    if (features.size() < 15) {
        System.out.println(" Too few features for PnP!! " + features.size());
        return;
    }
    // estimate the motion between the two images
    if (!estimatePnP.process(features))
        throw new RuntimeException("Motion estimation failed");
    // refine the motion estimate using non-linear optimization
    Se3_F64 motionWorldToB = new Se3_F64();
    if (!refinePnP.fitModel(estimatePnP.getMatchSet(), estimatePnP.getModelParameters(), motionWorldToB))
        throw new RuntimeException("Refine failed!?!?");
    motionWorldToCamera[imageB].set(motionWorldToB);
    estimatedImage[imageB] = true;
    // Add all tracks in the inlier set to image B's list of 3D features
    int N = estimatePnP.getMatchSet().size();
    boolean[] inlierPnP = new boolean[features.size()];
    for (int i = 0; i < N; i++) {
        int index = estimatePnP.getInputIndex(i);
        AssociatedIndex a = inputRansac.get(index);
        // find the track this observation was associated with and add it to B
        Feature3D t = lookupFeature(featuresA, imageA, pixelsA.get(a.src));
        featuresB.add(t);
        t.frame.add(imageB);
        t.obs.grow().set(pixelsB.get(a.dst));
        inlierPnP[index] = true;
    }
    // Create new tracks for all features which were inliers to the essential matrix but were not used
    // to estimate the motion with PnP.
    Se3_F64 motionBtoWorld = motionWorldToB.invert(null);
    Se3_F64 motionWorldToA = motionWorldToCamera[imageA];
    Se3_F64 motionBtoA = motionBtoWorld.concat(motionWorldToA, null);
    // triangulated point expressed in B's camera frame
    Point3D_F64 pt_in_b = new Point3D_F64();
    int totalAdded = 0;
    GrowQueue_I32 colorsA = imageColors.get(imageA);
    for (AssociatedIndex a : unmatched) {
        if (!triangulate.triangulate(pixelsB.get(a.dst), pixelsA.get(a.src), motionBtoA, pt_in_b))
            continue;
        // the feature has to be in front of the camera
        if (pt_in_b.z > 0) {
            Feature3D t = new Feature3D();
            // transform from B back to the world frame
            SePointOps_F64.transform(motionBtoWorld, pt_in_b, t.worldPt);
            t.color = colorsA.get(a.src);
            t.obs.grow().set(pixelsA.get(a.src));
            t.obs.grow().set(pixelsB.get(a.dst));
            t.frame.add(imageA);
            t.frame.add(imageB);
            featuresAll.add(t);
            featuresA.add(t);
            featuresB.add(t);
            totalAdded++;
        }
    }
    // Handle features which were matched to an existing 3D point but were not PnP inliers. It works
    // out better if the 3D coordinate is re-triangulated as a new feature.
    for (int i = 0; i < features.size(); i++) {
        if (inlierPnP[i])
            continue;
        AssociatedIndex a = inputRansac.get(i);
        if (!triangulate.triangulate(pixelsB.get(a.dst), pixelsA.get(a.src), motionBtoA, pt_in_b))
            continue;
        // the feature has to be in front of the camera
        if (pt_in_b.z > 0) {
            Feature3D t = new Feature3D();
            // transform from B back to the world frame
            SePointOps_F64.transform(motionBtoWorld, pt_in_b, t.worldPt);
            // only add this feature to image B since a similar one already exists in A
            t.color = colorsA.get(a.src);
            t.obs.grow().set(pixelsB.get(a.dst));
            t.frame.add(imageB);
            featuresAll.add(t);
            featuresB.add(t);
            totalAdded++;
        }
    }
    System.out.println(" New added " + totalAdded + " tracksA.size = " + featuresA.size() + " tracksB.size = " + featuresB.size());
}
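The lookupFeature helper used twice above is not part of this excerpt. A plausible sketch, under the assumption implied by the bookkeeping above that Feature3D stores frame indexes in frame and the matching observations at the same positions in obs:

// Search 'features' for a track that was observed at 'pixel' in frame 'frameIndex'.
// Returns null if no such track exists yet.
private Feature3D lookupFeature(List<Feature3D> features, int frameIndex, Point2D_F64 pixel) {
    for (Feature3D t : features) {
        for (int i = 0; i < t.frame.size(); i++) {
            if (t.frame.get(i) == frameIndex) {
                Point2D_F64 o = t.obs.get(i);
                // exact comparison is safe here because observations are verbatim copies of these pixels
                if (o.x == pixel.x && o.y == pixel.y)
                    return t;
            }
        }
    }
    return null;
}

A linear scan like this is quadratic over all calls; a real implementation might instead index each frame's observations by pixel.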