Use of boofcv.struct.feature.AssociatedIndex in project BoofCV by lessthanoptimal.
Class ExampleMultiviewSceneReconstruction, method estimateMotionPnP.
/**
* Estimate the motion between two images. Image A is assumed to have known features with 3D coordinates already
* and image B is an unprocessed image with no 3D features yet.
*/
private void estimateMotionPnP(int imageA, int imageB) {
    // Mark image B as processed so that it isn't processed a second time.
    processedImage[imageB] = true;
    System.out.println("Estimating PnP motion between " + imageA + " and " + imageB);

    // initially prune features using the essential matrix
    Se3_F64 dummy = new Se3_F64();
    List<AssociatedIndex> inliers = new ArrayList<>();
    if (!estimateStereoPose(imageA, imageB, dummy, inliers))
        throw new RuntimeException("The first image pair is a bad keyframe!");

    FastQueue<Point2D_F64> pixelsA = imagePixels.get(imageA);
    FastQueue<Point2D_F64> pixelsB = imagePixels.get(imageB);
    List<Feature3D> featuresA = imageFeature3D.get(imageA);
    List<Feature3D> featuresB = imageFeature3D.get(imageB); // this should be empty

    // create the associated pairs for motion estimation
    List<Point2D3D> features = new ArrayList<>();
    List<AssociatedIndex> inputRansac = new ArrayList<>();
    List<AssociatedIndex> unmatched = new ArrayList<>();
    for (int i = 0; i < inliers.size(); i++) {
        AssociatedIndex a = inliers.get(i);
        Feature3D t = lookupFeature(featuresA, imageA, pixelsA.get(a.src));
        if (t != null) {
            Point2D_F64 p = pixelsB.get(a.dst);
            features.add(new Point2D3D(p, t.worldPt));
            inputRansac.add(a);
        } else {
            unmatched.add(a);
        }
    }

    // make sure there are enough features to estimate the motion
    if (features.size() < 15) {
        System.out.println(" Too few features for PnP!! " + features.size());
        return;
    }

    // estimate the motion between the two images
    if (!estimatePnP.process(features))
        throw new RuntimeException("Motion estimation failed");

    // refine the motion estimate using non-linear optimization
    Se3_F64 motionWorldToB = new Se3_F64();
    if (!refinePnP.fitModel(estimatePnP.getMatchSet(), estimatePnP.getModelParameters(), motionWorldToB))
        throw new RuntimeException("Refine failed!?!?");

    motionWorldToCamera[imageB].set(motionWorldToB);
    estimatedImage[imageB] = true;

    // Add all tracks in the inlier list to image B's list of 3D features
    int N = estimatePnP.getMatchSet().size();
    boolean[] inlierPnP = new boolean[features.size()];
    for (int i = 0; i < N; i++) {
        int index = estimatePnP.getInputIndex(i);
        AssociatedIndex a = inputRansac.get(index);

        // find the track that this was associated with and add it to B
        Feature3D t = lookupFeature(featuresA, imageA, pixelsA.get(a.src));
        featuresB.add(t);
        t.frame.add(imageB);
        t.obs.grow().set(pixelsB.get(a.dst));
        inlierPnP[index] = true;
    }

    // Create new tracks for all features which were in the essential-matrix inlier set but were not
    // used to estimate the motion with PnP.
    Se3_F64 motionBtoWorld = motionWorldToB.invert(null);
    Se3_F64 motionWorldToA = motionWorldToCamera[imageA];
    Se3_F64 motionBtoA = motionBtoWorld.concat(motionWorldToA, null);
    Point3D_F64 pt_in_b = new Point3D_F64();

    int totalAdded = 0;
    GrowQueue_I32 colorsA = imageColors.get(imageA);
    for (AssociatedIndex a : unmatched) {
        if (!triangulate.triangulate(pixelsB.get(a.dst), pixelsA.get(a.src), motionBtoA, pt_in_b))
            continue;
        // the feature has to be in front of the camera
        if (pt_in_b.z > 0) {
            Feature3D t = new Feature3D();

            // transform from B back into the world frame
            SePointOps_F64.transform(motionBtoWorld, pt_in_b, t.worldPt);

            t.color = colorsA.get(a.src);
            t.obs.grow().set(pixelsA.get(a.src));
            t.obs.grow().set(pixelsB.get(a.dst));
            t.frame.add(imageA);
            t.frame.add(imageB);

            featuresAll.add(t);
            featuresA.add(t);
            featuresB.add(t);
            totalAdded++;
        }
    }

    // Matches which already had a 3D feature in image A but were not PnP inliers may work
    // out better if the 3D coordinate is re-triangulated as a new feature
    for (int i = 0; i < features.size(); i++) {
        if (inlierPnP[i])
            continue;
        AssociatedIndex a = inputRansac.get(i);

        if (!triangulate.triangulate(pixelsB.get(a.dst), pixelsA.get(a.src), motionBtoA, pt_in_b))
            continue;
        // the feature has to be in front of the camera
        if (pt_in_b.z > 0) {
            Feature3D t = new Feature3D();

            // transform from B back into the world frame
            SePointOps_F64.transform(motionBtoWorld, pt_in_b, t.worldPt);

            // only add this feature to image B since a similar one already exists in image A
            t.color = colorsA.get(a.src);
            t.obs.grow().set(pixelsB.get(a.dst));
            t.frame.add(imageB);

            featuresAll.add(t);
            featuresB.add(t);
            totalAdded++;
        }
    }

    System.out.println(" New added " + totalAdded + " tracksA.size = " + featuresA.size() + " tracksB.size = " + featuresB.size());
}
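The method above calls a lookupFeature() helper that is not included in this snippet. Below is a minimal sketch of what such a helper might look like; it assumes Feature3D keeps a per-observation frame list (List<Integer>) in parallel with its obs queue, consistent with how frame and obs are filled in above, and the match tolerance is an invented value rather than one taken from the original example.

// Hypothetical sketch of the lookupFeature() helper used above: search image A's existing 3D tracks
// for one whose recorded observation in imageA matches the given pixel.
private Feature3D lookupFeature(List<Feature3D> featuresA, int imageA, Point2D_F64 pixel) {
    for (int i = 0; i < featuresA.size(); i++) {
        Feature3D t = featuresA.get(i);
        // find which of this track's observations belongs to imageA, if any
        for (int j = 0; j < t.frame.size(); j++) {
            if (t.frame.get(j) != imageA)
                continue;
            // treat a (nearly) identical pixel location as the same feature; the tolerance is an assumption
            if (t.obs.get(j).distance2(pixel) < 1e-8)
                return t;
        }
    }
    return null;
}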
Use of boofcv.struct.feature.AssociatedIndex in project BoofCV by lessthanoptimal.
Class ExampleMultiviewSceneReconstruction, method estimateStereoPose.
/**
* Given two images, compute the relative pose between them using the essential matrix.
*/
protected boolean estimateStereoPose(int imageA, int imageB, Se3_F64 motionAtoB, List<AssociatedIndex> inliers) {
    // associate the features together
    associate.setSource(imageVisualFeatures.get(imageA));
    associate.setDestination(imageVisualFeatures.get(imageB));
    associate.associate();

    FastQueue<AssociatedIndex> matches = associate.getMatches();

    // create the associated pairs for motion estimation
    FastQueue<Point2D_F64> pixelsA = imagePixels.get(imageA);
    FastQueue<Point2D_F64> pixelsB = imagePixels.get(imageB);
    List<AssociatedPair> pairs = new ArrayList<>();
    for (int i = 0; i < matches.size(); i++) {
        AssociatedIndex a = matches.get(i);
        pairs.add(new AssociatedPair(pixelsA.get(a.src), pixelsB.get(a.dst)));
    }

    if (!estimateEssential.process(pairs))
        throw new RuntimeException("Motion estimation failed");

    // save the found motion and the inlier matches
    List<AssociatedPair> inliersEssential = estimateEssential.getMatchSet();
    motionAtoB.set(estimateEssential.getModelParameters());

    for (int i = 0; i < inliersEssential.size(); i++) {
        int index = estimateEssential.getInputIndex(i);
        inliers.add(matches.get(index));
    }

    return true;
}
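A short, hedged sketch of how the inlier list produced by estimateStereoPose() can be consumed. It mirrors the triangulation pattern used in estimateMotionPnP() above and reuses the imagePixels and triangulate members from this example; the method name seedInitialPair is hypothetical.

// Hypothetical caller: estimate the relative pose of an image pair and triangulate each inlier match.
private void seedInitialPair(int imageA, int imageB) {
    Se3_F64 motionAtoB = new Se3_F64();
    List<AssociatedIndex> inliers = new ArrayList<>();
    if (!estimateStereoPose(imageA, imageB, motionAtoB, inliers))
        return;

    FastQueue<Point2D_F64> pixelsA = imagePixels.get(imageA);
    FastQueue<Point2D_F64> pixelsB = imagePixels.get(imageB);

    Point3D_F64 pt_in_a = new Point3D_F64();
    for (AssociatedIndex a : inliers) {
        // triangulate the match in image A's coordinate frame
        if (!triangulate.triangulate(pixelsA.get(a.src), pixelsB.get(a.dst), motionAtoB, pt_in_a))
            continue;
        // only keep points in front of the camera
        if (pt_in_a.z > 0)
            System.out.println("inlier " + a.src + " <-> " + a.dst + "  depth = " + pt_in_a.z);
    }
}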
Use of boofcv.struct.feature.AssociatedIndex in project BoofCV by lessthanoptimal.
Class ExampleFundamentalMatrix, method computeMatches.
/**
* Use the associate point feature example to create a list of {@link AssociatedPair} for use in computing the
* fundamental matrix.
*/
public static List<AssociatedPair> computeMatches(BufferedImage left, BufferedImage right) {
    DetectDescribePoint detDesc = FactoryDetectDescribe.surfStable(
            new ConfigFastHessian(1, 2, 200, 1, 9, 4, 4), null, null, GrayF32.class);
//  DetectDescribePoint detDesc = FactoryDetectDescribe.sift(null, new ConfigSiftDetector(2,0,200,5), null, null);

    ScoreAssociation<BrightFeature> scorer = FactoryAssociation.scoreEuclidean(BrightFeature.class, true);
    AssociateDescription<BrightFeature> associate = FactoryAssociation.greedy(scorer, 1, true);

    ExampleAssociatePoints<GrayF32, BrightFeature> findMatches = new ExampleAssociatePoints<>(detDesc, associate, GrayF32.class);
    findMatches.associate(left, right);

    List<AssociatedPair> matches = new ArrayList<>();
    FastQueue<AssociatedIndex> matchIndexes = associate.getMatches();
    for (int i = 0; i < matchIndexes.size; i++) {
        AssociatedIndex a = matchIndexes.get(i);
        AssociatedPair p = new AssociatedPair(findMatches.pointsA.get(a.src), findMatches.pointsB.get(a.dst));
        matches.add(p);
    }

    return matches;
}
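A hedged usage sketch for computeMatches(). The image file names are placeholders; the pairs returned here are what the rest of ExampleFundamentalMatrix feeds into its fundamental-matrix estimation.

// Hypothetical driver: load two views and print the associated pixel pairs.
public static void main(String[] args) {
    BufferedImage left = UtilImageIO.loadImage("left.jpg");
    BufferedImage right = UtilImageIO.loadImage("right.jpg");

    List<AssociatedPair> matches = computeMatches(left, right);
    System.out.println("Found " + matches.size() + " associated pairs");

    // each AssociatedPair stores the matched pixel location in each image (p1 in left, p2 in right)
    for (AssociatedPair p : matches) {
        System.out.printf("(%6.1f, %6.1f) <-> (%6.1f, %6.1f)%n", p.p1.x, p.p1.y, p.p2.x, p.p2.y);
    }
}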
Use of boofcv.struct.feature.AssociatedIndex in project BoofCV by lessthanoptimal.
Class StandardAssociateDescriptionChecks, method performBasicTest.
private void performBasicTest(int numFeatures) {
    init();

    AssociateDescription<Desc> alg = createAlg();
    alg.setThreshold(0.01);

    for (int i = 0; i < numFeatures; i++) {
        listSrc.add(c(i + 1));
        listDst.add(c(i + 1 + 0.001));
    }

    alg.setSource(listSrc);
    alg.setDestination(listDst);
    alg.associate();

    FastQueue<AssociatedIndex> matches = alg.getMatches();

    // every feature should be associated
    assertEquals(numFeatures, matches.size());

    // see if everything is assigned as expected
    for (int i = 0; i < matches.size(); i++) {
        int numMatches = 0;
        for (int j = 0; j < matches.size(); j++) {
            AssociatedIndex a = matches.get(j);

            if (i == a.src) {
                assertEquals(a.src, a.dst);
                assertTrue(a.fitScore != 0);
                numMatches++;
            }
        }
        assertEquals(1, numMatches);
    }

    // in this example there should be perfect, unambiguous associations
    assertEquals(0, alg.getUnassociatedSource().size);
    assertEquals(0, alg.getUnassociatedDestination().size);
}
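The test relies on a c(value) helper and an abstract createAlg() defined elsewhere in the checks class. A minimal sketch of what c(value) might look like, assuming the descriptor type is TupleDesc_F64 and that its array field is named value (older BoofCV releases; newer ones call it data):

// Hypothetical sketch: wrap a single scalar in a length-one descriptor so associations can be checked by value.
protected TupleDesc_F64 c(double value) {
    TupleDesc_F64 desc = new TupleDesc_F64(1); // descriptor of length one
    desc.value[0] = value;                     // field name is an assumption for this BoofCV version
    return desc;
}

With alg.setThreshold(0.01), the descriptors c(i + 1) and c(i + 1 + 0.001) differ by only 0.001, well inside the threshold, so every source feature should find exactly one destination match.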
Use of boofcv.struct.feature.AssociatedIndex in project BoofCV by lessthanoptimal.
Class ExampleImageStitching, method computeTransform.
/**
* Using abstracted code, find a transform which minimizes the difference between corresponding features
* in both images. This code is completely model independent and contains the core algorithm.
*/
public static <T extends ImageGray<T>, FD extends TupleDesc> Homography2D_F64
        computeTransform(T imageA, T imageB,
                         DetectDescribePoint<T, FD> detDesc,
                         AssociateDescription<FD> associate,
                         ModelMatcher<Homography2D_F64, AssociatedPair> modelMatcher) {
    // storage for the detected feature locations and descriptions in each image
    List<Point2D_F64> pointsA = new ArrayList<>();
    FastQueue<FD> descA = UtilFeature.createQueue(detDesc, 100);
    List<Point2D_F64> pointsB = new ArrayList<>();
    FastQueue<FD> descB = UtilFeature.createQueue(detDesc, 100);

    // extract feature locations and descriptions from each image
    describeImage(imageA, detDesc, pointsA, descA);
    describeImage(imageB, detDesc, pointsB, descB);

    // associate features between the two images
    associate.setSource(descA);
    associate.setDestination(descB);
    associate.associate();

    // create a list of AssociatedPairs that tell the model matcher how a feature moved
    FastQueue<AssociatedIndex> matches = associate.getMatches();
    List<AssociatedPair> pairs = new ArrayList<>();
    for (int i = 0; i < matches.size(); i++) {
        AssociatedIndex match = matches.get(i);
        Point2D_F64 a = pointsA.get(match.src);
        Point2D_F64 b = pointsB.get(match.dst);
        pairs.add(new AssociatedPair(a, b, false));
    }

    // find the best-fit model to describe the change between these images
    if (!modelMatcher.process(pairs))
        throw new RuntimeException("Model Matcher failed!");

    // return the found image transform
    return modelMatcher.getModelParameters().copy();
}
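A hedged sketch showing how the returned homography might be used. HomographyPointOps_F64 comes from the georegression library that BoofCV depends on; the input variables and pixel coordinates are placeholders, and the transform is assumed to map image A into image B given how the pairs were constructed above.

// somewhere in the caller, after detDesc, associate, and modelMatcher have been created
Homography2D_F64 AtoB = computeTransform(imageA, imageB, detDesc, associate, modelMatcher);

Point2D_F64 pixelInA = new Point2D_F64(100, 150);
Point2D_F64 pixelInB = new Point2D_F64();

// map a pixel in image A into image B's coordinate system
HomographyPointOps_F64.transform(AtoB, pixelInA, pixelInB);
System.out.println("A " + pixelInA + "  ->  B " + pixelInB);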