Use of boofcv.struct.sfm.Stereo2D3D in project BoofCV by lessthanoptimal.
From class FactoryVisualOdometry, method stereoQuadPnP.
/**
* Stereo visual odometry which uses the two most recent stereo observations (total of four views) to estimate
* motion.
*
* @see VisOdomQuadPnP
*
* @param inlierPixelTol Pixel tolerance for RANSAC inliers - Euclidean distance
* @param epipolarPixelTol Feature association tolerance in pixels along the epipolar line
* @param maxDistanceF2F Maximum allowed distance, in pixels, between two associated features from frame to frame
* @param maxAssociationError Maximum error between two features when associating.
* @param ransacIterations Number of iterations RANSAC will perform
* @param refineIterations Number of refinement iterations
* @param detector Which feature detector to use
* @param imageType Type of input image
*/
public static <T extends ImageGray<T>, Desc extends TupleDesc>
StereoVisualOdometry<T> stereoQuadPnP(double inlierPixelTol, double epipolarPixelTol,
                                      double maxDistanceF2F, double maxAssociationError,
                                      int ransacIterations, int refineIterations,
                                      DetectDescribeMulti<T, Desc> detector, Class<T> imageType) {
    EstimateNofPnP pnp = FactoryMultiView.computePnP_N(EnumPNP.P3P_FINSTERWALDER, -1);
    DistanceModelMonoPixels<Se3_F64, Point2D3D> distanceMono = new PnPDistanceReprojectionSq();
    PnPStereoDistanceReprojectionSq distanceStereo = new PnPStereoDistanceReprojectionSq();
    PnPStereoEstimator pnpStereo = new PnPStereoEstimator(pnp, distanceMono, 0);
    ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
    EstimatorToGenerator<Se3_F64, Stereo2D3D> generator = new EstimatorToGenerator<>(pnpStereo);

    // Euclidean error squared from left + right images
    double ransacTOL = 2 * inlierPixelTol * inlierPixelTol;

    ModelMatcher<Se3_F64, Stereo2D3D> motion =
            new Ransac<>(2323, manager, generator, distanceStereo, ransacIterations, ransacTOL);

    RefinePnPStereo refinePnP = null;
    if (refineIterations > 0) {
        refinePnP = new PnPStereoRefineRodrigues(1e-12, refineIterations);
    }

    Class<Desc> descType = detector.getDescriptionType();
    ScoreAssociation<Desc> scorer = FactoryAssociation.defaultScore(descType);

    AssociateDescription2D<Desc> assocSame;
    if (maxDistanceF2F > 0)
        assocSame = new AssociateMaxDistanceNaive<>(scorer, true, maxAssociationError, maxDistanceF2F);
    else
        assocSame = new AssociateDescTo2D<>(FactoryAssociation.greedy(scorer, maxAssociationError, true));

    AssociateStereo2D<Desc> associateStereo = new AssociateStereo2D<>(scorer, epipolarPixelTol, descType);
    associateStereo.setThreshold(maxAssociationError);

    TriangulateTwoViewsCalibrated triangulate = FactoryMultiView.triangulateTwoGeometric();

    VisOdomQuadPnP<T, Desc> alg = new VisOdomQuadPnP<>(detector, assocSame, associateStereo,
            triangulate, motion, refinePnP);

    return new WrapVisOdomQuadPnP<>(alg, refinePnP, associateStereo, distanceStereo, distanceMono, imageType);
}
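A minimal usage sketch of this factory, assuming the caller supplies a DetectDescribeMulti implementation and calibrated StereoParameters. The parameter values are illustrative only, the helper method is hypothetical, and the import paths assume a BoofCV release of the same vintage as this snippet.

import boofcv.abst.feature.detdesc.DetectDescribeMulti;
import boofcv.abst.sfm.d3.StereoVisualOdometry;
import boofcv.factory.sfm.FactoryVisualOdometry;
import boofcv.struct.calib.StereoParameters;
import boofcv.struct.feature.TupleDesc;
import boofcv.struct.image.GrayF32;
import georegression.struct.se.Se3_F64;

public class QuadPnPUsage {
    // Hypothetical helper: builds the odometry, feeds it one stereo pair,
    // and returns the estimated left-camera-to-world motion.
    public static <Desc extends TupleDesc> Se3_F64 estimate(
            DetectDescribeMulti<GrayF32, Desc> detector,
            StereoParameters stereoParam,
            GrayF32 left, GrayF32 right) {
        StereoVisualOdometry<GrayF32> visodom = FactoryVisualOdometry.stereoQuadPnP(
                1.5,   // inlierPixelTol: RANSAC inlier tolerance in pixels
                2.0,   // epipolarPixelTol: stereo association tolerance
                200,   // maxDistanceF2F: max pixel motion between frames
                0.5,   // maxAssociationError: descriptor distance cutoff
                300,   // ransacIterations
                50,    // refineIterations
                detector, GrayF32.class);
        visodom.setCalibration(stereoParam);
        if (!visodom.process(left, right))
            throw new RuntimeException("motion estimation failed");
        return visodom.getCameraToWorld();
    }
}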
Use of boofcv.struct.sfm.Stereo2D3D in project BoofCV by lessthanoptimal.
From class FactoryVisualOdometry, method stereoDualTrackerPnP.
/**
* Creates a stereo visual odometry algorithm that independently tracks features in left and right camera.
*
* @see VisOdomDualTrackPnP
*
* @param thresholdAdd When the number of inliers drops below this number, new features are detected
* @param thresholdRetire When a feature has not been in the inlier list for this many ticks, it is dropped
* @param inlierPixelTol Tolerance in pixels for defining an inlier during robust model matching. Typically 1.5
* @param epipolarPixelTol Tolerance in pixels for enforcing the epipolar constraint
* @param ransacIterations Number of iterations performed by RANSAC. Try 300 or more.
* @param refineIterations Number of iterations done during non-linear optimization. Try 50 or more.
* @param trackerLeft Tracker used for left camera
* @param trackerRight Tracker used for right camera
* @param descriptor Computes descriptors for tracked features so they can be associated between the two cameras
* @param imageType Type of image being processed
* @return Stereo visual odometry algorithm.
*/
public static <T extends ImageGray<T>, Desc extends TupleDesc>
StereoVisualOdometry<T> stereoDualTrackerPnP(int thresholdAdd, int thresholdRetire,
                                             double inlierPixelTol, double epipolarPixelTol,
                                             int ransacIterations, int refineIterations,
                                             PointTracker<T> trackerLeft, PointTracker<T> trackerRight,
                                             DescribeRegionPoint<T, Desc> descriptor, Class<T> imageType) {
    EstimateNofPnP pnp = FactoryMultiView.computePnP_N(EnumPNP.P3P_FINSTERWALDER, -1);
    DistanceModelMonoPixels<Se3_F64, Point2D3D> distanceMono = new PnPDistanceReprojectionSq();
    PnPStereoDistanceReprojectionSq distanceStereo = new PnPStereoDistanceReprojectionSq();
    PnPStereoEstimator pnpStereo = new PnPStereoEstimator(pnp, distanceMono, 0);
    ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
    EstimatorToGenerator<Se3_F64, Stereo2D3D> generator = new EstimatorToGenerator<>(pnpStereo);

    // Pixel tolerance for RANSAC inliers - Euclidean error squared from left + right images
    double ransacTOL = 2 * inlierPixelTol * inlierPixelTol;

    ModelMatcher<Se3_F64, Stereo2D3D> motion =
            new Ransac<>(2323, manager, generator, distanceStereo, ransacIterations, ransacTOL);

    RefinePnPStereo refinePnP = null;

    Class<Desc> descType = descriptor.getDescriptionType();
    ScoreAssociation<Desc> scorer = FactoryAssociation.defaultScore(descType);
    AssociateStereo2D<Desc> associateStereo = new AssociateStereo2D<>(scorer, epipolarPixelTol, descType);

    // need to make sure associations are unique
    AssociateDescription2D<Desc> associateUnique = associateStereo;
    if (!associateStereo.uniqueDestination() || !associateStereo.uniqueSource()) {
        associateUnique = new EnforceUniqueByScore.Describe2D<>(associateStereo, true, true);
    }

    if (refineIterations > 0) {
        refinePnP = new PnPStereoRefineRodrigues(1e-12, refineIterations);
    }

    TriangulateTwoViewsCalibrated triangulate = FactoryMultiView.triangulateTwoGeometric();

    VisOdomDualTrackPnP<T, Desc> alg = new VisOdomDualTrackPnP<>(thresholdAdd, thresholdRetire,
            epipolarPixelTol, trackerLeft, trackerRight, descriptor, associateUnique,
            triangulate, motion, refinePnP);

    return new WrapVisOdomDualTrackPnP<>(pnpStereo, distanceMono, distanceStereo, associateStereo,
            alg, refinePnP, imageType);
}
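A corresponding sketch for the dual-tracker variant. Again the values are illustrative, the helper is hypothetical, and the two point trackers plus the region descriptor are assumed to be constructed elsewhere (e.g. via BoofCV's tracker and descriptor factories).

import boofcv.abst.feature.describe.DescribeRegionPoint;
import boofcv.abst.feature.tracker.PointTracker;
import boofcv.abst.sfm.d3.StereoVisualOdometry;
import boofcv.factory.sfm.FactoryVisualOdometry;
import boofcv.struct.feature.TupleDesc;
import boofcv.struct.image.GrayF32;

public class DualTrackerUsage {
    // Hypothetical helper: wires user-supplied trackers and a descriptor
    // into the dual-tracker stereo odometry.
    public static <Desc extends TupleDesc> StereoVisualOdometry<GrayF32> create(
            PointTracker<GrayF32> trackerLeft,
            PointTracker<GrayF32> trackerRight,
            DescribeRegionPoint<GrayF32, Desc> descriptor) {
        return FactoryVisualOdometry.stereoDualTrackerPnP(
                120,   // thresholdAdd: spawn new tracks when inliers drop below this
                50,    // thresholdRetire: drop a track after this many frames outside the inlier set
                1.5,   // inlierPixelTol
                2.0,   // epipolarPixelTol
                300,   // ransacIterations
                50,    // refineIterations
                trackerLeft, trackerRight, descriptor, GrayF32.class);
    }
}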
Use of boofcv.struct.sfm.Stereo2D3D in project BoofCV by lessthanoptimal.
From class PnPStereoEstimator, method process.
@Override
public boolean process(List<Stereo2D3D> points, Se3_F64 estimatedModel) {
    int N = alg.getMinimumPoints();

    // create a list of observations from the left camera
    monoPoints.reset();
    for (int i = 0; i < N; i++) {
        Stereo2D3D s = points.get(i);
        Point2D3D p = monoPoints.grow();
        p.observation = s.leftObs;
        p.location = s.location;
    }

    // compute candidate solutions from the minimal set
    solutions.reset();
    alg.process(monoPoints.toList(), solutions);

    // reuse one element as temporary storage when computing distances
    Point2D3D p = monoPoints.get(0);

    // use observations from the left and right cameras to select the best solution
    Se3_F64 bestMotion = null;
    double bestError = Double.MAX_VALUE;

    for (int i = 0; i < solutions.size; i++) {
        Se3_F64 worldToLeft = solutions.data[i];
        double totalError = 0;

        // use the extra observations from the left camera
        distance.setModel(worldToLeft);
        for (int j = N; j < points.size(); j++) {
            Stereo2D3D s = points.get(j);
            p.observation = s.leftObs;
            p.location = s.location;
            totalError += distance.computeDistance(p);
        }

        // use all observations from the right camera
        worldToLeft.concat(leftToRight, worldToRight);
        distance.setModel(worldToRight);
        for (int j = 0; j < points.size(); j++) {
            Stereo2D3D s = points.get(j);
            p.observation = s.rightObs;
            p.location = s.location;
            totalError += distance.computeDistance(p);
        }

        if (totalError < bestError) {
            bestError = totalError;
            bestMotion = worldToLeft;
        }
    }

    if (bestMotion == null)
        return false;

    estimatedModel.set(bestMotion);
    return true;
}
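The key step above is worldToLeft.concat(leftToRight, worldToRight), which chains rigid-body transforms so each candidate pose can also be scored against right-camera observations. A self-contained sketch of that GeoRegression idiom, with made-up translations:

import georegression.struct.point.Point3D_F64;
import georegression.struct.se.Se3_F64;
import georegression.transform.se.SePointOps_F64;

public class ConcatDemo {
    public static void main(String[] args) {
        Se3_F64 worldToLeft = new Se3_F64();    // identity rotation, for brevity
        worldToLeft.getT().set(0, 0, -2);       // made-up translation
        Se3_F64 leftToRight = new Se3_F64();
        leftToRight.getT().set(-0.12, 0, 0);    // e.g. a 12 cm stereo baseline

        // a.concat(b, out): out applies a first, then b, i.e. world -> right
        Se3_F64 worldToRight = new Se3_F64();
        worldToLeft.concat(leftToRight, worldToRight);

        // transforming a world point by worldToRight gives the same result as
        // transforming it by worldToLeft and then by leftToRight
        Point3D_F64 X = new Point3D_F64(0, 0, 3);
        Point3D_F64 inRight = new Point3D_F64();
        SePointOps_F64.transform(worldToRight, X, inRight);
        System.out.println(inRight);
    }
}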
Use of boofcv.struct.sfm.Stereo2D3D in project BoofCV by lessthanoptimal.
From class VisOdomQuadPnP, method estimateMotion.
/**
 * Estimates the camera's egomotion between the two most recent image frames.
 *
 * @return true if motion was successfully estimated, false otherwise
 */
private boolean estimateMotion() {
    modelFitData.reset();

    Point2D_F64 normLeft = new Point2D_F64();
    Point2D_F64 normRight = new Point2D_F64();

    // use 0 -> 1 stereo associations to estimate each feature's 3D position
    for (int i = 0; i < quadViews.size; i++) {
        QuadView obs = quadViews.get(i);

        // convert old stereo view to normalized coordinates
        leftImageToNorm.compute(obs.v0.x, obs.v0.y, normLeft);
        rightImageToNorm.compute(obs.v1.x, obs.v1.y, normRight);

        // compute 3D location using triangulation
        triangulate.triangulate(normLeft, normRight, leftToRight, obs.X);

        // add to data set for fitting if not at infinity
        if (!Double.isInfinite(obs.X.normSq())) {
            Stereo2D3D data = modelFitData.grow();
            leftImageToNorm.compute(obs.v2.x, obs.v2.y, data.leftObs);
            rightImageToNorm.compute(obs.v3.x, obs.v3.y, data.rightObs);
            data.location.set(obs.X);
        }
    }

    // robustly match the data
    if (!matcher.process(modelFitData.toList()))
        return false;

    Se3_F64 oldToNew = matcher.getModelParameters();

    // optionally refine the results
    if (modelRefiner != null) {
        Se3_F64 found = new Se3_F64();
        if (modelRefiner.fitModel(matcher.getMatchSet(), oldToNew, found)) {
            found.invert(newToOld);
        } else {
            oldToNew.invert(newToOld);
        }
    } else {
        oldToNew.invert(newToOld);
    }

    // compound the just found motion with the previously found motion
    Se3_F64 temp = new Se3_F64();
    newToOld.concat(leftCamToWorld, temp);
    leftCamToWorld.set(temp);

    return true;
}
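The triangulation call above is the standard two-view geometric triangulation from FactoryMultiView. A minimal standalone sketch, assuming the caller already has the same feature in normalized image coordinates from each camera plus the left-to-right extrinsics:

import boofcv.abst.geo.TriangulateTwoViewsCalibrated;
import boofcv.factory.geo.FactoryMultiView;
import georegression.struct.point.Point2D_F64;
import georegression.struct.point.Point3D_F64;
import georegression.struct.se.Se3_F64;

public class TriangulateDemo {
    // Hypothetical helper: recovers the 3D location of one matched feature
    // in the left camera's reference frame.
    public static Point3D_F64 locate(Point2D_F64 normLeft, Point2D_F64 normRight,
                                     Se3_F64 leftToRight) {
        TriangulateTwoViewsCalibrated triangulate = FactoryMultiView.triangulateTwoGeometric();
        Point3D_F64 X = new Point3D_F64();
        if (!triangulate.triangulate(normLeft, normRight, leftToRight, X))
            return null;   // degenerate geometry
        return X;
    }
}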