Use of georegression.fitting.se.ModelManagerSe3_F64 in project BoofCV by lessthanoptimal.
Class FactoryMultiViewRobust, method epipolarLMedS.
private static LeastMedianOfSquares<Se3_F64, AssociatedPair> epipolarLMedS(Estimate1ofEpipolar epipolar,
        CameraPinholeRadial intrinsic, ConfigLMedS configLMedS) {
    TriangulateTwoViewsCalibrated triangulate = FactoryMultiView.triangulateTwoGeometric();
    ModelManager<Se3_F64> manager = new ModelManagerSe3_F64();
    ModelGenerator<Se3_F64, AssociatedPair> generateEpipolarMotion =
            new Se3FromEssentialGenerator(epipolar, triangulate);
    DistanceFromModel<Se3_F64, AssociatedPair> distanceSe3 = new DistanceSe3SymmetricSq(triangulate,
            intrinsic.fx, intrinsic.fy, intrinsic.skew, intrinsic.fx, intrinsic.fy, intrinsic.skew);
    LeastMedianOfSquares<Se3_F64, AssociatedPair> config = new LeastMedianOfSquares<>(
            configLMedS.randSeed, configLMedS.totalCycles, manager, generateEpipolarMotion, distanceSe3);
    config.setErrorFraction(configLMedS.errorFraction);
    return config;
}
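
The helper above is private to FactoryMultiViewRobust; its return value reaches callers as a DDogleg ModelMatcher. A minimal sketch of how the constructed estimator would be driven (illustrative, not part of the BoofCV source), assuming `robust` is the object returned above and `matches` is a previously built List<AssociatedPair> of corresponding observations in normalized image coordinates:

// `robust` is the LeastMedianOfSquares<Se3_F64, AssociatedPair> built above;
// `matches` is an assumed List<AssociatedPair> in normalized image coordinates.
if (!robust.process(matches))
    throw new RuntimeException("LMedS failed to estimate the motion");

Se3_F64 motion = robust.getModelParameters();         // estimated Se3_F64 camera motion
List<AssociatedPair> inliers = robust.getMatchSet();  // observations consistent with it
double fitQuality = robust.getFitQuality();           // quality-of-fit metric for the selected model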
Use of georegression.fitting.se.ModelManagerSe3_F64 in project BoofCV by lessthanoptimal.
Class FactoryVisualOdometry, method depthDepthPnP.
/**
 * Depth sensor based visual odometry algorithm which runs a sparse feature tracker in the visual camera and
 * estimates the range of tracks once when first detected using the depth sensor.
 *
 * @see VisOdomPixelDepthPnP
 *
 * @param inlierPixelTol Tolerance in pixels for defining an inlier during robust model matching. Typically 1.5
 * @param thresholdAdd Add new tracks when less than this number are in the inlier set. Tracker dependent. Set to
 *                     a value ≤ 0 to add features every frame.
 * @param thresholdRetire Discard a track if it is not in the inlier set after this many updates. Try 2
 * @param ransacIterations Number of iterations RANSAC will perform
 * @param refineIterations Number of non-linear refinement iterations. Set to ≤ 0 to disable.
 * @param doublePass If true, the tracker performs a second pass guided by the initial motion estimate.
 * @param sparseDepth Extracts depth of pixels from a depth sensor.
 * @param tracker Feature tracker used in the visual camera.
 * @param visualType Type of visual image being processed.
 * @param depthType Type of depth image being processed.
 * @return DepthVisualOdometry
 */
public static <Vis extends ImageGray<Vis>, Depth extends ImageGray<Depth>> DepthVisualOdometry<Vis, Depth>
depthDepthPnP(double inlierPixelTol, int thresholdAdd, int thresholdRetire, int ransacIterations,
              int refineIterations, boolean doublePass, DepthSparse3D<Depth> sparseDepth,
              PointTrackerTwoPass<Vis> tracker, Class<Vis> visualType, Class<Depth> depthType) {
    // Range from sparse disparity
    ImagePixelTo3D pixelTo3D = new DepthSparse3D_to_PixelTo3D<>(sparseDepth);
    Estimate1ofPnP estimator = FactoryMultiView.computePnP_1(EnumPNP.P3P_FINSTERWALDER, -1, 2);
    final DistanceModelMonoPixels<Se3_F64, Point2D3D> distance = new PnPDistanceReprojectionSq();
    ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
    EstimatorToGenerator<Se3_F64, Point2D3D> generator = new EstimatorToGenerator<>(estimator);
    // Squared pixel tolerance for RANSAC inliers
    double ransacTOL = inlierPixelTol * inlierPixelTol;
    ModelMatcher<Se3_F64, Point2D3D> motion =
            new Ransac<>(2323, manager, generator, distance, ransacIterations, ransacTOL);
    RefinePnP refine = null;
    if (refineIterations > 0) {
        refine = FactoryMultiView.refinePnP(1e-12, refineIterations);
    }
    VisOdomPixelDepthPnP<Vis> alg = new VisOdomPixelDepthPnP<>(thresholdAdd, thresholdRetire, doublePass,
            motion, pixelTo3D, refine, tracker, null, null);
    return new VisOdomPixelDepthPnP_to_DepthVisualOdometry<>(sparseDepth, alg, distance,
            ImageType.single(visualType), depthType);
}
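
A rough usage sketch for the factory method above (illustrative, not from the BoofCV source): the numeric arguments are example values only, `sparseDepth` and `tracker` are assumed to have been configured elsewhere, and the setCalibration(...) call that supplies the visual camera model and the visual-to-depth pixel transform is omitted.

// Example parameter values only; sparseDepth and tracker are assumed to already exist.
DepthVisualOdometry<GrayU8, GrayU16> visOdom = FactoryVisualOdometry.depthDepthPnP(
        1.5, 120, 2, 200, 50, true, sparseDepth, tracker, GrayU8.class, GrayU16.class);

// visOdom.setCalibration(...) must be called before processing frames (omitted here).

while (hasNextFrame()) {                    // hypothetical frame source
    GrayU8 visual = nextVisualFrame();      // hypothetical
    GrayU16 depth = nextDepthFrame();       // hypothetical
    if (visOdom.process(visual, depth)) {
        Se3_F64 cameraToWorld = visOdom.getCameraToWorld();
        // use the accumulated motion estimate ...
    }
}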
Use of georegression.fitting.se.ModelManagerSe3_F64 in project BoofCV by lessthanoptimal.
Class FactoryVisualOdometry, method stereoQuadPnP.
/**
 * Stereo visual odometry which uses the two most recent stereo observations (total of four views) to estimate
 * motion.
 *
 * @see VisOdomQuadPnP
 *
 * @param inlierPixelTol Pixel tolerance for RANSAC inliers - Euclidean distance
 * @param epipolarPixelTol Feature association tolerance in pixels.
 * @param maxDistanceF2F Maximum allowed distance between two features in pixels
 * @param maxAssociationError Maximum error between two features when associating.
 * @param ransacIterations Number of iterations RANSAC will perform
 * @param refineIterations Number of refinement iterations
 * @param detector Which feature detector to use
 * @param imageType Type of input image
 * @return Stereo visual odometry algorithm.
 */
public static <T extends ImageGray<T>, Desc extends TupleDesc> StereoVisualOdometry<T>
stereoQuadPnP(double inlierPixelTol, double epipolarPixelTol, double maxDistanceF2F, double maxAssociationError,
              int ransacIterations, int refineIterations, DetectDescribeMulti<T, Desc> detector, Class<T> imageType) {
    EstimateNofPnP pnp = FactoryMultiView.computePnP_N(EnumPNP.P3P_FINSTERWALDER, -1);
    DistanceModelMonoPixels<Se3_F64, Point2D3D> distanceMono = new PnPDistanceReprojectionSq();
    PnPStereoDistanceReprojectionSq distanceStereo = new PnPStereoDistanceReprojectionSq();
    PnPStereoEstimator pnpStereo = new PnPStereoEstimator(pnp, distanceMono, 0);
    ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
    EstimatorToGenerator<Se3_F64, Stereo2D3D> generator = new EstimatorToGenerator<>(pnpStereo);

    // Euclidean error squared from left + right images
    double ransacTOL = 2 * inlierPixelTol * inlierPixelTol;

    ModelMatcher<Se3_F64, Stereo2D3D> motion =
            new Ransac<>(2323, manager, generator, distanceStereo, ransacIterations, ransacTOL);

    RefinePnPStereo refinePnP = null;
    if (refineIterations > 0) {
        refinePnP = new PnPStereoRefineRodrigues(1e-12, refineIterations);
    }

    Class<Desc> descType = detector.getDescriptionType();
    ScoreAssociation<Desc> scorer = FactoryAssociation.defaultScore(descType);

    AssociateDescription2D<Desc> assocSame;
    if (maxDistanceF2F > 0)
        assocSame = new AssociateMaxDistanceNaive<>(scorer, true, maxAssociationError, maxDistanceF2F);
    else
        assocSame = new AssociateDescTo2D<>(FactoryAssociation.greedy(scorer, maxAssociationError, true));

    AssociateStereo2D<Desc> associateStereo = new AssociateStereo2D<>(scorer, epipolarPixelTol, descType);
    TriangulateTwoViewsCalibrated triangulate = FactoryMultiView.triangulateTwoGeometric();

    associateStereo.setThreshold(maxAssociationError);

    VisOdomQuadPnP<T, Desc> alg = new VisOdomQuadPnP<>(detector, assocSame, associateStereo,
            triangulate, motion, refinePnP);
    return new WrapVisOdomQuadPnP<>(alg, refinePnP, associateStereo, distanceStereo, distanceMono, imageType);
}
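
A short usage sketch (illustrative, not part of the original source): the numbers are arbitrary example values (the association error threshold in particular depends on the descriptor), and a DetectDescribeMulti instance `detector` plus calibrated StereoParameters `stereoParam` are assumed to exist already.

StereoVisualOdometry<GrayU8> visOdom = FactoryVisualOdometry.stereoQuadPnP(
        1.5, 1.5, 200, 0.5, 300, 50, detector, GrayU8.class);
visOdom.setCalibration(stereoParam);

if (visOdom.process(leftImage, rightImage)) {      // one stereo frame pair
    Se3_F64 leftToWorld = visOdom.getCameraToWorld();
}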
Use of georegression.fitting.se.ModelManagerSe3_F64 in project BoofCV by lessthanoptimal.
Class FactoryVisualOdometry, method stereoDualTrackerPnP.
/**
 * Creates a stereo visual odometry algorithm that independently tracks features in the left and right cameras.
 *
 * @see VisOdomDualTrackPnP
 *
 * @param thresholdAdd When the number of inliers is below this number new features are detected
 * @param thresholdRetire When a feature has not been in the inlier list for this many ticks it is dropped
 * @param inlierPixelTol Tolerance in pixels for defining an inlier during robust model matching. Typically 1.5
 * @param epipolarPixelTol Tolerance in pixels for enforcing the epipolar constraint
 * @param ransacIterations Number of iterations performed by RANSAC. Try 300 or more.
 * @param refineIterations Number of iterations done during non-linear optimization. Try 50 or more.
 * @param trackerLeft Tracker used for the left camera
 * @param trackerRight Tracker used for the right camera
 * @param descriptor Computes feature descriptors, used to associate tracks between the left and right cameras.
 * @param imageType Type of image being processed
 * @return Stereo visual odometry algorithm.
 */
public static <T extends ImageGray<T>, Desc extends TupleDesc> StereoVisualOdometry<T>
stereoDualTrackerPnP(int thresholdAdd, int thresholdRetire, double inlierPixelTol, double epipolarPixelTol,
                     int ransacIterations, int refineIterations, PointTracker<T> trackerLeft,
                     PointTracker<T> trackerRight, DescribeRegionPoint<T, Desc> descriptor, Class<T> imageType) {
    EstimateNofPnP pnp = FactoryMultiView.computePnP_N(EnumPNP.P3P_FINSTERWALDER, -1);
    DistanceModelMonoPixels<Se3_F64, Point2D3D> distanceMono = new PnPDistanceReprojectionSq();
    PnPStereoDistanceReprojectionSq distanceStereo = new PnPStereoDistanceReprojectionSq();
    PnPStereoEstimator pnpStereo = new PnPStereoEstimator(pnp, distanceMono, 0);
    ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
    EstimatorToGenerator<Se3_F64, Stereo2D3D> generator = new EstimatorToGenerator<>(pnpStereo);

    // Pixel tolerance for RANSAC inliers - Euclidean error squared from left + right images
    double ransacTOL = 2 * inlierPixelTol * inlierPixelTol;

    ModelMatcher<Se3_F64, Stereo2D3D> motion =
            new Ransac<>(2323, manager, generator, distanceStereo, ransacIterations, ransacTOL);

    RefinePnPStereo refinePnP = null;

    Class<Desc> descType = descriptor.getDescriptionType();
    ScoreAssociation<Desc> scorer = FactoryAssociation.defaultScore(descType);
    AssociateStereo2D<Desc> associateStereo = new AssociateStereo2D<>(scorer, epipolarPixelTol, descType);

    // need to make sure associations are unique
    AssociateDescription2D<Desc> associateUnique = associateStereo;
    if (!associateStereo.uniqueDestination() || !associateStereo.uniqueSource()) {
        associateUnique = new EnforceUniqueByScore.Describe2D<>(associateStereo, true, true);
    }

    if (refineIterations > 0) {
        refinePnP = new PnPStereoRefineRodrigues(1e-12, refineIterations);
    }

    TriangulateTwoViewsCalibrated triangulate = FactoryMultiView.triangulateTwoGeometric();

    VisOdomDualTrackPnP<T, Desc> alg = new VisOdomDualTrackPnP<>(thresholdAdd, thresholdRetire, epipolarPixelTol,
            trackerLeft, trackerRight, descriptor, associateUnique, triangulate, motion, refinePnP);

    return new WrapVisOdomDualTrackPnP<>(pnpStereo, distanceMono, distanceStereo, associateStereo,
            alg, refinePnP, imageType);
}
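
As with the other factory methods, a brief usage sketch (illustrative values, not from the original source), assuming the two point trackers, the region descriptor `describe`, and StereoParameters `stereoParam` were created elsewhere:

StereoVisualOdometry<GrayU8> visOdom = FactoryVisualOdometry.stereoDualTrackerPnP(
        120, 2, 1.5, 1.5, 300, 50, trackerLeft, trackerRight, describe, GrayU8.class);
visOdom.setCalibration(stereoParam);

if (!visOdom.process(leftImage, rightImage)) {
    visOdom.reset();   // estimation failed on this frame pair; start over
} else {
    Se3_F64 leftToWorld = visOdom.getCameraToWorld();
}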