
Example 1 with EstimatorToGenerator

Use of boofcv.factory.geo.EstimatorToGenerator in project BoofCV by lessthanoptimal, in class FactoryVisualOdometry, method stereoDepth.

/**
 * Stereo vision based visual odometry algorithm which runs a sparse feature tracker in the left camera and
 * estimates the range of tracks once when first detected using disparity between left and right cameras.
 *
 * @see VisOdomPixelDepthPnP
 *
 * @param thresholdAdd Add new tracks when less than this number are in the inlier set.  Tracker dependent. Set to
 *                     a value ≤ 0 to add features every frame.
 * @param thresholdRetire Discard a track if it is not in the inlier set after this many updates.  Try 2
 * @param sparseDisparity Estimates the 3D location of features
 * @param imageType Type of image being processed.
 * @return StereoVisualOdometry
 */
public static <T extends ImageGray<T>> StereoVisualOdometry<T> stereoDepth(double inlierPixelTol, int thresholdAdd, int thresholdRetire, int ransacIterations, int refineIterations, boolean doublePass, StereoDisparitySparse<T> sparseDisparity, PointTrackerTwoPass<T> tracker, Class<T> imageType) {
    // Range from sparse disparity
    StereoSparse3D<T> pixelTo3D = new StereoSparse3D<>(sparseDisparity, imageType);
    Estimate1ofPnP estimator = FactoryMultiView.computePnP_1(EnumPNP.P3P_FINSTERWALDER, -1, 2);
    final DistanceModelMonoPixels<Se3_F64, Point2D3D> distance = new PnPDistanceReprojectionSq();
    ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
    EstimatorToGenerator<Se3_F64, Point2D3D> generator = new EstimatorToGenerator<>(estimator);
    // Squared pixel tolerance used by RANSAC when classifying inliers
    double ransacTOL = inlierPixelTol * inlierPixelTol;
    ModelMatcher<Se3_F64, Point2D3D> motion = new Ransac<>(2323, manager, generator, distance, ransacIterations, ransacTOL);
    RefinePnP refine = null;
    if (refineIterations > 0) {
        refine = FactoryMultiView.refinePnP(1e-12, refineIterations);
    }
    VisOdomPixelDepthPnP<T> alg = new VisOdomPixelDepthPnP<>(thresholdAdd, thresholdRetire, doublePass, motion, pixelTo3D, refine, tracker, null, null);
    return new WrapVisOdomPixelDepthPnP<>(alg, pixelTo3D, distance, imageType);
}
Also used: RefinePnP(boofcv.abst.geo.RefinePnP) Ransac(org.ddogleg.fitting.modelset.ransac.Ransac) EstimatorToGenerator(boofcv.factory.geo.EstimatorToGenerator) StereoSparse3D(boofcv.alg.sfm.StereoSparse3D) Point2D3D(boofcv.struct.geo.Point2D3D) Estimate1ofPnP(boofcv.abst.geo.Estimate1ofPnP) ModelManagerSe3_F64(georegression.fitting.se.ModelManagerSe3_F64) Se3_F64(georegression.struct.se.Se3_F64)
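
Not part of the indexed sources: a minimal usage sketch showing how stereoDepth might be wired up, modeled on BoofCV's stereo visual odometry example. The disparity range, KLT settings, and thresholds are illustrative, the calibration and rectified image pair are assumed to be supplied by the caller, and package names follow the BoofCV version these examples were indexed from.

import boofcv.abst.feature.detect.interest.ConfigGeneralDetector;
import boofcv.abst.feature.disparity.StereoDisparitySparse;
import boofcv.abst.feature.tracker.PointTrackerTwoPass;
import boofcv.abst.sfm.d3.StereoVisualOdometry;
import boofcv.alg.tracker.klt.PkltConfig;
import boofcv.factory.feature.disparity.FactoryStereoDisparity;
import boofcv.factory.feature.tracker.FactoryPointTrackerTwoPass;
import boofcv.factory.sfm.FactoryVisualOdometry;
import boofcv.struct.calib.StereoParameters;
import boofcv.struct.image.GrayS16;
import boofcv.struct.image.GrayU8;
import georegression.struct.se.Se3_F64;

public class StereoDepthUsageSketch {
    public static Se3_F64 estimateMotion(StereoParameters param, GrayU8 left, GrayU8 right) {
        // Sparse disparity used to compute the range of newly spawned tracks
        StereoDisparitySparse<GrayU8> disparity =
                FactoryStereoDisparity.regionSparseWta(0, 150, 3, 3, 30, -1, true, GrayU8.class);

        // Two-pass KLT tracker running in the left camera
        PkltConfig configKlt = new PkltConfig();
        configKlt.pyramidScaling = new int[]{ 1, 2, 4, 8 };
        configKlt.templateRadius = 3;
        PointTrackerTwoPass<GrayU8> tracker = FactoryPointTrackerTwoPass.klt(
                configKlt, new ConfigGeneralDetector(600, 3, 1), GrayU8.class, GrayS16.class);

        // 1.5 pixel inlier tolerance, spawn when fewer than 120 inliers, retire after 2 missed updates
        StereoVisualOdometry<GrayU8> visualOdometry = FactoryVisualOdometry.stereoDepth(
                1.5, 120, 2, 200, 50, true, disparity, tracker, GrayU8.class);

        visualOdometry.setCalibration(param);
        if (!visualOdometry.process(left, right))
            throw new RuntimeException("Visual odometry failed on this frame");

        // Accumulated left-camera-to-world transform after the most recent frame
        return visualOdometry.getCameraToWorld();
    }
}

In a real application the caller would loop process() over the video sequence and read getCameraToWorld() after each successful update.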

Example 2 with EstimatorToGenerator

Use of boofcv.factory.geo.EstimatorToGenerator in project MAVSlam by ecmnet, in class FactoryMAVOdometry, method depthDepthPnP.

/**
 * Depth sensor based visual odometry algorithm which runs a sparse feature tracker in the visual camera and
 * estimates the range of tracks once when first detected using the depth sensor.
 *
 * @see MAVOdomPixelDepthPnP
 *
 * @param thresholdAdd Add new tracks when less than this number are in the inlier set.  Tracker dependent. Set to
 *                     a value ≤ 0 to add features every frame.
 * @param thresholdRetire Discard a track if it is not in the inlier set after this many updates.  Try 2
 * @param sparseDepth Extracts depth of pixels from a depth sensor.
 * @param visualType Type of visual image being processed.
 * @param depthType Type of depth image being processed.
 * @return MAVDepthVisualOdometry
 */
public static <Vis extends ImageGray, Depth extends ImageGray> MAVDepthVisualOdometry<Vis, Depth> depthDepthPnP(double inlierPixelTol, int thresholdAdd, int thresholdRetire, int ransacIterations, int refineIterations, boolean doublePass, DepthSparse3D<Depth> sparseDepth, PointTrackerTwoPass<Vis> tracker, Class<Vis> visualType, Class<Depth> depthType) {
    // Range of features from the sparse depth sensor
    ImagePixelTo3D pixelTo3D = new DepthSparse3D_to_PixelTo3D<Depth>(sparseDepth);
    Estimate1ofPnP estimator = FactoryMultiView.computePnP_1(EnumPNP.P3P_FINSTERWALDER, -1, 2);
    final DistanceModelMonoPixels<Se3_F64, Point2D3D> distance = new PnPDistanceReprojectionSq();
    ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
    EstimatorToGenerator<Se3_F64, Point2D3D> generator = new EstimatorToGenerator<Se3_F64, Point2D3D>(estimator);
    // Squared pixel tolerance used by RANSAC when classifying inliers
    double ransacTOL = inlierPixelTol * inlierPixelTol;
    ModelMatcher<Se3_F64, Point2D3D> motion = new Ransac<Se3_F64, Point2D3D>(2323, manager, generator, distance, ransacIterations, ransacTOL);
    RefinePnP refine = null;
    if (refineIterations > 0) {
        refine = FactoryMultiView.refinePnP(1e-12, refineIterations);
    }
    MAVOdomPixelDepthPnP<Vis> alg = new MAVOdomPixelDepthPnP<Vis>(thresholdAdd, thresholdRetire, doublePass, motion, pixelTo3D, refine, tracker, null, null);
    return new MAVOdomPixelDepthPnP_to_DepthVisualOdometry<Vis, Depth>(sparseDepth, alg, distance, ImageType.single(visualType), depthType);
}
Also used: RefinePnP(boofcv.abst.geo.RefinePnP) ImagePixelTo3D(boofcv.abst.sfm.ImagePixelTo3D) DepthSparse3D_to_PixelTo3D(boofcv.abst.sfm.DepthSparse3D_to_PixelTo3D) Ransac(org.ddogleg.fitting.modelset.ransac.Ransac) EstimatorToGenerator(boofcv.factory.geo.EstimatorToGenerator) Point2D3D(boofcv.struct.geo.Point2D3D) PnPDistanceReprojectionSq(boofcv.alg.geo.pose.PnPDistanceReprojectionSq) Estimate1ofPnP(boofcv.abst.geo.Estimate1ofPnP) ModelManagerSe3_F64(georegression.fitting.se.ModelManagerSe3_F64) Se3_F64(georegression.struct.se.Se3_F64)
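
FactoryMAVOdometry reuses the same robust-estimation wiring as the BoofCV factories on this page. The standalone sketch below (not from either project) shows that shared pattern: EstimatorToGenerator adapts a minimal-set PnP solver so ddogleg's RANSAC can generate pose hypotheses from it. The seed and thresholds mirror the code above, the intrinsics passed to setIntrinsic are placeholder values (and that call follows the BoofCV version indexed here), and the observations are assumed to pair normalized image coordinates with known 3D points.

import java.util.List;

import org.ddogleg.fitting.modelset.ModelMatcher;
import org.ddogleg.fitting.modelset.ransac.Ransac;

import boofcv.abst.geo.Estimate1ofPnP;
import boofcv.alg.geo.pose.PnPDistanceReprojectionSq;
import boofcv.factory.geo.EnumPNP;
import boofcv.factory.geo.EstimatorToGenerator;
import boofcv.factory.geo.FactoryMultiView;
import boofcv.struct.geo.Point2D3D;
import georegression.fitting.se.ModelManagerSe3_F64;
import georegression.struct.se.Se3_F64;

public class RobustPnPSketch {
    public static Se3_F64 fitPose(List<Point2D3D> observations) {
        // Minimal-set P3P solver wrapped so RANSAC can use it as a model generator
        Estimate1ofPnP estimator = FactoryMultiView.computePnP_1(EnumPNP.P3P_FINSTERWALDER, -1, 2);
        EstimatorToGenerator<Se3_F64, Point2D3D> generator = new EstimatorToGenerator<>(estimator);

        // Squared reprojection error in pixels; (fx, fy, skew) below are placeholder intrinsics
        PnPDistanceReprojectionSq distance = new PnPDistanceReprojectionSq();
        distance.setIntrinsic(500, 500, 0);

        ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();

        // 1.5 pixel inlier tolerance, squared because the distance function returns squared error
        double ransacTol = 1.5 * 1.5;
        ModelMatcher<Se3_F64, Point2D3D> ransac =
                new Ransac<>(2323, manager, generator, distance, 300, ransacTol);

        if (!ransac.process(observations))
            throw new RuntimeException("RANSAC failed to find a pose");

        // Best motion hypothesis; the inlier set is available via ransac.getMatchSet()
        return ransac.getModelParameters();
    }
}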

Example 3 with EstimatorToGenerator

Use of boofcv.factory.geo.EstimatorToGenerator in project BoofCV by lessthanoptimal, in class FactoryVisualOdometry, method depthDepthPnP.

/**
 * Depth sensor based visual odometry algorithm which runs a sparse feature tracker in the visual camera and
 * estimates the range of tracks once when first detected using the depth sensor.
 *
 * @see VisOdomPixelDepthPnP
 *
 * @param thresholdAdd Add new tracks when less than this number are in the inlier set.  Tracker dependent. Set to
 *                     a value ≤ 0 to add features every frame.
 * @param thresholdRetire Discard a track if it is not in the inlier set after this many updates.  Try 2
 * @param sparseDepth Extracts depth of pixels from a depth sensor.
 * @param visualType Type of visual image being processed.
 * @param depthType Type of depth image being processed.
 * @return DepthVisualOdometry
 */
public static <Vis extends ImageGray<Vis>, Depth extends ImageGray<Depth>> DepthVisualOdometry<Vis, Depth> depthDepthPnP(double inlierPixelTol, int thresholdAdd, int thresholdRetire, int ransacIterations, int refineIterations, boolean doublePass, DepthSparse3D<Depth> sparseDepth, PointTrackerTwoPass<Vis> tracker, Class<Vis> visualType, Class<Depth> depthType) {
    // Range of features from the sparse depth sensor
    ImagePixelTo3D pixelTo3D = new DepthSparse3D_to_PixelTo3D<>(sparseDepth);
    Estimate1ofPnP estimator = FactoryMultiView.computePnP_1(EnumPNP.P3P_FINSTERWALDER, -1, 2);
    final DistanceModelMonoPixels<Se3_F64, Point2D3D> distance = new PnPDistanceReprojectionSq();
    ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
    EstimatorToGenerator<Se3_F64, Point2D3D> generator = new EstimatorToGenerator<>(estimator);
    // Squared pixel tolerance used by RANSAC when classifying inliers
    double ransacTOL = inlierPixelTol * inlierPixelTol;
    ModelMatcher<Se3_F64, Point2D3D> motion = new Ransac<>(2323, manager, generator, distance, ransacIterations, ransacTOL);
    RefinePnP refine = null;
    if (refineIterations > 0) {
        refine = FactoryMultiView.refinePnP(1e-12, refineIterations);
    }
    VisOdomPixelDepthPnP<Vis> alg = new VisOdomPixelDepthPnP<>(thresholdAdd, thresholdRetire, doublePass, motion, pixelTo3D, refine, tracker, null, null);
    return new VisOdomPixelDepthPnP_to_DepthVisualOdometry<>(sparseDepth, alg, distance, ImageType.single(visualType), depthType);
}
Also used: RefinePnP(boofcv.abst.geo.RefinePnP) ImagePixelTo3D(boofcv.abst.sfm.ImagePixelTo3D) DepthSparse3D_to_PixelTo3D(boofcv.abst.sfm.DepthSparse3D_to_PixelTo3D) Ransac(org.ddogleg.fitting.modelset.ransac.Ransac) EstimatorToGenerator(boofcv.factory.geo.EstimatorToGenerator) Point2D3D(boofcv.struct.geo.Point2D3D) Estimate1ofPnP(boofcv.abst.geo.Estimate1ofPnP) ModelManagerSe3_F64(georegression.fitting.se.ModelManagerSe3_F64) Se3_F64(georegression.struct.se.Se3_F64)
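
For comparison with the stereo sketch above, a construction sketch for the RGB-D variant (not from the BoofCV sources). The values mirror BoofCV's RGB-D visual odometry example: millimetre depth in a GrayU16 image, a two-pass KLT tracker, and the same thresholds; exact package names may differ slightly between releases.

import boofcv.abst.feature.detect.interest.ConfigGeneralDetector;
import boofcv.abst.feature.tracker.PointTrackerTwoPass;
import boofcv.abst.sfm.d3.DepthVisualOdometry;
import boofcv.alg.sfm.DepthSparse3D;
import boofcv.alg.tracker.klt.PkltConfig;
import boofcv.factory.feature.tracker.FactoryPointTrackerTwoPass;
import boofcv.factory.sfm.FactoryVisualOdometry;
import boofcv.struct.image.GrayS16;
import boofcv.struct.image.GrayU16;
import boofcv.struct.image.GrayU8;

public class DepthOdometrySketch {
    public static DepthVisualOdometry<GrayU8, GrayU16> build() {
        // Integer depth image; 1e-3 converts millimetre depth values into metres (assumed sensor convention)
        DepthSparse3D<GrayU16> sparseDepth = new DepthSparse3D.I<>(1e-3);

        // Two-pass KLT tracker running in the visual camera
        PkltConfig configKlt = new PkltConfig();
        configKlt.pyramidScaling = new int[]{ 1, 2, 4, 8 };
        configKlt.templateRadius = 3;
        PointTrackerTwoPass<GrayU8> tracker = FactoryPointTrackerTwoPass.klt(
                configKlt, new ConfigGeneralDetector(600, 3, 1), GrayU8.class, GrayS16.class);

        // Same thresholds as the stereo sketch: 1.5 px inliers, spawn below 120 inliers, retire after 2 misses
        DepthVisualOdometry<GrayU8, GrayU16> visualOdometry = FactoryVisualOdometry.depthDepthPnP(
                1.5, 120, 2, 200, 50, true, sparseDepth, tracker, GrayU8.class, GrayU16.class);

        // The caller must still call setCalibration(...) with the visual intrinsics and the visual-to-depth
        // pixel transform (exact parameter types vary by release) before driving process(visual, depth)
        // and reading getCameraToWorld() each frame.
        return visualOdometry;
    }
}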

Example 4 with EstimatorToGenerator

Use of boofcv.factory.geo.EstimatorToGenerator in project BoofCV by lessthanoptimal, in class FactoryVisualOdometry, method stereoQuadPnP.

/**
 * Stereo visual odometry which uses the two most recent stereo observations (total of four views) to estimate
 * motion.
 *
 * @see VisOdomQuadPnP
 *
 * @param inlierPixelTol Pixel tolerance for RANSAC inliers - Euclidean distance
 * @param epipolarPixelTol Feature association tolerance in pixels.
 * @param maxDistanceF2F Maximum allowed distance between two features in pixels
 * @param maxAssociationError Maximum error between two features when associating.
 * @param ransacIterations Number of iterations RANSAC will perform
 * @param refineIterations Number of refinement iterations
 * @param detector Which feature detector to use
 * @param imageType Type of input image
 * @return StereoVisualOdometry
 */
public static <T extends ImageGray<T>, Desc extends TupleDesc> StereoVisualOdometry<T> stereoQuadPnP(double inlierPixelTol, double epipolarPixelTol, double maxDistanceF2F, double maxAssociationError, int ransacIterations, int refineIterations, DetectDescribeMulti<T, Desc> detector, Class<T> imageType) {
    EstimateNofPnP pnp = FactoryMultiView.computePnP_N(EnumPNP.P3P_FINSTERWALDER, -1);
    DistanceModelMonoPixels<Se3_F64, Point2D3D> distanceMono = new PnPDistanceReprojectionSq();
    PnPStereoDistanceReprojectionSq distanceStereo = new PnPStereoDistanceReprojectionSq();
    PnPStereoEstimator pnpStereo = new PnPStereoEstimator(pnp, distanceMono, 0);
    ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
    EstimatorToGenerator<Se3_F64, Stereo2D3D> generator = new EstimatorToGenerator<>(pnpStereo);
    // euclidean error squared from left + right images
    double ransacTOL = 2 * inlierPixelTol * inlierPixelTol;
    ModelMatcher<Se3_F64, Stereo2D3D> motion = new Ransac<>(2323, manager, generator, distanceStereo, ransacIterations, ransacTOL);
    RefinePnPStereo refinePnP = null;
    if (refineIterations > 0) {
        refinePnP = new PnPStereoRefineRodrigues(1e-12, refineIterations);
    }
    Class<Desc> descType = detector.getDescriptionType();
    ScoreAssociation<Desc> scorer = FactoryAssociation.defaultScore(descType);
    AssociateDescription2D<Desc> assocSame;
    if (maxDistanceF2F > 0)
        assocSame = new AssociateMaxDistanceNaive<>(scorer, true, maxAssociationError, maxDistanceF2F);
    else
        assocSame = new AssociateDescTo2D<>(FactoryAssociation.greedy(scorer, maxAssociationError, true));
    AssociateStereo2D<Desc> associateStereo = new AssociateStereo2D<>(scorer, epipolarPixelTol, descType);
    TriangulateTwoViewsCalibrated triangulate = FactoryMultiView.triangulateTwoGeometric();
    associateStereo.setThreshold(maxAssociationError);
    VisOdomQuadPnP<T, Desc> alg = new VisOdomQuadPnP<>(detector, assocSame, associateStereo, triangulate, motion, refinePnP);
    return new WrapVisOdomQuadPnP<>(alg, refinePnP, associateStereo, distanceStereo, distanceMono, imageType);
}
Also used: AssociateMaxDistanceNaive(boofcv.alg.feature.associate.AssociateMaxDistanceNaive) AssociateDescTo2D(boofcv.abst.feature.associate.AssociateDescTo2D) Ransac(org.ddogleg.fitting.modelset.ransac.Ransac) EstimatorToGenerator(boofcv.factory.geo.EstimatorToGenerator) Point2D3D(boofcv.struct.geo.Point2D3D) TupleDesc(boofcv.struct.feature.TupleDesc) EstimateNofPnP(boofcv.abst.geo.EstimateNofPnP) AssociateStereo2D(boofcv.alg.feature.associate.AssociateStereo2D) Stereo2D3D(boofcv.struct.sfm.Stereo2D3D) TriangulateTwoViewsCalibrated(boofcv.abst.geo.TriangulateTwoViewsCalibrated) ModelManagerSe3_F64(georegression.fitting.se.ModelManagerSe3_F64) Se3_F64(georegression.struct.se.Se3_F64)
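
A construction sketch for stereoQuadPnP (not from the BoofCV sources). The DetectDescribeMulti instance and stereo calibration are assumed to be supplied by the caller, all numeric arguments are illustrative, and package names follow the BoofCV version these examples were indexed from.

import boofcv.abst.feature.detdesc.DetectDescribeMulti;
import boofcv.abst.sfm.d3.StereoVisualOdometry;
import boofcv.factory.sfm.FactoryVisualOdometry;
import boofcv.struct.calib.StereoParameters;
import boofcv.struct.feature.TupleDesc;
import boofcv.struct.image.GrayU8;

public class StereoQuadPnPSketch {
    public static <Desc extends TupleDesc> StereoVisualOdometry<GrayU8>
    build(DetectDescribeMulti<GrayU8, Desc> detector, StereoParameters stereoParam) {
        // 1.5 px RANSAC inlier tolerance, 2 px epipolar tolerance, 200 px frame-to-frame search radius,
        // 0.5 maximum association error, 300 RANSAC iterations, 50 refinement iterations
        StereoVisualOdometry<GrayU8> visualOdometry = FactoryVisualOdometry.stereoQuadPnP(
                1.5, 2.0, 200, 0.5, 300, 50, detector, GrayU8.class);

        // Stereo calibration must be set before the first pair of frames is processed
        visualOdometry.setCalibration(stereoParam);
        return visualOdometry;
    }
}

Because maxDistanceF2F is positive here, frame-to-frame association uses AssociateMaxDistanceNaive, as the factory code above shows; passing zero or less would fall back to greedy association.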

Example 5 with EstimatorToGenerator

Use of boofcv.factory.geo.EstimatorToGenerator in project BoofCV by lessthanoptimal, in class FactoryVisualOdometry, method stereoDualTrackerPnP.

/**
 * Creates a stereo visual odometry algorithm that independently tracks features in left and right camera.
 *
 * @see VisOdomDualTrackPnP
 *
 * @param thresholdAdd When the number of inliers is below this number new features are detected
 * @param thresholdRetire When a feature has not been in the inlier list for this many ticks it is dropped
 * @param inlierPixelTol Tolerance in pixels for defining an inlier during robust model matching.  Typically 1.5
 * @param epipolarPixelTol Tolerance in pixels for enforcing the epipolar constraint
 * @param ransacIterations Number of iterations performed by RANSAC.  Try 300 or more.
 * @param refineIterations Number of iterations done during non-linear optimization.  Try 50 or more.
 * @param trackerLeft Tracker used for left camera
 * @param trackerRight Tracker used for right camera
 * @param imageType Type of image being processed
 * @return Stereo visual odometry algorithm.
 */
public static <T extends ImageGray<T>, Desc extends TupleDesc> StereoVisualOdometry<T> stereoDualTrackerPnP(int thresholdAdd, int thresholdRetire, double inlierPixelTol, double epipolarPixelTol, int ransacIterations, int refineIterations, PointTracker<T> trackerLeft, PointTracker<T> trackerRight, DescribeRegionPoint<T, Desc> descriptor, Class<T> imageType) {
    EstimateNofPnP pnp = FactoryMultiView.computePnP_N(EnumPNP.P3P_FINSTERWALDER, -1);
    DistanceModelMonoPixels<Se3_F64, Point2D3D> distanceMono = new PnPDistanceReprojectionSq();
    PnPStereoDistanceReprojectionSq distanceStereo = new PnPStereoDistanceReprojectionSq();
    PnPStereoEstimator pnpStereo = new PnPStereoEstimator(pnp, distanceMono, 0);
    ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
    EstimatorToGenerator<Se3_F64, Stereo2D3D> generator = new EstimatorToGenerator<>(pnpStereo);
    // Pixel tolerance for RANSAC inliers - euclidean error squared from left + right images
    double ransacTOL = 2 * inlierPixelTol * inlierPixelTol;
    ModelMatcher<Se3_F64, Stereo2D3D> motion = new Ransac<>(2323, manager, generator, distanceStereo, ransacIterations, ransacTOL);
    RefinePnPStereo refinePnP = null;
    Class<Desc> descType = descriptor.getDescriptionType();
    ScoreAssociation<Desc> scorer = FactoryAssociation.defaultScore(descType);
    AssociateStereo2D<Desc> associateStereo = new AssociateStereo2D<>(scorer, epipolarPixelTol, descType);
    // need to make sure associations are unique
    AssociateDescription2D<Desc> associateUnique = associateStereo;
    if (!associateStereo.uniqueDestination() || !associateStereo.uniqueSource()) {
        associateUnique = new EnforceUniqueByScore.Describe2D<>(associateStereo, true, true);
    }
    if (refineIterations > 0) {
        refinePnP = new PnPStereoRefineRodrigues(1e-12, refineIterations);
    }
    TriangulateTwoViewsCalibrated triangulate = FactoryMultiView.triangulateTwoGeometric();
    VisOdomDualTrackPnP<T, Desc> alg = new VisOdomDualTrackPnP<>(thresholdAdd, thresholdRetire, epipolarPixelTol, trackerLeft, trackerRight, descriptor, associateUnique, triangulate, motion, refinePnP);
    return new WrapVisOdomDualTrackPnP<>(pnpStereo, distanceMono, distanceStereo, associateStereo, alg, refinePnP, imageType);
}
Also used: Ransac(org.ddogleg.fitting.modelset.ransac.Ransac) EstimatorToGenerator(boofcv.factory.geo.EstimatorToGenerator) Point2D3D(boofcv.struct.geo.Point2D3D) TupleDesc(boofcv.struct.feature.TupleDesc) EstimateNofPnP(boofcv.abst.geo.EstimateNofPnP) AssociateStereo2D(boofcv.alg.feature.associate.AssociateStereo2D) EnforceUniqueByScore(boofcv.abst.feature.associate.EnforceUniqueByScore) Stereo2D3D(boofcv.struct.sfm.Stereo2D3D) TriangulateTwoViewsCalibrated(boofcv.abst.geo.TriangulateTwoViewsCalibrated) ModelManagerSe3_F64(georegression.fitting.se.ModelManagerSe3_F64) Se3_F64(georegression.struct.se.Se3_F64)
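
A construction sketch for the dual-tracker variant (not from the BoofCV sources). The region descriptor is assumed to come from the caller, for example a SURF descriptor built with FactoryDescribeRegionPoint; the numeric thresholds follow the Javadoc suggestions where given and are otherwise illustrative; package names follow the BoofCV version these examples were indexed from.

import boofcv.abst.feature.describe.DescribeRegionPoint;
import boofcv.abst.feature.detect.interest.ConfigGeneralDetector;
import boofcv.abst.feature.tracker.PointTracker;
import boofcv.abst.sfm.d3.StereoVisualOdometry;
import boofcv.alg.tracker.klt.PkltConfig;
import boofcv.factory.feature.tracker.FactoryPointTracker;
import boofcv.factory.sfm.FactoryVisualOdometry;
import boofcv.struct.calib.StereoParameters;
import boofcv.struct.feature.TupleDesc;
import boofcv.struct.image.GrayS16;
import boofcv.struct.image.GrayU8;

public class StereoDualTrackerSketch {
    public static <Desc extends TupleDesc> StereoVisualOdometry<GrayU8>
    build(DescribeRegionPoint<GrayU8, Desc> descriptor, StereoParameters stereoParam) {
        PkltConfig configKlt = new PkltConfig();
        configKlt.pyramidScaling = new int[]{ 1, 2, 4, 8 };
        configKlt.templateRadius = 3;

        // Independent KLT trackers for the left and right cameras
        PointTracker<GrayU8> trackerLeft = FactoryPointTracker.klt(
                configKlt, new ConfigGeneralDetector(600, 3, 1), GrayU8.class, GrayS16.class);
        PointTracker<GrayU8> trackerRight = FactoryPointTracker.klt(
                configKlt, new ConfigGeneralDetector(600, 3, 1), GrayU8.class, GrayS16.class);

        // Spawn below 120 inliers, retire after 2 missed updates, 1.5 px inlier and epipolar tolerance,
        // 300 RANSAC iterations, 50 refinement iterations
        StereoVisualOdometry<GrayU8> visualOdometry = FactoryVisualOdometry.stereoDualTrackerPnP(
                120, 2, 1.5, 1.5, 300, 50, trackerLeft, trackerRight, descriptor, GrayU8.class);

        visualOdometry.setCalibration(stereoParam);
        return visualOdometry;
    }
}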

Aggregations

EstimatorToGenerator (boofcv.factory.geo.EstimatorToGenerator): 5
Point2D3D (boofcv.struct.geo.Point2D3D): 5
ModelManagerSe3_F64 (georegression.fitting.se.ModelManagerSe3_F64): 5
Se3_F64 (georegression.struct.se.Se3_F64): 5
Ransac (org.ddogleg.fitting.modelset.ransac.Ransac): 5
Estimate1ofPnP (boofcv.abst.geo.Estimate1ofPnP): 3
RefinePnP (boofcv.abst.geo.RefinePnP): 3
EstimateNofPnP (boofcv.abst.geo.EstimateNofPnP): 2
TriangulateTwoViewsCalibrated (boofcv.abst.geo.TriangulateTwoViewsCalibrated): 2
DepthSparse3D_to_PixelTo3D (boofcv.abst.sfm.DepthSparse3D_to_PixelTo3D): 2
ImagePixelTo3D (boofcv.abst.sfm.ImagePixelTo3D): 2
AssociateStereo2D (boofcv.alg.feature.associate.AssociateStereo2D): 2
TupleDesc (boofcv.struct.feature.TupleDesc): 2
Stereo2D3D (boofcv.struct.sfm.Stereo2D3D): 2
AssociateDescTo2D (boofcv.abst.feature.associate.AssociateDescTo2D): 1
EnforceUniqueByScore (boofcv.abst.feature.associate.EnforceUniqueByScore): 1
AssociateMaxDistanceNaive (boofcv.alg.feature.associate.AssociateMaxDistanceNaive): 1
PnPDistanceReprojectionSq (boofcv.alg.geo.pose.PnPDistanceReprojectionSq): 1
StereoSparse3D (boofcv.alg.sfm.StereoSparse3D): 1