Example usage of org.ddogleg.fitting.modelset.ransac.Ransac from the BoofCV project (by lessthanoptimal): the epipolarRansac method of the FactoryMultiViewRobust class.
/**
 * Wires together a {@link Ransac} robust estimator that recovers camera motion ({@link Se3_F64})
 * between two views from associated point pairs via epipolar geometry.
 *
 * @param epipolar estimator for the epipolar constraint (essential/fundamental matrix)
 * @param intrinsic calibrated pinhole camera model supplying fx, fy, and skew
 * @param ransac RANSAC configuration (seed, iterations, inlier threshold)
 * @return configured robust motion estimator
 */
private static Ransac<Se3_F64, AssociatedPair> epipolarRansac(Estimate1ofEpipolar epipolar, CameraPinholeRadial intrinsic, ConfigRansac ransac) {
    // Triangulation is used both when resolving motion from the essential matrix
    // and when scoring candidate models
    TriangulateTwoViewsCalibrated triangulation = FactoryMultiView.triangulateTwoGeometric();

    ModelManager<Se3_F64> modelManager = new ModelManagerSe3_F64();
    ModelGenerator<Se3_F64, AssociatedPair> motionGenerator =
            new Se3FromEssentialGenerator(epipolar, triangulation);
    DistanceFromModel<Se3_F64, AssociatedPair> errorMetric =
            new DistanceSe3SymmetricSq(triangulation,
                    intrinsic.fx, intrinsic.fy, intrinsic.skew,
                    intrinsic.fx, intrinsic.fy, intrinsic.skew);

    // NOTE(review): the factor of 2.0 presumably accounts for the symmetric (two-view)
    // squared error computed by DistanceSe3SymmetricSq — confirm against its docs
    double inlierThresholdSq = 2.0 * ransac.inlierThreshold * ransac.inlierThreshold;

    return new Ransac<>(ransac.randSeed, modelManager, motionGenerator, errorMetric,
            ransac.maxIterations, inlierThresholdSq);
}
Example usage of org.ddogleg.fitting.modelset.ransac.Ransac from the MAVSlam project (by ecmnet): the depthDepthPnP method of the FactoryMAVOdometry class.
/**
 * Depth sensor based visual odometry algorithm which runs a sparse feature tracker in the visual camera and
 * estimates the range of tracks once when first detected using the depth sensor.
 *
 * @see MAVOdomPixelDepthPnP
 *
 * @param inlierPixelTol Pixel tolerance for classifying RANSAC inliers; the squared value is used as the
 * RANSAC fit threshold.
 * @param thresholdAdd Add new tracks when less than this number are in the inlier set. Tracker dependent. Set to
 * a value &le; 0 to add features every frame.
 * @param thresholdRetire Discard a track if it is not in the inlier set after this many updates. Try 2
 * @param ransacIterations Maximum number of RANSAC iterations when estimating motion.
 * @param refineIterations Number of iterations for non-linear refinement of the motion estimate.
 * Values &le; 0 disable refinement.
 * @param doublePass Passed through to {@link MAVOdomPixelDepthPnP}; see its documentation.
 * @param sparseDepth Extracts depth of pixels from a depth sensor.
 * @param tracker Two-pass feature tracker run on the visual image.
 * @param visualType Type of visual image being processed.
 * @param depthType Type of depth image being processed.
 * @return StereoVisualOdometry
 */
public static <Vis extends ImageGray, Depth extends ImageGray> MAVDepthVisualOdometry<Vis, Depth> depthDepthPnP(double inlierPixelTol, int thresholdAdd, int thresholdRetire, int ransacIterations, int refineIterations, boolean doublePass, DepthSparse3D<Depth> sparseDepth, PointTrackerTwoPass<Vis> tracker, Class<Vis> visualType, Class<Depth> depthType) {
    // Range from sparse disparity
    ImagePixelTo3D pixelTo3D = new DepthSparse3D_to_PixelTo3D<>(sparseDepth);

    Estimate1ofPnP estimator = FactoryMultiView.computePnP_1(EnumPNP.P3P_FINSTERWALDER, -1, 2);
    final DistanceModelMonoPixels<Se3_F64, Point2D3D> distance = new PnPDistanceReprojectionSq();
    ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
    EstimatorToGenerator<Se3_F64, Point2D3D> generator = new EstimatorToGenerator<>(estimator);

    // 1/2 a pixel tolerance for RANSAC inliers
    double ransacTOL = inlierPixelTol * inlierPixelTol;

    // Fixed seed so the estimator behaves reproducibly from run to run
    long ransacSeed = 2323;
    ModelMatcher<Se3_F64, Point2D3D> motion =
            new Ransac<>(ransacSeed, manager, generator, distance, ransacIterations, ransacTOL);

    // Optional non-linear refinement of the RANSAC motion estimate
    RefinePnP refine = null;
    if (refineIterations > 0) {
        refine = FactoryMultiView.refinePnP(1e-12, refineIterations);
    }

    MAVOdomPixelDepthPnP<Vis> alg = new MAVOdomPixelDepthPnP<>(thresholdAdd, thresholdRetire,
            doublePass, motion, pixelTo3D, refine, tracker, null, null);

    return new MAVOdomPixelDepthPnP_to_DepthVisualOdometry<>(sparseDepth, alg, distance,
            ImageType.single(visualType), depthType);
}
Example usage of org.ddogleg.fitting.modelset.ransac.Ransac from the BoofCV project (by lessthanoptimal): the pnpRansac method of the FactoryMultiViewRobust class.
/**
 * Robust solution to PnP problem using {@link Ransac}. Input observations are in normalized
 * image coordinates.
 *
 * <p>NOTE: Observations are in normalized image coordinates NOT pixels.</p>
 *
 * <p>See code for all the details.</p>
 *
 * @param pnp PnP parameters. Can't be null.
 * @param ransac Parameters for RANSAC. Can't be null.
 * @return Robust Se3_F64 estimator
 */
public static Ransac<Se3_F64, Point2D3D> pnpRansac(ConfigPnP pnp, ConfigRansac ransac) {
    pnp.checkValidity();
    ransac.checkValidity();

    ModelManagerSe3_F64 modelManager = new ModelManagerSe3_F64();

    Estimate1ofPnP pnpSolver = FactoryMultiView.computePnP_1(pnp.which, pnp.epnpIterations, pnp.numResolve);
    EstimatorToGenerator<Se3_F64, Point2D3D> candidateGenerator = new EstimatorToGenerator<>(pnpSolver);

    DistanceModelMonoPixels<Se3_F64, Point2D3D> reprojectionError = new PnPDistanceReprojectionSq();
    reprojectionError.setIntrinsic(pnp.intrinsic.fx, pnp.intrinsic.fy, pnp.intrinsic.skew);

    // inlierThreshold is given in pixels; the distance metric reports pixels squared
    double thresholdSq = ransac.inlierThreshold * ransac.inlierThreshold;

    return new Ransac<>(ransac.randSeed, modelManager, candidateGenerator, reprojectionError,
            ransac.maxIterations, thresholdSq);
}
Example usage of org.ddogleg.fitting.modelset.ransac.Ransac from the BoofCV project (by lessthanoptimal): the homographyRansac method of the FactoryMultiViewRobust class.
/**
 * Robust solution for estimating {@link Homography2D_F64} with {@link Ransac}. Input
 * observations are in pixel coordinates.
 *
 * <ul>
 * <li>Four point linear is used internally</li>
 * <li>inlierThreshold is in pixels</li>
 * </ul>
 *
 * <p>See code for all the details.</p>
 *
 * @param homography Homography estimation parameters. If null default is used.
 * @param ransac Parameters for RANSAC. Can't be null.
 * @return Homography estimator
 */
public static Ransac<Homography2D_F64, AssociatedPair> homographyRansac(ConfigHomography homography, ConfigRansac ransac) {
    // fall back to default settings when no configuration was supplied
    ConfigHomography config = (homography != null) ? homography : new ConfigHomography();

    ModelManager<Homography2D_F64> modelManager = new ModelManagerHomography2D_F64();
    GenerateHomographyLinear homographyGenerator = new GenerateHomographyLinear(config.normalize);
    DistanceHomographySq errorMetric = new DistanceHomographySq();

    // threshold is in pixels, distance metric is in pixels squared
    double thresholdSq = ransac.inlierThreshold * ransac.inlierThreshold;

    return new Ransac<>(ransac.randSeed, modelManager, homographyGenerator, errorMetric,
            ransac.maxIterations, thresholdSq);
}
Example usage of org.ddogleg.fitting.modelset.ransac.Ransac from the BoofCV project (by lessthanoptimal): the checkObvious method of the CommonGridRansacLineDetectorChecks class.
/**
 * Renders a single vertical edge into a synthetic image and verifies the detector finds
 * exactly one nearly-vertical line segment in each grid cell the edge passes through.
 * The region size is a parameter so that size-related issues can be exercised.
 *
 * @param regionSize width/height of the grid regions used by the detector
 */
protected void checkObvious(int regionSize) {
    // column where the synthetic vertical edge is drawn
    final int lineX = 25;

    GrayU8 binaryEdges = new GrayU8(width, height);
    D gradX = GeneralizedImageOps.createSingleBand(derivType, width, height);
    D gradY = GeneralizedImageOps.createSingleBand(derivType, width, height);

    // mark the edge pixels and give them a strong x-gradient
    for (int y = 0; y < height; y++) {
        binaryEdges.set(lineX, y, 1);
        GeneralizedImageOps.set(gradX, lineX, y, 20);
    }

    ModelManagerLinePolar2D_F32 modelManager = new ModelManagerLinePolar2D_F32();
    GridLineModelFitter lineFitter = new GridLineModelFitter(0.9f);
    GridLineModelDistance lineDistance = new GridLineModelDistance(0.9f);
    ModelMatcher<LinePolar2D_F32, Edgel> matcher =
            new Ransac<>(123123, modelManager, lineFitter, lineDistance, 25, 1);

    GridRansacLineDetector<D> detector = createDetector(regionSize, 5, matcher);
    detector.process(gradX, gradY, binaryEdges);

    MatrixOfList<LineSegment2D_F32> found = detector.getFoundLines();
    assertEquals(width / regionSize, found.getWidth());
    assertEquals(height / regionSize, found.getHeight());

    // every grid cell along the edge column should contain exactly one detected line
    int gridCol = lineX / regionSize;
    for (int row = 0; row < found.height; row++) {
        List<LineSegment2D_F32> cell = found.get(gridCol, row);
        assertTrue(cell.size() == 1);

        LineSegment2D_F32 segment = cell.get(0);
        // the line must be essentially vertical: large |slopeY|, near-zero |slopeX|
        assertTrue(Math.abs(segment.slopeY()) > 1);
        assertTrue(Math.abs(segment.slopeX()) < 0.01);
    }
}
Aggregations