Use of boofcv.struct.geo.Point2D3D in project BoofCV by lessthanoptimal.
The class TestPnPStereoEstimator, method perfectData.
private void perfectData(int numExtra) {
    EstimateNofPnP pnp = FactoryMultiView.computePnP_N(EnumPNP.P3P_FINSTERWALDER, -1);
    DistanceModelMonoPixels<Se3_F64, Point2D3D> distanceMono = new PnPDistanceReprojectionSq();
    PnPStereoEstimator alg = new PnPStereoEstimator(pnp, distanceMono, numExtra);

    Se3_F64 expected = new Se3_F64();
    expected.getR().set(ConvertRotation3D_F64.eulerToMatrix(EulerType.XYZ, 0.05, -0.03, 0.02, null));
    expected.getT().set(0.2, -0.1, 0.01);

    generateScene(alg.getMinimumPoints(), expected, false);

    Se3_F64 found = new Se3_F64();
    alg.setLeftToRight(leftToRight);
    assertTrue(alg.process(pointPose, found));

    // found.print();
    // expected.print();

    assertTrue(MatrixFeatures_DDRM.isIdentical(expected.getR(), found.getR(), 1e-8));
    assertTrue(found.getT().isIdentical(expected.getT(), 1e-8));
}
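For context, a Point2D3D pairs a point's 3D location with its observation in normalized image coordinates. Below is a minimal sketch, not taken from the test above, of building one synthetically by projecting a known 3D point through a known pose; the pose and coordinate values are made up for illustration.

    // Hedged sketch: build a Point2D3D by projecting a known 3D point.
    Se3_F64 worldToCamera = new Se3_F64();
    worldToCamera.getT().set(0.2, -0.1, 0.01);

    Point3D_F64 world = new Point3D_F64(0.1, -0.2, 3.0);
    Point3D_F64 inCamera = new Point3D_F64();
    SePointOps_F64.transform(worldToCamera, world, inCamera);

    Point2D3D obs = new Point2D3D();
    obs.location.set(world.x, world.y, world.z);
    // pinhole projection into normalized image coordinates (divide by depth)
    obs.observation.set(inCamera.x / inCamera.z, inCamera.y / inCamera.z);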
Use of boofcv.struct.geo.Point2D3D in project BoofCV by lessthanoptimal.
The class FiducialDetectorPnP, method computeStability.
/**
 * Estimates the stability by perturbing each landmark by the specified number of pixels in the distorted image.
 */
@Override
public boolean computeStability(int which, double disturbance, FiducialStability results) {
    if (!getFiducialToCamera(which, targetToCamera))
        return false;

    targetToCamera.invert(referenceCameraToTarget);

    maxOrientation = 0;
    maxLocation = 0;
    for (int i = 0; i < detected2D3D.size(); i++) {
        Point2D3D p23 = detected2D3D.get(i);
        Point2D_F64 p = detectedPixels.get(i);
        workPt.set(p);
        perturb(which, disturbance, workPt, p, p23);
    }

    results.location = maxLocation;
    results.orientation = maxOrientation;
    return true;
}
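perturb() re-estimates the pose with one landmark shifted and compares it to the reference. A hedged sketch of that comparison step; targetToCameraSample stands in for the re-estimated pose and is not defined in this file.

    // Hedged sketch: score how far a perturbed pose drifts from the reference.
    Se3_F64 targetToCameraSample = new Se3_F64(); // assumed: pose re-estimated from perturbed landmarks
    Se3_F64 difference = new Se3_F64();
    referenceCameraToTarget.concat(targetToCameraSample, difference);

    Rodrigues_F64 rod = ConvertRotation3D_F64.matrixToRodrigues(difference.getR(), new Rodrigues_F64());
    maxOrientation = Math.max(maxOrientation, Math.abs(rod.theta)); // residual rotation, radians
    maxLocation = Math.max(maxLocation, difference.getT().norm());  // residual translation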
Use of boofcv.struct.geo.Point2D3D in project BoofCV by lessthanoptimal.
The class FiducialDetectorPnP, method createDetectedList.
/**
 * Create the list of observed points in 2D3D
 */
private void createDetectedList(int which, List<PointIndex2D_F64> pixels) {
    detected2D3D.clear();
    List<Point2D3D> all = getControl3D(which);
    for (int i = 0; i < pixels.size(); i++) {
        PointIndex2D_F64 a = pixels.get(i);
        Point2D3D b = all.get(i);
        pixelToNorm.compute(a.x, a.y, b.observation);
        detected2D3D.add(b);
    }
}
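Here pixelToNorm converts each pixel into normalized image coordinates before it is stored in the Point2D3D's observation field. A minimal sketch of the same conversion, assuming an undistorted pinhole model and BoofCV's PerspectiveOps helper; the intrinsic values are made up.

    // Hedged sketch of the pixel -> normalized conversion; intrinsics are made up.
    CameraPinhole intrinsic = new CameraPinhole(500, 500, 0, 320, 240, 640, 480);

    Point2D_F64 pixel = new Point2D_F64(400, 300);
    Point2D_F64 norm = new Point2D_F64();
    PerspectiveOps.convertPixelToNorm(intrinsic, pixel, norm);
    // with zero skew this is ((x - cx)/fx, (y - cy)/fy)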
Use of boofcv.struct.geo.Point2D3D in project BoofCV by lessthanoptimal.
The class VisOdomPixelDepthPnP, method estimateMotion.
/**
 * Estimates motion from the set of tracks and their 3D locations
 *
 * @return true if successful.
 */
private boolean estimateMotion() {
    List<PointTrack> active = tracker.getActiveTracks(null);
    List<Point2D3D> obs = new ArrayList<>();
    for (PointTrack t : active) {
        Point2D3D p = t.getCookie();
        pixelToNorm.compute(t.x, t.y, p.observation);
        obs.add(p);
    }

    // estimate the motion up to a scale factor in translation
    if (!motionEstimator.process(obs))
        return false;

    if (doublePass) {
        if (!performSecondPass(active, obs))
            return false;
    }
    tracker.finishTracking();

    Se3_F64 keyToCurr;
    if (refine != null) {
        keyToCurr = new Se3_F64();
        refine.fitModel(motionEstimator.getMatchSet(), motionEstimator.getModelParameters(), keyToCurr);
    } else {
        keyToCurr = motionEstimator.getModelParameters();
    }
    keyToCurr.invert(currToKey);

    // mark tracks as being inliers and add to inlier list
    int N = motionEstimator.getMatchSet().size();
    for (int i = 0; i < N; i++) {
        int index = motionEstimator.getInputIndex(i);
        Point2D3DTrack t = active.get(index).getCookie();
        t.lastInlier = tick;
        inlierTracks.add(t);
    }
    return true;
}
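The relative motion keyToCurr is inverted into currToKey above. A hedged sketch of how such a relative estimate can be chained into a global pose; keyToWorld and currToWorld are illustrative names, not fields of this class.

    // Hedged sketch: chain relative motion into a global pose; names are illustrative.
    Se3_F64 keyToWorld = new Se3_F64();   // pose of the key frame in world coordinates
    Se3_F64 keyToCurr = new Se3_F64();    // relative motion from the estimator
    Se3_F64 currToKey = new Se3_F64();
    Se3_F64 currToWorld = new Se3_F64();

    keyToCurr.invert(currToKey);
    currToKey.concat(keyToWorld, currToWorld); // curr -> key, then key -> world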
Use of boofcv.struct.geo.Point2D3D in project BoofCV by lessthanoptimal.
The class FactoryVisualOdometry, method depthDepthPnP.
/**
 * Depth sensor based visual odometry algorithm which runs a sparse feature tracker in the visual camera and
 * estimates the range of each track once, when it is first detected, using the depth sensor.
 *
 * @see VisOdomPixelDepthPnP
 *
 * @param inlierPixelTol Reprojection error tolerance, in pixels, for a track to be considered a RANSAC inlier.
 * @param thresholdAdd Add new tracks when less than this number are in the inlier set. Tracker dependent. Set to
 * a value ≤ 0 to add features every frame.
 * @param thresholdRetire Discard a track if it is not in the inlier set after this many updates. Try 2.
 * @param ransacIterations Number of iterations RANSAC will perform.
 * @param refineIterations Number of iterations used to refine the pose estimate. Set to ≤ 0 to disable refinement.
 * @param doublePass If true the tracker and motion estimate are run twice, improving quality at the cost of speed.
 * @param sparseDepth Extracts depth of pixels from a depth sensor.
 * @param tracker Feature tracker run on the visual image.
 * @param visualType Type of visual image being processed.
 * @param depthType Type of depth image being processed.
 * @return DepthVisualOdometry
 */
public static <Vis extends ImageGray<Vis>, Depth extends ImageGray<Depth>>
DepthVisualOdometry<Vis, Depth> depthDepthPnP(double inlierPixelTol,
        int thresholdAdd, int thresholdRetire,
        int ransacIterations, int refineIterations, boolean doublePass,
        DepthSparse3D<Depth> sparseDepth, PointTrackerTwoPass<Vis> tracker,
        Class<Vis> visualType, Class<Depth> depthType) {
    // 3D point of each pixel from the sparse depth sensor
    ImagePixelTo3D pixelTo3D = new DepthSparse3D_to_PixelTo3D<>(sparseDepth);

    Estimate1ofPnP estimator = FactoryMultiView.computePnP_1(EnumPNP.P3P_FINSTERWALDER, -1, 2);
    final DistanceModelMonoPixels<Se3_F64, Point2D3D> distance = new PnPDistanceReprojectionSq();

    ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
    EstimatorToGenerator<Se3_F64, Point2D3D> generator = new EstimatorToGenerator<>(estimator);

    // convert the pixel tolerance into the squared reprojection error used by RANSAC
    double ransacTOL = inlierPixelTol * inlierPixelTol;

    ModelMatcher<Se3_F64, Point2D3D> motion =
            new Ransac<>(2323, manager, generator, distance, ransacIterations, ransacTOL);

    RefinePnP refine = null;
    if (refineIterations > 0) {
        refine = FactoryMultiView.refinePnP(1e-12, refineIterations);
    }

    VisOdomPixelDepthPnP<Vis> alg = new VisOdomPixelDepthPnP<>(thresholdAdd, thresholdRetire,
            doublePass, motion, pixelTo3D, refine, tracker, null, null);

    return new VisOdomPixelDepthPnP_to_DepthVisualOdometry<>(sparseDepth, alg, distance,
            ImageType.single(visualType), depthType);
}
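A hedged usage sketch of the factory above, modeled on BoofCV's depth visual odometry example; the KLT configuration, detector settings, and depth scale are assumptions, not taken from this file.

    // Usage sketch; configuration values are illustrative assumptions.
    PkltConfig configKlt = new PkltConfig();
    configKlt.pyramidScaling = new int[]{1, 2, 4, 8};
    configKlt.templateRadius = 3;

    PointTrackerTwoPass<GrayU8> tracker = FactoryPointTrackerTwoPass.klt(configKlt,
            new ConfigGeneralDetector(600, 3, 1), GrayU8.class, GrayS16.class);

    // depth image values assumed to be in millimeters; scale them into meters
    DepthSparse3D<GrayU16> sparseDepth = new DepthSparse3D.I<>(1e-3);

    DepthVisualOdometry<GrayU8, GrayU16> visualOdometry = FactoryVisualOdometry.depthDepthPnP(
            1.5, 120, 2, 200, 50, true, sparseDepth, tracker, GrayU8.class, GrayU16.class);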