Use of boofcv.struct.geo.Point2D3D in project BoofCV by lessthanoptimal.
The class TestDistanceTranGivenRotSq, method testNoisy.
@Test
public void testNoisy() {
    Se3_F64 keyToCurr = new Se3_F64();
    keyToCurr.getR().set(ConvertRotation3D_F64.eulerToMatrix(EulerType.XYZ, 0.05, -0.03, 0.02, null));
    keyToCurr.getT().set(0.1, -0.1, 0.01);

    Point3D_F64 X = new Point3D_F64(0.1, -0.05, 3);

    Point2D3D obs = new Point2D3D();
    obs.location = X.copy();

    SePointOps_F64.transform(keyToCurr, X, X);

    // corrupt the pinhole projection so the reprojection error is clearly non-zero
    obs.observation.x = X.x / X.z + 1;
    obs.observation.y = X.y / X.z + 1;

    alg.setRotation(keyToCurr.getR());
    alg.setModel(keyToCurr.getT());

    assertTrue(alg.computeDistance(obs) > 1e-8);
}
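For contrast, a minimal sketch of the noise-free counterpart, in the style of the test above and assuming the same test fixture (alg, an instance of DistanceTranGivenRotSq): with an exact pinhole projection the computed distance should be essentially zero.

Se3_F64 keyToCurr = new Se3_F64();
keyToCurr.getR().set(ConvertRotation3D_F64.eulerToMatrix(EulerType.XYZ, 0.05, -0.03, 0.02, null));
keyToCurr.getT().set(0.1, -0.1, 0.01);

Point3D_F64 X = new Point3D_F64(0.1, -0.05, 3);

Point2D3D obs = new Point2D3D();
obs.location = X.copy();

SePointOps_F64.transform(keyToCurr, X, X);

// exact projection this time, no perturbation
obs.observation.x = X.x / X.z;
obs.observation.y = X.y / X.z;

alg.setRotation(keyToCurr.getR());
alg.setModel(keyToCurr.getT());

assertEquals(0, alg.computeDistance(obs), 1e-8);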
Use of boofcv.struct.geo.Point2D3D in project BoofCV by lessthanoptimal.
The class CalibrationFiducialDetector, method init.
protected void init(DetectorFiducialCalibration detector, double width, Class<T> imageType) {
    this.detector = detector;
    this.type = ImageType.single(imageType);
    this.converted = new GrayF32(1, 1);
    this.width = width;

    // pair each known layout point with a Point2D3D; the calibration target is
    // planar, so every 3D location lies on the z = 0 plane
    List<Point2D_F64> layout = detector.getLayout();
    points2D3D = new ArrayList<>();
    for (int i = 0; i < layout.size(); i++) {
        Point2D_F64 p2 = layout.get(i);
        Point2D3D p = new Point2D3D();
        p.location.set(p2.x, p2.y, 0);
        points2D3D.add(p);
    }

    selectBoundaryCorners();
}
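A minimal sketch (not part of the class above) of how the observation half of each Point2D3D might later be filled in: detected pixels are converted into normalized image coordinates and written next to their known layout locations. The helper name, the detected list, and the pixelToNorm transform are hypothetical; they stand in for whatever the detector and camera model provide, and the detections are assumed to be in layout order.

static void updateObservations(List<Point2D_F64> detected,
                               List<Point2D3D> points2D3D,
                               Point2Transform2_F64 pixelToNorm) {
    for (int i = 0; i < detected.size(); i++) {
        Point2D_F64 pixel = detected.get(i);
        // write the normalized coordinate directly into the paired observation
        pixelToNorm.compute(pixel.x, pixel.y, points2D3D.get(i).observation);
    }
}

With observations and locations paired this way, the list can be fed directly to a PnP estimator to recover the fiducial-to-camera pose.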
Use of boofcv.struct.geo.Point2D3D in project MAVSlam by ecmnet.
The class FactoryMAVOdometry, method depthDepthPnP.
/**
 * Depth sensor based visual odometry algorithm which runs a sparse feature tracker in the visual camera and
 * estimates the range of each track, using the depth sensor, when it is first detected.
 *
 * @see MAVOdomPixelDepthPnP
 *
 * @param inlierPixelTol Tolerance in pixels for RANSAC to consider an observation an inlier. Try 1.5
 * @param thresholdAdd Add new tracks when less than this number are in the inlier set. Tracker dependent. Set to
 * a value ≤ 0 to add features every frame.
 * @param thresholdRetire Discard a track if it is not in the inlier set after this many updates. Try 2
 * @param ransacIterations Number of iterations RANSAC will perform
 * @param refineIterations Number of non-linear refinement iterations applied to the pose. Set to ≤ 0 to disable.
 * @param doublePass If true the tracker will run a second pass using the estimated motion as a hint
 * @param sparseDepth Extracts depth of pixels from a depth sensor.
 * @param tracker Two-pass feature tracker run on the visual image.
 * @param visualType Type of visual image being processed.
 * @param depthType Type of depth image being processed.
 * @return MAVDepthVisualOdometry
 */
public static <Vis extends ImageGray, Depth extends ImageGray>
MAVDepthVisualOdometry<Vis, Depth> depthDepthPnP(double inlierPixelTol, int thresholdAdd,
                                                 int thresholdRetire, int ransacIterations,
                                                 int refineIterations, boolean doublePass,
                                                 DepthSparse3D<Depth> sparseDepth,
                                                 PointTrackerTwoPass<Vis> tracker,
                                                 Class<Vis> visualType, Class<Depth> depthType) {
    // Range from sparse disparity
    ImagePixelTo3D pixelTo3D = new DepthSparse3D_to_PixelTo3D<Depth>(sparseDepth);

    Estimate1ofPnP estimator = FactoryMultiView.computePnP_1(EnumPNP.P3P_FINSTERWALDER, -1, 2);
    final DistanceModelMonoPixels<Se3_F64, Point2D3D> distance = new PnPDistanceReprojectionSq();

    ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
    EstimatorToGenerator<Se3_F64, Point2D3D> generator = new EstimatorToGenerator<Se3_F64, Point2D3D>(estimator);

    // RANSAC's inlier test uses the squared reprojection error in pixels
    double ransacTOL = inlierPixelTol * inlierPixelTol;

    ModelMatcher<Se3_F64, Point2D3D> motion = new Ransac<Se3_F64, Point2D3D>(2323, manager, generator, distance, ransacIterations, ransacTOL);

    RefinePnP refine = null;
    if (refineIterations > 0) {
        refine = FactoryMultiView.refinePnP(1e-12, refineIterations);
    }

    MAVOdomPixelDepthPnP<Vis> alg = new MAVOdomPixelDepthPnP<Vis>(thresholdAdd, thresholdRetire, doublePass, motion, pixelTo3D, refine, tracker, null, null);

    return new MAVOdomPixelDepthPnP_to_DepthVisualOdometry<Vis, Depth>(sparseDepth, alg, distance, ImageType.single(visualType), depthType);
}
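A sketch of how this factory might be invoked, modeled on BoofCV's depth visual odometry examples. The tracker configuration values (pyramid scales, template radius, number of features) and the RANSAC/refinement settings are illustrative placeholders, not recommendations from the MAVSlam project.

PkltConfig configKlt = new PkltConfig();
configKlt.pyramidScaling = new int[]{1, 2, 4, 8};
configKlt.templateRadius = 3;

// KLT tracker with a two-pass interface, as required by depthDepthPnP
PointTrackerTwoPass<GrayU8> tracker = FactoryPointTrackerTwoPass.klt(
        configKlt, new ConfigGeneralDetector(600, 3, 1), GrayU8.class, GrayS16.class);

// depth images store millimeters; scale them into meters
DepthSparse3D<GrayU16> sparseDepth = new DepthSparse3D.I<>(1e-3);

MAVDepthVisualOdometry<GrayU8, GrayU16> visualOdometry = FactoryMAVOdometry.depthDepthPnP(
        1.5, 120, 2, 200, 50, true, sparseDepth, tracker, GrayU8.class, GrayU16.class);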
Use of boofcv.struct.geo.Point2D3D in project MAVSlam by ecmnet.
The class MAVOdomPixelDepthPnP, method performSecondPass.
private boolean performSecondPass(List<PointTrack> active, List<Point2D3D> obs) {
    Se3_F64 keyToCurr = motionEstimator.getModelParameters();

    Point3D_F64 cameraPt = new Point3D_F64();
    Point2D_F64 predicted = new Point2D_F64();

    // predict where each track should be given the just estimated motion
    List<PointTrack> all = tracker.getAllTracks(null);
    for (PointTrack t : all) {
        Point2D3D p = t.getCookie();
        SePointOps_F64.transform(keyToCurr, p.location, cameraPt);
        normToPixel.compute(cameraPt.x / cameraPt.z, cameraPt.y / cameraPt.z, predicted);
        tracker.setHint(predicted.x, predicted.y, t);
    }

    // redo tracking with the additional information
    tracker.performSecondPass();

    active.clear();
    obs.clear();
    tracker.getActiveTracks(active);
    for (PointTrack t : active) {
        Point2D3D p = t.getCookie();
        pixelToNorm.compute(t.x, t.y, p.observation);
        obs.add(p);
    }

    return motionEstimator.process(obs);
}
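For context, a hedged sketch of the other half of the cookie protocol this method relies on: when a track is spawned, a Point2D3D is attached as its cookie, with the location filled from the depth sensor and the observation from the normalized image coordinate. Field names (tracker, pixelTo3D, pixelToNorm) follow the method above; the actual spawn logic in MAVOdomPixelDepthPnP may differ in detail.

for (PointTrack t : tracker.getNewTracks(null)) {
    Point2D3D p = t.getCookie();
    if (p == null)
        t.cookie = p = new Point2D3D();

    // 3D location in the key frame, read from the depth sensor; skip pixels with no depth
    if (!pixelTo3D.process(t.x, t.y))
        continue;
    double w = pixelTo3D.getW();
    p.location.set(pixelTo3D.getX() / w, pixelTo3D.getY() / w, pixelTo3D.getZ() / w);

    // normalized image coordinate of the first observation
    pixelToNorm.compute(t.x, t.y, p.observation);
}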
Use of boofcv.struct.geo.Point2D3D in project BoofCV by lessthanoptimal.
The class WrapP3PLineDistance, method process.
@Override
public boolean process(List<Point2D3D> inputs, FastQueue<Se3_F64> solutions) {
    if (inputs.size() != 3)
        throw new IllegalArgumentException("Three and only three inputs are required. Not " + inputs.size());

    solutions.reset();

    Point2D3D P1 = inputs.get(0);
    Point2D3D P2 = inputs.get(1);
    Point2D3D P3 = inputs.get(2);

    // Compute the length of each side in the triangle
    double length12 = P1.location.distance(P2.getLocation());
    double length13 = P1.location.distance(P3.getLocation());
    double length23 = P2.location.distance(P3.getLocation());

    if (!alg.process(P1.observation, P2.observation, P3.observation, length23, length13, length12))
        return false;

    FastQueue<PointDistance3> distances = alg.getSolutions();
    if (distances.size == 0)
        return false;

    // convert each observation into a 3D pointing vector (homogeneous coordinates)
    // and normalize it to unit length
    u1.set(P1.observation.x, P1.observation.y, 1);
    u2.set(P2.observation.x, P2.observation.y, 1);
    u3.set(P3.observation.x, P3.observation.y, 1);
    u1.normalize();
    u2.normalize();
    u3.normalize();

    // set up world point cloud
    cloudWorld.clear();
    cloudWorld.add(P1.location);
    cloudWorld.add(P2.location);
    cloudWorld.add(P3.location);

    for (int i = 0; i < distances.size; i++) {
        PointDistance3 pd = distances.get(i);

        // find the points in the camera frame by scaling each pointing vector by
        // its solved distance; X1..X3 back the cloudCamera list
        X1.set(u1.x * pd.dist1, u1.y * pd.dist1, u1.z * pd.dist1);
        X2.set(u2.x * pd.dist2, u2.y * pd.dist2, u2.z * pd.dist2);
        X3.set(u3.x * pd.dist3, u3.y * pd.dist3, u3.z * pd.dist3);

        // estimate the rigid motion that takes the world cloud onto the camera cloud
        if (!motionFit.process(cloudWorld, cloudCamera))
            continue;

        // NOTE: This transform is world to camera, and it's perfectly valid for a point
        // to have a negative Z value and be behind the camera.
        Se3_F64 found = solutions.grow();
        found.set(motionFit.getTransformSrcToDst());
    }

    return solutions.size() != 0;
}
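A usage sketch for a P3P solver of this kind: it consumes exactly three observation/location pairs and may return several candidate poses, which the caller then disambiguates (for example with a fourth point). Construction here goes through FactoryMultiView; the choice of EnumPNP.P3P_GRUNERT and the test data are illustrative.

// three world points and their perfect projections under the identity pose
List<Point2D3D> inputs = new ArrayList<>();
for (double[] v : new double[][]{{0.1, -0.05, 3}, {-0.2, 0.3, 2.5}, {0.4, 0.2, 3.5}}) {
    Point2D3D p = new Point2D3D();
    p.location.set(v[0], v[1], v[2]);
    p.observation.set(v[0] / v[2], v[1] / v[2]); // normalized image coordinates
    inputs.add(p);
}

EstimateNofPnP p3p = FactoryMultiView.computePnP_N(EnumPNP.P3P_GRUNERT, -1);
FastQueue<Se3_F64> solutions = new FastQueue<>(Se3_F64.class, true);

if (p3p.process(inputs, solutions)) {
    // one candidate should be close to the identity transform
    for (int i = 0; i < solutions.size; i++)
        solutions.get(i).print();
}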