Usage example of georegression.struct.point.Vector3D_F64 in the BoofCV project (by lessthanoptimal), taken from the method addView of the class TriangulateLinearDLT:
/**
 * Appends the two linear DLT constraint rows contributed by a single view to
 * the system matrix {@code A}, writing directly into its data array.
 *
 * @param motion rigid-body transform (rotation R and translation T) from world to this camera
 * @param a      observation in normalized image coordinates, so no observation scaling is required
 * @param index  first index in {@code A.data} to write to
 * @return index just past the last element written (8 elements are filled)
 */
private int addView(Se3_F64 motion, Point2D_F64 a, int index) {
	DMatrixRMaj rotation = motion.getR();
	Vector3D_F64 translation = motion.getT();

	// row-major 3x3 rotation: r[3*row + col]
	double[] r = rotation.data;

	// constraint row derived from the x-component of the observation
	A.data[index++] = a.x * r[6] - r[0];
	A.data[index++] = a.x * r[7] - r[1];
	A.data[index++] = a.x * r[8] - r[2];
	A.data[index++] = a.x * translation.z - translation.x;

	// constraint row derived from the y-component of the observation
	A.data[index++] = a.y * r[6] - r[3];
	A.data[index++] = a.y * r[7] - r[4];
	A.data[index++] = a.y * r[8] - r[5];
	A.data[index++] = a.y * translation.z - translation.y;

	return index;
}
Usage example of georegression.struct.point.Vector3D_F64 in the BoofCV project (by lessthanoptimal), taken from the method incorrectInput of the class CheckRefineFundamental:
/**
 * Verifies that the refinement algorithm improves a deliberately corrupted
 * initial estimate: after fitting, the solution must be closer to the true
 * essential matrix than the corrupted input was.
 */
@Test
public void incorrectInput() {
	init(30, false);

	// ground-truth essential matrix from the known camera motion
	DMatrixRMaj truth = MultiViewOps.createEssential(worldToCamera.getR(), worldToCamera.getT());

	// perturb the translation to produce an intentionally wrong estimate
	Vector3D_F64 badT = worldToCamera.getT().copy();
	badT.x += 0.1;
	DMatrixRMaj corrupted = MultiViewOps.createEssential(worldToCamera.getR(), badT);

	ModelFitter<DMatrixRMaj, AssociatedPair> alg = createAlgorithm();

	// refine starting from the corrupted estimate
	assertTrue(alg.fitModel(pairs, corrupted, found));

	// scale all matrices identically so an element-wise comparison is meaningful
	CommonOps_DDRM.divide(truth, truth.get(2, 2));
	CommonOps_DDRM.divide(corrupted, corrupted.get(2, 2));
	CommonOps_DDRM.divide(found, found.get(2, 2));

	// crude L1 distance from the ground truth, before and after refinement
	double errorBefore = 0;
	double errorAfter = 0;
	for (int i = 0; i < 9; i++) {
		errorBefore += Math.abs(corrupted.data[i] - truth.data[i]);
		errorAfter += Math.abs(found.data[i] - truth.data[i]);
	}

	// refinement must have moved the estimate closer to the truth
	assertTrue(errorAfter < errorBefore);
}
Usage example of georegression.struct.point.Vector3D_F64 in the BoofCV project (by lessthanoptimal), taken from the method checkCounterClockWise of the class TestDetectCircleHexagonalGrid:
/**
 * Asserts that the detected grid is ordered counter-clockwise by checking the
 * sign of the cross product of two in-grid direction vectors anchored at the
 * (0,0) ellipse. Falls back to element (1,1) when the grid is too small in a
 * given dimension.
 */
static void checkCounterClockWise(Grid g) {
	EllipseRotated_F64 origin = g.get(0, 0);
	EllipseRotated_F64 alongCols = g.columns >= 3 ? g.get(0, 2) : g.get(1, 1);
	EllipseRotated_F64 alongRows = g.rows >= 3 ? g.get(2, 0) : g.get(1, 1);

	// direction vectors from the origin ellipse along columns and rows
	double ax = alongCols.center.x - origin.center.x;
	double ay = alongCols.center.y - origin.center.y;
	double bx = alongRows.center.x - origin.center.x;
	double by = alongRows.center.y - origin.center.y;

	// z-component of the 3D cross product is positive for CCW ordering
	Vector3D_F64 cross = new Vector3D_F64();
	GeometryMath_F64.cross(ax, ay, 0, bx, by, 0, cross);
	assertTrue(cross.z > 0);
}
Usage example of georegression.struct.point.Vector3D_F64 in the BoofCV project (by lessthanoptimal), taken from the method main of the class ExampleVisualOdometryDepth:
/**
 * Demonstrates RGB-D visual odometry: configures a KLT point tracker and a
 * depth-based PnP odometry estimator, then processes a paired RGB/depth video
 * sequence, printing the estimated camera location and inlier percentage for
 * every frame.
 *
 * @throws IOException if the calibration file or video streams cannot be opened
 * @throws RuntimeException if visual odometry fails on any frame
 */
public static void main(String[] args) throws IOException {
	MediaManager media = DefaultMediaManager.INSTANCE;
	String directory = UtilIO.pathExample("kinect/straight");

	// camera description for the RGB-D sensor
	VisualDepthParameters depthParams = CalibrationIO.load(media.openFile(directory + "visualdepth.yaml"));

	// KLT tracker over a 4-level pyramid
	PkltConfig kltConfig = new PkltConfig();
	kltConfig.pyramidScaling = new int[] { 1, 2, 4, 8 };
	kltConfig.templateRadius = 3;
	PointTrackerTwoPass<GrayU8> tracker = FactoryPointTrackerTwoPass.klt(kltConfig, new ConfigGeneralDetector(600, 3, 1), GrayU8.class, GrayS16.class);

	// depth image values are in millimeters; 1e-3 converts to meters
	DepthSparse3D<GrayU16> sparseDepth = new DepthSparse3D.I<>(1e-3);

	// assemble the depth PnP odometry estimator
	DepthVisualOdometry<GrayU8, GrayU16> visualOdometry = FactoryVisualOdometry.depthDepthPnP(1.5, 120, 2, 200, 50, true, sparseDepth, tracker, GrayU8.class, GrayU16.class);

	// Pass in intrinsic/extrinsic calibration. This can be changed in the future.
	visualOdometry.setCalibration(depthParams.visualParam, new DoNothing2Transform2_F32());

	// open the synchronized RGB and depth sequences
	SimpleImageSequence<GrayU8> videoVisual = media.openVideo(directory + "rgb.mjpeg", ImageType.single(GrayU8.class));
	SimpleImageSequence<GrayU16> videoDepth = media.openVideo(directory + "depth.mpng", ImageType.single(GrayU16.class));

	// estimate egomotion frame by frame and report the camera's location
	while (videoVisual.hasNext()) {
		GrayU8 visual = videoVisual.next();
		GrayU16 depth = videoDepth.next();

		if (!visualOdometry.process(visual, depth)) {
			throw new RuntimeException("VO Failed!");
		}

		Se3_F64 cameraToWorld = visualOdometry.getCameraToWorld();
		Vector3D_F64 T = cameraToWorld.getT();
		System.out.printf("Location %8.2f %8.2f %8.2f inliers %s\n", T.x, T.y, T.z, inlierPercent(visualOdometry));
	}
}
Usage example of georegression.struct.point.Vector3D_F64 in the BoofCV project (by lessthanoptimal), taken from the method main of the class ExampleVisualOdometryMonocularPlane:
/**
 * Demonstrates monocular plane-based visual odometry: sets up a KLT point
 * tracker and a plane-infinity motion estimator, then processes a single
 * camera's video, printing the estimated location and inlier percentage per
 * frame. On a tracking fault the estimator is reset and processing continues.
 */
public static void main(String[] args) {
	MediaManager media = DefaultMediaManager.INSTANCE;
	String directory = UtilIO.pathExample("vo/drc/");

	// camera-plus-plane calibration and the input video
	MonoPlaneParameters calibration = CalibrationIO.load(media.openFile(directory + "mono_plane.yaml"));
	SimpleImageSequence<GrayU8> video = media.openVideo(directory + "left.mjpeg", ImageType.single(GrayU8.class));

	// KLT tracker over a 4-level pyramid
	PkltConfig kltConfig = new PkltConfig();
	kltConfig.pyramidScaling = new int[] { 1, 2, 4, 8 };
	kltConfig.templateRadius = 3;
	ConfigGeneralDetector detectorConfig = new ConfigGeneralDetector(600, 3, 1);
	PointTracker<GrayU8> tracker = FactoryPointTracker.klt(kltConfig, detectorConfig, GrayU8.class, null);

	// assemble the plane-infinity odometry estimator
	MonocularPlaneVisualOdometry<GrayU8> visualOdometry = FactoryVisualOdometry.monoPlaneInfinity(75, 2, 1.5, 200, tracker, ImageType.single(GrayU8.class));

	// Pass in intrinsic/extrinsic calibration. This can be changed in the future.
	visualOdometry.setCalibration(calibration);

	// estimate egomotion frame by frame and report the camera's location
	while (video.hasNext()) {
		GrayU8 image = video.next();

		if (!visualOdometry.process(image)) {
			// a fault is recoverable here: reset and keep going
			System.out.println("Fault!");
			visualOdometry.reset();
		}

		Se3_F64 cameraToWorld = visualOdometry.getCameraToWorld();
		Vector3D_F64 T = cameraToWorld.getT();
		System.out.printf("Location %8.2f %8.2f %8.2f inliers %s\n", T.x, T.y, T.z, inlierPercent(visualOdometry));
	}
}
Aggregations