Use of boofcv.struct.distort.DoNothing2Transform2_F32 in project BoofCV (by lessthanoptimal): class VisualizeDepthVisualOdometryApp, method handleInputChange.
/**
 * Resets the odometry statistics, the estimator, and the GUI components whenever
 * the active input changes. Reacts only to changes on source 0; all other
 * sources are ignored.
 */
@Override
protected void handleInputChange(int source, InputMethod method, int width, int height) {
	if (source != 0) {
		return;
	}

	// clear statistics carried over from the previous sequence
	fps = -1;
	numFaults = 0;
	frameNumber = 0;

	// restart the estimator with a do-nothing depth-to-visual transform
	alg.reset();
	alg.setCalibration(config.visualParam, new DoNothing2Transform2_F32());

	statusPanel.reset();
	handleRunningStatus(Status.RUNNING);

	// configure the 3D point-cloud view
	guiCam3D.init();
	guiCam3D.setFocalLength(300);
	guiCam3D.setStepSize(0.05);
	guiCam3D.setPreferredSize(new Dimension(config.visualParam.width, config.visualParam.height));

	// side-by-side layout: video on the left, 3D view on the right (20 px for the divider)
	viewPanel.setPreferredSize(new Dimension(width * 2 + 20, height));
	viewPanel.setDividerLocation(width);
	viewPanel.setMaximumSize(viewPanel.getPreferredSize());
}
Use of boofcv.struct.distort.DoNothing2Transform2_F32 in project BoofCV (by lessthanoptimal): class ExampleVisualOdometryDepth, method main.
/**
 * Runs RGB-D (depth) visual odometry on a recorded Kinect sequence and prints
 * the estimated camera location plus the inlier percentage for every frame.
 *
 * @param args ignored
 * @throws IOException if the calibration file or the video streams cannot be opened
 */
public static void main(String[] args) throws IOException {
	MediaManager media = DefaultMediaManager.INSTANCE;
	String directory = UtilIO.pathExample("kinect/straight");

	// load camera description and the video sequence
	VisualDepthParameters param = CalibrationIO.load(media.openFile(directory + "visualdepth.yaml"));

	// specify how the image features are going to be tracked
	PkltConfig configKlt = new PkltConfig();
	configKlt.pyramidScaling = new int[]{ 1, 2, 4, 8 };
	configKlt.templateRadius = 3;
	PointTrackerTwoPass<GrayU8> tracker =
			FactoryPointTrackerTwoPass.klt(configKlt, new ConfigGeneralDetector(600, 3, 1), GrayU8.class, GrayS16.class);

	// converts raw depth pixel values into 3D coordinates; 1e-3 scales millimeters to meters
	DepthSparse3D<GrayU16> sparseDepth = new DepthSparse3D.I<>(1e-3);

	// declares the algorithm
	DepthVisualOdometry<GrayU8, GrayU16> visualOdometry =
			FactoryVisualOdometry.depthDepthPnP(1.5, 120, 2, 200, 50, true,
					sparseDepth, tracker, GrayU8.class, GrayU16.class);

	// Pass in intrinsic/extrinsic calibration. This can be changed in the future.
	visualOdometry.setCalibration(param.visualParam, new DoNothing2Transform2_F32());

	// Process the video sequence and output the location plus number of inliers
	SimpleImageSequence<GrayU8> videoVisual = media.openVideo(directory + "rgb.mjpeg", ImageType.single(GrayU8.class));
	SimpleImageSequence<GrayU16> videoDepth = media.openVideo(directory + "depth.mpng", ImageType.single(GrayU16.class));

	// Guard BOTH sequences: the original loop only checked the visual stream and
	// would throw from videoDepth.next() if the depth stream were shorter.
	while (videoVisual.hasNext() && videoDepth.hasNext()) {
		GrayU8 visual = videoVisual.next();
		GrayU16 depth = videoDepth.next();

		if (!visualOdometry.process(visual, depth)) {
			throw new RuntimeException("VO Failed!");
		}

		Se3_F64 leftToWorld = visualOdometry.getCameraToWorld();
		Vector3D_F64 T = leftToWorld.getT();

		System.out.printf("Location %8.2f %8.2f %8.2f inliers %s\n", T.x, T.y, T.z, inlierPercent(visualOdometry));
	}
}
Use of boofcv.struct.distort.DoNothing2Transform2_F32 in project BoofCV (by lessthanoptimal): class CheckVisualOdometryDepthSim, method moveForward.
/**
 * Simulates the camera translating straight forward along +z in 5 cm steps and
 * verifies that the estimated pose matches the simulated pose to within a crude
 * tolerance on both rotation and translation.
 */
@Test
public void moveForward() {
	algorithm.reset();
	algorithm.setCalibration(param, new DoNothing2Transform2_F32());

	Se3_F64 worldToLeft = new Se3_F64();

	for (int tick = 0; tick < 10; tick++) {
		// advance the camera 5 cm per tick along the z-axis
		worldToLeft.getT().z = tick * 0.05;

		// render the synthetic visual and depth images at the new pose
		setIntrinsic(param);
		left.setTo(render(worldToLeft));
		renderDepth(worldToLeft, depth, depthUnits);

		// feed the rendered frame pair into the odometry algorithm
		assertTrue(algorithm.process(left, depth));

		// Compare against ground truth. Only a crude approximation is expected.
		Se3_F64 foundWorldToLeft = algorithm.getCameraToWorld().invert(null);
		assertTrue(MatrixFeatures_DDRM.isIdentical(foundWorldToLeft.getR(), worldToLeft.getR(), 0.1));
		assertTrue(foundWorldToLeft.getT().distance(worldToLeft.getT()) < tolerance);
	}
}
End of aggregated usage examples.