Use of org.ejml.data.DMatrixRMaj in project BoofCV by lessthanoptimal.
The class ExampleStereoUncalibrated, method main.
public static void main(String[] args) {
// The solution below is unstable, so concurrency is turned off to ensure it always produces a valid solution
// The two-view case is very challenging and I've not seen a stable algorithm yet
BoofConcurrency.USE_CONCURRENT = false;
// Successful
String name = "bobcats_";
// String name = "mono_wall_";
// String name = "minecraft_cave1_";
// String name = "chicken_";
// String name = "books_";
// Borderline: sometimes successful
// String name = "triflowers_";
// Failures
// String name = "rock_leaves_";
// String name = "minecraft_distant_";
// String name = "rockview_";
// String name = "pebbles_";
// String name = "skull_";
// String name = "turkey_";
BufferedImage buff01 = UtilImageIO.loadImageNotNull(UtilIO.pathExample("triple/" + name + "01.jpg"));
BufferedImage buff02 = UtilImageIO.loadImageNotNull(UtilIO.pathExample("triple/" + name + "02.jpg"));
Planar<GrayU8> color01 = ConvertBufferedImage.convertFrom(buff01, true, ImageType.pl(3, GrayU8.class));
Planar<GrayU8> color02 = ConvertBufferedImage.convertFrom(buff02, true, ImageType.pl(3, GrayU8.class));
GrayU8 image01 = ConvertImage.average(color01, null);
GrayU8 image02 = ConvertImage.average(color02, null);
// Find a set of point feature matches
List<AssociatedPair> matches = ExampleComputeFundamentalMatrix.computeMatches(buff01, buff02);
// Prune matches using the epipolar constraint. Use a low threshold to prune more false matches
var inliers = new ArrayList<AssociatedPair>();
DMatrixRMaj F = ExampleComputeFundamentalMatrix.robustFundamental(matches, inliers, 0.1);
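// Illustrative aside, not part of the original example: the 0.1 threshold above is an inlier
// distance for the epipolar constraint p2'*F*p1 = 0. Assuming MultiViewOps.constraint() exists
// with this signature, the raw algebraic residual of each surviving match can be inspected:
// for (AssociatedPair p : inliers)
//     System.out.println("algebraic epipolar error = " + MultiViewOps.constraint(F, p.p1, p.p2));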
// Perform self-calibration using the projective camera matrix extracted from F
// Note that P1 = [I|0]
System.out.println("Self calibration");
DMatrixRMaj P2 = MultiViewOps.fundamentalToProjective(F);
// Take a crude guess at the intrinsic parameters. Bundle adjustment will fix this later.
int width = buff01.getWidth(), height = buff01.getHeight();
double fx = width / 2.0;
double fy = fx;
double cx = width / 2.0;
double cy = height / 2.0;
// Compute a transform from projective to metric by assuming we know the camera's calibration
var estimateV = new EstimatePlaneAtInfinityGivenK();
estimateV.setCamera1(fx, fy, 0, cx, cy);
estimateV.setCamera2(fx, fy, 0, cx, cy);
// plane at infinity
var v = new Vector3D_F64();
if (!estimateV.estimatePlaneAtInfinity(P2, v))
throw new RuntimeException("Failed!");
DMatrixRMaj K = PerspectiveOps.pinholeToMatrix(fx, fy, 0, cx, cy);
DMatrixRMaj H = MultiViewOps.createProjectiveToMetric(K, v.x, v.y, v.z, 1, null);
var P2m = new DMatrixRMaj(3, 4);
CommonOps_DDRM.mult(P2, H, P2m);
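// Background sketch, stated as an assumption rather than code from the example: H has the
// block form [K 0; v' 1] up to sign conventions, so the first camera upgrades to P1*H = [K|0]
// and the metric second camera P2m should factor as K*[R|t] with the K guessed above.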
// Decompose and get the initial estimate for translation
var tmp = new DMatrixRMaj(3, 3);
var view1_to_view2 = new Se3_F64();
MultiViewOps.decomposeMetricCamera(P2m, tmp, view1_to_view2);
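// Hedged sanity check, not in the original: recomposing K*[R|t] from the decomposition should
// reproduce P2m up to scale. createCameraMatrix() is assumed to have the signature below.
// DMatrixRMaj P2check = PerspectiveOps.createCameraMatrix(view1_to_view2.R, view1_to_view2.T, tmp, null);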
// ------------------------- Setting up bundle adjustment
// bundle adjustment will provide a more refined and accurate estimate of these parameters
System.out.println("Configuring bundle adjustment");
// Construct bundle adjustment data structure
var structure = new SceneStructureMetric(false);
var observations = new SceneObservations();
// We will assume that the camera has fixed intrinsic parameters
structure.initialize(1, 2, inliers.size());
observations.initialize(2);
var bp = new BundlePinholeSimplified();
bp.f = fx;
structure.setCamera(0, false, bp);
// The first view is the world coordinate system
structure.setView(0, 0, true, new Se3_F64());
// Second view was estimated previously
structure.setView(1, 0, false, view1_to_view2);
for (int i = 0; i < inliers.size(); i++) {
AssociatedPair t = inliers.get(i);
// Subtract the camera center from the points. This allows a simpler camera model to be used, and
// errors in this coordinate tend to be non-fatal
observations.getView(0).add(i, (float) (t.p1.x - cx), (float) (t.p1.y - cy));
observations.getView(1).add(i, (float) (t.p2.x - cx), (float) (t.p2.y - cy));
// each point is visible in both of the views
structure.connectPointToView(i, 0);
structure.connectPointToView(i, 1);
}
// initial location of points is found through triangulation
MultiViewOps.triangulatePoints(structure, observations);
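// (Note, an assumption about the helper: each point is initialized with a linear triangulation
// from its two observations; bundle adjustment below then refines points and poses jointly.)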
// ------------------ Running Bundle Adjustment
System.out.println("Performing bundle adjustment");
var configLM = new ConfigLevenbergMarquardt();
configLM.dampeningInitial = 1e-3;
configLM.hessianScaling = false;
var configSBA = new ConfigBundleAdjustment();
configSBA.configOptimizer = configLM;
// Create and configure the bundle adjustment solver
BundleAdjustment<SceneStructureMetric> bundleAdjustment = FactoryMultiView.bundleSparseMetric(configSBA);
// prints out useful debugging information that lets you know how well it's converging
bundleAdjustment.setVerbose(System.out, null);
// Specifies convergence criteria: function tolerance, gradient tolerance, and maximum iterations
bundleAdjustment.configure(1e-6, 1e-6, 100);
// Scaling improves the accuracy of numerical calculations
var bundleScale = new ScaleSceneStructure();
bundleScale.applyScale(structure, observations);
bundleAdjustment.setParameters(structure, observations);
bundleAdjustment.optimize(structure);
// Sometimes pruning outliers helps improve the solution. In the stereo case the errors are likely
// to already be fatal
var pruner = new PruneStructureFromSceneMetric(structure, observations);
pruner.pruneObservationsByErrorRank(0.85);
pruner.prunePoints(1);
bundleAdjustment.setParameters(structure, observations);
bundleAdjustment.optimize(structure);
bundleScale.undoScale(structure, observations);
System.out.println("\nCamera");
for (int i = 0; i < structure.cameras.size; i++) {
System.out.println(structure.cameras.data[i].getModel().toString());
}
System.out.println("\n\nworldToView");
for (int i = 0; i < structure.views.size; i++) {
System.out.println(structure.getParentToView(i).toString());
}
// display the inlier matches found using the robust estimator
System.out.println("\n\nComputing Stereo Disparity");
BundlePinholeSimplified cp = structure.getCameras().get(0).getModel();
var intrinsic = new CameraPinholeBrown();
intrinsic.fsetK(cp.f, cp.f, 0, cx, cy, width, height);
intrinsic.fsetRadial(cp.k1, cp.k2);
Se3_F64 leftToRight = structure.getParentToView(1);
computeStereoCloud(image01, image02, color01, color02, intrinsic, intrinsic, leftToRight, 0, 250);
}
Use of org.ejml.data.DMatrixRMaj in project BoofCV by lessthanoptimal.
The class ExampleTrifocalStereoUncalibrated, method main.
public static void main(String[] args) {
String name = "rock_leaves_";
// String name = "mono_wall_";
// String name = "minecraft_cave1_";
// String name = "minecraft_distant_";
// String name = "bobcats_";
// String name = "chicken_";
// String name = "turkey_";
// String name = "rockview_";
// String name = "pebbles_";
// String name = "books_";
// String name = "skull_";
// String name = "triflowers_";
BufferedImage buff01 = UtilImageIO.loadImageNotNull(UtilIO.pathExample("triple/" + name + "01.jpg"));
BufferedImage buff02 = UtilImageIO.loadImageNotNull(UtilIO.pathExample("triple/" + name + "02.jpg"));
BufferedImage buff03 = UtilImageIO.loadImageNotNull(UtilIO.pathExample("triple/" + name + "03.jpg"));
Planar<GrayU8> color01 = ConvertBufferedImage.convertFrom(buff01, true, ImageType.pl(3, GrayU8.class));
Planar<GrayU8> color02 = ConvertBufferedImage.convertFrom(buff02, true, ImageType.pl(3, GrayU8.class));
Planar<GrayU8> color03 = ConvertBufferedImage.convertFrom(buff03, true, ImageType.pl(3, GrayU8.class));
GrayU8 image01 = ConvertImage.average(color01, null);
GrayU8 image02 = ConvertImage.average(color02, null);
GrayU8 image03 = ConvertImage.average(color03, null);
// Using SURF features, which are robust and fairly fast to compute
DetectDescribePoint<GrayU8, TupleDesc_F64> detDesc = FactoryDetectDescribe.surfStable(new ConfigFastHessian(0, 4, 1000, 1, 9, 4, 2), null, null, GrayU8.class);
// Associate features across all three views using previous example code
var associateThree = new ExampleAssociateThreeView();
associateThree.initialize(detDesc);
associateThree.detectFeatures(image01, 0);
associateThree.detectFeatures(image02, 1);
associateThree.detectFeatures(image03, 2);
System.out.println("features01.size = " + associateThree.features01.size);
System.out.println("features02.size = " + associateThree.features02.size);
System.out.println("features03.size = " + associateThree.features03.size);
int width = image01.width, height = image01.height;
System.out.println("Image Shape " + width + " x " + height);
double cx = width / 2.0;
double cy = height / 2.0;
// The self-calibration step requires that the image coordinate system be centered in the image
associateThree.locations01.forEach(p -> p.setTo(p.x - cx, p.y - cy));
associateThree.locations02.forEach(p -> p.setTo(p.x - cx, p.y - cy));
associateThree.locations03.forEach(p -> p.setTo(p.x - cx, p.y - cy));
// Converting data formats for the found features into what can be processed by SFM algorithms
// Notice how the image center is subtracted from the coordinates? In many cases a principal point
// of zero is assumed. This is a reasonable assumption in almost all modern cameras. Errors in
// the principal point tend to materialize as translations and are non-fatal.
// Associate features in the three views using image information alone
DogArray<AssociatedTripleIndex> associatedIdx = associateThree.threeViewPairwiseAssociate();
// Convert the matched indexes into AssociatedTriple which contain the actual pixel coordinates
var associated = new DogArray<>(AssociatedTriple::new);
associatedIdx.forEach(p -> associated.grow().setTo(associateThree.locations01.get(p.a), associateThree.locations02.get(p.b), associateThree.locations03.get(p.c)));
System.out.println("Total Matched Triples = " + associated.size);
var model = new TrifocalTensor();
List<AssociatedTriple> inliers = ExampleComputeTrifocalTensor.computeTrifocal(associated, model);
System.out.println("Remaining after RANSAC " + inliers.size());
// Show remaining associations from RANSAC
var triplePanel = new AssociatedTriplePanel();
triplePanel.setPixelOffset(cx, cy);
triplePanel.setImages(buff01, buff02, buff03);
triplePanel.setAssociation(inliers);
ShowImages.showWindow(triplePanel, "Associations", true);
// Estimate the tensor again using all the inliers
// No need to re-scale the input because the estimator automatically adjusts the input on its own
var configTri = new ConfigTrifocal();
configTri.which = EnumTrifocal.ALGEBRAIC_7;
configTri.converge.maxIterations = 100;
Estimate1ofTrifocalTensor trifocalEstimator = FactoryMultiView.trifocal_1(configTri);
if (!trifocalEstimator.process(inliers, model))
throw new RuntimeException("Estimator failed");
model.print();
DMatrixRMaj P1 = CommonOps_DDRM.identity(3, 4);
DMatrixRMaj P2 = new DMatrixRMaj(3, 4);
DMatrixRMaj P3 = new DMatrixRMaj(3, 4);
MultiViewOps.trifocalToCameraMatrices(model, P2, P3);
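// Hedged consistency check, not in the original: with P1 = [I|0] fixed, the tensor can be
// rebuilt from the extracted camera matrices and compared against 'model' up to scale.
// createTrifocal() is assumed to accept the matrices in this order.
// TrifocalTensor check = MultiViewOps.createTrifocal(P2, P3, null);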
// Most of the time this refinement step makes little difference, but in some edge cases it appears
// to help convergence
System.out.println("Refining projective camera matrices");
RefineThreeViewProjective refineP23 = FactoryMultiView.threeViewRefine(null);
if (!refineP23.process(inliers, P2, P3, P2, P3))
throw new RuntimeException("Can't refine P2 and P3!");
var selfcalib = new SelfCalibrationLinearDualQuadratic(1.0);
selfcalib.addCameraMatrix(P1);
selfcalib.addCameraMatrix(P2);
selfcalib.addCameraMatrix(P3);
var listPinhole = new ArrayList<CameraPinhole>();
GeometricResult result = selfcalib.solve();
if (GeometricResult.SOLVE_FAILED != result) {
for (int i = 0; i < 3; i++) {
Intrinsic c = selfcalib.getIntrinsics().get(i);
CameraPinhole p = new CameraPinhole(c.fx, c.fy, 0, 0, 0, width, height);
listPinhole.add(p);
}
} else {
System.out.println("Self calibration failed!");
for (int i = 0; i < 3; i++) {
CameraPinhole p = new CameraPinhole(width / 2, width / 2, 0, 0, 0, width, height);
listPinhole.add(p);
}
}
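// Background, a brief aside rather than code from the example: SelfCalibrationLinearDualQuadratic
// estimates the dual absolute quadric Q from the projective cameras under zero-skew and
// known-aspect-ratio assumptions; the rectifying homography H below is then extracted from Q.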
// Print the estimated intrinsic parameters
for (int i = 0; i < 3; i++) {
CameraPinhole r = listPinhole.get(i);
System.out.println("fx=" + r.fx + " fy=" + r.fy + " skew=" + r.skew);
}
System.out.println("Projective to metric");
// convert camera matrix from projective to metric
// storage for rectifying homography
var H = new DMatrixRMaj(4, 4);
if (!MultiViewOps.absoluteQuadraticToH(selfcalib.getQ(), H))
throw new RuntimeException("Projective to metric failed");
var K = new DMatrixRMaj(3, 3);
var worldToView = new ArrayList<Se3_F64>();
for (int i = 0; i < 3; i++) {
worldToView.add(new Se3_F64());
}
// ignore K since we already have that
MultiViewOps.projectiveToMetric(P1, H, worldToView.get(0), K);
MultiViewOps.projectiveToMetric(P2, H, worldToView.get(1), K);
MultiViewOps.projectiveToMetric(P3, H, worldToView.get(2), K);
// scale is arbitrary. Set max translation to 1
adjustTranslationScale(worldToView);
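// Hedged sketch of what adjustTranslationScale() plausibly does (the real helper is defined
// elsewhere in this example; this illustration is an assumption, not the actual code):
// double maxT = 0;
// for (Se3_F64 m : worldToView) maxT = Math.max(maxT, m.T.norm());
// double scale = 1.0/maxT;
// for (Se3_F64 m : worldToView) m.T.scale(scale);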
// Construct bundle adjustment data structure
var structure = new SceneStructureMetric(false);
structure.initialize(3, 3, inliers.size());
var observations = new SceneObservations();
observations.initialize(3);
for (int i = 0; i < listPinhole.size(); i++) {
BundlePinholeSimplified bp = new BundlePinholeSimplified();
bp.f = listPinhole.get(i).fx;
structure.setCamera(i, false, bp);
structure.setView(i, i, i == 0, worldToView.get(i));
}
for (int i = 0; i < inliers.size(); i++) {
AssociatedTriple t = inliers.get(i);
observations.getView(0).add(i, (float) t.p1.x, (float) t.p1.y);
observations.getView(1).add(i, (float) t.p2.x, (float) t.p2.y);
observations.getView(2).add(i, (float) t.p3.x, (float) t.p3.y);
structure.connectPointToView(i, 0);
structure.connectPointToView(i, 1);
structure.connectPointToView(i, 2);
}
// Initial estimate for point 3D locations
triangulatePoints(structure, observations);
ConfigLevenbergMarquardt configLM = new ConfigLevenbergMarquardt();
configLM.dampeningInitial = 1e-3;
configLM.hessianScaling = false;
ConfigBundleAdjustment configSBA = new ConfigBundleAdjustment();
configSBA.configOptimizer = configLM;
// Create and configure the bundle adjustment solver
BundleAdjustment<SceneStructureMetric> bundleAdjustment = FactoryMultiView.bundleSparseMetric(configSBA);
// prints out useful debugging information that lets you know how well it's converging
// bundleAdjustment.setVerbose(System.out,0);
// convergence criteria
bundleAdjustment.configure(1e-6, 1e-6, 100);
bundleAdjustment.setParameters(structure, observations);
bundleAdjustment.optimize(structure);
// See if the solution is physically possible. If not fix and run bundle adjustment again
checkBehindCamera(structure, observations, bundleAdjustment);
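// Hedged sketch of the idea behind checkBehindCamera() (the real helper lives elsewhere in the
// example; this describes the concept, not the actual code): if most triangulated points end up
// with negative depth in the first view, the scene is mirrored, so negate the translations and
// point locations and run bundle adjustment again.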
// It's very difficult to find the best solution due to the number of local minima. In the three-view
// case a common problem is that a small translation is virtually identical to a small rotation.
// Convergence can be improved by considering that possibility
// Now that we have a decent solution, prune the worst outliers to improve the fit quality even more
var pruner = new PruneStructureFromSceneMetric(structure, observations);
pruner.pruneObservationsByErrorRank(0.7);
pruner.pruneViews(10);
pruner.pruneUnusedMotions();
pruner.prunePoints(1);
bundleAdjustment.setParameters(structure, observations);
bundleAdjustment.optimize(structure);
System.out.println("Final Views");
for (int i = 0; i < 3; i++) {
BundlePinholeSimplified cp = structure.getCameras().get(i).getModel();
Vector3D_F64 T = structure.getParentToView(i).T;
System.out.printf("[ %d ] f = %5.1f T=%s\n", i, cp.f, T.toString());
}
System.out.println("\n\nComputing Stereo Disparity");
BundlePinholeSimplified cp = structure.getCameras().get(0).getModel();
var intrinsic01 = new CameraPinholeBrown();
intrinsic01.fsetK(cp.f, cp.f, 0, cx, cy, width, height);
intrinsic01.fsetRadial(cp.k1, cp.k2);
cp = structure.getCameras().get(1).getModel();
var intrinsic02 = new CameraPinholeBrown();
intrinsic02.fsetK(cp.f, cp.f, 0, cx, cy, width, height);
intrinsic02.fsetRadial(cp.k1, cp.k2);
Se3_F64 leftToRight = structure.getParentToView(1);
// TODO dynamic max disparity
computeStereoCloud(image01, image02, color01, color02, intrinsic01, intrinsic02, leftToRight, 0, 250);
}
Use of org.ejml.data.DMatrixRMaj in project BoofCV by lessthanoptimal.
The class ExampleTrifocalStereoUncalibrated, method computeStereoCloud.
public static void computeStereoCloud(GrayU8 distortedLeft, GrayU8 distortedRight, Planar<GrayU8> colorLeft, Planar<GrayU8> colorRight, CameraPinholeBrown intrinsicLeft, CameraPinholeBrown intrinsicRight, Se3_F64 leftToRight, int minDisparity, int rangeDisparity) {
// drawInliers(origLeft, origRight, intrinsic, inliers);
// Rectify and remove lens distortion for stereo processing
var rectifiedK = new DMatrixRMaj(3, 3);
var rectifiedR = new DMatrixRMaj(3, 3);
// rectify a colored image
Planar<GrayU8> rectColorLeft = colorLeft.createSameShape();
Planar<GrayU8> rectColorRight = colorLeft.createSameShape();
GrayU8 rectMask = new GrayU8(colorLeft.width, colorLeft.height);
rectifyImages(colorLeft, colorRight, leftToRight, intrinsicLeft, intrinsicRight, rectColorLeft, rectColorRight, rectMask, rectifiedK, rectifiedR);
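// (Assumption about the helper's contract: rectMask is non-zero only where a rectified pixel
// maps back inside the original images, and it is used below to zero out disparity values
// computed from border fill.)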
if (rectifiedK.get(0, 0) < 0)
throw new RuntimeException("Egads");
System.out.println("Rectified K");
rectifiedK.print();
System.out.println("Rectified R");
rectifiedR.print();
GrayU8 rectifiedLeft = distortedLeft.createSameShape();
GrayU8 rectifiedRight = distortedRight.createSameShape();
ConvertImage.average(rectColorLeft, rectifiedLeft);
ConvertImage.average(rectColorRight, rectifiedRight);
// compute disparity
var config = new ConfigDisparityBMBest5();
config.errorType = DisparityError.CENSUS;
config.disparityMin = minDisparity;
config.disparityRange = rangeDisparity;
config.subpixel = true;
config.regionRadiusX = config.regionRadiusY = 6;
config.validateRtoL = 1;
config.texture = 0.2;
StereoDisparity<GrayU8, GrayF32> disparityAlg = FactoryStereoDisparity.blockMatchBest5(config, GrayU8.class, GrayF32.class);
// process the images and retrieve the resulting disparity
disparityAlg.process(rectifiedLeft, rectifiedRight);
GrayF32 disparity = disparityAlg.getDisparity();
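// (Assumption about BoofCV's convention: pixels with no valid match are encoded with a value at
// or above the disparity range, which is why 'rangeDisparity' is passed to the visualization
// below as the invalid marker.)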
RectifyImageOps.applyMask(disparity, rectMask, 0);
// show results
BufferedImage visualized = VisualizeImageData.disparity(disparity, null, rangeDisparity, 0);
BufferedImage outLeft = ConvertBufferedImage.convertTo(rectColorLeft, null, true);
BufferedImage outRight = ConvertBufferedImage.convertTo(rectColorRight, null, true);
ShowImages.showWindow(new RectifiedPairPanel(true, outLeft, outRight), "Rectification", true);
ShowImages.showWindow(visualized, "Disparity", true);
showPointCloud(disparity, outLeft, leftToRight, rectifiedK, rectifiedR, minDisparity, rangeDisparity);
}
Use of org.ejml.data.DMatrixRMaj in project BoofCV by lessthanoptimal.
The class ExampleRectifyCalibratedStereo, method main.
public static void main(String[] args) {
String dir = UtilIO.pathExample("calibration/stereo/Bumblebee2_Chess/");
StereoParameters param = CalibrationIO.load(new File(dir, "stereo.yaml"));
// load images
BufferedImage origLeft = UtilImageIO.loadImage(dir, "left05.jpg");
BufferedImage origRight = UtilImageIO.loadImage(dir, "right05.jpg");
// distorted images
Planar<GrayF32> distLeft = ConvertBufferedImage.convertFromPlanar(origLeft, null, true, GrayF32.class);
Planar<GrayF32> distRight = ConvertBufferedImage.convertFromPlanar(origRight, null, true, GrayF32.class);
// storage for undistorted + rectified images
Planar<GrayF32> rectLeft = distLeft.createSameShape();
Planar<GrayF32> rectRight = distRight.createSameShape();
// Compute rectification
RectifyCalibrated rectifyAlg = RectifyImageOps.createCalibrated();
Se3_F64 leftToRight = param.getRightToLeft().invert(null);
// original camera calibration matrices
DMatrixRMaj K1 = PerspectiveOps.pinholeToMatrix(param.getLeft(), (DMatrixRMaj) null);
DMatrixRMaj K2 = PerspectiveOps.pinholeToMatrix(param.getRight(), (DMatrixRMaj) null);
rectifyAlg.process(K1, new Se3_F64(), K2, leftToRight);
// rectification matrix for each image
DMatrixRMaj rect1 = rectifyAlg.getUndistToRectPixels1();
DMatrixRMaj rect2 = rectifyAlg.getUndistToRectPixels2();
// New calibration matrix. Both cameras have the same one after rectification.
DMatrixRMaj rectK = rectifyAlg.getCalibrationMatrix();
// Adjust the rectification to make the view area more useful
RectifyImageOps.fullViewLeft(param.left, rect1, rect2, rectK, null);
// RectifyImageOps.allInsideLeft(param.left, rect1, rect2, rectK, null);
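// (Background: fullViewLeft() keeps every original pixel visible at the cost of black borders,
// while the commented-out allInsideLeft() instead crops so the rectified images contain only
// valid pixels.)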
// undistort and rectify the images
// TODO simplify code somehow
var rect1_F32 = new FMatrixRMaj(3, 3);
var rect2_F32 = new FMatrixRMaj(3, 3);
ConvertMatrixData.convert(rect1, rect1_F32);
ConvertMatrixData.convert(rect2, rect2_F32);
ImageDistort<Planar<GrayF32>, Planar<GrayF32>> rectifyImageLeft = RectifyDistortImageOps.rectifyImage(param.getLeft(), rect1_F32, BorderType.SKIP, distLeft.getImageType());
ImageDistort<Planar<GrayF32>, Planar<GrayF32>> rectifyImageRight = RectifyDistortImageOps.rectifyImage(param.getRight(), rect2_F32, BorderType.SKIP, distRight.getImageType());
rectifyImageLeft.apply(distLeft, rectLeft);
rectifyImageRight.apply(distRight, rectRight);
// convert for output
BufferedImage outLeft = ConvertBufferedImage.convertTo(rectLeft, null, true);
BufferedImage outRight = ConvertBufferedImage.convertTo(rectRight, null, true);
// show results and draw a horizontal line where the user clicks to make the rectification easier to see
var panel = new ListDisplayPanel();
panel.addItem(new RectifiedPairPanel(true, origLeft, origRight), "Original");
panel.addItem(new RectifiedPairPanel(true, outLeft, outRight), "Rectified");
ShowImages.showWindow(panel, "Stereo Rectification Calibrated", true);
}
Use of org.ejml.data.DMatrixRMaj in project BoofCV by lessthanoptimal.
The class ExampleStereoDisparity3D, method computeAndShowCloud.
/**
* Given already computed rectified images and known stereo parameters, create a 3D cloud and visualize it
*/
public static JComponent computeAndShowCloud(StereoParameters param, GrayU8 rectLeft, RectifyCalibrated rectAlg, GrayF32 disparity) {
// The point cloud will be in the left camera's reference frame
DMatrixRMaj rectK = rectAlg.getCalibrationMatrix();
DMatrixRMaj rectR = rectAlg.getRectifiedRotation();
// Put all the disparity parameters into one data structure
var disparityParameters = new DisparityParameters();
disparityParameters.baseline = param.getBaseline();
disparityParameters.disparityMin = disparityMin;
disparityParameters.disparityRange = disparityRange;
disparityParameters.rotateToRectified.setTo(rectR);
PerspectiveOps.matrixToPinhole(rectK, rectLeft.width, rectLeft.height, disparityParameters.pinhole);
// Iterate through each pixel in the disparity image and compute its 3D coordinate
PointCloudViewer pcv = VisualizeData.createPointCloudViewer();
pcv.setTranslationStep(param.getBaseline() * 0.1);
// Next create the 3D point cloud. The function below will handle conversion from disparity into
// XYZ, then transform from rectified into normal camera coordinate system. Feel free to glance at the
// source code to understand exactly what it's doing
MultiViewStereoOps.disparityToCloud(disparity, disparityParameters, null, (pixX, pixY, x, y, z) -> {
// Look up the gray value, then replicate it into the R, G, and B channels of a packed RGB int
int v = rectLeft.unsafe_get(pixX, pixY);
pcv.addPoint(x, y, z, v << 16 | v << 8 | v);
});
// Configure the display
// pcv.setFog(true);
// pcv.setClipDistance(baseline*45);
// PeriodicColorizer colorizer = new TwoAxisRgbPlane.Z_XY(4.0);
// colorizer.setPeriod(baseline*5);
// pcv.setColorizer(colorizer); // sometimes pseudo color can be easier to view
pcv.setDotSize(1);
pcv.setCameraHFov(PerspectiveOps.computeHFov(param.left));
// pcv.setCameraToWorld(cameraToWorld);
JComponent viewer = pcv.getComponent();
viewer.setPreferredSize(new Dimension(600, 600 * param.left.height / param.left.width));
return viewer;
}