Example 31 with DogArray

use of org.ddogleg.struct.DogArray in project BoofCV by lessthanoptimal.

the class TestCreateCloudFromDisparityImages method oneView.

/**
 * Adds a view and compares its cloud
 */
@Test
void oneView() {
    var disparity = new GrayF32(width, height);
    GrayU8 mask = disparity.createSameShape(GrayU8.class);
    ImageMiscOps.fillUniform(disparity, rand, 0, disparityRange - 1.0f);
    // set one pixel to be invalid as a test
    disparity.set(20, 30, disparityRange);
    // mask out another arbitrary pixel
    mask.set(12, 19, 1);
    var alg = new CreateCloudFromDisparityImages();
    assertEquals(0, alg.addDisparity(disparity, mask, world_to_view, parameters, n_to_p, p_to_n));
    // Only the invalid pixel and the masked pixel should be excluded
    assertEquals(width * height - 2, alg.cloud.size);
    DogArray<Point3D_F64> expected = new DogArray<>(Point3D_F64::new);
    MultiViewStereoOps.disparityToCloud(disparity, mask, parameters, (pixX, pixY, x, y, z) -> expected.grow().setTo(x, y, z));
    // processed in a row-major order
    for (int i = 0; i < expected.size; i++) {
        Point3D_F64 e = expected.get(i);
        SePointOps_F64.transformReverse(world_to_view, e, e);
        assertEquals(0.0, e.distance(alg.cloud.get(i)), UtilEjml.TEST_F64);
    }
}
Also used : Point3D_F64(georegression.struct.point.Point3D_F64) GrayF32(boofcv.struct.image.GrayF32) GrayU8(boofcv.struct.image.GrayU8) DogArray(org.ddogleg.struct.DogArray) Test(org.junit.jupiter.api.Test)
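
The test above shows the core DogArray idiom: construct with a factory so the array can create and recycle its own elements, then call grow() to obtain the next element instead of allocating one per iteration. A minimal standalone sketch of that API, with made-up point values:

import georegression.struct.point.Point3D_F64;
import org.ddogleg.struct.DogArray;

public class DogArrayBasics {
    public static void main(String[] args) {
        // The factory lambda lets DogArray instantiate elements internally
        var points = new DogArray<>(Point3D_F64::new);

        // grow() returns a recycled or freshly created element to fill in,
        // avoiding a new allocation on every iteration
        for (int i = 0; i < 100; i++) {
            points.grow().setTo(i*0.1, i*0.2, i*0.3);
        }
        System.out.println("size = " + points.size); // 100
        System.out.println("first = " + points.get(0));

        // reset() sets the size to zero but keeps the objects for reuse
        points.reset();
        System.out.println("after reset: " + points.size); // 0
    }
}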

Example 32 with DogArray

use of org.ddogleg.struct.DogArray in project BoofCV by lessthanoptimal.

the class TestBinaryImageOps method labelToClusters.

@Test
void labelToClusters() {
    DogArray<Point2D_I32> queue = new DogArray<>(16, Point2D_I32::new);
    GrayS32 labels = new GrayS32(4, 4);
    labels.data = new int[] { 1, 2, 3, 4, 5, 0, 2, 2, 3, 4, 4, 4, 0, 0, 0, 0 };
    List<List<Point2D_I32>> ret = BinaryImageOps.labelToClusters(labels, 5, queue);
    assertEquals(5, ret.size());
    assertEquals(1, ret.get(0).size());
    assertEquals(3, ret.get(1).size());
    assertEquals(2, ret.get(2).size());
    assertEquals(4, ret.get(3).size());
    assertEquals(1, ret.get(4).size());
}
Also used : Point2D_I32(georegression.struct.point.Point2D_I32) List(java.util.List) DogArray(org.ddogleg.struct.DogArray) GrayS32(boofcv.struct.image.GrayS32) Test(org.junit.jupiter.api.Test)
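
The queue passed to labelToClusters above is preallocated workspace: BinaryImageOps draws its Point2D_I32 elements from it rather than allocating new ones. A hedged sketch of reusing one workspace across repeated calls (processFrame and its inputs are hypothetical):

import java.util.List;
import boofcv.alg.filter.binary.BinaryImageOps;
import boofcv.struct.image.GrayS32;
import georegression.struct.point.Point2D_I32;
import org.ddogleg.struct.DogArray;

public class ReusedWorkspace {
    // Shared across calls so cluster points are recycled, not reallocated
    private final DogArray<Point2D_I32> queue = new DogArray<>(16, Point2D_I32::new);

    public void processFrame(GrayS32 labels, int numLabels) {
        List<List<Point2D_I32>> clusters = BinaryImageOps.labelToClusters(labels, numLabels, queue);
        // The points belong to 'queue'; copy them if they must outlive the next call
        System.out.println("found clusters: " + clusters.size());
    }
}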

Example 33 with DogArray

use of org.ddogleg.struct.DogArray in project BoofCV by lessthanoptimal.

the class ExampleTrifocalStereoUncalibrated method main.

public static void main(String[] args) {
    String name = "rock_leaves_";
    // String name = "mono_wall_";
    // String name = "minecraft_cave1_";
    // String name = "minecraft_distant_";
    // String name = "bobcats_";
    // String name = "chicken_";
    // String name = "turkey_";
    // String name = "rockview_";
    // String name = "pebbles_";
    // String name = "books_";
    // String name = "skull_";
    // String name = "triflowers_";
    BufferedImage buff01 = UtilImageIO.loadImageNotNull(UtilIO.pathExample("triple/" + name + "01.jpg"));
    BufferedImage buff02 = UtilImageIO.loadImageNotNull(UtilIO.pathExample("triple/" + name + "02.jpg"));
    BufferedImage buff03 = UtilImageIO.loadImageNotNull(UtilIO.pathExample("triple/" + name + "03.jpg"));
    Planar<GrayU8> color01 = ConvertBufferedImage.convertFrom(buff01, true, ImageType.pl(3, GrayU8.class));
    Planar<GrayU8> color02 = ConvertBufferedImage.convertFrom(buff02, true, ImageType.pl(3, GrayU8.class));
    Planar<GrayU8> color03 = ConvertBufferedImage.convertFrom(buff03, true, ImageType.pl(3, GrayU8.class));
    GrayU8 image01 = ConvertImage.average(color01, null);
    GrayU8 image02 = ConvertImage.average(color02, null);
    GrayU8 image03 = ConvertImage.average(color03, null);
    // using SURF features. Robust and fairly fast to compute
    DetectDescribePoint<GrayU8, TupleDesc_F64> detDesc = FactoryDetectDescribe.surfStable(new ConfigFastHessian(0, 4, 1000, 1, 9, 4, 2), null, null, GrayU8.class);
    // Associate features across all three views using previous example code
    var associateThree = new ExampleAssociateThreeView();
    associateThree.initialize(detDesc);
    associateThree.detectFeatures(image01, 0);
    associateThree.detectFeatures(image02, 1);
    associateThree.detectFeatures(image03, 2);
    System.out.println("features01.size = " + associateThree.features01.size);
    System.out.println("features02.size = " + associateThree.features02.size);
    System.out.println("features03.size = " + associateThree.features03.size);
    int width = image01.width, height = image01.height;
    System.out.println("Image Shape " + width + " x " + height);
    double cx = width / 2.0;
    double cy = height / 2.0;
    // The self-calibration step requires that the image coordinate system's origin be at the image center
    associateThree.locations01.forEach(p -> p.setTo(p.x - cx, p.y - cy));
    associateThree.locations02.forEach(p -> p.setTo(p.x - cx, p.y - cy));
    associateThree.locations03.forEach(p -> p.setTo(p.x - cx, p.y - cy));
    // Convert the found features into a data format that can be processed by SFM algorithms.
    // Notice how the image center is subtracted from the coordinates? In many cases a principal
    // point of zero is assumed. This is a reasonable assumption in almost all modern cameras. Errors
    // in the principal point tend to materialize as translations and are non-fatal.
    // Associate features in the three views using image information alone
    DogArray<AssociatedTripleIndex> associatedIdx = associateThree.threeViewPairwiseAssociate();
    // Convert the matched indexes into AssociatedTriple which contain the actual pixel coordinates
    var associated = new DogArray<>(AssociatedTriple::new);
    associatedIdx.forEach(p -> associated.grow().setTo(associateThree.locations01.get(p.a), associateThree.locations02.get(p.b), associateThree.locations03.get(p.c)));
    System.out.println("Total Matched Triples = " + associated.size);
    var model = new TrifocalTensor();
    List<AssociatedTriple> inliers = ExampleComputeTrifocalTensor.computeTrifocal(associated, model);
    System.out.println("Remaining after RANSAC " + inliers.size());
    // Show remaining associations from RANSAC
    var triplePanel = new AssociatedTriplePanel();
    triplePanel.setPixelOffset(cx, cy);
    triplePanel.setImages(buff01, buff02, buff03);
    triplePanel.setAssociation(inliers);
    ShowImages.showWindow(triplePanel, "Associations", true);
    // estimate using all the inliers
    // No need to rescale the input because the estimator adjusts the input on its own
    var configTri = new ConfigTrifocal();
    configTri.which = EnumTrifocal.ALGEBRAIC_7;
    configTri.converge.maxIterations = 100;
    Estimate1ofTrifocalTensor trifocalEstimator = FactoryMultiView.trifocal_1(configTri);
    if (!trifocalEstimator.process(inliers, model))
        throw new RuntimeException("Estimator failed");
    model.print();
    DMatrixRMaj P1 = CommonOps_DDRM.identity(3, 4);
    DMatrixRMaj P2 = new DMatrixRMaj(3, 4);
    DMatrixRMaj P3 = new DMatrixRMaj(3, 4);
    MultiViewOps.trifocalToCameraMatrices(model, P2, P3);
    // Most of the time this refinement step makes little difference, but in some edge cases it
    // appears to help convergence
    System.out.println("Refining projective camera matrices");
    RefineThreeViewProjective refineP23 = FactoryMultiView.threeViewRefine(null);
    if (!refineP23.process(inliers, P2, P3, P2, P3))
        throw new RuntimeException("Can't refine P2 and P3!");
    var selfcalib = new SelfCalibrationLinearDualQuadratic(1.0);
    selfcalib.addCameraMatrix(P1);
    selfcalib.addCameraMatrix(P2);
    selfcalib.addCameraMatrix(P3);
    var listPinhole = new ArrayList<CameraPinhole>();
    GeometricResult result = selfcalib.solve();
    if (GeometricResult.SOLVE_FAILED != result) {
        for (int i = 0; i < 3; i++) {
            Intrinsic c = selfcalib.getIntrinsics().get(i);
            CameraPinhole p = new CameraPinhole(c.fx, c.fy, 0, 0, 0, width, height);
            listPinhole.add(p);
        }
    } else {
        System.out.println("Self calibration failed!");
        for (int i = 0; i < 3; i++) {
            CameraPinhole p = new CameraPinhole(width / 2, width / 2, 0, 0, 0, width, height);
            listPinhole.add(p);
        }
    }
    // Print the estimated intrinsic parameters for each view
    for (int i = 0; i < 3; i++) {
        CameraPinhole r = listPinhole.get(i);
        System.out.println("fx=" + r.fx + " fy=" + r.fy + " skew=" + r.skew);
    }
    System.out.println("Projective to metric");
    // convert camera matrix from projective to metric
    // storage for rectifying homography
    var H = new DMatrixRMaj(4, 4);
    if (!MultiViewOps.absoluteQuadraticToH(selfcalib.getQ(), H))
        throw new RuntimeException("Projective to metric failed");
    var K = new DMatrixRMaj(3, 3);
    var worldToView = new ArrayList<Se3_F64>();
    for (int i = 0; i < 3; i++) {
        worldToView.add(new Se3_F64());
    }
    // ignore K since we already have that
    MultiViewOps.projectiveToMetric(P1, H, worldToView.get(0), K);
    MultiViewOps.projectiveToMetric(P2, H, worldToView.get(1), K);
    MultiViewOps.projectiveToMetric(P3, H, worldToView.get(2), K);
    // scale is arbitrary. Set max translation to 1
    adjustTranslationScale(worldToView);
    // Construct bundle adjustment data structure
    var structure = new SceneStructureMetric(false);
    structure.initialize(3, 3, inliers.size());
    var observations = new SceneObservations();
    observations.initialize(3);
    for (int i = 0; i < listPinhole.size(); i++) {
        BundlePinholeSimplified bp = new BundlePinholeSimplified();
        bp.f = listPinhole.get(i).fx;
        structure.setCamera(i, false, bp);
        structure.setView(i, i, i == 0, worldToView.get(i));
    }
    for (int i = 0; i < inliers.size(); i++) {
        AssociatedTriple t = inliers.get(i);
        observations.getView(0).add(i, (float) t.p1.x, (float) t.p1.y);
        observations.getView(1).add(i, (float) t.p2.x, (float) t.p2.y);
        observations.getView(2).add(i, (float) t.p3.x, (float) t.p3.y);
        structure.connectPointToView(i, 0);
        structure.connectPointToView(i, 1);
        structure.connectPointToView(i, 2);
    }
    // Initial estimate for point 3D locations
    triangulatePoints(structure, observations);
    ConfigLevenbergMarquardt configLM = new ConfigLevenbergMarquardt();
    configLM.dampeningInitial = 1e-3;
    configLM.hessianScaling = false;
    ConfigBundleAdjustment configSBA = new ConfigBundleAdjustment();
    configSBA.configOptimizer = configLM;
    // Create and configure the bundle adjustment solver
    BundleAdjustment<SceneStructureMetric> bundleAdjustment = FactoryMultiView.bundleSparseMetric(configSBA);
    // prints out useful debugging information that lets you know how well it's converging
    // bundleAdjustment.setVerbose(System.out,0);
    // convergence criteria
    bundleAdjustment.configure(1e-6, 1e-6, 100);
    bundleAdjustment.setParameters(structure, observations);
    bundleAdjustment.optimize(structure);
    // See if the solution is physically possible. If not fix and run bundle adjustment again
    checkBehindCamera(structure, observations, bundleAdjustment);
    // It's very difficult to find the best solution due to the number of local minima. In the
    // three-view case a common problem is that a small translation is virtually identical to a
    // small rotation. Convergence can be improved by considering that possibility
    // Now that we have a decent solution, prune the worst outliers to improve the fit quality even more
    var pruner = new PruneStructureFromSceneMetric(structure, observations);
    pruner.pruneObservationsByErrorRank(0.7);
    pruner.pruneViews(10);
    pruner.pruneUnusedMotions();
    pruner.prunePoints(1);
    bundleAdjustment.setParameters(structure, observations);
    bundleAdjustment.optimize(structure);
    System.out.println("Final Views");
    for (int i = 0; i < 3; i++) {
        BundlePinholeSimplified cp = structure.getCameras().get(i).getModel();
        Vector3D_F64 T = structure.getParentToView(i).T;
        System.out.printf("[ %d ] f = %5.1f T=%s\n", i, cp.f, T.toString());
    }
    System.out.println("\n\nComputing Stereo Disparity");
    BundlePinholeSimplified cp = structure.getCameras().get(0).getModel();
    var intrinsic01 = new CameraPinholeBrown();
    intrinsic01.fsetK(cp.f, cp.f, 0, cx, cy, width, height);
    intrinsic01.fsetRadial(cp.k1, cp.k2);
    cp = structure.getCameras().get(1).getModel();
    var intrinsic02 = new CameraPinholeBrown();
    intrinsic02.fsetK(cp.f, cp.f, 0, cx, cy, width, height);
    intrinsic02.fsetRadial(cp.k1, cp.k2);
    Se3_F64 leftToRight = structure.getParentToView(1);
    // TODO dynamic max disparity
    computeStereoCloud(image01, image02, color01, color02, intrinsic01, intrinsic02, leftToRight, 0, 250);
}
Also used : ConfigFastHessian(boofcv.abst.feature.detect.interest.ConfigFastHessian) ConfigTrifocal(boofcv.factory.geo.ConfigTrifocal) Estimate1ofTrifocalTensor(boofcv.abst.geo.Estimate1ofTrifocalTensor) TrifocalTensor(boofcv.struct.geo.TrifocalTensor) ExampleComputeTrifocalTensor(boofcv.examples.sfm.ExampleComputeTrifocalTensor) CameraPinholeBrown(boofcv.struct.calib.CameraPinholeBrown) BundlePinholeSimplified(boofcv.alg.geo.bundle.cameras.BundlePinholeSimplified) DMatrixRMaj(org.ejml.data.DMatrixRMaj) ArrayList(java.util.ArrayList) CameraPinhole(boofcv.struct.calib.CameraPinhole) BufferedImage(java.awt.image.BufferedImage) ConvertBufferedImage(boofcv.io.image.ConvertBufferedImage) SelfCalibrationLinearDualQuadratic(boofcv.alg.geo.selfcalib.SelfCalibrationLinearDualQuadratic) SceneStructureMetric(boofcv.abst.geo.bundle.SceneStructureMetric) AssociatedTriple(boofcv.struct.geo.AssociatedTriple) ExampleAssociateThreeView(boofcv.examples.features.ExampleAssociateThreeView) Estimate1ofTrifocalTensor(boofcv.abst.geo.Estimate1ofTrifocalTensor) AssociatedTripleIndex(boofcv.struct.feature.AssociatedTripleIndex) ConfigLevenbergMarquardt(org.ddogleg.optimization.lm.ConfigLevenbergMarquardt) GrayU8(boofcv.struct.image.GrayU8) TupleDesc_F64(boofcv.struct.feature.TupleDesc_F64) RefineThreeViewProjective(boofcv.abst.geo.RefineThreeViewProjective) PruneStructureFromSceneMetric(boofcv.abst.geo.bundle.PruneStructureFromSceneMetric) DogArray(org.ddogleg.struct.DogArray) DetectDescribePoint(boofcv.abst.feature.detdesc.DetectDescribePoint) ConfigBundleAdjustment(boofcv.factory.geo.ConfigBundleAdjustment) Vector3D_F64(georegression.struct.point.Vector3D_F64) AssociatedTriplePanel(boofcv.gui.feature.AssociatedTriplePanel) SceneObservations(boofcv.abst.geo.bundle.SceneObservations) GeometricResult(boofcv.alg.geo.GeometricResult) Intrinsic(boofcv.alg.geo.selfcalib.SelfCalibrationLinearDualQuadratic.Intrinsic) Se3_F64(georegression.struct.se.Se3_F64)
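
adjustTranslationScale() is invoked above but not shown. Given the comment "scale is arbitrary. Set max translation to 1", a plausible sketch is to divide every translation by the largest translation norm; treat this as an assumption about the helper, not its actual source:

import java.util.List;
import georegression.struct.se.Se3_F64;

public class AdjustScaleSketch {
    // Assumption: rescale so the largest camera translation has unit norm.
    // Rotations are unaffected; only the arbitrary metric scale changes.
    static void adjustTranslationScale(List<Se3_F64> worldToView) {
        double maxNorm = 0;
        for (Se3_F64 view : worldToView) {
            maxNorm = Math.max(maxNorm, view.T.norm());
        }
        if (maxNorm == 0)
            return; // all translations are zero; nothing to normalize
        for (Se3_F64 view : worldToView) {
            view.T.scale(1.0 / maxNorm);
        }
    }
}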

Example 34 with DogArray

use of org.ddogleg.struct.DogArray in project BoofCV by lessthanoptimal.

the class ExampleViewPointCloud method main.

public static void main(String[] args) throws IOException {
    String filePath = UtilIO.pathExample("mvs/stone_sign.ply");
    // Load the PLY file
    var cloud = new DogArray<>(Point3dRgbI_F32::new);
    PointCloudIO.load(PointCloudIO.Format.PLY, new FileInputStream(filePath), PointCloudWriter.wrapF32RGB(cloud));
    System.out.println("Total Points " + cloud.size);
    // Create the 3D viewer as a Swing panel that will have some minimal controls for adjusting the
    // cloud's appearance. Since a software renderer is used it will get a bit sluggish (on my
    // computer) around 1,000,000 points
    PointCloudViewerPanel viewerPanel = new PointCloudViewerPanel();
    viewerPanel.setPreferredSize(new Dimension(800, 600));
    PointCloudViewer viewer = viewerPanel.getViewer();
    // Change the camera's Field-of-View
    viewer.setCameraHFov(UtilAngle.radian(60));
    // There are so many formats for storing a 3D point and color that a functional API is used here
    viewer.addCloud((idx, p) -> ConvertFloatType.convert(cloud.get(idx), p), (idx) -> cloud.get(idx).rgb, cloud.size);
    // There are a ton of options for the viewer, but we will let the GUI handle most of them
    // Alternatively, you could use VisualizeData.createPointCloudViewer(). No controls are
    // provided if you use that.
    SwingUtilities.invokeLater(() -> {
        viewerPanel.handleControlChange();
        viewerPanel.repaint();
        ShowImages.showWindow(viewerPanel, "Point Cloud", true);
    });
}
Also used : PointCloudViewer(boofcv.visualize.PointCloudViewer) Point3dRgbI_F32(boofcv.struct.Point3dRgbI_F32) DogArray(org.ddogleg.struct.DogArray) FileInputStream(java.io.FileInputStream) PointCloudViewerPanel(boofcv.gui.PointCloudViewerPanel)
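
After loading, the cloud is an ordinary DogArray, so any post-processing reduces to a plain indexed loop over size and get(). As an illustrative example (not part of the original), computing the range of the x coordinate:

import boofcv.struct.Point3dRgbI_F32;
import org.ddogleg.struct.DogArray;

public class CloudBounds {
    static void printRangeX(DogArray<Point3dRgbI_F32> cloud) {
        float minX = Float.MAX_VALUE, maxX = -Float.MAX_VALUE;
        // 'size' is a public field and get(i) returns the i-th element
        for (int i = 0; i < cloud.size; i++) {
            Point3dRgbI_F32 p = cloud.get(i);
            minX = Math.min(minX, p.x);
            maxX = Math.max(maxX, p.x);
        }
        System.out.println("x range: [" + minX + ", " + maxX + "]");
    }
}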

Example 35 with DogArray

use of org.ddogleg.struct.DogArray in project BoofCV by lessthanoptimal.

the class ExampleColorHistogramLookup method main.

public static void main(String[] args) {
    String imagePath = UtilIO.pathExample("recognition/vacation");
    List<String> images = UtilIO.listByPrefix(imagePath, null, ".jpg");
    Collections.sort(images);
    // Different color spaces you can try
    List<double[]> points = coupledHueSat(images);
    // List<double[]> points = independentHueSat(images);
    // List<double[]> points = coupledRGB(images);
    // List<double[]> points = histogramGray(images);
    // A few suggested images you can try searching for
    int target = 0;
    // int target = 28;
    // int target = 38;
    // int target = 46;
    // int target = 65;
    // int target = 77;
    double[] targetPoint = points.get(target);
    // Use a generic NN search algorithm. This uses Euclidean distance as a distance metric.
    NearestNeighbor<double[]> nn = FactoryNearestNeighbor.exhaustive(new KdTreeEuclideanSq_F64(targetPoint.length));
    NearestNeighbor.Search<double[]> search = nn.createSearch();
    DogArray<NnData<double[]>> results = new DogArray<>(NnData::new);
    nn.setPoints(points, true);
    search.findNearest(targetPoint, -1, 10, results);
    ListDisplayPanel gui = new ListDisplayPanel();
    // Add the target which the other images are being matched against
    gui.addImage(UtilImageIO.loadImageNotNull(images.get(target)), "Target", ScaleOptions.ALL);
    // The results will be the 10 best matches, but their order can be arbitrary. For display purposes
    // it's better to order them from best fit to worst fit
    Collections.sort(results.toList(), Comparator.comparingDouble((NnData o) -> o.distance));
    // Add images to GUI -- first match is always the target image, so skip it
    for (int i = 1; i < results.size; i++) {
        String file = images.get(results.get(i).index);
        double error = results.get(i).distance;
        BufferedImage image = UtilImageIO.loadImage(file);
        gui.addImage(image, String.format("Error %6.3f", error), ScaleOptions.ALL);
    }
    ShowImages.showWindow(gui, "Similar Images", true);
}
Also used : FactoryNearestNeighbor(org.ddogleg.nn.FactoryNearestNeighbor) NearestNeighbor(org.ddogleg.nn.NearestNeighbor) NnData(org.ddogleg.nn.NnData) ListDisplayPanel(boofcv.gui.ListDisplayPanel) DogArray(org.ddogleg.struct.DogArray) BufferedImage(java.awt.image.BufferedImage) ConvertBufferedImage(boofcv.io.image.ConvertBufferedImage) KdTreeEuclideanSq_F64(org.ddogleg.nn.alg.distance.KdTreeEuclideanSq_F64)
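
The Collections.sort call above works because toList() exposes the DogArray contents through the java.util.List interface, and sorting that view reorders the backing array. A minimal sketch of the same bridge with made-up values:

import java.util.Collections;
import java.util.Comparator;
import org.ddogleg.struct.DogArray;

public class ToListBridge {
    public static void main(String[] args) {
        DogArray<double[]> scores = new DogArray<>(() -> new double[1]);
        scores.grow()[0] = 3.0;
        scores.grow()[0] = 1.0;
        scores.grow()[0] = 2.0;

        // toList() is a live view backed by the DogArray, so the sort
        // rearranges the array's own elements
        Collections.sort(scores.toList(), Comparator.comparingDouble((double[] a) -> a[0]));

        for (int i = 0; i < scores.size; i++) {
            System.out.println(scores.get(i)[0]); // 1.0, 2.0, 3.0
        }
    }
}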

Aggregations

DogArray (org.ddogleg.struct.DogArray): 79
Test (org.junit.jupiter.api.Test): 48
ArrayList (java.util.ArrayList): 27
Point3D_F64 (georegression.struct.point.Point3D_F64): 18
DogArray_I32 (org.ddogleg.struct.DogArray_I32): 17
Point2D_F64 (georegression.struct.point.Point2D_F64): 16
GrayU8 (boofcv.struct.image.GrayU8): 13
TupleDesc_F64 (boofcv.struct.feature.TupleDesc_F64): 10
Point2D_I32 (georegression.struct.point.Point2D_I32): 7
DMatrixRMaj (org.ejml.data.DMatrixRMaj): 7
BufferedImage (java.awt.image.BufferedImage): 6
Point3dRgbI_F64 (boofcv.struct.Point3dRgbI_F64): 5
CameraPinholeBrown (boofcv.struct.calib.CameraPinholeBrown): 5
List (java.util.List): 5
PositionPatternNode (boofcv.alg.fiducial.qrcode.PositionPatternNode): 4
ConvertBufferedImage (boofcv.io.image.ConvertBufferedImage): 4
GrayF32 (boofcv.struct.image.GrayF32): 4
PointCloudViewer (boofcv.visualize.PointCloudViewer): 4
Se3_F64 (georegression.struct.se.Se3_F64): 4
FactoryNearestNeighbor (org.ddogleg.nn.FactoryNearestNeighbor): 4