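PointToPixelTransform_F64 adapts a Point2Transform2_F64 (a point-to-point transform in double precision) to BoofCV's PixelTransform<Point2D_F64> interface, which is queried at integer pixel coordinates. Before the usage examples below, here is a minimal self-contained sketch of the adapter; the camera parameters are made up for illustration.

import boofcv.alg.distort.brown.LensDistortionBrown;
import boofcv.struct.calib.CameraPinholeBrown;
import boofcv.struct.distort.PixelTransform;
import boofcv.struct.distort.Point2Transform2_F64;
import boofcv.struct.distort.PointToPixelTransform_F64;
import georegression.struct.point.Point2D_F64;

public class SketchPointToPixelTransform {
    public static void main(String[] args) {
        // Made-up Brown (radial distortion) camera model, for illustration only
        CameraPinholeBrown intrinsic = new CameraPinholeBrown(2);
        intrinsic.fsetK(500, 500, 0, 320, 240, 640, 480);
        intrinsic.fsetRadial(-0.1, 0.01);
        // Distorted pixel coordinates -> normalized image coordinates
        Point2Transform2_F64 pixel_to_norm = new LensDistortionBrown(intrinsic).undistort_F64(true, false);
        // Wrap the point transform so it can be evaluated at integer pixels
        PixelTransform<Point2D_F64> adapter = new PointToPixelTransform_F64(pixel_to_norm);
        Point2D_F64 found = new Point2D_F64();
        adapter.compute(100, 150, found);
        System.out.println("normalized coordinates = " + found);
    }
}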

Example 1 with PointToPixelTransform_F64

Use of boofcv.struct.distort.PointToPixelTransform_F64 in project BoofCV by lessthanoptimal.

From the class MultiViewStereoFromKnownSceneStructure, method computeFusedDisparityAddCloud.

/**
 * Combining stereo information from all images in this cluster, compute a disparity image and add it to the cloud
 */
boolean computeFusedDisparityAddCloud(SceneStructureMetric scene, ViewInfo center, TIntObjectMap<String> sbaIndexToName, DogArray_I32 pairIndexes) {
    if (!computeFused.process(scene, center.relations.indexSba, pairIndexes, sbaIndexToName::get)) {
        if (verbose != null)
            verbose.println("FAILED: fused disparity. center.index=" + center.index);
        return false;
    }
    // The fused disparity doesn't come with a mask since invalid pixels are already encoded
    // by the disparity value itself
    GrayF32 disparity = computeFused.fusedDisparity;
    dummyMask.reshape(disparity);
    ImageMiscOps.fill(dummyMask, 0);
    // Pass along results to the listener
    if (listener != null) {
        listener.handleFusedDisparity(center.relations.id, disparity, dummyMask, computeFused.fusedParam);
    }
    // Convert data structures into a format which is understood by disparity to cloud
    BundleAdjustmentCamera camera = scene.cameras.get(center.metric.camera).model;
    BundleAdjustmentOps.convert(camera, disparity.width, disparity.height, brown);
    // The fused disparity is in regular pixels and not rectified
    Point2Transform2_F64 norm_to_pixel = new LensDistortionBrown(brown).distort_F64(false, true);
    Point2Transform2_F64 pixel_to_norm = new LensDistortionBrown(brown).undistort_F64(true, false);
    // world/cloud coordinates into this view
    scene.getWorldToView(center.metric, world_to_view1, tmp);
    // Use the computed disparity to add to the common point cloud while not adding points already in
    // the cloud
    disparityCloud.addDisparity(disparity, dummyMask, world_to_view1, computeFused.fusedParam, norm_to_pixel, new PointToPixelTransform_F64(pixel_to_norm));
    return true;
}
Also used : BundleAdjustmentCamera(boofcv.abst.geo.bundle.BundleAdjustmentCamera) LensDistortionBrown(boofcv.alg.distort.brown.LensDistortionBrown) Point2Transform2_F64(boofcv.struct.distort.Point2Transform2_F64) PointToPixelTransform_F64(boofcv.struct.distort.PointToPixelTransform_F64)
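The boolean arguments above follow the convention (input is pixel, output is pixel): distort_F64(false, true) maps normalized coordinates to distorted pixels, while undistort_F64(true, false) goes the other way. A short round-trip sanity check, reusing the brown model from the method above (the pixel values are arbitrary):

Point2Transform2_F64 norm_to_pixel = new LensDistortionBrown(brown).distort_F64(false, true);
Point2Transform2_F64 pixel_to_norm = new LensDistortionBrown(brown).undistort_F64(true, false);

Point2D_F64 norm = new Point2D_F64();
Point2D_F64 pixel = new Point2D_F64();
pixel_to_norm.compute(320.0, 240.0, norm);    // distorted pixel -> normalized coordinates
norm_to_pixel.compute(norm.x, norm.y, pixel); // back to the pixel, approximately (320, 240)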

Example 2 with PointToPixelTransform_F64

Use of boofcv.struct.distort.PointToPixelTransform_F64 in project BoofCV by lessthanoptimal.

From the class BundleToRectificationStereoParameters, method setView1.

/**
 * Specifies lens parameters for view-1. This is done independently since often the same view is compared against
 * multiple other views
 */
public void setView1(BundleAdjustmentCamera bundle1, int width, int height) {
    BoofMiscOps.checkTrue(width > 0);
    BoofMiscOps.checkTrue(height > 0);
    // Convert the bundle adjustment camera model into a Brown distortion model
    BundleAdjustmentOps.convert(bundle1, width, height, intrinsic1);
    PerspectiveOps.pinholeToMatrix(intrinsic1, K1);
    intrinsic1.width = width;
    intrinsic1.height = height;
    // Both flags are true, so this maps distorted pixels to undistorted pixels
    Point2Transform2_F64 p_to_p = new LensDistortionBrown(intrinsic1).undistort_F64(true, true);
    view1_dist_to_undist = new PointToPixelTransform_F64(p_to_p);
}
Also used : LensDistortionBrown(boofcv.alg.distort.brown.LensDistortionBrown) Point2Transform2_F64(boofcv.struct.distort.Point2Transform2_F64) PointToPixelTransform_F64(boofcv.struct.distort.PointToPixelTransform_F64)
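In this case both flags passed to undistort_F64 are true, so p_to_p maps distorted pixels directly to undistorted pixels; wrapping it in PointToPixelTransform_F64 exposes it through the per-pixel interface used by rectification code. A sketch of evaluating the result (the pixel is arbitrary):

Point2D_F64 undistorted = new Point2D_F64();
view1_dist_to_undist.compute(100, 200, undistorted); // distorted pixel (100,200) -> undistorted pixel
System.out.printf("(100,200) -> (%.1f, %.1f)%n", undistorted.x, undistorted.y);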

Example 3 with PointToPixelTransform_F64

Use of boofcv.struct.distort.PointToPixelTransform_F64 in project BoofCV by lessthanoptimal.

From the class TestMultiViewStereoOps, method disparityToCloud_consumer.

private void disparityToCloud_consumer(ImageGray<?> disparity, LensDistortionNarrowFOV distortionFactory, @Nullable Point2Transform2_F64 pointToNorm) {
    // Randomly fill in the disparity image
    GImageMiscOps.fillUniform(disparity, rand, 0, parameters.disparityRange - 1);
    // Mark a couple of pixels as invalid by setting them to the disparity range
    GeneralizedImageOps.set(disparity, 10, 12, parameters.disparityRange);
    GeneralizedImageOps.set(disparity, 2, 21, parameters.disparityRange);
    Point2Transform2_F64 pixel_to_norm = distortionFactory.undistort_F64(true, false);
    Point2D_F64 norm = new Point2D_F64();
    // Verify the results by computing the 3D point using a brute force method
    MultiViewStereoOps.disparityToCloud(disparity, parameters, pointToNorm == null ? null : new PointToPixelTransform_F64(pointToNorm), ((pixX, pixY, x, y, z) -> {
        double d = GeneralizedImageOps.get(disparity, pixX, pixY);
        double expectedZ = MultiViewOps.disparityToRange(d + parameters.disparityMin, intrinsic.fx, parameters.baseline);
        if (Double.isInfinite(expectedZ))
            assertTrue(Double.isInfinite(z));
        else {
            pixel_to_norm.compute(pixX, pixY, norm);
            assertEquals(expectedZ, z, 1e-4);
            assertEquals(expectedZ * norm.x, x, 1e-4);
            assertEquals(expectedZ * norm.y, y, 1e-4);
        }
    }));
}
Also used : Point2D_F64(georegression.struct.point.Point2D_F64) GImageMiscOps(boofcv.alg.misc.GImageMiscOps) CameraPinhole(boofcv.struct.calib.CameraPinhole) GrayF32(boofcv.struct.image.GrayF32) WorldToCameraToPixel(boofcv.alg.geo.WorldToCameraToPixel) BoofStandardJUnit(boofcv.testing.BoofStandardJUnit) LensDistortionPinhole(boofcv.alg.distort.pinhole.LensDistortionPinhole) UtilPoint3D_F64(georegression.geometry.UtilPoint3D_F64) ImageMiscOps(boofcv.alg.misc.ImageMiscOps) Point2Transform2_F64(boofcv.struct.distort.Point2Transform2_F64) CameraPinholeBrown(boofcv.struct.calib.CameraPinholeBrown) BoofMiscOps(boofcv.misc.BoofMiscOps) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) Se3_F64(georegression.struct.se.Se3_F64) MultiViewOps(boofcv.alg.geo.MultiViewOps) SpecialEuclideanOps_F64(georegression.struct.se.SpecialEuclideanOps_F64) Point3D_F64(georegression.struct.point.Point3D_F64) ImageStatistics(boofcv.alg.misc.ImageStatistics) Test(org.junit.jupiter.api.Test) Nullable(org.jetbrains.annotations.Nullable) List(java.util.List) ImageGray(boofcv.struct.image.ImageGray) LensDistortionNarrowFOV(boofcv.alg.distort.LensDistortionNarrowFOV) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) GrayU8(boofcv.struct.image.GrayU8) GeneralizedImageOps(boofcv.core.image.GeneralizedImageOps) PointToPixelTransform_F64(boofcv.struct.distort.PointToPixelTransform_F64) Point2D_F64(georegression.struct.point.Point2D_F64) Point2Transform2_F64(boofcv.struct.distort.Point2Transform2_F64) PointToPixelTransform_F64(boofcv.struct.distort.PointToPixelTransform_F64)
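The brute-force check above uses the standard rectified-stereo relation z = fx * baseline / d, where d is the stored disparity plus the disparityMin offset; x and y then follow from the pixel's normalized coordinates. A worked instance with made-up numbers:

double fx = 500.0, baselineMeters = 0.3;   // made-up focal length and baseline
double d = 35.0 + 5.0;                     // stored disparity + parameters.disparityMin
double z = fx * baselineMeters / d;        // 3.75, the value MultiViewOps.disparityToRange() yields
// With normalized coordinates (norm.x, norm.y) of the pixel:
// x = z * norm.x and y = z * norm.y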

Example 4 with PointToPixelTransform_F64

Use of boofcv.struct.distort.PointToPixelTransform_F64 in project BoofCV by lessthanoptimal.

From the class ExampleMultiBaselineStereo, method main.

public static void main(String[] args) {
    // Compute a sparse reconstruction. This will give us intrinsic and extrinsic for all views
    var example = new ExampleMultiViewSparseReconstruction();
    // Specifies the "center" frame to use
    int centerViewIdx = 15;
    example.compute("tree_snow_01.mp4", true);
    // example.compute("ditch_02.mp4", true);
    // example.compute("holiday_display_01.mp4"", true);
    // example.compute("log_building_02.mp4"", true);
    // example.compute("drone_park_01.mp4", false);
    // example.compute("stone_sign.mp4", true);
    // We need a way to load images based on their ID. In this particular case the ID encodes the array index.
    var imageLookup = new LookUpImageFilesByIndex(example.imageFiles);
    // Next we tell it which view to use as the "center", which acts as the common view for all disparity images.
    // The process of selecting the best views to use as centers is a problem all its own. To keep things
    // simple we just pick a frame.
    SceneWorkingGraph.View center = example.working.getAllViews().get(centerViewIdx);
    // The final scene refined by bundle adjustment is created by the Working graph. However, the 3D relationship
    // between views is contained in the pairwise graph. A View in the working graph has a reference to the view
    // in the pairwise graph. Using that we will find all connected views that have a 3D relationship
    var pairedViewIdxs = new DogArray_I32();
    var sbaIndexToImageID = new TIntObjectHashMap<String>();
    // This relationship between pairwise and working graphs might seem (and is) a bit convoluted. The Pairwise
    // graph is the initial crude sketch of what might be connected. The working graph is an intermediate
    // data structure for computing the metric scene. SBA is a refinement of the working graph.
    // Iterate through all connected views in the pairwise graph and mark their indexes in the working graph
    center.pview.connections.forEach((m) -> {
        // if there isn't a 3D relationship just skip it
        if (!m.is3D)
            return;
        String connectedID = m.other(center.pview).id;
        SceneWorkingGraph.View connected = example.working.views.get(connectedID);
        // Make sure the pairwise view exists in the working graph too
        if (connected == null)
            return;
        // Add this view to the index to name/ID lookup table
        sbaIndexToImageID.put(connected.index, connectedID);
        // Note that this view is one which acts as the second image in the stereo pair
        pairedViewIdxs.add(connected.index);
    });
    // Add the center camera image to the ID look up table
    sbaIndexToImageID.put(centerViewIdx, center.pview.id);
    // Configure the stereo disparity algorithm which will be used
    var configDisparity = new ConfigDisparityBMBest5();
    configDisparity.validateRtoL = 1;
    configDisparity.texture = 0.5;
    configDisparity.regionRadiusX = configDisparity.regionRadiusY = 4;
    configDisparity.disparityRange = 120;
    // This is the actual MBS algorithm mentioned previously. It selects the best disparity for each pixel
    // in the original image using a median filter.
    var multiBaseline = new MultiBaselineStereoIndependent<>(imageLookup, ImageType.SB_U8);
    multiBaseline.setStereoDisparity(FactoryStereoDisparity.blockMatchBest5(configDisparity, GrayU8.class, GrayF32.class));
    // Print out verbose debugging and profile information
    multiBaseline.setVerbose(System.out, null);
    multiBaseline.setVerboseProfiling(System.out);
    // Improve stereo by removing small regions, which tend to be noise. Consider adjusting the region size.
    multiBaseline.setDisparitySmoother(FactoryStereoDisparity.removeSpeckle(null, GrayF32.class));
    // Print out debugging information from the smoother
    // Objects.requireNonNull(multiBaseline.getDisparitySmoother()).setVerbose(System.out,null);
    // Creates a list where you can switch between different images/visualizations
    var listDisplay = new ListDisplayPanel();
    listDisplay.setPreferredSize(new Dimension(1000, 300));
    ShowImages.showWindow(listDisplay, "Intermediate Results", true);
    // We will display intermediate results as they come in
    multiBaseline.setListener((leftView, rightView, rectLeft, rectRight, disparity, mask, parameters, rect) -> {
        // Visualize the rectified stereo pair. You can interact with this window and verify
        // that the y-axis is aligned
        var rectified = new RectifiedPairPanel(true);
        rectified.setImages(ConvertBufferedImage.convertTo(rectLeft, null), ConvertBufferedImage.convertTo(rectRight, null));
        // Cleans up the disparity image by zeroing out pixels that are outside the original image bounds
        RectifyImageOps.applyMask(disparity, mask, 0);
        // Display the colorized disparity
        BufferedImage colorized = VisualizeImageData.disparity(disparity, null, parameters.disparityRange, 0);
        SwingUtilities.invokeLater(() -> {
            listDisplay.addItem(rectified, "Rectified " + leftView + " " + rightView);
            listDisplay.addImage(colorized, leftView + " " + rightView);
        });
    });
    // Process the images and compute a single combined disparity image
    if (!multiBaseline.process(example.scene, center.index, pairedViewIdxs, sbaIndexToImageID::get)) {
        throw new RuntimeException("Failed to fuse stereo views");
    }
    // Extract the point cloud from the fused disparity image
    GrayF32 fusedDisparity = multiBaseline.getFusedDisparity();
    DisparityParameters fusedParam = multiBaseline.getFusedParam();
    BufferedImage colorizedDisp = VisualizeImageData.disparity(fusedDisparity, null, fusedParam.disparityRange, 0);
    ShowImages.showWindow(colorizedDisp, "Fused Disparity");
    // Now compute the point cloud it represents and the color of each pixel.
    // For the fused image, instead of being in rectified image coordinates it's in the original image
    // coordinates, which makes extracting color much easier.
    var cloud = new DogArray<>(Point3D_F64::new);
    var cloudRgb = new DogArray_I32(cloud.size);
    // Load the center image in color
    var colorImage = new InterleavedU8(1, 1, 3);
    imageLookup.loadImage(center.pview.id, colorImage);
    // Since the fused image is in the original (i.e. distorted) pixel coordinates and is not rectified,
    // that needs to be taken into account by undistorting the image to create the point cloud.
    CameraPinholeBrown intrinsic = BundleAdjustmentOps.convert(example.scene.cameras.get(center.cameraIdx).model, colorImage.width, colorImage.height, null);
    Point2Transform2_F64 pixel_to_norm = new LensDistortionBrown(intrinsic).undistort_F64(true, false);
    MultiViewStereoOps.disparityToCloud(fusedDisparity, fusedParam, new PointToPixelTransform_F64(pixel_to_norm), (pixX, pixY, x, y, z) -> {
        cloud.grow().setTo(x, y, z);
        cloudRgb.add(colorImage.get24(pixX, pixY));
    });
    // Configure the point cloud viewer
    PointCloudViewer pcv = VisualizeData.createPointCloudViewer();
    pcv.setCameraHFov(UtilAngle.radian(70));
    pcv.setTranslationStep(0.15);
    pcv.addCloud(cloud.toList(), cloudRgb.data);
    // pcv.setColorizer(new SingleAxisRgb.Z().fperiod(30.0));
    JComponent viewer = pcv.getComponent();
    viewer.setPreferredSize(new Dimension(600, 600));
    ShowImages.showWindow(viewer, "Point Cloud", true);
    System.out.println("Done");
}
Also used : Point3D_F64(georegression.struct.point.Point3D_F64) InterleavedU8(boofcv.struct.image.InterleavedU8) ListDisplayPanel(boofcv.gui.ListDisplayPanel) CameraPinholeBrown(boofcv.struct.calib.CameraPinholeBrown) ConfigDisparityBMBest5(boofcv.factory.disparity.ConfigDisparityBMBest5) LensDistortionBrown(boofcv.alg.distort.brown.LensDistortionBrown) RectifiedPairPanel(boofcv.gui.stereo.RectifiedPairPanel) BufferedImage(java.awt.image.BufferedImage) ConvertBufferedImage(boofcv.io.image.ConvertBufferedImage) PointCloudViewer(boofcv.visualize.PointCloudViewer) PointToPixelTransform_F64(boofcv.struct.distort.PointToPixelTransform_F64) LookUpImageFilesByIndex(boofcv.io.image.LookUpImageFilesByIndex) GrayU8(boofcv.struct.image.GrayU8) SceneWorkingGraph(boofcv.alg.structure.SceneWorkingGraph) MultiBaselineStereoIndependent(boofcv.alg.mvs.MultiBaselineStereoIndependent) Point2Transform2_F64(boofcv.struct.distort.Point2Transform2_F64) DogArray_I32(org.ddogleg.struct.DogArray_I32) DogArray(org.ddogleg.struct.DogArray) GrayF32(boofcv.struct.image.GrayF32) TIntObjectHashMap(gnu.trove.map.hash.TIntObjectHashMap) DisparityParameters(boofcv.alg.mvs.DisparityParameters)
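The colorization above relies on InterleavedU8.get24(), which packs the three bands of a pixel into one 0xRRGGBB int that the point-cloud viewer consumes as-is. A small sketch of unpacking that value, assuming the bands were loaded in R, G, B order:

int rgb = colorImage.get24(pixX, pixY);
int r = (rgb >> 16) & 0xFF; // band 0
int g = (rgb >> 8) & 0xFF;  // band 1
int b = rgb & 0xFF;         // band 2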

Example 5 with PointToPixelTransform_F64

Use of boofcv.struct.distort.PointToPixelTransform_F64 in project BoofCV by lessthanoptimal.

From the class ImplRectifyImageOps_F64, method allInsideLeft.

public static void allInsideLeft(int imageWidth, int imageHeight, DMatrixRMaj rectifyLeft, DMatrixRMaj rectifyRight) {
    PointTransformHomography_F64 tranLeft = new PointTransformHomography_F64(rectifyLeft);
    Point2D_F64 work = new Point2D_F64();
    // Find a bounding rectangle of valid pixels inside the rectified left image
    RectangleLength2D_F64 bound = LensDistortionOps_F64.boundBoxInside(imageWidth, imageHeight, new PointToPixelTransform_F64(tranLeft), work);
    // Scale the rectangle up so it fills the output image in both dimensions
    double scaleX = imageWidth / bound.width;
    double scaleY = imageHeight / bound.height;
    double scale = Math.max(scaleX, scaleY);
    adjustUncalibrated(rectifyLeft, rectifyRight, bound, scale);
}
Also used : Point2D_F64(georegression.struct.point.Point2D_F64) PointTransformHomography_F64(boofcv.alg.distort.PointTransformHomography_F64) PointToPixelTransform_F64(boofcv.struct.distort.PointToPixelTransform_F64) RectangleLength2D_F64(georegression.struct.shapes.RectangleLength2D_F64)
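Taking the max of the two scale factors stretches the inscribed rectangle until it covers the output image along both axes, so no invalid border pixels remain. A worked instance with made-up numbers:

// Suppose boundBoxInside() returned a 500 x 380 rectangle for a 640 x 480 image
double scaleX = 640.0 / 500.0;           // 1.28
double scaleY = 480.0 / 380.0;           // ~1.26
double scale = Math.max(scaleX, scaleY); // 1.28, enough to cover both dimensions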

Aggregations

PointToPixelTransform_F64 (boofcv.struct.distort.PointToPixelTransform_F64): 8
Point2Transform2_F64 (boofcv.struct.distort.Point2Transform2_F64): 7
Point2D_F64 (georegression.struct.point.Point2D_F64): 5
CameraPinholeBrown (boofcv.struct.calib.CameraPinholeBrown): 4
RectangleLength2D_F64 (georegression.struct.shapes.RectangleLength2D_F64): 4
LensDistortionBrown (boofcv.alg.distort.brown.LensDistortionBrown): 3
SequencePoint2Transform2_F64 (boofcv.struct.distort.SequencePoint2Transform2_F64): 3
PointTransformHomography_F64 (boofcv.alg.distort.PointTransformHomography_F64): 2
GrayF32 (boofcv.struct.image.GrayF32): 2
GrayU8 (boofcv.struct.image.GrayU8): 2
Point3D_F64 (georegression.struct.point.Point3D_F64): 2
BundleAdjustmentCamera (boofcv.abst.geo.bundle.BundleAdjustmentCamera): 1
LensDistortionNarrowFOV (boofcv.alg.distort.LensDistortionNarrowFOV): 1
LensDistortionPinhole (boofcv.alg.distort.pinhole.LensDistortionPinhole): 1
MultiViewOps (boofcv.alg.geo.MultiViewOps): 1
WorldToCameraToPixel (boofcv.alg.geo.WorldToCameraToPixel): 1
GImageMiscOps (boofcv.alg.misc.GImageMiscOps): 1
ImageMiscOps (boofcv.alg.misc.ImageMiscOps): 1
ImageStatistics (boofcv.alg.misc.ImageStatistics): 1
DisparityParameters (boofcv.alg.mvs.DisparityParameters): 1