Example 11 with DogArray_I32

Use of org.ddogleg.struct.DogArray_I32 in project BoofCV by lessthanoptimal.

From the class TestMetricSpawnSceneFromView, method simple().

/**
 * Simple scenario that tests everything together
 */
@Test
void simple() {
    // Create a simple scene with the camera moving along a straight line
    var dbSimilar = new MockLookupSimilarImagesRealistic();
    dbSimilar.pathLine(10, 0.1, 0.9, 2);
    var dbCams = new MockLookUpCameraInfo(dbSimilar.intrinsic);
    PairwiseImageGraph pairwise = dbSimilar.createPairwise();
    PairwiseImageGraph.View seed = pairwise.nodes.get(2);
    DogArray_I32 motions = DogArray_I32.array(0, 1, 2, 3);
    // Create the new scene
    var alg = new MetricSpawnSceneFromView(new RefineMetricWorkingGraph(), new PairwiseGraphUtils(new ConfigProjectiveReconstruction()));
    assertTrue(alg.process(dbSimilar, dbCams, pairwise, seed, motions));
    // Do some very basic tests on the result
    SceneWorkingGraph found = alg.getScene();
    assertEquals(1, found.listCameras.size());
    assertEquals(5, found.listViews.size());
}
Also used: DogArray_I32 (org.ddogleg.struct.DogArray_I32), Test (org.junit.jupiter.api.Test)
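
The tests on this page exercise DogArray_I32 directly, so it is worth spelling out the handful of calls they rely on. The sketch below is illustrative only; the values are arbitrary, but every method shown (array, add, get, resize, set, isEquals, and the size field) appears somewhere in these examples.

import org.ddogleg.struct.DogArray_I32;

public class DogArrayI32Sketch {
    public static void main(String[] args) {
        // Create an array pre-populated with values, as done for 'motions' above
        DogArray_I32 indexes = DogArray_I32.array(0, 1, 2, 3);

        // Append a value and read elements back
        indexes.add(7);
        int last = indexes.get(indexes.size - 1); // 7

        // Resize and write by index, as done with connInlierIndexes in Example 15
        indexes.resize(10);
        indexes.set(9, 42);

        // Compare contents against a literal list, as in pausingAndResuming3D()
        DogArray_I32 selected = DogArray_I32.array(0, 2);
        boolean match = selected.isEquals(0, 2); // true

        System.out.println(last + " " + indexes.size + " " + match);
    }
}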

Example 12 with DogArray_I32

Use of org.ddogleg.struct.DogArray_I32 in project BoofCV by lessthanoptimal.

From the class TestSelectFramesForReconstruction3D, method pausingAndResuming3D().

/**
 * Runs the selector, but not every frame will meet the criteria to be considered 3D
 */
@Test
void pausingAndResuming3D() {
    var alg = new MockFrameSelector<GrayF32>();
    // disable this check to make the test easier to write
    alg.config.minTranslation.setFixed(0);
    alg.config.minimumPairs = 0;
    // Can't call init because of null checks
    alg.width = width;
    alg.height = height;
    alg.forceKeyFrame = true;
    // Process 4 frames
    alg.next(dummy);
    alg.isStatic = true;
    // 3D check should not be called. Sanity check here
    alg.is3D = true;
    alg.next(dummy);
    alg.isStatic = false;
    alg.next(dummy);
    assertEquals(3, alg.callsCopy);
    assertEquals(2, alg.callsCreatePairs);
    assertEquals(3, alg.callsTracking);
    DogArray_I32 selected = alg.getSelectedFrames();
    assertTrue(selected.isEquals(0, 2));
}
Also used: DogArray_I32 (org.ddogleg.struct.DogArray_I32), Test (org.junit.jupiter.api.Test)

Example 13 with DogArray_I32

Use of org.ddogleg.struct.DogArray_I32 in project BoofCV by lessthanoptimal.

From the class ColorizeMultiViewStereoResults, method processScenePoints().

/**
 * Looks up the colors for all the points in the scene by reprojecting them back onto their original images.
 *
 * @param scene (Input) Scene's structure
 * @param indexToId (Input) Convert view index to view ID
 * @param indexColor (Output) RGB values are passed through to this function.
 */
public void processScenePoints(SceneStructureMetric scene, BoofLambdas.IndexToString indexToId, BoofLambdas.IndexRgbConsumer indexColor) {
    // Loading images is expensive, so when looking up the color of each pixel we want to process all features
    // inside the same image at once. Unfortunately there is no fast way to look up all features by image,
    // so a lookup table is constructed below.
    List<DogArray_I32> lookupPointsByView = new ArrayList<>();
    for (int i = 0; i < scene.views.size; i++) {
        lookupPointsByView.add(new DogArray_I32());
    }
    // Add the first view each point was seen in to the list
    for (int pointIdx = 0; pointIdx < scene.points.size; pointIdx++) {
        SceneStructureCommon.Point p = scene.points.get(pointIdx);
        lookupPointsByView.get(p.views.get(0)).add(pointIdx);
    }
    // TODO in the future generalize this for 3D and 4D points
    var iterator = new ScenePointsSetIterator<>(new PointIndex4D_F64());
    var world_to_view = new Se3_F64();
    for (int viewIdx = 0; viewIdx < lookupPointsByView.size(); viewIdx++) {
        // Load the image
        checkTrue(lookupImages.loadImage(indexToId.process(viewIdx), image), "Failed to load image");
        // Set up the iterator for this image
        iterator.initialize(scene, lookupPointsByView.get(viewIdx));
        // Get the view that is being processed
        SceneStructureMetric.View v = scene.views.get(viewIdx);
        // Set up the camera projection model using the bundle adjustment model directly
        BundleAdjustmentOps.convert(scene.getViewCamera(v).model, image.width, image.height, intrinsic);
        Point2Transform2_F64 norm_to_pixel = new LensDistortionBrown(intrinsic).distort_F64(false, true);
        // Get the transform from world/cloud to this view
        scene.getWorldToView(v, world_to_view, tmp);
        // Grab the colorized points from this view
        colorizer.process4(image, iterator, world_to_view, norm_to_pixel, indexColor);
    }
}
Also used: LensDistortionBrown (boofcv.alg.distort.brown.LensDistortionBrown), ArrayList (java.util.ArrayList), Point2Transform2_F64 (boofcv.struct.distort.Point2Transform2_F64), DogArray_I32 (org.ddogleg.struct.DogArray_I32), SceneStructureCommon (boofcv.abst.geo.bundle.SceneStructureCommon), SceneStructureMetric (boofcv.abst.geo.bundle.SceneStructureMetric), PointIndex4D_F64 (boofcv.struct.geo.PointIndex4D_F64), Se3_F64 (georegression.struct.se.Se3_F64)
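
The key trick in processScenePoints() is the lookup table built at the top of the method: point indexes are bucketed under the first view that observed them, so each image only has to be loaded once. Below is a standalone sketch of that grouping pattern, with the scene structure replaced by a plain array for brevity (firstViewOfPoint[i] stands in for scene.points.get(i).views.get(0)).

import java.util.ArrayList;
import java.util.List;
import org.ddogleg.struct.DogArray_I32;

public class GroupPointsByViewSketch {
    /**
     * Buckets each point index under the first view that observed it so that all
     * points seen in the same image can be processed together.
     */
    public static List<DogArray_I32> groupByFirstView(int numViews, int[] firstViewOfPoint) {
        List<DogArray_I32> pointsByView = new ArrayList<>();
        for (int viewIdx = 0; viewIdx < numViews; viewIdx++) {
            pointsByView.add(new DogArray_I32());
        }
        for (int pointIdx = 0; pointIdx < firstViewOfPoint.length; pointIdx++) {
            pointsByView.get(firstViewOfPoint[pointIdx]).add(pointIdx);
        }
        return pointsByView;
    }
}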

Example 14 with DogArray_I32

Use of org.ddogleg.struct.DogArray_I32 in project BoofCV by lessthanoptimal.

From the class SceneMergingOperations, method computeSceneTransform().

/**
 * Computes the transform between the two views in different scenes. This is done by computing the depth
 * for all common image features. The depth is used to estimate the scale difference between the scenes.
 * After that, finding the SE3 transform is trivial.
 */
public boolean computeSceneTransform(LookUpSimilarImages dbSimilar, SceneWorkingGraph src, SceneWorkingGraph dst, SceneWorkingGraph.View selectedSrc, SceneWorkingGraph.View selectedDst, ScaleSe3_F64 src_to_dst) {
    // Sanity check
    BoofMiscOps.checkSame(selectedSrc.pview, selectedDst.pview);
    if (verbose != null)
        printInlierViews(selectedSrc, selectedDst);
    // Get the set of feature indexes for the selected view that were inliers in each scene
    SceneWorkingGraph.InlierInfo inliersSrc = Objects.requireNonNull(selectedSrc.getBestInliers());
    SceneWorkingGraph.InlierInfo inliersDst = Objects.requireNonNull(selectedDst.getBestInliers());
    DogArray_I32 zeroSrcIdx = inliersSrc.observations.get(0);
    DogArray_I32 zeroDstIdx = inliersDst.observations.get(0);
    // Number of feature observations in this view
    int numObservations = selectedSrc.pview.totalObservations;
    // Find features in the target view that are common between the two scenes' inlier feature sets
    int numCommon = findCommonInliers(zeroSrcIdx, zeroDstIdx, numObservations, zeroFeatureToCommonIndex);
    if (numCommon == 0)
        return false;
    // Load observations of common features in view[0]
    SceneWorkingGraph.Camera cameraSrc = src.getViewCamera(selectedSrc);
    loadViewZeroCommonObservations(dbSimilar, cameraSrc.prior, numCommon, selectedSrc.pview.id);
    List<DogArray<Point2D_F64>> listViewPixelsSrc = getCommonFeaturePixelsViews(dbSimilar, src, inliersSrc);
    List<DogArray<Point2D_F64>> listViewPixelsDst = getCommonFeaturePixelsViews(dbSimilar, dst, inliersDst);
    // Load the extrinsics and convert the intrinsics into a usable format
    loadExtrinsicsIntrinsics(src, inliersSrc, listWorldToViewSrc, listIntrinsicsSrc);
    loadExtrinsicsIntrinsics(dst, inliersDst, listWorldToViewDst, listIntrinsicsDst);
    if (verbose != null)
        verbose.println("commonInliers.size=" + numCommon + " src.size=" + zeroSrcIdx.size + " dst.size=" + zeroDstIdx.size);
    // Pass in everything to the scale resolving algorithm
    resolveScale.initialize(zeroViewPixels.size);
    resolveScale.setScene1((viewIdx, featureIdx, pixel) -> {
        if (viewIdx == 0)
            pixel.setTo(zeroViewPixels.get(featureIdx));
        else
            pixel.setTo(listViewPixelsSrc.get(viewIdx - 1).get(featureIdx));
    }, listWorldToViewSrc, (List) listIntrinsicsSrc.toList());
    resolveScale.setScene2((viewIdx, featureIdx, pixel) -> {
        if (viewIdx == 0)
            pixel.setTo(zeroViewPixels.get(featureIdx));
        else
            pixel.setTo(listViewPixelsDst.get(viewIdx - 1).get(featureIdx));
    }, listWorldToViewDst, (List) listIntrinsicsDst.toList());
    return resolveScale.process(src_to_dst);
}
Also used: DogArray_I32 (org.ddogleg.struct.DogArray_I32), DogArray (org.ddogleg.struct.DogArray), VerbosePrint (org.ddogleg.struct.VerbosePrint)
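
The scale estimation step described in the javadoc can be illustrated with a toy computation: if the same physical feature has depth d_src in the source scene and d_dst in the destination scene when seen from the shared view, the ratio d_dst/d_src estimates the scale factor between the two reconstructions. The sketch below only illustrates that idea; the real work is delegated to resolveScale above, whose internals are not shown here.

import java.util.Arrays;

public class SceneScaleSketch {
    /**
     * Toy estimate of the scale factor between two scenes from the depths of common
     * features measured in the shared view. The median ratio is used for robustness
     * to outliers. Illustration only; not the actual resolveScale implementation.
     */
    public static double estimateScale(double[] depthsSrc, double[] depthsDst) {
        double[] ratios = new double[depthsSrc.length];
        for (int i = 0; i < ratios.length; i++) {
            ratios[i] = depthsDst[i] / depthsSrc[i];
        }
        Arrays.sort(ratios);
        return ratios[ratios.length / 2];
    }
}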

Example 15 with DogArray_I32

Use of org.ddogleg.struct.DogArray_I32 in project BoofCV by lessthanoptimal.

From the class ProjectiveInitializeAllCommon, method createObservationsForBundleAdjustment().

/**
 * Convert observations into a format that bundle adjustment will understand
 *
 * @param seedConnIdx Which edges in seed to use
 */
protected void createObservationsForBundleAdjustment(DogArray_I32 seedConnIdx) {
    DogArray_I32 inlierToSeed = inlierIndexes.get(0);
    // seed view + the motions
    utils.observations.initialize(inlierIndexes.size);
    // Observations for the seed view are a special case
    {
        SceneObservations.View obsView = utils.observations.getView(0);
        for (int i = 0; i < inlierToSeed.size; i++) {
            int id = inlierToSeed.data[i];
            // featsA is never modified after it is initially loaded
            Point2D_F64 o = utils.featsA.get(id);
            id = seedToStructure.data[id];
            obsView.add(id, (float) o.x, (float) o.y);
        }
    }
    // Now add observations for edges connected to the seed
    for (int motionIdx = 0; motionIdx < seedConnIdx.size(); motionIdx++) {
        SceneObservations.View obsView = utils.observations.getView(motionIdx + 1);
        Motion m = utils.seed.connections.get(seedConnIdx.get(motionIdx));
        View v = m.other(utils.seed);
        boolean seedIsSrc = m.src == utils.seed;
        utils.dbCams.lookupCalibration(utils.dbCams.viewToCamera(v.id), utils.priorCamB);
        utils.dbSimilar.lookupPixelFeats(v.id, utils.featsB);
        BoofMiscOps.offsetPixels(utils.featsB.toList(), -utils.priorCamB.cx, -utils.priorCamB.cy);
        // Indicate which observation from this view contributed to which 3D feature
        DogArray_I32 connInlierIndexes = inlierIndexes.get(motionIdx + 1);
        connInlierIndexes.resize(inlierToSeed.size);
        for (int epipolarInlierIdx = 0; epipolarInlierIdx < m.inliers.size; epipolarInlierIdx++) {
            AssociatedIndex a = m.inliers.get(epipolarInlierIdx);
            // See if the feature is one of inliers computed from 3-view RANSAC
            int structId = seedToStructure.data[seedIsSrc ? a.src : a.dst];
            if (structId < 0)
                continue;
            // Get the observation in this view of feature[structId]
            connInlierIndexes.set(structId, seedIsSrc ? a.dst : a.src);
            Point2D_F64 o = utils.featsB.get(seedIsSrc ? a.dst : a.src);
            obsView.add(structId, (float) o.x, (float) o.y);
        }
    }
}
Also used: Motion (boofcv.alg.structure.PairwiseImageGraph.Motion), Point2D_F64 (georegression.struct.point.Point2D_F64), SceneObservations (boofcv.abst.geo.bundle.SceneObservations), DogArray_I32 (org.ddogleg.struct.DogArray_I32), View (boofcv.alg.structure.PairwiseImageGraph.View), VerbosePrint (org.ddogleg.struct.VerbosePrint), AssociatedIndex (boofcv.struct.feature.AssociatedIndex)
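
The conversion above boils down to a simple pattern: walk the inlier features, look up each feature's 3D structure index (negative values mean the feature is not part of the structure and is skipped), and add the pixel observation to the corresponding view. Below is a minimal single-view sketch of that pattern. It uses the SceneObservations calls that appear in the snippet above (initialize, getView, View.add); the no-argument SceneObservations constructor and the featureToStructure array, a stand-in for seedToStructure, are assumptions for illustration.

import java.util.List;

import boofcv.abst.geo.bundle.SceneObservations;
import georegression.struct.point.Point2D_F64;

public class ObservationsSketch {
    /**
     * Adds the pixel observation of every inlier feature in a single view to a
     * SceneObservations structure. featureToStructure[i] is the structure index of
     * feature i, or a negative value if the feature is not part of the 3D structure.
     */
    public static SceneObservations convert(List<Point2D_F64> pixels, int[] featureToStructure) {
        var observations = new SceneObservations();
        // One view in this toy example
        observations.initialize(1);
        SceneObservations.View view = observations.getView(0);
        for (int featureIdx = 0; featureIdx < pixels.size(); featureIdx++) {
            int structId = featureToStructure[featureIdx];
            if (structId < 0)
                continue; // feature was not an inlier of the reconstruction
            Point2D_F64 p = pixels.get(featureIdx);
            view.add(structId, (float) p.x, (float) p.y);
        }
        return observations;
    }
}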

Aggregations

DogArray_I32 (org.ddogleg.struct.DogArray_I32): 192 uses
Test (org.junit.jupiter.api.Test): 73 uses
Point2D_I32 (georegression.struct.point.Point2D_I32): 24 uses
ArrayList (java.util.ArrayList): 21 uses
Point2D_F64 (georegression.struct.point.Point2D_F64): 17 uses
DogArray (org.ddogleg.struct.DogArray): 17 uses
TupleDesc_F64 (boofcv.struct.feature.TupleDesc_F64): 15 uses
GrayS32 (boofcv.struct.image.GrayS32): 10 uses
VerbosePrint (org.ddogleg.struct.VerbosePrint): 7 uses
View (boofcv.alg.structure.PairwiseImageGraph.View): 6 uses
AssociatedIndex (boofcv.struct.feature.AssociatedIndex): 6 uses
GrayI (boofcv.struct.image.GrayI): 5 uses
Point3D_F64 (georegression.struct.point.Point3D_F64): 5 uses
GrowArray (pabeles.concurrency.GrowArray): 5 uses
DetectDescribePoint (boofcv.abst.feature.detdesc.DetectDescribePoint): 4 uses
BTrack (boofcv.alg.sfm.d3.structure.VisOdomBundleAdjustment.BTrack): 4 uses
AssociatedTripleIndex (boofcv.struct.feature.AssociatedTripleIndex): 4 uses
SceneObservations (boofcv.abst.geo.bundle.SceneObservations): 3 uses
SceneWorkingGraph (boofcv.alg.structure.SceneWorkingGraph): 3 uses
ConfigAssociateGreedy (boofcv.factory.feature.associate.ConfigAssociateGreedy): 3 uses