Search in sources:

Example 36 with GrowQueue_I32

use of org.ddogleg.struct.GrowQueue_I32 in project BoofCV by lessthanoptimal.

From class TestReidSolomonCodes, method correctErrors_hand.

/**
 * Verifies error correction against a scenario that was computed by hand
 */
@Test
public void correctErrors_hand() {
    // Encode a known 16-byte message and compute its ECC bytes
    GrowQueue_I8 original = GrowQueue_I8.parseHex("[ 0x40, 0xd2, 0x75, 0x47, 0x76, 0x17, 0x32, 0x06, 0x27, 0x26, 0x96, 0xc6, 0xc6, 0x96, 0x70, 0xec ]");
    int numSyndromes = 10;
    ReidSolomonCodes alg = new ReidSolomonCodes(8, primitive8);
    alg.generator(numSyndromes);
    GrowQueue_I8 eccBytes = new GrowQueue_I8();
    alg.computeECC(original, eccBytes);

    // Corrupt the message at three known locations
    GrowQueue_I8 damaged = original.copy();
    damaged.data[0] = 0;
    damaged.data[4] = 8;
    damaged.data[5] = 9;

    // Compute syndromes from the damaged message, then the error locator polynomial
    GrowQueue_I8 syn = new GrowQueue_I8();
    alg.computeSyndromes(damaged, eccBytes, syn);
    GrowQueue_I8 locatorPoly = new GrowQueue_I8();
    alg.findErrorLocatorPolynomialBM(syn, locatorPoly);

    // Specify the error locations by hand instead of searching for them
    GrowQueue_I32 positions = new GrowQueue_I32(3);
    positions.data[0] = 0;
    positions.data[1] = 4;
    positions.data[2] = 5;
    positions.size = 3;

    alg.correctErrors(damaged, original.size + eccBytes.size, syn, locatorPoly, positions);

    // After correction the damaged message should exactly match the original
    assertEquals(damaged.size, original.size);
    for (int i = 0; i < damaged.size; i++) {
        assertEquals(damaged.get(i), original.get(i));
    }
}
Also used : GrowQueue_I8(org.ddogleg.struct.GrowQueue_I8) GrowQueue_I32(org.ddogleg.struct.GrowQueue_I32) Test(org.junit.Test)

Example 37 with GrowQueue_I32

use of org.ddogleg.struct.GrowQueue_I32 in project BoofCV by lessthanoptimal.

From class ExampleMultiviewSceneReconstruction, method estimateMotionPnP.

/**
 * Estimate the motion between two images.  Image A is assumed to have known features with 3D coordinates already
 * and image B is an unprocessed image with no 3D features yet.
 *
 * @param imageA index of the image with known 3D features
 * @param imageB index of the unprocessed image
 */
private void estimateMotionPnP(int imageA, int imageB) {
    // Mark image B as processed so that it isn't processed a second time.
    processedImage[imageB] = true;
    System.out.println("Estimating PnP motion between " + imageA + " and " + imageB);
    // initially prune features using essential matrix
    Se3_F64 dummy = new Se3_F64();
    List<AssociatedIndex> inliers = new ArrayList<>();
    if (!estimateStereoPose(imageA, imageB, dummy, inliers))
        throw new RuntimeException("The first image pair is a bad keyframe!");
    FastQueue<Point2D_F64> pixelsA = imagePixels.get(imageA);
    FastQueue<Point2D_F64> pixelsB = imagePixels.get(imageB);
    List<Feature3D> featuresA = imageFeature3D.get(imageA);
    // this should be empty
    List<Feature3D> featuresB = imageFeature3D.get(imageB);
    // create the associated pair for motion estimation
    List<Point2D3D> features = new ArrayList<>();
    List<AssociatedIndex> inputRansac = new ArrayList<>();
    List<AssociatedIndex> unmatched = new ArrayList<>();
    // Partition the essential-matrix inliers: associations whose feature in A already has a
    // 3D coordinate become 2D-3D pairs for PnP; the rest are kept aside for triangulation later.
    for (int i = 0; i < inliers.size(); i++) {
        AssociatedIndex a = inliers.get(i);
        Feature3D t = lookupFeature(featuresA, imageA, pixelsA.get(a.src));
        if (t != null) {
            Point2D_F64 p = pixelsB.get(a.dst);
            features.add(new Point2D3D(p, t.worldPt));
            inputRansac.add(a);
        } else {
            unmatched.add(a);
        }
    }
    // make sure there are enough features to estimate motion
    if (features.size() < 15) {
        System.out.println("  Too few features for PnP!!  " + features.size());
        return;
    }
    // estimate the motion between the two images
    if (!estimatePnP.process(features))
        throw new RuntimeException("Motion estimation failed");
    // refine the motion estimate using non-linear optimization
    Se3_F64 motionWorldToB = new Se3_F64();
    if (!refinePnP.fitModel(estimatePnP.getMatchSet(), estimatePnP.getModelParameters(), motionWorldToB))
        throw new RuntimeException("Refine failed!?!?");
    motionWorldToCamera[imageB].set(motionWorldToB);
    estimatedImage[imageB] = true;
    // Add all tracks in the inlier list to the B's list of 3D features
    int N = estimatePnP.getMatchSet().size();
    // inlierPnP[i] is true when features.get(i) was a PnP inlier; used below to decide
    // which essential-matrix inliers still need re-triangulation.
    boolean[] inlierPnP = new boolean[features.size()];
    for (int i = 0; i < N; i++) {
        int index = estimatePnP.getInputIndex(i);
        AssociatedIndex a = inputRansac.get(index);
        // find the track that this was associated with and add it to B
        Feature3D t = lookupFeature(featuresA, imageA, pixelsA.get(a.src));
        featuresB.add(t);
        t.frame.add(imageB);
        t.obs.grow().set(pixelsB.get(a.dst));
        inlierPnP[index] = true;
    }
    // Create new tracks for all features which were a member of essential matrix but not used to estimate
    // the motion using PnP.
    // motionBtoA maps points from B's camera frame into A's camera frame (B -> world -> A)
    Se3_F64 motionBtoWorld = motionWorldToB.invert(null);
    Se3_F64 motionWorldToA = motionWorldToCamera[imageA];
    Se3_F64 motionBtoA = motionBtoWorld.concat(motionWorldToA, null);
    Point3D_F64 pt_in_b = new Point3D_F64();
    int totalAdded = 0;
    GrowQueue_I32 colorsA = imageColors.get(imageA);
    for (AssociatedIndex a : unmatched) {
        if (!triangulate.triangulate(pixelsB.get(a.dst), pixelsA.get(a.src), motionBtoA, pt_in_b))
            continue;
        // the feature has to be in front of the camera
        if (pt_in_b.z > 0) {
            Feature3D t = new Feature3D();
            // transform from B back to world frame
            SePointOps_F64.transform(motionBtoWorld, pt_in_b, t.worldPt);
            t.color = colorsA.get(a.src);
            t.obs.grow().set(pixelsA.get(a.src));
            t.obs.grow().set(pixelsB.get(a.dst));
            t.frame.add(imageA);
            t.frame.add(imageB);
            featuresAll.add(t);
            featuresA.add(t);
            featuresB.add(t);
            totalAdded++;
        }
    }
    // Features which had a 3D coordinate but were rejected by PnP tend to work
    // out better if the 3D coordinate is re-triangulated as a new feature
    for (int i = 0; i < features.size(); i++) {
        if (inlierPnP[i])
            continue;
        AssociatedIndex a = inputRansac.get(i);
        if (!triangulate.triangulate(pixelsB.get(a.dst), pixelsA.get(a.src), motionBtoA, pt_in_b))
            continue;
        // the feature has to be in front of the camera
        if (pt_in_b.z > 0) {
            Feature3D t = new Feature3D();
            // transform from B back to world frame
            SePointOps_F64.transform(motionBtoWorld, pt_in_b, t.worldPt);
            // only add this feature to image B since a similar one already exists in A.
            t.color = colorsA.get(a.src);
            t.obs.grow().set(pixelsB.get(a.dst));
            t.frame.add(imageB);
            featuresAll.add(t);
            featuresB.add(t);
            totalAdded++;
        }
    }
    System.out.println("  New added " + totalAdded + "  tracksA.size = " + featuresA.size() + "  tracksB.size = " + featuresB.size());
}
Also used : Point3D_F64(georegression.struct.point.Point3D_F64) ArrayList(java.util.ArrayList) DetectDescribePoint(boofcv.abst.feature.detdesc.DetectDescribePoint) GrowQueue_I32(org.ddogleg.struct.GrowQueue_I32) Point2D3D(boofcv.struct.geo.Point2D3D) Point2D_F64(georegression.struct.point.Point2D_F64) Se3_F64(georegression.struct.se.Se3_F64) AssociatedIndex(boofcv.struct.feature.AssociatedIndex)

Example 38 with GrowQueue_I32

use of org.ddogleg.struct.GrowQueue_I32 in project BoofCV by lessthanoptimal.

From class ExampleMultiviewSceneReconstruction, method detectImageFeatures.

/**
 * Detect image features in all the images.  Save location, description, and color
 *
 * @param colorImages input images to run feature detection on
 */
private void detectImageFeatures(List<BufferedImage> colorImages) {
    System.out.println("Detecting Features in each image.  Total " + colorImages.size());
    for (BufferedImage image : colorImages) {
        // progress indicator: one '*' per image
        System.out.print("*");
        FastQueue<BrightFeature> descriptions = new SurfFeatureQueue(64);
        FastQueue<Point2D_F64> locations = new FastQueue<>(Point2D_F64.class, true);
        GrowQueue_I32 pixelColors = new GrowQueue_I32();
        detectFeatures(image, descriptions, locations, pixelColors);
        // save the per-image results for the later reconstruction stages
        imageVisualFeatures.add(descriptions);
        imagePixels.add(locations);
        imageColors.add(pixelColors);
    }
    System.out.println();
}
Also used : BrightFeature(boofcv.struct.feature.BrightFeature) SurfFeatureQueue(boofcv.struct.feature.SurfFeatureQueue) Point2D_F64(georegression.struct.point.Point2D_F64) FastQueue(org.ddogleg.struct.FastQueue) DetectDescribePoint(boofcv.abst.feature.detdesc.DetectDescribePoint) BufferedImage(java.awt.image.BufferedImage) ConvertBufferedImage(boofcv.io.image.ConvertBufferedImage) GrowQueue_I32(org.ddogleg.struct.GrowQueue_I32)

Example 39 with GrowQueue_I32

use of org.ddogleg.struct.GrowQueue_I32 in project BoofCV by lessthanoptimal.

From class TestRefinePolygonToContour, method reverseOrder.

@Test
public void reverseOrder() {
    // Build the contour and corner indexes of an axis-aligned 10x5 rectangle at the origin
    RectangleLength2D_I32 rectangle = new RectangleLength2D_I32(0, 0, 10, 5);
    List<Point2D_I32> boundary = rectToContour(rectangle);
    GrowQueue_I32 corners = computeContourVertexes(rectangle);

    // Reverse the corner ordering to test the opposite traversal direction
    flip(corners.data, corners.size);

    Polygon2D_F64 result = new Polygon2D_F64();
    new RefinePolygonToContour().process(boundary, corners, result);

    // Vertexes should come back in the reversed order
    assertTrue(checkPolygon(new double[] { 0, 0, 0, 4, 9, 4, 9, 0 }, result));
}
Also used : RectangleLength2D_I32(georegression.struct.shapes.RectangleLength2D_I32) Point2D_I32(georegression.struct.point.Point2D_I32) Polygon2D_F64(georegression.struct.shapes.Polygon2D_F64) GrowQueue_I32(org.ddogleg.struct.GrowQueue_I32) Test(org.junit.Test)

Example 40 with GrowQueue_I32

use of org.ddogleg.struct.GrowQueue_I32 in project BoofCV by lessthanoptimal.

From class TestRefinePolygonToContour, method basic.

@Test
public void basic() {
    // Build the contour and corner indexes of an axis-aligned 10x5 rectangle at the origin
    RectangleLength2D_I32 rectangle = new RectangleLength2D_I32(0, 0, 10, 5);
    List<Point2D_I32> boundary = rectToContour(rectangle);
    GrowQueue_I32 corners = computeContourVertexes(rectangle);

    Polygon2D_F64 result = new Polygon2D_F64();
    new RefinePolygonToContour().process(boundary, corners, result);

    // Corners should be recovered in their original order
    assertTrue(checkPolygon(new double[] { 0, 0, 9, 0, 9, 4, 0, 4 }, result));
}
Also used : RectangleLength2D_I32(georegression.struct.shapes.RectangleLength2D_I32) Point2D_I32(georegression.struct.point.Point2D_I32) Polygon2D_F64(georegression.struct.shapes.Polygon2D_F64) GrowQueue_I32(org.ddogleg.struct.GrowQueue_I32) Test(org.junit.Test)

Aggregations

GrowQueue_I32 (org.ddogleg.struct.GrowQueue_I32)60 Test (org.junit.Test)35 Point2D_I32 (georegression.struct.point.Point2D_I32)21 GrayS32 (boofcv.struct.image.GrayS32)10 ArrayList (java.util.ArrayList)7 AssociatedIndex (boofcv.struct.feature.AssociatedIndex)6 Point2D_F64 (georegression.struct.point.Point2D_F64)5 FastQueue (org.ddogleg.struct.FastQueue)5 DetectDescribePoint (boofcv.abst.feature.detdesc.DetectDescribePoint)3 ColorQueue_F32 (boofcv.struct.feature.ColorQueue_F32)3 GrayF32 (boofcv.struct.image.GrayF32)3 GrowQueue_I8 (org.ddogleg.struct.GrowQueue_I8)3 ConvertBufferedImage (boofcv.io.image.ConvertBufferedImage)2 BrightFeature (boofcv.struct.feature.BrightFeature)2 LineGeneral2D_F64 (georegression.struct.line.LineGeneral2D_F64)2 Point3D_F64 (georegression.struct.point.Point3D_F64)2 Se3_F64 (georegression.struct.se.Se3_F64)2 Polygon2D_F64 (georegression.struct.shapes.Polygon2D_F64)2 RectangleLength2D_I32 (georegression.struct.shapes.RectangleLength2D_I32)2 BufferedImage (java.awt.image.BufferedImage)2