Search in sources:

Example 1 with Homography2D_F64

use of georegression.struct.homography.Homography2D_F64 in project BoofCV by lessthanoptimal.

From the class TestPointTransformHomography_F64, method compareToDirect:

/**
 * Verifies the transform by comparing its result against a direct application
 * of {@link HomographyPointOps_F64#transform}.
 */
@Test
public void compareToDirect() {
    // Arbitrary homography and input point
    Homography2D_F64 H = new Homography2D_F64(1, 2, 3, 4, 5, 6, 7, 8, 9);
    Point2D_F64 pt = new Point2D_F64(50, 60);

    // Ground truth computed directly
    Point2D_F64 expected = new Point2D_F64();
    HomographyPointOps_F64.transform(H, pt, expected);

    // Result produced by the class under test
    PointTransformHomography_F64 alg = new PointTransformHomography_F64();
    alg.set(H);
    Point2D_F64 found = new Point2D_F64();
    alg.compute(pt.x, pt.y, found);

    assertEquals(expected.x, found.x, 1e-4);
    assertEquals(expected.y, found.y, 1e-4);
}
Also used : Point2D_F64(georegression.struct.point.Point2D_F64) Homography2D_F64(georegression.struct.homography.Homography2D_F64) Test(org.junit.Test)

Example 2 with Homography2D_F64

use of georegression.struct.homography.Homography2D_F64 in project BoofCV by lessthanoptimal.

From the class FactoryMotion2D, method createMotion2D:

/**
 * Estimates the 2D motion of an image using different models.
 *
 * @param ransacIterations Number of RANSAC iterations
 * @param inlierThreshold Threshold which defines an inlier.
 * @param outlierPrune If a feature is an outlier for this many turns in a row it is dropped. Try 2
 * @param absoluteMinimumTracks New features will be respawned if the number of inliers drop below this number.
 * @param respawnTrackFraction If the fraction of current inliers to the original number of inliers drops below
 *                             this fraction then new features are spawned.  Try 0.3
 * @param respawnCoverageFraction If the area covered drops by this fraction then spawn more features.  Try 0.8
 * @param refineEstimate Should it refine the model estimate using all inliers.
 * @param tracker Point feature tracker.
 * @param motionModel Instance of the motion model used. Affine2D_F64 or Homography2D_F64
 * @param <I> Image input type.
 * @param <IT> Motion model type.
 * @return  ImageMotion2D
 */
public static <I extends ImageBase<I>, IT extends InvertibleTransform> ImageMotion2D<I, IT> createMotion2D(int ransacIterations, double inlierThreshold, int outlierPrune, int absoluteMinimumTracks, double respawnTrackFraction, double respawnCoverageFraction, boolean refineEstimate, PointTracker<I> tracker, IT motionModel) {
    // Model-specific components selected from the runtime type of motionModel.
    // Raw-type casts are required to bridge the concrete implementations to the generic IT.
    ModelManager<IT> modelManager;
    ModelGenerator<IT, AssociatedPair> generator;
    DistanceFromModel<IT, AssociatedPair> distanceMetric;
    ModelFitter<IT, AssociatedPair> refiner = null;

    if (motionModel instanceof Homography2D_F64) {
        GenerateHomographyLinear homographyAlg = new GenerateHomographyLinear(true);
        modelManager = (ModelManager) new ModelManagerHomography2D_F64();
        generator = (ModelGenerator) homographyAlg;
        distanceMetric = (DistanceFromModel) new DistanceHomographySq();
        // the generator doubles as the refiner for homographies
        if (refineEstimate)
            refiner = (ModelFitter) homographyAlg;
    } else if (motionModel instanceof Affine2D_F64) {
        GenerateAffine2D affineAlg = new GenerateAffine2D();
        modelManager = (ModelManager) new ModelManagerAffine2D_F64();
        generator = (ModelGenerator) affineAlg;
        distanceMetric = (DistanceFromModel) new DistanceAffine2DSq();
        if (refineEstimate)
            refiner = (ModelFitter) affineAlg;
    } else if (motionModel instanceof Se2_F64) {
        MotionTransformPoint<Se2_F64, Point2D_F64> motionAlg = new MotionSe2PointSVD_F64();
        modelManager = (ModelManager) new ModelManagerSe2_F64();
        generator = (ModelGenerator) new GenerateSe2_AssociatedPair(motionAlg);
        distanceMetric = (DistanceFromModel) new DistanceSe2Sq();
        // no refine, already optimal
    } else {
        throw new RuntimeException("Unknown model type: " + motionModel.getClass().getSimpleName());
    }

    // Robust fitting with a fixed seed for reproducibility
    ModelMatcher<IT, AssociatedPair> modelMatcher = new Ransac(123123, modelManager, generator, distanceMetric, ransacIterations, inlierThreshold);
    ImageMotionPointTrackerKey<I, IT> lowlevel = new ImageMotionPointTrackerKey<>(tracker, modelMatcher, refiner, motionModel, outlierPrune);
    ImageMotionPtkSmartRespawn<I, IT> smartRespawn = new ImageMotionPtkSmartRespawn<>(lowlevel, absoluteMinimumTracks, respawnTrackFraction, respawnCoverageFraction);
    return new WrapImageMotionPtkSmartRespawn<>(smartRespawn);
}
Also used : Homography2D_F64(georegression.struct.homography.Homography2D_F64) ModelManagerHomography2D_F64(georegression.fitting.homography.ModelManagerHomography2D_F64) Ransac(org.ddogleg.fitting.modelset.ransac.Ransac) MotionSe2PointSVD_F64(georegression.fitting.se.MotionSe2PointSVD_F64) ModelManagerAffine2D_F64(georegression.fitting.affine.ModelManagerAffine2D_F64) AssociatedPair(boofcv.struct.geo.AssociatedPair) ModelManagerHomography2D_F64(georegression.fitting.homography.ModelManagerHomography2D_F64) WrapImageMotionPtkSmartRespawn(boofcv.abst.sfm.d2.WrapImageMotionPtkSmartRespawn) ModelManagerSe2_F64(georegression.fitting.se.ModelManagerSe2_F64) Se2_F64(georegression.struct.se.Se2_F64) ModelManagerSe2_F64(georegression.fitting.se.ModelManagerSe2_F64) ModelManagerAffine2D_F64(georegression.fitting.affine.ModelManagerAffine2D_F64) Affine2D_F64(georegression.struct.affine.Affine2D_F64) Point2D_F64(georegression.struct.point.Point2D_F64) WrapImageMotionPtkSmartRespawn(boofcv.abst.sfm.d2.WrapImageMotionPtkSmartRespawn)

Example 3 with Homography2D_F64

use of georegression.struct.homography.Homography2D_F64 in project BoofCV by lessthanoptimal.

From the class ExampleBackgroundRemovalMoving, method main:

/**
 * Demonstrates background removal with a moving camera: estimates 2D image motion
 * (homography) with a KLT tracker, then feeds the motion into a moving background
 * model to segment foreground from background, visualizing results in a GUI loop.
 */
public static void main(String[] args) {
    // Example with a moving camera.  Highlights why motion estimation is sometimes required
    String fileName = UtilIO.pathExample("tracking/chipmunk.mjpeg");
    // Camera has a bit of jitter in it.  Static kinda works but motion reduces false positives
    // String fileName = UtilIO.pathExample("background/horse_jitter.mp4");
    // Comment/Uncomment to switch input image type
    ImageType imageType = ImageType.single(GrayF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedU8.class);
    // Configure the feature detector
    ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
    confDetector.threshold = 10;
    confDetector.maxFeatures = 300;
    confDetector.radius = 6;
    // Use a KLT tracker
    PointTracker tracker = FactoryPointTracker.klt(new int[] { 1, 2, 4, 8 }, confDetector, 3, GrayF32.class, null);
    // This estimates the 2D image motion
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D = FactoryMotion2D.createMotion2D(500, 0.5, 3, 100, 0.6, 0.5, false, tracker, new Homography2D_F64());
    ConfigBackgroundBasic configBasic = new ConfigBackgroundBasic(30, 0.005f);
    // Configuration for Gaussian model.  Note that the threshold changes depending on the number of image bands
    // 12 = gray scale and 40 = color
    ConfigBackgroundGaussian configGaussian = new ConfigBackgroundGaussian(12, 0.001f);
    configGaussian.initialVariance = 64;
    configGaussian.minimumDifference = 5;
    // Note that GMM doesn't interpolate the input image. Making it harder to model object edges.
    // However it runs faster because of this.
    ConfigBackgroundGmm configGmm = new ConfigBackgroundGmm();
    configGmm.initialVariance = 1600;
    configGmm.significantWeight = 1e-1f;
    // Comment/Uncomment to switch background mode
    BackgroundModelMoving background = FactoryBackgroundModel.movingBasic(configBasic, new PointTransformHomography_F32(), imageType);
    // FactoryBackgroundModel.movingGaussian(configGaussian, new PointTransformHomography_F32(), imageType);
    // FactoryBackgroundModel.movingGmm(configGmm,new PointTransformHomography_F32(), imageType);
    background.setUnknownValue(1);
    MediaManager media = DefaultMediaManager.INSTANCE;
    SimpleImageSequence video = media.openVideo(fileName, background.getImageType());
    // media.openCamera(null,640,480,background.getImageType());
    // ====== Initialize Images
    // storage for segmented image.  Background = 0, Foreground = 1
    GrayU8 segmented = new GrayU8(video.getNextWidth(), video.getNextHeight());
    // Grey scale image that's the input for motion estimation
    GrayF32 grey = new GrayF32(segmented.width, segmented.height);
    // coordinate frames
    Homography2D_F32 firstToCurrent32 = new Homography2D_F32();
    Homography2D_F32 homeToWorld = new Homography2D_F32();
    // integer division is fine here; the offset only needs to be approximately centered
    homeToWorld.a13 = grey.width / 2;
    homeToWorld.a23 = grey.height / 2;
    // Create a background image twice the size of the input image.  Tell it that the home is in the center
    background.initialize(grey.width * 2, grey.height * 2, homeToWorld);
    BufferedImage visualized = new BufferedImage(segmented.width, segmented.height, BufferedImage.TYPE_INT_RGB);
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImages(visualized, visualized);
    ShowImages.showWindow(gui, "Detections", true);
    double fps = 0;
    // smoothing factor for FPS
    double alpha = 0.01;
    while (video.hasNext()) {
        ImageBase input = video.next();
        long before = System.nanoTime();
        GConvertImage.convert(input, grey);
        if (!motion2D.process(grey)) {
            throw new RuntimeException("Should handle this scenario");
        }
        // motion estimate is F64; background model consumes F32
        Homography2D_F64 firstToCurrent64 = motion2D.getFirstToCurrent();
        ConvertMatrixData.convert(firstToCurrent64, firstToCurrent32);
        background.segment(firstToCurrent32, input, segmented);
        background.updateBackground(firstToCurrent32, input);
        long after = System.nanoTime();
        // exponentially smoothed frames-per-second estimate
        fps = (1.0 - alpha) * fps + alpha * (1.0 / ((after - before) / 1e9));
        VisualizeBinaryData.renderBinary(segmented, false, visualized);
        gui.setImage(0, 0, (BufferedImage) video.getGuiImage());
        gui.setImage(0, 1, visualized);
        gui.repaint();
        System.out.println("FPS = " + fps);
        try {
            Thread.sleep(5);
        } catch (InterruptedException ignored) {
            // BUGFIX: was silently swallowed. Restore the interrupt status and
            // stop processing so the thread responds to interruption.
            Thread.currentThread().interrupt();
            break;
        }
    }
}
Also used : ConfigBackgroundBasic(boofcv.factory.background.ConfigBackgroundBasic) BackgroundModelMoving(boofcv.alg.background.BackgroundModelMoving) SimpleImageSequence(boofcv.io.image.SimpleImageSequence) PointTransformHomography_F32(boofcv.alg.distort.PointTransformHomography_F32) ConfigGeneralDetector(boofcv.abst.feature.detect.interest.ConfigGeneralDetector) Homography2D_F32(georegression.struct.homography.Homography2D_F32) Homography2D_F64(georegression.struct.homography.Homography2D_F64) BufferedImage(java.awt.image.BufferedImage) ImageType(boofcv.struct.image.ImageType) ConfigBackgroundGaussian(boofcv.factory.background.ConfigBackgroundGaussian) GrayF32(boofcv.struct.image.GrayF32) MediaManager(boofcv.io.MediaManager) DefaultMediaManager(boofcv.io.wrapper.DefaultMediaManager) ConfigBackgroundGmm(boofcv.factory.background.ConfigBackgroundGmm) GrayU8(boofcv.struct.image.GrayU8) ImageGridPanel(boofcv.gui.image.ImageGridPanel) PointTracker(boofcv.abst.feature.tracker.PointTracker) FactoryPointTracker(boofcv.factory.feature.tracker.FactoryPointTracker) ImageBase(boofcv.struct.image.ImageBase)

Example 4 with Homography2D_F64

use of georegression.struct.homography.Homography2D_F64 in project BoofCV by lessthanoptimal.

From the class TestGenerateHomographyLinear, method createRandomModel:

/**
 * Creates a homography whose nine elements are each drawn uniformly from [0,1).
 */
@Override
public Homography2D_F64 createRandomModel() {
    // Constructor arguments are evaluated left-to-right, so the sequence of
    // rand.nextDouble() calls matches a field-by-field assignment a11..a33.
    return new Homography2D_F64(
            rand.nextDouble(), rand.nextDouble(), rand.nextDouble(),
            rand.nextDouble(), rand.nextDouble(), rand.nextDouble(),
            rand.nextDouble(), rand.nextDouble(), rand.nextDouble());
}
Also used : Homography2D_F64(georegression.struct.homography.Homography2D_F64)

Example 5 with Homography2D_F64

use of georegression.struct.homography.Homography2D_F64 in project BoofCV by lessthanoptimal.

From the class TestDistanceHomographySq, method createRandomModel:

/**
 * Creates a homography whose nine elements are each drawn uniformly from [0,5).
 */
@Override
public Homography2D_F64 createRandomModel() {
    // Constructor arguments are evaluated left-to-right, preserving the original
    // field-by-field order of rand.nextDouble() calls for a11..a33.
    return new Homography2D_F64(
            rand.nextDouble() * 5, rand.nextDouble() * 5, rand.nextDouble() * 5,
            rand.nextDouble() * 5, rand.nextDouble() * 5, rand.nextDouble() * 5,
            rand.nextDouble() * 5, rand.nextDouble() * 5, rand.nextDouble() * 5);
}
Also used : Homography2D_F64(georegression.struct.homography.Homography2D_F64)

Aggregations

Homography2D_F64 (georegression.struct.homography.Homography2D_F64)18 Point2D_F64 (georegression.struct.point.Point2D_F64)5 GrayF32 (boofcv.struct.image.GrayF32)4 BufferedImage (java.awt.image.BufferedImage)4 ConfigGeneralDetector (boofcv.abst.feature.detect.interest.ConfigGeneralDetector)3 ImageGridPanel (boofcv.gui.image.ImageGridPanel)3 MediaManager (boofcv.io.MediaManager)3 ConvertBufferedImage (boofcv.io.image.ConvertBufferedImage)3 DefaultMediaManager (boofcv.io.wrapper.DefaultMediaManager)3 AssociatedPair (boofcv.struct.geo.AssociatedPair)3 Planar (boofcv.struct.image.Planar)3 ModelManagerHomography2D_F64 (georegression.fitting.homography.ModelManagerHomography2D_F64)3 Affine2D_F64 (georegression.struct.affine.Affine2D_F64)3 PlToGrayMotion2D (boofcv.abst.sfm.d2.PlToGrayMotion2D)2 PixelTransformAffine_F32 (boofcv.alg.distort.PixelTransformAffine_F32)2 PixelTransformHomography_F32 (boofcv.alg.distort.PixelTransformHomography_F32)2 DistanceHomographySq (boofcv.alg.geo.robust.DistanceHomographySq)2 GenerateHomographyLinear (boofcv.alg.geo.robust.GenerateHomographyLinear)2 PixelTransform2_F32 (boofcv.struct.distort.PixelTransform2_F32)2 Affine2D_F32 (georegression.struct.affine.Affine2D_F32)2