Search in sources :

Example 11 with ImageType

use of boofcv.struct.image.ImageType in project BoofCV by lessthanoptimal.

From the class ExampleBackgroundRemovalMoving, method main.

/**
 * Example of background removal with a moving camera. The 2D image motion between the first
 * frame and the current frame is estimated with a KLT point tracker, allowing the moving
 * background model to register each frame before segmenting foreground from background.
 * Displays the input video next to the binary segmentation and prints a smoothed FPS estimate.
 */
public static void main(String[] args) {
    // Example with a moving camera.  Highlights why motion estimation is sometimes required
    String fileName = UtilIO.pathExample("tracking/chipmunk.mjpeg");
    // Camera has a bit of jitter in it.  Static kinda works but motion reduces false positives
    // String fileName = UtilIO.pathExample("background/horse_jitter.mp4");
    // Comment/Uncomment to switch input image type
    ImageType imageType = ImageType.single(GrayF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedU8.class);
    // Configure the feature detector
    ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
    confDetector.threshold = 10;
    confDetector.maxFeatures = 300;
    confDetector.radius = 6;
    // Use a KLT tracker
    PointTracker tracker = FactoryPointTracker.klt(new int[] { 1, 2, 4, 8 }, confDetector, 3, GrayF32.class, null);
    // This estimates the 2D image motion
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D = FactoryMotion2D.createMotion2D(500, 0.5, 3, 100, 0.6, 0.5, false, tracker, new Homography2D_F64());
    ConfigBackgroundBasic configBasic = new ConfigBackgroundBasic(30, 0.005f);
    // Configuration for Gaussian model.  Note that the threshold changes depending on the number of image bands
    // 12 = gray scale and 40 = color
    ConfigBackgroundGaussian configGaussian = new ConfigBackgroundGaussian(12, 0.001f);
    configGaussian.initialVariance = 64;
    configGaussian.minimumDifference = 5;
    // Note that GMM doesn't interpolate the input image. Making it harder to model object edges.
    // However it runs faster because of this.
    ConfigBackgroundGmm configGmm = new ConfigBackgroundGmm();
    configGmm.initialVariance = 1600;
    configGmm.significantWeight = 1e-1f;
    // Comment/Uncomment to switch background mode
    BackgroundModelMoving background = FactoryBackgroundModel.movingBasic(configBasic, new PointTransformHomography_F32(), imageType);
    // FactoryBackgroundModel.movingGaussian(configGaussian, new PointTransformHomography_F32(), imageType);
    // FactoryBackgroundModel.movingGmm(configGmm,new PointTransformHomography_F32(), imageType);
    background.setUnknownValue(1);
    MediaManager media = DefaultMediaManager.INSTANCE;
    SimpleImageSequence video = media.openVideo(fileName, background.getImageType());
    // media.openCamera(null,640,480,background.getImageType());
    // ====== Initialize Images
    // storage for segmented image.  Background = 0, Foreground = 1
    GrayU8 segmented = new GrayU8(video.getNextWidth(), video.getNextHeight());
    // Grey scale image that's the input for motion estimation
    GrayF32 grey = new GrayF32(segmented.width, segmented.height);
    // coordinate frames
    Homography2D_F32 firstToCurrent32 = new Homography2D_F32();
    Homography2D_F32 homeToWorld = new Homography2D_F32();
    homeToWorld.a13 = grey.width / 2;
    homeToWorld.a23 = grey.height / 2;
    // Create a background image twice the size of the input image.  Tell it that the home is in the center
    background.initialize(grey.width * 2, grey.height * 2, homeToWorld);
    BufferedImage visualized = new BufferedImage(segmented.width, segmented.height, BufferedImage.TYPE_INT_RGB);
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImages(visualized, visualized);
    ShowImages.showWindow(gui, "Detections", true);
    double fps = 0;
    // smoothing factor for FPS
    double alpha = 0.01;
    while (video.hasNext()) {
        ImageBase input = video.next();
        long before = System.nanoTime();
        GConvertImage.convert(input, grey);
        if (!motion2D.process(grey)) {
            throw new RuntimeException("Should handle this scenario");
        }
        Homography2D_F64 firstToCurrent64 = motion2D.getFirstToCurrent();
        ConvertMatrixData.convert(firstToCurrent64, firstToCurrent32);
        background.segment(firstToCurrent32, input, segmented);
        background.updateBackground(firstToCurrent32, input);
        long after = System.nanoTime();
        // Exponential moving average of the per-frame processing rate
        fps = (1.0 - alpha) * fps + alpha * (1.0 / ((after - before) / 1e9));
        VisualizeBinaryData.renderBinary(segmented, false, visualized);
        gui.setImage(0, 0, (BufferedImage) video.getGuiImage());
        gui.setImage(0, 1, visualized);
        gui.repaint();
        System.out.println("FPS = " + fps);
        try {
            Thread.sleep(5);
        } catch (InterruptedException e) {
            // Fix: the interrupt was previously swallowed silently. Restore the interrupt
            // status so callers can observe it, and stop processing the video.
            Thread.currentThread().interrupt();
            break;
        }
    }
}
Also used : ConfigBackgroundBasic(boofcv.factory.background.ConfigBackgroundBasic) BackgroundModelMoving(boofcv.alg.background.BackgroundModelMoving) SimpleImageSequence(boofcv.io.image.SimpleImageSequence) PointTransformHomography_F32(boofcv.alg.distort.PointTransformHomography_F32) ConfigGeneralDetector(boofcv.abst.feature.detect.interest.ConfigGeneralDetector) Homography2D_F32(georegression.struct.homography.Homography2D_F32) Homography2D_F64(georegression.struct.homography.Homography2D_F64) BufferedImage(java.awt.image.BufferedImage) ImageType(boofcv.struct.image.ImageType) ConfigBackgroundGaussian(boofcv.factory.background.ConfigBackgroundGaussian) GrayF32(boofcv.struct.image.GrayF32) MediaManager(boofcv.io.MediaManager) DefaultMediaManager(boofcv.io.wrapper.DefaultMediaManager) ConfigBackgroundGmm(boofcv.factory.background.ConfigBackgroundGmm) GrayU8(boofcv.struct.image.GrayU8) ImageGridPanel(boofcv.gui.image.ImageGridPanel) PointTracker(boofcv.abst.feature.tracker.PointTracker) FactoryPointTracker(boofcv.factory.feature.tracker.FactoryPointTracker) ImageBase(boofcv.struct.image.ImageBase)

Example 12 with ImageType

use of boofcv.struct.image.ImageType in project BoofCV by lessthanoptimal.

From the class ExampleBackgroundRemovalStationary, method main.

/**
 * Example of background removal with a stationary camera. No motion estimation is needed;
 * the background model is updated directly from each frame and the moving foreground is
 * segmented into a binary image. Displays the input video next to the segmentation and
 * prints a smoothed FPS estimate.
 */
public static void main(String[] args) {
    String fileName = UtilIO.pathExample("background/street_intersection.mp4");
    // String fileName = UtilIO.pathExample("background/rubixfire.mp4"); // dynamic background
    // String fileName = UtilIO.pathExample("background/horse_jitter.mp4"); // degraded performance because of jitter
    // String fileName = UtilIO.pathExample("tracking/chipmunk.mjpeg"); // Camera moves.  Stationary will fail here
    // Comment/Uncomment to switch input image type
    ImageType imageType = ImageType.single(GrayF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedU8.class);
    ConfigBackgroundGmm configGmm = new ConfigBackgroundGmm();
    // Comment/Uncomment to switch algorithms
    BackgroundModelStationary background = FactoryBackgroundModel.stationaryBasic(new ConfigBackgroundBasic(35, 0.005f), imageType);
    // FactoryBackgroundModel.stationaryGmm(configGmm, imageType);
    MediaManager media = DefaultMediaManager.INSTANCE;
    SimpleImageSequence video = media.openVideo(fileName, background.getImageType());
    // media.openCamera(null,640,480,background.getImageType());
    // Declare storage for segmented image.  1 = moving foreground and 0 = background
    GrayU8 segmented = new GrayU8(video.getNextWidth(), video.getNextHeight());
    BufferedImage visualized = new BufferedImage(segmented.width, segmented.height, BufferedImage.TYPE_INT_RGB);
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImages(visualized, visualized);
    ShowImages.showWindow(gui, "Static Scene: Background Segmentation", true);
    double fps = 0;
    // smoothing factor for FPS
    double alpha = 0.01;
    while (video.hasNext()) {
        ImageBase input = video.next();
        long before = System.nanoTime();
        background.updateBackground(input, segmented);
        long after = System.nanoTime();
        // Exponential moving average of the per-frame processing rate
        fps = (1.0 - alpha) * fps + alpha * (1.0 / ((after - before) / 1e9));
        VisualizeBinaryData.renderBinary(segmented, false, visualized);
        gui.setImage(0, 0, (BufferedImage) video.getGuiImage());
        gui.setImage(0, 1, visualized);
        gui.repaint();
        System.out.println("FPS = " + fps);
        try {
            Thread.sleep(5);
        } catch (InterruptedException e) {
            // Fix: the interrupt was previously swallowed silently. Restore the interrupt
            // status so callers can observe it, and stop processing the video.
            Thread.currentThread().interrupt();
            break;
        }
    }
    System.out.println("done!");
}
Also used : ConfigBackgroundBasic(boofcv.factory.background.ConfigBackgroundBasic) SimpleImageSequence(boofcv.io.image.SimpleImageSequence) BufferedImage(java.awt.image.BufferedImage) ImageType(boofcv.struct.image.ImageType) BackgroundModelStationary(boofcv.alg.background.BackgroundModelStationary) MediaManager(boofcv.io.MediaManager) DefaultMediaManager(boofcv.io.wrapper.DefaultMediaManager) ConfigBackgroundGmm(boofcv.factory.background.ConfigBackgroundGmm) GrayU8(boofcv.struct.image.GrayU8) ImageGridPanel(boofcv.gui.image.ImageGridPanel) ImageBase(boofcv.struct.image.ImageBase)

Example 13 with ImageType

use of boofcv.struct.image.ImageType in project BoofCV by lessthanoptimal.

From the class CompareHessianToConvolution, method setKernel.

/**
 * Configures the reference filter for one derivative by chaining a horizontal and a vertical
 * 1D convolution, and grows the tracked border size to cover the widest kernel radius.
 */
public void setKernel(int which, Kernel1D horizontal, Kernel1D vertical) {
    // Wrap each 1D kernel as an image filter and apply them in sequence
    ImageType filterType = ImageType.single(inputType);
    FilterImageInterface<?, ?> horizFilter = FactoryConvolve.convolve(horizontal, filterType, filterType, BorderType.EXTENDED, true);
    FilterImageInterface<?, ?> vertFilter = FactoryConvolve.convolve(vertical, filterType, filterType, BorderType.EXTENDED, false);
    outputFilters[which] = new FilterSequence(horizFilter, vertFilter);
    // Keep borderSize at least as large as the biggest kernel radius seen so far
    int largestRadius = Math.max(horizontal.getRadius(), vertical.getRadius());
    if (borderSize < largestRadius)
        borderSize = largestRadius;
}
Also used : FilterSequence(boofcv.abst.filter.FilterSequence) ImageType(boofcv.struct.image.ImageType)

Example 14 with ImageType

use of boofcv.struct.image.ImageType in project BoofCV by lessthanoptimal.

From the class GenericFiducialDetectorChecks, method checkRemoveIntrinsic.

/**
 * Runs detection without intrinsics, then with an intrinsic model, then with the model
 * removed again, and verifies the final results match the initial intrinsic-free run.
 */
@Test
public void checkRemoveIntrinsic() {
    for (ImageType imageType : types) {
        ImageBase frame = loadImage(imageType);
        FiducialDetector detector = createDetector(imageType);
        // baseline: no lens model, so the detector cannot estimate 3D pose
        detector.detect(frame);
        assertFalse(detector.is3D());
        assertTrue(detector.totalFound() >= 1);
        Results baseline = extractResults(detector);
        checkBounds(detector);
        // providing a lens model enables 3D pose estimation
        detector.setLensDistortion(loadDistortion(true), frame.width, frame.height);
        assertTrue(detector.is3D());
        assertTrue(detector.totalFound() >= 1);
        // removing the lens model should return the detector to its baseline behavior
        detector.setLensDistortion(null, 0, 0);
        assertFalse(detector.is3D());
        assertTrue(detector.totalFound() >= 1);
        Results current = extractResults(detector);
        // the results after removal must agree with the baseline
        assertEquals(baseline.id.length, current.id.length);
        for (int idx = 0; idx < baseline.id.length; idx++) {
            assertEquals(baseline.id[idx], current.id[idx]);
            assertTrue(baseline.pose.get(idx).T.distance(current.pose.get(idx).T) <= 1e-4);
            assertTrue(MatrixFeatures_DDRM.isIdentical(baseline.pose.get(idx).getR(), current.pose.get(idx).R, 1e-4));
            assertTrue(current.pixel.get(idx).distance(baseline.pixel.get(idx)) <= 1e-4);
        }
    }
}
Also used : ImageBase(boofcv.struct.image.ImageBase) ImageType(boofcv.struct.image.ImageType) Test(org.junit.Test)

Example 15 with ImageType

use of boofcv.struct.image.ImageType in project BoofCV by lessthanoptimal.

From the class GenericFiducialDetectorChecks, method clearLensDistortion.

/**
 * Verifies that setting a lens distortion model and then replacing it with the undistorted
 * model yields the same detections as never having applied distortion at all.
 */
// TODO remove test?  This should be a non-issue now
@Test
public void clearLensDistortion() {
    for (ImageType imageType : types) {
        ImageBase frame = loadImage(imageType);
        FiducialDetector detector = createDetector(imageType);
        // baseline run with the undistorted lens model
        detector.setLensDistortion(loadDistortion(false), frame.width, frame.height);
        detector.detect(frame);
        assertTrue(detector.totalFound() >= 1);
        Results baseline = extractResults(detector);
        // apply real lens distortion and run once
        detector.setLensDistortion(loadDistortion(true), frame.width, frame.height);
        detector.detect(frame);
        // restore the undistorted model and run again
        detector.setLensDistortion(loadDistortion(false), frame.width, frame.height);
        detector.detect(frame);
        Results restored = extractResults(detector);
        // results must be unchanged relative to the baseline
        for (int idx = 0; idx < restored.id.length; idx++) {
            assertEquals(baseline.id[idx], restored.id[idx]);
            assertEquals(0, baseline.pose.get(idx).T.distance(restored.pose.get(idx).T), 1e-8);
            assertTrue(MatrixFeatures_DDRM.isIdentical(baseline.pose.get(idx).R, restored.pose.get(idx).R, 1e-8));
            assertEquals(0, baseline.pixel.get(idx).distance(restored.pixel.get(idx)), 1e-8);
        }
    }
}
Also used : ImageBase(boofcv.struct.image.ImageBase) ImageType(boofcv.struct.image.ImageType) Test(org.junit.Test)

Aggregations

ImageType (boofcv.struct.image.ImageType)36 Test (org.junit.Test)19 ImageBase (boofcv.struct.image.ImageBase)14 ArrayList (java.util.ArrayList)11 File (java.io.File)10 PathLabel (boofcv.io.PathLabel)7 BackgroundModelMoving (boofcv.alg.background.BackgroundModelMoving)4 Homography2D_F32 (georegression.struct.homography.Homography2D_F32)4 Se3_F64 (georegression.struct.se.Se3_F64)4 BufferedImage (java.awt.image.BufferedImage)4 GrayU8 (boofcv.struct.image.GrayU8)3 BackgroundModelStationary (boofcv.alg.background.BackgroundModelStationary)2 ConfigBackgroundBasic (boofcv.factory.background.ConfigBackgroundBasic)2 ConfigBackgroundGaussian (boofcv.factory.background.ConfigBackgroundGaussian)2 ConfigBackgroundGmm (boofcv.factory.background.ConfigBackgroundGmm)2 ImageGridPanel (boofcv.gui.image.ImageGridPanel)2 MediaManager (boofcv.io.MediaManager)2 ConvertBufferedImage (boofcv.io.image.ConvertBufferedImage)2 SimpleImageSequence (boofcv.io.image.SimpleImageSequence)2 DefaultMediaManager (boofcv.io.wrapper.DefaultMediaManager)2