Search in sources :

Example 16 with ConfigGeneralDetector

use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project BoofCV by lessthanoptimal.

In the class ShowFeatureOrientationApp, the method setActiveAlgorithm:

/**
 * Rebuilds the feature visualization whenever the user picks a different
 * orientation algorithm. Detects Harris corners in the current input image,
 * estimates an orientation for each corner with the selected algorithm, and
 * renders the results onto the display panel.
 */
@Override
public synchronized void setActiveAlgorithm(int indexFamily, String name, Object cookie) {
    // Nothing to render until an input image has been supplied.
    if (input == null)
        return;

    RegionOrientation estimator = (RegionOrientation) cookie;
    estimator.setObjectRadius(10);

    // Convert the input into the working band type and set up lazy derivative computation.
    T gray = ConvertBufferedImage.convertFromSingle(input, null, imageType);
    AnyImageDerivative<T, D> derivatives = GImageDerivativeOps.derivativeForScaleSpace(imageType, derivType);
    derivatives.setInput(gray);

    // Detect interest points using a Harris corner detector.
    int detectRadius = 2;
    GeneralFeatureDetector<T, D> cornerDetector =
            FactoryDetectPoint.createHarris(new ConfigGeneralDetector(NUM_FEATURES, detectRadius, 1), false, derivType);

    // Compute only the derivative images this detector actually requires.
    D gradX = null, gradY = null, hessXX = null, hessYY = null, hessXY = null;
    if (cornerDetector.getRequiresGradient()) {
        gradX = derivatives.getDerivative(true);
        gradY = derivatives.getDerivative(false);
    } else if (cornerDetector.getRequiresHessian()) {
        hessXX = derivatives.getDerivative(true, true);
        hessYY = derivatives.getDerivative(false, false);
        hessXY = derivatives.getDerivative(true, false);
    }
    cornerDetector.process(gray, gradX, gradY, hessXX, hessYY, hessXY);
    QueueCorner corners = cornerDetector.getMaximums();

    FancyInterestPointRender visualizer = new FancyInterestPointRender();

    // Hand the estimator whichever image representation its concrete type needs.
    if (estimator instanceof OrientationGradient) {
        ((OrientationGradient<D>) estimator).setImage(derivatives.getDerivative(true), derivatives.getDerivative(false));
    } else if (estimator instanceof OrientationIntegral) {
        T integral = GIntegralImageOps.transform(gray, null);
        ((OrientationIntegral<T>) estimator).setImage(integral);
    } else if (estimator instanceof OrientationImageAverage) {
        ((OrientationImageAverage) estimator).setImage(gray);
    } else {
        throw new IllegalArgumentException("Unknown algorithm type.");
    }

    // Draw each detected point as a circle annotated with its estimated orientation.
    for (int idx = 0; idx < corners.size; idx++) {
        Point2D_I16 c = corners.get(idx);
        double theta = estimator.compute(c.x, c.y);
        visualizer.addCircle(c.x, c.y, radius, Color.RED, theta);
    }

    // Composite the original image with the rendered features and show it.
    BufferedImage output = new BufferedImage(input.getWidth(), input.getHeight(), input.getType());
    Graphics2D g2 = (Graphics2D) output.getGraphics();
    g2.drawImage(input, 0, 0, null);
    g2.setStroke(new BasicStroke(2.5f));
    visualizer.draw(g2);
    panel.setImage(output);
    panel.repaint();
}
Also used : Point2D_I16(georegression.struct.point.Point2D_I16) FancyInterestPointRender(boofcv.gui.feature.FancyInterestPointRender) ConfigGeneralDetector(boofcv.abst.feature.detect.interest.ConfigGeneralDetector) OrientationImageAverage(boofcv.alg.feature.orientation.OrientationImageAverage) FactoryDetectPoint(boofcv.factory.feature.detect.interest.FactoryDetectPoint) BufferedImage(java.awt.image.BufferedImage) ConvertBufferedImage(boofcv.io.image.ConvertBufferedImage) QueueCorner(boofcv.struct.QueueCorner)

Example 17 with ConfigGeneralDetector

use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project BoofCV by lessthanoptimal.

In the class IntensityPointFeatureApp, the method handleAlgorithmChanged:

/**
 * Re-creates the point feature detector after the user selects a different
 * algorithm in the control panel, then reprocesses the current image.
 *
 * @throws IllegalArgumentException if the selected algorithm name is not recognized
 */
private void handleAlgorithmChanged() {
    ConfigGeneralDetector config = new ConfigGeneralDetector();
    config.radius = controlPanel.radius;
    config.maxFeatures = 200;
    // Swap the detector while holding the lock so a concurrently running
    // processing thread never observes a half-configured algorithm.
    synchronized (lockAlgorithm) {
        switch(controlPanel.selected) {
            case "Laplacian":
                // Laplacian blobs can be dark or bright, so local minimums must be detected too
                config.detectMinimums = true;
                detector = FactoryDetectPoint.createHessian(HessianBlobIntensity.Type.TRACE, config, derivType);
                break;
            case "Hessian Det":
                detector = FactoryDetectPoint.createHessian(HessianBlobIntensity.Type.DETERMINANT, config, derivType);
                break;
            case "Harris":
                detector = FactoryDetectPoint.createHarris(config, controlPanel.weighted, derivType);
                break;
            case "Shi Tomasi":
                detector = FactoryDetectPoint.createShiTomasi(config, controlPanel.weighted, derivType);
                break;
            case "FAST":
                detector = FactoryDetectPoint.createFast(null, config, imageType);
                break;
            case "KitRos":
                detector = FactoryDetectPoint.createKitRos(config, derivType);
                break;
            case "Median":
                detector = FactoryDetectPoint.createMedian(config, imageType);
                break;
            default:
                // Name the offending selection so a misconfigured UI entry is easy to diagnose.
                // (Previously threw RuntimeException("Unknown exception"), which described neither
                // the problem nor the value; IllegalArgumentException is a RuntimeException subclass,
                // so existing callers are unaffected.)
                throw new IllegalArgumentException("Unknown detector: " + controlPanel.selected);
        }
    }
    reprocessImageOnly();
}
Also used : ConfigGeneralDetector(boofcv.abst.feature.detect.interest.ConfigGeneralDetector)

Example 18 with ConfigGeneralDetector

use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project BoofCV by lessthanoptimal.

In the class ExampleVisualOdometryDepth, the method main:

/**
 * Demonstrates RGB-D visual odometry: tracks KLT features in the color video,
 * looks up their depth in the aligned depth video, and prints the estimated
 * camera location plus the inlier fraction for every frame.
 *
 * @throws IOException if the calibration file or video streams cannot be opened
 */
public static void main(String[] args) throws IOException {
    MediaManager media = DefaultMediaManager.INSTANCE;
    String directory = UtilIO.pathExample("kinect/straight");

    // Camera description for the RGB-D sensor.
    VisualDepthParameters param = CalibrationIO.load(media.openFile(directory + "visualdepth.yaml"));

    // Configure a pyramidal KLT tracker for the image features.
    PkltConfig configKlt = new PkltConfig();
    configKlt.pyramidScaling = new int[] { 1, 2, 4, 8 };
    configKlt.templateRadius = 3;
    PointTrackerTwoPass<GrayU8> featureTracker =
            FactoryPointTrackerTwoPass.klt(configKlt, new ConfigGeneralDetector(600, 3, 1), GrayU8.class, GrayS16.class);

    // Converts raw depth values (millimeters) into 3D points.
    DepthSparse3D<GrayU16> sparseDepth = new DepthSparse3D.I<>(1e-3);

    // Create the depth-based visual odometry estimator.
    DepthVisualOdometry<GrayU8, GrayU16> visualOdometry =
            FactoryVisualOdometry.depthDepthPnP(1.5, 120, 2, 200, 50, true, sparseDepth, tracker, GrayU8.class, GrayU16.class);

    // Pass in intrinsic/extrinsic calibration.  This can be changed in the future.
    visualOdometry.setCalibration(param.visualParam, new DoNothing2Transform2_F32());

    // Open the synchronized color and depth sequences.
    SimpleImageSequence<GrayU8> videoVisual = media.openVideo(directory + "rgb.mjpeg", ImageType.single(GrayU8.class));
    SimpleImageSequence<GrayU16> videoDepth = media.openVideo(directory + "depth.mpng", ImageType.single(GrayU16.class));

    // Estimate motion frame by frame and report the camera's position.
    while (videoVisual.hasNext()) {
        GrayU8 frameVisual = videoVisual.next();
        GrayU16 frameDepth = videoDepth.next();

        if (!visualOdometry.process(frameVisual, frameDepth)) {
            throw new RuntimeException("VO Failed!");
        }

        Se3_F64 cameraToWorld = visualOdometry.getCameraToWorld();
        Vector3D_F64 T = cameraToWorld.getT();
        System.out.printf("Location %8.2f %8.2f %8.2f      inliers %s\n", T.x, T.y, T.z, inlierPercent(visualOdometry));
    }
}
Also used : VisualDepthParameters(boofcv.struct.calib.VisualDepthParameters) GrayU16(boofcv.struct.image.GrayU16) PkltConfig(boofcv.alg.tracker.klt.PkltConfig) ConfigGeneralDetector(boofcv.abst.feature.detect.interest.ConfigGeneralDetector) DoNothing2Transform2_F32(boofcv.struct.distort.DoNothing2Transform2_F32) Vector3D_F64(georegression.struct.point.Vector3D_F64) MediaManager(boofcv.io.MediaManager) DefaultMediaManager(boofcv.io.wrapper.DefaultMediaManager) GrayU8(boofcv.struct.image.GrayU8) Se3_F64(georegression.struct.se.Se3_F64)

Example 19 with ConfigGeneralDetector

use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project BoofCV by lessthanoptimal.

In the class ExampleVisualOdometryMonocularPlane, the method main:

/**
 * Demonstrates monocular visual odometry under a planar-ground assumption:
 * tracks KLT features in a single video stream and prints the estimated
 * camera location plus the inlier fraction for every frame. On a tracking
 * fault the estimator is reset rather than aborting.
 */
public static void main(String[] args) {
    MediaManager media = DefaultMediaManager.INSTANCE;
    String directory = UtilIO.pathExample("vo/drc/");

    // Camera calibration plus the plane's relationship to the camera.
    MonoPlaneParameters calibration = CalibrationIO.load(media.openFile(directory + "mono_plane.yaml"));
    SimpleImageSequence<GrayU8> video = media.openVideo(directory + "left.mjpeg", ImageType.single(GrayU8.class));

    // Configure a pyramidal KLT tracker for the image features.
    PkltConfig configKlt = new PkltConfig();
    configKlt.pyramidScaling = new int[] { 1, 2, 4, 8 };
    configKlt.templateRadius = 3;
    ConfigGeneralDetector configDetector = new ConfigGeneralDetector(600, 3, 1);
    PointTracker<GrayU8> featureTracker = FactoryPointTracker.klt(configKlt, configDetector, GrayU8.class, null);

    // Create the plane-constrained monocular odometry estimator.
    MonocularPlaneVisualOdometry<GrayU8> visualOdometry =
            FactoryVisualOdometry.monoPlaneInfinity(75, 2, 1.5, 200, tracker, ImageType.single(GrayU8.class));

    // Pass in intrinsic/extrinsic calibration.  This can be changed in the future.
    visualOdometry.setCalibration(calibration);

    // Estimate motion frame by frame and report the camera's position.
    while (video.hasNext()) {
        GrayU8 frame = video.next();

        if (!visualOdometry.process(frame)) {
            // Monocular VO can lose track; recover by resetting instead of aborting.
            System.out.println("Fault!");
            visualOdometry.reset();
        }

        Se3_F64 cameraToWorld = visualOdometry.getCameraToWorld();
        Vector3D_F64 T = cameraToWorld.getT();
        System.out.printf("Location %8.2f %8.2f %8.2f      inliers %s\n", T.x, T.y, T.z, inlierPercent(visualOdometry));
    }
}
Also used : Vector3D_F64(georegression.struct.point.Vector3D_F64) PkltConfig(boofcv.alg.tracker.klt.PkltConfig) MediaManager(boofcv.io.MediaManager) DefaultMediaManager(boofcv.io.wrapper.DefaultMediaManager) ConfigGeneralDetector(boofcv.abst.feature.detect.interest.ConfigGeneralDetector) GrayU8(boofcv.struct.image.GrayU8) MonoPlaneParameters(boofcv.struct.calib.MonoPlaneParameters) Se3_F64(georegression.struct.se.Se3_F64)

Example 20 with ConfigGeneralDetector

use of boofcv.abst.feature.detect.interest.ConfigGeneralDetector in project BoofCV by lessthanoptimal.

In the class ExampleVisualOdometryStereo, the method main:

/**
 * Demonstrates stereo visual odometry: tracks KLT features in the left camera,
 * recovers their depth via sparse stereo disparity, and prints the estimated
 * camera location plus the inlier fraction for every frame.
 */
public static void main(String[] args) {
    MediaManager media = DefaultMediaManager.INSTANCE;
    String directory = UtilIO.pathExample("vo/backyard/");

    // Stereo rig calibration and the two synchronized video streams.
    StereoParameters stereoParam = CalibrationIO.load(media.openFile(directory + "stereo.yaml"));
    SimpleImageSequence<GrayU8> video1 = media.openVideo(directory + "left.mjpeg", ImageType.single(GrayU8.class));
    SimpleImageSequence<GrayU8> video2 = media.openVideo(directory + "right.mjpeg", ImageType.single(GrayU8.class));

    // Configure a pyramidal KLT tracker for the image features.
    PkltConfig configKlt = new PkltConfig();
    configKlt.pyramidScaling = new int[] { 1, 2, 4, 8 };
    configKlt.templateRadius = 3;
    PointTrackerTwoPass<GrayU8> featureTracker =
            FactoryPointTrackerTwoPass.klt(configKlt, new ConfigGeneralDetector(600, 3, 1), GrayU8.class, GrayS16.class);

    // Sparse winner-take-all disparity computes depth only at tracked points.
    StereoDisparitySparse<GrayU8> disparity =
            FactoryStereoDisparity.regionSparseWta(0, 150, 3, 3, 30, -1, true, GrayU8.class);

    // Create the stereo visual odometry estimator.
    StereoVisualOdometry<GrayU8> visualOdometry =
            FactoryVisualOdometry.stereoDepth(1.5, 120, 2, 200, 50, true, disparity, tracker, GrayU8.class);

    // Pass in intrinsic/extrinsic calibration.  This can be changed in the future.
    visualOdometry.setCalibration(stereoParam);

    // Estimate motion frame by frame and report the camera's position.
    while (video1.hasNext()) {
        GrayU8 frameLeft = video1.next();
        GrayU8 frameRight = video2.next();

        if (!visualOdometry.process(frameLeft, frameRight)) {
            throw new RuntimeException("VO Failed!");
        }

        Se3_F64 cameraToWorld = visualOdometry.getCameraToWorld();
        Vector3D_F64 T = cameraToWorld.getT();
        System.out.printf("Location %8.2f %8.2f %8.2f      inliers %s\n", T.x, T.y, T.z, inlierPercent(visualOdometry));
    }
}
Also used : PkltConfig(boofcv.alg.tracker.klt.PkltConfig) ConfigGeneralDetector(boofcv.abst.feature.detect.interest.ConfigGeneralDetector) Vector3D_F64(georegression.struct.point.Vector3D_F64) MediaManager(boofcv.io.MediaManager) DefaultMediaManager(boofcv.io.wrapper.DefaultMediaManager) GrayU8(boofcv.struct.image.GrayU8) StereoParameters(boofcv.struct.calib.StereoParameters) Se3_F64(georegression.struct.se.Se3_F64)

Aggregations

ConfigGeneralDetector (boofcv.abst.feature.detect.interest.ConfigGeneralDetector)28 PkltConfig (boofcv.alg.tracker.klt.PkltConfig)16 GrayF32 (boofcv.struct.image.GrayF32)10 GrayU8 (boofcv.struct.image.GrayU8)8 ScoreAssociateHamming_B (boofcv.abst.feature.associate.ScoreAssociateHamming_B)6 MediaManager (boofcv.io.MediaManager)6 DefaultMediaManager (boofcv.io.wrapper.DefaultMediaManager)6 BufferedImage (java.awt.image.BufferedImage)6 ConvertBufferedImage (boofcv.io.image.ConvertBufferedImage)5 DescribeRegionPoint (boofcv.abst.feature.describe.DescribeRegionPoint)4 PointTracker (boofcv.abst.feature.tracker.PointTracker)4 GeneralFeatureDetector (boofcv.alg.feature.detect.interest.GeneralFeatureDetector)4 FactoryPointTracker (boofcv.factory.feature.tracker.FactoryPointTracker)4 FactoryDescribeRegionPoint (boofcv.factory.feature.describe.FactoryDescribeRegionPoint)3 ImageGridPanel (boofcv.gui.image.ImageGridPanel)3 TupleDesc_B (boofcv.struct.feature.TupleDesc_B)3 Planar (boofcv.struct.image.Planar)3 Homography2D_F64 (georegression.struct.homography.Homography2D_F64)3 Vector3D_F64 (georegression.struct.point.Vector3D_F64)3 Se3_F64 (georegression.struct.se.Se3_F64)3