Search in sources :

Example 31 with Planar

use of boofcv.struct.image.Planar in project BoofCV by lessthanoptimal.

Method renderStitching of the class ExampleImageStitching.

/**
 * Renders and displays the stitched together images
 */
/**
 * Renders and displays the two images stitched together into a single view.
 * Image A is scaled down and centered in the output, then image B is warped on top of it
 * using the provided homography. An orange quadrilateral is drawn around the projected
 * outline of image B so its placement is easy to see.
 *
 * @param imageA First input image; also defines the output image size.
 * @param imageB Second input image, warped into A's coordinate frame.
 * @param fromAtoB Homography transform from image A's frame to image B's frame.
 */
public static void renderStitching(BufferedImage imageA, BufferedImage imageB, Homography2D_F64 fromAtoB) {
    // scale applied to image A so that both images can fit inside the output
    double scale = 0.5;
    // Convert into a BoofCV color format
    Planar<GrayF32> colorA = ConvertBufferedImage.convertFromPlanar(imageA, null, true, GrayF32.class);
    Planar<GrayF32> colorB = ConvertBufferedImage.convertFromPlanar(imageB, null, true, GrayF32.class);
    // Where the output images are rendered into
    Planar<GrayF32> work = colorA.createSameShape();
    // Shrink and translate A so that the whole stitched result can appear inside the work image
    Homography2D_F64 fromAToWork = new Homography2D_F64(scale, 0, colorA.width / 4, 0, scale, colorA.height / 4, 0, 0, 1);
    Homography2D_F64 fromWorkToA = fromAToWork.invert(null);
    // Used to render the results onto an image
    PixelTransformHomography_F32 model = new PixelTransformHomography_F32();
    InterpolatePixelS<GrayF32> interp = FactoryInterpolation.bilinearPixelS(GrayF32.class, BorderType.ZERO);
    ImageDistort<Planar<GrayF32>, Planar<GrayF32>> distort = DistortSupport.createDistortPL(GrayF32.class, model, interp, false);
    distort.setRenderAll(false);
    // Render first image
    model.set(fromWorkToA);
    distort.apply(colorA, work);
    // Render second image on top of the first
    Homography2D_F64 fromWorkToB = fromWorkToA.concat(fromAtoB, null);
    model.set(fromWorkToB);
    distort.apply(colorB, work);
    // Convert the rendered image into a BufferedImage.
    // BufferedImage's constructor rejects TYPE_CUSTOM (0), which images loaded by ImageIO may
    // report, so fall back to TYPE_INT_RGB in that case instead of throwing.
    int outputType = imageA.getType() == BufferedImage.TYPE_CUSTOM ? BufferedImage.TYPE_INT_RGB : imageA.getType();
    BufferedImage output = new BufferedImage(work.width, work.height, outputType);
    ConvertBufferedImage.convertTo(work, output, true);
    Graphics2D g2 = output.createGraphics();
    // draw lines around the distorted image to make it easier to see
    Homography2D_F64 fromBtoWork = fromWorkToB.invert(null);
    Point2D_I32[] corners = new Point2D_I32[4];
    corners[0] = renderPoint(0, 0, fromBtoWork);
    corners[1] = renderPoint(colorB.width, 0, fromBtoWork);
    corners[2] = renderPoint(colorB.width, colorB.height, fromBtoWork);
    corners[3] = renderPoint(0, colorB.height, fromBtoWork);
    g2.setColor(Color.ORANGE);
    g2.setStroke(new BasicStroke(4));
    g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
    g2.drawLine(corners[0].x, corners[0].y, corners[1].x, corners[1].y);
    g2.drawLine(corners[1].x, corners[1].y, corners[2].x, corners[2].y);
    g2.drawLine(corners[2].x, corners[2].y, corners[3].x, corners[3].y);
    g2.drawLine(corners[3].x, corners[3].y, corners[0].x, corners[0].y);
    ShowImages.showWindow(output, "Stitched Images", true);
}
Also used : PixelTransformHomography_F32(boofcv.alg.distort.PixelTransformHomography_F32) Homography2D_F64(georegression.struct.homography.Homography2D_F64) BufferedImage(java.awt.image.BufferedImage) ConvertBufferedImage(boofcv.io.image.ConvertBufferedImage) GrayF32(boofcv.struct.image.GrayF32) Planar(boofcv.struct.image.Planar) Point2D_I32(georegression.struct.point.Point2D_I32)

Example 32 with Planar

use of boofcv.struct.image.Planar in project BoofCV by lessthanoptimal.

Method main of the class ExampleOverheadView.

/**
 * Renders a synthetic overhead (bird's-eye) view of the road from a single calibrated
 * camera image, then displays the input next to the generated overhead view.
 */
public static void main(String[] args) {
    // Load the input image and convert it into BoofCV's planar color format
    BufferedImage inputBuffered = UtilImageIO.loadImage(UtilIO.pathExample("road/left01.png"));
    Planar<GrayU8> imageColor = ConvertBufferedImage.convertFromPlanar(inputBuffered, null, true, GrayU8.class);
    // Calibration: intrinsic stereo parameters and the ground-plane-to-camera transform
    StereoParameters stereoParam = CalibrationIO.load(UtilIO.pathExample("road/stereo01.yaml"));
    Se3_F64 groundToLeft = CalibrationIO.load(UtilIO.pathExample("road/ground_to_left_01.yaml"));
    CreateSyntheticOverheadView<Planar<GrayU8>> overheadGenerator = new CreateSyntheticOverheadViewPL<>(InterpolationType.BILINEAR, 3, GrayU8.class);
    // each overhead cell covers this many world units
    double cellSize = 0.05;
    // Automatically pick reasonable dimensions and center for the overhead map
    SelectOverheadParameters paramSelector = new SelectOverheadParameters(cellSize, 20, 0.5);
    paramSelector.process(stereoParam.left, groundToLeft);
    int mapWidth = paramSelector.getOverheadWidth();
    int mapHeight = paramSelector.getOverheadHeight();
    Planar<GrayU8> overheadColor = new Planar<>(GrayU8.class, mapWidth, mapHeight, 3);
    overheadGenerator.configure(stereoParam.left, groundToLeft, paramSelector.getCenterX(), paramSelector.getCenterY(), cellSize, overheadColor.width, overheadColor.height);
    overheadGenerator.process(imageColor, overheadColor);
    // note that the left/right values are swapped in the overhead image.  This is an artifact of the plane's
    // 2D coordinate system having +y pointing up, while images have +y pointing down.
    BufferedImage overheadBuffered = ConvertBufferedImage.convertTo(overheadColor, null, true);
    ShowImages.showWindow(inputBuffered, "Input Image", true);
    ShowImages.showWindow(overheadBuffered, "Overhead Image", true);
}
Also used : CreateSyntheticOverheadViewPL(boofcv.alg.sfm.overhead.CreateSyntheticOverheadViewPL) SelectOverheadParameters(boofcv.alg.sfm.overhead.SelectOverheadParameters) Planar(boofcv.struct.image.Planar) GrayU8(boofcv.struct.image.GrayU8) StereoParameters(boofcv.struct.calib.StereoParameters) BufferedImage(java.awt.image.BufferedImage) ConvertBufferedImage(boofcv.io.image.ConvertBufferedImage) Se3_F64(georegression.struct.se.Se3_F64)

Example 33 with Planar

use of boofcv.struct.image.Planar in project BoofCV by lessthanoptimal.

Method main of the class ExampleVideoMosaic.

/**
 * Builds a video mosaic by tracking KLT features, estimating frame-to-frame homographies,
 * and stitching each frame into a growing composite image. The GUI shows the current
 * input frame alongside the mosaic, with a red quadrilateral marking the current frame's
 * footprint inside the mosaic.
 */
public static void main(String[] args) {
    // Configure the feature detector
    ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
    confDetector.threshold = 1;
    confDetector.maxFeatures = 300;
    confDetector.radius = 3;
    // Use a KLT tracker
    PointTracker<GrayF32> tracker = FactoryPointTracker.klt(new int[] { 1, 2, 4, 8 }, confDetector, 3, GrayF32.class, GrayF32.class);
    // This estimates the 2D image motion
    // An Affine2D_F64 model also works quite well.
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D = FactoryMotion2D.createMotion2D(220, 3, 2, 30, 0.6, 0.5, false, tracker, new Homography2D_F64());
    // wrap it so it output color images while estimating motion from gray
    ImageMotion2D<Planar<GrayF32>, Homography2D_F64> motion2DColor = new PlToGrayMotion2D<>(motion2D, GrayF32.class);
    // This fuses the images together
    StitchingFromMotion2D<Planar<GrayF32>, Homography2D_F64> stitch = FactoryMotion2D.createVideoStitch(0.5, motion2DColor, ImageType.pl(3, GrayF32.class));
    // Load an image sequence
    MediaManager media = DefaultMediaManager.INSTANCE;
    String fileName = UtilIO.pathExample("mosaic/airplane01.mjpeg");
    SimpleImageSequence<Planar<GrayF32>> video = media.openVideo(fileName, ImageType.pl(3, GrayF32.class));
    Planar<GrayF32> frame = video.next();
    // shrink the input image and center it
    Homography2D_F64 shrink = new Homography2D_F64(0.5, 0, frame.width / 4, 0, 0.5, frame.height / 4, 0, 0, 1);
    shrink = shrink.invert(null);
    // The mosaic will be larger in terms of pixels but the image will be scaled down.
    // To change this into stabilization just make it the same size as the input with no shrink.
    stitch.configure(frame.width, frame.height, shrink);
    // process the first frame
    stitch.process(frame);
    // Create the GUI for displaying the results + input image
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImage(0, 0, new BufferedImage(frame.width, frame.height, BufferedImage.TYPE_INT_RGB));
    gui.setImage(0, 1, new BufferedImage(frame.width, frame.height, BufferedImage.TYPE_INT_RGB));
    gui.setPreferredSize(new Dimension(3 * frame.width, frame.height * 2));
    ShowImages.showWindow(gui, "Example Mosaic", true);
    boolean enlarged = false;
    // process the video sequence one frame at a time
    while (video.hasNext()) {
        frame = video.next();
        if (!stitch.process(frame))
            throw new RuntimeException("You should handle failures");
        // if the current image is close to the image border recenter the mosaic
        StitchingFromMotion2D.Corners corners = stitch.getImageCorners(frame.width, frame.height, null);
        if (nearBorder(corners.p0, stitch) || nearBorder(corners.p1, stitch) || nearBorder(corners.p2, stitch) || nearBorder(corners.p3, stitch)) {
            stitch.setOriginToCurrent();
            // only enlarge the image once
            if (!enlarged) {
                enlarged = true;
                // double the image size and shift it over to keep it centered
                int widthOld = stitch.getStitchedImage().width;
                int heightOld = stitch.getStitchedImage().height;
                int widthNew = widthOld * 2;
                int heightNew = heightOld * 2;
                int tranX = (widthNew - widthOld) / 2;
                int tranY = (heightNew - heightOld) / 2;
                Homography2D_F64 newToOldStitch = new Homography2D_F64(1, 0, -tranX, 0, 1, -tranY, 0, 0, 1);
                stitch.resizeStitchImage(widthNew, heightNew, newToOldStitch);
                // replace the mosaic display buffer since its dimensions changed
                gui.setImage(0, 1, new BufferedImage(widthNew, heightNew, BufferedImage.TYPE_INT_RGB));
            }
            // corners must be recomputed: setOriginToCurrent/resize changed the mosaic frame
            corners = stitch.getImageCorners(frame.width, frame.height, null);
        }
        // display the mosaic
        ConvertBufferedImage.convertTo(frame, gui.getImage(0, 0), true);
        ConvertBufferedImage.convertTo(stitch.getStitchedImage(), gui.getImage(0, 1), true);
        // draw a red quadrilateral around the current frame in the mosaic
        Graphics2D g2 = gui.getImage(0, 1).createGraphics();
        g2.setColor(Color.RED);
        g2.drawLine((int) corners.p0.x, (int) corners.p0.y, (int) corners.p1.x, (int) corners.p1.y);
        g2.drawLine((int) corners.p1.x, (int) corners.p1.y, (int) corners.p2.x, (int) corners.p2.y);
        g2.drawLine((int) corners.p2.x, (int) corners.p2.y, (int) corners.p3.x, (int) corners.p3.y);
        g2.drawLine((int) corners.p3.x, (int) corners.p3.y, (int) corners.p0.x, (int) corners.p0.y);
        gui.repaint();
        // throttle the speed just in case it's on a fast computer
        BoofMiscOps.pause(50);
    }
}
Also used : StitchingFromMotion2D(boofcv.alg.sfm.d2.StitchingFromMotion2D) PlToGrayMotion2D(boofcv.abst.sfm.d2.PlToGrayMotion2D) ConfigGeneralDetector(boofcv.abst.feature.detect.interest.ConfigGeneralDetector) Homography2D_F64(georegression.struct.homography.Homography2D_F64) BufferedImage(java.awt.image.BufferedImage) ConvertBufferedImage(boofcv.io.image.ConvertBufferedImage) GrayF32(boofcv.struct.image.GrayF32) MediaManager(boofcv.io.MediaManager) DefaultMediaManager(boofcv.io.wrapper.DefaultMediaManager) Planar(boofcv.struct.image.Planar) ImageGridPanel(boofcv.gui.image.ImageGridPanel)

Example 34 with Planar

use of boofcv.struct.image.Planar in project BoofCV by lessthanoptimal.

Method main of the class ExampleVideoStabilization.

/**
 * Stabilizes a shaky video by tracking KLT features, estimating a frame-to-frame
 * homography, and rendering each frame into a fixed reference frame. The GUI shows the
 * raw input frame next to the stabilized output.
 */
public static void main(String[] args) {
    // Configure the feature detector
    ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
    confDetector.threshold = 10;
    confDetector.maxFeatures = 300;
    confDetector.radius = 2;
    // Use a KLT tracker
    PointTracker<GrayF32> tracker = FactoryPointTracker.klt(new int[] { 1, 2, 4, 8 }, confDetector, 3, GrayF32.class, GrayF32.class);
    // This estimates the 2D image motion
    // An Affine2D_F64 model also works quite well.
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D = FactoryMotion2D.createMotion2D(200, 3, 2, 30, 0.6, 0.5, false, tracker, new Homography2D_F64());
    // wrap it so it output color images while estimating motion from gray
    ImageMotion2D<Planar<GrayF32>, Homography2D_F64> motion2DColor = new PlToGrayMotion2D<>(motion2D, GrayF32.class);
    // This fuses the images together
    StitchingFromMotion2D<Planar<GrayF32>, Homography2D_F64> stabilize = FactoryMotion2D.createVideoStitch(0.5, motion2DColor, ImageType.pl(3, GrayF32.class));
    // Load an image sequence
    MediaManager media = DefaultMediaManager.INSTANCE;
    String fileName = UtilIO.pathExample("shake.mjpeg");
    SimpleImageSequence<Planar<GrayF32>> video = media.openVideo(fileName, ImageType.pl(3, GrayF32.class));
    Planar<GrayF32> frame = video.next();
    // The output image size is the same as the input image size
    stabilize.configure(frame.width, frame.height, null);
    // process the first frame
    stabilize.process(frame);
    // Create the GUI for displaying the results + input image
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImage(0, 0, new BufferedImage(frame.width, frame.height, BufferedImage.TYPE_INT_RGB));
    gui.setImage(0, 1, new BufferedImage(frame.width, frame.height, BufferedImage.TYPE_INT_RGB));
    gui.autoSetPreferredSize();
    ShowImages.showWindow(gui, "Example Stabilization", true);
    // process the video sequence one frame at a time
    while (video.hasNext()) {
        // BUG FIX: the frame must be assigned to 'frame' before processing, otherwise the
        // input panel below would keep displaying the very first frame forever
        frame = video.next();
        if (!stabilize.process(frame))
            throw new RuntimeException("Don't forget to handle failures!");
        // display the current input frame and the stabilized image side by side
        ConvertBufferedImage.convertTo(frame, gui.getImage(0, 0), true);
        ConvertBufferedImage.convertTo(stabilize.getStitchedImage(), gui.getImage(0, 1), true);
        gui.repaint();
        // throttle the speed just in case it's on a fast computer
        BoofMiscOps.pause(50);
    }
}
Also used : PlToGrayMotion2D(boofcv.abst.sfm.d2.PlToGrayMotion2D) ConfigGeneralDetector(boofcv.abst.feature.detect.interest.ConfigGeneralDetector) Homography2D_F64(georegression.struct.homography.Homography2D_F64) BufferedImage(java.awt.image.BufferedImage) ConvertBufferedImage(boofcv.io.image.ConvertBufferedImage) GrayF32(boofcv.struct.image.GrayF32) MediaManager(boofcv.io.MediaManager) DefaultMediaManager(boofcv.io.wrapper.DefaultMediaManager) Planar(boofcv.struct.image.Planar) ImageGridPanel(boofcv.gui.image.ImageGridPanel)

Example 35 with Planar

use of boofcv.struct.image.Planar in project BoofCV by lessthanoptimal.

Method main of the class ExampleFisheyeToEquirectangular.

/**
 * Fuses images from two back-to-back 185-degree fisheye cameras into a single 800x400
 * equirectangular image. Each camera is masked to its valid field of view and assigned a
 * rotation placing it in a common spherical frame before rendering.
 */
public static void main(String[] args) {
    // Path to image data and calibration data
    String fisheyePath = UtilIO.pathExample("fisheye/theta");
    // load the fisheye camera parameters
    CameraUniversalOmni model0 = CalibrationIO.load(new File(fisheyePath, "front.yaml"));
    CameraUniversalOmni model1 = CalibrationIO.load(new File(fisheyePath, "back.yaml"));
    LensDistortionWideFOV distort0 = new LensDistortionUniversalOmni(model0);
    LensDistortionWideFOV distort1 = new LensDistortionUniversalOmni(model1);
    ImageType<Planar<GrayF32>> imageType = ImageType.pl(3, GrayF32.class);
    InterpolatePixel<Planar<GrayF32>> interp = FactoryInterpolation.createPixel(0, 255, InterpolationType.BILINEAR, BorderType.ZERO, imageType);
    ImageDistort<Planar<GrayF32>, Planar<GrayF32>> distort = FactoryDistort.distort(false, interp, imageType);
    // This will create an equirectangular image with 800 x 400 pixels
    MultiCameraToEquirectangular<Planar<GrayF32>> alg = new MultiCameraToEquirectangular<>(distort, 800, 400, imageType);
    // this is an important parameter and is used to filter out falsely mirrored pixels
    alg.setMaskToleranceAngle(UtilAngle.radian(0.1f));
    // camera has a known FOV of 185 degrees, but the edges are likely to be noisy,
    // so crop the mask a bit to 182 degrees
    GrayU8 mask0 = createMask(model0, distort0, UtilAngle.radian(182));
    GrayU8 mask1 = createMask(model1, distort1, UtilAngle.radian(182));
    // Rotate camera axis so that +x is forward and not +z and make it visually pleasing
    FMatrixRMaj adjR = ConvertRotation3D_F32.eulerToMatrix(EulerType.XYZ, GrlConstants.F_PI / 2, 0, 0, null);
    // Rotation from the front camera to the back facing camera.
    // This is only an approximation.  Should be determined through calibration.
    FMatrixRMaj f2b = ConvertRotation3D_F32.eulerToMatrix(EulerType.ZYX, GrlConstants.F_PI, 0, 0, null);
    Se3_F32 frontToFront = new Se3_F32();
    frontToFront.setRotation(adjR);
    Se3_F32 frontToBack = new Se3_F32();
    CommonOps_FDRM.mult(f2b, adjR, frontToBack.R);
    // add the camera and specify which pixels are valid.  These functions precompute the entire transform
    // and can be relatively slow, but generating the equirectangular image should be much faster.
    // BUG FIX: each camera must be paired with its own transform — the front camera
    // (model0/distort0/mask0) gets frontToFront and the back camera gets frontToBack;
    // the pairings were previously swapped.
    alg.addCamera(frontToFront, distort0, mask0);
    alg.addCamera(frontToBack, distort1, mask1);
    // Load fisheye RGB image
    BufferedImage buffered0 = UtilImageIO.loadImage(fisheyePath, "front_table.jpg");
    Planar<GrayF32> fisheye0 = ConvertBufferedImage.convertFrom(buffered0, true, ImageType.pl(3, GrayF32.class));
    BufferedImage buffered1 = UtilImageIO.loadImage(fisheyePath, "back_table.jpg");
    Planar<GrayF32> fisheye1 = ConvertBufferedImage.convertFrom(buffered1, true, ImageType.pl(3, GrayF32.class));
    List<Planar<GrayF32>> images = new ArrayList<>();
    images.add(fisheye0);
    images.add(fisheye1);
    alg.render(images);
    BufferedImage equiOut = ConvertBufferedImage.convertTo(alg.getRenderedImage(), null, true);
    ShowImages.showWindow(equiOut, "Dual Fisheye to Equirectangular", true);
}
Also used : FMatrixRMaj(org.ejml.data.FMatrixRMaj) LensDistortionWideFOV(boofcv.alg.distort.LensDistortionWideFOV) ArrayList(java.util.ArrayList) BufferedImage(java.awt.image.BufferedImage) ConvertBufferedImage(boofcv.io.image.ConvertBufferedImage) GrayF32(boofcv.struct.image.GrayF32) MultiCameraToEquirectangular(boofcv.alg.distort.spherical.MultiCameraToEquirectangular) CameraUniversalOmni(boofcv.struct.calib.CameraUniversalOmni) Planar(boofcv.struct.image.Planar) GrayU8(boofcv.struct.image.GrayU8) LensDistortionUniversalOmni(boofcv.alg.distort.universal.LensDistortionUniversalOmni) File(java.io.File) Se3_F32(georegression.struct.se.Se3_F32)

Aggregations

Planar (boofcv.struct.image.Planar)73 Test (org.junit.Test)39 GrayF32 (boofcv.struct.image.GrayF32)34 GrayU8 (boofcv.struct.image.GrayU8)28 BufferedImage (java.awt.image.BufferedImage)21 ConvertBufferedImage (boofcv.io.image.ConvertBufferedImage)20 RectangleLength2D_I32 (georegression.struct.shapes.RectangleLength2D_I32)12 File (java.io.File)9 Bitmap (android.graphics.Bitmap)4 ListDisplayPanel (boofcv.gui.ListDisplayPanel)4 ImageGray (boofcv.struct.image.ImageGray)4 ConfigGeneralDetector (boofcv.abst.feature.detect.interest.ConfigGeneralDetector)3 LensDistortionUniversalOmni (boofcv.alg.distort.universal.LensDistortionUniversalOmni)3 MediaManager (boofcv.io.MediaManager)3 DefaultMediaManager (boofcv.io.wrapper.DefaultMediaManager)3 CameraPinhole (boofcv.struct.calib.CameraPinhole)3 GrayU16 (boofcv.struct.image.GrayU16)3 Homography2D_F64 (georegression.struct.homography.Homography2D_F64)3 Se3_F64 (georegression.struct.se.Se3_F64)3 DescribeRegionPoint (boofcv.abst.feature.describe.DescribeRegionPoint)2