Use of boofcv.struct.image.GrayF32 in project BoofCV by lessthanoptimal.
From the class TestInterleavedImageOps, method split2.
@Test
public void split2() {
    InterleavedF32 interleaved = new InterleavedF32(2, 4, 2);
    for (int i = 0; i < interleaved.data.length; i++) {
        interleaved.data[i] = i + 1;
    }
    GrayF32 a = new GrayF32(2, 4);
    GrayF32 b = new GrayF32(2, 4);
    InterleavedImageOps.split2(interleaved, a, b);
    for (int y = 0; y < interleaved.height; y++) {
        for (int x = 0; x < interleaved.width; x++) {
            assertEquals(interleaved.getBand(x, y, 0), a.get(x, y), 1e-8);
            assertEquals(interleaved.getBand(x, y, 1), b.get(x, y), 1e-8);
        }
    }
}
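For reference, split2 simply copies the two interleaved bands into separate single-band images. Below is a minimal hand-rolled equivalent using only the getBand/set accessors already shown; manualSplit2 is an illustrative name, not a BoofCV method.

// Hand-rolled equivalent of InterleavedImageOps.split2 for a two-band image
static void manualSplit2(InterleavedF32 src, GrayF32 band0, GrayF32 band1) {
    for (int y = 0; y < src.height; y++) {
        for (int x = 0; x < src.width; x++) {
            // band index selects which of the two interleaved channels is read
            band0.set(x, y, src.getBand(x, y, 0));
            band1.set(x, y, src.getBand(x, y, 1));
        }
    }
}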
Use of boofcv.struct.image.GrayF32 in project BoofCV by lessthanoptimal.
From the class ExampleRectifyCalibratedStereo, method main.
public static void main(String[] args) {
    String dir = UtilIO.pathExample("calibration/stereo/Bumblebee2_Chess/");
    StereoParameters param = CalibrationIO.load(new File(dir, "stereo.yaml"));

    // load images
    BufferedImage origLeft = UtilImageIO.loadImage(dir, "left05.jpg");
    BufferedImage origRight = UtilImageIO.loadImage(dir, "right05.jpg");

    // distorted input images
    Planar<GrayF32> distLeft = ConvertBufferedImage.convertFromPlanar(origLeft, null, true, GrayF32.class);
    Planar<GrayF32> distRight = ConvertBufferedImage.convertFromPlanar(origRight, null, true, GrayF32.class);

    // storage for undistorted + rectified images
    Planar<GrayF32> rectLeft = distLeft.createSameShape();
    Planar<GrayF32> rectRight = distRight.createSameShape();

    // Compute rectification
    RectifyCalibrated rectifyAlg = RectifyImageOps.createCalibrated();
    Se3_F64 leftToRight = param.getRightToLeft().invert(null);

    // original camera calibration matrices
    DMatrixRMaj K1 = PerspectiveOps.calibrationMatrix(param.getLeft(), (DMatrixRMaj) null);
    DMatrixRMaj K2 = PerspectiveOps.calibrationMatrix(param.getRight(), (DMatrixRMaj) null);
    rectifyAlg.process(K1, new Se3_F64(), K2, leftToRight);

    // rectification matrix for each image
    DMatrixRMaj rect1 = rectifyAlg.getRect1();
    DMatrixRMaj rect2 = rectifyAlg.getRect2();
    // New calibration matrix. Both cameras have the same one after rectification.
    DMatrixRMaj rectK = rectifyAlg.getCalibrationMatrix();

    // Adjust the rectification to make the view area more useful
    RectifyImageOps.fullViewLeft(param.left, rect1, rect2, rectK);
    // RectifyImageOps.allInsideLeft(param.left, rect1, rect2, rectK);

    // undistort and rectify the images
    // TODO simplify this code somehow
    FMatrixRMaj rect1_F32 = new FMatrixRMaj(3, 3);
    FMatrixRMaj rect2_F32 = new FMatrixRMaj(3, 3);
    ConvertMatrixData.convert(rect1, rect1_F32);
    ConvertMatrixData.convert(rect2, rect2_F32);
    ImageDistort<Planar<GrayF32>, Planar<GrayF32>> rectifyImageLeft =
            RectifyImageOps.rectifyImage(param.getLeft(), rect1_F32, BorderType.SKIP, distLeft.getImageType());
    ImageDistort<Planar<GrayF32>, Planar<GrayF32>> rectifyImageRight =
            RectifyImageOps.rectifyImage(param.getRight(), rect2_F32, BorderType.SKIP, distRight.getImageType());
    rectifyImageLeft.apply(distLeft, rectLeft);
    rectifyImageRight.apply(distRight, rectRight);

    // convert for output
    BufferedImage outLeft = ConvertBufferedImage.convertTo(rectLeft, null, true);
    BufferedImage outRight = ConvertBufferedImage.convertTo(rectRight, null, true);

    // show the results; a horizontal line is drawn where the user clicks, making the rectification easier to judge
    ListDisplayPanel panel = new ListDisplayPanel();
    panel.addItem(new RectifiedPairPanel(true, origLeft, origRight), "Original");
    panel.addItem(new RectifiedPairPanel(true, outLeft, outRight), "Rectified");
    ShowImages.showWindow(panel, "Stereo Rectification Calibrated", true);
}
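For intuition, rect1 and rect2 are 3x3 homographies applied to (undistorted) pixel coordinates, and their defining property is that corresponding points land on the same image row afterwards. The sketch below checks that property for one hypothetical pair of matched points p1/p2; the coordinates are made up, and Point2D_F64 and GeometryMath_F64 come from the georegression library that BoofCV builds on.

// Sanity-check sketch: a true correspondence should land on (nearly) the same row
Point2D_F64 p1 = new Point2D_F64(320, 240); // hypothetical point in the left image
Point2D_F64 p2 = new Point2D_F64(301, 236); // its hypothetical match in the right image
Point2D_F64 r1 = new Point2D_F64();
Point2D_F64 r2 = new Point2D_F64();
// multiplies the 3x3 matrix against (x, y, 1) and normalizes by the third coordinate
GeometryMath_F64.mult(rect1, p1, r1);
GeometryMath_F64.mult(rect2, p2, r2);
System.out.println("vertical misalignment after rectification: " + (r1.y - r2.y));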
Use of boofcv.struct.image.GrayF32 in project BoofCV by lessthanoptimal.
From the class ExampleRectifyUncalibratedStereo, method rectify.
/**
 * Rectifies the pair of images using the provided fundamental matrix. Both the fundamental matrix
 * and the set of inliers need to be accurate. Small errors will cause large distortions.
 *
 * @param F Fundamental matrix
 * @param inliers Set of associated pairs between the two images.
 * @param origLeft Original left input image. Used for output purposes.
 * @param origRight Original right input image. Used for output purposes.
 */
public static void rectify(DMatrixRMaj F, List<AssociatedPair> inliers, BufferedImage origLeft, BufferedImage origRight) {
    // Unrectified input images
    Planar<GrayF32> unrectLeft = ConvertBufferedImage.convertFromPlanar(origLeft, null, true, GrayF32.class);
    Planar<GrayF32> unrectRight = ConvertBufferedImage.convertFromPlanar(origRight, null, true, GrayF32.class);

    // storage for rectified images
    Planar<GrayF32> rectLeft = unrectLeft.createSameShape();
    Planar<GrayF32> rectRight = unrectRight.createSameShape();

    // Compute rectification
    RectifyFundamental rectifyAlg = RectifyImageOps.createUncalibrated();
    rectifyAlg.process(F, inliers, origLeft.getWidth(), origLeft.getHeight());

    // rectification matrix for each image
    DMatrixRMaj rect1 = rectifyAlg.getRect1();
    DMatrixRMaj rect2 = rectifyAlg.getRect2();

    // Adjust the rectification to make the view area more useful
    RectifyImageOps.fullViewLeft(origLeft.getWidth(), origLeft.getHeight(), rect1, rect2);
    // RectifyImageOps.allInsideLeft(origLeft.getWidth(), origLeft.getHeight(), rect1, rect2);

    // undistort and rectify the images
    // TODO simplify this code somehow
    FMatrixRMaj rect1_F32 = new FMatrixRMaj(3, 3);
    FMatrixRMaj rect2_F32 = new FMatrixRMaj(3, 3);
    ConvertMatrixData.convert(rect1, rect1_F32);
    ConvertMatrixData.convert(rect2, rect2_F32);
    ImageDistort<GrayF32, GrayF32> imageDistortLeft = RectifyImageOps.rectifyImage(rect1_F32, BorderType.SKIP, GrayF32.class);
    ImageDistort<GrayF32, GrayF32> imageDistortRight = RectifyImageOps.rectifyImage(rect2_F32, BorderType.SKIP, GrayF32.class);
    DistortImageOps.distortPL(unrectLeft, rectLeft, imageDistortLeft);
    DistortImageOps.distortPL(unrectRight, rectRight, imageDistortRight);

    // convert for output
    BufferedImage outLeft = ConvertBufferedImage.convertTo(rectLeft, null, true);
    BufferedImage outRight = ConvertBufferedImage.convertTo(rectRight, null, true);

    // show the results; a horizontal line is drawn where the user clicks, making the rectification easier to judge
    // Don't worry if the image appears upside down
    ShowImages.showWindow(new RectifiedPairPanel(true, outLeft, outRight), "Rectified");
}
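For background, uncalibrated rectification is driven entirely by the epipolar constraint, which is why the javadoc warns that $F$ and the inliers must be accurate. Corresponding points in homogeneous pixel coordinates $x_1$, $x_2$ satisfy

$$x_2^\top F \, x_1 = 0,$$

and methods in the RectifyFundamental family search for homographies $H_1$, $H_2$ that carry $F$ into the fundamental matrix of a horizontally aligned pair,

$$H_2^{-\top} F \, H_1^{-1} \;\sim\; \big[(1,0,0)^\top\big]_\times = \begin{bmatrix} 0 & 0 & 0 \\ 0 & 0 & -1 \\ 0 & 1 & 0 \end{bmatrix},$$

so that all epipolar lines become horizontal and row-aligned. Any error in $F$ perturbs this alignment globally, which is why small estimation errors produce large distortions in the warped output.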
Use of boofcv.struct.image.GrayF32 in project BoofCV by lessthanoptimal.
From the class ExampleStereoDisparity3D, method main.
public static void main(String[] args) {
    // ------------- Compute Stereo Correspondence

    // Load camera images and stereo camera parameters
    String calibDir = UtilIO.pathExample("calibration/stereo/Bumblebee2_Chess/");
    String imageDir = UtilIO.pathExample("stereo/");
    StereoParameters param = CalibrationIO.load(new File(calibDir, "stereo.yaml"));

    // load and convert images into a BoofCV format
    BufferedImage origLeft = UtilImageIO.loadImage(imageDir, "chair01_left.jpg");
    BufferedImage origRight = UtilImageIO.loadImage(imageDir, "chair01_right.jpg");
    GrayU8 distLeft = ConvertBufferedImage.convertFrom(origLeft, (GrayU8) null);
    GrayU8 distRight = ConvertBufferedImage.convertFrom(origRight, (GrayU8) null);

    // re-scale input images
    GrayU8 scaledLeft = new GrayU8((int) (distLeft.width * scale), (int) (distLeft.height * scale));
    GrayU8 scaledRight = new GrayU8((int) (distRight.width * scale), (int) (distRight.height * scale));
    new FDistort(distLeft, scaledLeft).scaleExt().apply();
    new FDistort(distRight, scaledRight).scaleExt().apply();
    // Don't forget to adjust camera parameters for the change in scale!
    PerspectiveOps.scaleIntrinsic(param.left, scale);
    PerspectiveOps.scaleIntrinsic(param.right, scale);

    // rectify images and compute disparity
    GrayU8 rectLeft = new GrayU8(scaledLeft.width, scaledLeft.height);
    GrayU8 rectRight = new GrayU8(scaledRight.width, scaledRight.height);
    RectifyCalibrated rectAlg = ExampleStereoDisparity.rectify(scaledLeft, scaledRight, param, rectLeft, rectRight);
    // GrayU8 disparity = ExampleStereoDisparity.denseDisparity(rectLeft, rectRight, 3, minDisparity, maxDisparity);
    GrayF32 disparity = ExampleStereoDisparity.denseDisparitySubpixel(rectLeft, rectRight, 3, minDisparity, maxDisparity);

    // ------------- Convert disparity image into a 3D point cloud

    // The point cloud will be in the left camera's reference frame
    DMatrixRMaj rectK = rectAlg.getCalibrationMatrix();
    DMatrixRMaj rectR = rectAlg.getRectifiedRotation();

    // used to display the point cloud
    PointCloudViewer viewer = new PointCloudViewer(rectK, 10);
    viewer.setPreferredSize(new Dimension(rectLeft.width, rectLeft.height));

    // extract intrinsic parameters from the rectified camera
    double baseline = param.getBaseline();
    double fx = rectK.get(0, 0);
    double fy = rectK.get(1, 1);
    double cx = rectK.get(0, 2);
    double cy = rectK.get(1, 2);

    // Iterate through each pixel in the disparity image and compute its 3D coordinate
    Point3D_F64 pointRect = new Point3D_F64();
    Point3D_F64 pointLeft = new Point3D_F64();
    for (int y = 0; y < disparity.height; y++) {
        for (int x = 0; x < disparity.width; x++) {
            double d = disparity.unsafe_get(x, y) + minDisparity;
            // skip over pixels where no correspondence was found
            if (d >= rangeDisparity)
                continue;
            // Coordinate in the rectified camera frame
            pointRect.z = baseline * fx / d;
            pointRect.x = pointRect.z * (x - cx) / fx;
            pointRect.y = pointRect.z * (y - cy) / fy;
            // rotate into the original left camera frame
            GeometryMath_F64.multTran(rectR, pointRect, pointLeft);
            // add the pixel to the viewer for display purposes and set its gray-scale value
            int v = rectLeft.unsafe_get(x, y);
            viewer.addPoint(pointLeft.x, pointLeft.y, pointLeft.z, v << 16 | v << 8 | v);
        }
    }

    // display the results. Click and drag to move the point cloud camera
    BufferedImage visualized = VisualizeImageData.disparity(disparity, null, minDisparity, maxDisparity, 0);
    ShowImages.showWindow(visualized, "Disparity");
    ShowImages.showWindow(viewer, "Point Cloud");
}
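The inner loop above is the standard rectified-stereo triangulation. With baseline $B$, rectified focal lengths $f_x, f_y$, principal point $(c_x, c_y)$, and disparity $d$ at pixel $(x, y)$:

$$z = \frac{f_x B}{d}, \qquad X = \frac{(x - c_x)\, z}{f_x}, \qquad Y = \frac{(y - c_y)\, z}{f_y},$$

after which the point is rotated out of the rectified frame back into the original left camera frame with $R_{\text{rect}}^\top$, which is what the GeometryMath_F64.multTran call does.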
Use of boofcv.struct.image.GrayF32 in project BoofCV by lessthanoptimal.
From the class ExampleBackgroundRemovalMoving, method main.
public static void main(String[] args) {
    // Example with a moving camera. Highlights why motion estimation is sometimes required
    String fileName = UtilIO.pathExample("tracking/chipmunk.mjpeg");
    // Camera has a bit of jitter in it. Static kinda works but motion reduces false positives
    // String fileName = UtilIO.pathExample("background/horse_jitter.mp4");

    // Comment/Uncomment to switch input image type
    ImageType imageType = ImageType.single(GrayF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedU8.class);

    // Configure the feature detector
    ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
    confDetector.threshold = 10;
    confDetector.maxFeatures = 300;
    confDetector.radius = 6;

    // Use a KLT tracker
    PointTracker tracker = FactoryPointTracker.klt(new int[] { 1, 2, 4, 8 }, confDetector, 3, GrayF32.class, null);

    // This estimates the 2D image motion
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D =
            FactoryMotion2D.createMotion2D(500, 0.5, 3, 100, 0.6, 0.5, false, tracker, new Homography2D_F64());

    ConfigBackgroundBasic configBasic = new ConfigBackgroundBasic(30, 0.005f);

    // Configuration for the Gaussian model. Note that the threshold changes depending on the number of image bands
    // 12 = gray scale and 40 = color
    ConfigBackgroundGaussian configGaussian = new ConfigBackgroundGaussian(12, 0.001f);
    configGaussian.initialVariance = 64;
    configGaussian.minimumDifference = 5;

    // Note that GMM doesn't interpolate the input image, which makes it harder to model object edges.
    // However, it runs faster because of this.
    ConfigBackgroundGmm configGmm = new ConfigBackgroundGmm();
    configGmm.initialVariance = 1600;
    configGmm.significantWeight = 1e-1f;

    // Comment/Uncomment to switch background model
    BackgroundModelMoving background =
            FactoryBackgroundModel.movingBasic(configBasic, new PointTransformHomography_F32(), imageType);
    // FactoryBackgroundModel.movingGaussian(configGaussian, new PointTransformHomography_F32(), imageType);
    // FactoryBackgroundModel.movingGmm(configGmm, new PointTransformHomography_F32(), imageType);
    background.setUnknownValue(1);

    MediaManager media = DefaultMediaManager.INSTANCE;
    SimpleImageSequence video = media.openVideo(fileName, background.getImageType());
    // media.openCamera(null, 640, 480, background.getImageType());

    // ====== Initialize Images

    // storage for the segmented image. Background = 0, Foreground = 1
    GrayU8 segmented = new GrayU8(video.getNextWidth(), video.getNextHeight());
    // Gray-scale image that's the input for motion estimation
    GrayF32 grey = new GrayF32(segmented.width, segmented.height);

    // coordinate frames
    Homography2D_F32 firstToCurrent32 = new Homography2D_F32();
    Homography2D_F32 homeToWorld = new Homography2D_F32();
    homeToWorld.a13 = grey.width / 2;
    homeToWorld.a23 = grey.height / 2;

    // Create a background image twice the size of the input image. Tell it that the home is in the center
    background.initialize(grey.width * 2, grey.height * 2, homeToWorld);

    BufferedImage visualized = new BufferedImage(segmented.width, segmented.height, BufferedImage.TYPE_INT_RGB);
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImages(visualized, visualized);
    ShowImages.showWindow(gui, "Detections", true);

    double fps = 0;
    // smoothing factor for the FPS estimate
    double alpha = 0.01;

    while (video.hasNext()) {
        ImageBase input = video.next();
        long before = System.nanoTime();
        GConvertImage.convert(input, grey);
        if (!motion2D.process(grey)) {
            throw new RuntimeException("Should handle this scenario");
        }
        Homography2D_F64 firstToCurrent64 = motion2D.getFirstToCurrent();
        ConvertMatrixData.convert(firstToCurrent64, firstToCurrent32);
        background.segment(firstToCurrent32, input, segmented);
        background.updateBackground(firstToCurrent32, input);
        long after = System.nanoTime();
        fps = (1.0 - alpha) * fps + alpha * (1.0 / ((after - before) / 1e9));

        VisualizeBinaryData.renderBinary(segmented, false, visualized);
        gui.setImage(0, 0, (BufferedImage) video.getGuiImage());
        gui.setImage(0, 1, visualized);
        gui.repaint();
        System.out.println("FPS = " + fps);
        try {
            Thread.sleep(5);
        } catch (InterruptedException ignore) {
        }
    }
}
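If the camera is known to be static, the tracker and homography plumbing above can be dropped. Below is a minimal sketch of the stationary-camera variant, reusing fileName, configBasic, imageType, and media from the example above; it assumes the stationaryBasic factory and the segment/updateBackground calls of BackgroundModelStationary, so verify the exact signatures against your BoofCV version.

// Stationary-camera sketch: no motion estimation required
BackgroundModelStationary background = FactoryBackgroundModel.stationaryBasic(configBasic, imageType);
SimpleImageSequence video = media.openVideo(fileName, background.getImageType());
GrayU8 segmented = new GrayU8(video.getNextWidth(), video.getNextHeight());
while (video.hasNext()) {
    ImageBase input = video.next();
    // classify each pixel against the current background model
    background.segment(input, segmented);
    // then fold the new frame into the model
    background.updateBackground(input);
}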