Usage example of boofcv.struct.image.GrayF32 in the BoofCV project (by lessthanoptimal): class ShowRectifyCalibratedApp, method addRectified.
private void addRectified(final String name, final DMatrixRMaj rect1, final DMatrixRMaj rect2) {
	// TODO simplify code some how
	// The rectification matrices arrive in double precision, but the
	// distortion factory wants 32-bit floats, so convert them first
	FMatrixRMaj rectLeft32 = new FMatrixRMaj(3, 3);
	FMatrixRMaj rectRight32 = new FMatrixRMaj(3, 3);
	ConvertMatrixData.convert(rect1, rectLeft32);
	ConvertMatrixData.convert(rect2, rectRight32);

	// Create the transforms which rectify each input image
	ImageType<GrayF32> imageType = ImageType.single(GrayF32.class);
	ImageDistort<GrayF32, GrayF32> rectifierLeft =
			RectifyImageOps.rectifyImage(param.getLeft(), rectLeft32, BorderType.ZERO, imageType);
	ImageDistort<GrayF32, GrayF32> rectifierRight =
			RectifyImageOps.rectifyImage(param.getRight(), rectRight32, BorderType.ZERO, imageType);

	// Start from an all-black canvas so pixels outside the source stay black
	GImageMiscOps.fill(rectLeft, 0);
	GImageMiscOps.fill(rectRight, 0);

	// Render the rectified views
	DistortImageOps.distortPL(distLeft, rectLeft, rectifierLeft);
	DistortImageOps.distortPL(distRight, rectRight, rectifierRight);

	// Convert into BufferedImage for display
	final BufferedImage outLeft = ConvertBufferedImage.convertTo(rectLeft, null, true);
	final BufferedImage outRight = ConvertBufferedImage.convertTo(rectRight, null, true);

	// Hand the rectified pair off to the GUI on the Swing event thread
	SwingUtilities.invokeLater(new Runnable() {
		@Override
		public void run() {
			gui.addItem(new RectifiedPairPanel(true, outLeft, outRight), name);
		}
	});
}
Usage example of boofcv.struct.image.GrayF32 in the BoofCV project (by lessthanoptimal): class CompareConvertedDescriptionsApp, method visualize.
/**
 * Detects, describes, and associates features between two images, then shows
 * the resulting matches in a window titled {@code title}.
 */
public static <TD extends TupleDesc> void visualize(String title, BufferedImage image1, BufferedImage image2, InterestPointDetector<GrayF32> detector, DescribeRegionPoint<GrayF32, TD> describe, ScoreAssociation<TD> scorer) {
	// Convert both inputs into the gray float format the detector expects
	GrayF32 gray1 = ConvertBufferedImage.convertFrom(image1, (GrayF32) null);
	GrayF32 gray2 = ConvertBufferedImage.convertFrom(image2, (GrayF32) null);

	// Detect interest points and compute a descriptor at each one
	List<Point2D_F64> pointsSrc = new ArrayList<>();
	List<Point2D_F64> pointsDst = new ArrayList<>();
	FastQueue<TD> descSrc = describeImage(gray1, detector, describe, pointsSrc);
	FastQueue<TD> descDst = describeImage(gray2, detector, describe, pointsDst);

	// Greedily associate descriptors with no limit on the match error
	AssociateDescription<TD> assoc = FactoryAssociation.greedy(scorer, Double.MAX_VALUE, false);
	assoc.setSource(descSrc);
	assoc.setDestination(descDst);
	assoc.associate();

	// Display the associated point pairs side by side
	AssociationPanel panel = new AssociationPanel(20);
	panel.setImages(image1, image2);
	panel.setAssociation(pointsSrc, pointsDst, assoc.getMatches());
	ShowImages.showWindow(panel, title);
}
Usage example of boofcv.struct.image.GrayF32 in the BoofCV project (by lessthanoptimal): class CompareConvertedDescriptionsApp, method main.
public static void main(String[] args) {
	String file1 = UtilIO.pathExample("stitch/kayak_01.jpg");
	String file2 = UtilIO.pathExample("stitch/kayak_02.jpg");

	// Fast-Hessian interest points plus a stable SURF descriptor (doubles)
	InterestPointDetector<GrayF32> detector =
			FactoryInterestPoint.fastHessian(new ConfigFastHessian(1, 10, -1, 2, 9, 4, 4));
	DescribeRegionPoint<GrayF32, TupleDesc_F64> describeA =
			(DescribeRegionPoint) FactoryDescribeRegionPoint.surfStable(null, GrayF32.class);

	// Wrap the descriptor so its F64 output is converted into signed bytes
	ConvertTupleDesc<TupleDesc_F64, TupleDesc_S8> converter =
			FactoryConvertTupleDesc.real_F64_S8(describeA.createDescription().size());
	DescribeRegionPoint<GrayF32, TupleDesc_S8> describeB =
			new DescribeRegionPointConvert<>(describeA, converter);

	// Sum-of-absolute-difference scoring for each descriptor type
	ScoreAssociation<TupleDesc_F64> scoreA = FactoryAssociation.scoreSad(TupleDesc_F64.class);
	ScoreAssociation<TupleDesc_S8> scoreB = FactoryAssociation.scoreSad(TupleDesc_S8.class);

	BufferedImage image1 = UtilImageIO.loadImage(file1);
	BufferedImage image2 = UtilImageIO.loadImage(file2);

	// Compare association quality before and after descriptor conversion
	visualize("Original", image1, image2, detector, describeA, scoreA);
	visualize("Modified", image1, image2, detector, describeB, scoreB);
	System.out.println("Done");
}
Usage example of boofcv.struct.image.GrayF32 in the BoofCV project (by lessthanoptimal): class DetectLineApp, method main.
public static void main(String[] args) {
	Class imageType = GrayF32.class;
	Class derivType = GrayF32.class;

	DetectLineApp app = new DetectLineApp(imageType, derivType);

	// Example images the user can switch between in the GUI
	java.util.List<PathLabel> examples = new ArrayList<>();
	examples.add(new PathLabel("Objects", UtilIO.pathExample("simple_objects.jpg")));
	examples.add(new PathLabel("Indoors", UtilIO.pathExample("lines_indoors.jpg")));
	app.setInputList(examples);

	// wait for it to process one image so that the size isn't all screwed up
	while (!app.getHasProcessedImage())
		Thread.yield();

	ShowImages.showWindow(app, "Line Detection", true);
}
Usage example of boofcv.struct.image.GrayF32 in the BoofCV project (by lessthanoptimal): class MultiCameraToEquirectangular, method addCamera.
/**
 * Adds a camera and attempts to compute the mask from the provided distortion model. If a pixel is rendered
 * outside the bounds in the input image then it is masked out. If the forwards/backwards transform is too
 * different then it is masked out.
 *
 * @param cameraToCommon Rigid body transform from this camera to the common frame the equirectangular image
 * is in
 * @param factory Distortion model
 * @param width Input image width
 * @param height Input image height
 */
public void addCamera(Se3_F32 cameraToCommon, LensDistortionWideFOV factory, int width, int height) {
// Forward (camera pixel -> unit sphere) and inverse (sphere -> camera pixel) transforms
Point2Transform3_F32 p2s = factory.undistortPtoS_F32();
Point3Transform2_F32 s2p = factory.distortStoP_F32();
// Maps an equirectangular pixel into this camera's image using its rotation
EquiToCamera equiToCamera = new EquiToCamera(cameraToCommon.getR(), s2p);
// Mask: 1 where this camera contributes a valid pixel, 0 elsewhere
// NOTE(review): field names mix "equiWidth" and "equHeight" — presumably the
// equirectangular output dimensions; confirm against the class declaration
GrayF32 equiMask = new GrayF32(equiWidth, equHeight);
// Cached per-pixel transform stored alongside the mask for later rendering
PixelTransform2_F32 transformEquiToCam = new PixelTransformCached_F32(equiWidth, equHeight, new PointToPixelTransform_F32(equiToCamera));
Point3D_F32 p3b = new Point3D_F32();
Point2D_F32 p2 = new Point2D_F32();
for (int row = 0; row < equHeight; row++) {
for (int col = 0; col < equiWidth; col++) {
// Where does this equirectangular pixel land in the camera image?
equiToCamera.compute(col, row, p2);
// Round to the nearest integer pixel coordinate
int camX = (int) (p2.x + 0.5f);
int camY = (int) (p2.y + 0.5f);
// Mask out pixels that map to NaN or fall outside the camera image
if (Double.isNaN(p2.x) || Double.isNaN(p2.y) || camX < 0 || camY < 0 || camX >= width || camY >= height)
continue;
// Round-trip check: project the camera pixel back onto the unit sphere
p2s.compute(p2.x, p2.y, p3b);
if (Double.isNaN(p3b.x) || Double.isNaN(p3b.y) || Double.isNaN(p3b.z))
continue;
// equiToCamera.unitCam holds the sphere point from the forward compute()
// above (order-sensitive side channel); compare directions and accept the
// pixel only when the forward/backward angle error is within tolerance
double angle = UtilVector3D_F32.acute(equiToCamera.unitCam, p3b);
if (angle < maskToleranceAngle) {
equiMask.set(col, row, 1);
}
}
}
cameras.add(new Camera(equiMask, transformEquiToCam));
}
Aggregations