Use of boofcv.struct.distort.Point2Transform2_F64 in project BoofCV by lessthanoptimal.
From class TestLensDistortionOps_F64, method transformChangeModel_FULLVIEW_modified.
/**
* Checks to see if the returned modified model is correct
*/
@Test
void transformChangeModel_FULLVIEW_modified() {
    // distorted pixel in original image
    double pixelX = 12.5, pixelY = height - 3;

    CameraPinholeBrown orig = new CameraPinholeBrown().fsetK(300, 320, 0, 150, 130, width, height).fsetRadial(0.1, 0.05);
    CameraPinhole desired = new CameraPinhole(orig);

    Point2Transform2_F64 distToNorm = LensDistortionFactory.narrow(orig).undistort_F64(true, false);

    Point2D_F64 norm = new Point2D_F64();
    distToNorm.compute(pixelX, pixelY, norm);

    CameraPinholeBrown adjusted = new CameraPinholeBrown();
    Point2Transform2_F64 distToAdj = LensDistortionOps_F64.transformChangeModel(AdjustmentType.FULL_VIEW, orig, desired, false, adjusted);

    Point2D_F64 adjPixel = new Point2D_F64();
    Point2D_F64 normFound = new Point2D_F64();

    distToAdj.compute(pixelX, pixelY, adjPixel);
    PerspectiveOps.convertPixelToNorm(adjusted, adjPixel, normFound);

    // see if the normalized image coordinates are the same
    assertEquals(norm.x, normFound.x, 1e-3);
    assertEquals(norm.y, normFound.y, 1e-3);
}
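The test above only exercises the forward direction of the transform. A round-trip check can be added on top of it; the sketch below is not part of the original test, it reuses the same orig, desired, pixelX, and pixelY as above, requests both directions from transformChangeModel, and verifies that they undo each other.

Point2Transform2_F64 forward = LensDistortionOps_F64.transformChangeModel(AdjustmentType.FULL_VIEW, orig, desired, false, null);
Point2Transform2_F64 inverse = LensDistortionOps_F64.transformChangeModel(AdjustmentType.FULL_VIEW, orig, desired, true, null);

Point2D_F64 adj = new Point2D_F64();
Point2D_F64 backToDist = new Point2D_F64();
forward.compute(pixelX, pixelY, adj);        // distorted pixel -> adjusted pinhole pixel
inverse.compute(adj.x, adj.y, backToDist);   // and back into the distorted image
assertEquals(pixelX, backToDist.x, 1e-3);    // tolerance chosen to match the test above
assertEquals(pixelY, backToDist.y, 1e-3);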
Use of boofcv.struct.distort.Point2Transform2_F64 in project BoofCV by lessthanoptimal.
From class TestLensDistortionOps_F64, method transformChangeModel_FULLVIEW.
/**
* Checks the border of the returned transform. Makes sure that the entire original image is visible.
* Also makes sure that the requested inverse transform is actually the inverse.
*/
@Test
void transformChangeModel_FULLVIEW() {
    CameraPinholeBrown param = new CameraPinholeBrown().fsetK(300, 320, 0, 150, 130, width, height).fsetRadial(0.1, 0.05);
    CameraPinhole desired = new CameraPinhole(param);

    Point2Transform2_F64 adjToDist = LensDistortionOps_F64.transformChangeModel(AdjustmentType.FULL_VIEW, param, desired, true, null);
    Point2Transform2_F64 distToAdj = LensDistortionOps_F64.transformChangeModel(AdjustmentType.FULL_VIEW, param, desired, false, null);
    checkBorderOutside(adjToDist, distToAdj);

    // repeat the check with negative radial distortion
    param = new CameraPinholeBrown().fsetK(300, 320, 0, 150, 130, width, height).fsetRadial(-0.1, -0.05);
    desired = new CameraPinhole(param);
    adjToDist = LensDistortionOps_F64.transformChangeModel(AdjustmentType.FULL_VIEW, param, desired, true, null);
    distToAdj = LensDistortionOps_F64.transformChangeModel(AdjustmentType.FULL_VIEW, param, desired, false, null);
    checkBorderOutside(adjToDist, distToAdj);
}
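The FULL_VIEW adjustment verified here is the same option used when undistorting a whole image. As a hedged sketch, assuming the LensDistortionOps.changeCameraModel call shown later on this page for EXPAND also accepts FULL_VIEW, and that distorted and undistorted are GrayF32 images of matching size declared elsewhere:

CameraPinholeBrown fullView = new CameraPinholeBrown();
ImageDistort<GrayF32, GrayF32> undistorter = LensDistortionOps.changeCameraModel(
        AdjustmentType.FULL_VIEW, BorderType.ZERO, param, new CameraPinhole(param),
        fullView, ImageType.single(GrayF32.class));
// with FULL_VIEW every pixel of the original image lands inside the output
undistorter.apply(distorted, undistorted);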
Use of boofcv.struct.distort.Point2Transform2_F64 in project BoofCV by lessthanoptimal.
From class TestLensDistortionOps_F64, method transformChangeModel_NONE_modified.
@Test
void transformChangeModel_NONE_modified() {
    // distorted pixel in original image
    double pixelX = 12.5, pixelY = height - 3;

    CameraPinholeBrown orig = new CameraPinholeBrown().fsetK(300, 320, 0, 150, 130, width, height).fsetRadial(0.1, 0.05);
    CameraPinhole desired = new CameraPinhole(orig);

    Point2Transform2_F64 distToNorm = LensDistortionFactory.narrow(orig).undistort_F64(true, false);

    Point2D_F64 norm = new Point2D_F64();
    distToNorm.compute(pixelX, pixelY, norm);

    CameraPinhole adjusted = new CameraPinhole();
    Point2Transform2_F64 distToAdj = LensDistortionOps_F64.transformChangeModel(AdjustmentType.NONE, orig, desired, false, adjusted);

    Point2D_F64 adjPixel = new Point2D_F64();
    Point2D_F64 normFound = new Point2D_F64();

    distToAdj.compute(pixelX, pixelY, adjPixel);
    PerspectiveOps.convertPixelToNorm(adjusted, adjPixel, normFound);

    // see if the normalized image coordinates are the same
    assertEquals(norm.x, normFound.x, 1e-3);
    assertEquals(norm.y, normFound.y, 1e-3);
}
Use of boofcv.struct.distort.Point2Transform2_F64 in project BoofCV by lessthanoptimal.
From class VisualizeSquareFiducial, method process.
public void process(String nameImage, @Nullable String nameIntrinsic) {
    CameraPinholeBrown intrinsic = nameIntrinsic == null ? null : (CameraPinholeBrown) CalibrationIO.load(nameIntrinsic);
    GrayF32 input = Objects.requireNonNull(UtilImageIO.loadImage(nameImage, GrayF32.class));
    GrayF32 undistorted = new GrayF32(input.width, input.height);

    Detector detector = new Detector();

    if (intrinsic != null) {
        // remove lens distortion so the detector sees an ideal pinhole image
        var paramUndist = new CameraPinholeBrown();
        ImageDistort<GrayF32, GrayF32> undistorter = LensDistortionOps.changeCameraModel(
                AdjustmentType.EXPAND, BorderType.EXTENDED, intrinsic, new CameraPinhole(intrinsic),
                paramUndist, ImageType.single(GrayF32.class));
        detector.configure(new LensDistortionBrown(paramUndist), paramUndist.width, paramUndist.height, false);
        undistorter.apply(input, undistorted);
    } else {
        undistorted.setTo(input);
    }

    detector.process(undistorted);

    System.out.println("Total Found: " + detector.squares.size());
    DogArray<FoundFiducial> fiducials = detector.getFound();

    int N = Math.min(20, detector.squares.size());
    ListDisplayPanel squares = new ListDisplayPanel();
    for (int i = 0; i < N; i++) {
        squares.addImage(ConvertBufferedImage.convertTo(detector.squares.get(i), null), " " + i);
    }

    BufferedImage output = new BufferedImage(input.width, input.height, BufferedImage.TYPE_INT_RGB);
    VisualizeBinaryData.renderBinary(detector.getBinary(), false, output);
    Graphics2D g2 = output.createGraphics();
    g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
    g2.setColor(Color.RED);
    g2.setStroke(new BasicStroke(2));

    if (intrinsic != null) {
        // pixel-to-pixel transform that applies the lens distortion model
        Point2Transform2_F64 add_p_to_p = LensDistortionFactory.narrow(intrinsic).distort_F64(true, true);
        for (int i = 0; i < N; i++) {
            // add back in lens distortion
            Quadrilateral_F64 q = fiducials.get(i).distortedPixels;
            apply(add_p_to_p, q.a, q.a);
            apply(add_p_to_p, q.b, q.b);
            apply(add_p_to_p, q.c, q.c);
            apply(add_p_to_p, q.d, q.d);
            VisualizeShapes.draw(q, g2);
        }
    }

    BufferedImage outputGray = new BufferedImage(input.width, input.height, BufferedImage.TYPE_INT_RGB);
    ConvertBufferedImage.convertTo(undistorted, outputGray);
    g2 = outputGray.createGraphics();
    g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);

    for (int i = 0; i < N; i++) {
        // add back in lens distortion
        Quadrilateral_F64 q = fiducials.get(i).distortedPixels;
        // g2.setStroke(new BasicStroke(2));
        // VisualizeBinaryData.render(detector.getSquareDetector().getUsedContours(),Color.BLUE,outputGray);
        VisualizeShapes.drawArrowSubPixel(q, 3, 1, g2);
    }

    ShowImages.showWindow(output, "Binary");
    ShowImages.showWindow(outputGray, "Gray");
    ShowImages.showWindow(squares, "Candidates");
}
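A minimal way to invoke this visualization, assuming the class has a no-argument constructor; the file paths are placeholders for an input image and a CalibrationIO YAML file:

public static void main(String[] args) {
    VisualizeSquareFiducial app = new VisualizeSquareFiducial();
    // pass null as the second argument to skip the undistortion step
    app.process("fiducial/image00.jpg", "fiducial/intrinsic.yaml");
}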
Use of boofcv.struct.distort.Point2Transform2_F64 in project BoofCV by lessthanoptimal.
From class ImplRectifyImageOps_F64, method transformPixelToRect.
public static Point2Transform2_F64 transformPixelToRect(CameraPinholeBrown param, DMatrixRMaj rectify) {
    // remove lens distortion: distorted pixel -> undistorted pixel
    Point2Transform2_F64 remove_p_to_p = narrow(param).undistort_F64(true, true);
    // then apply the rectification homography in pixel coordinates
    PointTransformHomography_F64 rectifyDistort = new PointTransformHomography_F64(rectify);

    return new SequencePoint2Transform2_F64(remove_p_to_p, rectifyDistort);
}
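The returned transform maps a pixel in the original distorted image directly into the rectified image. A short usage sketch, assuming rect1 is a 3x3 rectification homography (DMatrixRMaj) produced by a stereo rectification step elsewhere, and noting that this is an internal implementation class so direct calls like this are for illustration only:

Point2Transform2_F64 pixelToRect = ImplRectifyImageOps_F64.transformPixelToRect(param, rect1);
Point2D_F64 rectified = new Point2D_F64();
// convert the distorted pixel (45, 67) into rectified pixel coordinates
pixelToRect.compute(45.0, 67.0, rectified);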