Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
From the class TestSceneStructureMetric, method projectToPixel_3D.
@Test
void projectToPixel_3D() {
    var scene = new SceneStructureMetric(false);
    var intrinsic = new CameraPinhole(100, 100, 0, 0, 0, 300, 300);
    var world_to_view = SpecialEuclideanOps_F64.eulerXyz(1, 0, 0, 0, 0.01, -0.04, null);

    scene.initialize(1, 1, 1);
    scene.setView(0, 0, true, world_to_view);
    scene.setCamera(0, true, intrinsic);
    scene.setPoint(0, 0.1, -0.5, 1.1);

    // A point in front of the camera should project successfully.
    // The freshly allocated Se3_F64 and Point3D_F64 arguments are temporary workspace objects.
    var pixel = new Point2D_F64();
    assertTrue(scene.projectToPixel(0, 0, new Se3_F64(), new Se3_F64(), new Point3D_F64(), pixel));

    // Compare against an independent rendering of the same point
    var expected = new Point2D_F64();
    PerspectiveOps.renderPixel(world_to_view, intrinsic, new Point3D_F64(0.1, -0.5, 1.1), expected);
    assertEquals(expected.x, pixel.x, UtilEjml.TEST_F64);
    assertEquals(expected.y, pixel.y, UtilEjml.TEST_F64);

    // Sanity check: a point behind the camera should fail to project
    scene.setPoint(0, 0.1, -0.5, -1.1);
    assertFalse(scene.projectToPixel(0, 0, new Se3_F64(), new Se3_F64(), new Point3D_F64(), pixel));
}
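For reference, the projection this test verifies is the standard pinhole model: rotate and translate the point into the view frame, then divide by depth and apply the intrinsics. Below is a minimal sketch of that arithmetic in plain Java; the helper name projectManually is made up for illustration and is not part of BoofCV.
// Illustrative helper (not part of BoofCV): projects a world point into pixel coordinates
// for a pinhole camera, returning null when the point lies behind the camera.
static Point2D_F64 projectManually( Se3_F64 world_to_view, CameraPinhole intrinsic, Point3D_F64 X ) {
    // Transform the world point into the camera/view frame: Xc = R*X + T
    Point3D_F64 Xc = new Point3D_F64();
    SePointOps_F64.transform(world_to_view, X, Xc);
    if (Xc.z <= 0)
        return null; // behind the camera, matching the boolean returned by projectToPixel()
    // Apply the pinhole intrinsics: u = fx*x/z + skew*y/z + cx, v = fy*y/z + cy
    double u = intrinsic.fx*Xc.x/Xc.z + intrinsic.skew*Xc.y/Xc.z + intrinsic.cx;
    double v = intrinsic.fy*Xc.y/Xc.z + intrinsic.cy;
    return new Point2D_F64(u, v);
}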
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
From the class ExampleRemoveLensDistortion, method main.
public static void main(String[] args) {
    String calibDir = UtilIO.pathExample("calibration/mono/Sony_DSC-HX5V_Chess/");
    String imageDir = UtilIO.pathExample("structure/");

    // Load calibration parameters from the previously calibrated camera
    CameraPinholeBrown param = CalibrationIO.load(new File(calibDir, "intrinsic.yaml"));

    // Specify the distortion-free camera model you wish to re-render the image as having
    CameraPinhole desired = new CameraPinhole(param);

    // Load the image and convert it into a color BoofCV format
    BufferedImage orig = UtilImageIO.loadImage(imageDir, "dist_cyto_01.jpg");
    Planar<GrayF32> distortedImg = ConvertBufferedImage.convertFromPlanar(orig, null, true, GrayF32.class);
    int numBands = distortedImg.getNumBands();

    // Create new transforms which optimize the view area in different ways:
    // EXPAND makes sure no pixels from outside the original image (rendered as black) appear in the output
    // FULL_VIEW makes sure the entire original image is visible in the output
    // The border is BorderType.ZERO, i.e. black, just so you can see it
    ImageDistort allInside = LensDistortionOps.changeCameraModel(AdjustmentType.EXPAND, BorderType.ZERO,
            param, desired, null, ImageType.pl(numBands, GrayF32.class));
    ImageDistort fullView = LensDistortionOps.changeCameraModel(AdjustmentType.FULL_VIEW, BorderType.ZERO,
            param, desired, null, ImageType.pl(numBands, GrayF32.class));

    // NOTE: After lens distortion has been removed the intrinsic parameters are changed. If you pass
    //       in a CameraPinhole instead of null above, the adjusted intrinsics will be saved there.
    // NOTE: Type information was stripped from ImageDistort simply because it becomes too verbose here.
    //       It would be nice if this verbosity issue were addressed by the Java language.

    // Render and display the different types of views in a window
    displayResults(orig, distortedImg, allInside, fullView);
}
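The displayResults() helper is not shown in this snippet. As a rough sketch of what it could do, the code below applies the two ImageDistort transforms and shows the results next to the original image; the helper's signature, the image titles, and the use of ConvertBufferedImage.convertTo_F32 for the float planar images are assumptions, not the example's actual code.
// A minimal sketch of a displayResults() helper; names and layout are illustrative only.
public static void displayResults( BufferedImage orig, Planar<GrayF32> distortedImg,
                                    ImageDistort allInside, ImageDistort fullView ) {
    // Allocate outputs with the same shape as the input and render the two undistorted views
    Planar<GrayF32> imageExpand = distortedImg.createSameShape();
    Planar<GrayF32> imageFullView = distortedImg.createSameShape();
    allInside.apply(distortedImg, imageExpand);
    fullView.apply(distortedImg, imageFullView);

    // Convert back into BufferedImage and display all three side by side
    ListDisplayPanel panel = new ListDisplayPanel();
    panel.addImage(orig, "Original");
    panel.addImage(ConvertBufferedImage.convertTo_F32(imageExpand, null, true), "Undistorted EXPAND");
    panel.addImage(ConvertBufferedImage.convertTo_F32(imageFullView, null, true), "Undistorted FULL_VIEW");
    ShowImages.showWindow(panel, "Remove Lens Distortion", true);
}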
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
From the class ExampleEquirectangularToPinhole, method main.
public static void main(String[] args) {
    // Specify what the pinhole camera should look like
    CameraPinhole pinholeModel = new CameraPinhole(200, 200, 0, 250, 250, 500, 500);

    // Load the equirectangular RGB image
    BufferedImage bufferedEqui = UtilImageIO.loadImageNotNull(UtilIO.pathExample("spherical/equirectangular_half_dome_01.jpg"));
    Planar<GrayU8> equiImage = ConvertBufferedImage.convertFrom(bufferedEqui, true, ImageType.pl(3, GrayU8.class));

    // Declare storage for the pinhole camera image
    Planar<GrayU8> pinholeImage = equiImage.createNew(pinholeModel.width, pinholeModel.height);

    // Create the image distorter which will render the image
    InterpolatePixel<Planar<GrayU8>> interp = FactoryInterpolation.createPixel(0, 255,
            InterpolationType.BILINEAR, BorderType.EXTENDED, equiImage.getImageType());
    ImageDistort<Planar<GrayU8>, Planar<GrayU8>> distorter = FactoryDistort.distort(false, interp, equiImage.getImageType());

    // This is where the magic is done. It defines the transform from equirectangular to pinhole
    CameraToEquirectangular_F32 pinholeToEqui = new CameraToEquirectangular_F32();
    pinholeToEqui.setEquirectangularShape(equiImage.width, equiImage.height);
    pinholeToEqui.setCameraModel(pinholeModel);

    // Pass the transform to the image distorter
    distorter.setModel(pinholeToEqui);

    // Change the orientation of the camera to make the view better
    ConvertRotation3D_F32.eulerToMatrix(EulerType.YXZ, 0, 1.45f, 2.2f, pinholeToEqui.getRotation());

    // Render the image
    distorter.apply(equiImage, pinholeImage);
    BufferedImage bufferedPinhole0 = ConvertBufferedImage.convertTo(pinholeImage, null, true);

    // Let's look at another view
    ConvertRotation3D_F32.eulerToMatrix(EulerType.YXZ, 0, 1.25f, -1.25f, pinholeToEqui.getRotation());
    distorter.apply(equiImage, pinholeImage);
    BufferedImage bufferedPinhole1 = ConvertBufferedImage.convertTo(pinholeImage, null, true);

    // Display the results
    ListDisplayPanel panel = new ListDisplayPanel();
    panel.addImage(bufferedPinhole0, "Pinhole View 0");
    panel.addImage(bufferedPinhole1, "Pinhole View 1");
    panel.addImage(bufferedEqui, "Equirectangular");
    panel.setPreferredSize(new Dimension(equiImage.width, equiImage.height));
    ShowImages.showWindow(panel, "Equirectangular to Pinhole", true);
}
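Because pinholeToEqui exposes its rotation matrix, the same distorter can render any number of synthetic views. The loop below is a small sketch, not part of the original example, that could be placed at the end of main() to sweep the camera around the vertical axis in 45-degree steps; the angles and step count are arbitrary choices.
// Sketch: render eight views by sweeping the yaw angle, reusing pinholeToEqui, distorter,
// equiImage and pinholeImage from the example above.
ListDisplayPanel sweepPanel = new ListDisplayPanel();
for (int i = 0; i < 8; i++) {
    float yaw = (float)(2.0*Math.PI*i/8.0);
    // Same pitch as the example's first view, rotated by an increasing yaw
    ConvertRotation3D_F32.eulerToMatrix(EulerType.YXZ, yaw, 1.45f, 0, pinholeToEqui.getRotation());
    distorter.apply(equiImage, pinholeImage);
    sweepPanel.addImage(ConvertBufferedImage.convertTo(pinholeImage, null, true), "Yaw " + i*45 + " degrees");
}
ShowImages.showWindow(sweepPanel, "Pinhole Sweep", true);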
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
From the class CommonAutoCalibrationChecks, method renderStationary.
public void renderStationary(CameraPinhole camera) {
    // Create 11 views that all share the same camera and an identity camera-to-world transform,
    // i.e. the camera never moves
    List<CameraPinhole> cameras = new ArrayList<>();
    for (int i = 0; i < 11; i++) {
        Se3_F64 cameraToWorld = new Se3_F64();
        listCameraToWorld.add(cameraToWorld);
        cameras.add(camera);
    }
    render(cameras);
}
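renderStationary() relies on fields defined elsewhere in CommonAutoCalibrationChecks (listCameraToWorld and render). A minimal sketch of how a concrete check might invoke it follows; the intrinsic values are arbitrary and chosen only for illustration.
// Hypothetical driver code inside a test that extends CommonAutoCalibrationChecks.
CameraPinhole camera = new CameraPinhole(/*fx*/ 500, /*fy*/ 500, /*skew*/ 0, /*cx*/ 400, /*cy*/ 300, /*width*/ 800, /*height*/ 600);
renderStationary(camera);
// With no rotation and no translation the geometry is degenerate, so a check built on this
// helper would typically verify that auto-calibration reports failure for such input.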
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
From the class CommonAutoCalibrationChecks, method renderRotationOnly.
public void renderRotationOnly(CameraPinhole camera) {
    // Create 11 views that rotate in place: the rotation changes each step while the translation stays zero
    List<CameraPinhole> cameras = new ArrayList<>();
    for (int i = 0; i < 11; i++) {
        double yaw = Math.PI * i / 9.0;
        double pitch = Math.PI * i / 20.0;
        double roll = rand.nextGaussian() * 0.1;
        Se3_F64 cameraToWorld = new Se3_F64();
        ConvertRotation3D_F64.eulerToMatrix(EulerType.YZX, yaw, roll, pitch, cameraToWorld.R);
        listCameraToWorld.add(cameraToWorld);
        cameras.add(camera);
    }
    render(cameras);
}
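For comparison, a hypothetical companion helper (not present in the source) could give each view translation as well as rotation by building the transforms with SpecialEuclideanOps_F64.eulerXyz, which the test earlier on this page already uses. The name renderTranslateRotate and the motion magnitudes below are made up for illustration.
// Hypothetical helper modeled on renderRotationOnly(); not part of CommonAutoCalibrationChecks.
public void renderTranslateRotate(CameraPinhole camera) {
    List<CameraPinhole> cameras = new ArrayList<>();
    for (int i = 0; i < 11; i++) {
        double yaw = Math.PI * i / 9.0;
        double pitch = Math.PI * i / 20.0;
        // eulerXyz(dx, dy, dz, rotX, rotY, rotZ, output) combines a translation with a rotation
        Se3_F64 cameraToWorld = SpecialEuclideanOps_F64.eulerXyz(0.1 * i, 0, -0.05 * i, pitch, yaw, 0, null);
        listCameraToWorld.add(cameraToWorld);
        cameras.add(camera);
    }
    render(cameras);
}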