Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
The class BatchRemoveLensDistortion, method process().
public void process() {
    cancel = false;
    System.out.println("AdjustmentType = " + adjustmentType);
    System.out.println("rename = " + rename);
    System.out.println("input pattern = " + inputPattern);
    System.out.println("output dir = " + outputPath);

    File fileOutputDir = new File(outputPath);
    if (!fileOutputDir.exists()) {
        if (!fileOutputDir.mkdirs()) {
            throw new RuntimeException("Output directory did not exist and could not be created");
        } else {
            System.out.println("  created output directory");
        }
    }

    CameraPinholeBrown param = CalibrationIO.load(pathIntrinsic);
    CameraPinholeBrown paramAdj = new CameraPinholeBrown();

    List<String> paths = UtilIO.listSmartImages(inputPattern, false);
    if (paths.isEmpty())
        System.out.println("No inputs found. Bad path or pattern? " + inputPattern);
    System.out.println("Found a total of " + paths.size() + " matching files");

    Planar<GrayF32> distortedImg = new Planar<>(GrayF32.class, param.width, param.height, 3);
    Planar<GrayF32> undistortedImg = new Planar<>(GrayF32.class, param.width, param.height, 3);

    ImageDistort<Planar<GrayF32>, Planar<GrayF32>> distort = LensDistortionOps.changeCameraModel(
            adjustmentType, BorderType.ZERO, param, new CameraPinhole(param), paramAdj,
            distortedImg.getImageType());
    CalibrationIO.save(paramAdj, new File(outputPath, "intrinsicUndistorted.yaml").getAbsolutePath());

    BufferedImage out = new BufferedImage(param.width, param.height, BufferedImage.TYPE_INT_RGB);
    int numDigits = BoofMiscOps.numDigits(paths.size() - 1);
    String format = "%0" + numDigits + "d";
    for (int i = 0; i < paths.size(); i++) {
        File file = new File(paths.get(i));
        System.out.println("processing " + file.getName());
        BufferedImage orig = UtilImageIO.loadImage(file.getAbsolutePath());
        if (orig == null) {
            throw new RuntimeException("Can't load file: " + file.getAbsolutePath());
        }
        if (orig.getWidth() != param.width || orig.getHeight() != param.height) {
            System.err.println("intrinsic parameters and image size do not match!");
            System.exit(-1);
        }
        if (listener != null)
            listener.loadedImage(orig, file.getName());
        ConvertBufferedImage.convertFromPlanar(orig, distortedImg, true, GrayF32.class);
        distort.apply(distortedImg, undistortedImg);
        ConvertBufferedImage.convertTo(undistortedImg, out, true);
        String nameOut;
        if (rename) {
            nameOut = String.format("image" + format + ".png", i);
        } else {
            nameOut = file.getName().split("\\.")[0] + "_undistorted.png";
        }
        UtilImageIO.saveImage(out, new File(outputPath, nameOut).getAbsolutePath());
        if (cancel) {
            break;
        }
    }
    if (listener != null)
        listener.finishedConverting();
}
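The core of this snippet is LensDistortionOps.changeCameraModel, which builds an ImageDistort that maps a distorted Brown-model image into an undistorted pinhole view. Below is a minimal standalone sketch of that call, assuming a hypothetical calibration file "intrinsic.yaml"; the AdjustmentType.FULL_VIEW choice (which keeps every input pixel visible, versus EXPAND, which fills the whole output) is an illustrative selection, not part of the class above.

// Minimal sketch (hypothetical file name; not part of BatchRemoveLensDistortion)
CameraPinholeBrown param = CalibrationIO.load("intrinsic.yaml");
CameraPinholeBrown paramAdj = new CameraPinholeBrown();
// Build a transform from the distorted Brown model to an adjusted pinhole model.
// paramAdj is filled in with the intrinsics of the undistorted view.
ImageDistort<Planar<GrayF32>, Planar<GrayF32>> undistorter =
        LensDistortionOps.changeCameraModel(AdjustmentType.FULL_VIEW, BorderType.ZERO,
                param, new CameraPinhole(param), paramAdj, ImageType.pl(3, GrayF32.class));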
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
The class VisualizeSquareBinaryFiducial, method process().
public void process(String nameImage, @Nullable String nameIntrinsic) {
    CameraPinholeBrown intrinsic = nameIntrinsic == null ? null : (CameraPinholeBrown) CalibrationIO.load(nameIntrinsic);
    GrayF32 input = Objects.requireNonNull(UtilImageIO.loadImage(nameImage, GrayF32.class));
    var undistorted = new GrayF32(input.width, input.height);

    InputToBinary<GrayF32> inputToBinary = FactoryThresholdBinary.globalOtsu(0, 255, 1.0, true, GrayF32.class);
    Detector detector = new Detector(gridWidth, borderWidth, inputToBinary);
    detector.setLengthSide(0.1);

    if (intrinsic != null) {
        CameraPinholeBrown paramUndist = new CameraPinholeBrown();
        ImageDistort<GrayF32, GrayF32> undistorter = LensDistortionOps.changeCameraModel(
                AdjustmentType.EXPAND, BorderType.EXTENDED, intrinsic, new CameraPinhole(intrinsic),
                paramUndist, ImageType.single(GrayF32.class));
        detector.configure(new LensDistortionBrown(paramUndist), paramUndist.width, paramUndist.height, false);
        undistorter.apply(input, undistorted);
        detector.process(undistorted);
    } else {
        detector.process(input);
    }

    System.out.println("Total Found: " + detector.squares.size());
    DogArray<FoundFiducial> fiducials = detector.getFound();

    int N = Math.min(20, detector.squares.size());
    ListDisplayPanel squares = new ListDisplayPanel();
    for (int i = 0; i < N; i++) {
        squares.addImage(VisualizeBinaryData.renderBinary(detector.squares.get(i), false, null), " " + i);
        squares.addImage(ConvertBufferedImage.convertTo(detector.squaresGray.get(i), null), " " + i);
    }

    BufferedImage output = new BufferedImage(input.width, input.height, BufferedImage.TYPE_INT_RGB);
    ConvertBufferedImage.convertTo(input, output);
    Graphics2D g2 = output.createGraphics();
    g2.setColor(Color.RED);
    g2.setStroke(new BasicStroke(2));
    for (int i = 0; i < N; i++) {
        VisualizeShapes.drawArrowSubPixel(fiducials.get(i).distortedPixels, 3, 1, g2);
    }

    ShowImages.showWindow(output, "Binary", true);
    ShowImages.showWindow(squares, "Candidates", true);
}
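The detector above binarizes its input with a global Otsu threshold. For reference, a minimal sketch of using that thresholder on its own, assuming a GrayF32 image named input (the variable name is illustrative):

// Threshold a gray image with Otsu's method; "true" means pixels below the threshold become 1
InputToBinary<GrayF32> otsu = FactoryThresholdBinary.globalOtsu(0, 255, 1.0, true, GrayF32.class);
GrayU8 binary = new GrayU8(input.width, input.height);
otsu.process(input, binary);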
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
The class ExampleStereoTwoViewsOneCamera, method showPointCloud().
/**
 * Show results as a point cloud
 */
public static void showPointCloud(ImageGray disparity, BufferedImage left, Se3_F64 motion, DMatrixRMaj rectifiedK, DMatrixRMaj rectifiedR, int disparityMin, int disparityRange) {
    DisparityToColorPointCloud d2c = new DisparityToColorPointCloud();
    PointCloudWriter.CloudArraysF32 cloud = new PointCloudWriter.CloudArraysF32();

    double baseline = motion.getT().norm();
    d2c.configure(baseline, rectifiedK, rectifiedR, new DoNothing2Transform2_F64(), disparityMin, disparityRange);
    d2c.process(disparity, UtilDisparitySwing.wrap(left), cloud);

    CameraPinhole rectifiedPinhole = PerspectiveOps.matrixToPinhole(rectifiedK, disparity.width, disparity.height, null);

    // skew the view to make the structure easier to see
    Se3_F64 cameraToWorld = SpecialEuclideanOps_F64.eulerXyz(-baseline * 5, 0, 0, 0, 0.2, 0, null);

    PointCloudViewer pcv = VisualizeData.createPointCloudViewer();
    pcv.setCameraHFov(PerspectiveOps.computeHFov(rectifiedPinhole));
    pcv.setCameraToWorld(cameraToWorld);
    pcv.addCloud(cloud.cloudXyz, cloud.cloudRgb);
    pcv.setDotSize(1);
    pcv.setTranslationStep(baseline / 10);
    pcv.getComponent().setPreferredSize(new Dimension(left.getWidth(), left.getHeight()));
    ShowImages.showWindow(pcv.getComponent(), "Point Cloud", true);
}
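The viewer's field of view above is derived from the rectified calibration matrix. A minimal sketch of that conversion in isolation, assuming a 3x3 DMatrixRMaj calibration matrix K and the image dimensions width and height are already available:

// Recover a pinhole model from a calibration matrix, then compute the horizontal FOV in radians
CameraPinhole pinhole = PerspectiveOps.matrixToPinhole(K, width, height, null);
double hfovRadians = PerspectiveOps.computeHFov(pinhole);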
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
The class ExampleTrifocalStereoUncalibrated, method main().
public static void main(String[] args) {
    String name = "rock_leaves_";
    // String name = "mono_wall_";
    // String name = "minecraft_cave1_";
    // String name = "minecraft_distant_";
    // String name = "bobcats_";
    // String name = "chicken_";
    // String name = "turkey_";
    // String name = "rockview_";
    // String name = "pebbles_";
    // String name = "books_";
    // String name = "skull_";
    // String name = "triflowers_";

    BufferedImage buff01 = UtilImageIO.loadImageNotNull(UtilIO.pathExample("triple/" + name + "01.jpg"));
    BufferedImage buff02 = UtilImageIO.loadImageNotNull(UtilIO.pathExample("triple/" + name + "02.jpg"));
    BufferedImage buff03 = UtilImageIO.loadImageNotNull(UtilIO.pathExample("triple/" + name + "03.jpg"));

    Planar<GrayU8> color01 = ConvertBufferedImage.convertFrom(buff01, true, ImageType.pl(3, GrayU8.class));
    Planar<GrayU8> color02 = ConvertBufferedImage.convertFrom(buff02, true, ImageType.pl(3, GrayU8.class));
    Planar<GrayU8> color03 = ConvertBufferedImage.convertFrom(buff03, true, ImageType.pl(3, GrayU8.class));

    GrayU8 image01 = ConvertImage.average(color01, null);
    GrayU8 image02 = ConvertImage.average(color02, null);
    GrayU8 image03 = ConvertImage.average(color03, null);

    // using SURF features. Robust and fairly fast to compute
    DetectDescribePoint<GrayU8, TupleDesc_F64> detDesc = FactoryDetectDescribe.surfStable(
            new ConfigFastHessian(0, 4, 1000, 1, 9, 4, 2), null, null, GrayU8.class);

    // Associate features across all three views using previous example code
    var associateThree = new ExampleAssociateThreeView();
    associateThree.initialize(detDesc);
    associateThree.detectFeatures(image01, 0);
    associateThree.detectFeatures(image02, 1);
    associateThree.detectFeatures(image03, 2);

    System.out.println("features01.size = " + associateThree.features01.size);
    System.out.println("features02.size = " + associateThree.features02.size);
    System.out.println("features03.size = " + associateThree.features03.size);

    int width = image01.width, height = image01.height;
    System.out.println("Image Shape " + width + " x " + height);
    double cx = width/2.0;
    double cy = height/2.0;

    // The self-calibration step requires that the image coordinate system be centered in the image,
    // so the image center is subtracted from the feature coordinates. In many cases a principal
    // point of zero is assumed; this is a reasonable assumption for almost all modern cameras, and
    // errors in the principal point tend to materialize as translations and are non-fatal.
    associateThree.locations01.forEach(p -> p.setTo(p.x - cx, p.y - cy));
    associateThree.locations02.forEach(p -> p.setTo(p.x - cx, p.y - cy));
    associateThree.locations03.forEach(p -> p.setTo(p.x - cx, p.y - cy));

    // Associate features in the three views using image information alone
    DogArray<AssociatedTripleIndex> associatedIdx = associateThree.threeViewPairwiseAssociate();

    // Convert the matched indexes into AssociatedTriple, which contain the actual pixel coordinates
    // in a format that the SFM algorithms can process
    var associated = new DogArray<>(AssociatedTriple::new);
    associatedIdx.forEach(p -> associated.grow().setTo(
            associateThree.locations01.get(p.a),
            associateThree.locations02.get(p.b),
            associateThree.locations03.get(p.c)));
    System.out.println("Total Matched Triples = " + associated.size);

    var model = new TrifocalTensor();
    List<AssociatedTriple> inliers = ExampleComputeTrifocalTensor.computeTrifocal(associated, model);
    System.out.println("Remaining after RANSAC " + inliers.size());

    // Show the associations which remain after RANSAC
    var triplePanel = new AssociatedTriplePanel();
    triplePanel.setPixelOffset(cx, cy);
    triplePanel.setImages(buff01, buff02, buff03);
    triplePanel.setAssociation(inliers);
    ShowImages.showWindow(triplePanel, "Associations", true);

    // Re-estimate the tensor using all the inliers. There is no need to re-scale the input
    // because the estimator adjusts the input on its own.
    var configTri = new ConfigTrifocal();
    configTri.which = EnumTrifocal.ALGEBRAIC_7;
    configTri.converge.maxIterations = 100;
    Estimate1ofTrifocalTensor trifocalEstimator = FactoryMultiView.trifocal_1(configTri);
    if (!trifocalEstimator.process(inliers, model))
        throw new RuntimeException("Estimator failed");
    model.print();

    DMatrixRMaj P1 = CommonOps_DDRM.identity(3, 4);
    DMatrixRMaj P2 = new DMatrixRMaj(3, 4);
    DMatrixRMaj P3 = new DMatrixRMaj(3, 4);
    MultiViewOps.trifocalToCameraMatrices(model, P2, P3);

    // Most of the time this refinement step makes little difference, but in some edge cases it
    // appears to help convergence
    System.out.println("Refining projective camera matrices");
    RefineThreeViewProjective refineP23 = FactoryMultiView.threeViewRefine(null);
    if (!refineP23.process(inliers, P2, P3, P2, P3))
        throw new RuntimeException("Can't refine P2 and P3!");

    var selfcalib = new SelfCalibrationLinearDualQuadratic(1.0);
    selfcalib.addCameraMatrix(P1);
    selfcalib.addCameraMatrix(P2);
    selfcalib.addCameraMatrix(P3);

    var listPinhole = new ArrayList<CameraPinhole>();
    GeometricResult result = selfcalib.solve();
    if (GeometricResult.SOLVE_FAILED != result) {
        for (int i = 0; i < 3; i++) {
            Intrinsic c = selfcalib.getIntrinsics().get(i);
            CameraPinhole p = new CameraPinhole(c.fx, c.fy, 0, 0, 0, width, height);
            listPinhole.add(p);
        }
    } else {
        System.out.println("Self calibration failed!");
        for (int i = 0; i < 3; i++) {
            CameraPinhole p = new CameraPinhole(width/2, width/2, 0, 0, 0, width, height);
            listPinhole.add(p);
        }
    }

    // Print the estimated pinhole parameters for each view
    for (int i = 0; i < 3; i++) {
        CameraPinhole r = listPinhole.get(i);
        System.out.println("fx=" + r.fx + " fy=" + r.fy + " skew=" + r.skew);
    }

    System.out.println("Projective to metric");
    // convert the camera matrices from projective to metric
    // storage for the rectifying homography
    var H = new DMatrixRMaj(4, 4);
    if (!MultiViewOps.absoluteQuadraticToH(selfcalib.getQ(), H))
        throw new RuntimeException("Projective to metric failed");

    var K = new DMatrixRMaj(3, 3);
    var worldToView = new ArrayList<Se3_F64>();
    for (int i = 0; i < 3; i++) {
        worldToView.add(new Se3_F64());
    }
    // K can be ignored since we already have the intrinsics
    MultiViewOps.projectiveToMetric(P1, H, worldToView.get(0), K);
    MultiViewOps.projectiveToMetric(P2, H, worldToView.get(1), K);
    MultiViewOps.projectiveToMetric(P3, H, worldToView.get(2), K);

    // scale is arbitrary. Set the maximum translation to 1
    adjustTranslationScale(worldToView);

    // Construct the bundle adjustment data structure
    var structure = new SceneStructureMetric(false);
    structure.initialize(3, 3, inliers.size());
    var observations = new SceneObservations();
    observations.initialize(3);

    for (int i = 0; i < listPinhole.size(); i++) {
        BundlePinholeSimplified bp = new BundlePinholeSimplified();
        bp.f = listPinhole.get(i).fx;
        structure.setCamera(i, false, bp);
        structure.setView(i, i, i == 0, worldToView.get(i));
    }
    for (int i = 0; i < inliers.size(); i++) {
        AssociatedTriple t = inliers.get(i);
        observations.getView(0).add(i, (float) t.p1.x, (float) t.p1.y);
        observations.getView(1).add(i, (float) t.p2.x, (float) t.p2.y);
        observations.getView(2).add(i, (float) t.p3.x, (float) t.p3.y);
        structure.connectPointToView(i, 0);
        structure.connectPointToView(i, 1);
        structure.connectPointToView(i, 2);
    }

    // Initial estimate for the 3D point locations
    triangulatePoints(structure, observations);

    ConfigLevenbergMarquardt configLM = new ConfigLevenbergMarquardt();
    configLM.dampeningInitial = 1e-3;
    configLM.hessianScaling = false;
    ConfigBundleAdjustment configSBA = new ConfigBundleAdjustment();
    configSBA.configOptimizer = configLM;

    // Create and configure the bundle adjustment solver
    BundleAdjustment<SceneStructureMetric> bundleAdjustment = FactoryMultiView.bundleSparseMetric(configSBA);
    // prints out useful debugging information that lets you know how well it's converging
    // bundleAdjustment.setVerbose(System.out, 0);
    // convergence criteria
    bundleAdjustment.configure(1e-6, 1e-6, 100);
    bundleAdjustment.setParameters(structure, observations);
    bundleAdjustment.optimize(structure);

    // See if the solution is physically possible. If not, fix it and run bundle adjustment again
    checkBehindCamera(structure, observations, bundleAdjustment);

    // It's very difficult to find the best solution due to the number of local minima. In the
    // three-view case a small translation is often virtually indistinguishable from a small
    // rotation, and convergence can be improved by considering that possibility.

    // Now that we have a decent solution, prune the worst outliers to improve the fit quality even more
    var pruner = new PruneStructureFromSceneMetric(structure, observations);
    pruner.pruneObservationsByErrorRank(0.7);
    pruner.pruneViews(10);
    pruner.pruneUnusedMotions();
    pruner.prunePoints(1);
    bundleAdjustment.setParameters(structure, observations);
    bundleAdjustment.optimize(structure);

    System.out.println("Final Views");
    for (int i = 0; i < 3; i++) {
        BundlePinholeSimplified cp = structure.getCameras().get(i).getModel();
        Vector3D_F64 T = structure.getParentToView(i).T;
        System.out.printf("[ %d ] f = %5.1f T=%s\n", i, cp.f, T.toString());
    }

    System.out.println("\n\nComputing Stereo Disparity");
    BundlePinholeSimplified cp = structure.getCameras().get(0).getModel();
    var intrinsic01 = new CameraPinholeBrown();
    intrinsic01.fsetK(cp.f, cp.f, 0, cx, cy, width, height);
    intrinsic01.fsetRadial(cp.k1, cp.k2);

    cp = structure.getCameras().get(1).getModel();
    var intrinsic02 = new CameraPinholeBrown();
    intrinsic02.fsetK(cp.f, cp.f, 0, cx, cy, width, height);
    intrinsic02.fsetRadial(cp.k1, cp.k2);

    Se3_F64 leftToRight = structure.getParentToView(1);

    // TODO dynamic max disparity
    computeStereoCloud(image01, image02, color01, color02, intrinsic01, intrinsic02, leftToRight, 0, 250);
}
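This example moves between CameraPinhole objects and 3x3 calibration matrices several times. As a reference, a minimal round-trip sketch using PerspectiveOps; the intrinsic values are illustrative, and the null cast assumes the usual DMatrixRMaj/FMatrixRMaj overload pair so the compiler can pick the double-precision version:

// Illustrative values: fx = fy = 500, zero skew, centered coordinates, 800x600 image
CameraPinhole pinhole = new CameraPinhole(500, 500, 0, 0, 0, 800, 600);
DMatrixRMaj K = PerspectiveOps.pinholeToMatrix(pinhole, (DMatrixRMaj) null);
CameraPinhole recovered = PerspectiveOps.matrixToPinhole(K, 800, 600, null);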
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
The class ExampleFisheyeToPinhole, method main().
public static void main(String[] args) {
    // Path to image data and calibration data
    String fisheyePath = UtilIO.pathExample("fisheye/theta/");

    // load the fisheye camera parameters
    CameraUniversalOmni fisheyeModel = CalibrationIO.load(new File(fisheyePath, "front.yaml"));

    // Specify what the pinhole camera should look like
    CameraPinhole pinholeModel = new CameraPinhole(400, 400, 0, 300, 300, 600, 600);

    // Create the transform from pinhole to fisheye views
    LensDistortionNarrowFOV pinholeDistort = new LensDistortionPinhole(pinholeModel);
    LensDistortionWideFOV fisheyeDistort = new LensDistortionUniversalOmni(fisheyeModel);
    NarrowToWidePtoP_F32 transform = new NarrowToWidePtoP_F32(pinholeDistort, fisheyeDistort);

    // Load fisheye RGB image
    BufferedImage bufferedFisheye = UtilImageIO.loadImage(fisheyePath, "front_table.jpg");
    Planar<GrayU8> fisheyeImage = ConvertBufferedImage.convertFrom(bufferedFisheye, true, ImageType.pl(3, GrayU8.class));

    // Create the image distorter which will render the image
    InterpolatePixel<Planar<GrayU8>> interp = FactoryInterpolation.createPixel(0, 255, InterpolationType.BILINEAR, BorderType.ZERO, fisheyeImage.getImageType());
    ImageDistort<Planar<GrayU8>, Planar<GrayU8>> distorter = FactoryDistort.distort(false, interp, fisheyeImage.getImageType());

    // Pass in the transform created above
    distorter.setModel(new PointToPixelTransform_F32(transform));

    // Render the image. The camera will have a rotation of 0 and will thus be looking straight forward
    Planar<GrayU8> pinholeImage = fisheyeImage.createNew(pinholeModel.width, pinholeModel.height);
    distorter.apply(fisheyeImage, pinholeImage);
    BufferedImage bufferedPinhole0 = ConvertBufferedImage.convertTo(pinholeImage, null, true);

    // rotate the virtual pinhole camera to the right
    transform.setRotationWideToNarrow(ConvertRotation3D_F32.eulerToMatrix(EulerType.YXZ, 0.8f, 0, 0, null));
    distorter.apply(fisheyeImage, pinholeImage);
    BufferedImage bufferedPinhole1 = ConvertBufferedImage.convertTo(pinholeImage, null, true);

    // Display the results
    ListDisplayPanel panel = new ListDisplayPanel();
    panel.addImage(bufferedPinhole0, "Pinhole Forward");
    panel.addImage(bufferedPinhole1, "Pinhole Right");
    panel.addImage(bufferedFisheye, "Fisheye");
    panel.setPreferredSize(new Dimension(600, 450));
    ShowImages.showWindow(panel, "Fisheye to Pinhole", true);
}
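The narrow-to-wide transform can also be queried for individual pixels, which is handy for debugging a mapping like the one above. A minimal sketch, assuming NarrowToWidePtoP_F32 exposes the standard Point2Transform2_F32 compute method and reusing the transform built in this example:

// Where does the center of the 600x600 virtual pinhole view land in the fisheye image?
Point2D_F32 fisheyePixel = new Point2D_F32();
transform.compute(300, 300, fisheyePixel);
System.out.println("pinhole (300,300) -> fisheye " + fisheyePixel);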