Use of boofcv.struct.calib.CameraPinholeBrown in project BoofCV by lessthanoptimal.
The class TestLensDistortionOps_F64, method transformChangeModel_NONE_modified.
@Test
void transformChangeModel_NONE_modified() {
	// distorted pixel in original image
	double pixelX = 12.5, pixelY = height - 3;

	CameraPinholeBrown orig = new CameraPinholeBrown()
			.fsetK(300, 320, 0, 150, 130, width, height)
			.fsetRadial(0.1, 0.05);
	CameraPinhole desired = new CameraPinhole(orig);

	Point2Transform2_F64 distToNorm = LensDistortionFactory.narrow(orig).undistort_F64(true, false);
	Point2D_F64 norm = new Point2D_F64();
	distToNorm.compute(pixelX, pixelY, norm);

	CameraPinhole adjusted = new CameraPinhole();
	Point2Transform2_F64 distToAdj = LensDistortionOps_F64.transformChangeModel(
			AdjustmentType.NONE, orig, desired, false, adjusted);

	Point2D_F64 adjPixel = new Point2D_F64();
	Point2D_F64 normFound = new Point2D_F64();
	distToAdj.compute(pixelX, pixelY, adjPixel);
	PerspectiveOps.convertPixelToNorm(adjusted, adjPixel, normFound);

	// see if the normalized image coordinates are the same
	assertEquals(norm.x, normFound.x, 1e-3);
	assertEquals(norm.y, normFound.y, 1e-3);
}
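For context, the boolean flags passed to undistort_F64 and distort_F64 select pixel versus normalized coordinates on each side of the transform. Below is a minimal sketch, not taken from the test itself, that round-trips a pixel through the Brown distortion model; the image size and all parameter values are illustrative assumptions.

	// Hedged sketch: round-trip an ideal pixel through the Brown distortion model.
	// The 320x240 image size and parameter values are illustrative assumptions.
	CameraPinholeBrown model = new CameraPinholeBrown()
			.fsetK(300, 320, 0, 150, 130, 320, 240)
			.fsetRadial(0.1, 0.05);

	// (true, true) = pixel coordinates in, pixel coordinates out
	Point2Transform2_F64 distort = LensDistortionFactory.narrow(model).distort_F64(true, true);
	Point2Transform2_F64 undistort = LensDistortionFactory.narrow(model).undistort_F64(true, true);

	Point2D_F64 distorted = new Point2D_F64();
	Point2D_F64 recovered = new Point2D_F64();
	distort.compute(100, 120, distorted);                   // ideal -> distorted pixel
	undistort.compute(distorted.x, distorted.y, recovered); // distorted -> ideal pixel
	// recovered should be (100, 120) to within the iterative undistortion tolerance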
Use of boofcv.struct.calib.CameraPinholeBrown in project BoofCV by lessthanoptimal.
The class TestCalibrateMonoPlanar, method fullBasic.
/**
 * Give it a fake feature detector and a fairly benign scenario, then see if it can correctly
 * estimate the camera parameters.
 */
@Test
void fullBasic() {
	CalibrateMonoPlanar alg = new CalibrateMonoPlanar(layout);
//	alg.setVerbose(System.out, 0);
	alg.configurePinhole(true, 2, true);

	for (int i = 0; i < targetToCamera.size(); i++) {
		alg.addImage(createFakeObservations(i));
	}

	CameraPinholeBrown found = alg.process();

	// NOTE: When the optimization switched from a dense method + SVD to a sparse Cholesky solver,
	// its ability to handle this test scenario got worse. No change has been noticed on real-world data.
	assertEquals(intrinsic.fx, found.fx, intrinsic.width * 1e-3);
	assertEquals(intrinsic.fy, found.fy, intrinsic.width * 1e-3);
	assertEquals(intrinsic.cx, found.cx, intrinsic.width * 1e-3);
	assertEquals(intrinsic.cy, found.cy, intrinsic.width * 1e-3);
	assertEquals(intrinsic.skew, found.skew, intrinsic.width * 1e-3);
	assertEquals(intrinsic.radial[0], found.radial[0], 1e-5);
	assertEquals(intrinsic.radial[1], found.radial[1], 1e-5);
	assertEquals(intrinsic.t1, found.t1, 1e-5);
	assertEquals(intrinsic.t2, found.t2, 1e-5);
	assertEquals(intrinsic.width, found.width, 1e-3);
	assertEquals(intrinsic.height, found.height, 1e-3);
}
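Once calibration succeeds, the resulting CameraPinholeBrown is typically saved to disk rather than kept in memory. A hedged sketch using CalibrationIO, the same serializer VisualizeSquareFiducial loads from further below; the file name here is hypothetical.

	// Sketch (hypothetical file name): persist and reload the calibrated model
	CameraPinholeBrown intrinsic = alg.process();
	CalibrationIO.save(intrinsic, "intrinsic.yaml");                    // BoofCV's YAML format
	CameraPinholeBrown reloaded = CalibrationIO.load("intrinsic.yaml");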
Use of boofcv.struct.calib.CameraPinholeBrown in project BoofCV by lessthanoptimal.
The class TestMultiBaselineStereoIndependent, method simulate_constant_disparity.
/**
 * The plane being viewed and the camera's image plane are parallel, causing the disparity to have a
 * constant value and making it easy to check for correctness.
 *
 * @param tolFilled What fraction of the fused image should be filled
 * @param tolCorrect Out of the filled pixels, what fraction need to have the correct disparity
 */
void simulate_constant_disparity(Se3_F64 world_to_view1, Se3_F64 world_to_view2,
								 double tolFilled, double tolCorrect) {
	// Each camera is different
	List<CameraPinholeBrown> listIntrinsic = new ArrayList<>();
	listIntrinsic.add(new CameraPinholeBrown().fsetK(150, 140, 0, 105, 100, 210, 200).fsetRadial(0.02, -0.003));
	listIntrinsic.add(new CameraPinholeBrown().fsetK(151, 142, 0, 107.5, 102.5, 215, 205).fsetRadial(0.03, -0.001));
	listIntrinsic.add(new CameraPinholeBrown().fsetK(149, 141, 0, 102.5, 107.5, 205, 215).fsetRadial(0.001, 0.003));

	// Create the scene. This will be used as input into MultiViewToFusedDisparity and in the simulator
	var scene = new SceneStructureMetric(true);
	scene.initialize(3, 3, 0);
	scene.setCamera(0, true, listIntrinsic.get(0));
	scene.setCamera(1, true, listIntrinsic.get(1));
	scene.setCamera(2, true, listIntrinsic.get(2));

	// All views look head-on at the target. The second and third views are offset to ensure full
	// coverage and that all views are being incorporated; otherwise there would be large gaps
	scene.setView(0, 0, true, eulerXyz(0, 0, 0, 0.0, 0, 0, null));
	scene.setView(1, 1, true, world_to_view1);
	scene.setView(2, 2, true, world_to_view2);

	var lookup = new MockLookUp();
	var alg = new MultiBaselineStereoIndependent<>(lookup, ImageType.SB_F32);

	// Disparity isn't mocked because of how complex that would be to pull off. This makes ensuring
	// fill-in a bit of an inexact science
	var configDisp = new ConfigDisparityBMBest5();
	configDisp.errorType = DisparityError.SAD;
	configDisp.texture = 0.05;
	configDisp.disparityMin = 20;
	configDisp.disparityRange = 80;
	alg.stereoDisparity = FactoryStereoDisparity.blockMatchBest5(configDisp, GrayF32.class, GrayF32.class);

	// Textured target that stereo will work well on
	var texture = new GrayF32(100, 100);
	ImageMiscOps.fillUniform(texture, rand, 50, 255);

	SimulatePlanarWorld sim = new SimulatePlanarWorld();
	sim.addSurface(eulerXyz(0, 0, 2, 0, Math.PI, 0, null), 3, texture);

	List<GrayF32> images = new ArrayList<>();
	TIntObjectMap<String> sbaIndexToViewID = new TIntObjectHashMap<>();
	for (int i = 0; i < listIntrinsic.size(); i++) {
		sbaIndexToViewID.put(i, i + "");
		sim.setCamera(listIntrinsic.get(i));
		sim.setWorldToCamera(scene.motions.get(i).motion);
		images.add(sim.render().clone());

		if (visualize)
			ShowImages.showWindow(images.get(images.size() - 1), "Frame " + i);
	}
	lookup.images = images;

	assertTrue(alg.process(scene, 0, DogArray_I32.array(1, 2), sbaIndexToViewID::get));

	GrayF32 found = alg.getFusedDisparity();
	assertEquals(listIntrinsic.get(0).width, found.width);
	assertEquals(listIntrinsic.get(0).height, found.height);

	if (visualize) {
		ShowImages.showWindow(VisualizeImageData.disparity(found, null, 100, 0x00FF00), "Disparity");
		BoofMiscOps.sleep(60_000);
	}

	DisparityParameters param = alg.getFusedParam();

	// Check the results. Since the target fills the view and lies at a known constant Z, correctness
	// can be checked directly. However, since a real disparity algorithm is being used, its output
	// will not be perfect
	int totalFilled = 0;
	int totalCorrect = 0;
	for (int y = 0; y < found.height; y++) {
		for (int x = 0; x < found.width; x++) {
			float d = found.get(x, y);
			assertTrue(d >= 0);
			if (d >= param.disparityRange)
				continue; // invalid disparity, pixel was not filled
			double Z = param.baseline * param.pinhole.fx / (d + param.disparityMin);
			if (Math.abs(Z - 2.0) <= 0.1)
				totalCorrect++;
			totalFilled++;
		}
	}
	int N = found.width * found.height;
	assertTrue(N * tolFilled <= totalFilled);
	assertTrue(totalFilled * tolCorrect <= totalCorrect);
}
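The check in the loop above inverts the standard rectified-stereo relation: a plane parallel to the image plane at depth Z produces the constant total disparity baseline*fx/Z, and the stored pixel value excludes disparityMin. A small worked sketch, where the baseline value is an illustrative assumption rather than a number from the test:

	// Worked sketch of the depth/disparity relation used in the loop above
	double fx = 150;        // focal length of camera 0, in pixels
	double baseline = 0.5;  // assumed rectified baseline in world units (not from the test)
	double Z = 2.0;         // the plane's known depth

	double totalDisparity = baseline * fx / Z;                 // constant across the plane
	double storedValue = totalDisparity - 20;                  // image value when disparityMin = 20
	double recoveredZ = baseline * fx / (storedValue + 20);    // = 2.0, matching the test's check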
Use of boofcv.struct.calib.CameraPinholeBrown in project BoofCV by lessthanoptimal.
The class TestMultiBaselineStereoIndependent, method handleOneCameraManyViews.
/**
 * In this scene there is only one camera for several views
 */
@Test
void handleOneCameraManyViews() {
	var scene = new SceneStructureMetric(true);
	scene.initialize(1, 3, 0);
	scene.setCamera(0, true, new CameraPinholeBrown().fsetK(30, 30, 0, 25, 25, 50, 50));
	for (int i = 0; i < 3; i++) {
		scene.setView(i, 0, true, eulerXyz(i, 0, 0, 0, 0, 0, null));
	}

	var alg = new MultiBaselineStereoIndependent<>(ImageType.SB_F32);
	var configDisp = new ConfigDisparityBMBest5();
	configDisp.errorType = DisparityError.SAD;
	configDisp.disparityRange = 5;
	alg.stereoDisparity = FactoryStereoDisparity.blockMatchBest5(configDisp, GrayF32.class, GrayF32.class);

	List<GrayF32> images = new ArrayList<>();
	TIntObjectMap<String> sbaIndexToViewID = new TIntObjectHashMap<>();
	for (int i = 0; i < 3; i++) {
		images.add(new GrayF32(50, 50));
		sbaIndexToViewID.put(i, i + "");
	}
	alg.lookUpImages = new MockLookUp(images);

	// Override so that it will always be happy
	alg.performFusion = new MultiBaselineDisparityMedian() {
		@Override public boolean process(GrayF32 disparity) {
			return true;
		}
	};

	// just see if it blows up
	assertTrue(alg.process(scene, 0, DogArray_I32.array(1, 2), sbaIndexToViewID::get));
}
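MockLookUp is defined elsewhere in the test class and not shown in this excerpt. A plausible minimal implementation, offered as an assumption rather than the actual source, treats each view's string ID as an index into the image list:

	// Assumed sketch of the MockLookUp helper used above (the real class is not shown here).
	// View IDs were generated as i + "", so parsing them recovers the list index.
	static class MockLookUp implements LookUpImages {
		List<GrayF32> images = new ArrayList<>();

		MockLookUp() {}
		MockLookUp(List<GrayF32> images) { this.images = images; }

		@Override public boolean loadShape(String name, ImageDimension shape) {
			GrayF32 image = images.get(Integer.parseInt(name));
			shape.setTo(image.width, image.height);
			return true;
		}

		@Override public <LT extends ImageBase<LT>> boolean loadImage(String name, LT output) {
			((GrayF32)output).setTo(images.get(Integer.parseInt(name)));
			return true;
		}
	}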
Use of boofcv.struct.calib.CameraPinholeBrown in project BoofCV by lessthanoptimal.
The class VisualizeSquareFiducial, method process.
public void process(String nameImage, @Nullable String nameIntrinsic) {
	CameraPinholeBrown intrinsic = nameIntrinsic == null ? null :
			(CameraPinholeBrown)CalibrationIO.load(nameIntrinsic);
	GrayF32 input = Objects.requireNonNull(UtilImageIO.loadImage(nameImage, GrayF32.class));
	GrayF32 undistorted = new GrayF32(input.width, input.height);

	Detector detector = new Detector();

	if (intrinsic != null) {
		var paramUndist = new CameraPinholeBrown();
		ImageDistort<GrayF32, GrayF32> undistorter = LensDistortionOps.changeCameraModel(
				AdjustmentType.EXPAND, BorderType.EXTENDED, intrinsic, new CameraPinhole(intrinsic),
				paramUndist, ImageType.single(GrayF32.class));
		detector.configure(new LensDistortionBrown(paramUndist), paramUndist.width, paramUndist.height, false);
		undistorter.apply(input, undistorted);
	} else {
		undistorted.setTo(input);
	}

	detector.process(undistorted);

	System.out.println("Total Found: " + detector.squares.size());
	DogArray<FoundFiducial> fiducials = detector.getFound();

	int N = Math.min(20, detector.squares.size());
	ListDisplayPanel squares = new ListDisplayPanel();
	for (int i = 0; i < N; i++) {
		squares.addImage(ConvertBufferedImage.convertTo(detector.squares.get(i), null), " " + i);
	}

	BufferedImage output = new BufferedImage(input.width, input.height, BufferedImage.TYPE_INT_RGB);
	VisualizeBinaryData.renderBinary(detector.getBinary(), false, output);
	Graphics2D g2 = output.createGraphics();
	g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
	g2.setColor(Color.RED);
	g2.setStroke(new BasicStroke(2));

	if (intrinsic != null) {
		Point2Transform2_F64 add_p_to_p = LensDistortionFactory.narrow(intrinsic).distort_F64(true, true);
		for (int i = 0; i < N; i++) {
			// add back in lens distortion
			Quadrilateral_F64 q = fiducials.get(i).distortedPixels;
			apply(add_p_to_p, q.a, q.a);
			apply(add_p_to_p, q.b, q.b);
			apply(add_p_to_p, q.c, q.c);
			apply(add_p_to_p, q.d, q.d);
			VisualizeShapes.draw(q, g2);
		}
	}

	BufferedImage outputGray = new BufferedImage(input.width, input.height, BufferedImage.TYPE_INT_RGB);
	ConvertBufferedImage.convertTo(undistorted, outputGray);
	g2 = outputGray.createGraphics();
	g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
	for (int i = 0; i < N; i++) {
		// draw the detected quadrilateral
		Quadrilateral_F64 q = fiducials.get(i).distortedPixels;
//		g2.setStroke(new BasicStroke(2));
//		VisualizeBinaryData.render(detector.getSquareDetector().getUsedContours(), Color.BLUE, outputGray);
		VisualizeShapes.drawArrowSubPixel(q, 3, 1, g2);
	}

	ShowImages.showWindow(output, "Binary");
	ShowImages.showWindow(outputGray, "Gray");
	ShowImages.showWindow(squares, "Candidates");
}
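The apply(...) helper used when re-applying lens distortion is referenced above but not shown in this excerpt. A minimal sketch of what it plausibly does, offered as an assumption: evaluate the point transform and write the result in place.

	// Assumed sketch of the apply(...) helper referenced above. Passing src and dst
	// as the same object is safe because compute() reads its double inputs first.
	private static void apply(Point2Transform2_F64 transform, Point2D_F64 src, Point2D_F64 dst) {
		transform.compute(src.x, src.y, dst);
	}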