Use of boofcv.alg.distort.brown.LensDistortionBrown in project BoofCV by lessthanoptimal.
Class GenericFiducialTrackerChecks, method checkTracking.
/**
 * Send it through a sequence of images and see if it blows up.
 *
 * Could improve the tracking test by resetting the tracker and seeing if the output changes.
 */
@Test
void checkTracking() {
    boolean distorted = false;
    LensDistortionBrown lensDistorted = new LensDistortionBrown(loadDistortion(distorted));

    for (ImageType type : types) {
        FiducialDetector detector = createDetector(type);
        Se3_F64 pose = new Se3_F64();

        for (int timeStep = 0; timeStep < 10; timeStep++) {
            Se3_F64 markerToWorld = eulerXyz(-0.05 * timeStep, 0, 1.5, 0.1, Math.PI, 0.15 * timeStep, null);
            ImageBase imageD = renderImage(loadDistortion(distorted), markerToWorld, type);

            detector.setLensDistortion(lensDistorted, imageD.width, imageD.height);
            detector.detect(imageD);
            // ShowImages.showBlocking(imageD, "Distorted", 1_000);

            assertEquals(1, detector.totalFound());
            detector.getFiducialToCamera(0, pose);
            pose.T.scale(markerToWorld.T.norm()/pose.T.norm());

            Se3_F64 diff = markerToWorld.concat(pose.invert(null), null);
            double theta = ConvertRotation3D_F64.matrixToRodrigues(diff.R, null).theta;
            assertEquals(0, diff.T.norm(), tolAccuracyT);
            assertEquals(Math.PI, theta, tolAccuracyTheta);
        }
    }
}
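The accuracy check at the end of the loop composes the ground-truth transform with the inverse of the estimate and reads off the residual translation and the Rodrigues rotation angle. Below is a minimal, self-contained sketch of that pose-difference computation; the class name is made up and the "found" pose is simply copied from the truth, so both errors come out near zero:

import georegression.geometry.ConvertRotation3D_F64;
import georegression.struct.se.Se3_F64;
import georegression.transform.se.SpecialEuclideanOps_F64;

public class PoseErrorSketch {
    public static void main( String[] args ) {
        // Ground-truth marker pose, built the same way as in the test above
        Se3_F64 expected = SpecialEuclideanOps_F64.eulerXyz(-0.05, 0, 1.5, 0.1, Math.PI, 0.15, null);
        // Stand-in for the detector's estimate
        Se3_F64 found = expected.copy();

        // diff = expected * found^-1, which is the identity when the two poses agree
        Se3_F64 diff = expected.concat(found.invert(null), null);
        double errorT = diff.T.norm();
        double errorTheta = ConvertRotation3D_F64.matrixToRodrigues(diff.R, null).theta;
        System.out.printf("translation error = %.3e, rotation error = %.3e rad%n", errorT, errorTheta);
    }
}

In the test itself the expected angle is Math.PI rather than zero, which suggests the renderer and the detector use opposite marker-facing conventions.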
Use of boofcv.alg.distort.brown.LensDistortionBrown in project BoofCV by lessthanoptimal.
Class SimulatePlanarWorld, method setCamera.
public void setCamera( CameraPinholeBrown model ) {
    LensDistortionNarrowFOV factory = new LensDistortionBrown(model);
    setCamera(factory, model.width, model.height);
}
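As a quick usage sketch (the intrinsic values are made up), a Brown camera model can be handed directly to the simulator, which wraps it in LensDistortionBrown exactly as shown above:

import boofcv.simulation.SimulatePlanarWorld;
import boofcv.struct.calib.CameraPinholeBrown;

public class SimulateCameraSketch {
    public static void main( String[] args ) {
        // Hypothetical 640x480 camera with mild radial distortion
        var model = new CameraPinholeBrown();
        model.fsetK(500, 500, 0, 320, 240, 640, 480);
        model.fsetRadial(-0.15, 0.07);

        var world = new SimulatePlanarWorld();
        world.setCamera(model); // delegates to setCamera(LensDistortionNarrowFOV, width, height)
        world.render(); // renders the (here empty) scene through the distorted camera
    }
}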
Use of boofcv.alg.distort.brown.LensDistortionBrown in project BoofCV by lessthanoptimal.
Class ColorizeMultiViewStereoResults, method processScenePoints.
/**
 * Looks up the colors for all the points in the scene by reprojecting them back onto their original images.
 *
 * @param scene (Input) Scene's structure
 * @param indexToId (Input) Convert view index to view ID
 * @param indexColor (Output) RGB values are passed through to this function.
 */
public void processScenePoints( SceneStructureMetric scene,
                                BoofLambdas.IndexToString indexToId,
                                BoofLambdas.IndexRgbConsumer indexColor ) {
    // Loading images is expensive, so when we look up the color of each pixel we want to process all
    // features inside the same image at once. Unfortunately there is no fast way to look up all features
    // by image, so a lookup table is constructed below.
    List<DogArray_I32> lookupPointsByView = new ArrayList<>();
    for (int i = 0; i < scene.views.size; i++) {
        lookupPointsByView.add(new DogArray_I32());
    }

    // Add the first view each point was seen in to the list
    for (int pointIdx = 0; pointIdx < scene.points.size; pointIdx++) {
        SceneStructureCommon.Point p = scene.points.get(pointIdx);
        lookupPointsByView.get(p.views.get(0)).add(pointIdx);
    }

    // TODO in the future generalize this for 3D and 4D points
    var iterator = new ScenePointsSetIterator<>(new PointIndex4D_F64());
    var world_to_view = new Se3_F64();
    for (int viewIdx = 0; viewIdx < lookupPointsByView.size(); viewIdx++) {
        // Load the image
        checkTrue(lookupImages.loadImage(indexToId.process(viewIdx), image), "Failed to load image");

        // Set up the iterator for this image
        iterator.initialize(scene, lookupPointsByView.get(viewIdx));

        // Get the view that is being processed
        SceneStructureMetric.View v = scene.views.get(viewIdx);

        // Set up the camera projection model using the bundle adjustment model directly
        BundleAdjustmentOps.convert(scene.getViewCamera(v).model, image.width, image.height, intrinsic);
        Point2Transform2_F64 norm_to_pixel = new LensDistortionBrown(intrinsic).distort_F64(false, true);

        // Get the transform from world/cloud to this view
        scene.getWorldToView(v, world_to_view, tmp);

        // Grab the colorized points from this view
        colorizer.process4(image, iterator, world_to_view, norm_to_pixel, indexColor);
    }
}
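The projection that colorizer.process4() performs for each point reduces to three steps: transform the point into the view with world_to_view, perspective-divide, then push the normalized coordinate through norm_to_pixel. A hand-rolled sketch for a single point follows; the intrinsics are hypothetical, and while the scene stores homogeneous 4D points, a plain 3D point keeps the example short:

import boofcv.alg.distort.brown.LensDistortionBrown;
import boofcv.struct.calib.CameraPinholeBrown;
import boofcv.struct.distort.Point2Transform2_F64;
import georegression.struct.point.Point2D_F64;
import georegression.struct.point.Point3D_F64;
import georegression.struct.se.Se3_F64;
import georegression.transform.se.SePointOps_F64;

public class ReprojectPointSketch {
    public static void main( String[] args ) {
        // Hypothetical Brown camera model
        var intrinsic = new CameraPinholeBrown();
        intrinsic.fsetK(500, 500, 0, 320, 240, 640, 480);
        intrinsic.fsetRadial(-0.1, 0.05);

        // (false, true) = normalized image coordinates in, distorted pixels out
        Point2Transform2_F64 normToPixel = new LensDistortionBrown(intrinsic).distort_F64(false, true);

        var world_to_view = new Se3_F64(); // identity here; comes from scene.getWorldToView() above
        var worldPt = new Point3D_F64(0.1, -0.2, 2.0);

        // world -> camera frame, perspective divide, then apply distortion
        var cameraPt = new Point3D_F64();
        SePointOps_F64.transform(world_to_view, worldPt, cameraPt);
        var pixel = new Point2D_F64();
        normToPixel.compute(cameraPt.x/cameraPt.z, cameraPt.y/cameraPt.z, pixel);
        System.out.println("lands at pixel " + pixel);
    }
}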
Use of boofcv.alg.distort.brown.LensDistortionBrown in project BoofCV by lessthanoptimal.
Class MultiViewStereoFromKnownSceneStructure, method computeFusedDisparityAddCloud.
/**
 * Combine stereo information from all images in this cluster to compute a disparity image and add it to the cloud.
 */
boolean computeFusedDisparityAddCloud( SceneStructureMetric scene, ViewInfo center,
                                       TIntObjectMap<String> sbaIndexToName, DogArray_I32 pairIndexes ) {
    if (!computeFused.process(scene, center.relations.indexSba, pairIndexes, sbaIndexToName::get)) {
        if (verbose != null)
            verbose.println("FAILED: fused disparity. center.index=" + center.index);
        return false;
    }

    // The fused disparity doesn't compute a mask since all invalid pixels are marked as invalid using
    // the disparity value
    GrayF32 disparity = computeFused.fusedDisparity;
    dummyMask.reshape(disparity);
    ImageMiscOps.fill(dummyMask, 0);

    // Pass along results to the listener
    if (listener != null) {
        listener.handleFusedDisparity(center.relations.id, disparity, dummyMask, computeFused.fusedParam);
    }

    // Convert data structures into a format which is understood by disparity-to-cloud
    BundleAdjustmentCamera camera = scene.cameras.get(center.metric.camera).model;
    BundleAdjustmentOps.convert(camera, disparity.width, disparity.height, brown);

    // The fused disparity is in regular pixels and not rectified
    Point2Transform2_F64 norm_to_pixel = new LensDistortionBrown(brown).distort_F64(false, true);
    Point2Transform2_F64 pixel_to_norm = new LensDistortionBrown(brown).undistort_F64(true, false);

    // world/cloud coordinates into this view
    scene.getWorldToView(center.metric, world_to_view1, tmp);

    // Use the computed disparity to add to the common point cloud while not adding points already in the cloud
    disparityCloud.addDisparity(disparity, dummyMask, world_to_view1, computeFused.fusedParam,
            norm_to_pixel, new PointToPixelTransform_F64(pixel_to_norm));
    return true;
}
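The boolean flags on distort_F64() and undistort_F64() select the coordinate system on each side of the transform, which is easy to get backwards. In the method above, (false, true) maps normalized coordinates to distorted pixels and (true, false) maps distorted pixels back to normalized coordinates. A round-trip sketch with made-up intrinsics makes the pairing concrete:

import boofcv.alg.distort.brown.LensDistortionBrown;
import boofcv.struct.calib.CameraPinholeBrown;
import boofcv.struct.distort.Point2Transform2_F64;
import georegression.struct.point.Point2D_F64;

public class BrownTransformRoundTrip {
    public static void main( String[] args ) {
        var brown = new CameraPinholeBrown();
        brown.fsetK(500, 500, 0, 320, 240, 640, 480);
        brown.fsetRadial(-0.1, 0.05);

        // (false, true) = normalized in, distorted pixels out
        Point2Transform2_F64 norm_to_pixel = new LensDistortionBrown(brown).distort_F64(false, true);
        // (true, false) = distorted pixels in, normalized out
        Point2Transform2_F64 pixel_to_norm = new LensDistortionBrown(brown).undistort_F64(true, false);

        var norm = new Point2D_F64();
        var pixel = new Point2D_F64();
        pixel_to_norm.compute(400, 300, norm);
        norm_to_pixel.compute(norm.x, norm.y, pixel);
        System.out.println("round trip: (400,300) -> " + pixel); // should recover ~(400, 300)
    }
}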
Use of boofcv.alg.distort.brown.LensDistortionBrown in project BoofCV by lessthanoptimal.
Class VisualizeSquareFiducial, method process.
public void process( String nameImage, @Nullable String nameIntrinsic ) {
    CameraPinholeBrown intrinsic = nameIntrinsic == null ? null :
            (CameraPinholeBrown)CalibrationIO.load(nameIntrinsic);
    GrayF32 input = Objects.requireNonNull(UtilImageIO.loadImage(nameImage, GrayF32.class));
    GrayF32 undistorted = new GrayF32(input.width, input.height);

    Detector detector = new Detector();

    if (intrinsic != null) {
        var paramUndist = new CameraPinholeBrown();
        ImageDistort<GrayF32, GrayF32> undistorter = LensDistortionOps.changeCameraModel(
                AdjustmentType.EXPAND, BorderType.EXTENDED, intrinsic, new CameraPinhole(intrinsic),
                paramUndist, ImageType.single(GrayF32.class));
        detector.configure(new LensDistortionBrown(paramUndist), paramUndist.width, paramUndist.height, false);
        undistorter.apply(input, undistorted);
    } else {
        undistorted.setTo(input);
    }

    detector.process(undistorted);
    System.out.println("Total Found: " + detector.squares.size());
    DogArray<FoundFiducial> fiducials = detector.getFound();
    int N = Math.min(20, detector.squares.size());

    ListDisplayPanel squares = new ListDisplayPanel();
    for (int i = 0; i < N; i++) {
        squares.addImage(ConvertBufferedImage.convertTo(detector.squares.get(i), null), " " + i);
    }

    BufferedImage output = new BufferedImage(input.width, input.height, BufferedImage.TYPE_INT_RGB);
    VisualizeBinaryData.renderBinary(detector.getBinary(), false, output);
    Graphics2D g2 = output.createGraphics();
    g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
    g2.setColor(Color.RED);
    g2.setStroke(new BasicStroke(2));

    if (intrinsic != null) {
        Point2Transform2_F64 add_p_to_p = LensDistortionFactory.narrow(intrinsic).distort_F64(true, true);
        for (int i = 0; i < N; i++) {
            // add back in lens distortion
            Quadrilateral_F64 q = fiducials.get(i).distortedPixels;
            apply(add_p_to_p, q.a, q.a);
            apply(add_p_to_p, q.b, q.b);
            apply(add_p_to_p, q.c, q.c);
            apply(add_p_to_p, q.d, q.d);
            VisualizeShapes.draw(q, g2);
        }
    }

    BufferedImage outputGray = new BufferedImage(input.width, input.height, BufferedImage.TYPE_INT_RGB);
    ConvertBufferedImage.convertTo(undistorted, outputGray);
    g2 = outputGray.createGraphics();
    g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);

    for (int i = 0; i < N; i++) {
        // add back in lens distortion
        Quadrilateral_F64 q = fiducials.get(i).distortedPixels;
        // g2.setStroke(new BasicStroke(2));
        // VisualizeBinaryData.render(detector.getSquareDetector().getUsedContours(), Color.BLUE, outputGray);
        VisualizeShapes.drawArrowSubPixel(q, 3, 1, g2);
    }

    ShowImages.showWindow(output, "Binary");
    ShowImages.showWindow(outputGray, "Gray");
    ShowImages.showWindow(squares, "Candidates");
}
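Because detection runs on the undistorted image, the found corners are in undistorted pixel coordinates and must be pushed back through the distortion model before they can be drawn over the original image; that is what add_p_to_p does above. A standalone sketch of that pixel-to-pixel transform, with hypothetical calibration values:

import boofcv.factory.distort.LensDistortionFactory;
import boofcv.struct.calib.CameraPinholeBrown;
import boofcv.struct.distort.Point2Transform2_F64;
import georegression.struct.point.Point2D_F64;

public class AddDistortionSketch {
    public static void main( String[] args ) {
        // Hypothetical calibration; in the code above it is loaded with CalibrationIO.load()
        var intrinsic = new CameraPinholeBrown();
        intrinsic.fsetK(500, 500, 0, 320, 240, 640, 480);
        intrinsic.fsetRadial(-0.2, 0.1);

        // (true, true) = undistorted pixels in, distorted pixels out
        Point2Transform2_F64 add_p_to_p = LensDistortionFactory.narrow(intrinsic).distort_F64(true, true);

        var corner = new Point2D_F64();
        add_p_to_p.compute(100, 150, corner); // corner found in the undistorted image
        System.out.println("same corner in the original image: " + corner);
    }
}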