Usage example of boofcv.struct.distort.Point2Transform2_F64 from the BoofCV project by lessthanoptimal: the createSquares method of the VideoSequenceSimulator class.
/**
 * Creates a set of randomly placed squares of fixed size, distributed uniformly across the
 * camera's field of view at random depths, then sorts them by depth so that squares farther
 * away are rendered first (painter's algorithm).
 *
 * @param total Number of squares to generate
 * @param minZ Minimum distance of a square from the camera
 * @param maxZ Maximum distance of a square from the camera
 */
protected void createSquares(int total, double minZ, double maxZ) {
	squares.clear();

	// length of each square's side
	double t = 0.1;

	Point2D_F64 n = new Point2D_F64();
	// pixel to normalized image coordinates, removing lens distortion
	Point2Transform2_F64 tranNorm = LensDistortionFactory.narrow(intrinsic).undistort_F64(true, false);

	for (int i = 0; i < total; i++) {
		// generate the squares uniformly inside the FOV
		tranNorm.compute(rand.nextDouble()*(intrinsic.width - 1), rand.nextDouble()*(intrinsic.height - 1), n);

		double z = rand.nextDouble()*(maxZ - minZ) + minZ;
		double x = n.x*z;
		double y = n.y*z;

		Square s = new Square();
		s.a.setTo(x, y, z);
		s.b.setTo(x + t, y, z);
		s.c.setTo(x + t, y + t, z);
		s.d.setTo(x, y + t, z);
		s.gray = rand.nextInt(200) + 55;

		squares.add(s);
	}

	// sort by depth so that objects farther away are rendered first and obstructed by objects closer in view
	squares.sort(Comparator.comparingDouble(o -> o.a.z));
}
Usage example of boofcv.struct.distort.Point2Transform2_F64 from the BoofCV project by lessthanoptimal: the createPerfectScene method of the TestVisOdomBundleAdjustment class.
/**
 * Populates the bundle adjustment structure with a synthetic noise-free scene: a random
 * point cloud observed from five camera frames spread along the x-axis.
 *
 * @param vsba Structure which is reset and then filled with the perfect scene
 */
private void createPerfectScene(VisOdomBundleAdjustment<BTrack> vsba) {
	vsba.reset();

	// Random cloud of 3D points placed in a box in front of the camera
	List<Point3D_F64> cloud = UtilPoint3D_F64.random(new Point3D_F64(0, 0, 1.5), -1.5, 1.5, -0.5, 0.5, -0.2, 0.2, 500, rand);

	LensDistortionPinhole distortion = new LensDistortionPinhole(pinhole);
	Point2Transform2_F64 n2n = distortion.distort_F64(false, false);

	vsba.addCamera(pinhole);

	// work space: point in the view's reference frame, normalized image coordinate, pixel coordinate
	Point3D_F64 viewPt = new Point3D_F64();
	Point2D_F64 norm = new Point2D_F64();
	Point2D_F64 pixel = new Point2D_F64();

	// Register every cloud point as a track which has been an inlier
	for (Point3D_F64 X : cloud) {
		vsba.addTrack(X.x, X.y, X.z, 1.0).hasBeenInlier = true;
	}

	for (int frameIdx = 0; frameIdx < 5; frameIdx++) {
		BFrame frame = vsba.addFrame(frameIdx);
		frame.frame_to_world.setTo(-1 + 2.0*frameIdx/4.0, 0, 0, EulerType.XYZ, rand.nextGaussian()*0.1, 0, 0);

		// Add an observation for every cloud point which is visible in this frame
		for (int pointIdx = 0; pointIdx < cloud.size(); pointIdx++) {
			frame.frame_to_world.transformReverse(cloud.get(pointIdx), viewPt);

			// can't see points behind the camera
			if (viewPt.z <= 0)
				continue;

			// project into the image and keep only points which land inside of it
			n2n.compute(viewPt.x/viewPt.z, viewPt.y/viewPt.z, norm);
			PerspectiveOps.convertNormToPixel(pinhole, norm.x, norm.y, pixel);
			if (!pinhole.isInside(pixel.x, pixel.y))
				continue;

			vsba.addObservation(frame, vsba.tracks.get(pointIdx), pixel.x, pixel.y);
		}
	}
}
Usage example of boofcv.struct.distort.Point2Transform2_F64 from the BoofCV project by lessthanoptimal: the processMvsCloud method of the ColorizeMultiViewStereoResults class.
/**
 * Extracts color information for the point cloud on a view by view basis.
 *
 * @param scene (Input) Geometric description of the scene
 * @param mvs (Input) Contains the 3D point cloud
 * @param indexColor (Output) RGB values are passed through to this function.
 */
public void processMvsCloud(SceneStructureMetric scene, MultiViewStereoFromKnownSceneStructure<?> mvs, BoofLambdas.IndexRgbConsumer indexColor) {
	// Views which acted as "centers" when the cloud was constructed
	List<ViewInfo> centers = mvs.getListCenters();

	// The fused point cloud
	DogArray<Point3D_F64> cloud = mvs.getDisparityCloud().getCloud();

	// Colorize the points contributed by each center view in turn
	for (int indexCenter = 0; indexCenter < centers.size(); indexCenter++) {
		ViewInfo center = centers.get(indexCenter);

		if (!lookupImages.loadImage(center.relations.id, image))
			throw new RuntimeException("Couldn't find image: " + center.relations.id);

		// Range of cloud indexes [idx0, idx1) which came from this view/center
		int idx0 = mvs.getDisparityCloud().viewPointIdx.get(indexCenter);
		int idx1 = mvs.getDisparityCloud().viewPointIdx.get(indexCenter + 1);

		// Configure the camera projection using the bundle adjustment model directly
		BundleAdjustmentOps.convert(scene.getViewCamera(center.metric).model, image.width, image.height, intrinsic);
		Point2Transform2_F64 norm_to_pixel = new LensDistortionBrown(intrinsic).distort_F64(false, true);

		// Transform from the world/cloud reference frame into this view
		scene.getWorldToView(center.metric, world_to_view, tmp);

		// Look up the color of each point inside this view's image
		colorizer.process3(image, cloud.toList(), idx0, idx1, world_to_view, norm_to_pixel, indexColor);
	}
}
Usage example of boofcv.struct.distort.Point2Transform2_F64 from the BoofCV project by lessthanoptimal: the configure method of the CreateSyntheticOverheadView class.
/**
 * Specifies camera configurations and precomputes, for every cell in the overhead image,
 * the camera pixel which observes that cell, or null if the cell is not visible.
 *
 * @param intrinsic Intrinsic camera parameters
 * @param planeToCamera Transform from the plane to the camera. This is the extrinsic parameters.
 * @param centerX X-coordinate of camera center in the overhead image in world units.
 * @param centerY Y-coordinate of camera center in the overhead image in world units.
 * @param cellSize Size of each cell in the overhead image in world units.
 * @param overheadWidth Number of columns in overhead image
 * @param overheadHeight Number of rows in overhead image
 */
public void configure(CameraPinholeBrown intrinsic, Se3_F64 planeToCamera, double centerX, double centerY, double cellSize, int overheadWidth, int overheadHeight) {
	this.overheadWidth = overheadWidth;
	this.overheadHeight = overheadHeight;

	// normalized image coordinates to distorted pixel coordinates
	Point2Transform2_F64 normToPixel = LensDistortionFactory.narrow(intrinsic).distort_F64(false, true);

	// Declare storage for precomputed pixel locations. The array is recycled when large enough.
	int overheadPixels = overheadHeight*overheadWidth;
	if (mapPixels == null || mapPixels.length < overheadPixels) {
		mapPixels = new Point2D_F32[overheadPixels];
	}
	points.reset();

	// -------- storage for intermediate results
	Point2D_F64 pixel = new Point2D_F64();
	// coordinate on the plane
	Point3D_F64 pt_plane = new Point3D_F64();
	// coordinate in camera reference frame
	Point3D_F64 pt_cam = new Point3D_F64();

	int indexOut = 0;
	for (int i = 0; i < overheadHeight; i++) {
		pt_plane.x = -(i*cellSize - centerY);
		for (int j = 0; j < overheadWidth; j++, indexOut++) {
			pt_plane.z = j*cellSize - centerX;

			// Mark the cell as not visible by default. This also prevents stale entries from a
			// previous call from leaking through when the array is being recycled. The original
			// code only wrote to mapPixels[indexOut] when pt_cam.z > 0, leaving old values behind.
			mapPixels[indexOut] = null;

			// plane to camera reference frame
			SePointOps_F64.transform(planeToCamera, pt_plane, pt_cam);

			// can't see behind the camera
			if (pt_cam.z > 0) {
				// compute normalized then convert to pixels
				normToPixel.compute(pt_cam.x/pt_cam.z, pt_cam.y/pt_cam.z, pixel);

				float x = (float)pixel.x;
				float y = (float)pixel.y;

				// make sure it's in the image
				if (BoofMiscOps.isInside(intrinsic.width, intrinsic.height, x, y)) {
					Point2D_F32 p = points.grow();
					p.setTo(x, y);
					mapPixels[indexOut] = p;
				}
			}
		}
	}
}
Usage example of boofcv.struct.distort.Point2Transform2_F64 from the BoofCV project by lessthanoptimal: the basic method of the TestFourPointSyntheticStability class.
/**
 * Checks stability metrics against a few known scenarios: head on, farther away, and at an angle
 */
@Test
void basic() {
	// pixel-to-normalized and normalized-to-pixel transforms for the test camera
	Point2Transform2_F64 pixelToNorm = new LensDistortionBrown(intrinsic).undistort_F64(true, false);
	Point2Transform2_F64 normToPixel = new LensDistortionBrown(intrinsic).distort_F64(false, true);

	FourPointSyntheticStability alg = new FourPointSyntheticStability();
	alg.setShape(0.2, 0.2);
	alg.setTransforms(pixelToNorm, normToPixel);

	// three poses: viewed head on, farther away, and rotated at an angle
	Se3_F64 poseHeadOn = SpecialEuclideanOps_F64.eulerXyz(0, 0, 1, 0, 0, 0, null);
	Se3_F64 poseFarther = SpecialEuclideanOps_F64.eulerXyz(0, 0, 3, 0, 0, 0, null);
	Se3_F64 poseAngled = SpecialEuclideanOps_F64.eulerXyz(0, 0, 1, 0, 0.5, 0, null);

	FiducialStability stabHeadOn = new FiducialStability();
	FiducialStability stabFarther = new FiducialStability();
	FiducialStability stabAngled = new FiducialStability();

	alg.computeStability(poseHeadOn, 1, stabHeadOn);
	alg.computeStability(poseFarther, 1, stabFarther);
	alg.computeStability(poseAngled, 1, stabAngled);

	// farther away, errors result in large pose errors
	assertTrue(stabHeadOn.location*1.1 < stabFarther.location);
	assertTrue(stabHeadOn.orientation*1.1 < stabFarther.orientation);

	// when viewed at an angle a small change results in a large change in pose, meaning its easier to estimate
	// when viewed head on a small change in pixel results in a similar pose making it insensitive to changes
	assertTrue(stabHeadOn.location > 1.1*stabAngled.location);
	assertTrue(stabHeadOn.orientation > 1.1*stabAngled.orientation);

	// larger disturbance larger error
	alg.computeStability(poseHeadOn, 2, stabFarther);
	assertTrue(stabHeadOn.location*1.1 < stabFarther.location);
	assertTrue(stabHeadOn.orientation*1.1 < stabFarther.orientation);
}
Aggregations