Use of boofcv.struct.distort.Point2Transform2_F64 in the project BoofCV by lessthanoptimal:
class VisualDepthOps, method depthTo3D.
/**
 * Converts a depth image into a 3D point cloud.
 *
 * @param param Intrinsic camera parameters for depth image
 * @param depth depth image. each value is in millimeters.
 * @param cloud Output point cloud
 */
public static void depthTo3D(CameraPinholeRadial param, GrayU16 depth, FastQueue<Point3D_F64> cloud) {
    cloud.reset();

    // maps distorted pixel coordinates into normalized image coordinates
    Point2Transform2_F64 pixelToNorm = LensDistortionOps.narrow(param).undistort_F64(true, false);
    Point2D_F64 norm = new Point2D_F64();

    for (int row = 0; row < depth.height; row++) {
        int idx = depth.startIndex + row * depth.stride;
        for (int col = 0; col < depth.width; col++) {
            int range = depth.data[idx++] & 0xFFFF;

            // a value of zero means no depth information at this pixel
            if (range == 0)
                continue;

            // this could all be precomputed to speed it up
            pixelToNorm.compute(col, row, norm);

            Point3D_F64 out = cloud.grow();
            out.z = range;
            out.x = norm.x * out.z;
            out.y = norm.y * out.z;
        }
    }
}
Use of boofcv.struct.distort.Point2Transform2_F64 in the project BoofCV by lessthanoptimal:
class VisOdomPixelDepthPnP_to_DepthVisualOdometry, method setCalibration.
@Override
public void setCalibration(CameraPinholeRadial paramVisual, Point2Transform2_F32 visToDepth) {
    // intrinsic parameters used by the distance/error computation
    distance.setIntrinsic(paramVisual.fx, paramVisual.fy, paramVisual.skew);

    // transforms between pixel and normalized image coordinates for the visual camera
    alg.setPixelToNorm(narrow(paramVisual).undistort_F64(true, false));
    alg.setNormToPixel(narrow(paramVisual).distort_F64(false, true));

    // sparse 3D lookup: converts visual-camera pixels into depth-image pixels
    sparse3D.configure(LensDistortionOps.narrow(paramVisual), new PointToPixelTransform_F32(visToDepth));
}
Use of boofcv.struct.distort.Point2Transform2_F64 in the project BoofCV by lessthanoptimal:
class CreateSyntheticOverheadView, method configure.
/**
 * Specifies camera configurations and precomputes, for every cell in the overhead
 * image, the corresponding pixel in the camera image (or null when the cell is not
 * visible).
 *
 * @param intrinsic Intrinsic camera parameters
 * @param planeToCamera Transform from the plane to the camera. This is the extrinsic parameters.
 * @param centerX X-coordinate of camera center in the overhead image in world units.
 * @param centerY Y-coordinate of camera center in the overhead image in world units.
 * @param cellSize Size of each cell in the overhead image in world units.
 * @param overheadWidth Number of columns in overhead image
 * @param overheadHeight Number of rows in overhead image
 */
public void configure(CameraPinholeRadial intrinsic, Se3_F64 planeToCamera, double centerX, double centerY, double cellSize, int overheadWidth, int overheadHeight) {
    this.overheadWidth = overheadWidth;
    this.overheadHeight = overheadHeight;

    // maps normalized image coordinates into distorted pixel coordinates
    Point2Transform2_F64 normToPixel = LensDistortionOps.narrow(intrinsic).distort_F64(false, true);

    // Declare storage for precomputed pixel locations. The array is only grown,
    // never shrunk, so it may contain entries from a previous configuration.
    int overheadPixels = overheadHeight * overheadWidth;
    if (mapPixels == null || mapPixels.length < overheadPixels) {
        mapPixels = new Point2D_F32[overheadPixels];
    }
    points.reset();

    // -------- storage for intermediate results
    Point2D_F64 pixel = new Point2D_F64();
    // coordinate on the plane
    Point3D_F64 pt_plane = new Point3D_F64();
    // coordinate in camera reference frame
    Point3D_F64 pt_cam = new Point3D_F64();

    int indexOut = 0;
    for (int i = 0; i < overheadHeight; i++) {
        pt_plane.x = -(i * cellSize - centerY);
        for (int j = 0; j < overheadWidth; j++, indexOut++) {
            pt_plane.z = j * cellSize - centerX;

            // BUG FIX: default the cell to "no mapping". Previously cells behind the
            // camera (pt_cam.z <= 0) were never written, so a stale Point2D_F32 from
            // an earlier call to configure() could survive when the array is reused.
            mapPixels[indexOut] = null;

            // plane to camera reference frame
            SePointOps_F64.transform(planeToCamera, pt_plane, pt_cam);

            // can't see behind the camera
            if (pt_cam.z > 0) {
                // compute normalized then convert to pixels
                normToPixel.compute(pt_cam.x / pt_cam.z, pt_cam.y / pt_cam.z, pixel);

                float x = (float) pixel.x;
                float y = (float) pixel.y;

                // make sure it's in the image
                if (BoofMiscOps.checkInside(intrinsic.width, intrinsic.height, x, y)) {
                    Point2D_F32 p = points.grow();
                    p.set(x, y);
                    mapPixels[indexOut] = p;
                }
            }
        }
    }
}
Use of boofcv.struct.distort.Point2Transform2_F64 in the project narchy by automenta:
class ExampleStereoTwoViewsOneCamera, method convertToNormalizedCoordinates.
/**
 * Convert a set of associated point features from pixel coordinates into normalized image coordinates.
 *
 * @param matchedFeatures Associated pairs in pixel coordinates.
 * @param intrinsic Intrinsic parameters of the camera that produced the pixels.
 */
public void convertToNormalizedCoordinates(List<AssociatedPair> matchedFeatures, CameraPinholeRadial intrinsic) {
    // undistorts pixels and converts them into normalized image coordinates
    Point2Transform2_F64 pixelToNorm = LensDistortionOps.narrow(intrinsic).undistort_F64(true, false);

    matchedCalibrated.clear();
    for (AssociatedPair pixelPair : matchedFeatures) {
        AssociatedPair calibrated = new AssociatedPair();
        pixelToNorm.compute(pixelPair.p1.x, pixelPair.p1.y, calibrated.p1);
        pixelToNorm.compute(pixelPair.p2.x, pixelPair.p2.y, calibrated.p2);
        matchedCalibrated.add(calibrated);
    }
}
Use of boofcv.struct.distort.Point2Transform2_F64 in the project BoofCV by lessthanoptimal:
class CameraToEquirectangular_F64, method setCameraModel.
/**
 * Configures the rendering from a pinhole camera model. The pixel-to-normalized
 * transform is derived from the camera's intrinsic parameters.
 *
 * @param camera Pinhole camera model.
 */
public void setCameraModel(CameraPinhole camera) {
    setCameraModel(camera.width, camera.height,
            new LensDistortionPinhole(camera).undistort_F64(true, false));
}
Aggregations