Use of georegression.struct.point.Point3D_F64 in project BoofCV by lessthanoptimal.
The class CalibratedPoseAndPoint, method configure.
/**
 * Specifies the number of views and 3D points being estimated.
 *
 * @param numViews Number of camera views observing the points
 * @param numPoints Number of points observed
 */
public void configure(int numViews, int numPoints) {
	// grow the storage for camera poses only if more views are requested than before
	if (worldToCamera.length < numViews) {
		Se3_F64[] temp = new Se3_F64[numViews];
		System.arraycopy(worldToCamera, 0, temp, 0, worldToCamera.length);
		for (int i = worldToCamera.length; i < temp.length; i++) {
			temp[i] = new Se3_F64();
		}
		worldToCamera = temp;
		viewKnown = new boolean[numViews];
	}

	// grow the storage for 3D points in the same lazy fashion
	if (points.length < numPoints) {
		Point3D_F64[] temp = new Point3D_F64[numPoints];
		System.arraycopy(points, 0, temp, 0, points.length);
		for (int i = points.length; i < temp.length; i++) {
			temp[i] = new Point3D_F64();
		}
		points = temp;
	}

	this.numPoints = numPoints;
	this.numViews = numViews;

	// mark every view as having an unknown pose until told otherwise
	for (int i = 0; i < numViews; i++) {
		viewKnown[i] = false;
	}
}
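The method above grows its internal arrays only when the requested size exceeds the current capacity, copies over the already-declared elements, and pre-fills the new slots so callers never see null. Below is a minimal, self-contained sketch of that same grow-only idiom; the GrowArrayDemo class and grow() helper are hypothetical names used purely for illustration.

import georegression.struct.point.Point3D_F64;

public class GrowArrayDemo {
	// hypothetical helper mirroring the idiom in configure(): keep existing
	// elements, allocate a larger array, and pre-declare the new slots
	static Point3D_F64[] grow(Point3D_F64[] current, int required) {
		if (current.length >= required)
			return current; // already big enough, reuse the existing storage
		Point3D_F64[] larger = new Point3D_F64[required];
		System.arraycopy(current, 0, larger, 0, current.length);
		for (int i = current.length; i < larger.length; i++) {
			larger[i] = new Point3D_F64();
		}
		return larger;
	}

	public static void main(String[] args) {
		Point3D_F64[] points = new Point3D_F64[0];
		points = grow(points, 5);
		System.out.println("capacity = " + points.length); // prints: capacity = 5
	}
}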
Use of georegression.struct.point.Point3D_F64 in project BoofCV by lessthanoptimal.
The class EquirectangularDistortBase_F64, method declareVectors.
/**
 * Declares storage for precomputed pointing vectors to output image
 *
 * @param width output image width
 * @param height output image height
 */
protected void declareVectors(int width, int height) {
	this.outWidth = width;
	if (vectors.length < width * height) {
		Point3D_F64[] tmp = new Point3D_F64[width * height];
		System.arraycopy(vectors, 0, tmp, 0, vectors.length);
		for (int i = vectors.length; i < tmp.length; i++) {
			tmp[i] = new Point3D_F64();
		}
		vectors = tmp;
	}
}
Use of georegression.struct.point.Point3D_F64 in project BoofCV by lessthanoptimal.
The class EquirectangularDistortBase_F64, method compute.
/**
 * Input is in pinhole camera pixel coordinates. Output is in equirectangular coordinates.
 *
 * @param x Pixel x-coordinate in the rendered pinhole camera
 * @param y Pixel y-coordinate in the rendered pinhole camera
 */
@Override
public void compute(int x, int y) {
	// grab the precomputed pointing vector for this pixel at the canonical orientation
	Point3D_F64 v = vectors[y * outWidth + x];

	// rotate it into the requested orientation
	// TODO make faster by not using an array based matrix
	GeometryMath_F64.mult(R, v, n);

	// compute the equirectangular pixel coordinate
	tools.normToEquiFV(n.x, n.y, n.z, out);
	distX = out.x;
	distY = out.y;
}
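For readers unfamiliar with equirectangular images: the conversion done by tools.normToEquiFV amounts to turning a unit pointing vector into longitude/latitude angles and scaling those angles to pixel coordinates. The sketch below shows one such mapping; the class name, the atan2(y, x)/asin(z) axis convention, and the pixel scaling are assumptions for illustration and may not match BoofCV's EquirectangularTools_F64 exactly.

import georegression.struct.point.Point2D_F64;

public class EquiMappingSketch {
	// assumed convention: longitude from atan2(y, x), latitude from asin(z)
	static void normToEqui(double nx, double ny, double nz, int width, int height, Point2D_F64 out) {
		double lon = Math.atan2(ny, nx);                         // [-pi, pi]
		double lat = Math.asin(Math.max(-1, Math.min(1, nz)));   // [-pi/2, pi/2]
		out.x = (lon / (2.0 * Math.PI) + 0.5) * width;           // longitude spans the image width
		out.y = (lat / Math.PI + 0.5) * height;                  // latitude spans the image height
	}

	public static void main(String[] args) {
		Point2D_F64 pixel = new Point2D_F64();
		normToEqui(1, 0, 0, 1024, 512, pixel); // a vector along +x lands at the center column
		System.out.println(pixel);
	}
}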
Use of georegression.struct.point.Point3D_F64 in project BoofCV by lessthanoptimal.
The class ExampleStereoDisparity3D, method main.
public static void main(String[] args) {
	// ------------- Compute Stereo Correspondence

	// Load camera images and stereo camera parameters
	String calibDir = UtilIO.pathExample("calibration/stereo/Bumblebee2_Chess/");
	String imageDir = UtilIO.pathExample("stereo/");
	StereoParameters param = CalibrationIO.load(new File(calibDir, "stereo.yaml"));

	// load and convert images into a BoofCV format
	BufferedImage origLeft = UtilImageIO.loadImage(imageDir, "chair01_left.jpg");
	BufferedImage origRight = UtilImageIO.loadImage(imageDir, "chair01_right.jpg");
	GrayU8 distLeft = ConvertBufferedImage.convertFrom(origLeft, (GrayU8) null);
	GrayU8 distRight = ConvertBufferedImage.convertFrom(origRight, (GrayU8) null);

	// re-scale input images
	GrayU8 scaledLeft = new GrayU8((int) (distLeft.width * scale), (int) (distLeft.height * scale));
	GrayU8 scaledRight = new GrayU8((int) (distRight.width * scale), (int) (distRight.height * scale));
	new FDistort(distLeft, scaledLeft).scaleExt().apply();
	new FDistort(distRight, scaledRight).scaleExt().apply();

	// Don't forget to adjust camera parameters for the change in scale!
	PerspectiveOps.scaleIntrinsic(param.left, scale);
	PerspectiveOps.scaleIntrinsic(param.right, scale);

	// rectify images and compute disparity
	GrayU8 rectLeft = new GrayU8(scaledLeft.width, scaledLeft.height);
	GrayU8 rectRight = new GrayU8(scaledRight.width, scaledRight.height);
	RectifyCalibrated rectAlg = ExampleStereoDisparity.rectify(scaledLeft, scaledRight, param, rectLeft, rectRight);

	// GrayU8 disparity = ExampleStereoDisparity.denseDisparity(rectLeft, rectRight, 3, minDisparity, maxDisparity);
	GrayF32 disparity = ExampleStereoDisparity.denseDisparitySubpixel(rectLeft, rectRight, 3, minDisparity, maxDisparity);

	// ------------- Convert disparity image into a 3D point cloud
	// The point cloud will be in the left camera's reference frame
	DMatrixRMaj rectK = rectAlg.getCalibrationMatrix();
	DMatrixRMaj rectR = rectAlg.getRectifiedRotation();

	// used to display the point cloud
	PointCloudViewer viewer = new PointCloudViewer(rectK, 10);
	viewer.setPreferredSize(new Dimension(rectLeft.width, rectLeft.height));

	// extract intrinsic parameters from the rectified camera
	double baseline = param.getBaseline();
	double fx = rectK.get(0, 0);
	double fy = rectK.get(1, 1);
	double cx = rectK.get(0, 2);
	double cy = rectK.get(1, 2);

	// Iterate through each pixel in the disparity image and compute its 3D coordinate
	Point3D_F64 pointRect = new Point3D_F64();
	Point3D_F64 pointLeft = new Point3D_F64();
	for (int y = 0; y < disparity.height; y++) {
		for (int x = 0; x < disparity.width; x++) {
			double d = disparity.unsafe_get(x, y) + minDisparity;

			// skip over pixels where no correspondence was found
			if (d >= rangeDisparity)
				continue;

			// Coordinate in the rectified camera frame
			pointRect.z = baseline * fx / d;
			pointRect.x = pointRect.z * (x - cx) / fx;
			pointRect.y = pointRect.z * (y - cy) / fy;

			// rotate into the original left camera frame
			GeometryMath_F64.multTran(rectR, pointRect, pointLeft);

			// add the pixel to the viewer for display purposes and set its gray-scale value
			int v = rectLeft.unsafe_get(x, y);
			viewer.addPoint(pointLeft.x, pointLeft.y, pointLeft.z, v << 16 | v << 8 | v);
		}
	}

	// display the results. Click and drag to change the point cloud camera view
	BufferedImage visualized = VisualizeImageData.disparity(disparity, null, minDisparity, maxDisparity, 0);
	ShowImages.showWindow(visualized, "Disparity");
	ShowImages.showWindow(viewer, "Point Cloud");
}
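The inner loop above is a direct application of the rectified stereo relation z = baseline * fx / d, followed by pinhole back-projection of the rectified pixel. Below is a self-contained sketch of that same computation as a stand-alone helper; the class and method names are made up for illustration.

import georegression.struct.point.Point3D_F64;

public class DisparityToPointSketch {
	// same math as the loop above: depth from disparity, then back-projection
	// of the rectified pixel through the rectified intrinsics
	static Point3D_F64 disparityToPoint(double x, double y, double d,
			double baseline, double fx, double fy, double cx, double cy) {
		Point3D_F64 p = new Point3D_F64();
		p.z = baseline * fx / d;      // depth is inversely proportional to disparity
		p.x = p.z * (x - cx) / fx;
		p.y = p.z * (y - cy) / fy;
		return p;
	}

	public static void main(String[] args) {
		// e.g. a 12-pixel disparity, 12 cm baseline, fx = fy = 700, principal point at (320, 240)
		System.out.println(disparityToPoint(400, 300, 12, 0.12, 700, 700, 320, 240));
	}
}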
Use of georegression.struct.point.Point3D_F64 in project BoofCV by lessthanoptimal.
The class GenericCalibrationGrid, method observations.
public static CalibrationObservation observations(Se3_F64 motion, List<Point2D_F64> obs2D) {
	CalibrationObservation ret = new CalibrationObservation();
	for (int i = 0; i < obs2D.size(); i++) {
		Point2D_F64 p2 = obs2D.get(i);

		// lift the planar grid point (z = 0) into 3D and move it into the camera frame
		Point3D_F64 p3 = new Point3D_F64(p2.x, p2.y, 0);
		Point3D_F64 t = SePointOps_F64.transform(motion, p3, null);

		// pinhole projection onto the normalized image plane
		ret.add(new Point2D_F64(t.x / t.z, t.y / t.z), i);
	}
	return ret;
}
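To make the geometry explicit, the sketch below walks a single grid point through the same steps: lift the planar (z = 0) target point into 3D, transform it by the target-to-camera motion, and divide by z to obtain the normalized image coordinate. The class name and the chosen numbers are purely illustrative.

import georegression.struct.point.Point2D_F64;
import georegression.struct.point.Point3D_F64;
import georegression.struct.se.Se3_F64;
import georegression.transform.se.SePointOps_F64;

public class GridProjectionSketch {
	public static void main(String[] args) {
		// place the camera 2 units in front of the calibration target (identity rotation)
		Se3_F64 motion = new Se3_F64();
		motion.T.z = 2;

		// a point on the planar target, lifted into 3D with z = 0
		Point3D_F64 onTarget = new Point3D_F64(0.1, -0.05, 0);

		// move it into the camera frame, then project onto the normalized image plane
		Point3D_F64 inCamera = SePointOps_F64.transform(motion, onTarget, null);
		Point2D_F64 normalized = new Point2D_F64(inCamera.x / inCamera.z, inCamera.y / inCamera.z);
		System.out.println(normalized); // (0.05, -0.025)
	}
}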