Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
From the class CommonBundleAdjustmentMetricSchurJacobian, method createSceneChainSameMotion.
/**
 * Creates a scene containing a chain of views, emulating a "stereo" camera that moves. The transform between
 * consecutive views is fixed and shared across the entire chain.
 */
static SceneStructureMetric createSceneChainSameMotion( Random rand, boolean homogenous ) {
    SceneStructureMetric out = new SceneStructureMetric(homogenous);
    int numSteps = 3;

    out.initialize(1, numSteps, 5);
    out.setCamera(0, true, new CameraPinhole(200, 300, 0.1, 400, 500, 1, 1));

    // Create the single fixed transform that every view-to-view motion will reference
    int motionIdx = out.addMotion(false, SpecialEuclideanOps_F64.eulerXyz(0.25, 0.01, -0.05, 0.01, 0.02, -0.1, null));

    if (homogenous) {
        for (int i = 0; i < out.points.size; i++) {
            double w = rand.nextDouble()*0.5 + 0.5;
            out.setPoint(i, w*(i + 1), w*(i + 2*rand.nextGaussian()), w*(2*i - 3*rand.nextGaussian()), w);
        }
    } else {
        for (int i = 0; i < out.points.size; i++) {
            out.setPoint(i, i + 1, i + 2*rand.nextGaussian(), 2*i - 3*rand.nextGaussian());
        }
    }

    // Chain each view off the previous one (parent = step - 1, or -1 for the root view),
    // re-using the same motion for every link
    for (int step = 0; step < numSteps; step++) {
        out.setView(step, -1, motionIdx, step - 1);
        out.connectViewToCamera(step, 0);
    }

    // Assign the first point to all views, then each remaining point to just one view
    for (int i = 0; i < out.views.size; i++) {
        out.points.data[0].views.add(i);
    }
    for (int i = 1; i < out.points.size; i++) {
        out.points.data[i].views.add((i - 1)%out.views.size);
    }

    // Sanity check
    assertEquals(1, out.motions.size);
    assertEquals(numSteps, out.views.size);

    return out;
}
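For reference, here is a hedged sketch of how a test might consume this helper. It only touches fields the builder itself populates (all of them appear in the snippet above); the seed value is arbitrary and the static JUnit imports match the ones already in use.

    Random rand = new Random(234);
    SceneStructureMetric scene = createSceneChainSameMotion(rand, /* homogenous= */ true);

    // One shared motion drives the entire chain of views
    assertEquals(1, scene.motions.size);

    // The first point was assigned to every view, the rest to exactly one view each
    assertEquals(scene.views.size, scene.points.data[0].views.size);
    for (int i = 1; i < scene.points.size; i++) {
        assertEquals(1, scene.points.data[i].views.size);
    }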
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
From the class TestBundleAdjustmentOps, method convert_pinhole_to_bundlePinhole.
@Test
void convert_pinhole_to_bundlePinhole() {
    var src = new CameraPinhole(10, 12, 1, 1, 2, 10, 32);
    // dst starts out with different values so the test can tell that convert() overwrote them
    var dst = new BundlePinhole().setK(9, 3, 0, 0, 2);

    assertSame(dst, BundleAdjustmentOps.convert(src, dst));

    assertEquals(src.fx, dst.fx);
    assertEquals(src.fy, dst.fy);
    assertEquals(src.cx, dst.cx);
    assertEquals(src.cy, dst.cy);
    assertEquals(src.skew, dst.skew);
    assertFalse(dst.zeroSkew);
}
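A companion sketch of the zero-skew path, which the assertFalse() above implies but does not exercise. The assumption, flagged here because the test itself doesn't show it, is that convert() sets zeroSkew to true exactly when the source camera's skew is 0:

    // Assumption: convert() flags zeroSkew when the source camera's skew is exactly zero
    var srcNoSkew = new CameraPinhole(10, 12, 0, 1, 2, 10, 32);
    var dstNoSkew = new BundlePinhole().setK(9, 3, 0.5, 0, 2);

    BundleAdjustmentOps.convert(srcNoSkew, dstNoSkew);

    assertEquals(0.0, dstNoSkew.skew);
    assertTrue(dstNoSkew.zeroSkew);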
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
From the class CreateCloudFromDisparityImages, method addDisparity.
/**
 * Add points from the disparity image which have not been masked out.
 *
 * NOTE: Point and pixel transforms are passed in because combined disparity images might include lens distortion.
 *
 * @param disparity (Input) Disparity image
 * @param mask (Input, Output) Mask that specifies which points in the disparity image are valid. When an existing
 * point in the cloud hits this image the mask is updated.
 * @param world_to_view (Input) Transform from world to view reference frame
 * @param parameters (Input) Describes how to interpret the disparity values
 * @param rectNorm_to_dispPixel (Input) Transform from rectified normalized image coordinates into disparity pixels
 * @param dispPixel_to_rectNorm (Input) Transform from disparity pixels into rectified normalized image coordinates
 * @return The index of the view that can be used to retrieve the specified points added
 */
public int addDisparity( GrayF32 disparity, GrayU8 mask, Se3_F64 world_to_view, DisparityParameters parameters,
                         Point2Transform2_F64 rectNorm_to_dispPixel,
                         PixelTransform<Point2D_F64> dispPixel_to_rectNorm ) {
    InputSanityCheck.checkSameShape(disparity, mask);

    // Mask out pixels which are already explained by points in the cloud
    MultiViewStereoOps.maskOutPointsInCloud(cloud.toList(), disparity, parameters, world_to_view,
            rectNorm_to_dispPixel, disparitySimilarTol, mask);

    // Sanity check: a NaN or infinite disparity value indicates a bug upstream
    if (UtilEjml.isUncountable(ImageStatistics.sum(disparity)))
        throw new RuntimeException("BUG");

    // normalized image coordinates in the disparity image
    final Point2D_F64 norm = new Point2D_F64();
    // 3D point in rectified stereo reference frame
    final Point3D_F64 rectP = new Point3D_F64();
    // 3D point in left stereo camera reference frame
    final Point3D_F64 leftP = new Point3D_F64();

    final CameraPinhole intrinsic = parameters.pinhole;
    final double baseline = parameters.baseline;
    final double disparityMin = parameters.disparityMin;

    for (int y = 0; y < disparity.height; y++) {
        int indexDisp = y*disparity.stride + disparity.startIndex;
        int indexMask = y*mask.stride + mask.startIndex;
        for (int x = 0; x < disparity.width; x++, indexDisp++, indexMask++) {
            // Check to see if it has been masked out as invalid
            if (mask.data[indexMask] != 0)
                continue;

            // Get the disparity and see if it has a valid value
            double d = disparity.data[indexDisp];
            if (d >= parameters.disparityRange)
                continue;

            // Make sure there are no points at infinity. Those can't be handled here
            d += disparityMin;
            if (d <= 0.0)
                continue;

            // Get normalized image coordinates
            dispPixel_to_rectNorm.compute(x, y, norm);

            // Find the 3D point in the rectified reference frame
            rectP.z = baseline*intrinsic.fx/d;
            rectP.x = rectP.z*norm.x;
            rectP.y = rectP.z*norm.y;

            // Rectified left camera to native left camera
            GeometryMath_F64.multTran(parameters.rotateToRectified, rectP, leftP);

            // Left camera to world frame
            SePointOps_F64.transformReverse(world_to_view, leftP, cloud.grow());
        }
    }

    // Denote where this set of points ends
    viewPointIdx.add(cloud.size());

    return this.viewPointIdx.size - 1;
}
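The loop's core geometry is the standard rectified-stereo depth equation Z = baseline*fx/d, with the minimum disparity added back in first. A small worked example with made-up numbers:

    // Made-up numbers purely for illustration
    double baseline = 0.12;   // distance between rectified cameras, in meters
    double fx = 500.0;        // focal length, in pixels
    double d = 20.0 + 5.0;    // stored disparity 20 plus disparityMin 5

    double Z = baseline*fx/d; // 0.12*500/25 = 2.4 meters in front of the rectified camera
    double X = Z*0.1;         // with norm.x =  0.10 ->  0.24 meters
    double Y = Z*(-0.05);     // with norm.y = -0.05 -> -0.12 meters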
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
From the class MultiBaselineDisparityMedian, method addToFusedImage.
/**
 * Adds valid disparity values from the provided image into the fused image.
 *
 * @return true if successful and false if it failed
 */
boolean addToFusedImage( DisparityImage image ) {
    final GrayF32 disparity = image.disparity;
    final GrayU8 mask = image.mask;
    final DisparityParameters imageParam = image.parameters;

    // Only do the int to float and double to float conversions once
    final float imageRange = imageParam.disparityRange;
    final float imageMin = imageParam.disparityMin;
    final double imageFocalX = imageParam.pinhole.fx;
    final double imageBaseline = imageParam.baseline;
    final CameraPinhole imagePinhole = imageParam.pinhole;

    DConvertMatrixStruct.convert(image.undist_to_rect_px, rect);

    // fused image undistorted pixel coordinates
    Point2D_F64 undistPix = new Point2D_F64();
    // rectified image coordinates
    Point2D_F64 rectPix = new Point2D_F64();

    // To avoid sampling issues, go from fused image to disparity image
    for (int origPixY = 0; origPixY < fused.height; origPixY++) {
        for (int origPixX = 0; origPixX < fused.width; origPixX++) {
            // Go from distorted to undistorted pixels
            pixelOrig_to_Undist.compute(origPixX, origPixY, undistPix);
            // undistorted to rectified pixels
            HomographyPointOps_F64.transform(rect, undistPix.x, undistPix.y, rectPix);

            // Negative coordinates lie outside the image and would break the rounding trick used below
            if (rectPix.x < 0.0 || rectPix.y < 0.0)
                continue;

            // Discretize the point so that a pixel can be read. Round instead of floor to reduce error.
            int rectPixX = (int)(rectPix.x + 0.5);
            int rectPixY = (int)(rectPix.y + 0.5);

            // Make sure it's inside the disparity image before sampling
            if (rectPixX >= mask.width || rectPixY >= mask.height)
                continue;

            // If marked as invalid don't sample here
            if (mask.unsafe_get(rectPixX, rectPixY) == 0)
                continue;

            // Sample the disparity image and make sure it has a valid value
            float imageDisp = disparity.unsafe_get(rectPixX, rectPixY);
            if (imageDisp >= imageRange)
                continue;

            // Don't trust the disparity if it's at the upper or lower extremes. It's likely that the true value
            // was above or below but it wasn't allowed to match there
//          if (imageDisp < 1.0f || imageDisp > imageRange - 1.0f)
//              continue; TODO consider in the future once there are metrics

            float d = imageDisp + imageMin;
            if (d != 0) {
                // Convert the disparity from "image" into "fused image"
                // First compute the 3D point in the rectified coordinate system
                double rectZ = imageBaseline*imageFocalX/d;
                double rectX = rectZ*(rectPixX - imagePinhole.cx)/imagePinhole.fx;
                double rectY = rectZ*(rectPixY - imagePinhole.cy)/imagePinhole.fy;

                // Go from rectified to left camera, which is the fused camera
                double worldZ = dotRightCol(imageParam.rotateToRectified, rectX, rectY, rectZ);

                // Now that we know Z we can compute the disparity in the fused image
                float fusedDisp = (float)(fusedBaseline*fusedIntrinsic.fx/worldZ);
                fused.get(origPixX, origPixY).add(fusedDisp);
            } else {
                // Points at infinity are a special case. They will remain at infinity
                fused.get(origPixX, origPixY).add(0.0f);
            }
        }
    }

    return true;
}
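dotRightCol() is not shown in this snippet. A plausible reading, flagged here as an assumption, is the dot product of the right-most column of the 3x3 rotation matrix with the point: rotateToRectified maps the camera frame into the rectified frame, so going back means multiplying by its transpose, and the z row of the transpose is the right-most column of the original matrix. This matches the transpose-multiply spelled out element by element in ImplMultiViewStereoOps.disparityToCloud in the next snippet:

    // Hypothetical reconstruction of dotRightCol(), consistent with how it is called above.
    // EJML's DMatrixRMaj stores elements row-major: (row,col) = data[row*3 + col]
    static double dotRightCol( DMatrixRMaj R, double x, double y, double z ) {
        return R.data[2]*x + R.data[5]*y + R.data[8]*z;
    }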
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
From the class ImplMultiViewStereoOps, method disparityToCloud.
public static void disparityToCloud( GrayF32 disparity, DisparityParameters parameters,
                                     PixelTransform<Point2D_F64> pixelToNorm,
                                     BoofLambdas.PixXyzConsumer_F64 consumer ) {
    final CameraPinhole intrinsic = parameters.pinhole;
    final double baseline = parameters.baseline;
    final double disparityMin = parameters.disparityMin;
    final double[] R = parameters.rotateToRectified.data;

    // pixel in normalized image coordinates
    final Point2D_F64 norm = new Point2D_F64();

    for (int pixY = 0; pixY < disparity.height; pixY++) {
        int indexDisp = disparity.startIndex + pixY*disparity.stride;
        for (int pixX = 0; pixX < disparity.width; pixX++, indexDisp++) {
            float d = disparity.data[indexDisp];
            if (d >= parameters.disparityRange)
                continue;

            // Convert the pixel into normalized image coordinates
            pixelToNorm.compute(pixX, pixY, norm);

            // Compute the point in the rectified reference frame
            double Z = baseline*intrinsic.fx/(d + disparityMin);
            double X = Z*norm.x;
            double Y = Z*norm.y;

            // Rotate back into the camera reference frame by multiplying by the transpose of the
            // 3x3 rotation matrix, which is stored row-major: (row,col) = R[row*3 + col]
            double outX = R[0]*X + R[3]*Y + R[6]*Z;
            double outY = R[1]*X + R[4]*Y + R[7]*Z;
            double outZ = R[2]*X + R[5]*Y + R[8]*Z;

            consumer.process(pixX, pixY, outX, outY, outZ);
        }
    }
}
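A hedged usage sketch. To stay self-contained it implements the pixel-to-normalized transform inline for a pure pinhole model with no lens distortion, and it assumes disparity and parameters are already in scope and that compute() is PixelTransform's only abstract method; real callers would use one of BoofCV's distortion models instead:

    // Assumption: no lens distortion, so pixel -> normalized coordinates is just
    // subtracting the principal point and dividing by the focal length
    CameraPinhole pinhole = parameters.pinhole;
    PixelTransform<Point2D_F64> pixelToNorm = new PixelTransform<>() {
        @Override public void compute( int x, int y, Point2D_F64 norm ) {
            norm.x = (x - pinhole.cx)/pinhole.fx;
            norm.y = (y - pinhole.cy)/pinhole.fy;
        }
    };

    // Print each 3D point as it is generated
    ImplMultiViewStereoOps.disparityToCloud(disparity, parameters, pixelToNorm,
            ( pixX, pixY, x, y, z ) -> System.out.printf("(%3d,%3d) -> %6.2f %6.2f %6.2f%n", pixX, pixY, x, y, z));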