Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
The class TestSelfCalibrationLinearRotationSingle, method checkLinearSystem.
@Test
void checkLinearSystem() {
    CameraPinhole intrinsic = new CameraPinhole(400, 420, 0.1, 450, 475, 0, 0);
    renderRotationOnly(intrinsic);

    DMatrixRMaj K = new DMatrixRMaj(3, 3);
    PerspectiveOps.pinholeToMatrix(intrinsic, K);
    DMatrixRMaj w = new DMatrixRMaj(3, 3);
    CommonOps_DDRM.multTransB(K, K, w); // w = K*K' is the dual image of the absolute conic

    // compute homographies induced by the plane at infinity
    List<Homography2D_F64> homographies = new ArrayList<>();
    for (int i = 0; i < listP.size(); i++) {
        // System.out.println("projective "+i);
        DMatrixRMaj P = listP.get(i);
        Equation eq = new Equation();
        eq.alias(P, "P", planeAtInfinity, "p", K, "K", w, "w");
        eq.process("H = P(:,0:2) - P(:,3)*p'");
        // eq.process("w2 = H*w*H'");
        // eq.process("w2 = w2 - w");
        // System.out.println("w");
        // eq.lookupDDRM("w").print();
        // System.out.println("w2");
        // eq.lookupDDRM("w2").print();
        Homography2D_F64 H = new Homography2D_F64();
        DConvertMatrixStruct.convert(eq.lookupDDRM("H"), H);
        homographies.add(H);
        // H.print();
    }

    SelfCalibrationLinearRotationSingle alg = new SelfCalibrationLinearRotationSingle();
    alg.ensureDeterminantOfOne(homographies);

    int N = homographies.size();
    DMatrixRMaj A = new DMatrixRMaj(6*N, 6);
    DMatrixRMaj X = new DMatrixRMaj(6, 1);
    DMatrixRMaj found = new DMatrixRMaj(A.numRows, 1);
    for (int i = 0; i < homographies.size(); i++) {
        alg.add(i, homographies.get(i), A);
    }

    // pack the six unique elements of the symmetric matrix w into X
    X.data[0] = w.data[0];
    X.data[1] = w.data[1];
    X.data[2] = w.data[2];
    X.data[3] = w.data[4];
    X.data[4] = w.data[5];
    X.data[5] = w.data[8];

    CommonOps_DDRM.mult(A, X, found);
    assertEquals(0, NormOps_DDRM.normF(found), UtilEjml.TEST_F64);
}
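What the test exploits: with w = K*K' (the dual image of the absolute conic) and a rotation-only homography H = K*R*inv(K), which has det(H) = 1, the invariant w = H*w*H' holds, so the stacked system A must annihilate the vectorized w. Below is a minimal sketch of that invariant, reusing K and w from the test above; the Euler-angle rotation and the MatrixFeatures_DDRM check are illustrative (EJML and georegression's ConvertRotation3D_F64 assumed), not part of BoofCV's test:

    // Build a rotation-only homography H = K*R*inv(K); det(H) = 1 automatically
    DMatrixRMaj R = ConvertRotation3D_F64.eulerToMatrix(EulerType.XYZ, 0.1, -0.05, 0.2, null);
    DMatrixRMaj K_inv = new DMatrixRMaj(3, 3);
    CommonOps_DDRM.invert(K, K_inv);
    DMatrixRMaj H = new DMatrixRMaj(3, 3);
    DMatrixRMaj tmp = new DMatrixRMaj(3, 3);
    CommonOps_DDRM.mult(K, R, tmp);
    CommonOps_DDRM.mult(tmp, K_inv, H);

    // The DIAC is fixed by rotation-only homographies: w2 = H*w*H' should equal w
    DMatrixRMaj w2 = new DMatrixRMaj(3, 3);
    CommonOps_DDRM.mult(H, w, tmp);
    CommonOps_DDRM.multTransB(tmp, H, w2);
    assertTrue(MatrixFeatures_DDRM.isIdentical(w, w2, UtilEjml.TEST_F64));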
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
The class TestSelfCalibrationLinearRotationSingle, method one_axis.
/**
 * Rotation about a single axis is a degenerate motion, so self-calibration is expected to fail.
 */
@Test
void one_axis() {
    CameraPinhole intrinsic = new CameraPinhole(400, 420, 0.1, 450, 475, 0, 0);
    renderRotateOneAxis(intrinsic);
    checkFailure();
}
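Why a single axis fails: if every rotation shares an axis a, then any symmetric matrix X = alpha*I + beta*a*a' satisfies R*X*R' = X, so w = K*X*K' gives a two-parameter family of solutions and the linear system is rank deficient. A hedged sketch of how this could be observed on a matrix A built as in checkLinearSystem() but from one-axis homographies (checkFailure() in the actual test asserts failure through the algorithm's API; this SVD check is purely illustrative and assumes EJML and java.util.Arrays):

    SingularValueDecomposition_F64<DMatrixRMaj> svd =
            DecompositionFactory_DDRM.svd(A.numRows, A.numCols, false, false, true);
    svd.decompose(A);
    double[] sv = svd.getSingularValues();
    Arrays.sort(sv);
    // With general rotations only one singular value is ~0. With a shared axis
    // the two smallest are ~0: the null space has dimension 2 and w is ambiguous.
    assertTrue(sv[0] < 1e-8 && sv[1] < 1e-8);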
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
The class CreateCloudFromDisparityImages, method addDisparity.
/**
 * Add points from the disparity image which have not been masked out.
 *
 * NOTE: Point and pixel transforms are used because combined disparity images might include lens distortion.
 *
 * @param disparity (Input) Disparity image
 * @param mask (Input, Output) Mask that specifies which points in the disparity image are valid. When an existing
 * point in the cloud hits this image the mask is updated.
 * @param world_to_view (Input) Transform from world to view reference frame
 * @param parameters (Input) Describes how to interpret the disparity values
 * @param rectNorm_to_dispPixel (Input) Transform from rectified normalized image coordinates into disparity pixels
 * @param dispPixel_to_rectNorm (Input) Transform from disparity pixels into rectified normalized image coordinates
 * @return The index of the view that can be used to retrieve the specified points added
 */
public int addDisparity( GrayF32 disparity, GrayU8 mask, Se3_F64 world_to_view, DisparityParameters parameters,
                         Point2Transform2_F64 rectNorm_to_dispPixel,
                         PixelTransform<Point2D_F64> dispPixel_to_rectNorm ) {
    InputSanityCheck.checkSameShape(disparity, mask);

    // Mark pixels which already have a nearby point in the cloud so they are not added twice
    MultiViewStereoOps.maskOutPointsInCloud(cloud.toList(), disparity, parameters, world_to_view,
            rectNorm_to_dispPixel, disparitySimilarTol, mask);

    // Sanity check: the disparity image must not contain NaN or infinite values
    if (UtilEjml.isUncountable(ImageStatistics.sum(disparity)))
        throw new RuntimeException("BUG");

    // normalized image coordinates of disparity image
    final Point2D_F64 norm = new Point2D_F64();
    // 3D point in rectified stereo reference frame
    final Point3D_F64 rectP = new Point3D_F64();
    // 3D point in left stereo camera reference frame
    final Point3D_F64 leftP = new Point3D_F64();

    final CameraPinhole intrinsic = parameters.pinhole;
    final double baseline = parameters.baseline;
    final double disparityMin = parameters.disparityMin;

    for (int y = 0; y < disparity.height; y++) {
        int indexDisp = y*disparity.stride + disparity.startIndex;
        int indexMask = y*mask.stride + mask.startIndex;
        for (int x = 0; x < disparity.width; x++, indexDisp++, indexMask++) {
            // Check to see if it has been masked out as invalid
            if (mask.data[indexMask] != 0)
                continue;

            // Get the disparity and see if it has a valid value
            double d = disparity.data[indexDisp];
            if (d >= parameters.disparityRange)
                continue;

            // Make sure there are no points at infinity. Those can't be handled here
            d += disparityMin;
            if (d <= 0.0)
                continue;

            // Get normalized image coordinates
            dispPixel_to_rectNorm.compute(x, y, norm);

            // Find 3D point in rectified reference frame
            rectP.z = baseline*intrinsic.fx/d;
            rectP.x = rectP.z*norm.x;
            rectP.y = rectP.z*norm.y;

            // Rectified left camera to native left camera
            GeometryMath_F64.multTran(parameters.rotateToRectified, rectP, leftP);

            // Left to world frame
            SePointOps_F64.transformReverse(world_to_view, leftP, cloud.grow());
        }
    }
    // Denote where this set of points ends
    viewPointIdx.add(cloud.size());
    return this.viewPointIdx.size - 1;
}
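The geometry in the inner loop reduces to the standard rectified-stereo relation Z = baseline*fx/d, applied after undoing the disparityMin offset encoding. A standalone sketch of just that relation (the helper name is illustrative, not part of BoofCV's API):

    // Illustrative helper: recover depth in the rectified frame from an encoded disparity
    static double disparityToDepth( double baseline, double fx,
                                    double encodedDisparity, double disparityMin ) {
        double d = encodedDisparity + disparityMin; // undo the offset encoding
        if (d <= 0.0)
            return Double.POSITIVE_INFINITY; // point at infinity; callers must skip it
        return baseline*fx/d; // Z = B*f/d
    }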
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
The class MultiBaselineDisparityMedian, method addToFusedImage.
/**
 * Adds valid disparity values from the provided image into the fused image.
 *
 * @return true if successful and false if it failed
 */
boolean addToFusedImage( DisparityImage image ) {
    final GrayF32 disparity = image.disparity;
    final GrayU8 mask = image.mask;
    final DisparityParameters imageParam = image.parameters;

    // Only do the int to float and double to float conversion once
    final float imageRange = imageParam.disparityRange;
    final float imageMin = imageParam.disparityMin;
    final double imageFocalX = imageParam.pinhole.fx;
    final double imageBaseline = imageParam.baseline;
    final CameraPinhole imagePinhole = imageParam.pinhole;
    DConvertMatrixStruct.convert(image.undist_to_rect_px, rect);

    // fused image undistorted pixel coordinates
    Point2D_F64 undistPix = new Point2D_F64();
    // rectified image coordinates
    Point2D_F64 rectPix = new Point2D_F64();

    // To avoid sampling issues, go from fused image to disparity image
    for (int origPixY = 0; origPixY < fused.height; origPixY++) {
        for (int origPixX = 0; origPixX < fused.width; origPixX++) {
            // Go from distorted to undistorted pixels
            pixelOrig_to_Undist.compute(origPixX, origPixY, undistPix);
            // undistorted to rectified pixels
            HomographyPointOps_F64.transform(rect, undistPix.x, undistPix.y, rectPix);

            // Negative coordinates would break the rounding trick used below
            if (rectPix.x < 0.0 || rectPix.y < 0.0)
                continue;

            // Discretize the point so that a pixel can be read. Round instead of floor to reduce error.
            int rectPixX = (int)(rectPix.x + 0.5);
            int rectPixY = (int)(rectPix.y + 0.5);

            // Make sure it's inside the disparity image before sampling
            if (rectPixX >= mask.width || rectPixY >= mask.height)
                continue;

            // If marked as invalid don't sample here
            if (mask.unsafe_get(rectPixX, rectPixY) == 0)
                continue;

            // Sample the disparity image and make sure it has a valid value
            float imageDisp = disparity.unsafe_get(rectPixX, rectPixY);
            if (imageDisp >= imageRange)
                continue;

            // Don't trust the disparity if it's at the upper or lower extremes. It's likely that the true value
            // was above or below but it wasn't allowed to match there
            // if (imageDisp < 1.0f || imageDisp > imageRange - 1.0f)
            //     continue; TODO consider in the future once there are metrics

            float d = imageDisp + imageMin;
            if (d != 0) {
                // Convert the disparity from "image" into "fused image"
                // First compute the 3D point in the rectified coordinate system
                double rectZ = imageBaseline*imageFocalX/d;
                double rectX = rectZ*(rectPixX - imagePinhole.cx)/imagePinhole.fx;
                double rectY = rectZ*(rectPixY - imagePinhole.cy)/imagePinhole.fy;

                // Go from rectified to left camera, which is the fused camera
                double worldZ = dotRightCol(imageParam.rotateToRectified, rectX, rectY, rectZ);

                // Now that we know Z we can compute the disparity in the fused image
                float fusedDisp = (float)(fusedBaseline*fusedIntrinsic.fx/worldZ);
                fused.get(origPixX, origPixY).add(fusedDisp);
            } else {
                // Points at infinity are a special case. They will remain at infinity
                fused.get(origPixX, origPixY).add(0.0f);
            }
        }
    }
    return true;
}
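dotRightCol() is a private helper of the actual class and isn't shown in this excerpt. Based on how it is used, it should compute the z component of R'*p, i.e. the dot product of the rotation matrix's right-most column with the point; only z is needed because disparity depends on depth alone. A hypothetical reconstruction, consistent with the row-major indexing used in disparityToCloud() below:

    // Hypothetical sketch of dotRightCol(); the real helper is not public.
    // R is row-major, (row,col) = data[row*3 + col]; column 2 is the right column.
    static double dotRightCol( DMatrixRMaj R, double x, double y, double z ) {
        return R.data[2]*x + R.data[5]*y + R.data[8]*z; // = (R'*p).z
    }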
Use of boofcv.struct.calib.CameraPinhole in project BoofCV by lessthanoptimal.
The class ImplMultiViewStereoOps, method disparityToCloud.
public static void disparityToCloud( GrayF32 disparity, DisparityParameters parameters,
                                     PixelTransform<Point2D_F64> pixelToNorm,
                                     BoofLambdas.PixXyzConsumer_F64 consumer ) {
    final CameraPinhole intrinsic = parameters.pinhole;
    final double baseline = parameters.baseline;
    final double disparityMin = parameters.disparityMin;
    final double[] R = parameters.rotateToRectified.data;

    // pixel in normalized image coordinates
    final Point2D_F64 norm = new Point2D_F64();

    for (int pixY = 0; pixY < disparity.height; pixY++) {
        int indexDisp = disparity.startIndex + pixY*disparity.stride;
        for (int pixX = 0; pixX < disparity.width; pixX++, indexDisp++) {
            float d = disparity.data[indexDisp];
            if (d >= parameters.disparityRange)
                continue;

            // Convert the pixel into normalized image coordinates
            pixelToNorm.compute(pixX, pixY, norm);

            // Compute the point in the rectified reference frame
            double Z = baseline*intrinsic.fx/(d + disparityMin);
            double X = Z*norm.x;
            double Y = Z*norm.y;

            // Rotate back into the camera reference frame by multiplying with the
            // transpose of the 3x3 rotation matrix, which is stored in row-major
            // format: (row,col) = R[row*3 + col]
            double outX = R[0]*X + R[3]*Y + R[6]*Z;
            double outY = R[1]*X + R[4]*Y + R[7]*Z;
            double outZ = R[2]*X + R[5]*Y + R[8]*Z;

            consumer.process(pixX, pixY, outX, outY, outZ);
        }
    }
}
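A hedged usage sketch: PixelTransform and BoofLambdas.PixXyzConsumer_F64 are single-method interfaces, so lambdas work, and for a distortion-free camera pixel-to-normalized is simply (x - cx)/fx, (y - cy)/fy. Collecting the cloud into a List is illustrative, and in application code one would presumably go through the public MultiViewStereoOps wrapper rather than this Impl class:

    // Assumes the disparity image has no lens distortion
    CameraPinhole pinhole = parameters.pinhole;
    PixelTransform<Point2D_F64> pixelToNorm = ( x, y, norm ) -> {
        norm.x = (x - pinhole.cx)/pinhole.fx;
        norm.y = (y - pinhole.cy)/pinhole.fy;
    };
    List<Point3D_F64> cloud = new ArrayList<>();
    ImplMultiViewStereoOps.disparityToCloud(disparity, parameters, pixelToNorm,
            ( pixX, pixY, x, y, z ) -> cloud.add(new Point3D_F64(x, y, z)));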