Use of org.ejml.data.DMatrixRMaj in the BoofCV project (by lessthanoptimal): class DecomposeProjectiveToMetric, method projectiveToMetricKnownK.
/**
 * <p>
 * Upgrades a projective camera matrix into a metric transform when the rectifying homography and the
 * calibration matrix are both already known. Compared to {@link #projectiveToMetric}, which must also
 * recover `K`, the math here is considerably simpler.
 * </p>
 * {@code P = K*[R|T]*H} where H is the inverse of the rectifying homography.
 *
 * A goodness of fit error can be accessed using {@link #singularError}.
 *
 * @param cameraMatrix (Input) camera matrix. 3x4
 * @param H (Input) Rectifying homography. 4x4
 * @param K (Input) Known calibration matrix
 * @param worldToView (Output) transform from world to camera view
 * @return true if the decomposition was successful
 */
public boolean projectiveToMetricKnownK(DMatrixRMaj cameraMatrix, DMatrixRMaj H, DMatrixRMaj K, Se3_F64 worldToView) {
	singularError = 0;

	// Elevate the projective camera into metric space: P_metric = P*H
	CommonOps_DDRM.mult(cameraMatrix, H, P_metric);

	// Strip the calibration matrix off: inv(K)*[K*R | K*T] = [R | T]
	CommonOps_DDRM.invert(K, K_inv);
	CommonOps_DDRM.mult(K_inv, P_metric, P_rt);

	// Split [R | T] into its rotation and translation components
	CommonOps_DDRM.extract(P_rt, 0, 0, worldToView.R);
	worldToView.T.x = P_rt.get(0, 3);
	worldToView.T.y = P_rt.get(1, 3);
	worldToView.T.z = P_rt.get(2, 3);

	// Project the extracted matrix onto the nearest true rotation, i.e. orthogonal with determinant +1.
	// NOTE: decompose() must run before R is overwritten below
	DMatrixRMaj rotation = worldToView.R;
	if (!svd.decompose(rotation))
		return false;
	CommonOps_DDRM.multTransB(svd.getU(null, false), svd.getV(null, false), rotation);

	// Enforce det(R) = +1. Flipping the rotation's sign requires flipping the translation too
	if (CommonOps_DDRM.det(rotation) < 0) {
		CommonOps_DDRM.scale(-1, rotation);
		worldToView.T.scale(-1);
	}

	// Recover the scale of T by normalizing with the mean singular value. This matters when
	// constructing a common metric frame from a common projective frame
	double[] singular = svd.getSingularValues();
	double meanSingular = (singular[0] + singular[1] + singular[2])/3.0;
	worldToView.T.divideIP(meanSingular);

	// For a perfect metric camera all three singular values would be identical; accumulate
	// their spread as a goodness-of-fit measure
	for (int i = 0; i < 3; i++) {
		singularError += Math.abs(singular[i] - meanSingular);
	}

	return true;
}
Use of org.ejml.data.DMatrixRMaj in the BoofCV project (by lessthanoptimal): class LensDistortionOps_F64, method transformChangeModel.
/**
 * Creates a {@link Point2Transform2_F32} that maps pixels from an original camera model into a new
 * synthetic model. The image scaling can be adjusted to satisfy different visibility requirements.
 *
 * @param type The type of adjustment it will apply to the transform
 * @param paramOriginal Camera model for the current image
 * @param paramDesired Desired camera model for the distorted image
 * @param desiredToOriginal If true then the transform's input is assumed to be pixels in the desired
 * image and the output will be in original image, if false then the reverse transform
 * is returned.
 * @param paramMod The modified camera model to meet the requested visibility requirements. Null if you don't want it.
 * @return The requested transform
 */
public static <O extends CameraPinhole, D extends CameraPinhole> Point2Transform2_F64 transformChangeModel(AdjustmentType type, O paramOriginal, D paramDesired, boolean desiredToOriginal, @Nullable D paramMod) {
	LensDistortionNarrowFOV original = LensDistortionFactory.narrow(paramOriginal);
	LensDistortionNarrowFOV desired = LensDistortionFactory.narrow(paramDesired);

	// Chain: original pixels -> normalized coordinates -> desired pixels
	Point2Transform2_F64 ori_p_to_n = original.undistort_F64(true, false);
	Point2Transform2_F64 des_n_to_p = desired.distort_F64(false, true);
	Point2Transform2_F64 ori_to_des = new SequencePoint2Transform2_F64(ori_p_to_n, des_n_to_p);

	Point2D_F64 work = new Point2D_F64();

	// Bounding box of the original image once mapped into the desired model
	RectangleLength2D_F64 bound;
	switch (type) {
		case FULL_VIEW:
			bound = DistortImageOps.boundBox_F64(paramOriginal.width, paramOriginal.height, new PointToPixelTransform_F64(ori_to_des), work);
			break;

		case EXPAND:
			bound = LensDistortionOps_F64.boundBoxInside(paramOriginal.width, paramOriginal.height, new PointToPixelTransform_F64(ori_to_des), work);
			// ensure there are no strips of black
			LensDistortionOps_F64.roundInside(bound);
			break;

		case CENTER:
			bound = LensDistortionOps_F64.centerBoxInside(paramOriginal.width, paramOriginal.height, new PointToPixelTransform_F64(ori_to_des), work);
			break;

		case NONE:
			bound = new RectangleLength2D_F64(0, 0, paramDesired.width, paramDesired.height);
			break;

		default:
			throw new IllegalArgumentException("Unsupported type " + type);
	}

	// Select the scale that satisfies each adjustment's visibility requirement
	double scaleX = bound.width/paramDesired.width;
	double scaleY = bound.height/paramDesired.height;
	double scale;
	switch (type) {
		case FULL_VIEW:
		case CENTER:
			scale = Math.max(scaleX, scaleY);
			break;

		case EXPAND:
			scale = Math.min(scaleX, scaleY);
			break;

		default:
			scale = 1.0;
	}

	// Offset so the scaled bound stays centered on the desired image
	double deltaX = (bound.x0 + (scaleX - scale)*paramDesired.width/2.0);
	double deltaY = (bound.y0 + (scaleY - scale)*paramDesired.height/2.0);

	// adjustment matrix
	DMatrixRMaj A = new DMatrixRMaj(3, 3, true, scale, 0, deltaX, 0, scale, deltaY, 0, 0, 1);
	DMatrixRMaj A_inv = new DMatrixRMaj(3, 3);
	if (!CommonOps_DDRM.invert(A, A_inv)) {
		throw new RuntimeException("Failed to invert adjustment matrix. Probably bad.");
	}

	if (paramMod != null) {
		PerspectiveOps.adjustIntrinsic(paramDesired, A_inv, paramMod);
	}

	if (desiredToOriginal) {
		// build the reverse chain: desired pixels -> normalized -> original pixels
		Point2Transform2_F64 des_p_to_n = desired.undistort_F64(true, false);
		Point2Transform2_F64 ori_n_to_p = original.distort_F64(false, true);
		PointTransformHomography_F64 adjust = new PointTransformHomography_F64(A);
		return new SequencePoint2Transform2_F64(adjust, des_p_to_n, ori_n_to_p);
	} else {
		PointTransformHomography_F64 adjust = new PointTransformHomography_F64(A_inv);
		return new SequencePoint2Transform2_F64(ori_to_des, adjust);
	}
}
Use of org.ejml.data.DMatrixRMaj in the BoofCV project (by lessthanoptimal): class DecomposeEssential, method extractTransform.
/**
 * An essential matrix admits four possible reconstructions. Each combination of optionA and
 * optionB selects one of those permutations.
 */
private void extractTransform(DMatrixRMaj U, DMatrixRMaj V, Se3_F64 se, boolean optionA, boolean optionB) {
	DMatrixRMaj rotation = se.getR();
	Vector3D_F64 translation = se.getT();

	// Rotation is R = U*W'*V' (optionA) or R = U*W*V'
	if (optionA) {
		CommonOps_DDRM.multTransB(U, W, temp);
	} else {
		CommonOps_DDRM.mult(U, W, temp);
	}
	CommonOps_DDRM.multTransB(temp, V, rotation);

	// Translation (up to scale) is the last column of U, optionally negated
	translation.x = U.get(0, 2);
	translation.y = U.get(1, 2);
	translation.z = U.get(2, 2);
	if (optionB) {
		translation.scale(-1);
	}
}
Use of org.ejml.data.DMatrixRMaj in the BoofCV project (by lessthanoptimal): class ExampleComputeFundamentalMatrix, method simpleFundamental.
/**
 * When the associated features are known to be correct, the fundamental matrix can be computed
 * directly with far less code. The trade-off is that this approach is very sensitive to noise.
 */
public static DMatrixRMaj simpleFundamental(List<AssociatedPair> matches) {
	// The 8-point algorithm works with an arbitrary number of point associations
	Estimate1ofEpipolar estimator = FactoryMultiView.fundamental_1(EnumFundamental.LINEAR_8, 0);

	var fundamental = new DMatrixRMaj(3, 3);
	if (estimator.process(matches, fundamental))
		return fundamental;

	throw new IllegalArgumentException("Failed");
}
Use of org.ejml.data.DMatrixRMaj in the BoofCV project (by lessthanoptimal): class ExampleStereoDisparity, method rectify.
/**
 * Rectifies the input image pair using the known stereo calibration.
 */
public static RectifyCalibrated rectify(GrayU8 origLeft, GrayU8 origRight, StereoParameters param, GrayU8 rectLeft, GrayU8 rectRight) {
	RectifyCalibrated rectifyAlg = RectifyImageOps.createCalibrated();

	// Extrinsics with the left camera as the reference frame
	Se3_F64 leftToRight = param.getRightToLeft().invert(null);

	// Intrinsic calibration matrix for each camera
	DMatrixRMaj intrinsicLeft = PerspectiveOps.pinholeToMatrix(param.getLeft(), (DMatrixRMaj)null);
	DMatrixRMaj intrinsicRight = PerspectiveOps.pinholeToMatrix(param.getRight(), (DMatrixRMaj)null);

	rectifyAlg.process(intrinsicLeft, new Se3_F64(), intrinsicRight, leftToRight);

	// Homographies mapping undistorted pixels into each rectified image
	DMatrixRMaj rectifyLeft = rectifyAlg.getUndistToRectPixels1();
	DMatrixRMaj rectifyRight = rectifyAlg.getUndistToRectPixels2();
	// Calibration matrix shared by both rectified views
	DMatrixRMaj rectK = rectifyAlg.getCalibrationMatrix();

	// Adjust the rectification so more of the view area is useful
	RectifyImageOps.allInsideLeft(param.left, rectifyLeft, rectifyRight, rectK, null);

	// The distortion engines require 32-bit matrices
	var rectifyLeft_F32 = new FMatrixRMaj(3, 3);
	var rectifyRight_F32 = new FMatrixRMaj(3, 3);
	ConvertMatrixData.convert(rectifyLeft, rectifyLeft_F32);
	ConvertMatrixData.convert(rectifyRight, rectifyRight_F32);

	// Undistort and rectify both images
	ImageDistort<GrayU8, GrayU8> distortLeft = RectifyDistortImageOps.rectifyImage(param.getLeft(), rectifyLeft_F32, BorderType.SKIP, origLeft.getImageType());
	ImageDistort<GrayU8, GrayU8> distortRight = RectifyDistortImageOps.rectifyImage(param.getRight(), rectifyRight_F32, BorderType.SKIP, origRight.getImageType());

	distortLeft.apply(origLeft, rectLeft);
	distortRight.apply(origRight, rectRight);

	return rectifyAlg;
}
Aggregations