Use of org.ejml.data.DMatrixRMaj in the project BoofCV by lessthanoptimal.
From the class Relinearlize, method process.
/**
 * Estimates the beta coefficients using the relinearization technique.
 *
 * @param L_full Linear constraint matrix
 * @param y distances between world control points
 * @param betas Estimated betas. Output.
 */
public void process(DMatrixRMaj L_full, DMatrixRMaj y, double[] betas) {
    // Decompose the constraint matrix; its right singular vectors hold the
    // null space used when adding the extra constraints below
    svd.decompose(L_full);
    V = svd.getV(null, true);

    // One particular solution obtained through the pseudo-inverse
    pseudo.setA(L_full);
    pseudo.solve(y, x0);

    // Weights for the null-space vectors, found from the additional
    // relinearization constraints. These narrow down the solution set.
    DMatrixRMaj alphas = solveConstraintMatrix();

    // Final solution: x = x0 + sum_j alpha_j * v_j
    for (int row = 0; row < x0.numRows; row++) {
        double sum = x0.data[row];
        for (int nullIdx = 0; nullIdx < numNull; nullIdx++) {
            sum += alphas.data[nullIdx] * valueNull(nullIdx, row);
        }
        x0.data[row] = sum;
    }

    // x0 appears to store the products beta_i*beta_j in row-major
    // upper-triangular order (diagonal entries at 0,4,7,9 for 4 control
    // points and 0,3,5 for 3). Each beta is recovered from its squared
    // diagonal entry; the sign comes from the cross term with beta_1.
    if (numControl == 4) {
        betas[0] = Math.sqrt(Math.abs(x0.data[0]));
        betas[1] = Math.signum(x0.data[1]) * Math.sqrt(Math.abs(x0.data[4]));
        betas[2] = Math.signum(x0.data[2]) * Math.sqrt(Math.abs(x0.data[7]));
        betas[3] = Math.signum(x0.data[3]) * Math.sqrt(Math.abs(x0.data[9]));
    } else {
        betas[0] = Math.sqrt(Math.abs(x0.data[0]));
        betas[1] = Math.signum(x0.data[1]) * Math.sqrt(Math.abs(x0.data[3]));
        betas[2] = Math.signum(x0.data[2]) * Math.sqrt(Math.abs(x0.data[5]));
    }
}
Use of org.ejml.data.DMatrixRMaj in the project BoofCV by lessthanoptimal.
From the class PixelDepthLinearMetric, method depth2View.
/**
 * Computes pixel depth in image 'a' from two observations.
 *
 * @param a Observation in first frame. In calibrated coordinates. Not modified.
 * @param b Observation in second frame. In calibrated coordinates. Not modified.
 * @param fromAtoB Transform from frame a to frame b.
 * @return Pixel depth in first frame. In same units as T inside of fromAtoB.
 */
public double depth2View(Point2D_F64 a, Point2D_F64 b, Se3_F64 fromAtoB) {
    DMatrixRMaj rotation = fromAtoB.getR();
    Vector3D_F64 translation = fromAtoB.getT();

    // temp0 = [b]_x * R, temp1 = temp0 * a, temp2 = b x T
    GeometryMath_F64.multCrossA(b, rotation, temp0);
    GeometryMath_F64.mult(temp0, a, temp1);
    GeometryMath_F64.cross(b, translation, temp2);

    // Depth is the ratio of the summed components, negated
    double numerator = temp2.x + temp2.y + temp2.z;
    double denominator = temp1.x + temp1.y + temp1.z;
    return -numerator / denominator;
}
Use of org.ejml.data.DMatrixRMaj in the project BoofCV by lessthanoptimal.
From the class TriangulateMetricLinearDLT, method addView.
/**
 * Appends two rows to the linear system A for one view's observation.
 * The rows are derived by applying the normalization (scaling) matrix to the
 * pixel observation and the camera matrix; see the projective code for the
 * full derivation.
 *
 * @param motion Rigid-body transform for this view
 * @param a Observation in this view
 * @param index Offset into A.data where writing begins
 * @return Offset just past the last element written (index + 8)
 */
private int addView(Se3_F64 motion, Point2D_F64 a, int index) {
    final double sx = stats.stdX, sy = stats.stdY;

    DMatrixRMaj R = motion.getR();
    Vector3D_F64 T = motion.getT();
    double[] r = R.data; // 3x3 row-major: r[3*i + j] = R(i,j)

    // Row generated from the x-component of the observation
    A.data[index++] = (a.x * r[6] - r[0]) / sx;
    A.data[index++] = (a.x * r[7] - r[1]) / sx;
    A.data[index++] = (a.x * r[8] - r[2]) / sx;
    A.data[index++] = (a.x * T.z - T.x) / sx;

    // Row generated from the y-component of the observation
    A.data[index++] = (a.y * r[6] - r[3]) / sy;
    A.data[index++] = (a.y * r[7] - r[4]) / sy;
    A.data[index++] = (a.y * r[8] - r[5]) / sy;
    A.data[index++] = (a.y * T.z - T.y) / sy;

    return index;
}
Use of org.ejml.data.DMatrixRMaj in the project BoofCV by lessthanoptimal.
From the class QrCodeBinaryGridToPixel, method setTransformFromLinesSquare.
/**
 * Estimates the image-to-grid coordinate transform before the QR version is
 * known. The top-left position-pattern square fixes the coordinate system,
 * then 4 lines between corners heading toward the other position patterns
 * make the estimate less susceptible to errors in the first 4 corners.
 */
public void setTransformFromLinesSquare(QrCode qr) {
    // Discard correspondences accumulated from any previous call
    storagePairs2D.reset();
    storagePairs3D.reset();

    // Anchor the coordinate system with 3 corners of the top-left square.
    // Corner 0 is intentionally skipped: it is prone to damage and
    // significantly degrades results when included.
    set(0, 7, qr.ppCorner, 1);
    set(7, 7, qr.ppCorner, 2);
    set(7, 0, qr.ppCorner, 3);

    // Four lines toward the other position patterns add robustness against
    // noise in those corners. Only the line directions need to be right;
    // the exact grid-to-image scale does not matter here.
    setLine(0, 7, 0, 14, qr.ppCorner, 1, qr.ppRight, 0);
    setLine(7, 7, 7, 14, qr.ppCorner, 2, qr.ppRight, 3);
    setLine(7, 7, 14, 7, qr.ppCorner, 2, qr.ppDown, 1);
    setLine(7, 0, 14, 0, qr.ppCorner, 3, qr.ppDown, 0);

    // Solve for the homography with DLT, then cache it, its inverse, and
    // 32-bit copies of both
    DMatrixRMaj found = new DMatrixRMaj(3, 3);
    dlt.process(storagePairs2D.toList(), storagePairs3D.toList(), null, found);

    H.setTo(found);
    H.invert(Hinv);
    ConvertFloatType.convert(Hinv, Hinv32);
    ConvertFloatType.convert(H, H32);
}
Use of org.ejml.data.DMatrixRMaj in the project BoofCV by lessthanoptimal.
From the class ProjectiveToMetricCameraPracticalGuessAndCheck, method process.
/**
 * Elevates the projective reconstruction to metric using self calibration.
 * On success {@code metricViews} holds the intrinsics for every view and the
 * motion from view 1 to each remaining view, with translation magnitudes
 * normalized so the largest is 1.0.
 *
 * @return true if self calibration and the metric upgrade succeeded
 */
@Override
public boolean process(List<ElevateViewInfo> views, List<DMatrixRMaj> cameraMatrices, List<AssociatedTuple> observations, MetricCameras metricViews) {
    // One camera matrix per view except the first (identity) view
    BoofMiscOps.checkTrue(cameraMatrices.size() + 1 == views.size());
    metricViews.reset();

    // Tell the self-calibration algorithm the image size
    ImageDimension shape = views.get(0).shape;
    selfCalib.setCamera(0.0, 0.0, 0.0, shape.width, shape.height);

    // Perform self calibration
    if (!selfCalib.process(cameraMatrices))
        return false;

    DMatrixRMaj rectifier = selfCalib.getRectifyingHomography();

    // The upper-left 3x3 of the rectifying homography is K for view 1
    CommonOps_DDRM.extract(rectifier, 0, 0, K);
    PerspectiveOps.matrixToPinhole(K, -1, -1, metricViews.intrinsics.grow());

    // Upgrade every remaining camera/view to metric, tracking the largest
    // translation magnitude for normalization afterwards
    double maxTranslation = 0.0;
    for (int i = 0; i < cameraMatrices.size(); i++) {
        DMatrixRMaj P = cameraMatrices.get(i);
        if (!MultiViewOps.projectiveToMetric(P, rectifier, metricViews.motion_1_to_k.grow(), K))
            return false;
        PerspectiveOps.matrixToPinhole(K, -1, -1, metricViews.intrinsics.grow());
        maxTranslation = Math.max(maxTranslation, metricViews.motion_1_to_k.getTail().T.norm());
    }

    // Rescale so the found motion has a scale around 1.0
    for (int i = 0; i < metricViews.motion_1_to_k.size; i++) {
        metricViews.motion_1_to_k.get(i).T.divide(maxTranslation);
    }

    resolveSign.process(observations, metricViews);
    return true;
}
Aggregations