Use of org.ejml.data.DMatrixRMaj in project BoofCV by lessthanoptimal.
Class PoseFromPairLinear6, method massageResults.
/**
* <p>
* Since the linear solution is probably not an exact rotation matrix, this code finds the best
* approximation.
* </p>
*
* See page 280 of [1]
*/
private void massageResults() {
    DMatrixRMaj R = motion.getR();
    Vector3D_F64 T = motion.getT();

    if (!svd.decompose(R))
        throw new RuntimeException("SVD Failed");

    CommonOps_DDRM.multTransB(svd.getU(null, false), svd.getV(null, false), R);

    // determinant should be +1
    double det = CommonOps_DDRM.det(R);
    if (det < 0)
        CommonOps_DDRM.scale(-1, R);

    // compute the determinant of the singular matrix
    double b = 1.0;
    double[] s = svd.getSingularValues();
    for (int i = 0; i < svd.numberOfSingularValues(); i++) {
        b *= s[i];
    }
    b = Math.signum(det) / Math.pow(b, 1.0 / 3.0);

    GeometryMath_F64.scale(T, b);
}
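For comparison, the nearest-rotation step above (R = U V^T with the sign flipped so that det(R) = +1, per page 280 of [1]) can be reproduced with plain EJML outside of BoofCV. The sketch below is only an illustration: the class name NearestRotation and method project are invented for this example and are not part of PoseFromPairLinear6.

import org.ejml.data.DMatrixRMaj;
import org.ejml.dense.row.CommonOps_DDRM;
import org.ejml.dense.row.factory.DecompositionFactory_DDRM;
import org.ejml.interfaces.decomposition.SingularValueDecomposition_F64;

public class NearestRotation {
    // Projects an arbitrary 3x3 matrix onto the closest rotation matrix
    // by replacing its singular values with ones: R = U * V^T.
    public static DMatrixRMaj project(DMatrixRMaj A) {
        SingularValueDecomposition_F64<DMatrixRMaj> svd =
                DecompositionFactory_DDRM.svd(3, 3, true, true, false);
        // decompose() may modify its input, so pass a copy
        if (!svd.decompose(A.copy()))
            throw new RuntimeException("SVD Failed");
        DMatrixRMaj R = new DMatrixRMaj(3, 3);
        CommonOps_DDRM.multTransB(svd.getU(null, false), svd.getV(null, false), R);
        // force a proper rotation: det(R) must be +1
        if (CommonOps_DDRM.det(R) < 0)
            CommonOps_DDRM.scale(-1, R);
        return R;
    }
}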
Use of org.ejml.data.DMatrixRMaj in project clusterMaker2 by RBVI.
Class EjmlOps, method colMean.
public static DMatrixRMaj colMean(DMatrixRMaj y, int i) {
    DMatrixRMaj colmean = new DMatrixRMaj(1, y.numCols);
    sumCols(y, colmean);
    divide(colmean, y.numRows);
    return colmean;
}
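Note that the second parameter i is unused; FastTSne.tsne below calls colMean(Y, 0). As a rough standalone illustration (not project code; the class name ColMeanDemo is invented here), the same 1 x numCols row of column means can be produced with fully qualified EJML calls:

import org.ejml.data.DMatrixRMaj;
import org.ejml.dense.row.CommonOps_DDRM;

public class ColMeanDemo {
    public static void main(String[] args) {
        DMatrixRMaj y = new DMatrixRMaj(new double[][] {{1, 2}, {3, 4}, {5, 6}});
        DMatrixRMaj colmean = new DMatrixRMaj(1, y.numCols);
        CommonOps_DDRM.sumCols(y, colmean);        // per-column sums
        CommonOps_DDRM.divide(colmean, y.numRows); // divide by the number of rows
        System.out.println(colmean);               // 1x2 row vector: 3, 4
    }
}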
Use of org.ejml.data.DMatrixRMaj in project clusterMaker2 by RBVI.
Class EjmlOps, method fillWithRow.
public static DMatrixRMaj fillWithRow(DMatrixRMaj matrix, int setrow) {
    int rows = matrix.numRows;
    int cols = matrix.numCols;
    DMatrixRMaj result = new DMatrixRMaj(rows, cols);
    for (int row = 0; row < rows; row++) {
        for (int col = 0; col < cols; col++) {
            result.set(row, col, matrix.get(setrow, col));
        }
    }
    return result;
}
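A quick usage sketch (illustrative values only, assuming EjmlOps is in scope): fillWithRow builds a new matrix of the same shape in which every row is a copy of the chosen row of the input.

DMatrixRMaj m = new DMatrixRMaj(new double[][] {{1, 2}, {3, 4}, {5, 6}});
DMatrixRMaj tiled = EjmlOps.fillWithRow(m, 1);
// tiled now holds three copies of row 1:
// [ 3  4 ]
// [ 3  4 ]
// [ 3  4 ]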
Use of org.ejml.data.DMatrixRMaj in project clusterMaker2 by RBVI.
Class FastTSne, method tsne.
@Override
public double[][] tsne(TSneConfiguration config, TaskMonitor monitor) {
    double[][] X = config.getXin();
    int no_dims = config.getOutputDims();
    int initial_dims = config.getInitialDims();
    double perplexity = config.getPerplexity();
    int max_iter = config.getMaxIter();
    boolean use_pca = config.usePca();
    this.monitor = monitor;
    this.config = config;
    String IMPLEMENTATION_NAME = this.getClass().getSimpleName();
    // System.out.println("X:Shape is = " + X.length + " x " + X[0].length);
    monitor.showMessage(TaskMonitor.Level.INFO, "Running " + IMPLEMENTATION_NAME + ".");
    long end = System.currentTimeMillis();
    long start = System.currentTimeMillis();
    long total = System.currentTimeMillis();
    // Initialize variables
    if (use_pca && X[0].length > initial_dims && initial_dims > 0) {
        monitor.showMessage(TaskMonitor.Level.INFO, "Using PCA to reduce dimensions");
        PrincipalComponentAnalysis pca = new PrincipalComponentAnalysis();
        X = pca.pca(X, initial_dims);
        monitor.showMessage(TaskMonitor.Level.INFO, "X:Shape after PCA is = " + X.length + " x " + X[0].length);
    }
    int n = X.length;
    double momentum = .5;
    double initial_momentum = 0.5;
    double final_momentum = 0.8;
    int eta = 500;
    double min_gain = 0.01;
    DMatrixRMaj Y = new DMatrixRMaj(rnorm(n, no_dims));
    // DMatrixRMaj Y = new DMatrixRMaj(fillMatrix(n,no_dims,.5));
    DMatrixRMaj Ysqlmul = new DMatrixRMaj(Y.numRows, Y.numRows);
    DMatrixRMaj dY = new DMatrixRMaj(fillMatrix(n, no_dims, 0.0));
    DMatrixRMaj iY = new DMatrixRMaj(fillMatrix(n, no_dims, 0.0));
    DMatrixRMaj gains = new DMatrixRMaj(fillMatrix(n, no_dims, 1.0));
    DMatrixRMaj btNeg = new DMatrixRMaj(n, no_dims);
    DMatrixRMaj bt = new DMatrixRMaj(n, no_dims);
    if (config.cancelled())
        return null;
    // Compute P-values
    // P = n x n
    DMatrixRMaj P = new DMatrixRMaj(x2p(X, 1e-5, perplexity).P);
    if (config.cancelled())
        return null;
    DMatrixRMaj Ptr = new DMatrixRMaj(P.numRows, P.numCols);
    // L = n x n
    DMatrixRMaj L = new DMatrixRMaj(P);
    DMatrixRMaj logdivide = new DMatrixRMaj(P.numRows, P.numCols);
    DMatrixRMaj diag = new DMatrixRMaj(fillMatrix(L.numRows, L.numCols, 0.0));
    transpose(P, Ptr);
    addEquals(P, Ptr);
    divide(P, elementSum(P));
    replaceNaN(P, Double.MIN_VALUE);
    // early exaggeration
    scale(4.0, P);
    maximize(P, 1e-12);
    if (config.cancelled())
        return null;
    monitor.showMessage(TaskMonitor.Level.INFO, "Y:Shape is = " + Y.getNumRows() + " x " + Y.getNumCols());
    DMatrixRMaj sqed = new DMatrixRMaj(Y.numRows, Y.numCols);
    DMatrixRMaj sum_Y = new DMatrixRMaj(1, Y.numRows);
    DMatrixRMaj num = new DMatrixRMaj(Y.numRows, Y.numRows);
    DMatrixRMaj Q = new DMatrixRMaj(P.numRows, P.numCols);
    System.out.println("Created sum_Y: (" + "" + sum_Y.numRows + "," + sum_Y.numCols + ")");
    double progress = 0.0;
    for (int iter = 0; iter < max_iter && !abort; iter++) {
        if (config.cancelled())
            return null;
        progress = (double) iter / (double) max_iter;
        monitor.setProgress(progress);
        // Compute pairwise affinities
        elementPower(Y, 2, sqed);
        sumRows(sqed, sum_Y);
        // Transpose the sum vector. sumRows now
        // gratuitously transposes sum_Y
        transpose(sum_Y);
        multAddTransB(-2.0, Y, Y, Ysqlmul);
        addRowVector(Ysqlmul, sum_Y);
        transpose(Ysqlmul);
        addRowVector(Ysqlmul, sum_Y);
        add(Ysqlmul, 1.0);
        divide(1.0, Ysqlmul);
        num.set(Ysqlmul);
        assignAtIndex(num, range(n), range(n), 0);
        divide(num, elementSum(num), Q);
        maximize(Q, 1e-12);
        // writeMatrix("Q", Q);
        // Compute gradient
        subtract(P, Q, L);
        elementMult(L, num);
        // rowsum = nx1
        DMatrixRMaj rowsum = sumRows(L, null);
        double[] rsum = new double[rowsum.numRows];
        for (int i = 0; i < rsum.length; i++) {
            rsum[i] = rowsum.get(i, 0);
        }
        setDiag(diag, rsum);
        subtract(diag, L, L);
        mult(L, Y, dY);
        scale(4.0, dY);
        // Perform the update
        if (iter < 20)
            momentum = initial_momentum;
        else
            momentum = final_momentum;
        boolean[][] boolMtrx = equal(biggerThan(dY, 0.0), biggerThan(iY, 0.0));
        setData(btNeg, abs(negate(boolMtrx)));
        setData(bt, abs(boolMtrx));
        DMatrixRMaj gainsSmall = new DMatrixRMaj(gains);
        DMatrixRMaj gainsBig = new DMatrixRMaj(gains);
        add(gainsSmall, 0.2);
        scale(0.8, gainsBig);
        elementMult(gainsSmall, btNeg);
        elementMult(gainsBig, bt);
        add(gainsSmall, gainsBig, gains);
        assignAllLessThan(gains, min_gain, min_gain);
        scale(momentum, iY);
        DMatrixRMaj gainsdY = new DMatrixRMaj(gains.numRows, dY.numCols);
        elementMult(gains, dY, gainsdY);
        scale(eta, gainsdY);
        subtractEquals(iY, gainsdY);
        addEquals(Y, iY);
        DMatrixRMaj colMeanY = colMean(Y, 0);
        DMatrixRMaj meanTile = tile(colMeanY, n, 1);
        subtractEquals(Y, meanTile);
        // Compute current value of cost function
        if (iter % 100 == 0) {
            DMatrixRMaj Pdiv = new DMatrixRMaj(P);
            elementDiv(Pdiv, Q);
            elementLog(Pdiv, logdivide);
            replaceNaN(logdivide, Double.MIN_VALUE);
            elementMult(logdivide, P);
            replaceNaN(logdivide, Double.MIN_VALUE);
            double C = elementSum(logdivide);
            end = System.currentTimeMillis();
            monitor.showMessage(TaskMonitor.Level.INFO, String.format("Iteration %d: error is %f (100 iterations in %4.2f seconds)\n", iter, C, (end - start) / 1000.0));
            if (C < 0) {
                monitor.showMessage(TaskMonitor.Level.WARN, "Warning: Error is negative, this is usually a very bad sign!");
            }
            start = System.currentTimeMillis();
        }
        // Stop lying about P-values
        if (iter == 100)
            divide(P, 4);
    }
    end = System.currentTimeMillis();
    monitor.showMessage(TaskMonitor.Level.INFO, String.format("Completed in %4.2f seconds", (end - total) / 1000.0));
    // Return solution
    return extractDoubleArray(Y);
}
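The cost reported every 100 iterations above is the Kullback-Leibler divergence KL(P||Q) = sum_ij p_ij * log(p_ij / q_ij), computed with statically imported helpers. As a rough standalone illustration (not project code; the class name KlCost is invented here), the same quantity can be written with fully qualified CommonOps_DDRM calls:

import org.ejml.data.DMatrixRMaj;
import org.ejml.dense.row.CommonOps_DDRM;

public class KlCost {
    // KL(P||Q) = sum_ij P_ij * log(P_ij / Q_ij).
    // Assumes P and Q are same-sized, strictly positive probability matrices
    // (the loop above clamps both with maximize(..., 1e-12) before this point).
    public static double klDivergence(DMatrixRMaj P, DMatrixRMaj Q) {
        DMatrixRMaj ratio = P.copy();
        CommonOps_DDRM.elementDiv(ratio, Q);        // ratio = P ./ Q
        DMatrixRMaj logRatio = new DMatrixRMaj(P.numRows, P.numCols);
        CommonOps_DDRM.elementLog(ratio, logRatio); // logRatio = log(P ./ Q)
        CommonOps_DDRM.elementMult(logRatio, P);    // P .* log(P ./ Q)
        return CommonOps_DDRM.elementSum(logRatio);
    }
}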
Use of org.ejml.data.DMatrixRMaj in project clusterMaker2 by RBVI.
Class PrincipalComponentAnalysis, method response.
/**
* Computes the dot product of each basis vector against the sample. Can be used as a measure
* for membership in the training sample set. High values correspond to a better fit.
*
* @param sample Sample of original data.
* @return Higher value indicates it is more likely to be a member of input dataset.
*/
public double response(double[] sample) {
    if (sample.length != A.numCols)
        throw new IllegalArgumentException("Expected input vector to be in sample space");
    DMatrixRMaj dots = new DMatrixRMaj(numComponents, 1);
    DMatrixRMaj s = DMatrixRMaj.wrap(A.numCols, 1, sample);
    CommonOps_DDRM.mult(V_t, s, dots);
    return NormOps_DDRM.normF(dots);
}
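A possible call sequence, hedged: this class appears to follow EJML's reference PrincipalComponentAnalysis example, in which a basis is built with setup/addSample/computeBasis before response is scored. Those method names, and the variables below, are assumptions for illustration and should be checked against the actual clusterMaker2 class.

// Hypothetical usage sketch; method names follow EJML's reference PCA example
// and may differ in the clusterMaker2 copy of this class.
PrincipalComponentAnalysis pca = new PrincipalComponentAnalysis();
pca.setup(numSamples, sampleSize);   // expected training-set size and vector length
for (double[] row : trainingData)
    pca.addSample(row);
pca.computeBasis(numComponents);     // keep the strongest numComponents directions
// Higher response means the candidate projects more strongly onto the learned basis.
double score = pca.response(candidateSample);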