Example use of Jama.CholeskyDecomposition in the h2o-2 project by h2oai: the test method of the CholTest class.
/**
 * Benchmarks JAMA's Cholesky decomposition against the H2O parallel implementation
 * for a range of matrix sizes, step sizes, and thread counts.
 *
 * For each size, a random SPD-candidate matrix is factored with JAMA first; sizes
 * for which JAMA reports a non-SPD matrix are skipped, since there is no valid
 * reference factor to compare against.
 */
public void test() {
    Log.info("CholTest::test enter");
    for (int sz = 6000; sz < 10000; sz += 2000) {
        Log.info("CholTest::test sz is " + sz);
        DataSetup data = new DataSetup(sz, 12345);
        long start = System.currentTimeMillis();
        CholeskyDecomposition jamaChol = new Matrix(data.xx).chol();
        Log.info("JAMA CHOLESKY [N = " + sz + "] TAKES " + (System.currentTimeMillis() - start) + " MILLISECONDS.");
        // No reference factor to test against unless the input is SPD.
        if (!jamaChol.isSPD())
            continue;
        ForkJoinPool fjp = new ForkJoinPool(32);
        try {
            // Exercise the parallel implementation across thread counts and step sizes,
            // comparing each result against JAMA's L factor.
            for (int t = 2; t <= 32; t += 2) {
                for (int step : STEPS) fjp.invoke(new TestSetup(new DataSetup(data.xx), jamaChol.getL().getArray(), step, t));
            }
        } finally {
            // FIX: the pool was previously leaked on every outer-loop iteration
            // (a fresh 32-thread pool per size, never shut down).
            fjp.shutdown();
        }
    }
    Log.info("CholTest::test exit");
}
Example use of Jama.CholeskyDecomposition in the h2o-3 project by h2oai: the computeR method of the LinearAlgebraUtils class.
/**
 * Computes R = L' from the Cholesky decomposition Y'Y = LL' (the same R as in
 * the QR decomposition Y = QR).
 * @param jobKey Job key for the Gram calculation
 * @param yinfo DataInfo describing the Y matrix
 * @param transpose If true, return L (i.e. R transposed) instead of R
 * @return the R factor of the Cholesky of the Y Gram, or L when {@code transpose} is set
 */
public static double[][] computeR(Key<Job> jobKey, DataInfo yinfo, boolean transpose) {
    // Build the Gram matrix of Y; the task computes Y'Y/n, where n = nrow(Y).
    Gram.GramTask gramTask = new Gram.GramTask(jobKey, yinfo);
    gramTask.doAll(yinfo._adaptedFrame);
    // Factor the Gram with JAMA: Y'Y = LL', so R = L'.
    Matrix gramMatrix = new Matrix(gramTask._gram.getXX());
    double[][] lower = new CholeskyDecomposition(gramMatrix).getL().getArray();
    // Undo the 1/n scaling of the Gram: chol(A/n) * sqrt(n) = chol(A).
    ArrayUtils.mult(lower, Math.sqrt(gramTask._nobs));
    return transpose ? lower : ArrayUtils.transpose(lower);
}
Example use of Jama.CholeskyDecomposition in the h2o-2 project by h2oai: the cholesky method of the Gram class.
/**
 * Computes the Cholesky decomposition of this Gram matrix.
 *
 * When the Gram starts with a diagonal submatrix of dimension N, that structure is
 * exploited to reduce the complexity: the standard block decomposition of the
 * Cholesky factorization into submatrices is used.
 *
 * The Gram is split into 3 regions (4, but only the lower triangle is considered;
 * "sparse" means the diagonal region in this context):
 *   diagonal
 *   diagonal*dense
 *   dense*dense
 * The Cholesky is then solved in 3 steps:
 *   1. The diagonal part is solved directly (just the sqrt of the elements).
 *   2. The diagonal*dense part is simply divided by the sqrt of the diagonal.
 *   3. The Cholesky of (dense*dense - outer product of the diagonal*dense block
 *      from step 2) is computed, either by the parallel in-place H2O
 *      implementation or by JAMA.
 *
 * @param chol        result holder to fill in, or null to allocate one backed by a deep
 *                    copy of this Gram
 * @param parallelize if true, use the in-place parallel Cholesky for the dense block;
 *                    otherwise fall back to JAMA
 * @param id          label used only by the (commented-out) timing logs
 * @return the filled-in Cholesky (the same object as {@code chol} when non-null)
 */
public Cholesky cholesky(Cholesky chol, boolean parallelize, String id) {
    long start = System.currentTimeMillis();
    if (chol == null) {
        // Deep-copy the backing arrays so the decomposition does not clobber this Gram.
        double[][] xx = _xx.clone();
        for (int i = 0; i < xx.length; ++i) xx[i] = xx[i].clone();
        chol = new Cholesky(xx, _diag.clone());
    }
    final Cholesky fchol = chol;
    final int sparseN = _diag.length;
    final int denseN = _fullN - sparseN;
    // Steps 1 and 2: sqrt the diagonal part, and scale the diagonal*dense strip by
    // the inverse sqrt of the diagonal.
    // FIX: removed the dead `if (_diag != null)` guard that used to wrap this loop --
    // _diag is dereferenced unconditionally above (_diag.clone() / _diag.length), so
    // it can never be null at this point.
    for (int i = 0; i < sparseN; ++i) {
        double d = 1.0 / (chol._diag[i] = Math.sqrt(_diag[i]));
        for (int j = 0; j < denseN; ++j) chol._xx[j][i] = d * _xx[j][i];
    }
    ForkJoinTask[] fjts = new ForkJoinTask[denseN];
    // For each dense row, precompute the sorted indices of its non-zero entries in
    // the sparse (diagonal) columns; used below to skip zeros in the dot products.
    final int[][] nz = new int[denseN][];
    for (int i = 0; i < denseN; ++i) {
        final int fi = i;
        fjts[i] = new RecursiveAction() {
            @Override
            protected void compute() {
                int[] tmp = new int[sparseN];
                double[] rowi = fchol._xx[fi];
                int n = 0;
                for (int k = 0; k < sparseN; ++k) if (rowi[k] != .0)
                    tmp[n++] = k;
                nz[fi] = Arrays.copyOf(tmp, n);
            }
        };
    }
    ForkJoinTask.invokeAll(fjts);
    // Step 3 (part a): subtract the outer product of the diagonal*dense strip from
    // the dense*dense block, one row per task. Each inner dot product is a sparse
    // merge over the two sorted non-zero index lists.
    for (int i = 0; i < denseN; ++i) {
        final int fi = i;
        fjts[i] = new RecursiveAction() {
            @Override
            protected void compute() {
                double[] rowi = fchol._xx[fi];
                int[] nzi = nz[fi];
                for (int j = 0; j <= fi; ++j) {
                    double[] rowj = fchol._xx[j];
                    int[] nzj = nz[j];
                    double s = 0;
                    // Merge-join the sorted index lists; accumulate products at shared indices.
                    for (int t = 0, z = 0; t < nzi.length && z < nzj.length; ) {
                        int k1 = nzi[t];
                        int k2 = nzj[z];
                        if (k1 < k2) {
                            t++;
                            continue;
                        } else if (k1 > k2) {
                            z++;
                            continue;
                        } else {
                            s += rowi[k1] * rowj[k1];
                            t++;
                            z++;
                        }
                    }
                    rowi[j + sparseN] = _xx[fi][j + sparseN] - s;
                }
            }
        };
    }
    ForkJoinTask.invokeAll(fjts);
    // Step 3 (part b): Cholesky of dense*dense - outer_product(diagonal*dense).
    // TODO we still use Jama, which requires (among other things) copy and expansion of the matrix. Do it here without copy and faster.
    double[][] arr = new double[denseN][];
    for (int i = 0; i < arr.length; ++i) arr[i] = Arrays.copyOfRange(fchol._xx[i], sparseN, sparseN + denseN);
    // Log.info(id + ": CHOLESKY PRECOMPUTE TIME " + (System.currentTimeMillis() - start));
    start = System.currentTimeMillis();
    if (parallelize) {
        int p = Runtime.getRuntime().availableProcessors();
        InPlaceCholesky d = InPlaceCholesky.decompose_2(arr, 10, p);
        fchol.setSPD(d.isSPD());
        arr = d.getL();
        // Log.info (id + ": H2O CHOLESKY DECOMP TAKES: " + (System.currentTimeMillis()-start));
    } else {
        // JAMA reads the full matrix, so mirror the lower triangle into the upper half.
        for (int i = 0; i < arr.length; ++i) for (int j = 0; j < i; ++j) arr[j][i] = arr[i][j];
        CholeskyDecomposition c = new Matrix(arr).chol();
        fchol.setSPD(c.isSPD());
        arr = c.getL().getArray();
        //Log.info ("JAMA CHOLESKY DECOMPOSE TAKES: " + (System.currentTimeMillis()-start));
    }
    // Copy the dense factor back into the dense region of the result (lower triangle only).
    for (int i = 0; i < arr.length; ++i) System.arraycopy(arr[i], 0, fchol._xx[i], sparseN, i + 1);
    return chol;
}
Aggregations