Use of org.ejml.data.DenseMatrix64F in project GDSC-SMLM by aherbert.
From the class ErfGaussian2DFunctionTest, method functionComputesExtendedGradientForEach.
@Test
public void functionComputesExtendedGradientForEach() {
// Verify the analytic second-order cross derivatives (d2y/da db) reported by
// forEach against a central-difference estimate built from copies of the
// function initialised at +/- h in each gradient parameter.
final ErfGaussian2DFunction func = (ErfGaussian2DFunction) this.f1;
final int nparams = func.getNumberOfGradients();
final int[] gradientIndices = func.gradientIndices();
final double[] duDaUp = new double[nparams];
final double[] duDaDown = new double[nparams];
final ErfGaussian2DFunction[] fUp = new ErfGaussian2DFunction[nparams];
final ErfGaussian2DFunction[] fDown = new ErfGaussian2DFunction[nparams];
final double[] delta = new double[nparams];
for (int p = 0; p < nparams; p++) {
fUp[p] = func.copy();
fDown[p] = func.copy();
}
// Enumerate all Peak 1 parameter combinations
for (final double background : testbackground) {
for (final double amplitude1 : testamplitude1) {
for (final double shape1 : testshape1) {
for (final double cx1 : testcx1) {
for (final double cy1 : testcy1) {
for (final double[] w1 : testw1) {
final double[] a = createParameters(background, amplitude1, shape1, cx1, cy1, w1[0], w1[1]);
func.initialiseExtended2(a);
// Initialise a pair of functions offset +/- h in each parameter
for (int p = 0; p < nparams; p++) {
final int targetParameter = gradientIndices[p];
final double xx = a[targetParameter];
// Use an exactly representable step size h to minimise round-off error
final double h = Precision.representableDelta(xx, h_);
a[targetParameter] = xx + h;
fUp[p].initialise1(a);
a[targetParameter] = xx - h;
fDown[p].initialise1(a);
a[targetParameter] = xx;
delta[p] = 2 * h;
}
func.forEach(new ExtendedGradient2Procedure() {
int i = -1;
public void executeExtended(double value, double[] dy_da, double[] d2y_dadb) {
i++;
// View the packed second-derivative array as an nparams x nparams matrix
final DenseMatrix64F m = DenseMatrix64F.wrap(nparams, nparams, d2y_dadb);
for (int j = 0; j < nparams; j++) {
// First derivatives of the function offset +/- delta in parameter j
fUp[j].eval(i, duDaUp);
fDown[j].eval(i, duDaDown);
// Central difference of du/da_k w.r.t. parameter j == d2y/(da_j da_k)
for (int k = 0; k < nparams; k++) {
final double gradient = (duDaUp[k] - duDaDown[k]) / delta[j];
if (!eq.almostEqualRelativeOrAbsolute(gradient, m.get(j, k))) {
System.out.printf("%d [%d,%d] %f ?= %f\n", i, j, k, gradient, m.get(j, k));
Assert.fail(String.format("%d [%d,%d] %f != %f", i, j, k, gradient, m.get(j, k)));
}
}
}
}
});
}
}
}
}
}
}
}
Use of org.ejml.data.DenseMatrix64F in project GDSC-SMLM by aherbert.
From the class FastMLEJacobianGradient2ProcedureTest, method gradientCalculatorComputesGradient.
/**
 * Check the first and second derivatives and the full Jacobian reported by the
 * {@link FastMLEJacobianGradient2Procedure} against central-difference numerical
 * estimates computed from the log-likelihood and the first derivatives.
 *
 * @param nPeaks
 *            the number of peaks in the test data
 * @param func
 *            the Gaussian function under test
 */
private void gradientCalculatorComputesGradient(int nPeaks, ErfGaussian2DFunction func) {
int nparams = func.getNumberOfGradients();
int[] indices = func.gradientIndices();
int iter = 100;
// Fixed seed for a reproducible test
rdg = new RandomDataGenerator(new Well19937c(30051977));
ArrayList<double[]> paramsList = new ArrayList<double[]>(iter);
ArrayList<double[]> yList = new ArrayList<double[]>(iter);
createData(nPeaks, iter, paramsList, yList, true);
double delta = 1e-5;
DoubleEquality eq = new DoubleEquality(1e-2, 1e-3);
for (int i = 0; i < paramsList.size(); i++) {
double[] y = yList.get(i);
double[] a = paramsList.get(i);
double[] a2 = a.clone();
FastMLEJacobianGradient2Procedure p = new FastMLEJacobianGradient2Procedure(y, (ExtendedGradient2Function) func);
p.computeJacobian(a);
double[] d1 = p.d1.clone();
double[] d2 = p.d2.clone();
DenseMatrix64F J = DenseMatrix64F.wrap(nparams, nparams, p.getJacobianLinear());
for (int j = 0; j < nparams; j++) {
int k = indices[j];
// Use an exactly representable step to minimise round-off in the central difference
double d = Precision.representableDelta(a[k], (a[k] == 0) ? delta : a[k] * delta);
a2[k] = a[k] + d;
double llh = p.computeLogLikelihood(a2);
p.computeFirstDerivative(a2);
double[] d1h = p.d1.clone();
a2[k] = a[k] - d;
double lll = p.computeLogLikelihood(a2);
p.computeFirstDerivative(a2);
double[] d1l = p.d1.clone();
a2[k] = a[k];
// Central differences: d(ll)/da_k and d2(ll)/da_k^2
double gradient1 = (llh - lll) / (2 * d);
double gradient2 = (d1h[j] - d1l[j]) / (2 * d);
Assert.assertTrue("Not same gradient1 @ " + j, eq.almostEqualRelativeOrAbsolute(gradient1, d1[j]));
Assert.assertTrue("Not same gradient2 @ " + j, eq.almostEqualRelativeOrAbsolute(gradient2, d2[j]));
// Check every Jacobian element, including the diagonal (j == jj). The
// diagonal was verified against d2 above but is checked again here to
// ensure the Jacobian matrix itself is consistent.
for (int jj = 0; jj < nparams; jj++) {
int kk = indices[jj];
double dd = Precision.representableDelta(a[kk], (a[kk] == 0) ? delta : a[kk] * delta);
a2[kk] = a[kk] + dd;
p.computeFirstDerivative(a2);
d1h = p.d1.clone();
a2[kk] = a[kk] - dd;
p.computeFirstDerivative(a2);
d1l = p.d1.clone();
a2[kk] = a[kk];
// Use index j even though we adjusted index jj: J[j][jj] = d2(ll)/(da_j da_jj)
gradient2 = (d1h[j] - d1l[j]) / (2 * dd);
if (!eq.almostEqualRelativeOrAbsolute(gradient2, J.get(j, jj))) {
Assert.fail(String.format("Not same gradientJ @ [%d,%d]", j, jj));
}
}
}
}
}
Use of org.ejml.data.DenseMatrix64F in project GDSC-SMLM by aherbert.
From the class PoissonGradientProcedureTest, method gradientProcedureComputesSameAsGradientCalculator.
/**
 * Verify that the Poisson gradient procedure produces a Fisher information matrix
 * identical (to the last bit) to the one from the legacy GradientCalculator.
 *
 * @param nparams
 *            the number of gradient parameters
 */
private void gradientProcedureComputesSameAsGradientCalculator(int nparams) {
final int iter = 10;
// Fixed seed for a reproducible test
rdg = new RandomDataGenerator(new Well19937c(30051977));
ArrayList<double[]> paramsList = new ArrayList<double[]>(iter);
createFakeParams(nparams, iter, paramsList);
final int n = blockWidth * blockWidth;
final FakeGradientFunction func = new FakeGradientFunction(blockWidth, nparams);
final GradientCalculator calc = GradientCalculatorFactory.newCalculator(nparams, false);
final String name = String.format("[%d]", nparams);
for (int i = 0; i < paramsList.size(); i++) {
final double[] params = paramsList.get(i);
final PoissonGradientProcedure p = PoissonGradientProcedureFactory.create(func);
p.computeFisherInformation(params);
final double[][] m = calc.fisherInformationMatrix(n, params, func);
// Require exact equality (delta == 0) in both linear and matrix form
Assert.assertArrayEquals(name + " Observations: Not same alpha @ " + i, p.getLinear(), new DenseMatrix64F(m).data, 0);
final double[][] am = p.getMatrix();
for (int j = 0; j < nparams; j++) {
Assert.assertArrayEquals(name + " Observations: Not same alpha @ " + i, am[j], m[j], 0);
}
}
}
Use of org.ejml.data.DenseMatrix64F in project GDSC-SMLM by aherbert.
From the class ErfGaussian2DFunctionTest, method functionComputesExtendedGradientForEachWith2Peaks.
// Verify the analytic second-order cross derivatives (d2y/da db) from forEach for a
// two-peak function against a central-difference estimate built from copies of the
// function initialised at +/- h in each gradient parameter.
@Test
public void functionComputesExtendedGradientForEachWith2Peaks() {
// Skip the test when no two-peak function is configured
org.junit.Assume.assumeNotNull(f2);
final ErfGaussian2DFunction f2 = (ErfGaussian2DFunction) this.f2;
final int nparams = f2.getNumberOfGradients();
final int[] gradientIndices = f2.gradientIndices();
// Scratch arrays for the first derivatives at +h and -h
final double[] du_da = new double[f2.getNumberOfGradients()];
final double[] du_db = new double[f2.getNumberOfGradients()];
final ErfGaussian2DFunction[] fHigh = new ErfGaussian2DFunction[nparams];
final ErfGaussian2DFunction[] fLow = new ErfGaussian2DFunction[nparams];
final double[] delta = new double[nparams];
for (int j = 0; j < nparams; j++) {
fHigh[j] = f2.copy();
fLow[j] = f2.copy();
}
// Enumerate all combinations of Peak 1 and Peak 2 parameters
for (double background : testbackground) // Peak 1
for (double amplitude1 : testamplitude1) for (double shape1 : testshape1) for (double cx1 : testcx1) for (double cy1 : testcy1) for (double[] w1 : testw1) // Peak 2
for (double amplitude2 : testamplitude2) for (double shape2 : testshape2) for (double cx2 : testcx2) for (double cy2 : testcy2) for (double[] w2 : testw2) {
double[] a = createParameters(background, amplitude1, shape1, cx1, cy1, w1[0], w1[1], amplitude2, shape2, cx2, cy2, w2[0], w2[1]);
f2.initialiseExtended2(a);
// Create a set of functions initialised +/- delta in each parameter
for (int j = 0; j < nparams; j++) {
int targetParameter = gradientIndices[j];
// Numerically solve gradient.
// Calculate the step size h to be an exact numerical representation
final double xx = a[targetParameter];
// Get h to minimise roundoff error
double h = Precision.representableDelta(xx, h_);
// Evaluate at (x+h) and (x-h)
a[targetParameter] = xx + h;
fHigh[j].initialise1(a);
a[targetParameter] = xx - h;
fLow[j].initialise1(a);
a[targetParameter] = xx;
delta[j] = 2 * h;
}
f2.forEach(new ExtendedGradient2Procedure() {
// Index of the current pixel; incremented on each callback
int i = -1;
public void executeExtended(double value, double[] dy_da, double[] d2y_dadb) {
i++;
//if (i!=f2.size()/2) return;
// View the packed second-derivative array as an nparams x nparams matrix
DenseMatrix64F m = DenseMatrix64F.wrap(nparams, nparams, d2y_dadb);
for (int j = 0; j < nparams; j++) {
// Evaluate the function +/- delta for parameter j
fHigh[j].eval(i, du_da);
fLow[j].eval(i, du_db);
// Check the gradient with respect to parameter k
for (int k = 0; k < nparams; k++) {
// Central difference: d2y/(da_j da_k)
double gradient = (du_da[k] - du_db[k]) / delta[j];
boolean ok = eq.almostEqualRelativeOrAbsolute(gradient, m.get(j, k));
if (!ok) {
System.out.printf("%d [%d,%d] %f ?= %f\n", i, j, k, gradient, m.get(j, k));
Assert.fail(String.format("%d [%d,%d] %f != %f", i, j, k, gradient, m.get(j, k)));
}
}
}
}
});
//return;
}
}
Use of org.ejml.data.DenseMatrix64F in project GDSC-SMLM by aherbert.
From the class EJMLLinearSolver, method invertCholesky.
/**
 * Inverts the symmetric positive definite matrix A using Cholesky decomposition.
 * On output the input array is replaced by A^-1.
 *
 * @param a
 *            the matrix a
 * @return False if the matrix is singular (no solution)
 */
public boolean invertCholesky(double[][] a) {
// Convert to the EJML matrix type, invert in place, then copy back
final DenseMatrix64F matrix = toA(a);
if (!invertCholesky(matrix)) {
return false;
}
toSquareData(matrix, a);
return true;
}
Aggregations