Use of org.apache.commons.math3.random.Well19937c in project GDSC-SMLM by aherbert: class FastMLEJacobianGradient2ProcedureTest, method gradientCalculatorComputesGradient.
/**
 * Check the analytical first and second derivatives, and the full Jacobian of second-order
 * partial derivatives, against numerical central-difference gradients computed from the
 * log-likelihood and the analytical first derivatives.
 *
 * @param nPeaks the number of peaks in the simulated data
 * @param func the Gaussian function providing analytical gradients
 */
private void gradientCalculatorComputesGradient(int nPeaks, ErfGaussian2DFunction func) {
  // Check the first and second derivatives
  int nparams = func.getNumberOfGradients();
  int[] indices = func.gradientIndices();

  int iter = 100;
  rdg = new RandomDataGenerator(new Well19937c(30051977));

  ArrayList<double[]> paramsList = new ArrayList<double[]>(iter);
  ArrayList<double[]> yList = new ArrayList<double[]>(iter);

  createData(nPeaks, iter, paramsList, yList, true);

  // Relative step size for central differences, and tolerance for the comparison
  double delta = 1e-5;
  DoubleEquality eq = new DoubleEquality(1e-2, 1e-3);

  for (int i = 0; i < paramsList.size(); i++) {
    double[] y = yList.get(i);
    double[] a = paramsList.get(i);
    double[] a2 = a.clone();
    FastMLEJacobianGradient2Procedure p =
        new FastMLEJacobianGradient2Procedure(y, (ExtendedGradient2Function) func);
    p.computeJacobian(a);
    double[] d1 = p.d1.clone();
    double[] d2 = p.d2.clone();
    DenseMatrix64F J = DenseMatrix64F.wrap(nparams, nparams, p.getJacobianLinear());
    for (int j = 0; j < nparams; j++) {
      int k = indices[j];
      // Use a step that is exactly representable around a[k]
      double d = Precision.representableDelta(a[k], (a[k] == 0) ? delta : a[k] * delta);
      a2[k] = a[k] + d;
      double llh = p.computeLogLikelihood(a2);
      p.computeFirstDerivative(a2);
      double[] d1h = p.d1.clone();
      a2[k] = a[k] - d;
      double lll = p.computeLogLikelihood(a2);
      p.computeFirstDerivative(a2);
      double[] d1l = p.d1.clone();
      a2[k] = a[k];
      // Numerical first derivative from the log-likelihood;
      // numerical second derivative from the analytical first derivative
      double gradient1 = (llh - lll) / (2 * d);
      double gradient2 = (d1h[j] - d1l[j]) / (2 * d);
      Assert.assertTrue("Not same gradient1 @ " + j,
          eq.almostEqualRelativeOrAbsolute(gradient1, d1[j]));
      Assert.assertTrue("Not same gradient2 @ " + j,
          eq.almostEqualRelativeOrAbsolute(gradient2, d2[j]));

      // Check the full Jacobian. The diagonal (j == jj) duplicates the second-derivative
      // check above but is deliberately repeated to verify the Jacobian storage is correct.
      for (int jj = 0; jj < nparams; jj++) {
        int kk = indices[jj];
        double dd = Precision.representableDelta(a[kk], (a[kk] == 0) ? delta : a[kk] * delta);
        a2[kk] = a[kk] + dd;
        p.computeFirstDerivative(a2);
        d1h = p.d1.clone();
        a2[kk] = a[kk] - dd;
        p.computeFirstDerivative(a2);
        d1l = p.d1.clone();
        a2[kk] = a[kk];
        // Use index j even though we adjusted index jj: this is the cross partial derivative
        gradient2 = (d1h[j] - d1l[j]) / (2 * dd);
        if (!eq.almostEqualRelativeOrAbsolute(gradient2, J.get(j, jj))) {
          Assert.fail(String.format("Not same gradientJ @ [%d,%d]", j, jj));
        }
      }
    }
  }
}
Use of org.apache.commons.math3.random.Well19937c in project GDSC-SMLM by aherbert: class GradientCalculatorSpeedTest, method gradientCalculatorNComputesSameAsGradientCalculator.
/**
 * Check that the factory-created (unrolled) gradient calculator computes the same linearised
 * fit (alpha matrix, beta vector and score) as the generic calculator, with and without a
 * camera noise model, and that the diagonal Fisher information matches.
 *
 * @param func the function
 * @param nparams the number of gradient parameters
 * @param mle true to use the MLE calculator
 */
private void gradientCalculatorNComputesSameAsGradientCalculator(Gaussian2DFunction func, int nparams, boolean mle) {
  // Check the function is the correct size
  Assert.assertEquals(nparams, func.gradientIndices().length);

  int iter = 100;
  rdg = new RandomDataGenerator(new Well19937c(30051977));

  double[][] alpha = new double[nparams][nparams];
  double[] beta = new double[nparams];
  double[][] alpha2 = new double[nparams][nparams];
  double[] beta2 = new double[nparams];

  ArrayList<double[]> paramsList = new ArrayList<double[]>(iter);
  ArrayList<double[]> yList = new ArrayList<double[]>(iter);

  int[] x = createData(1, iter, paramsList, yList);

  GradientCalculator calc = (mle) ? new MLEGradientCalculator(beta.length) : new GradientCalculator(beta.length);
  GradientCalculator calc2 = GradientCalculatorFactory.newCalculator(nparams, mle);

  // Using the observation indices
  for (int i = 0; i < paramsList.size(); i++) {
    double s = calc.findLinearised(x, yList.get(i), paramsList.get(i), alpha, beta, func);
    double s2 = calc2.findLinearised(x, yList.get(i), paramsList.get(i), alpha2, beta2, func);
    Assert.assertTrue("Result: Not same @ " + i, eq.almostEqualRelativeOrAbsolute(s, s2));
    Assert.assertTrue("Observations: Not same beta @ " + i, eq.almostEqualRelativeOrAbsolute(beta, beta2));
    for (int j = 0; j < beta.length; j++)
      Assert.assertTrue("Observations: Not same alpha @ " + i, eq.almostEqualRelativeOrAbsolute(alpha[j], alpha2[j]));
  }

  // Using only the number of observations
  for (int i = 0; i < paramsList.size(); i++) {
    double s = calc.findLinearised(x.length, yList.get(i), paramsList.get(i), alpha, beta, func);
    double s2 = calc2.findLinearised(x.length, yList.get(i), paramsList.get(i), alpha2, beta2, func);
    Assert.assertTrue("N-Result: Not same @ " + i, eq.almostEqualRelativeOrAbsolute(s, s2));
    Assert.assertTrue("N-observations: Not same beta @ " + i, eq.almostEqualRelativeOrAbsolute(beta, beta2));
    for (int j = 0; j < beta.length; j++)
      Assert.assertTrue("N-observations: Not same alpha @ " + i, eq.almostEqualRelativeOrAbsolute(alpha[j], alpha2[j]));
  }

  if (!mle) {
    // Repeat the comparison with a camera noise model (only supported by the LSQ calculator)
    func.setNoiseModel(CameraNoiseModel.createNoiseModel(10, 0, true));

    for (int i = 0; i < paramsList.size(); i++) {
      double s = calc.findLinearised(x, yList.get(i), paramsList.get(i), alpha, beta, func);
      double s2 = calc2.findLinearised(x, yList.get(i), paramsList.get(i), alpha2, beta2, func);
      Assert.assertTrue("Result+Noise: Not same @ " + i, eq.almostEqualRelativeOrAbsolute(s, s2));
      Assert.assertTrue("Observations+Noise: Not same beta @ " + i, eq.almostEqualRelativeOrAbsolute(beta, beta2));
      for (int j = 0; j < beta.length; j++)
        Assert.assertTrue("Observations+Noise: Not same alpha @ " + i, eq.almostEqualRelativeOrAbsolute(alpha[j], alpha2[j]));
    }

    for (int i = 0; i < paramsList.size(); i++) {
      double s = calc.findLinearised(x.length, yList.get(i), paramsList.get(i), alpha, beta, func);
      double s2 = calc2.findLinearised(x.length, yList.get(i), paramsList.get(i), alpha2, beta2, func);
      Assert.assertTrue("N-Result+Noise: Not same @ " + i, eq.almostEqualRelativeOrAbsolute(s, s2));
      Assert.assertTrue("N-Observations+Noise: Not same beta @ " + i, eq.almostEqualRelativeOrAbsolute(beta, beta2));
      for (int j = 0; j < beta.length; j++)
        Assert.assertTrue("N-Observations+Noise: Not same alpha @ " + i, eq.almostEqualRelativeOrAbsolute(alpha[j], alpha2[j]));
    }
  }

  // Only the diagonal Fisher Information has been unrolled into the other calculators.
  // BUG FIX: the original compared calc against itself (calc vs calc), which always passes;
  // compare the generic calculator against the unrolled calc2 instead.
  for (int i = 0; i < paramsList.size(); i++) {
    beta = calc.fisherInformationDiagonal(x.length, paramsList.get(i), func);
    beta2 = calc2.fisherInformationDiagonal(x.length, paramsList.get(i), func);
    Assert.assertTrue("Not same diagonal @ " + i, eq.almostEqualRelativeOrAbsolute(beta, beta2));
  }
}
Use of org.apache.commons.math3.random.Well19937c in project GDSC-SMLM by aherbert: class GradientCalculatorSpeedTest, method mleCalculatorComputesLogLikelihoodRatio.
/**
 * Check that the MLE gradient calculator returns the same log-likelihood ratio as the
 * PoissonCalculator reference implementation for simulated Poisson data.
 */
@Test
public void mleCalculatorComputesLogLikelihoodRatio() {
  EllipticalGaussian2DFunction func = new EllipticalGaussian2DFunction(1, blockWidth, blockWidth);
  int size = blockWidth * blockWidth;
  double[] params = new double[7];
  rdg = new RandomDataGenerator(new Well19937c(30051977));
  for (int run = 0; run < 5; run++) {
    params[0] = random(Background);
    params[1] = random(Amplitude);
    params[2] = random(Angle);
    params[3] = random(Xpos);
    params[4] = random(Ypos);
    params[5] = random(Xwidth);
    params[6] = random(Ywidth);

    // Simulate Poisson process using the function value as the mean
    func.initialise(params);
    double[] data = Utils.newArray(size, 0, 1.0);
    double[] mean = new double[data.length];
    for (int i = 0; i < size; i++) {
      mean[i] = func.eval(i);
      if (mean[i] > 0) {
        data[i] = rdg.nextPoisson(mean[i]);
      }
    }

    GradientCalculator calc = GradientCalculatorFactory.newCalculator(func.getNumberOfGradients(), true);
    double[][] alpha = new double[7][7];
    double[] beta = new double[7];

    // The MLE calculator returns the log-likelihood ratio as its score
    double llr = PoissonCalculator.logLikelihoodRatio(mean, data);
    double llr2 = calc.findLinearised(size, data, params, alpha, beta, func);
    Assert.assertEquals("Log-likelihood ratio", llr, llr2, llr * 1e-10);
  }
}
Use of org.apache.commons.math3.random.Well19937c in project GDSC-SMLM by aherbert: class LSQLVMGradientProcedureTest, method gradientProcedureIsNotSlowerThanGradientCalculator.
/**
 * Speed test: the factory-created gradient procedure should not be materially slower than
 * the legacy GradientCalculator on fake data.
 *
 * @param nparams the number of fitted parameters
 * @param factory the procedure factory under test
 */
private void gradientProcedureIsNotSlowerThanGradientCalculator(final int nparams, final BaseLSQLVMGradientProcedureFactory factory) {
  org.junit.Assume.assumeTrue(speedTests || TestSettings.RUN_SPEED_TESTS);

  final int iter = 1000;
  rdg = new RandomDataGenerator(new Well19937c(30051977));
  final double[][] alpha = new double[nparams][nparams];
  final double[] beta = new double[nparams];

  final ArrayList<double[]> paramsList = new ArrayList<double[]>(iter);
  final ArrayList<double[]> yList = new ArrayList<double[]>(iter);
  int[] x = createFakeData(nparams, iter, paramsList, yList);
  final int n = x.length;
  final FakeGradientFunction func = new FakeGradientFunction(blockWidth, nparams);

  // Warm up both implementations before timing
  GradientCalculator warmupCalc = GradientCalculatorFactory.newCalculator(nparams, false);
  for (int i = 0; i < paramsList.size(); i++) {
    warmupCalc.findLinearised(n, yList.get(i), paramsList.get(i), alpha, beta, func);
  }
  for (int i = 0; i < paramsList.size(); i++) {
    factory.createProcedure(yList.get(i), func).gradient(paramsList.get(i));
  }

  // Realistic loops for an optimisation
  final int loops = 15;

  // Run till stable timing
  Timer t1 = new Timer() {
    @Override
    void run() {
      for (int i = 0, k = 0; i < iter; i++) {
        GradientCalculator calc = GradientCalculatorFactory.newCalculator(nparams, false);
        for (int j = loops; j-- > 0; ) {
          calc.findLinearised(n, yList.get(i), paramsList.get(k++ % iter), alpha, beta, func);
        }
      }
    }
  };
  long time1 = t1.getTime();

  Timer t2 = new Timer(t1.loops) {
    @Override
    void run() {
      for (int i = 0, k = 0; i < iter; i++) {
        BaseLSQLVMGradientProcedure procedure = factory.createProcedure(yList.get(i), func);
        for (int j = loops; j-- > 0; ) {
          procedure.gradient(paramsList.get(k++ % iter));
        }
      }
    }
  };
  long time2 = t2.getTime();

  log("GradientCalculator = %d : %s %d = %d : %fx\n", time1, factory.getClass().getSimpleName(), nparams, time2, (1.0 * time1) / time2);
  if (TestSettings.ASSERT_SPEED_TESTS) {
    // Add contingency
    Assert.assertTrue(time2 < time1 * 1.5);
  }
}
Use of org.apache.commons.math3.random.Well19937c in project GDSC-SMLM by aherbert: class LVMGradientProcedureTest, method gradientProcedureLinearIsFasterThanGradientProcedure.
/**
 * Speed test: the linear (unrolled) gradient procedure from the factory should be faster
 * than the type-specific procedure created by createProcedure.
 *
 * @param nparams the number of fitted parameters
 * @param type the LVM procedure type
 * @param precomputed true to wrap the function with precomputed values
 */
private void gradientProcedureLinearIsFasterThanGradientProcedure(final int nparams, final Type type, final boolean precomputed) {
  org.junit.Assume.assumeTrue(speedTests || TestSettings.RUN_SPEED_TESTS);

  final int iter = 100;
  rdg = new RandomDataGenerator(new Well19937c(30051977));
  final ArrayList<double[]> paramsList = new ArrayList<double[]>(iter);
  final ArrayList<double[]> yList = new ArrayList<double[]>(iter);
  createData(1, iter, paramsList, yList);

  // Remove the timing of the function call by creating a dummy function
  FakeGradientFunction fgf = new FakeGradientFunction(blockWidth, nparams);
  final Gradient1Function func = precomputed
      ? PrecomputedGradient1Function.wrapGradient1Function(fgf, Utils.newArray(fgf.size(), 0.1, 1.3))
      : fgf;

  // Warm up and verify the two implementations agree exactly
  for (int i = 0; i < paramsList.size(); i++) {
    double[] y = yList.get(i);
    double[] a = paramsList.get(i);
    LVMGradientProcedure p1 = createProcedure(type, y, func);
    p1.gradient(a);
    p1.gradient(a);
    LVMGradientProcedure p2 = LVMGradientProcedureFactory.create(y, func, type);
    p2.gradient(a);
    p2.gradient(a);
    Assert.assertArrayEquals("A " + i, p1.getAlphaLinear(), p2.getAlphaLinear(), 0);
    Assert.assertArrayEquals("B " + i, p1.beta, p2.beta, 0);
  }

  // Realistic loops for an optimisation
  final int loops = 15;

  // Run till stable timing
  Timer t1 = new Timer() {
    @Override
    void run() {
      for (int i = 0, k = 0; i < paramsList.size(); i++) {
        LVMGradientProcedure p1 = createProcedure(type, yList.get(i), func);
        for (int j = loops; j-- > 0; ) {
          p1.gradient(paramsList.get(k++ % iter));
        }
      }
    }
  };
  long time1 = t1.getTime();

  Timer t2 = new Timer(t1.loops) {
    @Override
    void run() {
      for (int i = 0, k = 0; i < paramsList.size(); i++) {
        LVMGradientProcedure p2 = LVMGradientProcedureFactory.create(yList.get(i), func, type);
        for (int j = loops; j-- > 0; ) {
          p2.gradient(paramsList.get(k++ % iter));
        }
      }
    }
  };
  long time2 = t2.getTime();

  log("%s, Precomputed=%b : Standard = %d : Unrolled %d = %d : %fx\n", type, precomputed, time1, nparams, time2, (1.0 * time1) / time2);
  Assert.assertTrue(time2 < time1);
}
Aggregations