Use of uk.ac.sussex.gdsc.core.utils.DoubleEquality in project GDSC-SMLM by aherbert.
From class FastMleJacobianGradient2ProcedureTest, method gradientCalculatorComputesGradient:
private void gradientCalculatorComputesGradient(RandomSeed seed, int npeaks, ErfGaussian2DFunction func) {
// Check the first and second derivatives
final int nparams = func.getNumberOfGradients();
final int[] indices = func.gradientIndices();
final int iter = 100;
final ArrayList<double[]> paramsList = new ArrayList<>(iter);
final ArrayList<double[]> yList = new ArrayList<>(iter);
createData(RngUtils.create(seed.getSeed()), npeaks, iter, paramsList, yList, true);
// for the gradients
final double delta = 1e-4;
final DoubleEquality eq = new DoubleEquality(5e-2, 1e-16);
// Must compute most of the time
final int failureLimit = TestCounter.computeFailureLimit(iter, 0.1);
// failureLimit = 0;
final TestCounter failCounter = new TestCounter(failureLimit, 2 * nparams);
final TestCounter failCounter2 = new TestCounter(failureLimit, nparams * nparams);
for (int i = 0; i < paramsList.size(); i++) {
final int ii = i;
final double[] y = yList.get(i);
final double[] a = paramsList.get(i);
final double[] a2 = a.clone();
final FastMleJacobianGradient2Procedure p = new FastMleJacobianGradient2Procedure(y, func);
// double ll = p.computeLogLikelihood(a);
p.computeJacobian(a);
final double[] d1 = p.d1.clone();
final double[] d2 = p.d2.clone();
final DenseMatrix64F J = DenseMatrix64F.wrap(nparams, nparams, p.getJacobianLinear());
for (int j = 0; j < nparams; j++) {
final int j_ = j;
final int k = indices[j];
final double d = Precision.representableDelta(a[k], (a[k] == 0) ? delta : a[k] * delta);
a2[k] = a[k] + d;
final double llh = p.computeLogLikelihood(a2);
p.computeFirstDerivative(a2);
final double[] d1h = p.d1.clone();
a2[k] = a[k] - d;
final double lll = p.computeLogLikelihood(a2);
p.computeFirstDerivative(a2);
final double[] d1l = p.d1.clone();
a2[k] = a[k];
final double gradient1 = (llh - lll) / (2 * d);
final double gradient2 = (d1h[j] - d1l[j]) / (2 * d);
// logger.fine(FunctionUtils.getSupplier("[%d,%d] ll - %f (%s %f+/-%f) d1 %f ?= %f : d2 %f
// ?= %f", i, k, ll, func.getName(k), a[k], d,
// gradient1, d1[j], gradient2, d2[j]);
failCounter.run(j, () -> eq.almostEqualRelativeOrAbsolute(gradient1, d1[j_]), () -> {
Assertions.fail(() -> String.format("Not same gradient1 @ %d,%d: %s != %s (error=%s)", ii, j_, gradient1, d1[j_], DoubleEquality.relativeError(gradient1, d1[j_])));
});
failCounter.run(nparams + j, () -> eq.almostEqualRelativeOrAbsolute(gradient2, d2[j_]), () -> {
Assertions.fail(() -> String.format("Not same gradient2 @ %d,%d: %s != %s (error=%s)", ii, j_, gradient2, d2[j_], DoubleEquality.relativeError(gradient2, d2[j_])));
});
for (int jj = 0; jj < nparams; jj++) {
if (j == jj) {
// This is done above
// Check it anyway to ensure the Jacobian is correct
// continue;
}
final int jj_ = jj;
final int kk = indices[jj];
final double dd = Precision.representableDelta(a[kk], (a[kk] == 0) ? delta : a[kk] * delta);
a2[kk] = a[kk] + dd;
p.computeFirstDerivative(a2);
System.arraycopy(p.d1, 0, d1h, 0, d1h.length);
a2[kk] = a[kk] - dd;
p.computeFirstDerivative(a2);
System.arraycopy(p.d1, 0, d1l, 0, d1l.length);
a2[kk] = a[kk];
// Use index j even though we adjusted index jj
final double gradient3 = (d1h[j] - d1l[j]) / (2 * dd);
final boolean ok = eq.almostEqualRelativeOrAbsolute(gradient3, J.get(j, jj));
// logger.fine(FunctionUtils.getSupplier("[%d,%d,%d] (%s %f %s %f+/-%f) J %f ?= %f %b", i,
// k, kk, func.getName(k),
// a[k], func.getName(kk), a[kk], dd, gradient3, J.get(j, jj), ok);
// if (!ok)
// {
// ExtraAssertions.fail("Not same gradientJ @ [%d,%d]", j, jj);
// }
failCounter2.run(nparams * j_ + jj_, () -> ok, () -> {
Assertions.fail(() -> String.format("Not same gradientJ @ %d [%d,%d]: %s != %s (error=%s)", ii, j_, jj_, gradient3, J.get(j_, jj_), DoubleEquality.relativeError(gradient3, J.get(j_, jj_))));
});
}
}
}
}
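The check above compares the analytic first and second derivatives from FastMleJacobianGradient2Procedure against central finite differences, accepting each value when DoubleEquality(5e-2, 1e-16) reports it equal to the numerical estimate. A minimal self-contained sketch of that comparison pattern follows; the helper class and method names are hypothetical and not part of GDSC-SMLM, but every library call used is one that appears in the test above.

import java.util.function.DoubleUnaryOperator;
import org.apache.commons.math3.util.Precision;
import uk.ac.sussex.gdsc.core.utils.DoubleEquality;

class CentralDifferenceCheck {
  /** Return true if the analytic gradient matches a central finite difference of f at x. */
  static boolean matches(DoubleUnaryOperator f, double x, double analyticGradient) {
    final double delta = 1e-4;
    // Adjust the step so that x + d is exactly representable, reducing rounding error
    // in the symmetric difference.
    final double d = Precision.representableDelta(x, (x == 0) ? delta : x * delta);
    final double numericGradient = (f.applyAsDouble(x + d) - f.applyAsDouble(x - d)) / (2 * d);
    // Same tolerances as the test: 5% relative error or a tiny absolute error.
    final DoubleEquality eq = new DoubleEquality(5e-2, 1e-16);
    return eq.almostEqualRelativeOrAbsolute(numericGradient, analyticGradient);
  }
}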
Use of uk.ac.sussex.gdsc.core.utils.DoubleEquality in project GDSC-SMLM by aherbert.
From class LvmGradientProcedureTest, method gradientProcedureSupportsPrecomputed:
private void gradientProcedureSupportsPrecomputed(RandomSeed seed, final Type type, boolean checkGradients) {
final int iter = 10;
final UniformRandomProvider rng = RngUtils.create(seed.getSeed());
final SharedStateContinuousSampler gs = SamplerUtils.createGaussianSampler(rng, 0, noise);
final ArrayList<double[]> paramsList = new ArrayList<>(iter);
final ArrayList<double[]> yList = new ArrayList<>(iter);
// 3 peaks
createData(rng, 3, iter, paramsList, yList, true);
for (int i = 0; i < paramsList.size(); i++) {
final double[] y = yList.get(i);
// Add Gaussian read noise so we have negatives
final double min = MathUtils.min(y);
for (int j = 0; j < y.length; j++) {
y[j] = y[j] - min + gs.sample();
}
}
// We want to know that:
// y|peak1+peak2+peak3 == y|peak1+peak2+peak3(precomputed)
// We want to know when:
// y|peak1+peak2+peak3 != y-peak3|peak1+peak2
// i.e. we cannot subtract a precomputed peak from the data, it must be included in the fit
// E.G. LSQ - subtraction is OK, MLE/WLSQ - subtraction is not allowed
final Gaussian2DFunction f123 = GaussianFunctionFactory.create2D(3, blockWidth, blockWidth, GaussianFunctionFactory.FIT_ERF_FREE_CIRCLE, null);
final Gaussian2DFunction f12 = GaussianFunctionFactory.create2D(2, blockWidth, blockWidth, GaussianFunctionFactory.FIT_ERF_FREE_CIRCLE, null);
final Gaussian2DFunction f3 = GaussianFunctionFactory.create2D(1, blockWidth, blockWidth, GaussianFunctionFactory.FIT_ERF_FREE_CIRCLE, null);
final FastLog fastLog = type == Type.FAST_LOG_MLE ? getFastLog() : null;
final int nparams = f12.getNumberOfGradients();
final int[] indices = f12.gradientIndices();
final double[] b = new double[f12.size()];
// for checking strict equivalence
final DoubleEquality eq = new DoubleEquality(1e-8, 1e-16);
// for the gradients
final double delta = 1e-4;
final DoubleEquality eq2 = new DoubleEquality(5e-2, 1e-16);
final double[] a1peaks = new double[1 + Gaussian2DFunction.PARAMETERS_PER_PEAK];
final double[] y_b = new double[b.length];
// Count the number of failures for each gradient
final int failureLimit = TestCounter.computeFailureLimit(iter, 0.1);
final TestCounter failCounter = new TestCounter(failureLimit, nparams * 2);
for (int i = 0; i < paramsList.size(); i++) {
final int ii = i;
final double[] y = yList.get(i);
final double[] a3peaks = paramsList.get(i);
// logger.fine(FunctionUtils.getSupplier("[%d] a=%s", i, Arrays.toString(a3peaks));
final double[] a2peaks = Arrays.copyOf(a3peaks, 1 + 2 * Gaussian2DFunction.PARAMETERS_PER_PEAK);
final double[] a2peaks2 = a2peaks.clone();
for (int j = 1; j < a1peaks.length; j++) {
a1peaks[j] = a3peaks[j + 2 * Gaussian2DFunction.PARAMETERS_PER_PEAK];
}
// Evaluate peak 3 to get the background and subtract it from the data to get the new data
f3.initialise0(a1peaks);
f3.forEach(new ValueProcedure() {
int index = 0;
@Override
public void execute(double value) {
b[index] = value;
// Remove negatives for MLE
if (type.isMle()) {
y[index] = Math.max(0, y[index]);
y_b[index] = Math.max(0, y[index] - value);
} else {
y_b[index] = y[index] - value;
}
index++;
}
});
final LvmGradientProcedure p123 = LvmGradientProcedureUtils.create(y, f123, type, fastLog);
// ///////////////////////////////////
// These should be the same
// ///////////////////////////////////
final LvmGradientProcedure p12b3 = LvmGradientProcedureUtils.create(y, OffsetGradient1Function.wrapGradient1Function(f12, b), type, fastLog);
// Check they are the same
p123.gradient(a3peaks);
final double[][] m123 = p123.getAlphaMatrix();
p12b3.gradient(a2peaks);
double value = p12b3.value;
final double[] beta = p12b3.beta.clone();
double[][] alpha = p12b3.getAlphaMatrix();
if (!eq.almostEqualRelativeOrAbsolute(p123.value, value)) {
Assertions.fail(FunctionUtils.getSupplier("p12b3 Not same value @ %d (error=%s) : %s == %s", i, DoubleEquality.relativeError(p123.value, value), p123.value, value));
}
if (!almostEqualRelativeOrAbsolute(eq, beta, p123.beta)) {
Assertions.fail(FunctionUtils.getSupplier("p12b3 Not same gradient @ %d (error=%s) : %s vs %s", i, relativeError(beta, p123.beta), Arrays.toString(beta), Arrays.toString(p123.beta)));
}
for (int j = 0; j < alpha.length; j++) {
// Arrays.toString(m123[j]));
if (!almostEqualRelativeOrAbsolute(eq, alpha[j], m123[j])) {
Assertions.fail(FunctionUtils.getSupplier("p12b3 Not same alpha @ %d,%d (error=%s) : %s vs %s", i, j, relativeError(alpha[j], m123[j]), Arrays.toString(alpha[j]), Arrays.toString(m123[j])));
}
}
// Check actual gradients are correct
if (checkGradients) {
for (int j = 0; j < nparams; j++) {
final int jj = j;
final int k = indices[j];
// double d = Precision.representableDelta(a2peaks[k], (a2peaks[k] == 0) ? 1e-3 :
// a2peaks[k] * delta);
final double d = Precision.representableDelta(a2peaks[k], delta);
a2peaks2[k] = a2peaks[k] + d;
p12b3.value(a2peaks2);
final double s1 = p12b3.value;
a2peaks2[k] = a2peaks[k] - d;
p12b3.value(a2peaks2);
final double s2 = p12b3.value;
a2peaks2[k] = a2peaks[k];
// Apply a factor of -2 to compute the actual gradients:
// See Numerical Recipes in C++, 2nd Ed. Equation 15.5.6 for Nonlinear Models
beta[j] *= -2;
final double gradient = (s1 - s2) / (2 * d);
// logger.fine(FunctionUtils.getSupplier("[%d,%d] %f (%s %f+/-%f) %f ?= %f (%f)", i, k, s,
// Gaussian2DFunction.getName(k), a2peaks[k], d, beta[j], gradient,
// DoubleEquality.relativeError(gradient, beta[j]));
failCounter.run(j, () -> eq2.almostEqualRelativeOrAbsolute(beta[jj], gradient), () -> {
Assertions.fail(() -> String.format("Not same gradient @ %d,%d: %s != %s (error=%s)", ii, jj, beta[jj], gradient, DoubleEquality.relativeError(beta[jj], gradient)));
});
}
}
// ///////////////////////////////////
// This may be different
// ///////////////////////////////////
final LvmGradientProcedure p12m3 = LvmGradientProcedureUtils.create(y_b, f12, type, fastLog);
// Check these may be different.
// Sometimes they are not different.
p12m3.gradient(a2peaks);
value = p12m3.value;
System.arraycopy(p12m3.beta, 0, beta, 0, p12m3.beta.length);
alpha = p12m3.getAlphaMatrix();
if (type != Type.LSQ) {
if (eq.almostEqualRelativeOrAbsolute(p123.value, value)) {
logger.log(TestLogUtils.getFailRecord("p12b3 Same value @ %d (error=%s) : %s == %s", i, DoubleEquality.relativeError(p123.value, value), p123.value, value));
}
if (almostEqualRelativeOrAbsolute(eq, beta, p123.beta)) {
logger.log(TestLogUtils.getFailRecord("p12b3 Same gradient @ %d (error=%s) : %s vs %s", i, relativeError(beta, p123.beta), Arrays.toString(beta), Arrays.toString(p123.beta)));
}
// Note: Test the matrix is different by finding 1 different column
int dj = -1;
for (int j = 0; j < alpha.length; j++) {
// Arrays.toString(m123[j]));
if (!almostEqualRelativeOrAbsolute(eq, alpha[j], m123[j])) {
// Different column
dj = j;
break;
}
}
if (dj == -1) {
// Find biggest error for reporting. This helps set the test tolerance.
double error = 0;
dj = -1;
for (int j = 0; j < alpha.length; j++) {
final double e = relativeError(alpha[j], m123[j]);
if (error <= e) {
error = e;
dj = j;
}
}
logger.log(TestLogUtils.getFailRecord("p12b3 Same alpha @ %d,%d (error=%s) : %s vs %s", i, dj, error, Arrays.toString(alpha[dj]), Arrays.toString(m123[dj])));
}
} else {
if (!eq.almostEqualRelativeOrAbsolute(p123.value, value)) {
logger.log(TestLogUtils.getFailRecord("p12b3 Not same value @ %d (error=%s) : %s == %s", i, DoubleEquality.relativeError(p123.value, value), p123.value, value));
}
if (!almostEqualRelativeOrAbsolute(eq, beta, p123.beta)) {
logger.log(TestLogUtils.getFailRecord("p12b3 Not same gradient @ %d (error=%s) : %s vs %s", i, relativeError(beta, p123.beta), Arrays.toString(beta), Arrays.toString(p123.beta)));
}
for (int j = 0; j < alpha.length; j++) {
// Arrays.toString(m123[j]));
if (!almostEqualRelativeOrAbsolute(eq, alpha[j], m123[j])) {
logger.log(TestLogUtils.getFailRecord("p12b3 Not same alpha @ %d,%d (error=%s) : %s vs %s", i, j, relativeError(alpha[j], m123[j]), Arrays.toString(alpha[j]), Arrays.toString(m123[j])));
}
}
}
// Check actual gradients are correct
if (!checkGradients) {
continue;
}
for (int j = 0; j < nparams; j++) {
final int jj = j;
final int k = indices[j];
// double d = Precision.representableDelta(a2peaks[k], (a2peaks[k] == 0) ? 1e-3 : a2peaks[k]
// * delta);
final double d = Precision.representableDelta(a2peaks[k], delta);
a2peaks2[k] = a2peaks[k] + d;
p12m3.value(a2peaks2);
final double s1 = p12m3.value;
a2peaks2[k] = a2peaks[k] - d;
p12m3.value(a2peaks2);
final double s2 = p12m3.value;
a2peaks2[k] = a2peaks[k];
// Apply a factor of -2 to compute the actual gradients:
// See Numerical Recipes in C++, 2nd Ed. Equation 15.5.6 for Nonlinear Models
beta[j] *= -2;
final double gradient = (s1 - s2) / (2 * d);
// logger.fine(FunctionUtils.getSupplier("[%d,%d] %f (%s %f+/-%f) %f ?= %f (%f)", i, k, s,
// Gaussian2DFunction.getName(k), a2peaks[k], d, beta[j], gradient,
// DoubleEquality.relativeError(gradient, beta[j]));
failCounter.run(nparams + j, () -> eq2.almostEqualRelativeOrAbsolute(beta[jj], gradient), () -> {
Assertions.fail(() -> String.format("Not same gradient @ %d,%d: %s != %s (error=%s)", ii, jj, beta[jj], gradient, DoubleEquality.relativeError(beta[jj], gradient)));
});
}
}
}
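The method relies on two array helpers, almostEqualRelativeOrAbsolute(eq, double[], double[]) and relativeError(double[], double[]), that are defined elsewhere in LvmGradientProcedureTest and are not shown in this excerpt. A plausible element-wise implementation is sketched below as an assumption about their behaviour (these are not the project's definitions); they would sit as static members of the test class.

// Element-wise comparison of two gradient vectors (sketch only).
static boolean almostEqualRelativeOrAbsolute(DoubleEquality eq, double[] a, double[] b) {
  for (int i = 0; i < a.length; i++) {
    if (!eq.almostEqualRelativeOrAbsolute(a[i], b[i])) {
      return false;
    }
  }
  return true;
}

// Largest pairwise relative error, used only when reporting a failure (sketch only).
static double relativeError(double[] a, double[] b) {
  double max = 0;
  for (int i = 0; i < a.length; i++) {
    max = Math.max(max, DoubleEquality.relativeError(a[i], b[i]));
  }
  return max;
}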
Use of uk.ac.sussex.gdsc.core.utils.DoubleEquality in project GDSC-SMLM by aherbert.
From class EjmlLinearSolverTest, method canSolveLinearEquationWithZerosInA:
@Test
void canSolveLinearEquationWithZerosInA() {
final EjmlLinearSolver solver = new EjmlLinearSolver();
final DoubleEquality eq = new DoubleEquality(5e-3, 1e-16);
solver.setEqual(eq);
// Solves (one) linear equation, a x = b, for x[n]
final double[][] a = new double[][] {
    new double[] { 2, 0, -1, 0, 0, 0 },
    new double[] { 0, 0, 0, 0, 0, 0 },
    new double[] { -1, 0, 2, 0, 0, -1 },
    new double[] { 0, 0, 0, 0, 0, 0 },
    new double[] { 0, 0, 0, 0, 0, 0 },
    new double[] { 0, 0, -1, 0, 0, 2 } };
final double[] b = new double[] { 3, 0, 3, 0, 0, 4 };
// Expected solution
final double[] x = new double[] { 4.75, 0, 6.5, 0, 0, 5.25 };
final double[][] a_inv = new double[][] {
    new double[] { 0.75, 0, 0.5, 0, 0, 0.25 },
    new double[] { 0, 0, 0, 0, 0, 0 },
    new double[] { 0.5, 0, 1, 0, 0, 0.5 },
    new double[] { 0, 0, 0, 0, 0, 0 },
    new double[] { 0, 0, 0, 0, 0, 0 },
    new double[] { 0.25, 0, 0.5, 0, 0, 0.75 } };
final boolean result = solver.solve(a, b);
solver.invertLastA(a);
Assertions.assertTrue(result, "Failed to invert");
Assertions.assertArrayEquals(x, b, 1e-4f, "Bad solution");
if (logger.isLoggable(Level.INFO)) {
logger.info(FunctionUtils.getSupplier("x = %s", Arrays.toString(b)));
}
for (int i = 0; i < b.length; i++) {
if (logger.isLoggable(Level.INFO)) {
logger.info(FunctionUtils.getSupplier("a[%d] = %s", i, Arrays.toString(a[i])));
}
Assertions.assertArrayEquals(a_inv[i], a[i], 1e-4f, "Bad inversion");
}
}
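Because rows and columns 1, 3 and 4 of a are entirely zero, the system collapses to a 3x3 system in b[0], b[2] and b[5], and the expected x = {4.75, 0, 6.5, 0, 0, 5.25} satisfies it (for example, row 0: 2 * 4.75 - 6.5 = 3). A quick stand-alone sanity check of a x = b, given only as a sketch and not part of the test:

// Multiply a by the expected solution and confirm it reproduces b.
static void checkSolution(double[][] a, double[] x, double[] b) {
  for (int i = 0; i < b.length; i++) {
    double sum = 0;
    for (int j = 0; j < x.length; j++) {
      sum += a[i][j] * x[j];
    }
    if (Math.abs(sum - b[i]) > 1e-10) {
      throw new AssertionError("Row " + i + ": " + sum + " != " + b[i]);
    }
  }
}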
Use of uk.ac.sussex.gdsc.core.utils.DoubleEquality in project GDSC-SMLM by aherbert.
From class GradientCalculatorSpeedTest, method gradientCalculatorComputesGradient:
private void gradientCalculatorComputesGradient(RandomSeed seed, GradientCalculator calc) {
final int nparams = calc.nparams;
final Gaussian2DFunction func = new SingleEllipticalGaussian2DFunction(blockWidth, blockWidth);
// Check the function is the correct size
final int[] indices = func.gradientIndices();
Assertions.assertEquals(nparams, indices.length);
final int iter = 50;
final double[] beta = new double[nparams];
final double[] beta2 = new double[nparams];
final ArrayList<double[]> paramsList = new ArrayList<>(iter);
final ArrayList<double[]> yList = new ArrayList<>(iter);
final int[] x = createData(RngUtils.create(seed.getSeed()), 1, iter, paramsList, yList, true);
final double delta = 1e-3;
final DoubleEquality eq = new DoubleEquality(1e-3, 1e-3);
final IntArrayFormatSupplier msg = new IntArrayFormatSupplier("[%d] Not same gradient @ %d", 2);
for (int i = 0; i < paramsList.size(); i++) {
msg.set(0, i);
final double[] y = yList.get(i);
final double[] a = paramsList.get(i);
final double[] a2 = a.clone();
// double s =
calc.evaluate(x, y, a, beta, func);
for (int k = 0; k < nparams; k++) {
final int j = indices[k];
final double d = Precision.representableDelta(a[j], (a[j] == 0) ? 1e-3 : a[j] * delta);
a2[j] = a[j] + d;
final double s1 = calc.evaluate(x, y, a2, beta2, func);
a2[j] = a[j] - d;
final double s2 = calc.evaluate(x, y, a2, beta2, func);
a2[j] = a[j];
final double gradient = (s1 - s2) / (2 * d);
// logger.fine(FunctionUtils.getSupplier("[%d,%d] %f (%s %f+/-%f) %f ?= %f", i, j, s,
// func.getName(j), a[j], d, beta[k],
// gradient));
Assertions.assertTrue(eq.almostEqualRelativeOrAbsolute(beta[k], gradient), msg.set(1, j));
}
}
}
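As in the other gradient tests, Precision.representableDelta is called before forming the symmetric difference. The intent, illustrated below rather than taken from the test, is that the returned step d satisfies (a[j] + d) - a[j] == d exactly in double precision, so the denominator 2 * d introduces no additional rounding error.

import org.apache.commons.math3.util.Precision;

class RepresentableDeltaDemo {
  public static void main(String[] args) {
    double x = 10.0;
    double requested = x * 1e-3;                           // nominal step size
    double d = Precision.representableDelta(x, requested); // adjusted, representable step
    // (x + d) - x reproduces d exactly for this example.
    System.out.println(((x + d) - x) == d);                // true
  }
}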
Use of uk.ac.sussex.gdsc.core.utils.DoubleEquality in project GDSC-SMLM by aherbert.
From class PoissonCalculatorTest, method instanceAndFastMethodIsApproximatelyEqualToStaticMethod:
@SeededTest
void instanceAndFastMethodIsApproximatelyEqualToStaticMethod(RandomSeed seed) {
final DoubleEquality eq = new DoubleEquality(3e-4, 0);
final UniformRandomProvider rg = RngUtils.create(seed.getSeed());
// Test for different x, around the point where the calculator switches to its
// approximation (PoissonCalculator.APPROXIMATION_X) and well beyond it
final int n = 100;
final double[] u = new double[n];
final double[] x = new double[n];
double expected;
double observed;
for (final double testx : new double[] {
    Math.nextDown(PoissonCalculator.APPROXIMATION_X),
    PoissonCalculator.APPROXIMATION_X,
    Math.nextUp(PoissonCalculator.APPROXIMATION_X),
    PoissonCalculator.APPROXIMATION_X * 1.1,
    PoissonCalculator.APPROXIMATION_X * 2,
    PoissonCalculator.APPROXIMATION_X * 10 }) {
final String X = Double.toString(testx);
Arrays.fill(x, testx);
final PoissonCalculator pc = new PoissonCalculator(x);
expected = PoissonCalculator.maximumLogLikelihood(x);
observed = pc.getMaximumLogLikelihood();
logger.log(TestLogUtils.getRecord(LOG_LEVEL, "[%s] Instance MaxLL = %g vs %g (error = %g)", X, expected, observed, DoubleEquality.relativeError(expected, observed)));
Assertions.assertTrue(eq.almostEqualRelativeOrAbsolute(expected, observed), () -> "Instance Max LL not equal: x=" + X);
observed = PoissonCalculator.fastMaximumLogLikelihood(x);
logger.log(TestLogUtils.getRecord(LOG_LEVEL, "[%s] Fast MaxLL = %g vs %g (error = %g)", X, expected, observed, DoubleEquality.relativeError(expected, observed)));
Assertions.assertTrue(eq.almostEqualRelativeOrAbsolute(expected, observed), () -> "Fast Max LL not equal: x=" + X);
// Generate data around the value
for (int i = 0; i < n; i++) {
u[i] = x[i] + rg.nextDouble() - 0.5;
}
expected = PoissonCalculator.logLikelihood(u, x);
observed = pc.logLikelihood(u);
logger.log(TestLogUtils.getRecord(LOG_LEVEL, "[%s] Instance LL = %g vs %g (error = %g)", X, expected, observed, DoubleEquality.relativeError(expected, observed)));
Assertions.assertTrue(eq.almostEqualRelativeOrAbsolute(expected, observed), () -> "Instance LL not equal: x=" + X);
observed = PoissonCalculator.fastLogLikelihood(u, x);
logger.log(TestLogUtils.getRecord(LOG_LEVEL, "[%s] Fast LL = %g vs %g (error = %g)", X, expected, observed, DoubleEquality.relativeError(expected, observed)));
Assertions.assertTrue(eq.almostEqualRelativeOrAbsolute(expected, observed), () -> "Fast LL not equal: x=" + X);
expected = PoissonCalculator.logLikelihoodRatio(u, x);
observed = pc.getLogLikelihoodRatio(observed);
logger.log(TestLogUtils.getRecord(LOG_LEVEL, "[%s] Instance LLR = %g vs %g (error = %g)", X, expected, observed, DoubleEquality.relativeError(expected, observed)));
Assertions.assertTrue(eq.almostEqualRelativeOrAbsolute(expected, observed), () -> "Instance LLR not equal: x=" + X);
}
}
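Here the absolute tolerance is zero, so DoubleEquality(3e-4, 0) is effectively a purely relative comparison. Assuming relative error is defined as |a - b| / max(|a|, |b|), which matches how the test reports its errors, the acceptance test is equivalent to the following sketch (not the library's actual implementation):

// Purely relative comparison with a 3e-4 tolerance.
static boolean approximatelyEqual(double expected, double observed) {
  final double max = Math.max(Math.abs(expected), Math.abs(observed));
  return max == 0 || Math.abs(expected - observed) / max <= 3e-4;
}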