Use of uk.ac.sussex.gdsc.test.utils.TestCounter in project GDSC-SMLM by aherbert.
Class FastMleJacobianGradient2ProcedureTest, method gradientCalculatorComputesGradient:
private void gradientCalculatorComputesGradient(RandomSeed seed, int npeaks, ErfGaussian2DFunction func) {
// Check the first and second derivatives
final int nparams = func.getNumberOfGradients();
final int[] indices = func.gradientIndices();
final int iter = 100;
final ArrayList<double[]> paramsList = new ArrayList<>(iter);
final ArrayList<double[]> yList = new ArrayList<>(iter);
createData(RngUtils.create(seed.getSeed()), npeaks, iter, paramsList, yList, true);
// for the gradients
final double delta = 1e-4;
final DoubleEquality eq = new DoubleEquality(5e-2, 1e-16);
// Must compute most of the time
final int failureLimit = TestCounter.computeFailureLimit(iter, 0.1);
// failureLimit = 0;
final TestCounter failCounter = new TestCounter(failureLimit, 2 * nparams);
final TestCounter failCounter2 = new TestCounter(failureLimit, nparams * nparams);
for (int i = 0; i < paramsList.size(); i++) {
final int ii = i;
final double[] y = yList.get(i);
final double[] a = paramsList.get(i);
final double[] a2 = a.clone();
final FastMleJacobianGradient2Procedure p = new FastMleJacobianGradient2Procedure(y, func);
// double ll = p.computeLogLikelihood(a);
p.computeJacobian(a);
final double[] d1 = p.d1.clone();
final double[] d2 = p.d2.clone();
final DenseMatrix64F J = DenseMatrix64F.wrap(nparams, nparams, p.getJacobianLinear());
for (int j = 0; j < nparams; j++) {
final int j_ = j;
final int k = indices[j];
final double d = Precision.representableDelta(a[k], (a[k] == 0) ? delta : a[k] * delta);
a2[k] = a[k] + d;
final double llh = p.computeLogLikelihood(a2);
p.computeFirstDerivative(a2);
final double[] d1h = p.d1.clone();
a2[k] = a[k] - d;
final double lll = p.computeLogLikelihood(a2);
p.computeFirstDerivative(a2);
final double[] d1l = p.d1.clone();
a2[k] = a[k];
final double gradient1 = (llh - lll) / (2 * d);
final double gradient2 = (d1h[j] - d1l[j]) / (2 * d);
// logger.fine(FunctionUtils.getSupplier("[%d,%d] ll - %f (%s %f+/-%f) d1 %f ?= %f : d2 %f
// ?= %f", i, k, ll, func.getName(k), a[k], d,
// gradient1, d1[j], gradient2, d2[j]);
failCounter.run(j, () -> eq.almostEqualRelativeOrAbsolute(gradient1, d1[j_]), () -> {
Assertions.fail(() -> String.format("Not same gradient1 @ %d,%d: %s != %s (error=%s)", ii, j_, gradient1, d1[j_], DoubleEquality.relativeError(gradient1, d1[j_])));
});
failCounter.run(nparams + j, () -> eq.almostEqualRelativeOrAbsolute(gradient2, d2[j_]), () -> {
Assertions.fail(() -> String.format("Not same gradient2 @ %d,%d: %s != %s (error=%s)", ii, j_, gradient2, d2[j_], DoubleEquality.relativeError(gradient2, d2[j_])));
});
for (int jj = 0; jj < nparams; jj++) {
if (j == jj) {
// This is done above
// Check it anyway to ensure the Jacobian is correct
// continue;
}
final int jj_ = jj;
final int kk = indices[jj];
final double dd = Precision.representableDelta(a[kk], (a[kk] == 0) ? delta : a[kk] * delta);
a2[kk] = a[kk] + dd;
p.computeFirstDerivative(a2);
System.arraycopy(p.d1, 0, d1h, 0, d1h.length);
a2[kk] = a[kk] - dd;
p.computeFirstDerivative(a2);
System.arraycopy(p.d1, 0, d1l, 0, d1l.length);
a2[kk] = a[kk];
// Use index j even though we adjusted index jj
final double gradient3 = (d1h[j] - d1l[j]) / (2 * dd);
final boolean ok = eq.almostEqualRelativeOrAbsolute(gradient3, J.get(j, jj));
// logger.fine(FunctionUtils.getSupplier("[%d,%d,%d] (%s %f %s %f+/-%f) J %f ?= %f %b", i,
// k, kk, func.getName(k),
// a[k], func.getName(kk), a[kk], dd, gradient3, J.get(j, jj), ok);
// if (!ok)
// {
// ExtraAssertions.fail("Not same gradientJ @ [%d,%d]", j, jj);
// }
failCounter2.run(nparams * j_ + jj_, () -> ok, () -> {
Assertions.fail(() -> String.format("Not same gradientJ @ %d [%d,%d]: %s != %s (error=%s)", ii, j_, jj_, gradient3, J.get(j_, jj_), DoubleEquality.relativeError(gradient3, J.get(j_, jj_))));
});
}
}
}
}
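Distilled from the method above, the derivative check is a plain symmetric (central) difference around each parameter. Below is a minimal, self-contained sketch of that pattern applied to a toy function instead of the Gaussian PSF; the class and variable names are illustrative only, Precision is org.apache.commons.math3.util.Precision as used above, and the DoubleEquality import path is an assumption (it is the GDSC helper the test relies on).
import org.apache.commons.math3.util.Precision;
import uk.ac.sussex.gdsc.core.utils.DoubleEquality; // package assumed

class CentralDifferenceSketch {
  // Toy objective: f(a) = a0^2 + 3 * a1, so d1 = [2 * a0, 3] and the diagonal d2 = [2, 0].
  static double value(double[] a) {
    return a[0] * a[0] + 3 * a[1];
  }

  static double[] firstDerivative(double[] a) {
    return new double[] {2 * a[0], 3};
  }

  static void check() {
    final double[] a = {2.0, 5.0};
    final double[] d1 = firstDerivative(a);
    final double[] d2 = {2, 0};
    final double delta = 1e-4;
    final DoubleEquality eq = new DoubleEquality(5e-2, 1e-16);
    final double[] a2 = a.clone();
    for (int k = 0; k < a.length; k++) {
      // Perturb one parameter by a delta that is exactly representable in double precision
      final double d = Precision.representableDelta(a[k], (a[k] == 0) ? delta : a[k] * delta);
      a2[k] = a[k] + d;
      final double vh = value(a2);
      final double[] d1h = firstDerivative(a2);
      a2[k] = a[k] - d;
      final double vl = value(a2);
      final double[] d1l = firstDerivative(a2);
      a2[k] = a[k];
      // Symmetric differences estimate the first and second derivatives
      final double gradient1 = (vh - vl) / (2 * d);
      final double gradient2 = (d1h[k] - d1l[k]) / (2 * d);
      if (!eq.almostEqualRelativeOrAbsolute(gradient1, d1[k])
          || !eq.almostEqualRelativeOrAbsolute(gradient2, d2[k])) {
        throw new AssertionError("Derivative mismatch at index " + k);
      }
    }
  }
}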
Use of uk.ac.sussex.gdsc.test.utils.TestCounter in project GDSC-SMLM by aherbert.
Class LvmGradientProcedureTest, method gradientProcedureSupportsPrecomputed:
private void gradientProcedureSupportsPrecomputed(RandomSeed seed, final Type type, boolean checkGradients) {
final int iter = 10;
final UniformRandomProvider rng = RngUtils.create(seed.getSeed());
final SharedStateContinuousSampler gs = SamplerUtils.createGaussianSampler(rng, 0, noise);
final ArrayList<double[]> paramsList = new ArrayList<>(iter);
final ArrayList<double[]> yList = new ArrayList<>(iter);
// 3 peaks
createData(rng, 3, iter, paramsList, yList, true);
for (int i = 0; i < paramsList.size(); i++) {
final double[] y = yList.get(i);
// Add Gaussian read noise so we have negatives
final double min = MathUtils.min(y);
for (int j = 0; j < y.length; j++) {
y[j] = y[j] - min + gs.sample();
}
}
// We want to know that:
// y|peak1+peak2+peak3 == y|peak1+peak2+peak3(precomputed)
// We want to know when:
// y|peak1+peak2+peak3 != y-peak3|peak1+peak2
// i.e. we cannot subtract a precomputed peak from the data, it must be included in the fit
// E.G. LSQ - subtraction is OK, MLE/WLSQ - subtraction is not allowed
final Gaussian2DFunction f123 = GaussianFunctionFactory.create2D(3, blockWidth, blockWidth, GaussianFunctionFactory.FIT_ERF_FREE_CIRCLE, null);
final Gaussian2DFunction f12 = GaussianFunctionFactory.create2D(2, blockWidth, blockWidth, GaussianFunctionFactory.FIT_ERF_FREE_CIRCLE, null);
final Gaussian2DFunction f3 = GaussianFunctionFactory.create2D(1, blockWidth, blockWidth, GaussianFunctionFactory.FIT_ERF_FREE_CIRCLE, null);
final FastLog fastLog = type == Type.FAST_LOG_MLE ? getFastLog() : null;
final int nparams = f12.getNumberOfGradients();
final int[] indices = f12.gradientIndices();
final double[] b = new double[f12.size()];
// for checking strict equivalence
final DoubleEquality eq = new DoubleEquality(1e-8, 1e-16);
// for the gradients
final double delta = 1e-4;
final DoubleEquality eq2 = new DoubleEquality(5e-2, 1e-16);
final double[] a1peaks = new double[1 + Gaussian2DFunction.PARAMETERS_PER_PEAK];
final double[] y_b = new double[b.length];
// Count the number of failures for each gradient
final int failureLimit = TestCounter.computeFailureLimit(iter, 0.1);
final TestCounter failCounter = new TestCounter(failureLimit, nparams * 2);
for (int i = 0; i < paramsList.size(); i++) {
final int ii = i;
final double[] y = yList.get(i);
final double[] a3peaks = paramsList.get(i);
// logger.fine(FunctionUtils.getSupplier("[%d] a=%s", i, Arrays.toString(a3peaks));
final double[] a2peaks = Arrays.copyOf(a3peaks, 1 + 2 * Gaussian2DFunction.PARAMETERS_PER_PEAK);
final double[] a2peaks2 = a2peaks.clone();
for (int j = 1; j < a1peaks.length; j++) {
a1peaks[j] = a3peaks[j + 2 * Gaussian2DFunction.PARAMETERS_PER_PEAK];
}
// Evaluate peak 3 to get the background and subtract it from the data to get the new data
f3.initialise0(a1peaks);
f3.forEach(new ValueProcedure() {
int index = 0;
@Override
public void execute(double value) {
b[index] = value;
// Remove negatives for MLE
if (type.isMle()) {
y[index] = Math.max(0, y[index]);
y_b[index] = Math.max(0, y[index] - value);
} else {
y_b[index] = y[index] - value;
}
index++;
}
});
final LvmGradientProcedure p123 = LvmGradientProcedureUtils.create(y, f123, type, fastLog);
// ///////////////////////////////////
// These should be the same
// ///////////////////////////////////
final LvmGradientProcedure p12b3 = LvmGradientProcedureUtils.create(y, OffsetGradient1Function.wrapGradient1Function(f12, b), type, fastLog);
// Check they are the same
p123.gradient(a3peaks);
final double[][] m123 = p123.getAlphaMatrix();
p12b3.gradient(a2peaks);
double value = p12b3.value;
final double[] beta = p12b3.beta.clone();
double[][] alpha = p12b3.getAlphaMatrix();
if (!eq.almostEqualRelativeOrAbsolute(p123.value, value)) {
Assertions.fail(FunctionUtils.getSupplier("p12b3 Not same value @ %d (error=%s) : %s == %s", i, DoubleEquality.relativeError(p123.value, value), p123.value, value));
}
if (!almostEqualRelativeOrAbsolute(eq, beta, p123.beta)) {
Assertions.fail(FunctionUtils.getSupplier("p12b3 Not same gradient @ %d (error=%s) : %s vs %s", i, relativeError(beta, p123.beta), Arrays.toString(beta), Arrays.toString(p123.beta)));
}
for (int j = 0; j < alpha.length; j++) {
// Arrays.toString(m123[j]));
if (!almostEqualRelativeOrAbsolute(eq, alpha[j], m123[j])) {
Assertions.fail(FunctionUtils.getSupplier("p12b3 Not same alpha @ %d,%d (error=%s) : %s vs %s", i, j, relativeError(alpha[j], m123[j]), Arrays.toString(alpha[j]), Arrays.toString(m123[j])));
}
}
// Check actual gradients are correct
if (checkGradients) {
for (int j = 0; j < nparams; j++) {
final int jj = j;
final int k = indices[j];
// double d = Precision.representableDelta(a2peaks[k], (a2peaks[k] == 0) ? 1e-3 :
// a2peaks[k] * delta);
final double d = Precision.representableDelta(a2peaks[k], delta);
a2peaks2[k] = a2peaks[k] + d;
p12b3.value(a2peaks2);
final double s1 = p12b3.value;
a2peaks2[k] = a2peaks[k] - d;
p12b3.value(a2peaks2);
final double s2 = p12b3.value;
a2peaks2[k] = a2peaks[k];
// Apply a factor of -2 to compute the actual gradients:
// See Numerical Recipes in C++, 2nd Ed. Equation 15.5.6 for Nonlinear Models
beta[j] *= -2;
final double gradient = (s1 - s2) / (2 * d);
// logger.fine(FunctionUtils.getSupplier("[%d,%d] %f (%s %f+/-%f) %f ?= %f (%f)", i, k, s,
// Gaussian2DFunction.getName(k), a2peaks[k], d, beta[j], gradient,
// DoubleEquality.relativeError(gradient, beta[j]));
failCounter.run(j, () -> eq2.almostEqualRelativeOrAbsolute(beta[jj], gradient), () -> {
Assertions.fail(() -> String.format("Not same gradient @ %d,%d: %s != %s (error=%s)", ii, jj, beta[jj], gradient, DoubleEquality.relativeError(beta[jj], gradient)));
});
}
}
// ///////////////////////////////////
// This may be different
// ///////////////////////////////////
final LvmGradientProcedure p12m3 = LvmGradientProcedureUtils.create(y_b, f12, type, fastLog);
// Check these may be different.
// Sometimes they are not different.
p12m3.gradient(a2peaks);
value = p12m3.value;
System.arraycopy(p12m3.beta, 0, beta, 0, p12m3.beta.length);
alpha = p12m3.getAlphaMatrix();
if (type != Type.LSQ) {
if (eq.almostEqualRelativeOrAbsolute(p123.value, value)) {
logger.log(TestLogUtils.getFailRecord("p12b3 Same value @ %d (error=%s) : %s == %s", i, DoubleEquality.relativeError(p123.value, value), p123.value, value));
}
if (almostEqualRelativeOrAbsolute(eq, beta, p123.beta)) {
logger.log(TestLogUtils.getFailRecord("p12b3 Same gradient @ %d (error=%s) : %s vs %s", i, relativeError(beta, p123.beta), Arrays.toString(beta), Arrays.toString(p123.beta)));
}
// Note: Test the matrix is different by finding 1 different column
int dj = -1;
for (int j = 0; j < alpha.length; j++) {
// Arrays.toString(m123[j]));
if (!almostEqualRelativeOrAbsolute(eq, alpha[j], m123[j])) {
// Different column
dj = j;
break;
}
}
if (dj == -1) {
// Find biggest error for reporting. This helps set the test tolerance.
double error = 0;
dj = -1;
for (int j = 0; j < alpha.length; j++) {
final double e = relativeError(alpha[j], m123[j]);
if (error <= e) {
error = e;
dj = j;
}
}
logger.log(TestLogUtils.getFailRecord("p12b3 Same alpha @ %d,%d (error=%s) : %s vs %s", i, dj, error, Arrays.toString(alpha[dj]), Arrays.toString(m123[dj])));
}
} else {
if (!eq.almostEqualRelativeOrAbsolute(p123.value, value)) {
logger.log(TestLogUtils.getFailRecord("p12b3 Not same value @ %d (error=%s) : %s == %s", i, DoubleEquality.relativeError(p123.value, value), p123.value, value));
}
if (!almostEqualRelativeOrAbsolute(eq, beta, p123.beta)) {
logger.log(TestLogUtils.getFailRecord("p12b3 Not same gradient @ %d (error=%s) : %s vs %s", i, relativeError(beta, p123.beta), Arrays.toString(beta), Arrays.toString(p123.beta)));
}
for (int j = 0; j < alpha.length; j++) {
// Arrays.toString(m123[j]));
if (!almostEqualRelativeOrAbsolute(eq, alpha[j], m123[j])) {
logger.log(TestLogUtils.getFailRecord("p12b3 Not same alpha @ %d,%d (error=%s) : %s vs %s", i, j, relativeError(alpha[j], m123[j]), Arrays.toString(alpha[j]), Arrays.toString(m123[j])));
}
}
}
// Check actual gradients are correct
if (!checkGradients) {
continue;
}
for (int j = 0; j < nparams; j++) {
final int jj = j;
final int k = indices[j];
// double d = Precision.representableDelta(a2peaks[k], (a2peaks[k] == 0) ? 1e-3 : a2peaks[k]
// * delta);
final double d = Precision.representableDelta(a2peaks[k], delta);
a2peaks2[k] = a2peaks[k] + d;
p12m3.value(a2peaks2);
final double s1 = p12m3.value;
a2peaks2[k] = a2peaks[k] - d;
p12m3.value(a2peaks2);
final double s2 = p12m3.value;
a2peaks2[k] = a2peaks[k];
// Apply a factor of -2 to compute the actual gradients:
// See Numerical Recipes in C++, 2nd Ed. Equation 15.5.6 for Nonlinear Models
beta[j] *= -2;
final double gradient = (s1 - s2) / (2 * d);
// logger.fine(FunctionUtils.getSupplier("[%d,%d] %f (%s %f+/-%f) %f ?= %f (%f)", i, k, s,
// Gaussian2DFunction.getName(k), a2peaks[k], d, beta[j], gradient,
// DoubleEquality.relativeError(gradient, beta[j]));
failCounter.run(nparams + j, () -> eq2.almostEqualRelativeOrAbsolute(beta[jj], gradient), () -> {
Assertions.fail(() -> String.format("Not same gradient @ %d,%d: %s != %s (error=%s)", ii, jj, beta[jj], gradient, DoubleEquality.relativeError(beta[jj], gradient)));
});
}
}
}
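The comment near the top of this method states that a precomputed peak can be subtracted from the data for LSQ, but for MLE/WLSQ it must be supplied to the fit as a precomputed offset. A toy illustration of why, in plain Java with no GDSC classes (all names and values here are made up):
class PrecomputedOffsetSketch {
  // Least squares: sum((y - (model + b))^2) equals sum(((y - b) - model)^2),
  // so subtracting the known offset b from the data leaves the objective unchanged.
  static double sumOfSquares(double[] y, double[] model, double[] b) {
    double sum = 0;
    for (int i = 0; i < y.length; i++) {
      final double residual = y[i] - (model[i] + b[i]);
      sum += residual * residual;
    }
    return sum;
  }

  // Poisson log-likelihood (dropping the constant log(y!) term): sum(y * log(u) - u)
  // with mean u = model + b. Replacing y by (y - b) and u by the model alone changes
  // the log term, so the objective and its gradients are no longer equivalent.
  static double poissonLogLikelihood(double[] y, double[] mean) {
    double sum = 0;
    for (int i = 0; i < y.length; i++) {
      sum += y[i] * Math.log(mean[i]) - mean[i];
    }
    return sum;
  }
}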
Use of uk.ac.sussex.gdsc.test.utils.TestCounter in project GDSC-SMLM by aherbert.
Class SolverSpeedTest, method solveLinearAndGaussJordanReturnSameSolutionAndInversionResult:
@SeededTest
void solveLinearAndGaussJordanReturnSameSolutionAndInversionResult(RandomSeed seed) {
final int iter = 100;
final SolverSpeedTestData data = ensureData(seed, iter);
final ArrayList<double[][]> adata = copyAdouble(data.adata, iter);
final ArrayList<double[]> bdata = copyBdouble(data.bdata, iter);
final ArrayList<double[][]> adata2 = copyAdouble(data.adata, iter);
final ArrayList<double[]> bdata2 = copyBdouble(data.bdata, iter);
final GaussJordan solver = new GaussJordan();
final EjmlLinearSolver solver2 = new EjmlLinearSolver();
final int failureLimit = TestCounter.computeFailureLimit(iter, 0.1);
final TestCounter failCounter = new TestCounter(failureLimit, 2);
final DoubleDoubleBiPredicate predicate = TestHelper.doublesAreClose(1e-2, 0);
int fail = 0;
for (int i = 0; i < adata.size(); i++) {
final double[][] a1 = adata.get(i);
final double[] b1 = bdata.get(i);
final double[][] a2 = adata2.get(i);
final double[] b2 = bdata2.get(i);
final boolean r1 = solver.solve(a1, b1);
final boolean r2 = solver2.solveLinear(a2, b2);
solver2.invertLastA(a2);
// Assertions.assertTrue("Different solve result @ " + i, r1 == r2);
if (r1 && r2) {
failCounter.run(0, () -> {
TestAssertions.assertArrayTest(b1, b2, predicate, "Different b result");
});
failCounter.run(0, () -> {
TestAssertions.assertArrayTest(a1, a2, predicate, "Different a result");
});
} else {
fail++;
}
}
if (fail > iter / 2) {
Assertions.fail(String.format("Failed to solve %d / %d", fail, iter));
}
}
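Taken together, the examples on this page use TestCounter in two ways: a boolean test with an explicit error action (the gradient checks), and a test that signals failure by throwing an AssertionError (the solver check above). In both cases the counter tolerates a limited number of failures per index before the error is raised, which matches the "Must compute most of the time" intent. A minimal sketch assuming only the methods already shown here (computeFailureLimit, the constructor, and the two run overloads); the expected/observed values are placeholders:
import org.junit.jupiter.api.Assertions;
import uk.ac.sussex.gdsc.test.utils.TestCounter;

class TestCounterSketch {
  void demo() {
    final int iter = 100;
    // Allow up to 10% of the iterations to fail for each counter index
    final int failureLimit = TestCounter.computeFailureLimit(iter, 0.1);
    // One index per quantity being checked (here 2)
    final TestCounter failCounter = new TestCounter(failureLimit, 2);
    for (int i = 0; i < iter; i++) {
      final double expected = 1.0;
      final double observed = 1.0 + 1e-9; // placeholder for a computed value
      // Overload 1: boolean test plus error action, as in the gradient checks above
      failCounter.run(0, () -> Math.abs(expected - observed) < 1e-6, () -> {
        Assertions.fail("index 0: values differ");
      });
      // Overload 2: a test that throws on failure, as in the solver check above
      failCounter.run(1, () -> {
        Assertions.assertEquals(expected, observed, 1e-6);
      });
    }
  }
}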
Use of uk.ac.sussex.gdsc.test.utils.TestCounter in project GDSC-SMLM by aherbert.
Class FastMleGradient2ProcedureTest, method gradientCalculatorComputesGradient:
private void gradientCalculatorComputesGradient(RandomSeed seed, ErfGaussian2DFunction func) {
// Check the first and second derivatives
final int nparams = func.getNumberOfGradients();
final int[] indices = func.gradientIndices();
final int iter = 100;
final ArrayList<double[]> paramsList = new ArrayList<>(iter);
final ArrayList<double[]> yList = new ArrayList<>(iter);
createData(RngUtils.create(seed.getSeed()), 1, iter, paramsList, yList, true);
// for the gradients
final double delta = 1e-4;
final DoubleEquality eq = new DoubleEquality(5e-2, 1e-16);
// Must compute most of the time
final int failureLimit = TestCounter.computeFailureLimit(iter, 0.1);
final TestCounter failCounter = new TestCounter(failureLimit, nparams);
for (int i = 0; i < paramsList.size(); i++) {
final int ii = i;
final double[] y = yList.get(i);
final double[] a = paramsList.get(i);
final double[] a2 = a.clone();
final FastMleGradient2Procedure p = FastMleGradient2ProcedureUtils.create(y, func);
// double ll = p.computeLogLikelihood(a);
p.computeSecondDerivative(a);
final double[] d1 = p.d1.clone();
final double[] d2 = p.d2.clone();
for (int j = 0; j < nparams; j++) {
final int j_ = j;
final int k = indices[j];
final double d = Precision.representableDelta(a[k], (a[k] == 0) ? delta : a[k] * delta);
a2[k] = a[k] + d;
final double llh = p.computeLogLikelihood(a2);
p.computeFirstDerivative(a2);
final double[] d1h = p.d1.clone();
a2[k] = a[k] - d;
final double lll = p.computeLogLikelihood(a2);
p.computeFirstDerivative(a2);
final double[] d1l = p.d1.clone();
a2[k] = a[k];
final double gradient1 = (llh - lll) / (2 * d);
final double gradient2 = (d1h[j] - d1l[j]) / (2 * d);
// logger.fine("[%d,%d] ll - %f (%s %f+/-%f) d1 %f ?= %f : d2 %f ?= %f", i, k, ll,
// func.getName(k), a[k], d,
// gradient1, d1[j], gradient2, d2[j]);
failCounter.run(j, () -> eq.almostEqualRelativeOrAbsolute(gradient1, d1[j_]), () -> {
Assertions.fail(FunctionUtils.getSupplier("Not same gradient1 @ %d,%d: %s != %s (error=%s)", ii, j_, gradient1, d1[j_], DoubleEquality.relativeError(gradient1, d1[j_])));
});
failCounter.run(nparams + j, () -> eq.almostEqualRelativeOrAbsolute(gradient2, d2[j_]), () -> {
Assertions.fail(FunctionUtils.getSupplier("Not same gradient2 @ %d,%d: %s != %s (error=%s)", ii, j_, gradient2, d2[j_], DoubleEquality.relativeError(gradient2, d2[j_])));
});
}
}
}
Use of uk.ac.sussex.gdsc.test.utils.TestCounter in project GDSC-SMLM by aherbert.
Class LsqLvmGradientProcedureTest, method gradientProcedureComputesGradient:
private void gradientProcedureComputesGradient(RandomSeed seed, ErfGaussian2DFunction func) {
final int nparams = func.getNumberOfGradients();
final int[] indices = func.gradientIndices();
final int iter = 100;
final ArrayList<double[]> paramsList = new ArrayList<>(iter);
final ArrayList<double[]> yList = new ArrayList<>(iter);
createData(RngUtils.create(seed.getSeed()), 1, iter, paramsList, yList, true);
// for the gradients
final double delta = 1e-4;
final DoubleEquality eq = new DoubleEquality(5e-2, 1e-16);
// Must compute most of the time
final int failureLimit = TestCounter.computeFailureLimit(iter, 0.1);
final TestCounter failCounter = new TestCounter(failureLimit, nparams);
for (int i = 0; i < paramsList.size(); i++) {
final int ii = i;
final double[] y = yList.get(i);
final double[] a = paramsList.get(i);
final double[] a2 = a.clone();
final BaseLsqLvmGradientProcedure p = LsqLvmGradientProcedureUtils.create(y, func);
p.gradient(a);
// double s = p.ssx;
final double[] beta = p.beta.clone();
for (int j = 0; j < nparams; j++) {
final int jj = j;
final int k = indices[j];
final double d = Precision.representableDelta(a[k], (a[k] == 0) ? 1e-3 : a[k] * delta);
a2[k] = a[k] + d;
p.value(a2);
final double s1 = p.value;
a2[k] = a[k] - d;
p.value(a2);
final double s2 = p.value;
a2[k] = a[k];
// Apply a factor of -2 to compute the actual gradients:
// See Numerical Recipes in C++, 2nd Ed. Equation 15.5.6 for Nonlinear Models
beta[j] *= -2;
final double gradient = (s1 - s2) / (2 * d);
// logger.fine(FunctionUtils.getSupplier("[%d,%d] %f (%s %f+/-%f) %f ?= %f", i, k, s,
// func.getName(k), a[k], d, beta[j],
// gradient);
failCounter.run(j, () -> eq.almostEqualRelativeOrAbsolute(beta[jj], gradient), () -> {
Assertions.fail(() -> String.format("Not same gradient @ %d,%d: %s != %s (error=%s)", ii, jj, beta[jj], gradient, DoubleEquality.relativeError(beta[jj], gradient)));
});
}
}
}
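A closing note on the repeated "factor of -2" comments: in the Numerical Recipes Levenberg-Marquardt convention the comments refer to, the gradient vector is defined as beta_k = -(1/2) * d(chi^2)/d(a_k). Multiplying beta[j] by -2 therefore converts it into d(chi^2)/d(a_k), which is what the symmetric difference (s1 - s2) / (2 * d) of the procedure value estimates, so the two can be compared directly.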