Use of uk.ac.sussex.gdsc.test.api.function.DoubleDoubleBiPredicate in project GDSC-SMLM by aherbert.
The class TrackPopulationAnalysisTest, method canComputeBrownianDiffusionFunction1.
@Test
void canComputeBrownianDiffusionFunction1() {
  final int size = 10;
  final double delta = 1e-6;
  final DoubleDoubleBiPredicate test = TestHelper.doublesAreClose(1e-5);
  for (final double t : new double[] {0.8, 1, 1.2}) {
    final MultivariateJacobianFunction f = new BrownianDiffusionFunction(size, t);
    for (final double d : new double[] {0.8, 0.9, 1, 1.1, 1.2}) {
      for (final double s : new double[] {2, 20}) {
        // Check the value and Jacobian
        final RealVector point = new ArrayRealVector(new double[] {d, s}, false);
        final Pair<RealVector, RealMatrix> p = f.value(point);
        final double[] value = p.getFirst().toArray();
        Assertions.assertEquals(size, value.length);
        for (int n = 1; n <= size; n++) {
          // MSD = 4Dt * (n - 1/3) + 4s^2
          final double msd = 4 * d * t * (n - 1.0 / 3) + 4 * s * s;
          TestAssertions.assertTest(msd, value[n - 1], test, "value");
        }
        // Columns of the Jacobian
        final double[] dfda1 = p.getSecond().getColumn(0);
        final double[] dfdb1 = p.getSecond().getColumn(1);
        point.setEntry(0, d - delta);
        RealVector v1 = f.value(point).getFirst();
        point.setEntry(0, d + delta);
        RealVector v2 = f.value(point).getFirst();
        final double[] dfda = v2.subtract(v1).mapDivide(2 * delta).toArray();
        point.setEntry(0, d);
        point.setEntry(1, s - delta);
        v1 = f.value(point).getFirst();
        point.setEntry(1, s + delta);
        v2 = f.value(point).getFirst();
        final double[] dfdb = v2.subtract(v1).mapDivide(2 * delta).toArray();
        // Element-by-element relative error
        TestAssertions.assertArrayTest(dfda, dfda1, test, "jacobian dfda");
        TestAssertions.assertArrayTest(dfdb, dfdb1, test, "jacobian dfdb");
      }
    }
  }
}
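The test above validates each analytic Jacobian column against a central-difference numerical gradient. The same pattern can be lifted into a small reusable helper; the following is a minimal sketch using only Commons Math types, where the numericalColumn name is illustrative and not part of GDSC-SMLM:

import org.apache.commons.math3.fitting.leastsquares.MultivariateJacobianFunction;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealVector;

/** Hypothetical helper: estimate one Jacobian column by central differences. */
static double[] numericalColumn(MultivariateJacobianFunction f, double[] params, int index,
    double delta) {
  final double[] lower = params.clone();
  final double[] upper = params.clone();
  lower[index] -= delta;
  upper[index] += delta;
  final RealVector v1 = f.value(new ArrayRealVector(lower, false)).getFirst();
  final RealVector v2 = f.value(new ArrayRealVector(upper, false)).getFirst();
  // (f(x + delta) - f(x - delta)) / (2 * delta) approximates df/dx[index]
  return v2.subtract(v1).mapDivide(2 * delta).toArray();
}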
Use of uk.ac.sussex.gdsc.test.api.function.DoubleDoubleBiPredicate in project GDSC-SMLM by aherbert.
The class UnivariateLikelihoodFisherInformationCalculatorTest, method computePoissonFisherInformation.
private static void computePoissonFisherInformation(UniformRandomProvider rng, Model model) {
  // Create function
  final Gaussian2DFunction func = GaussianFunctionFactory.create2D(1, 10, 10,
      GaussianFunctionFactory.FIT_ERF_CIRCLE, null);
  final double[] params = new double[1 + Gaussian2DFunction.PARAMETERS_PER_PEAK];
  params[Gaussian2DFunction.BACKGROUND] = nextUniform(rng, 0.1, 0.3);
  params[Gaussian2DFunction.SIGNAL] = nextUniform(rng, 100, 300);
  params[Gaussian2DFunction.X_POSITION] = nextUniform(rng, 4, 6);
  params[Gaussian2DFunction.Y_POSITION] = nextUniform(rng, 4, 6);
  params[Gaussian2DFunction.X_SD] = nextUniform(rng, 1, 1.3);
  Gradient1Function f1 = func;
  FisherInformation fi;
  switch (model) {
    case POISSON_GAUSSIAN:
      // Get a variance
      final double var = 0.9 + 0.2 * rng.nextDouble();
      fi = new PoissonGaussianApproximationFisherInformation(Math.sqrt(var));
      f1 = (Gradient1Function) OffsetFunctionFactory.wrapFunction(func,
          SimpleArrayUtils.newDoubleArray(func.size(), var));
      break;
    case POISSON:
      fi = new PoissonFisherInformation();
      break;
    case HALF_POISSON:
      fi = new HalfPoissonFisherInformation();
      break;
    default:
      throw new IllegalStateException();
  }
  // This introduces a dependency on a different package, and relies on that
  // computing the correct answer. However that code predates this and so the
  // test ensures that the FisherInformationCalculator functions correctly.
  final PoissonGradientProcedure p1 = PoissonGradientProcedureUtils.create(f1);
  p1.computeFisherInformation(params);
  final double[] e = p1.getLinear();
  final FisherInformationCalculator calc =
      new UnivariateLikelihoodFisherInformationCalculator(func, fi);
  final FisherInformationMatrix I = calc.compute(params);
  final double[] o = I.getMatrix().data;
  final boolean emCcd = model == Model.HALF_POISSON;
  if (emCcd) {
    // Assumes half the Poisson Fisher information
    SimpleArrayUtils.multiply(e, 0.5);
  }
  Assertions.assertArrayEquals(e, o, 1e-6);
  final DoubleDoubleBiPredicate predicate = TestHelper.doublesAreClose(5e-2, 0);
  if (model == Model.POISSON || model == Model.HALF_POISSON) {
    // Get the Mortensen approximation for fitting Poisson data with a Gaussian.
    // Set a to 100 for the square pixel adjustment.
    final double a = 100;
    final double s = params[Gaussian2DFunction.X_SD] * a;
    final double N = params[Gaussian2DFunction.SIGNAL];
    final double b2 = params[Gaussian2DFunction.BACKGROUND];
    double var = Gaussian2DPeakResultHelper.getMLVarianceX(a, s, N, b2, emCcd);
    // Convert expected variance to pixels
    var /= (a * a);
    // Get the limits by inverting the Fisher information
    final double[] crlb = I.crlb();
    TestAssertions.assertTest(var, crlb[2], predicate);
    TestAssertions.assertTest(var, crlb[3], predicate);
  }
}
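The crlb() call above derives the Cramér-Rao lower bounds on the parameter variances from the Fisher information matrix, i.e. the diagonal of its inverse. A minimal sketch of that relationship using plain Commons Math (the crlbFromFisher helper is illustrative, not the GDSC-SMLM API, which may use a more robust inversion):

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.LUDecomposition;
import org.apache.commons.math3.linear.RealMatrix;

/** Hypothetical helper: CRLB as the diagonal of the inverse Fisher information matrix. */
static double[] crlbFromFisher(double[][] fisher) {
  final RealMatrix m = new Array2DRowRealMatrix(fisher, false);
  final RealMatrix inverse = new LUDecomposition(m).getSolver().getInverse();
  final double[] crlb = new double[fisher.length];
  for (int i = 0; i < crlb.length; i++) {
    // The variance bound for parameter i is element (i, i) of the inverse
    crlb[i] = inverse.getEntry(i, i);
  }
  return crlb;
}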
Use of uk.ac.sussex.gdsc.test.api.function.DoubleDoubleBiPredicate in project GDSC-SMLM by aherbert.
The class CustomSimpsonIntegratorTest, method canIntegrateFunction.
private static void canIntegrateFunction(TestUnivariateFunction func, double ax, double bx,
    int iter) {
  final double relativeAccuracy = 1e-4;
  // So it stops at the min iterations
  final double absoluteAccuracy = Double.POSITIVE_INFINITY;
  final CustomSimpsonIntegrator in = new CustomSimpsonIntegrator(
      // new SimpsonIntegrator(
      relativeAccuracy, absoluteAccuracy, iter,
      CustomSimpsonIntegrator.SIMPSON_MAX_ITERATIONS_COUNT);
  final double e = func.sum(ax, bx);
  final double ee = simpson(func, ax, bx, iter);
  final double o = in.integrate(Integer.MAX_VALUE, func, ax, bx);
  logger.log(TestLogUtils.getRecord(Level.INFO, "%s iter=%d %g-%g e=%g ee=%g o=%g",
      func.getClass().getSimpleName(), iter, ax, bx, e, ee, o));
  final DoubleDoubleBiPredicate predicate = TestHelper.doublesAreClose(1e-6, 0);
  TestAssertions.assertTest(e, ee, predicate);
  TestAssertions.assertTest(e, o, predicate);
  // These should be the same within numeric tolerance
  TestAssertions.assertTest(ee, o, TestHelper.doublesAreClose(1e-12, 0));
}
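The simpson reference helper used above is not shown on this page. A plausible composite Simpson's rule sketch, assuming the test function is a standard Commons Math UnivariateFunction and that iter controls the number of sub-intervals as a power of two (both assumptions for illustration):

import org.apache.commons.math3.analysis.UnivariateFunction;

/**
 * Hypothetical reference: composite Simpson's rule over 2^iter sub-intervals.
 * This is a sketch of what the simpson(...) helper above plausibly computes.
 */
static double simpson(UnivariateFunction func, double ax, double bx, int iter) {
  // Number of sub-intervals (must be even for Simpson's rule)
  final int n = 1 << iter;
  final double h = (bx - ax) / n;
  double sum = func.value(ax) + func.value(bx);
  for (int i = 1; i < n; i++) {
    // Interior points are weighted 4 (odd index) or 2 (even index)
    sum += func.value(ax + i * h) * (i % 2 == 0 ? 2 : 4);
  }
  return sum * h / 3;
}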
Use of uk.ac.sussex.gdsc.test.api.function.DoubleDoubleBiPredicate in project GDSC-SMLM by aherbert.
The class MultivariateGaussianMixtureExpectationMaximizationTest, method canCreateUnmixedMultivariateGaussianDistribution.
@Test
void canCreateUnmixedMultivariateGaussianDistribution() {
  final double[][] data = {{1, 2}, {2.5, 1.5}, {3.5, 1.0}};
  final double[] means = getColumnMeans(data);
  final double[][] covariances = getCovariance(data);
  final MultivariateGaussianDistribution exp =
      MultivariateGaussianDistribution.create(means, covariances);
  final MultivariateGaussianDistribution obs =
      MultivariateGaussianMixtureExpectationMaximization.createUnmixed(data);
  final DoubleDoubleBiPredicate test = TestHelper.doublesAreClose(1e-6);
  TestAssertions.assertArrayTest(exp.getMeans(), obs.getMeans(), test);
  TestAssertions.assertArrayTest(exp.getCovariances(), obs.getCovariances(), test);
}
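The getColumnMeans and getCovariance test helpers are not shown on this page. A minimal sketch of what they plausibly compute, using plain Commons Math and assuming the sample column means and the bias-corrected sample covariance:

import org.apache.commons.math3.stat.correlation.Covariance;

/** Hypothetical stand-ins for the test helpers: column means and sample covariance matrix. */
static double[] getColumnMeans(double[][] data) {
  final double[] means = new double[data[0].length];
  for (final double[] row : data) {
    for (int i = 0; i < row.length; i++) {
      means[i] += row[i] / data.length;
    }
  }
  return means;
}

static double[][] getCovariance(double[][] data) {
  // Bias-corrected (n - 1) sample covariance, as computed by Commons Math
  return new Covariance(data).getCovarianceMatrix().getData();
}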
Use of uk.ac.sussex.gdsc.test.api.function.DoubleDoubleBiPredicate in project GDSC-SMLM by aherbert.
The class MultivariateGaussianMixtureExpectationMaximizationTest, method canEstimateInitialMixture.
@SeededTest
void canEstimateInitialMixture(RandomSeed seed) {
  // Test versus the Commons Math estimation
  final UniformRandomProvider rng = RngUtils.create(seed.getSeed());
  final DoubleDoubleBiPredicate test = TestHelper.doublesAreClose(1e-5, 1e-16);
  // Number of components
  for (int n = 2; n <= 3; n++) {
    final double[] sampleWeights = createWeights(n, rng);
    final double[][] sampleMeans = create(n, 2, rng, -5, 5);
    final double[][] sampleStdDevs = create(n, 2, rng, 1, 10);
    final double[] sampleCorrelations = create(n, rng, -0.9, 0.9);
    final double[][] data =
        createData2d(1000, rng, sampleWeights, sampleMeans, sampleStdDevs, sampleCorrelations);
    final MixtureMultivariateGaussianDistribution model1 =
        MultivariateGaussianMixtureExpectationMaximization.estimate(data, n);
    final MixtureMultivariateNormalDistribution model2 =
        MultivariateNormalMixtureExpectationMaximization.estimate(data, n);
    final List<Pair<Double, MultivariateNormalDistribution>> comp = model2.getComponents();
    final double[] weights = model1.getWeights();
    final MultivariateGaussianDistribution[] distributions = model1.getDistributions();
    Assertions.assertEquals(n, comp.size());
    Assertions.assertEquals(n, weights.length);
    Assertions.assertEquals(n, distributions.length);
    for (int i = 0; i < n; i++) {
      // Must be binary equal for estimated model
      Assertions.assertEquals(comp.get(i).getFirst(), weights[i], "weight");
      final MultivariateNormalDistribution d = comp.get(i).getSecond();
      TestAssertions.assertArrayTest(d.getMeans(), distributions[i].getMeans(), test, "means");
      TestAssertions.assertArrayTest(d.getCovariances().getData(),
          distributions[i].getCovariances(), test, "covariances");
    }
  }
}
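Across all of these tests, the DoubleDoubleBiPredicate produced by TestHelper.doublesAreClose(relativeError, absoluteError) drives the floating-point comparisons: a pair of doubles is accepted when either the absolute difference or the relative difference is within tolerance. A minimal sketch of that idea, not the library implementation (the exact relative-error definition used by the library may differ):

/** Illustrative check mirroring a relative-or-absolute closeness predicate. */
static boolean areClose(double a, double b, double relativeError, double absoluteError) {
  final double delta = Math.abs(a - b);
  // Accept if within the absolute tolerance, or within a relative tolerance of the larger magnitude
  return delta <= absoluteError
      || delta <= relativeError * Math.max(Math.abs(a), Math.abs(b));
}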