
Example 11 with DoubleArrayList

Use of it.unimi.dsi.fastutil.doubles.DoubleArrayList in project gdsc-smlm by aherbert.

The class WeightedFilterTest, method filterDoesNotAlterFilteredImageMean.

@SeededTest
void filterDoesNotAlterFilteredImageMean(RandomSeed seed) {
    final UniformRandomProvider rg = RngFactory.create(seed.get());
    // ExponentialDistribution ed = new ExponentialDistribution(rand, 57,
    // ExponentialDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
    final DataFilter filter = createDataFilter();
    final float[] offsets = getOffsets(filter);
    final int[] boxSizes = getBoxSizes(filter);
    final DoubleArrayList l1 = new DoubleArrayList();
    final SharedStateContinuousSampler gs = SamplerUtils.createGaussianSampler(rg, 2, 0.2);
    for (final int width : primes) {
        for (final int height : primes) {
            final float[] data = createData(width, height, rg);
            l1.clear();
            filter.setWeights(null, width, height);
            for (final int boxSize : boxSizes) {
                for (final float offset : offsets) {
                    for (final boolean internal : checkInternal) {
                        l1.add(getMean(data, width, height, boxSize - offset, internal, filter));
                    }
                }
            }
            final double[] e = l1.elements();
            int ei = 0;
            // Uniform weights
            final float[] w = new float[width * height];
            Arrays.fill(w, 0.5f);
            filter.setWeights(w, width, height);
            for (final int boxSize : boxSizes) {
                for (final float offset : offsets) {
                    for (final boolean internal : checkInternal) {
                        testMean(data, width, height, boxSize - offset, internal, filter, "w=0.5", e[ei++], 1e-5);
                    }
                }
            }
            // Weights simulating the variance of sCMOS pixels
            for (int i = 0; i < w.length; i++) {
                // w[i] = (float) (1.0 / Math.max(0.01, ed.sample()));
                w[i] = (float) (1.0 / Math.max(0.01, gs.sample()));
                // w[i] = 0.5f;
            }
            ei = 0;
            filter.setWeights(w, width, height);
            for (final int boxSize : boxSizes) {
                for (final float offset : offsets) {
                    for (final boolean internal : checkInternal) {
                        testMean(data, width, height, boxSize - offset, internal, filter, "w=?", e[ei++], 5e-2);
                    }
                }
            }
        }
    }
}
Also used: SharedStateContinuousSampler (org.apache.commons.rng.sampling.distribution.SharedStateContinuousSampler), UniformRandomProvider (org.apache.commons.rng.UniformRandomProvider), DoubleArrayList (it.unimi.dsi.fastutil.doubles.DoubleArrayList), SeededTest (uk.ac.sussex.gdsc.test.junit5.SeededTest)
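
In this test the same DoubleArrayList is reused across image sizes: l1.clear() resets it before the unweighted pass, add(...) accumulates the expected means, and elements() exposes the backing array so the weighted passes can read back e[ei++] without copying. A minimal sketch of that reuse pattern (the values and loop bounds are made up for illustration, not taken from the test):

import it.unimi.dsi.fastutil.doubles.DoubleArrayList;

public class DoubleArrayListReuse {
    public static void main(String[] args) {
        final DoubleArrayList expected = new DoubleArrayList();
        for (int pass = 0; pass < 3; pass++) {
            // clear() keeps the backing array, so there is no re-allocation between passes
            expected.clear();
            for (int i = 0; i < 5; i++) {
                expected.add(pass + i * 0.5); // stand-in for getMean(...)
            }
            // elements() returns the backing array itself; only the first size() entries are valid
            final double[] e = expected.elements();
            for (int i = 0; i < expected.size(); i++) {
                System.out.println(e[i]);
            }
        }
    }
}

Using elements() instead of toDoubleArray() avoids a copy, which is why the test bounds its reads by repeating the same boxSize/offset/internal loops rather than by the array length.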

Example 12 with DoubleArrayList

Use of it.unimi.dsi.fastutil.doubles.DoubleArrayList in project gdsc-smlm by aherbert.

The class CameraModelAnalysis, method cumulativeDistribution.

private static double[][] cumulativeDistribution(CameraModelAnalysisSettings settings, double[][] cdf, final LikelihoodFunction fun) {
    // Q. How to match this to the discrete cumulative histogram using the continuous
    // likelihood function:
    // 1. Compute integral up to the value
    // 2. Compute integral up to but not including the next value using trapezoid integration
    // 3. Compute integral up to but not including the next value using flat-top integration
    // Since the function will be used on continuous float data when fitting PSFs, the best
    // match for how it will perform in practice is a continuous (trapezoid) integration.
    // The simplest is a flat-top integration.
    // Compute the probability at each value
    final double e = settings.getPhotons();
    double[] x = cdf[0];
    double[] y = new double[x.length];
    for (int i = 0; i < x.length; i++) {
        y[i] = fun.likelihood(x[i], e);
    }
    // Add more until the probability change is marginal
    double sum = MathUtils.sum(y);
    final DoubleArrayList list = new DoubleArrayList(y);
    for (int o = (int) x[x.length - 1] + 1; ; o++) {
        final double p = fun.likelihood(o, e);
        list.add(p);
        if (p == 0) {
            break;
        }
        sum += p;
        if (p / sum < PROBABILITY_DELTA) {
            break;
        }
    }
    final DoubleArrayList list2 = new DoubleArrayList(10);
    for (int o = (int) x[0] - 1; ; o--) {
        final double p = fun.likelihood(o, e);
        list2.add(p);
        if (p == 0) {
            break;
        }
        sum += p;
        if (p / sum < PROBABILITY_DELTA) {
            break;
        }
    }
    // Insert at start
    double start = x[0];
    if (!list2.isEmpty()) {
        start -= list2.size();
        DoubleArrays.reverse(list2.elements(), 0, list2.size());
        list.addAll(0, list2);
    }
    y = list.toDoubleArray();
    x = SimpleArrayUtils.newArray(y.length, start, 1.0);
    if (settings.getSimpsonIntegration()) {
        int c0 = -1;
        double dirac = 0;
        int minc = 0;
        int maxc = 0;
        CachingUnivariateFunction uf = null;
        if (settings.getMode() == MODE_EM_CCD && isPoissonGammaLikelihoodFunction(settings)) {
            // A spike is expected at c=0 due to the Dirac delta contribution.
            // This breaks integration, especially when noise < 0.1.
            // Fix by integrating around c=0 fully then integrating the rest.
            c0 = Arrays.binarySearch(x, 0);
            final double noise = getReadNoise(settings);
            final double p = settings.getPhotons();
            if (noise == 0) {
                // Pure Poisson-Gamma. Just subtract the delta, do the simple integration
                // below and add the delta back. Only functions that support noise==0
                // will be allowed so this solution works.
                dirac = PoissonGammaFunction.dirac(p);
                if (c0 != -1) {
                    y[c0] -= dirac;
                }
            } else {
                // Fix integration around c=0 using the range of the Gaussian
                minc = (int) Math.max(x[0], Math.floor(-5 * noise));
                maxc = (int) Math.min(x[x.length - 1], Math.ceil(5 * noise));
                uf = new CachingUnivariateFunction(fun, p);
            }
        }
        // Use Simpson's integration with n=4 to get the integral of the probability
        // over the range of each count.
        // Note the Poisson-Gamma function cannot be integrated with the
        // Dirac delta function at c==0
        // Compute the extra function points
        final double[] f = new double[y.length * SIMPSON_N + 1];
        int index = f.length;
        final int mod;
        if (settings.getRoundDown()) {
            // Do this final value outside the loop as y[index/n] does not exist
            mod = 0;
            index--;
            f[index] = fun.likelihood(start + index * SIMPSON_H, e);
        } else {
            // Used when computing for rounding up/down
            mod = SIMPSON_N_2;
            start -= SIMPSON_N_2 * SIMPSON_H;
        }
        while (index-- > 0) {
            if (index % SIMPSON_N == mod) {
                f[index] = y[index / SIMPSON_N];
            } else {
                f[index] = fun.likelihood(start + index * SIMPSON_H, e);
            }
        }
        // Compute indices for the integral
        final IntArrayList tmp = new IntArrayList(SIMPSON_N);
        for (int j = 1; j <= SIMPSON_N_2 - 1; j++) {
            tmp.add(2 * j);
        }
        final int[] i2 = tmp.toIntArray();
        tmp.clear();
        for (int j = 1; j <= SIMPSON_N_2; j++) {
            tmp.add(2 * j - 1);
        }
        final int[] i4 = tmp.toIntArray();
        // Compute integral
        for (int i = 0; i < y.length; i++) {
            final int a = i * SIMPSON_N;
            final int b = a + SIMPSON_N;
            sum = f[a] + f[b];
            for (int j = i2.length; j-- > 0; ) {
                sum += 2 * f[a + i2[j]];
            }
            for (int j = i4.length; j-- > 0; ) {
                sum += 4 * f[a + i4[j]];
            }
            sum *= SIMPSON_H / 3;
            // System.out.printf("y[%d] = %f => %f\n", i, y[i], s);
            y[i] = sum;
        }
        // Fix Poisson-Gamma ...
        if (c0 != -1) {
            if (uf == null) {
                // Pure Poisson-Gamma. Just add back the delta.
                y[c0] += dirac;
            } else {
                // Convolved Poisson-Gamma. Fix in the range of the Gaussian around c=0
                final CustomSimpsonIntegrator in = new CustomSimpsonIntegrator(SIMPSON_RELATIVE_ACCURACY, SIMPSON_ABSOLUTE_ACCURACY, SIMPSON_MIN_ITERATION_COUNT, CustomSimpsonIntegrator.SIMPSON_MAX_ITERATIONS_COUNT);
                final double lower = (settings.getRoundDown()) ? 0 : -0.5;
                final double upper = lower + 1;
                // Switch from c<=maxc to c<maxc. Avoid double computation at minc==maxc
                if (maxc != minc) {
                    maxc++;
                }
                maxc++;
                for (int c = minc, i = Arrays.binarySearch(x, minc); c < maxc; c++, i++) {
                    uf.reset();
                    try {
                        y[i] = in.integrate(2000, uf, c + lower, c + upper);
                    } catch (final TooManyEvaluationsException ex) {
                        // Q. Is the last sum valid?
                        if (in.getLastSum() > 0) {
                            y[i] = in.getLastSum();
                        } else {
                            // Otherwise use all the cached values to compute a sum
                            // using the trapezoid rule. This will underestimate the sum.
                            // Note: The Simpson integrator will have computed the edge values
                            // as the first two values in the cache.
                            final double[] g = uf.list.elements();
                            final double dx = (g[3] - g[1]) / in.getN();
                            final int total = 1 + 2 * ((int) in.getN());
                            sum = 0;
                            for (int j = 4; j < total; j += 2) {
                                sum += g[j];
                            }
                            y[i] = (g[0] + g[2] + 2 * sum) / dx;
                        }
                    }
                }
            }
        }
    }
    // Simple flat-top integration
    sum = 0;
    for (int i = 0; i < y.length; i++) {
        sum += y[i];
        y[i] = sum;
    }
    return new double[][] { x, y };
}
Also used: TooManyEvaluationsException (org.apache.commons.math3.exception.TooManyEvaluationsException), DoubleArrayList (it.unimi.dsi.fastutil.doubles.DoubleArrayList), IntArrayList (it.unimi.dsi.fastutil.ints.IntArrayList), CustomSimpsonIntegrator (uk.ac.sussex.gdsc.smlm.math3.analysis.integration.CustomSimpsonIntegrator)
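
The i2 and i4 index arrays above implement composite Simpson's rule: within each unit count the function is sampled at SIMPSON_N + 1 points, and the interior samples receive weight 4 at odd offsets and weight 2 at even offsets before the h/3 scaling. A self-contained sketch of that weighting, assuming n sub-intervals of width h = 1/n over one count (SIMPSON_N and SIMPSON_H are constants of the original class and are not shown in this excerpt):

import java.util.function.DoubleUnaryOperator;

public class CompositeSimpson {
    /** Integrate f over [a, a + 1] using Simpson's rule with n sub-intervals (n must be even). */
    static double simpson(DoubleUnaryOperator f, double a, int n) {
        final double h = 1.0 / n;
        double sum = f.applyAsDouble(a) + f.applyAsDouble(a + 1.0);
        for (int j = 1; j < n; j++) {
            // Interior points: weight 4 at odd indices (the i4 set), weight 2 at even indices (the i2 set)
            sum += (j % 2 == 1 ? 4 : 2) * f.applyAsDouble(a + j * h);
        }
        return sum * h / 3;
    }

    public static void main(String[] args) {
        // The integral of x^2 over [0, 1] is 1/3; Simpson's rule is exact for polynomials up to cubic
        System.out.println(simpson(x -> x * x, 0, 4));
    }
}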

Example 13 with DoubleArrayList

Use of it.unimi.dsi.fastutil.doubles.DoubleArrayList in project gdsc-smlm by aherbert.

The class CameraModelAnalysis, method convolveHistogram.

/**
 * Convolve the histogram. The output is a discrete probability distribution.
 *
 * @param settings the settings
 * @return The histogram
 */
private static double[][] convolveHistogram(CameraModelAnalysisSettings settings) {
    // Find the range of the Poisson
    final PoissonDistribution poisson = new PoissonDistribution(settings.getPhotons());
    final int maxn = poisson.inverseCumulativeProbability(UPPER);
    final double gain = getGain(settings);
    final double noise = getReadNoise(settings);
    final boolean debug = false;
    // Build the Probability Mass/Density Function (PDF) of the distribution:
    // either a Poisson (PMF) or Poisson-Gamma (PDF). The PDF is 0 at all
    // values apart from the step interval.
    // Note: The Poisson-Gamma is computed without the Dirac delta contribution
    // at c=0. This allows correct convolution with the Gaussian of the dirac delta
    // and the rest of the Poisson-Gamma (so matching the simulation).
    final DoubleArrayList list = new DoubleArrayList();
    double step;
    String name;
    int upsample = 100;
    // Store the Dirac delta value at c=0. This must be convolved separately.
    double dirac = 0;
    // EM-CCD
    if (settings.getMode() == MODE_EM_CCD) {
        name = "Poisson-Gamma";
        final double m = gain;
        final double p = settings.getPhotons();
        dirac = PoissonGammaFunction.dirac(p);
        // Choose whether to compute a discrete PMF or a PDF using the approximation.
        // Note: The delta function at c=0 is from the PMF of the Poisson. So it is
        // a discrete contribution. This is omitted from the PDF and handled in
        // a separate convolution.
        // noise != 0;
        final boolean discrete = false;
        if (discrete) {
            // Note: This is obsolete as the Poisson-Gamma function is continuous.
            // Sampling it at integer intervals is not valid, especially for low gain.
            // The Poisson-Gamma PDF should be integrated to form a discrete PMF.
            step = 1.0;
            double upper;
            if (settings.getPhotons() < 20) {
                upper = maxn;
            } else {
                // Approximate reasonable range of Poisson as a Gaussian
                upper = settings.getPhotons() + 3 * Math.sqrt(settings.getPhotons());
            }
            final GammaDistribution gamma = new GammaDistribution(null, upper, gain, GammaDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
            final int maxc = (int) gamma.inverseCumulativeProbability(0.999);
            final int minn = Math.max(1, poisson.inverseCumulativeProbability(LOWER));
            // See Ulbrich & Isacoff (2007). Nature Methods 4, 319-321, SI equation 3.
            // Note this is not a convolution of a single Gamma distribution since the shape
            // is modified not the count. So it is a convolution of a distribution made with
            // a gamma of fixed count and variable shape.
            // The count=0 is a special case.
            list.add(PoissonGammaFunction.poissonGammaN(0, p, m));
            final long total = (maxn - minn) * (long) maxc;
            if (total < 1000000) {
                // Full computation
                // G(c) = sum n { (1 / n!) p^n e^-p (1 / ((n-1!)m^n)) c^n-1 e^-c/m }
                // Compute as a log
                // - log(n!) + n*log(p)-p -log((n-1)!) - n * log(m) + (n-1) * log(c) -c/m
                // Note: Both methods work
                LogFactorialCache lfc = LOG_FACTORIAL_CACHE.get().get();
                if (lfc == null) {
                    lfc = new LogFactorialCache();
                    LOG_FACTORIAL_CACHE.set(new SoftReference<>(lfc));
                }
                lfc.ensureRange(minn - 1, maxn);
                final double[] f = new double[maxn + 1];
                final double logm = Math.log(m);
                final double logp = Math.log(p);
                for (int n = minn; n <= maxn; n++) {
                    f[n] = -lfc.getLogFactorialUnsafe(n) + n * logp - p - lfc.getLogFactorialUnsafe(n - 1) - n * logm;
                }
                // double total2 = total;
                for (int c = 1; c <= maxc; c++) {
                    double sum = 0;
                    final double cDivM = c / m;
                    final double logc = Math.log(c);
                    for (int n = minn; n <= maxn; n++) {
                        sum += StdMath.exp(f[n] + (n - 1) * logc - cDivM);
                    }
                    // sum2 += pd[n] * gd[n].density(c);
                    list.add(sum);
                    // total += sum;
                    // This should match the approximation
                    // double approx = PoissonGammaFunction.poissonGamma(c, p, m);
                    // total2 += approx;
                    // System.out.printf("c=%d sum=%g approx=%g error=%g\n", c, sum2, approx,
                    // uk.ac.sussex.gdsc.core.utils.DoubleEquality.relativeError(sum2, approx));
                }
                // System.out.printf("sum=%g approx=%g error=%g\n", total, total2,
                // uk.ac.sussex.gdsc.core.utils.DoubleEquality.relativeError(total, total2));
            } else {
                // Approximate
                for (int c = 1; c <= maxc; c++) {
                    list.add(PoissonGammaFunction.poissonGammaN(c, p, m));
                }
            }
        } else {
            // This integrates the PDF using the approximation and up-samples together.
            // Compute the sampling interval.
            step = 1.0 / upsample;
            // Reset
            upsample = 1;
            // Compute the integral of [-step/2:step/2] for each point.
            // Use trapezoid integration.
            final double halfStep = step / 2;
            double prev = PoissonGammaFunction.poissonGammaN(0, p, m);
            double next = PoissonGammaFunction.poissonGammaN(halfStep, p, m);
            list.add((prev + next) * 0.25);
            double max = 0;
            for (int i = 1; ; i++) {
                // Each remaining point is modelling a PMF for the range [-step/2:step/2]
                prev = next;
                next = PoissonGammaFunction.poissonGammaN(i * step + halfStep, p, m);
                final double pp = (prev + next) * 0.5;
                if (max < pp) {
                    max = pp;
                }
                if (pp / max < 1e-5) {
                    // Use this if non-zero since it has been calculated
                    if (pp != 0) {
                        list.add(pp);
                    }
                    break;
                }
                list.add(pp);
            }
        }
        // Ensure the combined sum of PDF and Dirac is 1
        final double expected = 1 - dirac;
        // Require an odd number to get an even number (n) of sub-intervals:
        if (list.size() % 2 == 0) {
            list.add(0);
        }
        final double[] g = list.elements();
        // Number of sub intervals
        final int n = list.size() - 1;
        // h = (a-b) / n = sub-interval width
        final double h = 1;
        double sum2 = 0;
        double sum4 = 0;
        for (int j = 1; j <= n / 2 - 1; j++) {
            sum2 += g[2 * j];
        }
        for (int j = 1; j <= n / 2; j++) {
            sum4 += g[2 * j - 1];
        }
        final double sum = (h / 3) * (g[0] + 2 * sum2 + 4 * sum4 + g[n]);
        // Check
        // System.out.printf("Sum=%g Expected=%g\n", sum * step, expected);
        final double factor = expected / sum;
        SimpleArrayUtils.apply(g, 0, n + 1, x -> x * factor);
    } else {
        name = "Poisson";
        // Apply fixed gain. Just change the step interval of the PMF.
        step = gain;
        for (int n = 0; n <= maxn; n++) {
            list.add(poisson.probability(n));
        }
        final double p = poisson.probability(list.size());
        if (p != 0) {
            list.add(p);
        }
    }
    // Debug
    if (debug) {
        final String title = name;
        final Plot plot = new Plot(title, "x", "y");
        plot.addPoints(SimpleArrayUtils.newArray(list.size(), 0, step), list.toDoubleArray(), Plot.LINE);
        ImageJUtils.display(title, plot);
    }
    double zero = 0;
    double[] pg = list.toDoubleArray();
    // Sample Gaussian
    if (noise > 0) {
        step /= upsample;
        // Convolve with Gaussian kernel
        final double[] kernel = GaussianKernel.makeGaussianKernel(Math.abs(noise) / step, 6, true);
        if (upsample != 1) {
            // Use scaled convolution. This is faster than zero-filling the distribution g.
            pg = Convolution.convolve(kernel, pg, upsample);
        } else if (dirac > 0.01) {
            // The Poisson-Gamma may be stepped at low mean causing wrap artifacts in the FFT.
            // This is a problem if most of the probability is in the Dirac.
            // Otherwise it can be ignored and the FFT version is OK.
            pg = Convolution.convolve(kernel, pg);
        } else {
            pg = Convolution.convolveFast(kernel, pg);
        }
        // The convolution will have created a larger array so we must adjust the offset for this
        final int radius = kernel.length / 2;
        zero -= radius * step;
        // Add convolution of the dirac delta function.
        if (dirac != 0) {
            // We only need to convolve the Gaussian at c=0
            for (int i = 0; i < kernel.length; i++) {
                pg[i] += kernel[i] * dirac;
            }
        }
        // Debug
        if (debug) {
            String title = "Gaussian";
            Plot plot = new Plot(title, "x", "y");
            plot.addPoints(SimpleArrayUtils.newArray(kernel.length, -radius * step, step), kernel, Plot.LINE);
            ImageJUtils.display(title, plot);
            title = name + "-Gaussian";
            plot = new Plot(title, "x", "y");
            plot.addPoints(SimpleArrayUtils.newArray(pg.length, zero, step), pg, Plot.LINE);
            ImageJUtils.display(title, plot);
        }
        zero = downSampleCdfToPmf(settings, list, step, zero, pg, 1.0);
        pg = list.toDoubleArray();
        zero = (int) Math.floor(zero);
        step = 1.0;
    // No convolution means we have the Poisson PMF/Poisson-Gamma PDF already
    } else if (step != 1) {
        // Sample to 1.0 pixel step interval.
        if (settings.getMode() == MODE_EM_CCD) {
            // Poisson-Gamma PDF
            zero = downSampleCdfToPmf(settings, list, step, zero, pg, 1 - dirac);
            pg = list.toDoubleArray();
            zero = (int) Math.floor(zero);
            // Add the dirac delta function.
            if (dirac != 0) {
                // Note: zero is the start of the x-axis. This value should be -1.
                assert (int) zero == -1;
                // Use as an offset to find the actual zero.
                pg[-(int) zero] += dirac;
            }
        } else {
            // Poisson PMF
            // Simple non-interpolated expansion.
            // This should be used when there is no Gaussian convolution.
            final double[] pd = pg;
            list.clear();
            // Account for rounding.
            final Round round = getRound(settings);
            final int maxc = round.round(pd.length * step + 1);
            pg = new double[maxc];
            for (int n = pd.length; n-- > 0; ) {
                pg[round.round(n * step)] += pd[n];
            }
            if (pg[0] != 0) {
                list.add(0);
                list.addElements(list.size(), pg);
                pg = list.toDoubleArray();
                zero--;
            }
        }
        step = 1.0;
    } else {
        // Add the dirac delta function.
        list.elements()[0] += dirac;
    }
    return new double[][] { SimpleArrayUtils.newArray(pg.length, zero, step), pg };
}
Also used: PoissonDistribution (uk.ac.sussex.gdsc.smlm.math3.distribution.PoissonDistribution), LogFactorialCache (uk.ac.sussex.gdsc.smlm.function.LogFactorialCache), Plot (ij.gui.Plot), DoubleArrayList (it.unimi.dsi.fastutil.doubles.DoubleArrayList), GammaDistribution (org.apache.commons.math3.distribution.GammaDistribution)
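
The Dirac delta is handled outside the main convolution because convolving a weighted delta with a kernel simply reproduces the kernel scaled by that weight, so adding kernel[i] * dirac at the kernel offsets around c = 0 gives the exact contribution without a second convolution pass. A small check of that shortcut (the kernel and weight here are arbitrary illustrative values, not taken from the plugin):

public class DiracConvolutionCheck {
    public static void main(String[] args) {
        final double dirac = 0.3;
        final double[] kernel = {0.05, 0.25, 0.4, 0.25, 0.05};
        // Full discrete convolution of a spike [dirac, 0, 0, ...] with the kernel
        final double[] spike = new double[8];
        spike[0] = dirac;
        final double[] full = new double[spike.length + kernel.length - 1];
        for (int i = 0; i < spike.length; i++) {
            for (int j = 0; j < kernel.length; j++) {
                full[i + j] += spike[i] * kernel[j];
            }
        }
        // The shortcut used in convolveHistogram: scale the kernel by the delta weight
        for (int j = 0; j < kernel.length; j++) {
            System.out.printf("full=%.4f shortcut=%.4f%n", full[j], kernel[j] * dirac);
        }
    }
}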

Example 14 with DoubleArrayList

Use of it.unimi.dsi.fastutil.doubles.DoubleArrayList in project gdsc-smlm by aherbert.

The class CameraModelFisherInformationAnalysis, method loadFunction.

/**
 * Load the Poisson Fisher information for the camera type from the cache.
 *
 * @param type the type
 * @param gain the gain
 * @param noise the noise
 * @param relativeError the relative error (used to compare the gain and noise)
 * @return the poisson fisher information (or null)
 */
public static InterpolatedPoissonFisherInformation loadFunction(CameraType type, double gain, double noise, double relativeError) {
    if (type == null || type.isFast()) {
        return null;
    }
    final FiKey key = new FiKey(type, gain, noise);
    PoissonFisherInformationData data = load(key);
    if (data == null && relativeError > 0 && relativeError < 0.1) {
        // Fuzzy matching
        double error = 1;
        for (final PoissonFisherInformationData d : cache.values()) {
            final double e1 = DoubleEquality.relativeError(gain, d.getGain());
            if (e1 < relativeError) {
                final double e2 = DoubleEquality.relativeError(noise, d.getNoise());
                if (e2 < relativeError) {
                    // Combined error - Euclidean distance
                    final double e = e1 * e1 + e2 * e2;
                    if (error > e) {
                        error = e;
                        data = d;
                    }
                }
            }
        }
    }
    if (data == null) {
        return null;
    }
    // Dump the samples. Convert to base e.
    final double scale = Math.log(10);
    final DoubleArrayList meanList = new DoubleArrayList(data.getAlphaSampleCount());
    final DoubleArrayList alphalist = new DoubleArrayList(data.getAlphaSampleCount());
    for (final AlphaSample sample : data.getAlphaSampleList()) {
        meanList.add(sample.getLog10Mean() * scale);
        alphalist.add(sample.getAlpha());
    }
    final double[] means = meanList.toDoubleArray();
    final double[] alphas = alphalist.toDoubleArray();
    SortUtils.sortData(alphas, means, true, false);
    final BasePoissonFisherInformation upperf = (type == CameraType.EM_CCD) ? new HalfPoissonFisherInformation() : createPoissonGaussianApproximationFisherInformation(noise / gain);
    return new InterpolatedPoissonFisherInformation(means, alphas, type.isLowerFixedI(), upperf);
}
Also used: BasePoissonFisherInformation (uk.ac.sussex.gdsc.smlm.function.BasePoissonFisherInformation), AlphaSample (uk.ac.sussex.gdsc.smlm.data.config.FisherProtos.AlphaSample), DoubleArrayList (it.unimi.dsi.fastutil.doubles.DoubleArrayList), InterpolatedPoissonFisherInformation (uk.ac.sussex.gdsc.smlm.function.InterpolatedPoissonFisherInformation), PoissonFisherInformationData (uk.ac.sussex.gdsc.smlm.data.config.FisherProtos.PoissonFisherInformationData), HalfPoissonFisherInformation (uk.ac.sussex.gdsc.smlm.function.HalfPoissonFisherInformation)
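
Here two DoubleArrayList instances are pre-sized with the known sample count and filled in lock-step to build the parallel means/alphas arrays. A minimal sketch of the same pattern, with a hypothetical Sample record standing in for FisherProtos.AlphaSample:

import it.unimi.dsi.fastutil.doubles.DoubleArrayList;
import java.util.List;

public class ParallelListFill {
    // Hypothetical stand-in for FisherProtos.AlphaSample
    record Sample(double log10Mean, double alpha) {}

    static double[][] toArrays(List<Sample> samples) {
        final double scale = Math.log(10); // convert log10 means to base e, as in loadFunction
        final DoubleArrayList means = new DoubleArrayList(samples.size());  // pre-sized: no regrowth
        final DoubleArrayList alphas = new DoubleArrayList(samples.size());
        for (final Sample s : samples) {
            means.add(s.log10Mean() * scale);
            alphas.add(s.alpha());
        }
        // toDoubleArray() returns a copy trimmed to size(), unlike elements()
        return new double[][] {means.toDoubleArray(), alphas.toDoubleArray()};
    }

    public static void main(String[] args) {
        final double[][] arrays = toArrays(List.of(new Sample(-2, 0.9), new Sample(0, 0.5)));
        System.out.println(arrays[0].length + " means, " + arrays[1].length + " alphas");
    }
}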

Example 15 with DoubleArrayList

Use of it.unimi.dsi.fastutil.doubles.DoubleArrayList in project gdsc-smlm by aherbert.

The class PoissonCalculatorTest, method canComputeLogLikelihoodRatio.

private static void canComputeLogLikelihoodRatio(RandomSeed seed, BaseNonLinearFunction nlf) {
    logger.log(TestLogging.getRecord(LOG_LEVEL, nlf.name));
    final int n = maxx * maxx;
    final double[] a = new double[] { 1 };
    // Simulate Poisson process
    nlf.initialise(a);
    final UniformRandomProvider rng = RngFactory.create(seed.get());
    final double[] x = new double[n];
    final double[] u = new double[n];
    for (int i = 0; i < n; i++) {
        u[i] = nlf.eval(i);
        if (u[i] > 0) {
            x[i] = GdscSmlmTestUtils.createPoissonSampler(rng, u[i]).sample();
        }
    }
    final DoubleDoubleBiPredicate predicate = Predicates.doublesAreClose(1e-10, 0);
    double ll = PoissonCalculator.logLikelihood(u, x);
    final double mll = PoissonCalculator.maximumLogLikelihood(x);
    double llr = -2 * (ll - mll);
    double llr2 = PoissonCalculator.logLikelihoodRatio(u, x);
    logger.log(TestLogging.getRecord(LOG_LEVEL, "llr=%f, llr2=%f", llr, llr2));
    TestAssertions.assertTest(llr, llr2, predicate, "Log-likelihood ratio");
    final double[] op = new double[x.length];
    for (int i = 0; i < n; i++) {
        op[i] = PoissonCalculator.maximumLikelihood(x[i]);
    }
    // TestSettings.setLogLevel(TestLevel.TEST_DEBUG);
    final int df = n - 1;
    final ChiSquaredDistributionTable table = ChiSquaredDistributionTable.createUpperTailed(0.05, df);
    final ChiSquaredDistributionTable table2 = ChiSquaredDistributionTable.createUpperTailed(0.001, df);
    if (logger.isLoggable(LOG_LEVEL)) {
        logger.log(TestLogging.getRecord(LOG_LEVEL, "Chi2 = %f (q=%.3f), %f (q=%.3f)  %f %b  %f", table.getCrititalValue(df), table.getSignificanceValue(), table2.getCrititalValue(df), table2.getSignificanceValue(), ChiSquaredDistributionTable.computeQValue(24, 2), ChiSquaredDistributionTable.createUpperTailed(0.05, 2).reject(24, 2), ChiSquaredDistributionTable.getChiSquared(1e-6, 2)));
    }
    final DoubleArrayList list = new DoubleArrayList();
    final int imin = 5;
    final int imax = 15;
    for (int i = imin; i <= imax; i++) {
        a[0] = (double) i / 10;
        nlf.initialise(a);
        for (int j = 0; j < n; j++) {
            u[j] = nlf.eval(j);
        }
        ll = PoissonCalculator.logLikelihood(u, x);
        list.add(ll);
        llr = PoissonCalculator.logLikelihoodRatio(u, x);
        BigDecimal product = new BigDecimal(1);
        double ll2 = 0;
        for (int j = 0; j < n; j++) {
            final double p1 = PoissonCalculator.likelihood(u[j], x[j]);
            ll2 += Math.log(p1);
            final double ratio = p1 / op[j];
            product = product.multiply(new BigDecimal(ratio));
        }
        llr2 = -2 * Math.log(product.doubleValue());
        final double p = ChiSquaredDistributionTable.computePValue(llr, df);
        final double q = ChiSquaredDistributionTable.computeQValue(llr, df);
        logger.log(TestLogging.getRecord(LOG_LEVEL, "a=%f, ll=%f, ll2=%f, llr=%f, llr2=%f, product=%s, p=%f, q=%f " + "(reject=%b @ %.3f, reject=%b @ %.3f)", a[0], ll, ll2, llr, llr2, product.round(new MathContext(4)).toString(), p, q, table.reject(llr, df), table.getSignificanceValue(), table2.reject(llr, df), table2.getSignificanceValue()));
        // Skip the checks if the product is too small to store in a double.
        if (product.doubleValue() > 0) {
            TestAssertions.assertTest(ll, ll2, predicate, "Log-likelihood");
            TestAssertions.assertTest(llr, llr2, predicate, "Log-likelihood ratio");
        }
    }
    // Find max using quadratic fit
    final double[] data = list.toDoubleArray();
    int index = SimpleArrayUtils.findMaxIndex(data);
    final double maxa = (double) (imin + index) / 10;
    double fita = maxa;
    try {
        if (index == 0) {
            index++;
        }
        if (index == data.length - 1) {
            index--;
        }
        final int i1 = index - 1;
        final int i2 = index;
        final int i3 = index + 1;
        fita = QuadraticUtils.findMinMax((double) (imin + i1) / 10, data[i1], (double) (imin + i2) / 10, data[i2], (double) (imin + i3) / 10, data[i3]);
    } catch (final DataException ex) {
        // Ignore
    }
    // Allow a tolerance as the random data may alter the p-value computation.
    // Should allow it to be less than 2 increments either side of the answer.
    logger.log(TestLogging.getRecord(LOG_LEVEL, "max fit = %g => %g", maxa, fita));
    Assertions.assertEquals(1, fita, 0.199, "max");
}
Also used: DataException (uk.ac.sussex.gdsc.core.data.DataException), DoubleDoubleBiPredicate (uk.ac.sussex.gdsc.test.api.function.DoubleDoubleBiPredicate), UniformRandomProvider (org.apache.commons.rng.UniformRandomProvider), DoubleArrayList (it.unimi.dsi.fastutil.doubles.DoubleArrayList), BigDecimal (java.math.BigDecimal), MathContext (java.math.MathContext)
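
The statistic under test is the Poisson log-likelihood ratio, llr = -2 (ll - mll), where mll is the log-likelihood of the saturated model u_i = x_i; the factorial terms cancel, leaving the Poisson deviance. A minimal sketch of that quantity using plain math rather than the project's PoissonCalculator (treat exact agreement with logLikelihoodRatio as an assumption that the test itself verifies):

public class PoissonDeviance {
    /** llr = -2 * (ll(u) - ll(x)) = 2 * sum_i [ u_i - x_i + x_i * ln(x_i / u_i) ]. */
    static double logLikelihoodRatio(double[] u, double[] x) {
        double llr = 0;
        for (int i = 0; i < x.length; i++) {
            llr += u[i] - x[i];
            if (x[i] > 0) {
                llr += x[i] * Math.log(x[i] / u[i]);
            }
        }
        return 2 * llr;
    }

    public static void main(String[] args) {
        // A perfect model (u == x) gives llr = 0; mismatched expectations give llr > 0
        System.out.println(logLikelihoodRatio(new double[] {2, 3}, new double[] {2, 3}));
        System.out.println(logLikelihoodRatio(new double[] {1, 5}, new double[] {2, 3}));
    }
}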

Aggregations

DoubleArrayList (it.unimi.dsi.fastutil.doubles.DoubleArrayList): 45
IntArrayList (it.unimi.dsi.fastutil.ints.IntArrayList): 10
UniformRandomProvider (org.apache.commons.rng.UniformRandomProvider): 6
LongArrayList (it.unimi.dsi.fastutil.longs.LongArrayList): 5
IntDoubleSparseVectorStorage (com.tencent.angel.ml.math2.storage.IntDoubleSparseVectorStorage): 4
IntDoubleVector (com.tencent.angel.ml.math2.vector.IntDoubleVector): 4
Plot (ij.gui.Plot): 4
ObjectArrayList (it.unimi.dsi.fastutil.objects.ObjectArrayList): 4
BooleanArrayList (it.unimi.dsi.fastutil.booleans.BooleanArrayList): 3
ByteArrayList (it.unimi.dsi.fastutil.bytes.ByteArrayList): 3
CharArrayList (it.unimi.dsi.fastutil.chars.CharArrayList): 3
FloatArrayList (it.unimi.dsi.fastutil.floats.FloatArrayList): 3
ShortArrayList (it.unimi.dsi.fastutil.shorts.ShortArrayList): 3
ArrayList (java.util.ArrayList): 3
DoubleDoubleBiPredicate (uk.ac.sussex.gdsc.test.api.function.DoubleDoubleBiPredicate): 3
LongDoubleSparseVectorStorage (com.tencent.angel.ml.math2.storage.LongDoubleSparseVectorStorage): 2
LongDoubleVector (com.tencent.angel.ml.math2.vector.LongDoubleVector): 2
BigDecimal (java.math.BigDecimal): 2
MathContext (java.math.MathContext): 2
List (java.util.List): 2