
Example 51 with Gaussian

use of org.apache.commons.math3.analysis.function.Gaussian in project GDSC-SMLM by aherbert.

the class ScmosLikelihoodWrapperTest method cumulativeProbabilityIsOneWithRealData.

private static void cumulativeProbabilityIsOneWithRealData(final double mu, double min, double max, boolean test) {
    // Test using a standard Poisson-Gaussian convolution
    // min = -max;
    // final PoissonGaussianFunction pgf = PoissonGaussianFunction.createWithVariance(1, 1, VAR);
    final UnivariateIntegrator in = new SimpsonIntegrator();
    final double pvalue = in.integrate(20000, x -> ScmosLikelihoodWrapper.likelihood(mu, VAR, G, O, x), min, max);
    // TestLog.fine(logger,"mu=%f, p=%f", mu, p);
    if (test) {
        Assertions.assertEquals(P_LIMIT, pvalue, 0.02, () -> "mu=" + mu);
    }
}
Also used : SimpsonIntegrator(org.apache.commons.math3.analysis.integration.SimpsonIntegrator) UnivariateIntegrator(org.apache.commons.math3.analysis.integration.UnivariateIntegrator)
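
The same integrate-to-one check can be written against a plain commons-math3 Gaussian. A minimal sketch (the project-specific VAR, G, O and P_LIMIT constants above are not reproduced; the 6-sigma range is an arbitrary choice):

import org.apache.commons.math3.analysis.UnivariateFunction;
import org.apache.commons.math3.analysis.function.Gaussian;
import org.apache.commons.math3.analysis.integration.SimpsonIntegrator;
import org.apache.commons.math3.analysis.integration.UnivariateIntegrator;

private static double gaussianArea(double mean, double sigma) {
    // Gaussian(mean, sigma) is normalised, so the area over +/- 6 sigma is ~1
    // (the tails beyond 6 sigma contribute less than 1e-8).
    final UnivariateFunction pdf = new Gaussian(mean, sigma);
    final UnivariateIntegrator integrator = new SimpsonIntegrator();
    return integrator.integrate(20000, pdf, mean - 6 * sigma, mean + 6 * sigma);
}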

Example 52 with Gaussian

use of org.apache.commons.math3.analysis.function.Gaussian in project GDSC-SMLM by aherbert.

the class CameraModelAnalysis method convolveHistogram.

/**
 * Convolve the histogram. The output is a discrete probability distribution.
 *
 * @param settings the settings
 * @return The histogram as { x, y } arrays of values and probabilities
 */
private static double[][] convolveHistogram(CameraModelAnalysisSettings settings) {
    // Find the range of the Poisson
    final PoissonDistribution poisson = new PoissonDistribution(settings.getPhotons());
    final int maxn = poisson.inverseCumulativeProbability(UPPER);
    final double gain = getGain(settings);
    final double noise = getReadNoise(settings);
    final boolean debug = false;
    // Build the probability mass/density function (PMF/PDF) of the distribution:
    // either a Poisson (PMF) or a Poisson-Gamma (PDF), sampled at multiples of
    // the step interval (zero elsewhere for the PMF).
    // Note: The Poisson-Gamma is computed without the Dirac delta contribution
    // at c=0. This allows the Dirac delta and the rest of the Poisson-Gamma to be
    // convolved with the Gaussian separately (so matching the simulation).
    final TDoubleArrayList list = new TDoubleArrayList();
    double step;
    String name;
    int upsample = 100;
    // Store the Dirac delta value at c=0. This must be convolved separately.
    double dirac = 0;
    // EM-CCD
    if (settings.getMode() == MODE_EM_CCD) {
        name = "Poisson-Gamma";
        final double m = gain;
        final double p = settings.getPhotons();
        dirac = PoissonGammaFunction.dirac(p);
        // Choose whether to compute a discrete PMF or a PDF using the approximation.
        // Note: The delta function at c=0 is from the PMF of the Poisson. So it is
        // a discrete contribution. This is omitted from the PDF and handled in
        // a separate convolution.
        // noise != 0;
        final boolean discrete = false;
        if (discrete) {
            // Note: This is obsolete as the Poisson-Gamma function is continuous.
            // Sampling it at integer intervals is not valid, especially for low gain.
            // The Poisson-Gamma PDF should be integrated to form a discrete PMF.
            step = 1.0;
            double upper;
            if (settings.getPhotons() < 20) {
                upper = maxn;
            } else {
                // Approximate reasonable range of Poisson as a Gaussian
                upper = settings.getPhotons() + 3 * Math.sqrt(settings.getPhotons());
            }
            final GammaDistribution gamma = new GammaDistribution(null, upper, gain, GammaDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
            final int maxc = (int) gamma.inverseCumulativeProbability(0.999);
            final int minn = Math.max(1, poisson.inverseCumulativeProbability(LOWER));
            // See Ulbrich & Isacoff (2007). Nature Methods 4, 319-321, SI equation 3.
            // Note this is not a convolution of a single Gamma distribution since the shape
            // is modified not the count. So it is a convolution of a distribution made with
            // a gamma of fixed count and variable shape.
            // The count=0 is a special case.
            list.add(PoissonGammaFunction.poissonGammaN(0, p, m));
            final long total = (maxn - minn) * (long) maxc;
            if (total < 1000000) {
                // Full computation
                // G(c) = sum_n { (1/n!) p^n e^-p * (1/((n-1)! m^n)) c^(n-1) e^(-c/m) }
                // Compute as a log:
                // -log(n!) + n*log(p) - p - log((n-1)!) - n*log(m) + (n-1)*log(c) - c/m
                // Note: Both methods work
                LogFactorialCache lfc = LOG_FACTORIAL_CACHE.get().get();
                if (lfc == null) {
                    lfc = new LogFactorialCache();
                    LOG_FACTORIAL_CACHE.set(new SoftReference<>(lfc));
                }
                lfc.ensureRange(minn - 1, maxn);
                final double[] f = new double[maxn + 1];
                final double logm = Math.log(m);
                final double logp = Math.log(p);
                for (int n = minn; n <= maxn; n++) {
                    f[n] = -lfc.getLogFactorialUnsafe(n) + n * logp - p - lfc.getLogFactorialUnsafe(n - 1) - n * logm;
                }
                // double total2 = total;
                for (int c = 1; c <= maxc; c++) {
                    double sum = 0;
                    final double c_m = c / m;
                    final double logc = Math.log(c);
                    for (int n = minn; n <= maxn; n++) {
                        sum += StdMath.exp(f[n] + (n - 1) * logc - c_m);
                    }
                    // sum2 += pd[n] * gd[n].density(c);
                    list.add(sum);
                // total += sum;
                // This should match the approximation
                // double approx = PoissonGammaFunction.poissonGamma(c, p, m);
                // total2 += approx;
                // System.out.printf("c=%d sum=%g approx=%g error=%g\n", c, sum2, approx,
                // uk.ac.sussex.gdsc.core.utils.DoubleEquality.relativeError(sum2, approx));
                }
            // System.out.printf("sum=%g approx=%g error=%g\n", total, total2,
            // uk.ac.sussex.gdsc.core.utils.DoubleEquality.relativeError(total, total2));
            } else {
                // Approximate
                for (int c = 1; c <= maxc; c++) {
                    list.add(PoissonGammaFunction.poissonGammaN(c, p, m));
                }
            }
        } else {
            // This integrates the PDF using the approximation and up-samples together.
            // Compute the sampling interval.
            step = 1.0 / upsample;
            // Reset
            upsample = 1;
            // Compute the integral of [-step/2:step/2] for each point.
            // Use trapezoid integration.
            final double step_2 = step / 2;
            double prev = PoissonGammaFunction.poissonGammaN(0, p, m);
            double next = PoissonGammaFunction.poissonGammaN(step_2, p, m);
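            // Presumably a half-interval trapezoid: the first bin covers only [0:step/2].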
            list.add((prev + next) * 0.25);
            double max = 0;
            for (int i = 1; ; i++) {
                // Each remaining point is modelling a PMF for the range [-step/2:step/2]
                prev = next;
                next = PoissonGammaFunction.poissonGammaN(i * step + step_2, p, m);
                final double pp = (prev + next) * 0.5;
                if (max < pp) {
                    max = pp;
                }
                if (pp / max < 1e-5) {
                    // Use this if non-zero since it has been calculated
                    if (pp != 0) {
                        list.add(pp);
                    }
                    break;
                }
                list.add(pp);
            }
        }
        // Ensure the combined sum of PDF and Dirac is 1
        final double expected = 1 - dirac;
        // Simpson's rule needs an even number (n) of sub-intervals, so require an odd number of points:
        if (list.size() % 2 == 0) {
            list.add(0);
        }
        final double[] g = list.toArray();
        // Number of sub intervals
        final int n = g.length - 1;
        // h = (b - a) / n = sub-interval width
        final double h = 1;
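        // Composite Simpson's rule: integral ~= (h/3) * [g[0] + 4*(g[1]+g[3]+...) + 2*(g[2]+g[4]+...) + g[n]]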
        double sum2 = 0;
        double sum4 = 0;
        for (int j = 1; j <= n / 2 - 1; j++) {
            sum2 += g[2 * j];
        }
        for (int j = 1; j <= n / 2; j++) {
            sum4 += g[2 * j - 1];
        }
        final double sum = (h / 3) * (g[0] + 2 * sum2 + 4 * sum4 + g[n]);
        // Check
        // System.out.printf("Sum=%g Expected=%g\n", sum * step, expected);
        SimpleArrayUtils.multiply(g, expected / sum);
        list.resetQuick();
        list.add(g);
    } else {
        name = "Poisson";
        // Apply fixed gain. Just change the step interval of the PMF.
        step = gain;
        for (int n = 0; n <= maxn; n++) {
            list.add(poisson.probability(n));
        }
        final double p = poisson.probability(list.size());
        if (p != 0) {
            list.add(p);
        }
    }
    // Debug
    if (debug) {
        final String title = name;
        final Plot plot = new Plot(title, "x", "y");
        plot.addPoints(SimpleArrayUtils.newArray(list.size(), 0, step), list.toArray(), Plot.LINE);
        ImageJUtils.display(title, plot);
    }
    double zero = 0;
    double[] pg = list.toArray();
    // Sample Gaussian
    if (noise > 0) {
        step /= upsample;
        pg = list.toArray();
        // Convolve with Gaussian kernel
        final double[] kernel = GaussianKernel.makeGaussianKernel(Math.abs(noise) / step, 6, true);
        if (upsample != 1) {
            // Use scaled convolution. This is faster than zero-filling the distribution.
            pg = Convolution.convolve(kernel, pg, upsample);
        } else if (dirac > 0.01) {
            // The Poisson-Gamma may be stepped at low mean causing wrap artifacts in the FFT.
            // This is a problem if most of the probability is in the Dirac.
            // Otherwise it can be ignored and the FFT version is OK.
            pg = Convolution.convolve(kernel, pg);
        } else {
            pg = Convolution.convolveFast(kernel, pg);
        }
        // The convolution will have created a larger array so we must adjust the offset for this
        final int radius = kernel.length / 2;
        zero -= radius * step;
        // Add convolution of the dirac delta function.
        if (dirac != 0) {
            // We only need to convolve the Gaussian at c=0
            for (int i = 0; i < kernel.length; i++) {
                pg[i] += kernel[i] * dirac;
            }
        }
        // Debug
        if (debug) {
            String title = "Gaussian";
            Plot plot = new Plot(title, "x", "y");
            plot.addPoints(SimpleArrayUtils.newArray(kernel.length, -radius * step, step), kernel, Plot.LINE);
            ImageJUtils.display(title, plot);
            title = name + "-Gaussian";
            plot = new Plot(title, "x", "y");
            plot.addPoints(SimpleArrayUtils.newArray(pg.length, zero, step), pg, Plot.LINE);
            ImageJUtils.display(title, plot);
        }
        zero = downSampleCdfToPmf(settings, list, step, zero, pg, 1.0);
        pg = list.toArray();
        zero = (int) Math.floor(zero);
        step = 1.0;
    // No convolution means we have the Poisson PMF/Poisson-Gamma PDF already
    } else if (step != 1) {
        // Sample to 1.0 pixel step interval.
        if (settings.getMode() == MODE_EM_CCD) {
            // Poisson-Gamma PDF
            zero = downSampleCdfToPmf(settings, list, step, zero, pg, 1 - dirac);
            pg = list.toArray();
            zero = (int) Math.floor(zero);
            // Add the dirac delta function.
            if (dirac != 0) {
                // Note: zero is the start of the x-axis. This value should be -1.
                assert (int) zero == -1;
                // Use as an offset to find the actual zero.
                pg[-(int) zero] += dirac;
            }
        } else {
            // Poisson PMF
            // Simple non-interpolated expansion.
            // This should be used when there is no Gaussian convolution.
            final double[] pd = pg;
            list.resetQuick();
            // Account for rounding.
            final Round round = getRound(settings);
            final int maxc = round.round(pd.length * step + 1);
            pg = new double[maxc];
            for (int n = pd.length; n-- > 0; ) {
                pg[round.round(n * step)] += pd[n];
            }
            if (pg[0] != 0) {
                list.add(0);
                list.add(pg);
                pg = list.toArray();
                zero--;
            }
        }
        step = 1.0;
    } else {
        // Add the dirac delta function.
        list.setQuick(0, list.getQuick(0) + dirac);
    }
    return new double[][] { SimpleArrayUtils.newArray(pg.length, zero, step), pg };
}
Also used : PoissonDistribution(uk.ac.sussex.gdsc.smlm.math3.distribution.PoissonDistribution) LogFactorialCache(uk.ac.sussex.gdsc.smlm.function.LogFactorialCache) Plot(ij.gui.Plot) TDoubleArrayList(gnu.trove.list.array.TDoubleArrayList) GammaDistribution(org.apache.commons.math3.distribution.GammaDistribution)
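
For the simple CCD branch above (a Poisson PMF at fixed gain convolved with Gaussian read noise), a self-contained sketch using only commons-math3 classes, without the up-sampling, FFT or the project's Convolution/GaussianKernel helpers (assumes noise > 0; the 4-sigma kernel radius is arbitrary):

import org.apache.commons.math3.analysis.function.Gaussian;
import org.apache.commons.math3.distribution.PoissonDistribution;

/** Convolve a Poisson PMF with a unit-spaced Gaussian read-noise kernel by direct convolution. */
private static double[] poissonGaussianPmf(double photons, double noise) {
    // Poisson PMF up to the 99.999% quantile.
    final PoissonDistribution poisson = new PoissonDistribution(photons);
    final int maxn = poisson.inverseCumulativeProbability(0.99999);
    final double[] pmf = new double[maxn + 1];
    for (int n = 0; n <= maxn; n++) {
        pmf[n] = poisson.probability(n);
    }
    // Normalised Gaussian kernel sampled at unit (pixel) spacing.
    final int radius = (int) Math.ceil(4 * noise);
    final Gaussian gauss = new Gaussian(0, noise);
    final double[] kernel = new double[2 * radius + 1];
    double sum = 0;
    for (int i = -radius; i <= radius; i++) {
        kernel[i + radius] = gauss.value(i);
        sum += kernel[i + radius];
    }
    for (int i = 0; i < kernel.length; i++) {
        kernel[i] /= sum;
    }
    // Full convolution; the x-axis origin of the result is shifted left by 'radius'.
    final double[] out = new double[pmf.length + kernel.length - 1];
    for (int n = 0; n < pmf.length; n++) {
        for (int k = 0; k < kernel.length; k++) {
            out[n + k] += pmf[n] * kernel[k];
        }
    }
    return out;
}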

Example 53 with Gaussian

use of org.apache.commons.math3.analysis.function.Gaussian in project GDSC-SMLM by aherbert.

the class Fire method fitGaussian.

/**
 * Fit a Gaussian to the data.
 *
 * @param x the x values
 * @param y the y values
 * @return new double[] { norm, mean, sigma }, or the initial guess (possibly null) if the fit fails
 */
private static double[] fitGaussian(float[] x, float[] y) {
    final WeightedObservedPoints obs = new WeightedObservedPoints();
    for (int i = 0; i < x.length; i++) {
        obs.add(x[i], y[i]);
    }
    final Collection<WeightedObservedPoint> observations = obs.toList();
    final GaussianCurveFitter fitter = GaussianCurveFitter.create().withMaxIterations(2000);
    final GaussianCurveFitter.ParameterGuesser guess = new GaussianCurveFitter.ParameterGuesser(observations);
    double[] initialGuess = null;
    try {
        initialGuess = guess.guess();
        return fitter.withStartPoint(initialGuess).fit(observations);
    } catch (final Exception ex) {
    // We are expecting TooManyEvaluationsException.
    // Just in case there is another exception type, or the initial estimate failed
    // we catch all exceptions.
    }
    return initialGuess;
}
Also used : WeightedObservedPoints(org.apache.commons.math3.fitting.WeightedObservedPoints) WeightedObservedPoint(org.apache.commons.math3.fitting.WeightedObservedPoint) WeightedObservedPoint(org.apache.commons.math3.fitting.WeightedObservedPoint) GaussianCurveFitter(org.apache.commons.math3.fitting.GaussianCurveFitter) DataException(uk.ac.sussex.gdsc.core.data.DataException) ConversionException(uk.ac.sussex.gdsc.core.data.utils.ConversionException)
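
A possible use of the fitted parameters with the commons-math3 Gaussian function (the helper below is hypothetical; the null check matters because the fit, and even the initial guess, can fail):

import org.apache.commons.math3.analysis.function.Gaussian;

// Sketch only: fitGaussian returns { norm, mean, sigma }, the same order as the
// Gaussian(norm, mean, sigma) constructor.
private static double fittedPeakHeight(float[] x, float[] y) {
    final double[] fit = fitGaussian(x, y);
    if (fit == null) {
        return Double.NaN; // the fit (and the initial guess) failed
    }
    final Gaussian curve = new Gaussian(fit[0], fit[1], fit[2]);
    return curve.value(fit[1]); // equals fit[0], the height at the mean
}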

Example 54 with Gaussian

use of org.apache.commons.math3.analysis.function.Gaussian in project GDSC-SMLM by aherbert.

the class Fire method runQEstimation.

@SuppressWarnings("null")
private void runQEstimation() {
    IJ.showStatus(pluginTitle + " ...");
    if (!showQEstimationInputDialog()) {
        return;
    }
    MemoryPeakResults inputResults = ResultsManager.loadInputResults(settings.inputOption, false, null, null);
    if (MemoryPeakResults.isEmpty(inputResults)) {
        IJ.error(pluginTitle, "No results could be loaded");
        return;
    }
    if (inputResults.getCalibration() == null) {
        IJ.error(pluginTitle, "The results are not calibrated");
        return;
    }
    inputResults = cropToRoi(inputResults);
    if (inputResults.size() < 2) {
        IJ.error(pluginTitle, "No results within the crop region");
        return;
    }
    initialise(inputResults, null);
    // We need localisation precision.
    // Build a histogram of the localisation precision.
    // Get the initial mean and SD and plot as a Gaussian.
    final PrecisionHistogram histogram = calculatePrecisionHistogram();
    if (histogram == null) {
        IJ.error(pluginTitle, "No localisation precision available.\n \nPlease choose " + PrecisionMethod.FIXED + " and enter a precision mean and SD.");
        return;
    }
    final StoredDataStatistics precision = histogram.precision;
    final double fourierImageScale = Settings.scaleValues[settings.imageScaleIndex];
    final int imageSize = Settings.imageSizeValues[settings.imageSizeIndex];
    // Create the image and compute the numerator of FRC.
    // Do not use the signal so results.size() is the number of localisations.
    IJ.showStatus("Computing FRC curve ...");
    final FireImages images = createImages(fourierImageScale, imageSize, false);
    // DEBUGGING - Save the two images to disk. Load the images into the Matlab
    // code that calculates the Q-estimation and make this plugin match the functionality.
    // IJ.save(new ImagePlus("i1", images.ip1), "/scratch/i1.tif");
    // IJ.save(new ImagePlus("i2", images.ip2), "/scratch/i2.tif");
    final Frc frc = new Frc();
    frc.setTrackProgress(progress);
    frc.setFourierMethod(fourierMethod);
    frc.setSamplingMethod(samplingMethod);
    frc.setPerimeterSamplingFactor(settings.perimeterSamplingFactor);
    final FrcCurve frcCurve = frc.calculateFrcCurve(images.ip1, images.ip2, images.nmPerPixel);
    if (frcCurve == null) {
        IJ.error(pluginTitle, "Failed to compute FRC curve");
        return;
    }
    IJ.showStatus("Running Q-estimation ...");
    // Note:
    // The method implemented here is based on Matlab code provided by Bernd Rieger.
    // The idea is to compute the spurious correlation component of the FRC Numerator
    // using an initial estimate of the distribution of the localisation precision (assumed
    // to be Gaussian). This component is the contribution of repeat localisations of
    // the same molecule to the numerator and is modelled as an exponential decay
    // (exp_decay). The component is scaled by the Q-value which
    // is the average number of times a molecule is seen in addition to the first time.
    // At large spatial frequencies the scaled component should match the numerator,
    // i.e. at high resolution (low FIRE number) the numerator is made up of repeat
    // localisations of the same molecule and not actual structure in the image.
    // The best fit is where the numerator equals the scaled component, i.e. num / (q*exp_decay) == 1.
    // The FRC Numerator is plotted and Q can be determined by
    // adjusting Q and the precision mean and SD to maximise the cost function.
    // This can be done interactively by the user with the effect on the FRC curve
    // dynamically updated and displayed.
    // Compute the scaled FRC numerator
    final double qNorm = (1 / frcCurve.mean1 + 1 / frcCurve.mean2);
    final double[] frcnum = new double[frcCurve.getSize()];
    for (int i = 0; i < frcnum.length; i++) {
        final FrcCurveResult r = frcCurve.get(i);
        frcnum[i] = qNorm * r.getNumerator() / r.getNumberOfSamples();
    }
    // Compute the spatial frequency and the region for curve fitting
    final double[] q = Frc.computeQ(frcCurve, false);
    int low = 0;
    int high = q.length;
    while (high > 0 && q[high - 1] > settings.maxQ) {
        high--;
    }
    while (low < q.length && q[low] < settings.minQ) {
        low++;
    }
    // Require we fit at least 10% of the curve
    if (high - low < q.length * 0.1) {
        IJ.error(pluginTitle, "Not enough points for Q estimation");
        return;
    }
    // Obtain initial estimate of Q plateau height and decay.
    // This can be done by fitting the precision histogram and then fixing the mean and sigma.
    // Or it can be done by allowing the precision to be sampled and the mean and sigma
    // become parameters for fitting.
    // Check if we can sample precision values
    final boolean sampleDecay = precision != null && settings.sampleDecay;
    double[] expDecay;
    if (sampleDecay) {
        // Random sample of precision values from the distribution is used to
        // construct the decay curve
        final int[] sample = RandomUtils.sample(10000, precision.getN(), UniformRandomProviders.create());
        final double four_pi2 = 4 * Math.PI * Math.PI;
        final double[] pre = new double[q.length];
        for (int i = 1; i < q.length; i++) {
            pre[i] = -four_pi2 * q[i] * q[i];
        }
        // Sample
        final int n = sample.length;
        final double[] hq = new double[n];
        for (int j = 0; j < n; j++) {
            // Scale to SR pixels
            double s2 = precision.getValue(sample[j]) / images.nmPerPixel;
            s2 *= s2;
            for (int i = 1; i < q.length; i++) {
                hq[i] += StdMath.exp(pre[i] * s2);
            }
        }
        for (int i = 1; i < q.length; i++) {
            hq[i] /= n;
        }
        expDecay = new double[q.length];
        expDecay[0] = 1;
        for (int i = 1; i < q.length; i++) {
            final double sinc_q = sinc(Math.PI * q[i]);
            expDecay[i] = sinc_q * sinc_q * hq[i];
        }
    } else {
        // Note: The precision mean and SD should be in units of super-resolution
        // pixels, so scale to SR pixels.
        expDecay = computeExpDecay(histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, q);
    }
    // Smoothing
    double[] smooth;
    if (settings.loessSmoothing) {
        // Note: This computes the log then smooths it
        final double bandwidth = 0.1;
        final int robustness = 0;
        final double[] l = new double[expDecay.length];
        for (int i = 0; i < l.length; i++) {
            // The original Matlab code computes the log of each array separately.
            // This is equivalent to a single log of the ratio of the two, though
            // the two-log form may be more numerically stable.
            // l[i] = Math.log(Math.abs(frcnum[i])) - Math.log(exp_decay[i]);
            l[i] = Math.log(Math.abs(frcnum[i] / expDecay[i]));
        }
        try {
            final LoessInterpolator loess = new LoessInterpolator(bandwidth, robustness);
            smooth = loess.smooth(q, l);
        } catch (final Exception ex) {
            IJ.error(pluginTitle, "LOESS smoothing failed");
            return;
        }
    } else {
        // Note: This smooths the curve before computing the log
        final double[] norm = new double[expDecay.length];
        for (int i = 0; i < norm.length; i++) {
            norm[i] = frcnum[i] / expDecay[i];
        }
        // Median window of 5 == radius of 2
        final DoubleMedianWindow mw = DoubleMedianWindow.wrap(norm, 2);
        smooth = new double[expDecay.length];
        for (int i = 0; i < norm.length; i++) {
            smooth[i] = Math.log(Math.abs(mw.getMedian()));
            mw.increment();
        }
    }
    // Fit with quadratic to find the initial guess.
    // Note: example Matlab code frc_Qcorrection7.m identifies regions of the
    // smoothed log curve with low derivative and only fits those. The fit is
    // used for the final estimate. Fitting a subset with low derivative is not
    // implemented here since the initial estimate is subsequently optimised
    // to maximise a cost function.
    final Quadratic curve = new Quadratic();
    final SimpleCurveFitter fit = SimpleCurveFitter.create(curve, new double[2]);
    final WeightedObservedPoints points = new WeightedObservedPoints();
    for (int i = low; i < high; i++) {
        points.add(q[i], smooth[i]);
    }
    final double[] estimate = fit.fit(points.toList());
    double qvalue = StdMath.exp(estimate[0]);
    // This could be made an option. Just use for debugging
    final boolean debug = false;
    if (debug) {
        // Plot the initial fit and the fit curve
        final double[] qScaled = Frc.computeQ(frcCurve, true);
        final double[] line = new double[q.length];
        for (int i = 0; i < q.length; i++) {
            line[i] = curve.value(q[i], estimate);
        }
        final String title = pluginTitle + " Initial fit";
        final Plot plot = new Plot(title, "Spatial Frequency (nm^-1)", "FRC Numerator");
        final String label = String.format("Q = %.3f", qvalue);
        plot.addPoints(qScaled, smooth, Plot.LINE);
        plot.setColor(Color.red);
        plot.addPoints(qScaled, line, Plot.LINE);
        plot.setColor(Color.black);
        plot.addLabel(0, 0, label);
        ImageJUtils.display(title, plot, ImageJUtils.NO_TO_FRONT);
    }
    if (settings.fitPrecision) {
        // Q - Should this be optional?
        if (sampleDecay) {
            // If a sample of the precision was used to construct the data for the initial fit
            // then update the estimate using the fit result since it will be a better start point.
            histogram.sigma = precision.getStandardDeviation();
            // Normalise sum-of-squares to the SR pixel size
            final double meanSumOfSquares = (precision.getSumOfSquares() / (images.nmPerPixel * images.nmPerPixel)) / precision.getN();
            histogram.mean = images.nmPerPixel * Math.sqrt(meanSumOfSquares - estimate[1] / (4 * Math.PI * Math.PI));
        }
        // Do a multivariate fit ...
        final SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
        PointValuePair pair = null;
        final MultiPlateauness f = new MultiPlateauness(frcnum, q, low, high);
        final double[] initial = new double[] { histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, qvalue };
        pair = findMin(pair, opt, f, scale(initial, 0.1));
        pair = findMin(pair, opt, f, scale(initial, 0.5));
        pair = findMin(pair, opt, f, initial);
        pair = findMin(pair, opt, f, scale(initial, 2));
        pair = findMin(pair, opt, f, scale(initial, 10));
        if (pair != null) {
            final double[] point = pair.getPointRef();
            histogram.mean = point[0] * images.nmPerPixel;
            histogram.sigma = point[1] * images.nmPerPixel;
            qvalue = point[2];
        }
    } else {
        // If so then this should be optional.
        if (sampleDecay) {
            if (precisionMethod != PrecisionMethod.FIXED) {
                histogram.sigma = precision.getStandardDeviation();
                // Normalise sum-of-squares to the SR pixel size
                final double meanSumOfSquares = (precision.getSumOfSquares() / (images.nmPerPixel * images.nmPerPixel)) / precision.getN();
                histogram.mean = images.nmPerPixel * Math.sqrt(meanSumOfSquares - estimate[1] / (4 * Math.PI * Math.PI));
            }
            expDecay = computeExpDecay(histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, q);
        }
        // Estimate spurious component by promoting plateauness.
        // The Matlab code used random initial points for a Simplex optimiser.
        // A Brent line search should be pretty deterministic so do simple repeats.
        // However it will proceed downhill so if the initial point is wrong then
        // it will find a sub-optimal result.
        final UnivariateOptimizer o = new BrentOptimizer(1e-3, 1e-6);
        final Plateauness f = new Plateauness(frcnum, expDecay, low, high);
        UnivariatePointValuePair result = null;
        result = findMin(result, o, f, qvalue, 0.1);
        result = findMin(result, o, f, qvalue, 0.2);
        result = findMin(result, o, f, qvalue, 0.333);
        result = findMin(result, o, f, qvalue, 0.5);
        // Do some Simplex repeats as well
        final SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
        result = findMin(result, opt, f, qvalue * 0.1);
        result = findMin(result, opt, f, qvalue * 0.5);
        result = findMin(result, opt, f, qvalue);
        result = findMin(result, opt, f, qvalue * 2);
        result = findMin(result, opt, f, qvalue * 10);
        if (result != null) {
            qvalue = result.getPoint();
        }
    }
    final QPlot qplot = new QPlot(frcCurve, qvalue, low, high);
    // Interactive dialog to estimate Q (blinking events per fluorophore) using
    // sliders for the mean and standard deviation of the localisation precision.
    showQEstimationDialog(histogram, qplot, images.nmPerPixel);
    IJ.showStatus(pluginTitle + " complete");
}
Also used : DoubleMedianWindow(uk.ac.sussex.gdsc.core.utils.DoubleMedianWindow) BrentOptimizer(org.apache.commons.math3.optim.univariate.BrentOptimizer) PointValuePair(org.apache.commons.math3.optim.PointValuePair) UnivariatePointValuePair(org.apache.commons.math3.optim.univariate.UnivariatePointValuePair) LoessInterpolator(org.apache.commons.math3.analysis.interpolation.LoessInterpolator) WeightedObservedPoints(org.apache.commons.math3.fitting.WeightedObservedPoints) FrcCurve(uk.ac.sussex.gdsc.smlm.ij.frc.Frc.FrcCurve) SimplexOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer) MemoryPeakResults(uk.ac.sussex.gdsc.smlm.results.MemoryPeakResults) FrcCurveResult(uk.ac.sussex.gdsc.smlm.ij.frc.Frc.FrcCurveResult) SimpleCurveFitter(org.apache.commons.math3.fitting.SimpleCurveFitter) Plot(ij.gui.Plot) HistogramPlot(uk.ac.sussex.gdsc.core.ij.HistogramPlot) StoredDataStatistics(uk.ac.sussex.gdsc.core.utils.StoredDataStatistics) UnivariatePointValuePair(org.apache.commons.math3.optim.univariate.UnivariatePointValuePair) WeightedObservedPoint(org.apache.commons.math3.fitting.WeightedObservedPoint) DataException(uk.ac.sussex.gdsc.core.data.DataException) ConversionException(uk.ac.sussex.gdsc.core.data.utils.ConversionException) Frc(uk.ac.sussex.gdsc.smlm.ij.frc.Frc) UnivariateOptimizer(org.apache.commons.math3.optim.univariate.UnivariateOptimizer)
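
The decay model described in the comments can be stated compactly. A sketch of the sampled form used above, with the precision values already scaled to super-resolution pixels (this is not the project's computeExpDecay, which handles the fixed mean/SD case):

// H(q) = sinc^2(pi*q) * < exp(-4*pi^2*q^2*s^2) >, averaged over localisation precisions s.
private static double[] sampledExpDecay(double[] q, double[] precisionSrPixels) {
    final double fourPi2 = 4 * Math.PI * Math.PI;
    final double[] decay = new double[q.length];
    decay[0] = 1;
    for (int i = 1; i < q.length; i++) {
        double hq = 0;
        for (final double s : precisionSrPixels) {
            hq += Math.exp(-fourPi2 * q[i] * q[i] * s * s);
        }
        hq /= precisionSrPixels.length;
        final double x = Math.PI * q[i];
        final double sinc = Math.sin(x) / x;
        decay[i] = sinc * sinc * hq;
    }
    return decay;
}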

Example 55 with Gaussian

use of org.apache.commons.math3.analysis.function.Gaussian in project GDSC-SMLM by aherbert.

the class EmGainAnalysis method fit.

/**
 * Fit the EM-gain distribution (Gaussian * Gamma).
 *
 * @param histogram The distribution
 */
private void fit(int[] histogram) {
    final int[] limits = limits(histogram);
    final double[] x = getX(limits);
    final double[] y = getY(histogram, limits);
    Plot plot = new Plot(TITLE, "ADU", "Frequency");
    double yMax = MathUtils.max(y);
    plot.setLimits(limits[0], limits[1], 0, yMax);
    plot.setColor(Color.black);
    plot.addPoints(x, y, Plot.DOT);
    ImageJUtils.display(TITLE, plot);
    // Estimate remaining parameters.
    // Assuming a gamma_distribution(shape,scale) then mean = shape * scale
    // scale = gain
    // shape = Photons = mean / gain
    double mean = getMean(histogram) - settings.bias;
    // Note: if the bias is too high then the mean will be negative. Just move the bias.
    while (mean < 0) {
        settings.bias -= 1;
        mean += 1;
    }
    double photons = mean / settings.gain;
    if (settings.settingSimulate) {
        ImageJUtils.log("Simulated bias=%d, gain=%s, noise=%s, photons=%s", (int) settings.settingBias, MathUtils.rounded(settings.settingGain), MathUtils.rounded(settings.settingNoise), MathUtils.rounded(settings.settingPhotons));
    }
    ImageJUtils.log("Estimate bias=%d, gain=%s, noise=%s, photons=%s", (int) settings.bias, MathUtils.rounded(settings.gain), MathUtils.rounded(settings.noise), MathUtils.rounded(photons));
    final int max = (int) x[x.length - 1];
    double[] pg = pdf(max, photons, settings.gain, settings.noise, (int) settings.bias);
    plot.setColor(Color.blue);
    plot.addPoints(x, pg, Plot.LINE);
    ImageJUtils.display(TITLE, plot);
    // Perform a fit
    final CustomPowellOptimizer o = new CustomPowellOptimizer(1e-6, 1e-16, 1e-6, 1e-16);
    final double[] startPoint = new double[] { photons, settings.gain, settings.noise, settings.bias };
    int maxEval = 3000;
    final String[] paramNames = { "Photons", "Gain", "Noise", "Bias" };
    // Set bounds
    final double[] lower = new double[] { 0, 0.5 * settings.gain, 0, settings.bias - settings.noise };
    final double[] upper = new double[] { 2 * photons, 2 * settings.gain, settings.gain, settings.bias + settings.noise };
    // Restart until converged.
    // TODO - Maybe fix this with a better optimiser. This needs to be tested on real data.
    PointValuePair solution = null;
    for (int iter = 0; iter < 3; iter++) {
        IJ.showStatus("Fitting histogram ... Iteration " + iter);
        try {
            // Basic Powell optimiser
            final MultivariateFunction fun = getFunction(limits, y, max, maxEval);
            final PointValuePair optimum = o.optimize(new MaxEval(maxEval), new ObjectiveFunction(fun), GoalType.MINIMIZE, new InitialGuess((solution == null) ? startPoint : solution.getPointRef()));
            if (solution == null || optimum.getValue() < solution.getValue()) {
                final double[] point = optimum.getPointRef();
                // Check the bounds
                for (int i = 0; i < point.length; i++) {
                    if (point[i] < lower[i] || point[i] > upper[i]) {
                        throw new ComputationException(String.format("Fit out of estimated range: %s %f", paramNames[i], point[i]));
                    }
                }
                solution = optimum;
            }
        } catch (final Exception ex) {
            IJ.log("Powell error: " + ex.getMessage());
            if (ex instanceof TooManyEvaluationsException) {
                maxEval = (int) (maxEval * 1.5);
            }
        }
        try {
            // Bounded Powell optimiser
            final MultivariateFunction fun = getFunction(limits, y, max, maxEval);
            final MultivariateFunctionMappingAdapter adapter = new MultivariateFunctionMappingAdapter(fun, lower, upper);
            PointValuePair optimum = o.optimize(new MaxEval(maxEval), new ObjectiveFunction(adapter), GoalType.MINIMIZE, new InitialGuess(adapter.boundedToUnbounded((solution == null) ? startPoint : solution.getPointRef())));
            final double[] point = adapter.unboundedToBounded(optimum.getPointRef());
            optimum = new PointValuePair(point, optimum.getValue());
            if (solution == null || optimum.getValue() < solution.getValue()) {
                solution = optimum;
            }
        } catch (final Exception ex) {
            IJ.log("Bounded Powell error: " + ex.getMessage());
            if (ex instanceof TooManyEvaluationsException) {
                maxEval = (int) (maxEval * 1.5);
            }
        }
    }
    ImageJUtils.finished();
    if (solution == null) {
        ImageJUtils.log("Failed to fit the distribution");
        return;
    }
    final double[] point = solution.getPointRef();
    photons = point[0];
    settings.gain = point[1];
    settings.noise = point[2];
    settings.bias = (int) Math.round(point[3]);
    final String label = String.format("Fitted bias=%d, gain=%s, noise=%s, photons=%s", (int) settings.bias, MathUtils.rounded(settings.gain), MathUtils.rounded(settings.noise), MathUtils.rounded(photons));
    ImageJUtils.log(label);
    if (settings.settingSimulate) {
        ImageJUtils.log("Relative Error bias=%s, gain=%s, noise=%s, photons=%s", MathUtils.rounded(relativeError(settings.bias, settings.settingBias)), MathUtils.rounded(relativeError(settings.gain, settings.settingGain)), MathUtils.rounded(relativeError(settings.noise, settings.settingNoise)), MathUtils.rounded(relativeError(photons, settings.settingPhotons)));
    }
    // Show the PoissonGammaGaussian approximation
    double[] approxValues = null;
    if (settings.showApproximation) {
        approxValues = new double[x.length];
        final PoissonGammaGaussianFunction fun = new PoissonGammaGaussianFunction(1.0 / settings.gain, settings.noise);
        final double expected = photons * settings.gain;
        for (int i = 0; i < approxValues.length; i++) {
            approxValues[i] = fun.likelihood(x[i] - settings.bias, expected);
        }
        yMax = MathUtils.maxDefault(yMax, approxValues);
    }
    // Replot
    pg = pdf(max, photons, settings.gain, settings.noise, (int) settings.bias);
    plot = new Plot(TITLE, "ADU", "Frequency");
    plot.setLimits(limits[0], limits[1], 0, yMax * 1.05);
    plot.setColor(Color.black);
    plot.addPoints(x, y, Plot.DOT);
    plot.setColor(Color.red);
    plot.addPoints(x, pg, Plot.LINE);
    plot.addLabel(0, 0, label);
    if (settings.showApproximation) {
        plot.setColor(Color.blue);
        plot.addPoints(x, approxValues, Plot.LINE);
    }
    ImageJUtils.display(TITLE, plot);
}
Also used : MaxEval(org.apache.commons.math3.optim.MaxEval) InitialGuess(org.apache.commons.math3.optim.InitialGuess) Plot(ij.gui.Plot) ObjectiveFunction(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction) Point(java.awt.Point) ComputationException(uk.ac.sussex.gdsc.core.data.ComputationException) TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) PointValuePair(org.apache.commons.math3.optim.PointValuePair) MultivariateFunction(org.apache.commons.math3.analysis.MultivariateFunction) MultivariateFunctionMappingAdapter(org.apache.commons.math3.optim.nonlinear.scalar.MultivariateFunctionMappingAdapter) TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) ComputationException(uk.ac.sussex.gdsc.core.data.ComputationException) CustomPowellOptimizer(uk.ac.sussex.gdsc.smlm.math3.optim.nonlinear.scalar.noderiv.CustomPowellOptimizer) PoissonGammaGaussianFunction(uk.ac.sussex.gdsc.smlm.function.PoissonGammaGaussianFunction)
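
The bounded fit in the second try block follows a generic commons-math3 pattern. A minimal sketch with the stock PowellOptimizer and a placeholder objective (not the plugin's histogram score or its CustomPowellOptimizer; the bounds and start point are illustrative):

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.MultivariateFunctionMappingAdapter;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.PowellOptimizer;

private static PointValuePair boundedPowell() {
    // Placeholder objective: sum of squares (stands in for the histogram fit score).
    final MultivariateFunction fun = point -> {
        double ss = 0;
        for (final double v : point) {
            ss += v * v;
        }
        return ss;
    };
    final double[] lower = { 0, 0.5, 0, -5 };
    final double[] upper = { 10, 2.0, 1, 5 };
    final double[] start = { 1, 1.0, 0.5, 0 };
    // Map the bounded problem onto an unbounded one so the (unconstrained) Powell method applies.
    final MultivariateFunctionMappingAdapter adapter = new MultivariateFunctionMappingAdapter(fun, lower, upper);
    final PowellOptimizer optimizer = new PowellOptimizer(1e-6, 1e-16);
    final PointValuePair optimum = optimizer.optimize(new MaxEval(3000), new ObjectiveFunction(adapter), GoalType.MINIMIZE, new InitialGuess(adapter.boundedToUnbounded(start)));
    // Convert the solution back to the bounded parameter space before reporting it.
    return new PointValuePair(adapter.unboundedToBounded(optimum.getPointRef()), optimum.getValue());
}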

Aggregations

ArrayList (java.util.ArrayList)14 WeightedObservedPoint (org.apache.commons.math3.fitting.WeightedObservedPoint)12 DescriptiveStatistics (org.apache.commons.math3.stat.descriptive.DescriptiveStatistics)10 TDoubleArrayList (gnu.trove.list.array.TDoubleArrayList)8 Plot (ij.gui.Plot)8 List (java.util.List)8 TooManyEvaluationsException (org.apache.commons.math3.exception.TooManyEvaluationsException)8 PointValuePair (org.apache.commons.math3.optim.PointValuePair)8 GaussianRandomGenerator (org.apache.commons.math3.random.GaussianRandomGenerator)8 MersenneTwister (org.apache.commons.math3.random.MersenneTwister)8 RandomGenerator (org.apache.commons.math3.random.RandomGenerator)8 Test (org.junit.jupiter.api.Test)8 ImagePlus (ij.ImagePlus)6 DataException (uk.ac.sussex.gdsc.core.data.DataException)6 ExtendedGenericDialog (uk.ac.sussex.gdsc.core.ij.gui.ExtendedGenericDialog)6 IJ (ij.IJ)5 WindowManager (ij.WindowManager)5 GenericDialog (ij.gui.GenericDialog)5 PlugIn (ij.plugin.PlugIn)5 MaxEval (org.apache.commons.math3.optim.MaxEval)5