Example 16 with Fraction

use of org.apache.commons.math3.fraction.Fraction in project GDSC-SMLM by aherbert.

the class ImagePSFModel method sample.

private double[][] sample(final int n, double x0, double x1, double x2) {
    final int slice = getSlice(x2);
    if (slice < 0 || slice >= sumImage.length)
        return new double[][] { null, null };
    final double[] sumPsf = cumulativeImage[slice];
    final RandomGenerator randomX, randomY;
    // Use the input generator
    randomX = rand.getRandomGenerator();
    randomY = rand.getRandomGenerator();
    //// Debugging - Use a uniform distribution to sample x
    //randomX = new AbstractRandomGenerator()
    //{
    //	int pos = 0;
    //
    //	@Override
    //	public double nextDouble()
    //	{
    //		double p = (double) pos / n;
    //		if (pos++ >= n)
    //			pos = 0;
    //		return p;
    //	}
    //
    //	@Override
    //	public void setSeed(long seed)
    //	{
    //		pos = Math.abs((int) seed) % n;
    //	}
    //};
    //// Debugging - Use a fixed distribution to sample y
    //randomY = new AbstractRandomGenerator()
    //{
    //	public double nextDouble()
    //	{
    //		return 0.5;
    //	}
    //
    //	@Override
    //	public void setSeed(long seed)
    //	{
    //	}
    //};
    // Ensure the generated index is adjusted to the correct position
    // The index will be generated at 0,0 of a pixel in the PSF image.
    // We must subtract the PSF centre so that the middle coords are zero.
    x0 -= xyCentre[slice][0] * unitsPerPixel;
    x1 -= xyCentre[slice][1] * unitsPerPixel;
    //x0 -= 0.5 * psfWidth * unitsPerPixel;
    //x1 -= 0.5 * psfWidth * unitsPerPixel;
    final double max = sumPsf[sumPsf.length - 1];
    double[] x = new double[n];
    double[] y = new double[n];
    int count = 0;
    double sx = 0, sy = 0, s = 0;
    for (int i = 0; i < n; i++) {
        final double p = randomX.nextDouble();
        // If outside the observed PSF then skip 
        if (p > max)
            continue;
        final int index = findIndex(sumPsf, p);
        // Interpolate xi using the fraction of the pixel
        double xi = index % psfWidth;
        xi += (p - sumPsf[index]) / (sumPsf[index + 1] - sumPsf[index]);
        // Add random dither within pixel for y
        final double yi = randomY.nextDouble() + (index / psfWidth);
        if (COM_CHECK) {
            final double v = 1;
            sx += xi * v;
            sy += yi * v;
            s += v;
        }
        x[count] = x0 + (xi * this.unitsPerPixel);
        y[count] = x1 + (yi * this.unitsPerPixel);
        count++;
    }
    if (COM_CHECK) {
        sx = sx / s - xyCentre[slice][0];
        sy = sy / s - xyCentre[slice][1];
        System.out.printf("%dx%d sample centre [ %f %f ] ( %f %f )\n", psfWidth, psfWidth, sx, sy, sx / unitsPerPixel, sy / unitsPerPixel);
    }
    x = Arrays.copyOf(x, count);
    y = Arrays.copyOf(y, count);
    return new double[][] { x, y };
}
Also used : RandomGenerator(org.apache.commons.math3.random.RandomGenerator)
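
The sampling loop above is inverse-transform sampling: draw a uniform deviate p, locate the pixel whose cumulative sum brackets p, then interpolate the fraction within that pixel. A minimal standalone sketch of the same idea, assuming a non-decreasing cumulative array (hypothetical helper; the project's findIndex and the y dithering are simplified away):

import java.util.Arrays;
import java.util.Random;

public class CumulativeSampler {
    // Draw one (fractional) index from a non-decreasing cumulative distribution
    // using inverse-transform sampling, interpolating within the chosen bin
    static double sample(double[] cdf, Random rng) {
        final double p = rng.nextDouble() * cdf[cdf.length - 1];
        // binarySearch returns (-insertionPoint - 1) when the key is absent
        int index = Arrays.binarySearch(cdf, p);
        if (index < 0)
            index = -(index + 1);
        if (index == 0)
            return 0;
        // Linear interpolation of the position inside the bin, mirroring
        // the xi interpolation in ImagePSFModel.sample above
        final double lower = cdf[index - 1];
        return (index - 1) + (p - lower) / (cdf[index] - lower);
    }

    public static void main(String[] args) {
        double[] cdf = { 0.1, 0.4, 0.8, 1.0 };
        System.out.println(sample(cdf, new Random(42)));
    }
}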

Example 17 with Fraction

use of org.apache.commons.math3.fraction.Fraction in project GDSC-SMLM by aherbert.

the class SCMOSLikelihoodWrapperTest method functionComputesTargetGradient.

private void functionComputesTargetGradient(Gaussian2DFunction f1, int targetParameter, double threshold) {
    int[] indices = f1.gradientIndices();
    int gradientIndex = findGradientIndex(f1, targetParameter);
    double[] dyda = new double[indices.length];
    double[] a;
    SCMOSLikelihoodWrapper ff1;
    int n = maxx * maxx;
    int count = 0, total = 0;
    RandomDataGenerator rdg = new RandomDataGenerator(new Well19937c(30051977));
    for (double background : testbackground)
    for (double signal1 : testsignal1)
    for (double angle1 : testangle1)
    for (double cx1 : testcx1)
    for (double cy1 : testcy1)
    for (double[] w1 : testw1) {
        a = createParameters(background, signal1, angle1, cx1, cy1, w1[0], w1[1]);
        // Create y as a function we would want to move towards
        double[] a2 = a.clone();
        a2[targetParameter] *= 1.3;
        f1.initialise(a2);
        double[] data = new double[n];
        for (int i = 0; i < n; i++) {
            // Simulate sCMOS camera
            double u = f1.eval(i);
            data[i] = rdg.nextPoisson(u) * g[i] + rdg.nextGaussian(o[i], sd[i]);
        }
        ff1 = new SCMOSLikelihoodWrapper(f1, a, data, n, var, g, o);
        // Numerically estimate the gradient.
        // Calculate the step size h to be an exact numerical representation
        final double xx = a[targetParameter];
        // Get h to minimise roundoff error
        double h = Precision.representableDelta(xx, h_);
        ff1.likelihood(getVariables(indices, a), dyda);
        // Evaluate at (x+h) and (x-h)
        a[targetParameter] = xx + h;
        double value2 = ff1.likelihood(getVariables(indices, a));
        a[targetParameter] = xx - h;
        double value3 = ff1.likelihood(getVariables(indices, a));
        double gradient = (value2 - value3) / (2 * h);
        boolean ok = Math.signum(gradient) == Math.signum(dyda[gradientIndex]) || Math.abs(gradient - dyda[gradientIndex]) < 0.1;
        if (!ok)
            Assert.fail(NAME[targetParameter] + ": " + gradient + " != " + dyda[gradientIndex]);
        ok = eq.almostEqualRelativeOrAbsolute(gradient, dyda[gradientIndex]);
        if (ok)
            count++;
        total++;
    }
    double p = (100.0 * count) / total;
    logf("%s : %s = %d / %d (%.2f)\n", f1.getClass().getSimpleName(), NAME[targetParameter], count, total, p);
    Assert.assertTrue(NAME[targetParameter] + " fraction too low: " + p, p > threshold);
}
Also used : RandomDataGenerator(org.apache.commons.math3.random.RandomDataGenerator) Well19937c(org.apache.commons.math3.random.Well19937c)
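
The gradient check above is a symmetric central difference, (f(x+h) - f(x-h)) / (2h), with the step conditioned by Precision.representableDelta so that x+h and x-h are exactly representable and round-off is minimised. A minimal sketch against a function with a known derivative (toy function, not the project's likelihood):

import org.apache.commons.math3.util.Precision;

public class CentralDifferenceCheck {
    public static void main(String[] args) {
        // f(x) = x^3 has the analytic gradient 3x^2
        final double x = 1.234;
        final double analytic = 3 * x * x;
        // Adjust the requested step so that (x + h) - x is exact in binary
        final double h = Precision.representableDelta(x, 1e-3);
        final double numeric = (Math.pow(x + h, 3) - Math.pow(x - h, 3)) / (2 * h);
        System.out.printf("analytic=%g numeric=%g |diff|=%g%n",
                analytic, numeric, Math.abs(numeric - analytic));
    }
}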

Example 18 with Fraction

use of org.apache.commons.math3.fraction.Fraction in project GDSC-SMLM by aherbert.

the class SimpleRecombiner method recombine.

private ChromosomePair<T> recombine(Chromosome<T> chromosome1, Chromosome<T> chromosome2, double[] s1, double[] s2) {
    int nCrossovers = 1;
    if (fraction > 0)
        nCrossovers = Math.max(1, (int) random.nextPoisson(fraction * chromosome1.length()));
    // Randomly select positions using a partial Fisher-Yates shuffle
    int[] positions = new int[s1.length];
    for (int i = 0; i < positions.length; i++) positions[i] = i;
    RandomGenerator ran = random.getRandomGenerator();
    for (int i = positions.length, n = nCrossovers; i-- > 1 && n-- > 0; ) {
        int j = ran.nextInt(i + 1);
        int tmp = positions[i];
        positions[i] = positions[j];
        positions[j] = tmp;
    }
    // Reverse the array because the randomly chosen positions are at the end
    Sort.reverse(positions);
    positions = Arrays.copyOf(positions, nCrossovers);
    // Get the positions in order
    if (nCrossovers != 1)
        Arrays.sort(positions);
    int nextSwap = 0;
    // Create the children by copying the parent, swapping at each crossover position
    double[] n1 = new double[s1.length];
    double[] n2 = new double[n1.length];
    for (int i = 0; i < n1.length; i++) {
        if (positions[nextSwap] == i) {
            double[] tmp = s1;
            s1 = s2;
            s2 = tmp;
            nextSwap++;
            // Avoid index out of bounds
            if (nextSwap == nCrossovers)
                nextSwap--;
        }
        n1[i] = s1[i];
        n2[i] = s2[i];
    }
    // Create the new chromosomes using the correct parents:
    // if the first swap position was at the start then the parents were swapped.
    Chromosome<T> c1 = (positions[0] == 0) ? chromosome2 : chromosome1;
    Chromosome<T> c2 = (positions[0] == 0) ? chromosome1 : chromosome2;
    c1 = c1.newChromosome(n1);
    c2 = c2.newChromosome(n2);
    // Ensure the child order is random
    return (ran.nextDouble() < 0.5) ? new ChromosomePair<T>(c1, c2) : new ChromosomePair<T>(c2, c1);
}
Also used : RandomGenerator(org.apache.commons.math3.random.RandomGenerator)
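
The position selection above is a partial Fisher-Yates shuffle: only the last nCrossovers iterations of a full shuffle are performed, which leaves nCrossovers uniformly chosen distinct positions at the end of the array in O(nCrossovers) swaps. A minimal sketch of the same trick (hypothetical helper; the recombiner then reverses and truncates instead of copying the tail):

import java.util.Arrays;
import java.util.Random;

public class PartialShuffle {
    // Select k distinct positions from [0, length) using only k swaps
    static int[] select(int length, int k, Random rng) {
        int[] positions = new int[length];
        for (int i = 0; i < length; i++)
            positions[i] = i;
        // Run only the last k steps of a Fisher-Yates shuffle;
        // the randomly chosen elements accumulate at the end
        for (int i = length, n = k; i-- > 1 && n-- > 0;) {
            int j = rng.nextInt(i + 1);
            int tmp = positions[i];
            positions[i] = positions[j];
            positions[j] = tmp;
        }
        return Arrays.copyOfRange(positions, length - k, length);
    }

    public static void main(String[] args) {
        System.out.println(Arrays.toString(select(10, 3, new Random(1))));
    }
}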

Example 19 with Fraction

use of org.apache.commons.math3.fraction.Fraction in project GDSC-SMLM by aherbert.

the class FIRE method runQEstimation.

private void runQEstimation() {
    IJ.showStatus(TITLE + " ...");
    if (!showQEstimationInputDialog())
        return;
    MemoryPeakResults results = ResultsManager.loadInputResults(inputOption, false);
    if (results == null || results.size() == 0) {
        IJ.error(TITLE, "No results could be loaded");
        return;
    }
    if (results.getCalibration() == null) {
        IJ.error(TITLE, "The results are not calibrated");
        return;
    }
    results = cropToRoi(results);
    if (results.size() < 2) {
        IJ.error(TITLE, "No results within the crop region");
        return;
    }
    initialise(results, null);
    // We need localisation precision.
    // Build a histogram of the localisation precision.
    // Get the initial mean and SD and plot as a Gaussian.
    PrecisionHistogram histogram = calculatePrecisionHistogram();
    if (histogram == null) {
        IJ.error(TITLE, "No localisation precision available.\n \nPlease choose " + PrecisionMethod.FIXED + " and enter a precision mean and SD.");
        return;
    }
    StoredDataStatistics precision = histogram.precision;
    //String name = results.getName();
    double fourierImageScale = SCALE_VALUES[imageScaleIndex];
    int imageSize = IMAGE_SIZE_VALUES[imageSizeIndex];
    // Create the image and compute the numerator of FRC. 
    // Do not use the signal so results.size() is the number of localisations.
    IJ.showStatus("Computing FRC curve ...");
    FireImages images = createImages(fourierImageScale, imageSize, false);
    // DEBUGGING - Save the two images to disk. Load the images into the Matlab 
    // code that calculates the Q-estimation and make this plugin match the functionality.
    //IJ.save(new ImagePlus("i1", images.ip1), "/scratch/i1.tif");
    //IJ.save(new ImagePlus("i2", images.ip2), "/scratch/i2.tif");
    FRC frc = new FRC();
    frc.progress = progress;
    frc.setFourierMethod(fourierMethod);
    frc.setSamplingMethod(samplingMethod);
    frc.setPerimeterSamplingFactor(perimeterSamplingFactor);
    FRCCurve frcCurve = frc.calculateFrcCurve(images.ip1, images.ip2, images.nmPerPixel);
    if (frcCurve == null) {
        IJ.error(TITLE, "Failed to compute FRC curve");
        return;
    }
    IJ.showStatus("Running Q-estimation ...");
    // Note:
    // The method implemented here is based on Matlab code provided by Bernd Rieger.
    // The idea is to compute the spurious correlation component of the FRC Numerator
    // using an initial estimate of distribution of the localisation precision (assumed 
    // to be Gaussian). This component is the contribution of repeat localisations of 
    // the same molecule to the numerator and is modelled as an exponential decay
    // (exp_decay). The component is scaled by the Q-value which
    // is the average number of times a molecule is seen in addition to the first time.
    // At large spatial frequencies the scaled component should match the numerator,
    // i.e. at high resolution (low FIRE number) the numerator is made up of repeat 
    // localisations of the same molecule and not actual structure in the image.
    // The best fit is where the numerator equals the scaled component, i.e. num / (q*exp_decay) == 1.
    // The FRC Numerator is plotted and Q can be determined by
    // adjusting Q and the precision mean and SD to maximise the cost function.
    // This can be done interactively by the user with the effect on the FRC curve
    // dynamically updated and displayed.
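    // In symbols (a sketch of the model described above, not code from the project):
    //   numerator(q) ~ structure(q) + Q * sinc^2(pi*q) * E[exp(-4*pi^2*s^2*q^2)]
    // where s is the localisation precision in SR pixels (the expectation is the
    // sampled hq below) and Q is the mean number of repeat localisations per molecule.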
    // Compute the scaled FRC numerator
    double qNorm = (1 / frcCurve.mean1 + 1 / frcCurve.mean2);
    double[] frcnum = new double[frcCurve.getSize()];
    for (int i = 0; i < frcnum.length; i++) {
        FRCCurveResult r = frcCurve.get(i);
        frcnum[i] = qNorm * r.getNumerator() / r.getNumberOfSamples();
    }
    // Compute the spatial frequency and the region for curve fitting
    double[] q = FRC.computeQ(frcCurve, false);
    int low = 0, high = q.length;
    while (high > 0 && q[high - 1] > maxQ) high--;
    while (low < q.length && q[low] < minQ) low++;
    // Require we fit at least 10% of the curve
    if (high - low < q.length * 0.1) {
        IJ.error(TITLE, "Not enough points for Q estimation");
        return;
    }
    // Obtain initial estimate of Q plateau height and decay.
    // This can be done by fitting the precision histogram and then fixing the mean and sigma.
    // Or it can be done by allowing the precision to be sampled and the mean and sigma
    // become parameters for fitting.
    // Check if we can sample precision values
    boolean sampleDecay = precision != null && FIRE.sampleDecay;
    double[] exp_decay;
    if (sampleDecay) {
        // A random sample of precision values from the distribution is used to
        // construct the decay curve
        int[] sample = Random.sample(10000, precision.getN(), new Well19937c());
        final double four_pi2 = 4 * Math.PI * Math.PI;
        double[] pre = new double[q.length];
        for (int i = 1; i < q.length; i++) pre[i] = -four_pi2 * q[i] * q[i];
        // Sample
        final int n = sample.length;
        double[] hq = new double[n];
        for (int j = 0; j < n; j++) {
            // Scale to SR pixels
            double s2 = precision.getValue(sample[j]) / images.nmPerPixel;
            s2 *= s2;
            for (int i = 1; i < q.length; i++) hq[i] += FastMath.exp(pre[i] * s2);
        }
        for (int i = 1; i < q.length; i++) hq[i] /= n;
        exp_decay = new double[q.length];
        exp_decay[0] = 1;
        for (int i = 1; i < q.length; i++) {
            double sinc_q = sinc(Math.PI * q[i]);
            exp_decay[i] = sinc_q * sinc_q * hq[i];
        }
    } else {
        // Note: The precision mean and SD should be in units of super-resolution
        // pixels so scale to SR pixels
        exp_decay = computeExpDecay(histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, q);
    }
    // Smoothing
    double[] smooth;
    if (loessSmoothing) {
        // Note: This computes the log then smooths it 
        double bandwidth = 0.1;
        int robustness = 0;
        double[] l = new double[exp_decay.length];
        for (int i = 0; i < l.length; i++) {
            // Original Matlab code computes the log for each array.
            // This is equivalent to a single log on the fraction of the two.
            // Perhaps the two log method is more numerically stable.
            //l[i] = Math.log(Math.abs(frcnum[i])) - Math.log(exp_decay[i]);
            l[i] = Math.log(Math.abs(frcnum[i] / exp_decay[i]));
        }
        try {
            LoessInterpolator loess = new LoessInterpolator(bandwidth, robustness);
            smooth = loess.smooth(q, l);
        } catch (Exception e) {
            IJ.error(TITLE, "LOESS smoothing failed");
            return;
        }
    } else {
        // Note: This smooths the curve before computing the log 
        double[] norm = new double[exp_decay.length];
        for (int i = 0; i < norm.length; i++) {
            norm[i] = frcnum[i] / exp_decay[i];
        }
        // Median window of 5 == radius of 2
        MedianWindow mw = new MedianWindow(norm, 2);
        smooth = new double[exp_decay.length];
        for (int i = 0; i < norm.length; i++) {
            smooth[i] = Math.log(Math.abs(mw.getMedian()));
            mw.increment();
        }
    }
    // Fit with quadratic to find the initial guess.
    // Note: example Matlab code frc_Qcorrection7.m identifies regions of the 
    // smoothed log curve with low derivative and only fits those. The fit is 
    // used for the final estimate. Fitting a subset with low derivative is not 
    // implemented here since the initial estimate is subsequently optimised 
    // to maximise a cost function. 
    Quadratic curve = new Quadratic();
    SimpleCurveFitter fit = SimpleCurveFitter.create(curve, new double[2]);
    WeightedObservedPoints points = new WeightedObservedPoints();
    for (int i = low; i < high; i++) points.add(q[i], smooth[i]);
    double[] estimate = fit.fit(points.toList());
    double qValue = FastMath.exp(estimate[0]);
    //System.out.printf("Initial q-estimate = %s => %.3f\n", Arrays.toString(estimate), qValue);
    // This could be made an option. For now it is just used for debugging
    boolean debug = false;
    if (debug) {
        // Plot the initial fit and the fit curve
        double[] qScaled = FRC.computeQ(frcCurve, true);
        double[] line = new double[q.length];
        for (int i = 0; i < q.length; i++) line[i] = curve.value(q[i], estimate);
        String title = TITLE + " Initial fit";
        Plot2 plot = new Plot2(title, "Spatial Frequency (nm^-1)", "FRC Numerator");
        String label = String.format("Q = %.3f", qValue);
        plot.addPoints(qScaled, smooth, Plot.LINE);
        plot.setColor(Color.red);
        plot.addPoints(qScaled, line, Plot.LINE);
        plot.setColor(Color.black);
        plot.addLabel(0, 0, label);
        Utils.display(title, plot, Utils.NO_TO_FRONT);
    }
    if (fitPrecision) {
        // Q. Should this be optional?
        if (sampleDecay) {
            // If a sample of the precision was used to construct the data for the initial fit 
            // then update the estimate using the fit result since it will be a better start point. 
            histogram.sigma = precision.getStandardDeviation();
            // Normalise sum-of-squares to the SR pixel size
            double meanSumOfSquares = (precision.getSumOfSquares() / (images.nmPerPixel * images.nmPerPixel)) / precision.getN();
            histogram.mean = images.nmPerPixel * Math.sqrt(meanSumOfSquares - estimate[1] / (4 * Math.PI * Math.PI));
        }
        // Do a multivariate fit ...
        SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
        PointValuePair p = null;
        MultiPlateauness f = new MultiPlateauness(frcnum, q, low, high);
        double[] initial = new double[] { histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, qValue };
        p = findMin(p, opt, f, scale(initial, 0.1));
        p = findMin(p, opt, f, scale(initial, 0.5));
        p = findMin(p, opt, f, initial);
        p = findMin(p, opt, f, scale(initial, 2));
        p = findMin(p, opt, f, scale(initial, 10));
        if (p != null) {
            double[] point = p.getPointRef();
            histogram.mean = point[0] * images.nmPerPixel;
            histogram.sigma = point[1] * images.nmPerPixel;
            qValue = point[2];
        }
    } else {
        // If the precision was sampled then update the estimate using the fit
        // result (this could be made optional).
        if (sampleDecay) {
            if (precisionMethod != PrecisionMethod.FIXED) {
                histogram.sigma = precision.getStandardDeviation();
                // Normalise sum-of-squares to the SR pixel size
                double meanSumOfSquares = (precision.getSumOfSquares() / (images.nmPerPixel * images.nmPerPixel)) / precision.getN();
                histogram.mean = images.nmPerPixel * Math.sqrt(meanSumOfSquares - estimate[1] / (4 * Math.PI * Math.PI));
            }
            exp_decay = computeExpDecay(histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, q);
        }
        // Estimate spurious component by promoting plateauness.
        // The Matlab code used random initial points for a Simplex optimiser.
        // A Brent line search should be pretty deterministic so do simple repeats.
        // However it will proceed downhill so if the initial point is wrong then 
        // it will find a sub-optimal result.
        UnivariateOptimizer o = new BrentOptimizer(1e-3, 1e-6);
        Plateauness f = new Plateauness(frcnum, exp_decay, low, high);
        UnivariatePointValuePair p = null;
        p = findMin(p, o, f, qValue, 0.1);
        p = findMin(p, o, f, qValue, 0.2);
        p = findMin(p, o, f, qValue, 0.333);
        p = findMin(p, o, f, qValue, 0.5);
        // Do some Simplex repeats as well
        SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
        p = findMin(p, opt, f, qValue * 0.1);
        p = findMin(p, opt, f, qValue * 0.5);
        p = findMin(p, opt, f, qValue);
        p = findMin(p, opt, f, qValue * 2);
        p = findMin(p, opt, f, qValue * 10);
        if (p != null)
            qValue = p.getPoint();
    }
    QPlot qplot = new QPlot(frcCurve, qValue, low, high);
    // Interactive dialog to estimate Q (blinking events per fluorophore) using
    // sliders for the mean and standard deviation of the localisation precision.
    showQEstimationDialog(histogram, qplot, frcCurve, images.nmPerPixel);
    IJ.showStatus(TITLE + " complete");
}
Also used : BrentOptimizer(org.apache.commons.math3.optim.univariate.BrentOptimizer) Plot2(ij.gui.Plot2) Well19937c(org.apache.commons.math3.random.Well19937c) PointValuePair(org.apache.commons.math3.optim.PointValuePair) UnivariatePointValuePair(org.apache.commons.math3.optim.univariate.UnivariatePointValuePair) LoessInterpolator(org.apache.commons.math3.analysis.interpolation.LoessInterpolator) WeightedObservedPoints(org.apache.commons.math3.fitting.WeightedObservedPoints) SimplexOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer) MemoryPeakResults(gdsc.smlm.results.MemoryPeakResults) MedianWindow(gdsc.core.utils.MedianWindow) SimpleCurveFitter(org.apache.commons.math3.fitting.SimpleCurveFitter) FRCCurveResult(gdsc.smlm.ij.frc.FRC.FRCCurveResult) StoredDataStatistics(gdsc.core.utils.StoredDataStatistics) UnivariatePointValuePair(org.apache.commons.math3.optim.univariate.UnivariatePointValuePair) WeightedObservedPoint(org.apache.commons.math3.fitting.WeightedObservedPoint) TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) FRCCurve(gdsc.smlm.ij.frc.FRC.FRCCurve) FRC(gdsc.smlm.ij.frc.FRC) UnivariateOptimizer(org.apache.commons.math3.optim.univariate.UnivariateOptimizer)
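
The optimisation in this example maximises "plateauness": when Q is correct, frcnum / (Q * exp_decay) should be flat at 1 over the fitted region. A minimal sketch of such a cost with a Brent line search, assuming a least-squares plateau penalty (hypothetical reimplementation; the project's Plateauness class may weight or window the points differently):

import org.apache.commons.math3.analysis.UnivariateFunction;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.univariate.BrentOptimizer;
import org.apache.commons.math3.optim.univariate.SearchInterval;
import org.apache.commons.math3.optim.univariate.UnivariateObjectiveFunction;
import org.apache.commons.math3.optim.univariate.UnivariatePointValuePair;

public class PlateaunessSketch {
    public static void main(String[] args) {
        // Toy data constructed so the optimum is exactly trueQ
        final double trueQ = 0.35;
        final int size = 100, low = 10, high = 90;
        final double[] frcnum = new double[size], expDecay = new double[size];
        for (int i = 0; i < size; i++) {
            expDecay[i] = Math.exp(-0.05 * i);
            frcnum[i] = trueQ * expDecay[i];
        }
        // Sum of squared deviations of frcnum / (q * exp_decay) from 1
        UnivariateFunction cost = q -> {
            double ss = 0;
            for (int i = low; i < high; i++) {
                final double d = frcnum[i] / (q * expDecay[i]) - 1;
                ss += d * d;
            }
            return ss;
        };
        BrentOptimizer opt = new BrentOptimizer(1e-3, 1e-6);
        UnivariatePointValuePair p = opt.optimize(new MaxEval(200),
                new UnivariateObjectiveFunction(cost), GoalType.MINIMIZE,
                new SearchInterval(1e-3, 10));
        System.out.printf("Estimated Q = %.4f (true %.2f)%n", p.getPoint(), trueQ);
    }
}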

Example 20 with Fraction

use of org.apache.commons.math3.fraction.Fraction in project GDSC-SMLM by aherbert.

the class BenchmarkSpotFit method summariseResults.

private void summariseResults(TIntObjectHashMap<FilterCandidates> filterCandidates, long runTime, final PreprocessedPeakResult[] preprocessedPeakResults, int nUniqueIDs) {
    createTable();
    // Summarise the fitting results. N fits, N failures. 
    // Optimal match statistics if filtering is perfect (since fitting is not perfect).
    StoredDataStatistics distanceStats = new StoredDataStatistics();
    StoredDataStatistics depthStats = new StoredDataStatistics();
    // Get stats for all fitted results and those that match 
    // Signal, SNR, Width, xShift, yShift, Precision
    createFilterCriteria();
    StoredDataStatistics[][] stats = new StoredDataStatistics[3][filterCriteria.length];
    for (int i = 0; i < stats.length; i++) for (int j = 0; j < stats[i].length; j++) stats[i][j] = new StoredDataStatistics();
    final double nmPerPixel = simulationParameters.a;
    double tp = 0, fp = 0;
    int failcTP = 0, failcFP = 0;
    int cTP = 0, cFP = 0;
    int[] singleStatus = null, multiStatus = null, doubletStatus = null, multiDoubletStatus = null;
    singleStatus = new int[FitStatus.values().length];
    multiStatus = new int[singleStatus.length];
    doubletStatus = new int[singleStatus.length];
    multiDoubletStatus = new int[singleStatus.length];
    // Easier to materialise the values since we have a lot of non final variables to manipulate
    final int[] frames = new int[filterCandidates.size()];
    final FilterCandidates[] candidates = new FilterCandidates[filterCandidates.size()];
    final int[] counter = new int[1];
    filterCandidates.forEachEntry(new TIntObjectProcedure<FilterCandidates>() {

        public boolean execute(int a, FilterCandidates b) {
            frames[counter[0]] = a;
            candidates[counter[0]] = b;
            counter[0]++;
            return true;
        }
    });
    for (FilterCandidates result : candidates) {
        // Count the number of fit results that matched (tp) and did not match (fp)
        tp += result.tp;
        fp += result.fp;
        for (int i = 0; i < result.fitResult.length; i++) {
            if (result.spots[i].match)
                cTP++;
            else
                cFP++;
            final MultiPathFitResult fitResult = result.fitResult[i];
            if (singleStatus != null && result.spots[i].match) {
                // Debugging reasons for fit failure
                addStatus(singleStatus, fitResult.getSingleFitResult());
                addStatus(multiStatus, fitResult.getMultiFitResult());
                addStatus(doubletStatus, fitResult.getDoubletFitResult());
                addStatus(multiDoubletStatus, fitResult.getMultiDoubletFitResult());
            }
            if (noMatch(fitResult)) {
                if (result.spots[i].match)
                    failcTP++;
                else
                    failcFP++;
            }
            // We have multi-path results.
            // We want statistics for:
            // [0] all fitted spots
            // [1] fitted spots that match a result
            // [2] fitted spots that do not match a result
            addToStats(fitResult.getSingleFitResult(), stats);
            addToStats(fitResult.getMultiFitResult(), stats);
            addToStats(fitResult.getDoubletFitResult(), stats);
            addToStats(fitResult.getMultiDoubletFitResult(), stats);
        }
        // Statistics on spots that fit an actual result
        for (int i = 0; i < result.match.length; i++) {
            if (!result.match[i].isFitResult())
                // For now just ignore the candidates that matched
                continue;
            FitMatch fitMatch = (FitMatch) result.match[i];
            distanceStats.add(fitMatch.d * nmPerPixel);
            depthStats.add(fitMatch.z * nmPerPixel);
        }
    }
    // Store data for computing correlation
    double[] i1 = new double[depthStats.getN()];
    double[] i2 = new double[i1.length];
    double[] is = new double[i1.length];
    int ci = 0;
    for (FilterCandidates result : candidates) {
        for (int i = 0; i < result.match.length; i++) {
            if (!result.match[i].isFitResult())
                // For now just ignore the candidates that matched
                continue;
            FitMatch fitMatch = (FitMatch) result.match[i];
            ScoredSpot spot = result.spots[fitMatch.i];
            i1[ci] = fitMatch.predictedSignal;
            i2[ci] = fitMatch.actualSignal;
            is[ci] = spot.spot.intensity;
            ci++;
        }
    }
    // We want to compute the Jaccard against the spot metric
    // Filter the results using the multi-path filter
    ArrayList<MultiPathFitResults> multiPathResults = new ArrayList<MultiPathFitResults>(filterCandidates.size());
    for (int i = 0; i < frames.length; i++) {
        int frame = frames[i];
        MultiPathFitResult[] multiPathFitResults = candidates[i].fitResult;
        int totalCandidates = candidates[i].spots.length;
        int nActual = actualCoordinates.get(frame).size();
        multiPathResults.add(new MultiPathFitResults(frame, multiPathFitResults, totalCandidates, nActual));
    }
    // Score the results and count the number returned
    List<FractionalAssignment[]> assignments = new ArrayList<FractionalAssignment[]>();
    final TIntHashSet set = new TIntHashSet(nUniqueIDs);
    FractionScoreStore scoreStore = new FractionScoreStore() {

        public void add(int uniqueId) {
            set.add(uniqueId);
        }
    };
    MultiPathFitResults[] multiResults = multiPathResults.toArray(new MultiPathFitResults[multiPathResults.size()]);
    // Filter with no filter
    MultiPathFilter mpf = new MultiPathFilter(new SignalFilter(0), null, multiFilter.residualsThreshold);
    FractionClassificationResult fractionResult = mpf.fractionScoreSubset(multiResults, Integer.MAX_VALUE, this.results.size(), assignments, scoreStore, CoordinateStoreFactory.create(imp.getWidth(), imp.getHeight(), fitConfig.getDuplicateDistance()));
    double nPredicted = fractionResult.getTP() + fractionResult.getFP();
    final double[][] matchScores = new double[set.size()][];
    int count = 0;
    for (int i = 0; i < assignments.size(); i++) {
        FractionalAssignment[] a = assignments.get(i);
        if (a == null)
            continue;
        for (int j = 0; j < a.length; j++) {
            final PreprocessedPeakResult r = ((PeakFractionalAssignment) a[j]).peakResult;
            set.remove(r.getUniqueId());
            final double precision = Math.sqrt(r.getLocationVariance());
            final double signal = r.getSignal();
            final double snr = r.getSNR();
            final double width = r.getXSDFactor();
            final double xShift = r.getXRelativeShift2();
            final double yShift = r.getYRelativeShift2();
            // These two are combined for filtering and the max is what matters.
            final double shift = (xShift > yShift) ? Math.sqrt(xShift) : Math.sqrt(yShift);
            final double eshift = Math.sqrt(xShift + yShift);
            final double[] score = new double[8];
            score[FILTER_SIGNAL] = signal;
            score[FILTER_SNR] = snr;
            score[FILTER_MIN_WIDTH] = width;
            score[FILTER_MAX_WIDTH] = width;
            score[FILTER_SHIFT] = shift;
            score[FILTER_ESHIFT] = eshift;
            score[FILTER_PRECISION] = precision;
            score[FILTER_PRECISION + 1] = a[j].getScore();
            matchScores[count++] = score;
        }
    }
    // Add the rest
    set.forEach(new CustomTIntProcedure(count) {

        public boolean execute(int uniqueId) {
            // This should not be null or something has gone wrong
            PreprocessedPeakResult r = preprocessedPeakResults[uniqueId];
            if (r == null)
                throw new RuntimeException("Missing result: " + uniqueId);
            final double precision = Math.sqrt(r.getLocationVariance());
            final double signal = r.getSignal();
            final double snr = r.getSNR();
            final double width = r.getXSDFactor();
            final double xShift = r.getXRelativeShift2();
            final double yShift = r.getYRelativeShift2();
            // These two are combined for filtering and the max is what matters.
            final double shift = (xShift > yShift) ? Math.sqrt(xShift) : Math.sqrt(yShift);
            final double eshift = Math.sqrt(xShift + yShift);
            final double[] score = new double[8];
            score[FILTER_SIGNAL] = signal;
            score[FILTER_SNR] = snr;
            score[FILTER_MIN_WIDTH] = width;
            score[FILTER_MAX_WIDTH] = width;
            score[FILTER_SHIFT] = shift;
            score[FILTER_ESHIFT] = eshift;
            score[FILTER_PRECISION] = precision;
            matchScores[c++] = score;
            return true;
        }
    });
    // Debug the reasons the fit failed
    if (singleStatus != null) {
        String name = PeakFit.getSolverName(fitConfig);
        if (fitConfig.getFitSolver() == FitSolver.MLE && fitConfig.isModelCamera())
            name += " Camera";
        System.out.println("Failure counts: " + name);
        printFailures("Single", singleStatus);
        printFailures("Multi", multiStatus);
        printFailures("Doublet", doubletStatus);
        printFailures("Multi doublet", multiDoubletStatus);
    }
    StringBuilder sb = new StringBuilder(300);
    // Add information about the simulation
    //(simulationParameters.minSignal + simulationParameters.maxSignal) * 0.5;
    final double signal = simulationParameters.signalPerFrame;
    final int n = results.size();
    sb.append(imp.getStackSize()).append("\t");
    final int w = imp.getWidth();
    final int h = imp.getHeight();
    sb.append(w).append("\t");
    sb.append(h).append("\t");
    sb.append(n).append("\t");
    double density = ((double) n / imp.getStackSize()) / (w * h) / (simulationParameters.a * simulationParameters.a / 1e6);
    sb.append(Utils.rounded(density)).append("\t");
    sb.append(Utils.rounded(signal)).append("\t");
    sb.append(Utils.rounded(simulationParameters.s)).append("\t");
    sb.append(Utils.rounded(simulationParameters.a)).append("\t");
    sb.append(Utils.rounded(simulationParameters.depth)).append("\t");
    sb.append(simulationParameters.fixedDepth).append("\t");
    sb.append(Utils.rounded(simulationParameters.gain)).append("\t");
    sb.append(Utils.rounded(simulationParameters.readNoise)).append("\t");
    sb.append(Utils.rounded(simulationParameters.b)).append("\t");
    sb.append(Utils.rounded(simulationParameters.b2)).append("\t");
    // Compute the noise
    double noise = simulationParameters.b2;
    if (simulationParameters.emCCD) {
        // The b2 parameter was computed without application of the EM-CCD noise factor of 2.
        //final double b2 = backgroundVariance + readVariance
        //                = simulationParameters.b + readVariance
        // This should be applied only to the background variance.
        final double readVariance = noise - simulationParameters.b;
        noise = simulationParameters.b * 2 + readVariance;
    }
    if (simulationParameters.fullSimulation) {
    // The total signal is spread over frames
    }
    sb.append(Utils.rounded(signal / Math.sqrt(noise))).append("\t");
    sb.append(Utils.rounded(simulationParameters.s / simulationParameters.a)).append("\t");
    sb.append(spotFilter.getDescription());
    // nP and nN are the fractional scores of the spot candidates
    addCount(sb, nP + nN);
    addCount(sb, nP);
    addCount(sb, nN);
    addCount(sb, fP);
    addCount(sb, fN);
    String name = PeakFit.getSolverName(fitConfig);
    if (fitConfig.getFitSolver() == FitSolver.MLE && fitConfig.isModelCamera())
        name += " Camera";
    add(sb, name);
    add(sb, config.getFitting());
    resultPrefix = sb.toString();
    // Q. Should I add other fit configuration here?
    // The fraction of positive and negative candidates that were included
    add(sb, (100.0 * cTP) / nP);
    add(sb, (100.0 * cFP) / nN);
    // Score the fitting results compared to the original simulation.
    // Score the candidate selection:
    add(sb, cTP + cFP);
    add(sb, cTP);
    add(sb, cFP);
    // TP are all candidates that can be matched to a spot
    // FP are all candidates that cannot be matched to a spot
    // FN = The number of missed spots
    FractionClassificationResult m = new FractionClassificationResult(cTP, cFP, 0, simulationParameters.molecules - cTP);
    add(sb, m.getRecall());
    add(sb, m.getPrecision());
    add(sb, m.getF1Score());
    add(sb, m.getJaccard());
    // Score the fitting results:
    add(sb, failcTP);
    add(sb, failcFP);
    // TP are all fit results that can be matched to a spot
    // FP are all fit results that cannot be matched to a spot
    // FN = The number of missed spots
    add(sb, tp);
    add(sb, fp);
    m = new FractionClassificationResult(tp, fp, 0, simulationParameters.molecules - tp);
    add(sb, m.getRecall());
    add(sb, m.getPrecision());
    add(sb, m.getF1Score());
    add(sb, m.getJaccard());
    // Do it again but pretend we can perfectly filter all the false positives
    //add(sb, tp);
    m = new FractionClassificationResult(tp, 0, 0, simulationParameters.molecules - tp);
    // Recall is unchanged
    // Precision will be 100%
    add(sb, m.getF1Score());
    add(sb, m.getJaccard());
    // The mean may be subject to extreme outliers so use the median
    double median = distanceStats.getMedian();
    add(sb, median);
    WindowOrganiser wo = new WindowOrganiser();
    String label = String.format("Recall = %s. n = %d. Median = %s nm. SD = %s nm", Utils.rounded(m.getRecall()), distanceStats.getN(), Utils.rounded(median), Utils.rounded(distanceStats.getStandardDeviation()));
    int id = Utils.showHistogram(TITLE, distanceStats, "Match Distance (nm)", 0, 0, 0, label);
    if (Utils.isNewWindow())
        wo.add(id);
    median = depthStats.getMedian();
    add(sb, median);
    // Sort by spot intensity and produce correlation
    int[] indices = Utils.newArray(i1.length, 0, 1);
    if (showCorrelation)
        Sort.sort(indices, is, rankByIntensity);
    double[] r = (showCorrelation) ? new double[i1.length] : null;
    double[] sr = (showCorrelation) ? new double[i1.length] : null;
    double[] rank = (showCorrelation) ? new double[i1.length] : null;
    ci = 0;
    FastCorrelator fastCorrelator = new FastCorrelator();
    ArrayList<Ranking> pc1 = new ArrayList<Ranking>();
    ArrayList<Ranking> pc2 = new ArrayList<Ranking>();
    for (int ci2 : indices) {
        fastCorrelator.add((long) Math.round(i1[ci2]), (long) Math.round(i2[ci2]));
        pc1.add(new Ranking(i1[ci2], ci));
        pc2.add(new Ranking(i2[ci2], ci));
        if (showCorrelation) {
            r[ci] = fastCorrelator.getCorrelation();
            sr[ci] = Correlator.correlation(rank(pc1), rank(pc2));
            if (rankByIntensity)
                rank[ci] = is[0] - is[ci];
            else
                rank[ci] = ci;
        }
        ci++;
    }
    final double pearsonCorr = fastCorrelator.getCorrelation();
    final double rankedCorr = Correlator.correlation(rank(pc1), rank(pc2));
    // Get the regression
    SimpleRegression regression = new SimpleRegression(false);
    for (int i = 0; i < pc1.size(); i++) regression.addData(pc1.get(i).value, pc2.get(i).value);
    //final double intercept = regression.getIntercept();
    final double slope = regression.getSlope();
    if (showCorrelation) {
        String title = TITLE + " Intensity";
        Plot plot = new Plot(title, "Candidate", "Spot");
        double[] limits1 = Maths.limits(i1);
        double[] limits2 = Maths.limits(i2);
        plot.setLimits(limits1[0], limits1[1], limits2[0], limits2[1]);
        label = String.format("Correlation=%s; Ranked=%s; Slope=%s", Utils.rounded(pearsonCorr), Utils.rounded(rankedCorr), Utils.rounded(slope));
        plot.addLabel(0, 0, label);
        plot.setColor(Color.red);
        plot.addPoints(i1, i2, Plot.DOT);
        if (slope > 1)
            plot.drawLine(limits1[0], limits1[0] * slope, limits1[1], limits1[1] * slope);
        else
            plot.drawLine(limits2[0] / slope, limits2[0], limits2[1] / slope, limits2[1]);
        PlotWindow pw = Utils.display(title, plot);
        if (Utils.isNewWindow())
            wo.add(pw);
        title = TITLE + " Correlation";
        plot = new Plot(title, "Spot Rank", "Correlation");
        double[] xlimits = Maths.limits(rank);
        double[] ylimits = Maths.limits(r);
        ylimits = Maths.limits(ylimits, sr);
        plot.setLimits(xlimits[0], xlimits[1], ylimits[0], ylimits[1]);
        plot.setColor(Color.red);
        plot.addPoints(rank, r, Plot.LINE);
        plot.setColor(Color.blue);
        plot.addPoints(rank, sr, Plot.LINE);
        plot.setColor(Color.black);
        plot.addLabel(0, 0, label);
        pw = Utils.display(title, plot);
        if (Utils.isNewWindow())
            wo.add(pw);
    }
    add(sb, pearsonCorr);
    add(sb, rankedCorr);
    add(sb, slope);
    label = String.format("n = %d. Median = %s nm", depthStats.getN(), Utils.rounded(median));
    id = Utils.showHistogram(TITLE, depthStats, "Match Depth (nm)", 0, 1, 0, label);
    if (Utils.isNewWindow())
        wo.add(id);
    // Plot histograms of the stats on the same window
    double[] lower = new double[filterCriteria.length];
    double[] upper = new double[lower.length];
    min = new double[lower.length];
    max = new double[lower.length];
    for (int i = 0; i < stats[0].length; i++) {
        double[] limits = showDoubleHistogram(stats, i, wo, matchScores, nPredicted);
        lower[i] = limits[0];
        upper[i] = limits[1];
        min[i] = limits[2];
        max[i] = limits[3];
    }
    // Reconfigure some of the range limits
    // Make this a bit bigger
    upper[FILTER_SIGNAL] *= 2;
    // Make this a bit bigger
    upper[FILTER_SNR] *= 2;
    double factor = 0.25;
    if (lower[FILTER_MIN_WIDTH] != 0)
        // (assuming lower is less than 1)
        upper[FILTER_MIN_WIDTH] = 1 - Math.max(0, factor * (1 - lower[FILTER_MIN_WIDTH]));
    if (upper[FILTER_MAX_WIDTH] != 0)
        // (assuming upper is more than 1)
        lower[FILTER_MAX_WIDTH] = 1 + Math.max(0, factor * (upper[FILTER_MAX_WIDTH] - 1));
    // Round the ranges
    final double[] interval = new double[stats[0].length];
    interval[FILTER_SIGNAL] = SignalFilter.DEFAULT_INCREMENT;
    interval[FILTER_SNR] = SNRFilter.DEFAULT_INCREMENT;
    interval[FILTER_MIN_WIDTH] = WidthFilter2.DEFAULT_MIN_INCREMENT;
    interval[FILTER_MAX_WIDTH] = WidthFilter.DEFAULT_INCREMENT;
    interval[FILTER_SHIFT] = ShiftFilter.DEFAULT_INCREMENT;
    interval[FILTER_ESHIFT] = EShiftFilter.DEFAULT_INCREMENT;
    interval[FILTER_PRECISION] = PrecisionFilter.DEFAULT_INCREMENT;
    interval[FILTER_ITERATIONS] = 0.1;
    interval[FILTER_EVALUATIONS] = 0.1;
    // Create a range increment
    double[] increment = new double[lower.length];
    for (int i = 0; i < increment.length; i++) {
        lower[i] = Maths.floor(lower[i], interval[i]);
        upper[i] = Maths.ceil(upper[i], interval[i]);
        double range = upper[i] - lower[i];
        // Allow clipping if the range is small compared to the min increment
        double multiples = range / interval[i];
        // Use 8 multiples for the equivalent of +/- 4 steps around the centre
        if (multiples < 8) {
            multiples = Math.ceil(multiples);
        } else
            multiples = 8;
        increment[i] = Maths.ceil(range / multiples, interval[i]);
        if (i == FILTER_MIN_WIDTH)
            // Requires clipping based on the upper limit
            lower[i] = upper[i] - increment[i] * multiples;
        else
            upper[i] = lower[i] + increment[i] * multiples;
    }
    for (int i = 0; i < stats[0].length; i++) {
        lower[i] = Maths.round(lower[i]);
        upper[i] = Maths.round(upper[i]);
        min[i] = Maths.round(min[i]);
        max[i] = Maths.round(max[i]);
        increment[i] = Maths.round(increment[i]);
        sb.append("\t").append(min[i]).append(':').append(lower[i]).append('-').append(upper[i]).append(':').append(max[i]);
    }
    // Disable some filters
    increment[FILTER_SIGNAL] = Double.POSITIVE_INFINITY;
    //increment[FILTER_SHIFT] = Double.POSITIVE_INFINITY;
    increment[FILTER_ESHIFT] = Double.POSITIVE_INFINITY;
    wo.tile();
    sb.append("\t").append(Utils.timeToString(runTime / 1000000.0));
    summaryTable.append(sb.toString());
    if (saveFilterRange) {
        GlobalSettings gs = SettingsManager.loadSettings();
        FilterSettings filterSettings = gs.getFilterSettings();
        String filename = (silent) ? filterSettings.filterSetFilename : Utils.getFilename("Filter_range_file", filterSettings.filterSetFilename);
        if (filename == null)
            return;
        // Ensure an xml extension before storing the filename
        filename = Utils.replaceExtension(filename, ".xml");
        filterSettings.filterSetFilename = filename;
        // Create a filter set using the ranges
        ArrayList<Filter> filters = new ArrayList<Filter>(3);
        filters.add(new MultiFilter2(lower[0], (float) lower[1], lower[2], lower[3], lower[4], lower[5], lower[6]));
        filters.add(new MultiFilter2(upper[0], (float) upper[1], upper[2], upper[3], upper[4], upper[5], upper[6]));
        filters.add(new MultiFilter2(increment[0], (float) increment[1], increment[2], increment[3], increment[4], increment[5], increment[6]));
        if (saveFilters(filename, filters))
            SettingsManager.saveSettings(gs);
        // Create a filter set using the min/max and the initial bounds.
        // Set sensible limits
        min[FILTER_SIGNAL] = Math.max(min[FILTER_SIGNAL], 30);
        max[FILTER_PRECISION] = Math.min(max[FILTER_PRECISION], 100);
        // Commented this out so that the 4-set filters are the same as the 3-set filters.
        // The difference leads to differences when optimising.
        //			// Use half the initial bounds (hoping this is a good starting guess for the optimum)
        //			final boolean[] limitToLower = new boolean[min.length];
        //			limitToLower[FILTER_SIGNAL] = true;
        //			limitToLower[FILTER_SNR] = true;
        //			limitToLower[FILTER_MIN_WIDTH] = true;
        //			limitToLower[FILTER_MAX_WIDTH] = false;
        //			limitToLower[FILTER_SHIFT] = false;
        //			limitToLower[FILTER_ESHIFT] = false;
        //			limitToLower[FILTER_PRECISION] = true;
        //			for (int i = 0; i < limitToLower.length; i++)
        //			{
        //				final double range = (upper[i] - lower[i]) / 2;
        //				if (limitToLower[i])
        //					upper[i] = lower[i] + range;
        //				else
        //					lower[i] = upper[i] - range;
        //			}
        filters = new ArrayList<Filter>(4);
        filters.add(new MultiFilter2(min[0], (float) min[1], min[2], min[3], min[4], min[5], min[6]));
        filters.add(new MultiFilter2(lower[0], (float) lower[1], lower[2], lower[3], lower[4], lower[5], lower[6]));
        filters.add(new MultiFilter2(upper[0], (float) upper[1], upper[2], upper[3], upper[4], upper[5], upper[6]));
        filters.add(new MultiFilter2(max[0], (float) max[1], max[2], max[3], max[4], max[5], max[6]));
        saveFilters(Utils.replaceExtension(filename, ".4.xml"), filters);
    }
}
Also used : ArrayList(java.util.ArrayList) TIntHashSet(gnu.trove.set.hash.TIntHashSet) MultiPathFitResult(gdsc.smlm.results.filter.MultiPathFitResult) FractionalAssignment(gdsc.core.match.FractionalAssignment) PeakFractionalAssignment(gdsc.smlm.results.filter.PeakFractionalAssignment) ImmutableFractionalAssignment(gdsc.core.match.ImmutableFractionalAssignment) FractionClassificationResult(gdsc.core.match.FractionClassificationResult) BasePreprocessedPeakResult(gdsc.smlm.results.filter.BasePreprocessedPeakResult) PreprocessedPeakResult(gdsc.smlm.results.filter.PreprocessedPeakResult) SignalFilter(gdsc.smlm.results.filter.SignalFilter) FilterSettings(gdsc.smlm.ij.settings.FilterSettings) ScoredSpot(gdsc.smlm.ij.plugins.BenchmarkSpotFilter.ScoredSpot) FastCorrelator(gdsc.core.utils.FastCorrelator) Plot(ij.gui.Plot) StoredDataStatistics(gdsc.core.utils.StoredDataStatistics) PlotWindow(ij.gui.PlotWindow) GlobalSettings(gdsc.smlm.ij.settings.GlobalSettings) WindowOrganiser(ij.plugin.WindowOrganiser) PeakResultPoint(gdsc.smlm.ij.plugins.ResultsMatchCalculator.PeakResultPoint) BasePoint(gdsc.core.match.BasePoint) PeakFractionalAssignment(gdsc.smlm.results.filter.PeakFractionalAssignment) FractionScoreStore(gdsc.smlm.results.filter.MultiPathFilter.FractionScoreStore) SimpleRegression(org.apache.commons.math3.stat.regression.SimpleRegression) SignalFilter(gdsc.smlm.results.filter.SignalFilter) DirectFilter(gdsc.smlm.results.filter.DirectFilter) ShiftFilter(gdsc.smlm.results.filter.ShiftFilter) PrecisionFilter(gdsc.smlm.results.filter.PrecisionFilter) Filter(gdsc.smlm.results.filter.Filter) EShiftFilter(gdsc.smlm.results.filter.EShiftFilter) WidthFilter(gdsc.smlm.results.filter.WidthFilter) SNRFilter(gdsc.smlm.results.filter.SNRFilter) MultiPathFilter(gdsc.smlm.results.filter.MultiPathFilter) MaximaSpotFilter(gdsc.smlm.filters.MaximaSpotFilter) MultiFilter2(gdsc.smlm.results.filter.MultiFilter2) MultiPathFitResults(gdsc.smlm.results.filter.MultiPathFitResults) MultiPathFilter(gdsc.smlm.results.filter.MultiPathFilter)
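
The summary converts fractional TP/FP/FN counts into recall, precision, F1 and Jaccard several times via FractionClassificationResult. The underlying formulas (a sketch of the standard definitions; the gdsc.core class is assumed to also handle zero denominators):

public class FractionScores {
    // Standard classification scores from (possibly fractional) counts
    static double[] score(double tp, double fp, double fn) {
        final double recall = tp / (tp + fn);
        final double precision = tp / (tp + fp);
        final double f1 = 2 * precision * recall / (precision + recall);
        final double jaccard = tp / (tp + fp + fn);
        return new double[] { recall, precision, f1, jaccard };
    }

    public static void main(String[] args) {
        // e.g. cTP = 80 matched candidates, cFP = 20, 120 molecules missed
        final double[] s = score(80, 20, 120);
        System.out.printf("recall=%.3f precision=%.3f F1=%.3f Jaccard=%.3f%n",
                s[0], s[1], s[2], s[3]);
    }
}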

Aggregations

ArrayList (java.util.ArrayList) 9
AllelicCount (org.broadinstitute.hellbender.tools.exome.alleliccount.AllelicCount) 6
StoredDataStatistics (gdsc.core.utils.StoredDataStatistics) 4
Well19937c (org.apache.commons.math3.random.Well19937c) 4
BasePoint (gdsc.core.match.BasePoint) 3
MemoryPeakResults (gdsc.smlm.results.MemoryPeakResults) 3
RandomDataGenerator (org.apache.commons.math3.random.RandomDataGenerator) 3
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 2
ClusterPoint (gdsc.core.clustering.ClusterPoint) 2
BufferedTextWindow (gdsc.core.ij.BufferedTextWindow) 2
FractionClassificationResult (gdsc.core.match.FractionClassificationResult) 2
FastCorrelator (gdsc.core.utils.FastCorrelator) 2
Statistics (gdsc.core.utils.Statistics) 2
MaximaSpotFilter (gdsc.smlm.filters.MaximaSpotFilter) 2
PeakResultPoint (gdsc.smlm.ij.plugins.ResultsMatchCalculator.PeakResultPoint) 2
TDoubleArrayList (gnu.trove.list.array.TDoubleArrayList) 2
Plot2 (ij.gui.Plot2) 2
IOException (java.io.IOException) 2
List (java.util.List) 2
ConvergenceException (org.apache.commons.math3.exception.ConvergenceException) 2