Example 16 with Line

use of org.apache.commons.math3.geometry.euclidean.twod.Line in project recordinality by cscotta.

the class RecordinalityTest method buildRun.

private Callable<Result> buildRun(final int kSize, final int numRuns, final List<String> lines) {
    return new Callable<Result>() {

        public Result call() throws Exception {
            long start = System.currentTimeMillis();
            final double[] results = new double[numRuns];
            for (int i = 0; i < numRuns; i++) {
                Recordinality rec = new Recordinality(kSize);
                for (String line : lines) rec.observe(line);
                results[i] = rec.estimateCardinality();
            }
            double mean = new Mean().evaluate(results);
            double stdDev = new StandardDeviation().evaluate(results);
            // 3193 appears to be the known true cardinality of the test data set,
            // so this reports the run-to-run SD relative to the expected answer
            double stdError = stdDev / 3193;
            long runTime = System.currentTimeMillis() - start;
            return new Result(kSize, mean, stdError, runTime);
        }
    };
}
Also used : Mean(org.apache.commons.math3.stat.descriptive.moment.Mean) StandardDeviation(org.apache.commons.math3.stat.descriptive.moment.StandardDeviation)
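For context, a minimal stand-alone sketch of the summary statistics used above (the class name and sample values here are hypothetical, not part of the project):

import org.apache.commons.math3.stat.descriptive.moment.Mean;
import org.apache.commons.math3.stat.descriptive.moment.StandardDeviation;

public class EstimateSummarySketch {

    public static void main(String[] args) {
        // Pretend these are cardinality estimates from repeated Recordinality runs
        double[] results = { 3150, 3220, 3185, 3201, 3178 };
        double mean = new Mean().evaluate(results);
        // StandardDeviation uses the bias-corrected (n-1) formula by default
        double stdDev = new StandardDeviation().evaluate(results);
        // Dividing by 3193 (the apparent true cardinality of the test data)
        // gives the spread relative to the known answer
        double stdError = stdDev / 3193;
        System.out.printf("mean=%.1f sd=%.1f relative error=%.4f%n", mean, stdDev, stdError);
    }
}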

Example 17 with Line

use of org.apache.commons.math3.geometry.euclidean.twod.Line in project GDSC-SMLM by aherbert.

the class PCPALMMolecules method calculateAveragePrecision.

/**
	 * Calculate the average precision by fitting a skewed Gaussian to the histogram of the precision distribution.
	 * <p>
	 * A simple mean and SD of the histogram are computed first. If the mean of the skewed Gaussian does not lie
	 * within 3 SDs of the simple mean then the simple mean is returned.
	 * 
	 * @param molecules
	 *            the molecules providing the precision values
	 * @param title
	 *            the plot title (null if no plot should be displayed)
	 * @param histogramBins
	 *            the number of histogram bins
	 * @param logFitParameters
	 *            Set to true to record the fit parameters to the ImageJ log
	 * @param removeOutliers
	 *            Set to true to create the distribution using only values within 1.5x the inter-quartile range
	 *            (IQR) of the data
	 * @return The average precision
	 */
public double calculateAveragePrecision(ArrayList<Molecule> molecules, String title, int histogramBins, boolean logFitParameters, boolean removeOutliers) {
    // Plot histogram of the precision
    float[] data = new float[molecules.size()];
    DescriptiveStatistics stats = new DescriptiveStatistics();
    double yMin = Double.NEGATIVE_INFINITY, yMax = 0;
    for (int i = 0; i < data.length; i++) {
        data[i] = (float) molecules.get(i).precision;
        stats.addValue(data[i]);
    }
    // Set the min and max y-values using 1.5 x IQR 
    if (removeOutliers) {
        double lower = stats.getPercentile(25);
        double upper = stats.getPercentile(75);
        if (Double.isNaN(lower) || Double.isNaN(upper)) {
            if (logFitParameters)
                Utils.log("Error computing IQR: %f - %f", lower, upper);
        } else {
            double iqr = upper - lower;
            yMin = FastMath.max(lower - iqr, stats.getMin());
            yMax = FastMath.min(upper + iqr, stats.getMax());
            if (logFitParameters)
                Utils.log("  Data range: %f - %f. Plotting 1.5x IQR: %f - %f", stats.getMin(), stats.getMax(), yMin, yMax);
        }
    }
    if (yMin == Double.NEGATIVE_INFINITY) {
        yMin = stats.getMin();
        yMax = stats.getMax();
        if (logFitParameters)
            Utils.log("  Data range: %f - %f", yMin, yMax);
    }
    float[][] hist = Utils.calcHistogram(data, yMin, yMax, histogramBins);
    Plot2 plot = null;
    if (title != null) {
        plot = new Plot2(title, "Precision", "Frequency");
        float[] xValues = hist[0];
        float[] yValues = hist[1];
        if (xValues.length > 0) {
            double xPadding = 0.05 * (xValues[xValues.length - 1] - xValues[0]);
            plot.setLimits(xValues[0] - xPadding, xValues[xValues.length - 1] + xPadding, 0, Maths.max(yValues) * 1.05);
        }
        plot.addPoints(xValues, yValues, Plot2.BAR);
        Utils.display(title, plot);
    }
    // Extract non-zero data
    float[] x = Arrays.copyOf(hist[0], hist[0].length);
    float[] y = hist[1];
    int count = 0;
    float dx = (x[1] - x[0]) * 0.5f;
    for (int i = 0; i < y.length; i++) if (y[i] > 0) {
        x[count] = x[i] + dx;
        y[count] = y[i];
        count++;
    }
    x = Arrays.copyOf(x, count);
    y = Arrays.copyOf(y, count);
    // Sanity check the fitted data: get the mean and SD of the histogram
    double[] stats2 = Utils.getHistogramStatistics(x, y);
    double mean = stats2[0];
    if (logFitParameters)
        log("  Initial Statistics: %f +/- %f", stats2[0], stats2[1]);
    // Standard Gaussian fit
    double[] parameters = fitGaussian(x, y);
    if (parameters == null) {
        log("  Failed to fit initial Gaussian");
        return mean;
    }
    double newMean = parameters[1];
    double error = Math.abs(stats2[0] - newMean) / stats2[1];
    if (error > 3) {
        log("  Failed to fit Gaussian: %f standard deviations from histogram mean", error);
        return mean;
    }
    if (newMean < yMin || newMean > yMax) {
        log("  Failed to fit Gaussian: %f outside data range %f - %f", newMean, yMin, yMax);
        return mean;
    }
    mean = newMean;
    if (logFitParameters)
        log("  Initial Gaussian: %f @ %f +/- %f", parameters[0], parameters[1], parameters[2]);
    double[] initialSolution = new double[] { parameters[0], parameters[1], parameters[2], -1 };
    // Fit to a skewed Gaussian (or appropriate function)
    double[] skewParameters = fitSkewGaussian(x, y, initialSolution);
    if (skewParameters == null) {
        log("  Failed to fit Skewed Gaussian");
        return mean;
    }
    SkewNormalFunction sn = new SkewNormalFunction(skewParameters);
    if (logFitParameters)
        log("  Skewed Gaussian: %f @ %f +/- %f (a = %f) => %f +/- %f", skewParameters[0], skewParameters[1], skewParameters[2], skewParameters[3], sn.getMean(), Math.sqrt(sn.getVariance()));
    newMean = sn.getMean();
    error = Math.abs(stats2[0] - newMean) / stats2[1];
    if (error > 3) {
        log("  Failed to fit Skewed Gaussian: %f standard deviations from histogram mean", error);
        return mean;
    }
    if (newMean < yMin || newMean > yMax) {
        log("  Failed to fit Skewed Gaussian: %f outside data range %f - %f", newMean, yMin, yMax);
        return mean;
    }
    // Use original histogram x-axis to maintain all the bins
    if (plot != null) {
        x = hist[0];
        // Shift every bin centre by half the bin width (x spans all bins again)
        for (int i = 0; i < x.length; i++) x[i] += dx;
        plot.setColor(Color.red);
        addToPlot(plot, x, skewParameters, Plot2.LINE);
        plot.setColor(Color.black);
        Utils.display(title, plot);
    }
    // Return the average precision from the fitted curve
    return newMean;
}
Also used : DescriptiveStatistics(org.apache.commons.math3.stat.descriptive.DescriptiveStatistics) Plot2(ij.gui.Plot2) SkewNormalFunction(gdsc.smlm.function.SkewNormalFunction) WeightedObservedPoint(org.apache.commons.math3.fitting.WeightedObservedPoint) ClusterPoint(gdsc.core.clustering.ClusterPoint)
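A minimal sketch (our own helper, not GDSC-SMLM API) of the acceptance rule applied twice above: keep a fitted mean only if it lies within 3 SDs of the simple histogram mean and inside the data range, otherwise fall back to the simple mean:

public static double acceptFittedMean(double fittedMean, double simpleMean, double simpleSd, double min, double max) {
    // Distance from the histogram mean in units of the histogram SD
    double error = Math.abs(simpleMean - fittedMean) / simpleSd;
    if (error > 3 || fittedMean < min || fittedMean > max)
        // Reject the fit and keep the simple histogram mean
        return simpleMean;
    return fittedMean;
}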

Example 18 with Line

use of org.apache.commons.math3.geometry.euclidean.twod.Line in project GDSC-SMLM by aherbert.

the class PulseActivationAnalysis method createShapes.

private Shape[] createShapes(RandomDataGenerator rdg, int c) {
    RandomGenerator rand = rdg.getRandomGenerator();
    Shape[] shapes;
    double min = sim_size / 20;
    double max = sim_size / 10;
    double range = max - min;
    switch(sim_distribution[c]) {
        case CIRCLE:
            shapes = new Shape[10];
            for (int i = 0; i < shapes.length; i++) {
                float x = nextCoordinate(rand);
                float y = nextCoordinate(rand);
                double radius = rand.nextDouble() * range + min;
                shapes[i] = new Circle(x, y, radius);
            }
            break;
        case LINE:
            shapes = new Shape[10];
            for (int i = 0; i < shapes.length; i++) {
                float x = nextCoordinate(rand);
                float y = nextCoordinate(rand);
                double angle = rand.nextDouble() * Math.PI;
                double radius = rand.nextDouble() * range + min;
                shapes[i] = new Line(x, y, angle, radius);
            }
            break;
        case POINT:
        default:
            shapes = new Shape[sim_nMolecules[c]];
            for (int i = 0; i < shapes.length; i++) {
                float x = nextCoordinate(rand);
                float y = nextCoordinate(rand);
                shapes[i] = new Point(x, y);
            }
    }
    return shapes;
}
Also used : RandomGenerator(org.apache.commons.math3.random.RandomGenerator)
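The sampling pattern above is plain Commons Math. A stand-alone sketch (class name and values are illustrative only):

import org.apache.commons.math3.random.RandomDataGenerator;
import org.apache.commons.math3.random.RandomGenerator;

public class ShapeSamplingSketch {

    public static void main(String[] args) {
        RandomGenerator rand = new RandomDataGenerator().getRandomGenerator();
        double min = 10, max = 20, range = max - min;
        // Uniform radius in [min, max)
        double radius = rand.nextDouble() * range + min;
        // Uniform angle in [0, PI) for an undirected line
        double angle = rand.nextDouble() * Math.PI;
        System.out.printf("radius=%.2f angle=%.2f rad%n", radius, angle);
    }
}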

Example 19 with Line

use of org.apache.commons.math3.geometry.euclidean.twod.Line in project GDSC-SMLM by aherbert.

the class FIRE method runQEstimation.

private void runQEstimation() {
    IJ.showStatus(TITLE + " ...");
    if (!showQEstimationInputDialog())
        return;
    MemoryPeakResults results = ResultsManager.loadInputResults(inputOption, false);
    if (results == null || results.size() == 0) {
        IJ.error(TITLE, "No results could be loaded");
        return;
    }
    if (results.getCalibration() == null) {
        IJ.error(TITLE, "The results are not calibrated");
        return;
    }
    results = cropToRoi(results);
    if (results.size() < 2) {
        IJ.error(TITLE, "No results within the crop region");
        return;
    }
    initialise(results, null);
    // We need localisation precision.
    // Build a histogram of the localisation precision.
    // Get the initial mean and SD and plot as a Gaussian.
    PrecisionHistogram histogram = calculatePrecisionHistogram();
    if (histogram == null) {
        IJ.error(TITLE, "No localisation precision available.\n \nPlease choose " + PrecisionMethod.FIXED + " and enter a precision mean and SD.");
        return;
    }
    StoredDataStatistics precision = histogram.precision;
    //String name = results.getName();
    double fourierImageScale = SCALE_VALUES[imageScaleIndex];
    int imageSize = IMAGE_SIZE_VALUES[imageSizeIndex];
    // Create the image and compute the numerator of FRC. 
    // Do not use the signal so results.size() is the number of localisations.
    IJ.showStatus("Computing FRC curve ...");
    FireImages images = createImages(fourierImageScale, imageSize, false);
    // DEBUGGING - Save the two images to disk. Load the images into the Matlab 
    // code that calculates the Q-estimation and make this plugin match the functionality.
    //IJ.save(new ImagePlus("i1", images.ip1), "/scratch/i1.tif");
    //IJ.save(new ImagePlus("i2", images.ip2), "/scratch/i2.tif");
    FRC frc = new FRC();
    frc.progress = progress;
    frc.setFourierMethod(fourierMethod);
    frc.setSamplingMethod(samplingMethod);
    frc.setPerimeterSamplingFactor(perimeterSamplingFactor);
    FRCCurve frcCurve = frc.calculateFrcCurve(images.ip1, images.ip2, images.nmPerPixel);
    if (frcCurve == null) {
        IJ.error(TITLE, "Failed to compute FRC curve");
        return;
    }
    IJ.showStatus("Running Q-estimation ...");
    // Note:
    // The method implemented here is based on Matlab code provided by Bernd Rieger.
    // The idea is to compute the spurious correlation component of the FRC Numerator
    // using an initial estimate of the distribution of the localisation precision (assumed 
    // to be Gaussian). This component is the contribution of repeat localisations of 
    // the same molecule to the numerator and is modelled as an exponential decay
    // (exp_decay). The component is scaled by the Q-value which
    // is the average number of times a molecule is seen in addition to the first time.
    // At large spatial frequencies the scaled component should match the numerator,
    // i.e. at high resolution (low FIRE number) the numerator is made up of repeat 
    // localisations of the same molecule and not actual structure in the image.
    // The best fit is where the numerator equals the scaled component, i.e. num / (q*exp_decay) == 1.
    // The FRC Numerator is plotted and Q can be determined by
    // adjusting Q and the precision mean and SD to maximise the cost function.
    // This can be done interactively by the user with the effect on the FRC curve
    // dynamically updated and displayed.
    // Compute the scaled FRC numerator
    double qNorm = (1 / frcCurve.mean1 + 1 / frcCurve.mean2);
    double[] frcnum = new double[frcCurve.getSize()];
    for (int i = 0; i < frcnum.length; i++) {
        FRCCurveResult r = frcCurve.get(i);
        frcnum[i] = qNorm * r.getNumerator() / r.getNumberOfSamples();
    }
    // Compute the spatial frequency and the region for curve fitting
    double[] q = FRC.computeQ(frcCurve, false);
    int low = 0, high = q.length;
    while (high > 0 && q[high - 1] > maxQ) high--;
    while (low < q.length && q[low] < minQ) low++;
    // Require we fit at least 10% of the curve
    if (high - low < q.length * 0.1) {
        IJ.error(TITLE, "Not enough points for Q estimation");
        return;
    }
    // Obtain initial estimate of Q plateau height and decay.
    // This can be done by fitting the precision histogram and then fixing the mean and sigma.
    // Or it can be done by allowing the precision to be sampled and the mean and sigma
    // become parameters for fitting.
    // Check if we can sample precision values
    boolean sampleDecay = precision != null && FIRE.sampleDecay;
    double[] exp_decay;
    if (sampleDecay) {
        // A random sample of precision values from the distribution is used to 
        // construct the decay curve
        int[] sample = Random.sample(10000, precision.getN(), new Well19937c());
        final double four_pi2 = 4 * Math.PI * Math.PI;
        double[] pre = new double[q.length];
        for (int i = 1; i < q.length; i++) pre[i] = -four_pi2 * q[i] * q[i];
        // Sample
        final int n = sample.length;
        double[] hq = new double[n];
        for (int j = 0; j < n; j++) {
            // Scale to SR pixels
            double s2 = precision.getValue(sample[j]) / images.nmPerPixel;
            s2 *= s2;
            for (int i = 1; i < q.length; i++) hq[i] += FastMath.exp(pre[i] * s2);
        }
        for (int i = 1; i < q.length; i++) hq[i] /= n;
        exp_decay = new double[q.length];
        exp_decay[0] = 1;
        for (int i = 1; i < q.length; i++) {
            double sinc_q = sinc(Math.PI * q[i]);
            exp_decay[i] = sinc_q * sinc_q * hq[i];
        }
    } else {
        // Note: The precision mean and std should be in units of super-resolution 
        // pixels, so scale to SR pixels
        exp_decay = computeExpDecay(histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, q);
    }
    // Smoothing
    double[] smooth;
    if (loessSmoothing) {
        // Note: This computes the log then smooths it 
        double bandwidth = 0.1;
        int robustness = 0;
        double[] l = new double[exp_decay.length];
        for (int i = 0; i < l.length; i++) {
            // Original Matlab code computes the log for each array.
            // This is equivalent to a single log on the fraction of the two.
            // Perhaps the two log method is more numerically stable.
            //l[i] = Math.log(Math.abs(frcnum[i])) - Math.log(exp_decay[i]);
            l[i] = Math.log(Math.abs(frcnum[i] / exp_decay[i]));
        }
        try {
            LoessInterpolator loess = new LoessInterpolator(bandwidth, robustness);
            smooth = loess.smooth(q, l);
        } catch (Exception e) {
            IJ.error(TITLE, "LOESS smoothing failed");
            return;
        }
    } else {
        // Note: This smooths the curve before computing the log 
        double[] norm = new double[exp_decay.length];
        for (int i = 0; i < norm.length; i++) {
            norm[i] = frcnum[i] / exp_decay[i];
        }
        // Median window of 5 == radius of 2
        MedianWindow mw = new MedianWindow(norm, 2);
        smooth = new double[exp_decay.length];
        for (int i = 0; i < norm.length; i++) {
            smooth[i] = Math.log(Math.abs(mw.getMedian()));
            mw.increment();
        }
    }
    // Fit with quadratic to find the initial guess.
    // Note: example Matlab code frc_Qcorrection7.m identifies regions of the 
    // smoothed log curve with low derivative and only fits those. The fit is 
    // used for the final estimate. Fitting a subset with low derivative is not 
    // implemented here since the initial estimate is subsequently optimised 
    // to maximise a cost function. 
    Quadratic curve = new Quadratic();
    SimpleCurveFitter fit = SimpleCurveFitter.create(curve, new double[2]);
    WeightedObservedPoints points = new WeightedObservedPoints();
    for (int i = low; i < high; i++) points.add(q[i], smooth[i]);
    double[] estimate = fit.fit(points.toList());
    double qValue = FastMath.exp(estimate[0]);
    //System.out.printf("Initial q-estimate = %s => %.3f\n", Arrays.toString(estimate), qValue);
    // This could be made an option; currently it is only used for debugging
    boolean debug = false;
    if (debug) {
        // Plot the initial fit and the fit curve
        double[] qScaled = FRC.computeQ(frcCurve, true);
        double[] line = new double[q.length];
        for (int i = 0; i < q.length; i++) line[i] = curve.value(q[i], estimate);
        String title = TITLE + " Initial fit";
        Plot2 plot = new Plot2(title, "Spatial Frequency (nm^-1)", "FRC Numerator");
        String label = String.format("Q = %.3f", qValue);
        plot.addPoints(qScaled, smooth, Plot.LINE);
        plot.setColor(Color.red);
        plot.addPoints(qScaled, line, Plot.LINE);
        plot.setColor(Color.black);
        plot.addLabel(0, 0, label);
        Utils.display(title, plot, Utils.NO_TO_FRONT);
    }
    if (fitPrecision) {
        // Question: should this update be optional?
        if (sampleDecay) {
            // If a sample of the precision was used to construct the data for the initial fit 
            // then update the estimate using the fit result since it will be a better start point. 
            histogram.sigma = precision.getStandardDeviation();
            // Normalise sum-of-squares to the SR pixel size
            double meanSumOfSquares = (precision.getSumOfSquares() / (images.nmPerPixel * images.nmPerPixel)) / precision.getN();
            histogram.mean = images.nmPerPixel * Math.sqrt(meanSumOfSquares - estimate[1] / (4 * Math.PI * Math.PI));
        }
        // Do a multivariate fit ...
        SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
        PointValuePair p = null;
        MultiPlateauness f = new MultiPlateauness(frcnum, q, low, high);
        double[] initial = new double[] { histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, qValue };
        p = findMin(p, opt, f, scale(initial, 0.1));
        p = findMin(p, opt, f, scale(initial, 0.5));
        p = findMin(p, opt, f, initial);
        p = findMin(p, opt, f, scale(initial, 2));
        p = findMin(p, opt, f, scale(initial, 10));
        if (p != null) {
            double[] point = p.getPointRef();
            histogram.mean = point[0] * images.nmPerPixel;
            histogram.sigma = point[1] * images.nmPerPixel;
            qValue = point[2];
        }
    } else {
        // As above, update the estimate if the decay was sampled (arguably this should be optional).
        if (sampleDecay) {
            if (precisionMethod != PrecisionMethod.FIXED) {
                histogram.sigma = precision.getStandardDeviation();
                // Normalise sum-of-squares to the SR pixel size
                double meanSumOfSquares = (precision.getSumOfSquares() / (images.nmPerPixel * images.nmPerPixel)) / precision.getN();
                histogram.mean = images.nmPerPixel * Math.sqrt(meanSumOfSquares - estimate[1] / (4 * Math.PI * Math.PI));
            }
            exp_decay = computeExpDecay(histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, q);
        }
        // Estimate spurious component by promoting plateauness.
        // The Matlab code used random initial points for a Simplex optimiser.
        // A Brent line search should be pretty deterministic so do simple repeats.
        // However it will proceed downhill so if the initial point is wrong then 
        // it will find a sub-optimal result.
        UnivariateOptimizer o = new BrentOptimizer(1e-3, 1e-6);
        Plateauness f = new Plateauness(frcnum, exp_decay, low, high);
        UnivariatePointValuePair p = null;
        p = findMin(p, o, f, qValue, 0.1);
        p = findMin(p, o, f, qValue, 0.2);
        p = findMin(p, o, f, qValue, 0.333);
        p = findMin(p, o, f, qValue, 0.5);
        // Do some Simplex repeats as well
        SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
        p = findMin(p, opt, f, qValue * 0.1);
        p = findMin(p, opt, f, qValue * 0.5);
        p = findMin(p, opt, f, qValue);
        p = findMin(p, opt, f, qValue * 2);
        p = findMin(p, opt, f, qValue * 10);
        if (p != null)
            qValue = p.getPoint();
    }
    QPlot qplot = new QPlot(frcCurve, qValue, low, high);
    // Interactive dialog to estimate Q (blinking events per fluorophore) using 
    // sliders for the mean and standard deviation of the localisation precision.
    showQEstimationDialog(histogram, qplot, frcCurve, images.nmPerPixel);
    IJ.showStatus(TITLE + " complete");
}
Also used : BrentOptimizer(org.apache.commons.math3.optim.univariate.BrentOptimizer) Plot2(ij.gui.Plot2) Well19937c(org.apache.commons.math3.random.Well19937c) PointValuePair(org.apache.commons.math3.optim.PointValuePair) UnivariatePointValuePair(org.apache.commons.math3.optim.univariate.UnivariatePointValuePair) LoessInterpolator(org.apache.commons.math3.analysis.interpolation.LoessInterpolator) WeightedObservedPoints(org.apache.commons.math3.fitting.WeightedObservedPoints) SimplexOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer) MemoryPeakResults(gdsc.smlm.results.MemoryPeakResults) MedianWindow(gdsc.core.utils.MedianWindow) SimpleCurveFitter(org.apache.commons.math3.fitting.SimpleCurveFitter) FRCCurveResult(gdsc.smlm.ij.frc.FRC.FRCCurveResult) StoredDataStatistics(gdsc.core.utils.StoredDataStatistics) UnivariatePointValuePair(org.apache.commons.math3.optim.univariate.UnivariatePointValuePair) WeightedObservedPoint(org.apache.commons.math3.fitting.WeightedObservedPoint) TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) FRCCurve(gdsc.smlm.ij.frc.FRC.FRCCurve) FRC(gdsc.smlm.ij.frc.FRC) UnivariateOptimizer(org.apache.commons.math3.optim.univariate.UnivariateOptimizer)
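Two details of the method are worth isolating. First, the sampled decay curve: each spatial frequency q gets sinc^2(pi*q) times the average of exp(-4*pi^2*q^2*s^2) over the sampled precisions s (in super-resolution pixel units). A stand-alone sketch with our own names, not the plugin's API:

import org.apache.commons.math3.util.FastMath;

public class ExpDecaySketch {

    // sinc(x) = sin(x)/x with the removable singularity at 0
    static double sinc(double x) {
        return (x == 0) ? 1 : Math.sin(x) / x;
    }

    // q[0] is assumed to be the zero frequency, as in the code above
    static double[] expDecay(double[] q, double[] precisions) {
        final double fourPi2 = 4 * Math.PI * Math.PI;
        double[] decay = new double[q.length];
        decay[0] = 1;
        for (int i = 1; i < q.length; i++) {
            // Average the Gaussian damping term over the sampled precisions
            double mean = 0;
            for (double s : precisions)
                mean += FastMath.exp(-fourPi2 * q[i] * q[i] * s * s);
            mean /= precisions.length;
            double sincQ = sinc(Math.PI * q[i]);
            decay[i] = sincQ * sincQ * mean;
        }
        return decay;
    }
}

Second, the initial Q estimate. Since qValue = exp(estimate[0]) and the fitter is created with new double[2], the plugin's (unshown) Quadratic is presumably a two-parameter model whose first parameter is the plateau level of the smoothed log curve; the exact form below (y = p0 + p1*x^2) is our assumption, not the plugin's source:

import org.apache.commons.math3.analysis.ParametricUnivariateFunction;

class QuadraticSketch implements ParametricUnivariateFunction {

    // Value of y = p0 + p1 * x^2
    public double value(double x, double... p) {
        return p[0] + p[1] * x * x;
    }

    // Gradient with respect to the parameters (dy/dp0, dy/dp1)
    public double[] gradient(double x, double... p) {
        return new double[] { 1, x * x };
    }
}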

Example 20 with Line

use of org.apache.commons.math3.geometry.euclidean.twod.Line in project GDSC-SMLM by aherbert.

the class BenchmarkFilterAnalysis method depthAnalysis.

/**
	 * Depth analysis.
	 *
	 * @param allAssignments
	 *            The assignments generated from running the filter (or null)
	 * @param filter
	 *            the filter
	 * @return the assignments
	 */
private ArrayList<FractionalAssignment[]> depthAnalysis(ArrayList<FractionalAssignment[]> allAssignments, DirectFilter filter) {
    if (!depthRecallAnalysis || simulationParameters.fixedDepth)
        return null;
    // Build a histogram of the number of spots at different depths
    final double[] depths = depthStats.getValues();
    double[] limits = Maths.limits(depths);
    //final int bins = Math.max(10, nActual / 100);
    //final int bins = Utils.getBinsSturges(depths.length);
    final int bins = Utils.getBinsSqrt(depths.length);
    double[][] h1 = Utils.calcHistogram(depths, limits[0], limits[1], bins);
    double[][] h2 = Utils.calcHistogram(depthFitStats.getValues(), limits[0], limits[1], bins);
    // If the assignments were not provided, run the filter manually to get the results that pass.
    if (allAssignments == null)
        allAssignments = getAssignments(filter);
    double[] depths2 = new double[results.size()];
    int count = 0;
    for (FractionalAssignment[] assignments : allAssignments) {
        if (assignments == null)
            continue;
        for (int i = 0; i < assignments.length; i++) {
            final CustomFractionalAssignment c = (CustomFractionalAssignment) assignments[i];
            depths2[count++] = c.peak.error;
        }
    }
    depths2 = Arrays.copyOf(depths2, count);
    // Build a histogram using the same limits
    double[][] h3 = Utils.calcHistogram(depths2, limits[0], limits[1], bins);
    // Convert pixel depth to nm
    for (int i = 0; i < h1[0].length; i++) h1[0][i] *= simulationParameters.a;
    limits[0] *= simulationParameters.a;
    limits[1] *= simulationParameters.a;
    // Produce a histogram of the number of spots at each depth
    String title1 = TITLE + " Depth Histogram";
    Plot2 plot1 = new Plot2(title1, "Depth (nm)", "Frequency");
    plot1.setLimits(limits[0], limits[1], 0, Maths.max(h1[1]));
    plot1.setColor(Color.black);
    plot1.addPoints(h1[0], h1[1], Plot2.BAR);
    plot1.addLabel(0, 0, "Black = Spots; Blue = Fitted; Red = Filtered");
    plot1.setColor(Color.blue);
    plot1.addPoints(h1[0], h2[1], Plot2.BAR);
    plot1.setColor(Color.red);
    plot1.addPoints(h1[0], h3[1], Plot2.BAR);
    plot1.setColor(Color.magenta);
    PlotWindow pw1 = Utils.display(title1, plot1);
    if (Utils.isNewWindow())
        wo.add(pw1);
    // Interpolate
    final double halfBinWidth = (h1[0][1] - h1[0][0]) * 0.5;
    // Remove final value of the histogram as this is at the upper limit of the range (i.e. count zero)
    h1[0] = Arrays.copyOf(h1[0], h1[0].length - 1);
    h1[1] = Arrays.copyOf(h1[1], h1[0].length);
    h2[1] = Arrays.copyOf(h2[1], h1[0].length);
    h3[1] = Arrays.copyOf(h3[1], h1[0].length);
    // TODO : Fix the smoothing since LOESS sometimes does not work.
    // Perhaps allow configuration of the number of histogram bins and the smoothing bandwidth.
    // Use minimum of 3 points for smoothing
    // Ensure we use at least x% of data
    double bandwidth = Math.max(3.0 / h1[0].length, 0.15);
    LoessInterpolator loess = new LoessInterpolator(bandwidth, 1);
    PolynomialSplineFunction spline1 = loess.interpolate(h1[0], h1[1]);
    PolynomialSplineFunction spline2 = loess.interpolate(h1[0], h2[1]);
    PolynomialSplineFunction spline3 = loess.interpolate(h1[0], h3[1]);
    // Use a second interpolator in case the LOESS fails
    LinearInterpolator lin = new LinearInterpolator();
    PolynomialSplineFunction spline1b = lin.interpolate(h1[0], h1[1]);
    PolynomialSplineFunction spline2b = lin.interpolate(h1[0], h2[1]);
    PolynomialSplineFunction spline3b = lin.interpolate(h1[0], h3[1]);
    // Increase the number of points to show a smooth curve
    double[] points = new double[bins * 5];
    limits = Maths.limits(h1[0]);
    final double interval = (limits[1] - limits[0]) / (points.length - 1);
    double[] v = new double[points.length];
    double[] v2 = new double[points.length];
    double[] v3 = new double[points.length];
    for (int i = 0; i < points.length - 1; i++) {
        points[i] = limits[0] + i * interval;
        v[i] = getSplineValue(spline1, spline1b, points[i]);
        v2[i] = getSplineValue(spline2, spline2b, points[i]);
        v3[i] = getSplineValue(spline3, spline3b, points[i]);
        points[i] += halfBinWidth;
    }
    // Final point on the limit of the spline range
    int ii = points.length - 1;
    v[ii] = getSplineValue(spline1, spline1b, limits[1]);
    v2[ii] = getSplineValue(spline2, spline2b, limits[1]);
    v3[ii] = getSplineValue(spline3, spline3b, limits[1]);
    points[ii] = limits[1] + halfBinWidth;
    // Calculate recall
    for (int i = 0; i < v.length; i++) {
        v2[i] = v2[i] / v[i];
        v3[i] = v3[i] / v[i];
    }
    final double halfSummaryDepth = summaryDepth * 0.5;
    String title2 = TITLE + " Depth Histogram (normalised)";
    Plot2 plot2 = new Plot2(title2, "Depth (nm)", "Recall");
    plot2.setLimits(limits[0] + halfBinWidth, limits[1] + halfBinWidth, 0, Maths.min(1, Maths.max(v2)));
    plot2.setColor(Color.black);
    plot2.addLabel(0, 0, "Blue = Fitted; Red = Filtered");
    plot2.setColor(Color.blue);
    plot2.addPoints(points, v2, Plot2.LINE);
    plot2.setColor(Color.red);
    plot2.addPoints(points, v3, Plot2.LINE);
    plot2.setColor(Color.magenta);
    if (-halfSummaryDepth - halfBinWidth >= limits[0]) {
        plot2.drawLine(-halfSummaryDepth, 0, -halfSummaryDepth, getSplineValue(spline3, spline3b, -halfSummaryDepth - halfBinWidth) / getSplineValue(spline1, spline1b, -halfSummaryDepth - halfBinWidth));
    }
    if (halfSummaryDepth - halfBinWidth <= limits[1]) {
        plot2.drawLine(halfSummaryDepth, 0, halfSummaryDepth, getSplineValue(spline3, spline3b, halfSummaryDepth - halfBinWidth) / getSplineValue(spline1, spline1b, halfSummaryDepth - halfBinWidth));
    }
    PlotWindow pw2 = Utils.display(title2, plot2);
    if (Utils.isNewWindow())
        wo.add(pw2);
    return allAssignments;
}
Also used : PlotWindow(ij.gui.PlotWindow) Plot2(ij.gui.Plot2) PolynomialSplineFunction(org.apache.commons.math3.analysis.polynomials.PolynomialSplineFunction) LoessInterpolator(org.apache.commons.math3.analysis.interpolation.LoessInterpolator) FractionalAssignment(gdsc.core.match.FractionalAssignment) PeakFractionalAssignment(gdsc.smlm.results.filter.PeakFractionalAssignment) LinearInterpolator(org.apache.commons.math3.analysis.interpolation.LinearInterpolator)
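getSplineValue is not shown above; a plausible reconstruction (an assumption on our part, the real helper may differ) is to prefer the LOESS spline and fall back to the linear interpolant when the point is outside the spline's knot range or the LOESS value is not finite:

import org.apache.commons.math3.analysis.polynomials.PolynomialSplineFunction;

public class SplineFallback {

    public static double getSplineValue(PolynomialSplineFunction loess, PolynomialSplineFunction linear, double x) {
        // isValidPoint is true only inside the interpolation range
        if (loess.isValidPoint(x)) {
            double v = loess.value(x);
            if (!Double.isNaN(v))
                return v;
        }
        // LOESS unavailable here: use the linear interpolant (same knots) instead
        return linear.value(x);
    }
}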

Aggregations

ArrayList (java.util.ArrayList)9 Line (org.twak.utils.Line)8 Plot2 (ij.gui.Plot2)7 List (java.util.List)7 Map (java.util.Map)7 Set (java.util.Set)7 Point2d (javax.vecmath.Point2d)7 HashSet (java.util.HashSet)6 Collectors (java.util.stream.Collectors)6 Pair (org.twak.utils.Pair)6 Iterator (java.util.Iterator)5 Point3d (javax.vecmath.Point3d)5 Vector2d (javax.vecmath.Vector2d)5 PointValuePair (org.apache.commons.math3.optim.PointValuePair)5 Tweed (org.twak.tweed.Tweed)5 TweedSettings (org.twak.tweed.TweedSettings)5 Material (com.jme3.material.Material)4 ColorRGBA (com.jme3.math.ColorRGBA)4 Geometry (com.jme3.scene.Geometry)4 Mesh (com.jme3.scene.Mesh)4