
Example 1 with Percentile

Use of org.apache.commons.math3.stat.descriptive.rank.Percentile in project GDSC-SMLM by aherbert.

Class DoubletAnalysis, method summariseResults.

/**
	 * Summarise results.
	 *
	 * @param results
	 *            the results
	 * @param density
	 *            the density
	 * @param runTime
	 *            the run time
	 */
private void summariseResults(ArrayList<DoubletResult> results, double density, long runTime) {
    // Store results in memory for later analysis
    doubletResults = results;
    // If we are only assessing results with no neighbour candidates
    // TODO - Count the number of actual results that have no neighbours
    numberOfMolecules = this.results.size() - ignored.get();
    // Store details we want in the analysis table
    StringBuilder sb = new StringBuilder();
    sb.append(Utils.rounded(density)).append("\t");
    sb.append(Utils.rounded(getSa())).append("\t");
    sb.append(config.getRelativeFitting()).append("\t");
    sb.append(fitConfig.getFitFunction().toString());
    sb.append(":").append(PeakFit.getSolverName(fitConfig));
    if (fitConfig.getFitSolver() == FitSolver.MLE && fitConfig.isModelCamera()) {
        sb.append(":Camera\t");
        // Add details of the noise model for the MLE
        sb.append("EM=").append(fitConfig.isEmCCD());
        sb.append(":A=").append(Utils.rounded(fitConfig.getAmplification()));
        sb.append(":N=").append(Utils.rounded(fitConfig.getReadNoise()));
        sb.append("\t");
    } else
        sb.append("\t\t");
    analysisPrefix = sb.toString();
    // -=-=-=-=-
    showResults(results, showResults);
    createSummaryTable();
    sb.setLength(0);
    final int n = countN(results);
    // Create the benchmark settings and the fitting settings
    sb.append(numberOfMolecules).append("\t");
    sb.append(n).append("\t");
    sb.append(Utils.rounded(density)).append("\t");
    sb.append(Utils.rounded(simulationParameters.minSignal)).append("\t");
    sb.append(Utils.rounded(simulationParameters.maxSignal)).append("\t");
    sb.append(Utils.rounded(simulationParameters.signalPerFrame)).append("\t");
    sb.append(Utils.rounded(simulationParameters.s)).append("\t");
    sb.append(Utils.rounded(simulationParameters.a)).append("\t");
    sb.append(Utils.rounded(getSa() * simulationParameters.a)).append("\t");
    sb.append(Utils.rounded(simulationParameters.gain)).append("\t");
    sb.append(Utils.rounded(simulationParameters.readNoise)).append("\t");
    sb.append(Utils.rounded(simulationParameters.b)).append("\t");
    // Compute the noise
    double noise = Math.sqrt((simulationParameters.b * ((simulationParameters.emCCD) ? 2 : 1)) / simulationParameters.gain
            + simulationParameters.readNoise * simulationParameters.readNoise);
    sb.append(Utils.rounded(noise)).append("\t");
    sb.append(Utils.rounded(simulationParameters.signalPerFrame / noise)).append("\t");
    sb.append(config.getRelativeFitting()).append("\t");
    sb.append(fitConfig.getFitFunction().toString());
    sb.append(":").append(PeakFit.getSolverName(fitConfig));
    if (fitConfig.getFitSolver() == FitSolver.MLE && fitConfig.isModelCamera()) {
        sb.append(":Camera\t");
        // Add details of the noise model for the MLE
        sb.append("EM=").append(fitConfig.isEmCCD());
        sb.append(":A=").append(Utils.rounded(fitConfig.getAmplification()));
        sb.append(":N=").append(Utils.rounded(fitConfig.getReadNoise()));
        sb.append("\t");
    } else
        sb.append("\t\t");
    // Now output the actual results ...
    // Show histograms as cumulative to avoid problems with bin width
    // Residuals scores 
    // Iterations and evaluations where fit was OK
    StoredDataStatistics[] stats = new StoredDataStatistics[NAMES2.length];
    for (int i = 0; i < stats.length; i++) stats[i] = new StoredDataStatistics();
    // For Jaccard scoring we need to count the score with no residuals threshold,
    // i.e. Accumulate the score accepting all doublets that were fit 
    double tp = 0;
    double fp = 0;
    double bestTp = 0, bestFp = 0;
    ArrayList<DoubletBonus> data = new ArrayList<DoubletBonus>(results.size());
    for (DoubletResult result : results) {
        final double score = result.getMaxScore();
        // Filter the singles that would be accepted
        if (result.good1) {
            // Filter the doublets that would be accepted
            if (result.good2) {
                final double tp2 = result.tp2a + result.tp2b;
                final double fp2 = result.fp2a + result.fp2b;
                tp += tp2;
                fp += fp2;
                if (result.tp2a > 0.5) {
                    bestTp += result.tp2a;
                    bestFp += result.fp2a;
                }
                if (result.tp2b > 0.5) {
                    bestTp += result.tp2b;
                    bestFp += result.fp2b;
                }
                // Store this as a doublet bonus
                data.add(new DoubletBonus(score, result.getAvScore(), tp2 - result.tp1, fp2 - result.fp1));
            } else {
                // No doublet fit so this will always be the single fit result
                tp += result.tp1;
                fp += result.fp1;
                if (result.tp1 > 0.5) {
                    bestTp += result.tp1;
                    bestFp += result.fp1;
                }
            }
        }
        // Build statistics
        final int c = result.c;
        // True results, i.e. where there was a choice between single or doublet
        if (result.valid) {
            stats[c].add(score);
        }
        // Of those where the fit was good, summarise the iterations and evaluations
        if (result.good1) {
            stats[3].add(result.iter1);
            stats[4].add(result.eval1);
            // Only count the doublet iterations/evaluations for true doublet candidates (c != 0),
            // so we are not concerned about the iteration increase for singles that are not doublets.
            if (c != 0 && result.good2) {
                stats[5].add(result.iter2);
                stats[6].add(result.eval2);
            }
        }
    }
    // Debug the counts
    //		double tpSingle = 0;
    //		double fpSingle = 0;
    //		double tpDoublet = 0;
    //		double fpDoublet = 0;
    //		int nSingle = 0, nDoublet = 0;
    //		for (DoubletResult result : results)
    //		{
    //			if (result.good1)
    //			{
    //				if (result.good2)
    //				{
    //					tpDoublet += result.tp2;
    //					fpDoublet += result.fp2;
    //					nDoublet++;
    //				}
    //				tpSingle += result.tp1;
    //				fpSingle += result.fp1;
    //				nSingle++;
    //			}
    //		}
    //		System.out.printf("Single %.1f,%.1f (%d) : Doublet %.1f,%.1f (%d)\n", tpSingle, fpSingle, nSingle, tpDoublet, fpDoublet, nDoublet*2);
    // Summarise score for true results
    Percentile p = new Percentile(99);
    for (int c = 0; c < stats.length; c++) {
        double[] values = stats[c].getValues();
        // Sorting is needed for the percentile and the cumulative histogram so do it once
        Arrays.sort(values);
        sb.append(Utils.rounded(stats[c].getMean())).append("+/-")
                .append(Utils.rounded(stats[c].getStandardDeviation()))
                .append(" (").append(stats[c].getN()).append(") ")
                .append(Utils.rounded(p.evaluate(values))).append('\t');
        if (showHistograms && displayHistograms[c + NAMES.length])
            showHistogram(values, NAMES2[c]);
    }
    sb.append(MATCHING[matching]).append('\t');
    // Plot a graph of the additional results we would fit at all score thresholds.
    // This assumes we just pick the doublet if we fit it (NO FILTERING at all!)
    // Initialise the score for residuals 0
    // Store this as it serves as a baseline for the filtering analysis
    computeScores(data, tp, fp, numberOfMolecules, true);
    _residualsScoreMax = this.residualsScore;
    computeScores(data, tp, fp, numberOfMolecules, false);
    _residualsScoreAv = this.residualsScore;
    residualsScore = (useMaxResiduals) ? _residualsScoreMax : _residualsScoreAv;
    if (showJaccardPlot)
        plotJaccard(residualsScore, null);
    String bestJaccard = Utils.rounded(bestTp / (bestFp + numberOfMolecules)) + '\t';
    analysisPrefix += bestJaccard;
    sb.append(bestJaccard);
    addJaccardScores(sb);
    sb.append("\t").append(Utils.timeToString(runTime / 1000000.0));
    summaryTable.append(sb.toString());
}
Also used : Percentile(org.apache.commons.math3.stat.descriptive.rank.Percentile) StoredDataStatistics(gdsc.core.utils.StoredDataStatistics) ArrayList(java.util.ArrayList) PeakResultPoint(gdsc.smlm.ij.plugins.ResultsMatchCalculator.PeakResultPoint) BasePoint(gdsc.core.match.BasePoint)
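
The Percentile usage in this example is easy to miss inside the long summary method: each statistic's values are sorted once and the 99th percentile is reported alongside the mean and standard deviation. A minimal, self-contained sketch of that summarisation pattern, using DescriptiveStatistics as a stand-in for the GDSC StoredDataStatistics class, might look like this:

import java.util.Arrays;
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
import org.apache.commons.math3.stat.descriptive.rank.Percentile;

public class PercentileSummaryExample {
    public static void main(String[] args) {
        double[] values = { 3.2, 7.1, 4.8, 12.5, 5.0, 6.3, 9.9, 4.1 };

        // Stand-in for the GDSC StoredDataStatistics used above
        DescriptiveStatistics stats = new DescriptiveStatistics(values);

        // Percentile.evaluate(double[]) accepts unsorted data, but sorting once is
        // convenient when the same array also feeds a cumulative histogram
        Arrays.sort(values);
        Percentile p99 = new Percentile(99);

        // Mean +/- SD (n) p99, mirroring the summary table entry built above
        System.out.printf("%.3f +/- %.3f (%d) %.3f%n",
                stats.getMean(), stats.getStandardDeviation(), stats.getN(), p99.evaluate(values));
    }
}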

Example 2 with Percentile

Use of org.apache.commons.math3.stat.descriptive.rank.Percentile in project GDSC-SMLM by aherbert.

Class DataEstimator, method getPercentile.

/**
	 * Get the percentile value of the data
	 * 
	 * @param percentile
	 *            The percentile
	 * @return the percentile value
	 */
public float getPercentile(double percentile) {
    // Check the input
    if (percentile <= 0)
        percentile = Double.MIN_NORMAL;
    if (percentile > 100)
        percentile = 100;
    // The data should not have NaN so we ignore them for speed.
    final Percentile p = new Percentile(percentile).withNaNStrategy(NaNStrategy.FIXED);
    final int size = width * height;
    final double[] values = new double[size];
    for (int i = 0; i < size; i++) values[i] = data[i];
    return (float) p.evaluate(values);
}
Also used : Percentile(org.apache.commons.math3.stat.descriptive.rank.Percentile)
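
Two details carry most of the weight here: the requested percentile is clamped into the valid (0, 100] range before constructing Percentile, and withNaNStrategy (available from Commons Math 3.4) controls how NaN entries are handled. A short sketch of both, with the input data and class name being illustrative rather than taken from the plugin:

import org.apache.commons.math3.stat.descriptive.rank.Percentile;
import org.apache.commons.math3.stat.ranking.NaNStrategy;

public class PercentileNaNExample {
    public static void main(String[] args) {
        // Clamp the requested percentile into (0, 100], mirroring the guard in getPercentile()
        double requested = 0;
        double percentile = (requested <= 0) ? Double.MIN_NORMAL : Math.min(requested, 100);

        // FIXED leaves any NaN values in place during ranking; it is the cheap choice
        // when the data is known to be NaN-free, as in the DataEstimator example
        Percentile fast = new Percentile(percentile).withNaNStrategy(NaNStrategy.FIXED);
        double[] clean = { 12.0, 3.5, 8.1, 20.4, 5.5 };
        System.out.println("fast:   " + fast.evaluate(clean));

        // REMOVED drops NaN values before evaluating, at the cost of an extra pass
        Percentile robust = new Percentile(50).withNaNStrategy(NaNStrategy.REMOVED);
        double[] noisy = { 1.0, 2.0, Double.NaN, 4.0, 5.0 };
        System.out.println("robust: " + robust.evaluate(noisy));
    }
}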

Example 3 with Percentile

Use of org.apache.commons.math3.stat.descriptive.rank.Percentile in project GDSC-SMLM by aherbert.

Class PCPALMMolecules, method performDistanceAnalysis.

private void performDistanceAnalysis(double[][] intraHist, int p99) {
    // We want to know the fraction of distances between molecules at the 99th percentile
    // that are intra- rather than inter-molecule.
    // Do single linkage clustering of closest pair at this distance and count the number of 
    // links that are inter and intra.
    // Convert molecules for clustering
    ArrayList<ClusterPoint> points = new ArrayList<ClusterPoint>(molecules.size());
    // Precision was used to store the molecule ID
    for (Molecule m : molecules)
        points.add(ClusterPoint.newClusterPoint((int) m.precision, m.x, m.y, m.photons));
    ClusteringEngine engine = new ClusteringEngine(Prefs.getThreads(), ClusteringAlgorithm.PARTICLE_SINGLE_LINKAGE, new IJTrackProgress());
    IJ.showStatus("Clustering to check inter-molecule distances");
    engine.setTrackJoins(true);
    ArrayList<Cluster> clusters = engine.findClusters(points, intraHist[0][p99]);
    IJ.showStatus("");
    if (clusters != null) {
        double[] intraIdDistances = engine.getIntraIdDistances();
        double[] interIdDistances = engine.getInterIdDistances();
        int all = interIdDistances.length + intraIdDistances.length;
        log("  * Fraction of inter-molecule particle linkage @ %s nm = %s %%", Utils.rounded(intraHist[0][p99], 4), (all > 0) ? Utils.rounded(100.0 * interIdDistances.length / all, 4) : "0");
        // Show a double cumulative histogram plot
        double[][] intraIdHist = Maths.cumulativeHistogram(intraIdDistances, false);
        double[][] interIdHist = Maths.cumulativeHistogram(interIdDistances, false);
        // Plot
        String title = TITLE + " molecule linkage distance";
        Plot2 plot = new Plot2(title, "Distance", "Frequency", intraIdHist[0], intraIdHist[1]);
        double max = (intraIdHist[1].length > 0) ? intraIdHist[1][intraIdHist[1].length - 1] : 0;
        if (interIdHist[1].length > 0)
            max = FastMath.max(max, interIdHist[1][interIdHist[1].length - 1]);
        plot.setLimits(0, intraIdHist[0][intraIdHist[0].length - 1], 0, max);
        plot.setColor(Color.blue);
        plot.addPoints(interIdHist[0], interIdHist[1], Plot2.LINE);
        plot.setColor(Color.black);
        Utils.display(title, plot);
    } else {
        log("Aborted clustering to check inter-molecule distances");
    }
}
Also used : TDoubleArrayList(gnu.trove.list.array.TDoubleArrayList) ArrayList(java.util.ArrayList) IJTrackProgress(gdsc.core.ij.IJTrackProgress) Cluster(gdsc.core.clustering.Cluster) Plot2(ij.gui.Plot2) WeightedObservedPoint(org.apache.commons.math3.fitting.WeightedObservedPoint) ClusterPoint(gdsc.core.clustering.ClusterPoint) ClusteringEngine(gdsc.core.clustering.ClusteringEngine) ClusterPoint(gdsc.core.clustering.ClusterPoint)
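
The logged fraction is simply the number of inter-ID links over all links returned by the clustering engine, and the plotted curves are cumulative histograms of the two distance sets built by the GDSC Maths utility. A plain-Java sketch of the same idea (sort the distances, then pair each value with its running count; the helper is written from scratch for illustration, not taken from the library):

import java.util.Arrays;

public class CumulativeHistogramExample {
    /** Returns {x, y} where y[i] is the number of values less than or equal to x[i]. */
    static double[][] cumulativeHistogram(double[] values) {
        double[] x = values.clone();
        Arrays.sort(x);
        double[] y = new double[x.length];
        for (int i = 0; i < x.length; i++)
            y[i] = i + 1;
        return new double[][] { x, y };
    }

    public static void main(String[] args) {
        double[] intraIdDistances = { 5.0, 7.5, 12.0, 15.5 };
        double[] interIdDistances = { 30.0, 42.0 };

        // Fraction of inter-molecule links, as logged in performDistanceAnalysis
        int all = interIdDistances.length + intraIdDistances.length;
        System.out.printf("inter-molecule linkage = %.1f %%%n", 100.0 * interIdDistances.length / all);

        double[][] intraIdHist = cumulativeHistogram(intraIdDistances);
        System.out.println(Arrays.toString(intraIdHist[0]) + " -> " + Arrays.toString(intraIdHist[1]));
    }
}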

Example 4 with Percentile

Use of org.apache.commons.math3.stat.descriptive.rank.Percentile in project GDSC-SMLM by aherbert.

Class PCPALMMolecules, method runSimulation.

private void runSimulation(boolean resultsAvailable) {
    if (resultsAvailable && !showSimulationDialog())
        return;
    startLog();
    log("Simulation parameters");
    if (blinkingDistribution == 3) {
        log("  - Clusters = %d", nMolecules);
        log("  - Simulation size = %s um", Utils.rounded(simulationSize, 4));
        log("  - Molecules/cluster = %s", Utils.rounded(blinkingRate, 4));
        log("  - Blinking distribution = %s", BLINKING_DISTRIBUTION[blinkingDistribution]);
        log("  - p-Value = %s", Utils.rounded(p, 4));
    } else {
        log("  - Molecules = %d", nMolecules);
        log("  - Simulation size = %s um", Utils.rounded(simulationSize, 4));
        log("  - Blinking rate = %s", Utils.rounded(blinkingRate, 4));
        log("  - Blinking distribution = %s", BLINKING_DISTRIBUTION[blinkingDistribution]);
    }
    log("  - Average precision = %s nm", Utils.rounded(sigmaS, 4));
    log("  - Clusters simulation = " + CLUSTER_SIMULATION[clusterSimulation]);
    if (clusterSimulation > 0) {
        log("  - Cluster number = %s +/- %s", Utils.rounded(clusterNumber, 4), Utils.rounded(clusterNumberSD, 4));
        log("  - Cluster radius = %s nm", Utils.rounded(clusterRadius, 4));
    }
    final double nmPerPixel = 100;
    double width = simulationSize * 1000.0;
    // Allow a border of 3 x sigma for +/- precision
    //if (blinkingRate > 1)
    width -= 3 * sigmaS;
    RandomGenerator randomGenerator = new Well19937c(System.currentTimeMillis() + System.identityHashCode(this));
    RandomDataGenerator dataGenerator = new RandomDataGenerator(randomGenerator);
    UniformDistribution dist = new UniformDistribution(null, new double[] { width, width, 0 }, randomGenerator.nextInt());
    molecules = new ArrayList<Molecule>(nMolecules);
    // Create some dummy results since the calibration is required for later analysis
    results = new MemoryPeakResults();
    results.setCalibration(new gdsc.smlm.results.Calibration(nmPerPixel, 1, 100));
    results.setSource(new NullSource("Molecule Simulation"));
    results.begin();
    int count = 0;
    // Generate a sequence of coordinates
    ArrayList<double[]> xyz = new ArrayList<double[]>((int) (nMolecules * 1.1));
    Statistics statsRadius = new Statistics();
    Statistics statsSize = new Statistics();
    String maskTitle = TITLE + " Cluster Mask";
    ByteProcessor bp = null;
    double maskScale = 0;
    if (clusterSimulation > 0) {
        // Simulate clusters.
        // Note: In the Veatch et al. paper (Plos 1, e31457) correlation functions are built using circles
        // with small radii of 4-8 Arbitrary Units (AU) or large radii of 10-30 AU. A fluctuations model is
        // created at T = 1.075 Tc. It is not clear exactly how the particles are distributed.
        // It may be that a mask is created first using the model. The particles are placed on the mask using
        // a specified density. This simulation produces a figure to show either a damped cosine function
        // (circles) or an exponential (fluctuations). The number of particles in each circle may be randomly
        // determined just by density. The figure does not discuss the derivation of the cluster size 
        // statistic.
        // 
        // If this plugin simulation is run with a uniform distribution and blinking rate of 1 then the damped
        // cosine function is reproduced. The curve crosses g(r)=1 at a value equivalent to the average
        // distance to the centre-of-mass of each drawn cluster, not the input cluster radius parameter (which 
        // is a hard upper limit on the distance to centre).
        final int maskSize = lowResolutionImageSize;
        int[] mask = null;
        // scale is in nm/pixel
        maskScale = width / maskSize;
        ArrayList<double[]> clusterCentres = new ArrayList<double[]>();
        int totalSteps = 1 + (int) Math.ceil(nMolecules / clusterNumber);
        if (clusterSimulation == 2 || clusterSimulation == 3) {
            // Clusters are non-overlapping circles
            // Ensure the circles do not overlap by using an exclusion mask that accumulates 
            // out-of-bounds pixels by drawing the last cluster (plus some border) on an image. When no
            // more pixels are available then stop generating molecules.
            // This is done by cumulatively filling a mask and using the MaskDistribution to select 
            // a new point. This may be slow but it works.
            // TODO - Allow clusters of different sizes...
            mask = new int[maskSize * maskSize];
            Arrays.fill(mask, 255);
            MaskDistribution maskDistribution = new MaskDistribution(mask, maskSize, maskSize, 0, maskScale, maskScale, randomGenerator);
            double[] centre;
            IJ.showStatus("Computing clusters mask");
            int roiRadius = (int) Math.round((clusterRadius * 2) / maskScale);
            if (clusterSimulation == 3) {
                // Generate a mask of circles then sample from that.
                // If we want to fill the mask completely then adjust the total steps to be the number of 
                // circles that can fit inside the mask.
                totalSteps = (int) (maskSize * maskSize / (Math.PI * Math.pow(clusterRadius / maskScale, 2)));
            }
            while ((centre = maskDistribution.next()) != null && clusterCentres.size() < totalSteps) {
                IJ.showProgress(clusterCentres.size(), totalSteps);
                // The mask returns the coordinates with the centre of the image at 0,0
                centre[0] += width / 2;
                centre[1] += width / 2;
                clusterCentres.add(centre);
                // Fill in the mask around the centre to exclude any more circles that could overlap
                double cx = centre[0] / maskScale;
                double cy = centre[1] / maskScale;
                fillMask(mask, maskSize, (int) cx, (int) cy, roiRadius, 0);
                //Utils.display("Mask", new ColorProcessor(maskSize, maskSize, mask));
                try {
                    maskDistribution = new MaskDistribution(mask, maskSize, maskSize, 0, maskScale, maskScale, randomGenerator);
                } catch (IllegalArgumentException e) {
                    // This can happen when there are no more non-zero pixels
                    log("WARNING: No more room for clusters on the mask area (created %d of estimated %d)", clusterCentres.size(), totalSteps);
                    break;
                }
            }
            IJ.showProgress(1);
            IJ.showStatus("");
        } else {
            // Pick centres randomly from the distribution 
            while (clusterCentres.size() < totalSteps) clusterCentres.add(dist.next());
        }
        if (showClusterMask || clusterSimulation == 3) {
            // Show the mask for the clusters
            if (mask == null)
                mask = new int[maskSize * maskSize];
            else
                Arrays.fill(mask, 0);
            int roiRadius = (int) Math.round((clusterRadius) / maskScale);
            for (double[] c : clusterCentres) {
                double cx = c[0] / maskScale;
                double cy = c[1] / maskScale;
                fillMask(mask, maskSize, (int) cx, (int) cy, roiRadius, 1);
            }
            if (clusterSimulation == 3) {
                // We have the mask. Now pick points at random from the mask.
                MaskDistribution maskDistribution = new MaskDistribution(mask, maskSize, maskSize, 0, maskScale, maskScale, randomGenerator);
                // Allocate each molecule position to a parent circle, thus defining clusters.
                int[][] clusters = new int[clusterCentres.size()][];
                int[] clusterSize = new int[clusters.length];
                for (int i = 0; i < nMolecules; i++) {
                    double[] centre = maskDistribution.next();
                    // The mask returns the coordinates with the centre of the image at 0,0
                    centre[0] += width / 2;
                    centre[1] += width / 2;
                    xyz.add(centre);
                    // Output statistics on cluster size and number.
                    // TODO - Finding the closest cluster could be done better than an all-vs-all comparison
                    double max = distance2(centre, clusterCentres.get(0));
                    int cluster = 0;
                    for (int j = 1; j < clusterCentres.size(); j++) {
                        double d2 = distance2(centre, clusterCentres.get(j));
                        if (d2 < max) {
                            max = d2;
                            cluster = j;
                        }
                    }
                    // Assign point i to cluster
                    centre[2] = cluster;
                    if (clusterSize[cluster] == 0) {
                        clusters[cluster] = new int[10];
                    }
                    if (clusters[cluster].length <= clusterSize[cluster]) {
                        clusters[cluster] = Arrays.copyOf(clusters[cluster], (int) (clusters[cluster].length * 1.5));
                    }
                    clusters[cluster][clusterSize[cluster]++] = i;
                }
                // Generate real cluster size statistics
                for (int j = 0; j < clusterSize.length; j++) {
                    final int size = clusterSize[j];
                    if (size == 0)
                        continue;
                    statsSize.add(size);
                    if (size == 1) {
                        statsRadius.add(0);
                        continue;
                    }
                    // Find centre of cluster and add the distance to each point
                    double[] com = new double[2];
                    for (int n = 0; n < size; n++) {
                        double[] xy = xyz.get(clusters[j][n]);
                        for (int k = 0; k < 2; k++) com[k] += xy[k];
                    }
                    for (int k = 0; k < 2; k++) com[k] /= size;
                    for (int n = 0; n < size; n++) {
                        double dx = xyz.get(clusters[j][n])[0] - com[0];
                        double dy = xyz.get(clusters[j][n])[1] - com[1];
                        statsRadius.add(Math.sqrt(dx * dx + dy * dy));
                    }
                }
            }
            if (showClusterMask) {
                bp = new ByteProcessor(maskSize, maskSize);
                for (int i = 0; i < mask.length; i++)
                    if (mask[i] != 0)
                        bp.set(i, 128);
                Utils.display(maskTitle, bp);
            }
        }
        // Use the simulated cluster centres to create clusters of the desired size
        if (clusterSimulation == 1 || clusterSimulation == 2) {
            for (double[] clusterCentre : clusterCentres) {
                int clusterN = (int) Math.round((clusterNumberSD > 0) ? dataGenerator.nextGaussian(clusterNumber, clusterNumberSD) : clusterNumber);
                if (clusterN < 1)
                    continue;
                //double[] clusterCentre = dist.next();
                if (clusterN == 1) {
                    // No need for a cluster around a point
                    xyz.add(clusterCentre);
                    statsRadius.add(0);
                    statsSize.add(1);
                } else {
                    // Generate N random points within a circle of the chosen cluster radius.
                    // Locate the centre-of-mass and the average distance to the centre.
                    double[] com = new double[3];
                    int j = 0;
                    while (j < clusterN) {
                        // Generate a random point within a circle uniformly
                        // http://stackoverflow.com/questions/5837572/generate-a-random-point-within-a-circle-uniformly
                        double t = 2.0 * Math.PI * randomGenerator.nextDouble();
                        double u = randomGenerator.nextDouble() + randomGenerator.nextDouble();
                        double r = clusterRadius * ((u > 1) ? 2 - u : u);
                        double x = r * Math.cos(t);
                        double y = r * Math.sin(t);
                        double[] xy = new double[] { clusterCentre[0] + x, clusterCentre[1] + y };
                        xyz.add(xy);
                        for (int k = 0; k < 2; k++) com[k] += xy[k];
                        j++;
                    }
                    // Add the distance of the points from the centre of the cluster.
                    // Note this does not account for the movement due to precision.
                    statsSize.add(j);
                    if (j == 1) {
                        statsRadius.add(0);
                    } else {
                        for (int k = 0; k < 2; k++) com[k] /= j;
                        while (j > 0) {
                            double dx = xyz.get(xyz.size() - j)[0] - com[0];
                            double dy = xyz.get(xyz.size() - j)[1] - com[1];
                            statsRadius.add(Math.sqrt(dx * dx + dy * dy));
                            j--;
                        }
                    }
                }
            }
        }
    } else {
        // Random distribution
        for (int i = 0; i < nMolecules; i++) xyz.add(dist.next());
    }
    // The Gaussian sigma should be applied so the overall distance from the centre
    // ( sqrt(x^2+y^2) ) has a standard deviation of sigmaS?
    final double sigma1D = sigmaS / Math.sqrt(2);
    // Show optional histograms
    StoredDataStatistics intraDistances = null;
    StoredData blinks = null;
    if (showHistograms) {
        int capacity = (int) (xyz.size() * blinkingRate);
        intraDistances = new StoredDataStatistics(capacity);
        blinks = new StoredData(capacity);
    }
    Statistics statsSigma = new Statistics();
    for (int i = 0; i < xyz.size(); i++) {
        int nOccurrences = getBlinks(dataGenerator, blinkingRate);
        if (showHistograms)
            blinks.add(nOccurrences);
        final int size = molecules.size();
        // Get coordinates in nm
        final double[] moleculeXyz = xyz.get(i);
        if (bp != null && nOccurrences > 0) {
            bp.putPixel((int) Math.round(moleculeXyz[0] / maskScale), (int) Math.round(moleculeXyz[1] / maskScale), 255);
        }
        while (nOccurrences-- > 0) {
            final double[] localisationXy = Arrays.copyOf(moleculeXyz, 2);
            // Add random precision
            if (sigma1D > 0) {
                final double dx = dataGenerator.nextGaussian(0, sigma1D);
                final double dy = dataGenerator.nextGaussian(0, sigma1D);
                localisationXy[0] += dx;
                localisationXy[1] += dy;
                if (!dist.isWithinXY(localisationXy))
                    continue;
                // Calculate mean-squared displacement
                statsSigma.add(dx * dx + dy * dy);
            }
            final double x = localisationXy[0];
            final double y = localisationXy[1];
            molecules.add(new Molecule(x, y, i, 1));
            // Store in pixels
            float[] params = new float[7];
            params[Gaussian2DFunction.X_POSITION] = (float) (x / nmPerPixel);
            params[Gaussian2DFunction.Y_POSITION] = (float) (y / nmPerPixel);
            results.addf(i + 1, (int) x, (int) y, 0, 0, 0, params, null);
        }
        if (molecules.size() > size) {
            count++;
            if (showHistograms) {
                int newCount = molecules.size() - size;
                if (newCount == 1) {
                    //intraDistances.add(0);
                    continue;
                }
                // Get the distance matrix between these molecules
                double[][] matrix = new double[newCount][newCount];
                for (int ii = size, x = 0; ii < molecules.size(); ii++, x++) {
                    for (int jj = size + 1, y = 1; jj < molecules.size(); jj++, y++) {
                        final double d2 = molecules.get(ii).distance2(molecules.get(jj));
                        matrix[x][y] = matrix[y][x] = d2;
                    }
                }
                // Get the maximum distance for particle linkage clustering of this molecule
                double max = 0;
                for (int x = 0; x < newCount; x++) {
                    // Compare to all-other molecules and get the minimum distance 
                    // needed to join at least one
                    double linkDistance = Double.POSITIVE_INFINITY;
                    for (int y = 0; y < newCount; y++) {
                        if (x == y)
                            continue;
                        if (matrix[x][y] < linkDistance)
                            linkDistance = matrix[x][y];
                    }
                    // Check if this is larger 
                    if (max < linkDistance)
                        max = linkDistance;
                }
                intraDistances.add(Math.sqrt(max));
            }
        }
    }
    results.end();
    if (bp != null)
        Utils.display(maskTitle, bp);
    // Used for debugging
    //System.out.printf("  * Molecules = %d (%d activated)\n", xyz.size(), count);
    //if (clusterSimulation > 0)
    //	System.out.printf("  * Cluster number = %s +/- %s. Radius = %s +/- %s\n",
    //			Utils.rounded(statsSize.getMean(), 4), Utils.rounded(statsSize.getStandardDeviation(), 4),
    //			Utils.rounded(statsRadius.getMean(), 4), Utils.rounded(statsRadius.getStandardDeviation(), 4));
    log("Simulation results");
    log("  * Molecules = %d (%d activated)", xyz.size(), count);
    log("  * Blinking rate = %s", Utils.rounded((double) molecules.size() / xyz.size(), 4));
    log("  * Precision (Mean-displacement) = %s nm", (statsSigma.getN() > 0) ? Utils.rounded(Math.sqrt(statsSigma.getMean()), 4) : "0");
    if (showHistograms) {
        if (intraDistances.getN() == 0) {
            log("  * Mean Intra-Molecule particle linkage distance = 0 nm");
            log("  * Fraction of inter-molecule particle linkage @ 0 nm = 0 %%");
        } else {
            plot(blinks, "Blinks/Molecule", true);
            double[][] intraHist = plot(intraDistances, "Intra-molecule particle linkage distance", false);
            // Determine 95th and 99th percentile
            int p99 = intraHist[0].length - 1;
            double limit1 = 0.99 * intraHist[1][p99];
            double limit2 = 0.95 * intraHist[1][p99];
            while (intraHist[1][p99] > limit1 && p99 > 0) p99--;
            int p95 = p99;
            while (intraHist[1][p95] > limit2 && p95 > 0) p95--;
            log("  * Mean Intra-Molecule particle linkage distance = %s nm (95%% = %s, 99%% = %s, 100%% = %s)", Utils.rounded(intraDistances.getMean(), 4), Utils.rounded(intraHist[0][p95], 4), Utils.rounded(intraHist[0][p99], 4), Utils.rounded(intraHist[0][intraHist[0].length - 1], 4));
            if (distanceAnalysis) {
                performDistanceAnalysis(intraHist, p99);
            }
        }
    }
    if (clusterSimulation > 0) {
        log("  * Cluster number = %s +/- %s", Utils.rounded(statsSize.getMean(), 4), Utils.rounded(statsSize.getStandardDeviation(), 4));
        log("  * Cluster radius = %s +/- %s nm (mean distance to centre-of-mass)", Utils.rounded(statsRadius.getMean(), 4), Utils.rounded(statsRadius.getStandardDeviation(), 4));
    }
}
Also used : ByteProcessor(ij.process.ByteProcessor) TDoubleArrayList(gnu.trove.list.array.TDoubleArrayList) ArrayList(java.util.ArrayList) MaskDistribution(gdsc.smlm.model.MaskDistribution) Well19937c(org.apache.commons.math3.random.Well19937c) RandomGenerator(org.apache.commons.math3.random.RandomGenerator) MemoryPeakResults(gdsc.smlm.results.MemoryPeakResults) NullSource(gdsc.smlm.results.NullSource) RandomDataGenerator(org.apache.commons.math3.random.RandomDataGenerator) UniformDistribution(gdsc.smlm.model.UniformDistribution) StoredDataStatistics(gdsc.core.utils.StoredDataStatistics) Statistics(gdsc.core.utils.Statistics) StoredDataStatistics(gdsc.core.utils.StoredDataStatistics) DescriptiveStatistics(org.apache.commons.math3.stat.descriptive.DescriptiveStatistics) WeightedObservedPoint(org.apache.commons.math3.fitting.WeightedObservedPoint) ClusterPoint(gdsc.core.clustering.ClusterPoint) StoredData(gdsc.core.utils.StoredData)
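
One self-contained technique inside the cluster generation above is drawing a point uniformly within a circle without a square root: the sum of two uniform variates, folded back at 1, has density proportional to r, which is exactly what area-uniform sampling needs. A standalone sketch of just that step (class and method names are illustrative, not from the plugin):

import java.util.concurrent.ThreadLocalRandom;

public class UniformCirclePointExample {
    /** Returns {x, y} uniformly distributed within a disc of the given radius. */
    static double[] samplePointInCircle(double radius, ThreadLocalRandom rng) {
        double t = 2.0 * Math.PI * rng.nextDouble();
        // u1 + u2 folded back at 1 has density 2u on [0, 1], so r = radius * u
        // gives area-uniform points without calling Math.sqrt
        double u = rng.nextDouble() + rng.nextDouble();
        double r = radius * ((u > 1) ? 2 - u : u);
        return new double[] { r * Math.cos(t), r * Math.sin(t) };
    }

    public static void main(String[] args) {
        ThreadLocalRandom rng = ThreadLocalRandom.current();
        for (int i = 0; i < 5; i++) {
            double[] p = samplePointInCircle(10.0, rng);
            System.out.printf("(%.3f, %.3f)%n", p[0], p[1]);
        }
    }
}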

Example 5 with Percentile

Use of org.apache.commons.math3.stat.descriptive.rank.Percentile in project gatk by broadinstitute.

Class HDF5PCACoveragePoNCreationUtils, method subsetReadCountsToUsableTargets.

/**
     * Subsets targets in the input count to the usable ones based on the percentile threshold indicated
     * by the user.
     *
     * <p>
     *     It returns a pair of objects, where the left one is the updated read-counts with only the usable
     *     targets, and the right one is the corresponding target factors.
     * </p>
     *
     * @param readCounts the input read-counts.
     * @param targetFactorPercentileThreshold the minimum median count percentile under which targets are not considered useful.
     * @param logger the logger used to report how many targets were kept or discarded.
     * @return never {@code null}.
     */
@VisibleForTesting
static Pair<ReadCountCollection, double[]> subsetReadCountsToUsableTargets(final ReadCountCollection readCounts, final double targetFactorPercentileThreshold, final Logger logger) {
    final double[] targetFactors = calculateTargetFactors(readCounts);
    final double threshold = new Percentile(targetFactorPercentileThreshold).evaluate(targetFactors);
    final List<Target> targetByIndex = readCounts.targets();
    final Set<Target> result = IntStream.range(0, targetFactors.length).filter(i -> targetFactors[i] >= threshold).mapToObj(targetByIndex::get).collect(Collectors.toCollection(LinkedHashSet::new));
    if (result.size() == targetByIndex.size()) {
        logger.info(String.format("All %d targets are kept", targetByIndex.size()));
        return new ImmutablePair<>(readCounts, targetFactors);
    } else {
        final int discardedCount = targetFactors.length - result.size();
        logger.info(String.format("Discarded %d target(s) out of %d with factors below %.2g (%.2f percentile)", discardedCount, targetFactors.length, threshold, targetFactorPercentileThreshold));
        final double[] targetFactorSubset = DoubleStream.of(targetFactors).filter(i -> i >= threshold).toArray();
        return new ImmutablePair<>(readCounts.subsetTargets(result), targetFactorSubset);
    }
}
Also used : IntStream(java.util.stream.IntStream) DefaultRealMatrixChangingVisitor(org.apache.commons.math3.linear.DefaultRealMatrixChangingVisitor) SVD(org.broadinstitute.hellbender.utils.svd.SVD) java.util(java.util) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) MatrixSummaryUtils(org.broadinstitute.hellbender.utils.MatrixSummaryUtils) ParamUtils(org.broadinstitute.hellbender.utils.param.ParamUtils) Pair(org.apache.commons.lang3.tuple.Pair) Median(org.apache.commons.math3.stat.descriptive.rank.Median) HDF5File(org.broadinstitute.hdf5.HDF5File) IOUtils(org.broadinstitute.hellbender.utils.io.IOUtils) org.broadinstitute.hellbender.tools.exome(org.broadinstitute.hellbender.tools.exome) IOException(java.io.IOException) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) ImmutablePair(org.apache.commons.lang3.tuple.ImmutablePair) File(java.io.File) DoubleStream(java.util.stream.DoubleStream) Percentile(org.apache.commons.math3.stat.descriptive.rank.Percentile) Logger(org.apache.logging.log4j.Logger) MathUtils(org.broadinstitute.hellbender.utils.MathUtils) UserException(org.broadinstitute.hellbender.exceptions.UserException) SVDFactory(org.broadinstitute.hellbender.utils.svd.SVDFactory) Utils(org.broadinstitute.hellbender.utils.Utils) RealMatrix(org.apache.commons.math3.linear.RealMatrix) VisibleForTesting(com.google.common.annotations.VisibleForTesting) LogManager(org.apache.logging.log4j.LogManager) Percentile(org.apache.commons.math3.stat.descriptive.rank.Percentile) ImmutablePair(org.apache.commons.lang3.tuple.ImmutablePair) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
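
The filtering pattern is: evaluate a single Percentile over the target factors to obtain a threshold, then keep only the entries at or above it. A compact sketch of that step on a plain array, with the ReadCountCollection bookkeeping of the GATK class omitted and the data invented for illustration:

import java.util.Arrays;
import java.util.stream.DoubleStream;
import java.util.stream.IntStream;
import org.apache.commons.math3.stat.descriptive.rank.Percentile;

public class PercentileThresholdExample {
    public static void main(String[] args) {
        double[] targetFactors = { 10.0, 55.0, 3.0, 72.0, 41.0, 8.0 };
        double targetFactorPercentileThreshold = 25.0;

        // The threshold is the requested percentile of the factors themselves
        double threshold = new Percentile(targetFactorPercentileThreshold).evaluate(targetFactors);

        // Indices of the usable entries, analogous to the kept targets
        int[] keptIndices = IntStream.range(0, targetFactors.length)
                .filter(i -> targetFactors[i] >= threshold)
                .toArray();

        // The corresponding factor subset, mirroring targetFactorSubset above
        double[] keptFactors = DoubleStream.of(targetFactors)
                .filter(f -> f >= threshold)
                .toArray();

        System.out.printf("threshold (%.0f percentile) = %.2f, kept %d of %d targets%n",
                targetFactorPercentileThreshold, threshold, keptFactors.length, targetFactors.length);
        System.out.println("kept indices: " + Arrays.toString(keptIndices));
    }
}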

Aggregations

Percentile (org.apache.commons.math3.stat.descriptive.rank.Percentile): 31
ArrayList (java.util.ArrayList): 16
RealMatrix (org.apache.commons.math3.linear.RealMatrix): 16
Array2DRowRealMatrix (org.apache.commons.math3.linear.Array2DRowRealMatrix): 14
List (java.util.List): 11
Collectors (java.util.stream.Collectors): 11
IntStream (java.util.stream.IntStream): 11
File (java.io.File): 10
DoubleStream (java.util.stream.DoubleStream): 10
Median (org.apache.commons.math3.stat.descriptive.rank.Median): 10
Logger (org.apache.logging.log4j.Logger): 10
Test (org.testng.annotations.Test): 10
Random (java.util.Random): 9
Stream (java.util.stream.Stream): 9
DescriptiveStatistics (org.apache.commons.math3.stat.descriptive.DescriptiveStatistics): 9
Level (org.apache.logging.log4j.Level): 8
Marker (org.apache.logging.log4j.Marker): 8
Message (org.apache.logging.log4j.message.Message): 8
AbstractLogger (org.apache.logging.log4j.spi.AbstractLogger): 8
SimpleInterval (org.broadinstitute.hellbender.utils.SimpleInterval): 8