
Example 36 with Sum

Use of org.apache.commons.math3.stat.descriptive.summary.Sum in project GDSC-SMLM by aherbert.

The class AiryPSFModel, method createAiryDistribution.

private static synchronized void createAiryDistribution() {
    if (spline != null)
        return;
    final double relativeAccuracy = 1e-4;
    final double absoluteAccuracy = 1e-8;
    final int minimalIterationCount = 3;
    final int maximalIterationCount = 32;
    UnivariateIntegrator integrator = new SimpsonIntegrator(relativeAccuracy, absoluteAccuracy, minimalIterationCount, maximalIterationCount);
    UnivariateFunction f = new UnivariateFunction() {

        public double value(double x) {
            //return AiryPattern.intensity(x) * 2 * Math.PI * x / (4 * Math.PI);
            return AiryPattern.intensity(x) * 0.5 * x;
        }
    };
    // Integrate up to a set number of dark rings
    int samples = 1000;
    final double step = RINGS[SAMPLE_RINGS] / samples;
    double to = 0, from = 0;
    r = new double[samples + 1];
    sum = new double[samples + 1];
    for (int i = 1; i < sum.length; i++) {
        from = to;
        r[i] = to = step * i;
        sum[i] = integrator.integrate(2000, f, from, to) + sum[i - 1];
    }
    if (DoubleEquality.relativeError(sum[samples], POWER[SAMPLE_RINGS]) > 1e-3)
        throw new RuntimeException("Failed to create the Airy distribution");
    SplineInterpolator si = new SplineInterpolator();
    spline = si.interpolate(sum, r);
}
Also used : SimpsonIntegrator(org.apache.commons.math3.analysis.integration.SimpsonIntegrator) UnivariateIntegrator(org.apache.commons.math3.analysis.integration.UnivariateIntegrator) UnivariateFunction(org.apache.commons.math3.analysis.UnivariateFunction) SplineInterpolator(org.apache.commons.math3.analysis.interpolation.SplineInterpolator)
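
The pattern here is to accumulate piecewise Simpson integrals into a cumulative curve and then interpolate the inverse mapping (sum -> r), so the distribution can be inverted for sampling. A minimal, self-contained sketch of the same idea, using a hypothetical toy integrand in place of AiryPattern.intensity:

import org.apache.commons.math3.analysis.UnivariateFunction;
import org.apache.commons.math3.analysis.integration.SimpsonIntegrator;
import org.apache.commons.math3.analysis.interpolation.SplineInterpolator;
import org.apache.commons.math3.analysis.polynomials.PolynomialSplineFunction;

public class CumulativeSplineSketch {
    public static void main(String[] args) {
        // Hypothetical toy integrand standing in for the scaled Airy intensity.
        UnivariateFunction f = x -> Math.exp(-x);
        SimpsonIntegrator integrator = new SimpsonIntegrator(1e-4, 1e-8, 3, 32);
        int samples = 100;
        double step = 5.0 / samples;
        double[] r = new double[samples + 1];
        double[] sum = new double[samples + 1];
        for (int i = 1; i < sum.length; i++) {
            r[i] = step * i;
            // Accumulate piecewise integrals into a cumulative curve.
            sum[i] = sum[i - 1] + integrator.integrate(2000, f, r[i - 1], r[i]);
        }
        // Interpolating (sum -> r) inverts the cumulative curve; the x-values
        // must be strictly increasing, which a positive integrand guarantees.
        PolynomialSplineFunction inverse = new SplineInterpolator().interpolate(sum, r);
        System.out.println(inverse.value(0.5)); // radius at cumulative value 0.5
    }
}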

Example 37 with Sum

Use of org.apache.commons.math3.stat.descriptive.summary.Sum in project GDSC-SMLM by aherbert.

The class GradientCalculatorSpeedTest, method mleGradientCalculatorComputesLikelihood.

@Test
public void mleGradientCalculatorComputesLikelihood() {
    //@formatter:off
    NonLinearFunction func = new NonLinearFunction() {

        double u;

        public void initialise(double[] a) {
            u = a[0];
        }

        public int[] gradientIndices() {
            return null;
        }

        public double eval(int x, double[] dyda) {
            return 0;
        }

        public double eval(int x) {
            return u;
        }

        public double eval(int x, double[] dyda, double[] w) {
            return 0;
        }

        public double evalw(int x, double[] w) {
            return 0;
        }

        public boolean canComputeWeights() {
            return false;
        }

        public int getNumberOfGradients() {
            return 0;
        }
    };
    //@formatter:on
    int[] xx = Utils.newArray(100, 0, 1);
    double[] xxx = Utils.newArray(100, 0, 1.0);
    for (double u : new double[] { 0.79, 2.5, 5.32 }) {
        double ll = 0;
        PoissonDistribution pd = new PoissonDistribution(u);
        for (int x : xx) {
            double o = MLEGradientCalculator.likelihood(u, x);
            double e = pd.probability(x);
            Assert.assertEquals("likelihood", e, o, e * 1e-10);
            o = MLEGradientCalculator.logLikelihood(u, x);
            e = pd.logProbability(x);
            Assert.assertEquals("log likelihood", e, o, Math.abs(e) * 1e-10);
            ll += e;
        }
        MLEGradientCalculator gc = new MLEGradientCalculator(1);
        double o = gc.logLikelihood(xxx, new double[] { u }, func);
        Assert.assertEquals("sum log likelihood", ll, o, Math.abs(ll) * 1e-10);
    }
}
Also used : PoissonDistribution(org.apache.commons.math3.distribution.PoissonDistribution) NonLinearFunction(gdsc.smlm.function.NonLinearFunction) Test(org.junit.Test)
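
The expected values come from PoissonDistribution, so the calculator is effectively being checked against the identity log P(x; u) = x*log(u) - u - log(x!). A sketch of that check in isolation (the class and method names are illustrative, not part of the project):

import org.apache.commons.math3.distribution.PoissonDistribution;
import org.apache.commons.math3.util.CombinatoricsUtils;

public class PoissonLogLikelihoodSketch {

    // Direct evaluation of log P(x; u) = x*log(u) - u - log(x!).
    static double logLikelihood(double u, int x) {
        return x * Math.log(u) - u - CombinatoricsUtils.factorialLog(x);
    }

    public static void main(String[] args) {
        double u = 2.5;
        PoissonDistribution pd = new PoissonDistribution(u);
        for (int x = 0; x < 10; x++) {
            double e = pd.logProbability(x);
            double o = logLikelihood(u, x);
            // The test above expects agreement to ~1e-10 relative error.
            System.out.printf("x=%d expected=%.12f observed=%.12f%n", x, e, o);
        }
    }
}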

Example 38 with Sum

Use of org.apache.commons.math3.stat.descriptive.summary.Sum in project GDSC-SMLM by aherbert.

The class SCMOSLikelihoodWrapperTest, method instanceLikelihoodMatches.

private void instanceLikelihoodMatches(final double mu, boolean test) {
    // Determine upper limit for a Poisson
    int limit = new PoissonDistribution(mu).inverseCumulativeProbability(P_LIMIT);
    // Map to observed values using the gain and offset
    double max = limit * G;
    double step = 0.1;
    int n = (int) Math.ceil(max / step);
    // Evaluate all values from (zero+offset) to large n
    double[] k = Utils.newArray(n, O, step);
    double[] a = new double[0];
    double[] gradient = new double[0];
    float[] var = newArray(n, VAR);
    float[] g = newArray(n, G);
    float[] o = newArray(n, O);
    NonLinearFunction nlf = new NonLinearFunction() {

        public void initialise(double[] a) {
        }

        public int[] gradientIndices() {
            return new int[0];
        }

        public double eval(int x, double[] dyda, double[] w) {
            return 0;
        }

        public double eval(int x) {
            return mu;
        }

        public double eval(int x, double[] dyda) {
            return mu;
        }

        public boolean canComputeWeights() {
            return false;
        }

        public double evalw(int x, double[] w) {
            return 0;
        }

        public int getNumberOfGradients() {
            return 0;
        }
    };
    SCMOSLikelihoodWrapper f = new SCMOSLikelihoodWrapper(nlf, a, k, n, var, g, o);
    double total = 0, p = 0;
    double maxp = 0;
    int maxi = 0;
    for (int i = 0; i < n; i++) {
        double nll = f.computeLikelihood(i);
        double nll2 = f.computeLikelihood(gradient, i);
        double nll3 = SCMOSLikelihoodWrapper.negativeLogLikelihood(mu, var[i], g[i], o[i], k[i]);
        total += nll;
        Assert.assertEquals("computeLikelihood @" + i, nll3, nll, nll * 1e-10);
        Assert.assertEquals("computeLikelihood+gradient @" + i, nll3, nll2, nll * 1e-10);
        double pp = FastMath.exp(-nll);
        if (maxp < pp) {
            maxp = pp;
            maxi = i;
        //System.out.printf("mu=%f, e=%f, k=%f, pp=%f\n", mu, mu * G + O, k[i], pp);
        }
        p += pp * step;
    }
    // Expected max of the distribution is the mode of the Poisson distribution.
    // This has two modes for integer input counts. We take the mean of those.
    // https://en.wikipedia.org/wiki/Poisson_distribution
    // Note that the shift of VAR/(G*G) is a constant applied to both the expected and
    // observed values and consequently cancels when predicting the max, i.e. we add
    // a constant count to the observed values and shift the distribution by the same
    // constant. We can thus compute the mode for the unshifted distribution.
    double lambda = mu;
    double mode1 = Math.floor(lambda);
    double mode2 = Math.ceil(lambda) - 1;
    // Scale to observed values
    double kmax = ((mode1 + mode2) * 0.5) * G + O;
    //System.out.printf("mu=%f, p=%f, maxp=%f @ %f  (expected=%f  %f)\n", mu, p, maxp, k[maxi], kmax, kmax - k[maxi]);
    Assert.assertEquals("k-max", kmax, k[maxi], kmax * 1e-3);
    if (test) {
        Assert.assertEquals(String.format("mu=%f", mu), P_LIMIT, p, 0.02);
    }
    // Check the function can compute the same total
    double delta = total * 1e-10;
    double sum, sum2;
    sum = f.computeLikelihood();
    sum2 = f.computeLikelihood(gradient);
    Assert.assertEquals("computeLikelihood", total, sum, delta);
    Assert.assertEquals("computeLikelihood with gradient", total, sum2, delta);
    // Check the function can compute the same total after duplication
    f = f.build(nlf, a);
    sum = f.computeLikelihood();
    sum2 = f.computeLikelihood(gradient);
    Assert.assertEquals("computeLikelihood", total, sum, delta);
    Assert.assertEquals("computeLikelihood with gradient", total, sum2, delta);
}
Also used : PoissonDistribution(org.apache.commons.math3.distribution.PoissonDistribution)
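
The comment in the test predicts the likelihood peak from the Poisson mode: floor(lambda) is always a mode, and when lambda is an integer lambda - 1 is a second mode, so averaging floor(lambda) and ceil(lambda) - 1 covers both cases before scaling to camera counts. A small sketch of that prediction (expectedPeak is a hypothetical helper):

public class PoissonModeSketch {

    // floor(lambda) is always a mode; when lambda is an integer, lambda - 1 is
    // a second mode. ceil(lambda) - 1 equals floor(lambda) for non-integer
    // lambda, so averaging the two expressions handles both cases.
    static double expectedPeak(double lambda, double gain, double offset) {
        double mode1 = Math.floor(lambda);
        double mode2 = Math.ceil(lambda) - 1;
        // Map the photon-count mode to observed camera counts.
        return ((mode1 + mode2) * 0.5) * gain + offset;
    }

    public static void main(String[] args) {
        System.out.println(expectedPeak(4.0, 2.0, 100.0)); // integer rate: modes 4 and 3 -> 107.0
        System.out.println(expectedPeak(4.7, 2.0, 100.0)); // non-integer rate: mode 4 -> 108.0
    }
}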

Example 39 with Sum

Use of org.apache.commons.math3.stat.descriptive.summary.Sum in project GDSC-SMLM by aherbert.

The class PoissonLikelihoodWrapperTest, method cumulativeProbabilityIsOneFromLikelihood.

private void cumulativeProbabilityIsOneFromLikelihood(final double mu) {
    // Determine upper limit for a Poisson
    int limit = new PoissonDistribution(mu).inverseCumulativeProbability(0.999);
    // Expand to allow for the gain
    int n = (int) Math.ceil(limit / alpha);
    // Evaluate all values from zero to large n
    double[] k = Utils.newArray(n, 0, 1.0);
    double[] a = new double[0];
    double[] g = new double[0];
    NonLinearFunction nlf = new NonLinearFunction() {

        public void initialise(double[] a) {
        }

        public int[] gradientIndices() {
            return new int[0];
        }

        public double eval(int x, double[] dyda, double[] w) {
            return 0;
        }

        public double eval(int x) {
            return mu / alpha;
        }

        public double eval(int x, double[] dyda) {
            return mu / alpha;
        }

        public boolean canComputeWeights() {
            return false;
        }

        public double evalw(int x, double[] w) {
            return 0;
        }

        public int getNumberOfGradients() {
            return 0;
        }
    };
    PoissonLikelihoodWrapper f = new PoissonLikelihoodWrapper(nlf, a, Arrays.copyOf(k, n), n, alpha);
    // Keep evaluating up until no difference
    final double changeTolerance = 1e-6;
    double total = 0;
    double p = 0;
    int i = 0;
    while (i < n) {
        double nll = f.computeLikelihood(i);
        double nll2 = f.computeLikelihood(g, i);
        i++;
        Assert.assertEquals("computeLikelihood(i)", nll, nll2, 1e-10);
        total += nll;
        double pp = FastMath.exp(-nll);
        //System.out.printf("mu=%f, o=%f, i=%d, pp=%f\n", mu, mu / alpha, i, pp);
        p += pp;
        if (p > 0.5 && pp / p < changeTolerance)
            break;
    }
    System.out.printf("mu=%f, limit=%d, p=%f\n", mu, limit, p);
    Assert.assertEquals(String.format("mu=%f", mu), 1, p, 0.02);
    // Check the function can compute the same total
    f = new PoissonLikelihoodWrapper(nlf, a, k, i, alpha);
    double sum = f.computeLikelihood();
    double sum2 = f.computeLikelihood(g);
    double delta = total * 1e-10;
    Assert.assertEquals("computeLikelihood", total, sum, delta);
    Assert.assertEquals("computeLikelihood with gradient", total, sum2, delta);
}
Also used : PoissonDistribution(org.apache.commons.math3.distribution.PoissonDistribution)
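
The early-exit summation above (stop once the newest term contributes less than a relative tolerance, but only after p > 0.5 so the loop cannot bail out in the left tail) is a generic way to total a unimodal pmf. A sketch of the same pattern against PoissonDistribution directly, so the running total can be cross-checked with the library CDF:

import org.apache.commons.math3.distribution.PoissonDistribution;

public class CumulativeFromPmfSketch {
    public static void main(String[] args) {
        double mu = 5.32;
        PoissonDistribution pd = new PoissonDistribution(mu);
        // Stop once the newest term is a negligible fraction of the running
        // total; the p > 0.5 guard prevents an early exit in the left tail.
        final double changeTolerance = 1e-6;
        double p = 0;
        int x = 0;
        while (true) {
            double pp = pd.probability(x);
            p += pp;
            if (p > 0.5 && pp / p < changeTolerance)
                break;
            x++;
        }
        System.out.printf("terms=%d, p=%f (library CDF: %f)%n", x + 1, p, pd.cumulativeProbability(x));
    }
}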

Example 40 with Sum

Use of org.apache.commons.math3.stat.descriptive.summary.Sum in project GDSC-SMLM by aherbert.

The class FIRE, method runQEstimation.

private void runQEstimation() {
    IJ.showStatus(TITLE + " ...");
    if (!showQEstimationInputDialog())
        return;
    MemoryPeakResults results = ResultsManager.loadInputResults(inputOption, false);
    if (results == null || results.size() == 0) {
        IJ.error(TITLE, "No results could be loaded");
        return;
    }
    if (results.getCalibration() == null) {
        IJ.error(TITLE, "The results are not calibrated");
        return;
    }
    results = cropToRoi(results);
    if (results.size() < 2) {
        IJ.error(TITLE, "No results within the crop region");
        return;
    }
    initialise(results, null);
    // We need localisation precision.
    // Build a histogram of the localisation precision.
    // Get the initial mean and SD and plot as a Gaussian.
    PrecisionHistogram histogram = calculatePrecisionHistogram();
    if (histogram == null) {
        IJ.error(TITLE, "No localisation precision available.\n \nPlease choose " + PrecisionMethod.FIXED + " and enter a precision mean and SD.");
        return;
    }
    StoredDataStatistics precision = histogram.precision;
    //String name = results.getName();
    double fourierImageScale = SCALE_VALUES[imageScaleIndex];
    int imageSize = IMAGE_SIZE_VALUES[imageSizeIndex];
    // Create the image and compute the numerator of FRC. 
    // Do not use the signal so results.size() is the number of localisations.
    IJ.showStatus("Computing FRC curve ...");
    FireImages images = createImages(fourierImageScale, imageSize, false);
    // DEBUGGING - Save the two images to disk. Load the images into the Matlab 
    // code that calculates the Q-estimation and make this plugin match the functionality.
    //IJ.save(new ImagePlus("i1", images.ip1), "/scratch/i1.tif");
    //IJ.save(new ImagePlus("i2", images.ip2), "/scratch/i2.tif");
    FRC frc = new FRC();
    frc.progress = progress;
    frc.setFourierMethod(fourierMethod);
    frc.setSamplingMethod(samplingMethod);
    frc.setPerimeterSamplingFactor(perimeterSamplingFactor);
    FRCCurve frcCurve = frc.calculateFrcCurve(images.ip1, images.ip2, images.nmPerPixel);
    if (frcCurve == null) {
        IJ.error(TITLE, "Failed to compute FRC curve");
        return;
    }
    IJ.showStatus("Running Q-estimation ...");
    // Note:
    // The method implemented here is based on Matlab code provided by Bernd Rieger.
    // The idea is to compute the spurious correlation component of the FRC Numerator
    // using an initial estimate of distribution of the localisation precision (assumed 
    // to be Gaussian). This component is the contribution of repeat localisations of 
    // the same molecule to the numerator and is modelled as an exponential decay
    // (exp_decay). The component is scaled by the Q-value which
    // is the average number of times a molecule is seen in addition to the first time.
    // At large spatial frequencies the scaled component should match the numerator,
    // i.e. at high resolution (low FIRE number) the numerator is made up of repeat 
    // localisations of the same molecule and not actual structure in the image.
    // The best fit is where the numerator equals the scaled component, i.e. num / (q*exp_decay) == 1.
    // The FRC Numerator is plotted and Q can be determined by
    // adjusting Q and the precision mean and SD to maximise the cost function.
    // This can be done interactively by the user with the effect on the FRC curve
    // dynamically updated and displayed.
    // Compute the scaled FRC numerator
    double qNorm = (1 / frcCurve.mean1 + 1 / frcCurve.mean2);
    double[] frcnum = new double[frcCurve.getSize()];
    for (int i = 0; i < frcnum.length; i++) {
        FRCCurveResult r = frcCurve.get(i);
        frcnum[i] = qNorm * r.getNumerator() / r.getNumberOfSamples();
    }
    // Compute the spatial frequency and the region for curve fitting
    double[] q = FRC.computeQ(frcCurve, false);
    int low = 0, high = q.length;
    while (high > 0 && q[high - 1] > maxQ) high--;
    while (low < q.length && q[low] < minQ) low++;
    // Require we fit at least 10% of the curve
    if (high - low < q.length * 0.1) {
        IJ.error(TITLE, "Not enough points for Q estimation");
        return;
    }
    // Obtain initial estimate of Q plateau height and decay.
    // This can be done by fitting the precision histogram and then fixing the mean and sigma.
    // Or it can be done by allowing the precision to be sampled and the mean and sigma
    // become parameters for fitting.
    // Check if we can sample precision values
    boolean sampleDecay = precision != null && FIRE.sampleDecay;
    double[] exp_decay;
    if (sampleDecay) {
        // A random sample of precision values from the distribution is used to
        // construct the decay curve
        int[] sample = Random.sample(10000, precision.getN(), new Well19937c());
        final double four_pi2 = 4 * Math.PI * Math.PI;
        double[] pre = new double[q.length];
        for (int i = 1; i < q.length; i++) pre[i] = -four_pi2 * q[i] * q[i];
        // Sample
        final int n = sample.length;
        double[] hq = new double[n];
        for (int j = 0; j < n; j++) {
            // Scale to SR pixels
            double s2 = precision.getValue(sample[j]) / images.nmPerPixel;
            s2 *= s2;
            for (int i = 1; i < q.length; i++) hq[i] += FastMath.exp(pre[i] * s2);
        }
        for (int i = 1; i < q.length; i++) hq[i] /= n;
        exp_decay = new double[q.length];
        exp_decay[0] = 1;
        for (int i = 1; i < q.length; i++) {
            double sinc_q = sinc(Math.PI * q[i]);
            exp_decay[i] = sinc_q * sinc_q * hq[i];
        }
    } else {
        // Note: The precision mean and SD should be in units of super-resolution
        // pixels, so scale to SR pixels
        exp_decay = computeExpDecay(histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, q);
    }
    // Smoothing
    double[] smooth;
    if (loessSmoothing) {
        // Note: This computes the log then smooths it 
        double bandwidth = 0.1;
        int robustness = 0;
        double[] l = new double[exp_decay.length];
        for (int i = 0; i < l.length; i++) {
            // Original Matlab code computes the log for each array.
            // This is equivalent to a single log on the fraction of the two.
            // Perhaps the two log method is more numerically stable.
            //l[i] = Math.log(Math.abs(frcnum[i])) - Math.log(exp_decay[i]);
            l[i] = Math.log(Math.abs(frcnum[i] / exp_decay[i]));
        }
        try {
            LoessInterpolator loess = new LoessInterpolator(bandwidth, robustness);
            smooth = loess.smooth(q, l);
        } catch (Exception e) {
            IJ.error(TITLE, "LOESS smoothing failed");
            return;
        }
    } else {
        // Note: This smooths the curve before computing the log 
        double[] norm = new double[exp_decay.length];
        for (int i = 0; i < norm.length; i++) {
            norm[i] = frcnum[i] / exp_decay[i];
        }
        // Median window of 5 == radius of 2
        MedianWindow mw = new MedianWindow(norm, 2);
        smooth = new double[exp_decay.length];
        for (int i = 0; i < norm.length; i++) {
            smooth[i] = Math.log(Math.abs(mw.getMedian()));
            mw.increment();
        }
    }
    // Fit with quadratic to find the initial guess.
    // Note: example Matlab code frc_Qcorrection7.m identifies regions of the 
    // smoothed log curve with low derivative and only fits those. The fit is 
    // used for the final estimate. Fitting a subset with low derivative is not 
    // implemented here since the initial estimate is subsequently optimised 
    // to maximise a cost function. 
    Quadratic curve = new Quadratic();
    SimpleCurveFitter fit = SimpleCurveFitter.create(curve, new double[2]);
    WeightedObservedPoints points = new WeightedObservedPoints();
    for (int i = low; i < high; i++) points.add(q[i], smooth[i]);
    double[] estimate = fit.fit(points.toList());
    double qValue = FastMath.exp(estimate[0]);
    //System.out.printf("Initial q-estimate = %s => %.3f\n", Arrays.toString(estimate), qValue);
    // This could be made an option. Currently just used for debugging.
    boolean debug = false;
    if (debug) {
        // Plot the initial fit and the fit curve
        double[] qScaled = FRC.computeQ(frcCurve, true);
        double[] line = new double[q.length];
        for (int i = 0; i < q.length; i++) line[i] = curve.value(q[i], estimate);
        String title = TITLE + " Initial fit";
        Plot2 plot = new Plot2(title, "Spatial Frequency (nm^-1)", "FRC Numerator");
        String label = String.format("Q = %.3f", qValue);
        plot.addPoints(qScaled, smooth, Plot.LINE);
        plot.setColor(Color.red);
        plot.addPoints(qScaled, line, Plot.LINE);
        plot.setColor(Color.black);
        plot.addLabel(0, 0, label);
        Utils.display(title, plot, Utils.NO_TO_FRONT);
    }
    if (fitPrecision) {
        // Q - Should this be optional?
        if (sampleDecay) {
            // If a sample of the precision was used to construct the data for the initial fit 
            // then update the estimate using the fit result since it will be a better start point. 
            histogram.sigma = precision.getStandardDeviation();
            // Normalise sum-of-squares to the SR pixel size
            double meanSumOfSquares = (precision.getSumOfSquares() / (images.nmPerPixel * images.nmPerPixel)) / precision.getN();
            histogram.mean = images.nmPerPixel * Math.sqrt(meanSumOfSquares - estimate[1] / (4 * Math.PI * Math.PI));
        }
        // Do a multivariate fit ...
        SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
        PointValuePair p = null;
        MultiPlateauness f = new MultiPlateauness(frcnum, q, low, high);
        double[] initial = new double[] { histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, qValue };
        p = findMin(p, opt, f, scale(initial, 0.1));
        p = findMin(p, opt, f, scale(initial, 0.5));
        p = findMin(p, opt, f, initial);
        p = findMin(p, opt, f, scale(initial, 2));
        p = findMin(p, opt, f, scale(initial, 10));
        if (p != null) {
            double[] point = p.getPointRef();
            histogram.mean = point[0] * images.nmPerPixel;
            histogram.sigma = point[1] * images.nmPerPixel;
            qValue = point[2];
        }
    } else {
        // As above, the estimate can be updated from the sampled decay fit; if so then this should be optional.
        if (sampleDecay) {
            if (precisionMethod != PrecisionMethod.FIXED) {
                histogram.sigma = precision.getStandardDeviation();
                // Normalise sum-of-squares to the SR pixel size
                double meanSumOfSquares = (precision.getSumOfSquares() / (images.nmPerPixel * images.nmPerPixel)) / precision.getN();
                histogram.mean = images.nmPerPixel * Math.sqrt(meanSumOfSquares - estimate[1] / (4 * Math.PI * Math.PI));
            }
            exp_decay = computeExpDecay(histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, q);
        }
        // Estimate spurious component by promoting plateauness.
        // The Matlab code used random initial points for a Simplex optimiser.
        // A Brent line search should be pretty deterministic so do simple repeats.
        // However it will proceed downhill so if the initial point is wrong then 
        // it will find a sub-optimal result.
        UnivariateOptimizer o = new BrentOptimizer(1e-3, 1e-6);
        Plateauness f = new Plateauness(frcnum, exp_decay, low, high);
        UnivariatePointValuePair p = null;
        p = findMin(p, o, f, qValue, 0.1);
        p = findMin(p, o, f, qValue, 0.2);
        p = findMin(p, o, f, qValue, 0.333);
        p = findMin(p, o, f, qValue, 0.5);
        // Do some Simplex repeats as well
        SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
        p = findMin(p, opt, f, qValue * 0.1);
        p = findMin(p, opt, f, qValue * 0.5);
        p = findMin(p, opt, f, qValue);
        p = findMin(p, opt, f, qValue * 2);
        p = findMin(p, opt, f, qValue * 10);
        if (p != null)
            qValue = p.getPoint();
    }
    QPlot qplot = new QPlot(frcCurve, qValue, low, high);
    // Interactive dialog to estimate Q (blinking events per fluorophore) using
    // sliders for the mean and standard deviation of the localisation precision.
    showQEstimationDialog(histogram, qplot, frcCurve, images.nmPerPixel);
    IJ.showStatus(TITLE + " complete");
}
Also used :
BrentOptimizer(org.apache.commons.math3.optim.univariate.BrentOptimizer)
Plot2(ij.gui.Plot2)
Well19937c(org.apache.commons.math3.random.Well19937c)
PointValuePair(org.apache.commons.math3.optim.PointValuePair)
UnivariatePointValuePair(org.apache.commons.math3.optim.univariate.UnivariatePointValuePair)
LoessInterpolator(org.apache.commons.math3.analysis.interpolation.LoessInterpolator)
WeightedObservedPoints(org.apache.commons.math3.fitting.WeightedObservedPoints)
WeightedObservedPoint(org.apache.commons.math3.fitting.WeightedObservedPoint)
SimplexOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer)
MemoryPeakResults(gdsc.smlm.results.MemoryPeakResults)
MedianWindow(gdsc.core.utils.MedianWindow)
SimpleCurveFitter(org.apache.commons.math3.fitting.SimpleCurveFitter)
FRCCurveResult(gdsc.smlm.ij.frc.FRC.FRCCurveResult)
StoredDataStatistics(gdsc.core.utils.StoredDataStatistics)
TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException)
FRCCurve(gdsc.smlm.ij.frc.FRC.FRCCurve)
FRC(gdsc.smlm.ij.frc.FRC)
UnivariateOptimizer(org.apache.commons.math3.optim.univariate.UnivariateOptimizer)
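
computeExpDecay is not shown in this example. Assuming it mirrors the sampled branch with a single effective precision s (in super-resolution pixels), the decay curve would be sinc^2(pi*q) * exp(-4*pi^2*s^2*q^2). A sketch under that assumption, which omits the sigma spread that the plugin's version takes as a parameter:

public class ExpDecaySketch {

    // sinc(x) = sin(x) / x with the removable singularity at zero.
    static double sinc(double x) {
        return (x == 0) ? 1 : Math.sin(x) / x;
    }

    // Hypothetical decay curve for a single fixed precision s (in SR pixels):
    // sinc^2(pi*q) * exp(-4*pi^2 * s^2 * q^2). The sampled branch in the
    // plugin averages the exponential term over precision values drawn from
    // the data; the plugin's computeExpDecay also uses the sigma of the
    // precision distribution, which this sketch ignores.
    static double[] expDecay(double s, double[] q) {
        final double fourPi2 = 4 * Math.PI * Math.PI;
        double[] decay = new double[q.length];
        decay[0] = 1;
        for (int i = 1; i < q.length; i++) {
            double sincQ = sinc(Math.PI * q[i]);
            decay[i] = sincQ * sincQ * Math.exp(-fourPi2 * s * s * q[i] * q[i]);
        }
        return decay;
    }

    public static void main(String[] args) {
        double[] q = { 0, 0.1, 0.2, 0.3 };
        for (double d : expDecay(0.5, q))
            System.out.println(d);
    }
}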

Aggregations

RealMatrix (org.apache.commons.math3.linear.RealMatrix): 22
Collectors (java.util.stream.Collectors): 15
java.util (java.util): 12
Array2DRowRealMatrix (org.apache.commons.math3.linear.Array2DRowRealMatrix): 12
List (java.util.List): 11
IntStream (java.util.stream.IntStream): 11
Logger (org.apache.logging.log4j.Logger): 11
ArrayList (java.util.ArrayList): 9
LogManager (org.apache.logging.log4j.LogManager): 9
IOException (java.io.IOException): 8
Map (java.util.Map): 8
TooManyEvaluationsException (org.apache.commons.math3.exception.TooManyEvaluationsException): 8
UserException (org.broadinstitute.hellbender.exceptions.UserException): 8
ParamUtils (org.broadinstitute.hellbender.utils.param.ParamUtils): 8
BaseTest (org.broadinstitute.hellbender.utils.test.BaseTest): 8
Test (org.testng.annotations.Test): 8
File (java.io.File): 7
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 6
Arrays (java.util.Arrays): 6
Nonnull (javax.annotation.Nonnull): 6