
Example 1 with Line

Use of org.apache.commons.math3.geometry.euclidean.twod.Line in project GDSC-SMLM by aherbert.

The class EMGainAnalysis, method fit.

/**
	 * Fit the EM-gain distribution (Gaussian * Gamma)
	 * 
	 * @param h
	 *            The distribution
	 */
private void fit(int[] h) {
    final int[] limits = limits(h);
    final double[] x = getX(limits);
    final double[] y = getY(h, limits);
    Plot2 plot = new Plot2(TITLE, "ADU", "Frequency");
    double yMax = Maths.max(y);
    plot.setLimits(limits[0], limits[1], 0, yMax);
    plot.setColor(Color.black);
    plot.addPoints(x, y, Plot2.DOT);
    Utils.display(TITLE, plot);
    // Estimate remaining parameters. 
    // Assuming a gamma_distribution(shape,scale) then mean = shape * scale
    // scale = gain
    // shape = Photons = mean / gain
    double mean = getMean(h) - bias;
    // Note: if the bias is too high then the mean will be negative. Just move the bias.
    while (mean < 0) {
        bias -= 1;
        mean += 1;
    }
    double photons = mean / gain;
    if (simulate)
        Utils.log("Simulated bias=%d, gain=%s, noise=%s, photons=%s", (int) _bias, Utils.rounded(_gain), Utils.rounded(_noise), Utils.rounded(_photons));
    Utils.log("Estimate bias=%d, gain=%s, noise=%s, photons=%s", (int) bias, Utils.rounded(gain), Utils.rounded(noise), Utils.rounded(photons));
    final int max = (int) x[x.length - 1];
    double[] g = pdf(max, photons, gain, noise, (int) bias);
    plot.setColor(Color.blue);
    plot.addPoints(x, g, Plot2.LINE);
    Utils.display(TITLE, plot);
    // Perform a fit
    CustomPowellOptimizer o = new CustomPowellOptimizer(1e-6, 1e-16, 1e-6, 1e-16);
    double[] startPoint = new double[] { photons, gain, noise, bias };
    int maxEval = 3000;
    String[] paramNames = { "Photons", "Gain", "Noise", "Bias" };
    // Set bounds
    double[] lower = new double[] { 0, 0.5 * gain, 0, bias - noise };
    double[] upper = new double[] { 2 * photons, 2 * gain, gain, bias + noise };
    // Restart until converged.
    // TODO - Maybe fix this with a better optimiser. This needs to be tested on real data.
    PointValuePair solution = null;
    for (int iter = 0; iter < 3; iter++) {
        IJ.showStatus("Fitting histogram ... Iteration " + iter);
        try {
            // Basic Powell optimiser
            MultivariateFunction fun = getFunction(limits, y, max, maxEval);
            PointValuePair optimum = o.optimize(new MaxEval(maxEval), new ObjectiveFunction(fun), GoalType.MINIMIZE, new InitialGuess((solution == null) ? startPoint : solution.getPointRef()));
            if (solution == null || optimum.getValue() < solution.getValue()) {
                double[] point = optimum.getPointRef();
                // Check the bounds
                for (int i = 0; i < point.length; i++) {
                    if (point[i] < lower[i] || point[i] > upper[i]) {
                        throw new RuntimeException(String.format("Fit out of estimated range: %s %f", paramNames[i], point[i]));
                    }
                }
                solution = optimum;
            }
        } catch (Exception e) {
            IJ.log("Powell error: " + e.getMessage());
            if (e instanceof TooManyEvaluationsException) {
                maxEval = (int) (maxEval * 1.5);
            }
        }
        try {
            // Bounded Powell optimiser
            MultivariateFunction fun = getFunction(limits, y, max, maxEval);
            MultivariateFunctionMappingAdapter adapter = new MultivariateFunctionMappingAdapter(fun, lower, upper);
            PointValuePair optimum = o.optimize(new MaxEval(maxEval), new ObjectiveFunction(adapter), GoalType.MINIMIZE, new InitialGuess(adapter.boundedToUnbounded((solution == null) ? startPoint : solution.getPointRef())));
            double[] point = adapter.unboundedToBounded(optimum.getPointRef());
            optimum = new PointValuePair(point, optimum.getValue());
            if (solution == null || optimum.getValue() < solution.getValue()) {
                solution = optimum;
            }
        } catch (Exception e) {
            IJ.log("Bounded Powell error: " + e.getMessage());
            if (e instanceof TooManyEvaluationsException) {
                maxEval = (int) (maxEval * 1.5);
            }
        }
    }
    IJ.showStatus("");
    IJ.showProgress(1);
    if (solution == null) {
        Utils.log("Failed to fit the distribution");
        return;
    }
    double[] point = solution.getPointRef();
    photons = point[0];
    gain = point[1];
    noise = point[2];
    bias = (int) Math.round(point[3]);
    String label = String.format("Fitted bias=%d, gain=%s, noise=%s, photons=%s", (int) bias, Utils.rounded(gain), Utils.rounded(noise), Utils.rounded(photons));
    Utils.log(label);
    if (simulate) {
        Utils.log("Relative Error bias=%s, gain=%s, noise=%s, photons=%s", Utils.rounded(relativeError(bias, _bias)), Utils.rounded(relativeError(gain, _gain)), Utils.rounded(relativeError(noise, _noise)), Utils.rounded(relativeError(photons, _photons)));
    }
    // Show the PoissonGammaGaussian approximation
    double[] f = null;
    if (showApproximation) {
        f = new double[x.length];
        PoissonGammaGaussianFunction fun = new PoissonGammaGaussianFunction(1.0 / gain, noise);
        final double expected = photons * gain;
        for (int i = 0; i < f.length; i++) {
            f[i] = fun.likelihood(x[i] - bias, expected);
        //System.out.printf("x=%d, g=%f, f=%f, error=%f\n", (int) x[i], g[i], f[i],
        //		gdsc.smlm.fitting.utils.DoubleEquality.relativeError(g[i], f[i]));
        }
        yMax = Maths.maxDefault(yMax, f);
    }
    // Replot
    g = pdf(max, photons, gain, noise, (int) bias);
    plot = new Plot2(TITLE, "ADU", "Frequency");
    plot.setLimits(limits[0], limits[1], 0, yMax * 1.05);
    plot.setColor(Color.black);
    plot.addPoints(x, y, Plot2.DOT);
    plot.setColor(Color.red);
    plot.addPoints(x, g, Plot2.LINE);
    plot.addLabel(0, 0, label);
    if (showApproximation) {
        plot.setColor(Color.blue);
        plot.addPoints(x, f, Plot2.LINE);
    }
    Utils.display(TITLE, plot);
}
Also used : MaxEval(org.apache.commons.math3.optim.MaxEval) InitialGuess(org.apache.commons.math3.optim.InitialGuess) ObjectiveFunction(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction) Plot2(ij.gui.Plot2) Point(java.awt.Point) TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) PointValuePair(org.apache.commons.math3.optim.PointValuePair) MultivariateFunction(org.apache.commons.math3.analysis.MultivariateFunction) MultivariateFunctionMappingAdapter(org.apache.commons.math3.optim.nonlinear.scalar.MultivariateFunctionMappingAdapter) TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) CustomPowellOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CustomPowellOptimizer) PoissonGammaGaussianFunction(gdsc.smlm.function.PoissonGammaGaussianFunction)
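
The bounded-Powell pattern in fit() above (wrap the objective in a MultivariateFunctionMappingAdapter, optimise in the unbounded space, then map the optimum back) can be sketched with the stock commons-math3 PowellOptimizer. This is a minimal illustration on a hypothetical quadratic objective, not the project's CustomPowellOptimizer; the bounds, start point and tolerances are made up for the example.

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.MultivariateFunctionMappingAdapter;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.PowellOptimizer;

public class BoundedPowellSketch {
    public static void main(String[] args) {
        // Toy objective with its minimum at (3, 40)
        MultivariateFunction fun = p -> Math.pow(p[0] - 3, 2) + Math.pow(p[1] - 40, 2);
        double[] lower = { 0, 10 };
        double[] upper = { 10, 100 };
        // Map the bounded parameter space to an unbounded one for the Powell search
        MultivariateFunctionMappingAdapter adapter =
                new MultivariateFunctionMappingAdapter(fun, lower, upper);
        PowellOptimizer o = new PowellOptimizer(1e-6, 1e-16);
        double[] start = { 5, 50 };
        PointValuePair optimum = o.optimize(
                new MaxEval(3000),
                new ObjectiveFunction(adapter),
                GoalType.MINIMIZE,
                new InitialGuess(adapter.boundedToUnbounded(start)));
        // Convert the optimum back to the bounded space, as in fit() above
        double[] point = adapter.unboundedToBounded(optimum.getPointRef());
        System.out.printf("x=%.3f, y=%.3f, value=%.6f%n", point[0], point[1], optimum.getValue());
    }
}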

Example 2 with Line

Use of org.apache.commons.math3.geometry.euclidean.twod.Line in project GDSC-SMLM by aherbert.

The class BoundedNonLinearConjugateGradientOptimizer, method doOptimize.

/** {@inheritDoc} */
@Override
protected PointValuePair doOptimize() {
    final ConvergenceChecker<PointValuePair> checker = getConvergenceChecker();
    final double[] point = getStartPoint();
    final GoalType goal = getGoalType();
    final int n = point.length;
    sign = (goal == GoalType.MINIMIZE) ? -1 : 1;
    double[] unbounded = point.clone();
    applyBounds(point);
    double[] r = computeObjectiveGradient(point);
    checkGradients(r, unbounded);
    if (goal == GoalType.MINIMIZE) {
        for (int i = 0; i < n; i++) {
            r[i] = -r[i];
        }
    }
    // Initial search direction.
    double[] steepestDescent = preconditioner.precondition(point, r);
    double[] searchDirection = steepestDescent.clone();
    double delta = 0;
    for (int i = 0; i < n; ++i) {
        delta += r[i] * searchDirection[i];
    }
    // Used for non-gradient based line search
    LineSearch line = null;
    double rel = 1e-6;
    double abs = 1e-10;
    if (getConvergenceChecker() instanceof SimpleValueChecker) {
        rel = ((SimpleValueChecker) getConvergenceChecker()).getRelativeThreshold();
        abs = ((SimpleValueChecker) getConvergenceChecker()).getAbsoluteThreshold();
    }
    line = new LineSearch(Math.sqrt(rel), Math.sqrt(abs));
    PointValuePair current = null;
    int maxEval = getMaxEvaluations();
    while (true) {
        incrementIterationCount();
        final double objective = computeObjectiveValue(point);
        PointValuePair previous = current;
        current = new PointValuePair(point, objective);
        if (previous != null && checker.converged(getIterations(), previous, current)) {
            // We have found an optimum.
            return current;
        }
        double step;
        if (useGradientLineSearch) {
            // Classic code using the gradient function for the line search:
            // Find the optimal step in the search direction.
            final UnivariateFunction lsf = new LineSearchFunction(point, searchDirection);
            final double uB;
            try {
                uB = findUpperBound(lsf, 0, initialStep);
                // Check if the bracket found a minimum. Otherwise just move to the new point.
                if (noBracket)
                    step = uB;
                else {
                    // XXX Last parameter is set to a value close to zero in order to
                    // work around the divergence problem in the "testCircleFitting"
                    // unit test (see MATH-439).
                    //System.out.printf("Bracket %f - %f - %f\n", 0., 1e-15, uB);
                    step = solver.solve(maxEval, lsf, 0, uB, 1e-15);
                    // Subtract used up evaluations.
                    maxEval -= solver.getEvaluations();
                }
            } catch (MathIllegalStateException e) {
                //System.out.printf("Failed to bracket %s @ %s\n", Arrays.toString(point), Arrays.toString(searchDirection));
                // Line search without gradient (as per Powell optimiser)
                final UnivariatePointValuePair optimum = line.search(point, searchDirection);
                step = optimum.getPoint();
            //throw e;
            }
        } else {
            // Line search without gradient (as per Powell optimiser)
            final UnivariatePointValuePair optimum = line.search(point, searchDirection);
            step = optimum.getPoint();
        }
        //System.out.printf("Step = %f x %s\n", step, Arrays.toString(searchDirection));
        for (int i = 0; i < point.length; ++i) {
            point[i] += step * searchDirection[i];
        }
        unbounded = point.clone();
        applyBounds(point);
        r = computeObjectiveGradient(point);
        checkGradients(r, unbounded);
        if (goal == GoalType.MINIMIZE) {
            for (int i = 0; i < n; ++i) {
                r[i] = -r[i];
            }
        }
        // Compute beta.
        final double deltaOld = delta;
        final double[] newSteepestDescent = preconditioner.precondition(point, r);
        delta = 0;
        for (int i = 0; i < n; ++i) {
            delta += r[i] * newSteepestDescent[i];
        }
        if (delta == 0)
            return new PointValuePair(point, computeObjectiveValue(point));
        final double beta;
        switch(updateFormula) {
            case FLETCHER_REEVES:
                beta = delta / deltaOld;
                break;
            case POLAK_RIBIERE:
                double deltaMid = 0;
                for (int i = 0; i < r.length; ++i) {
                    deltaMid += r[i] * steepestDescent[i];
                }
                beta = (delta - deltaMid) / deltaOld;
                break;
            default:
                // Should never happen.
                throw new MathInternalError();
        }
        steepestDescent = newSteepestDescent;
        // Compute conjugate search direction.
        if (getIterations() % n == 0 || beta < 0) {
            // Break conjugation: reset search direction.
            searchDirection = steepestDescent.clone();
        } else {
            // Compute new conjugate search direction.
            for (int i = 0; i < n; ++i) {
                searchDirection[i] = steepestDescent[i] + beta * searchDirection[i];
            }
        }
        // The gradient has already been adjusted for the search direction
        checkGradients(searchDirection, unbounded, -sign);
    }
}
Also used : UnivariateFunction(org.apache.commons.math3.analysis.UnivariateFunction) GoalType(org.apache.commons.math3.optim.nonlinear.scalar.GoalType) UnivariatePointValuePair(org.apache.commons.math3.optim.univariate.UnivariatePointValuePair) SimpleValueChecker(org.apache.commons.math3.optim.SimpleValueChecker) MathIllegalStateException(org.apache.commons.math3.exception.MathIllegalStateException) PointValuePair(org.apache.commons.math3.optim.PointValuePair) UnivariatePointValuePair(org.apache.commons.math3.optim.univariate.UnivariatePointValuePair) MathInternalError(org.apache.commons.math3.exception.MathInternalError)
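
The class above is a bounded extension of the standard nonlinear conjugate gradient method; the Fletcher-Reeves / Polak-Ribiere beta update and the line search are the same ideas exposed by the stock commons-math3 NonLinearConjugateGradientOptimizer. A minimal sketch on a toy quadratic with an analytic gradient (no bounds, since the bound and gradient-clipping handling is the project-specific part):

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.analysis.MultivariateVectorFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.SimpleValueChecker;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunctionGradient;
import org.apache.commons.math3.optim.nonlinear.scalar.gradient.NonLinearConjugateGradientOptimizer;

public class ConjugateGradientSketch {
    public static void main(String[] args) {
        // f(x, y) = (x - 1)^2 + 10 (y + 2)^2, gradient supplied analytically
        MultivariateFunction f = p -> Math.pow(p[0] - 1, 2) + 10 * Math.pow(p[1] + 2, 2);
        MultivariateVectorFunction g = p -> new double[] { 2 * (p[0] - 1), 20 * (p[1] + 2) };
        NonLinearConjugateGradientOptimizer o = new NonLinearConjugateGradientOptimizer(
                NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
                new SimpleValueChecker(1e-6, 1e-10));
        PointValuePair optimum = o.optimize(
                new MaxEval(1000),
                new ObjectiveFunction(f),
                new ObjectiveFunctionGradient(g),
                GoalType.MINIMIZE,
                new InitialGuess(new double[] { 5, 5 }));
        System.out.printf("x=%.4f, y=%.4f%n", optimum.getPoint()[0], optimum.getPoint()[1]);
    }
}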

Example 3 with Line

Use of org.apache.commons.math3.geometry.euclidean.twod.Line in project GDSC-SMLM by aherbert.

The class TraceDiffusion, method fitMSD.

/**
	 * Fit the MSD using a linear fit that must pass through 0,0.
	 * <p>
	 * Update the plot by adding the fit line.
	 * 
	 * @param x
	 * @param y
	 * @param title
	 * @param plot
	 * @return [D, precision]
	 */
private double[] fitMSD(double[] x, double[] y, String title, Plot2 plot) {
    // The Weimann paper (Plos One e64287) fits:
    // MSD(n dt) = 4D n dt + 4s^2
    // n = number of jumps
    // dt = time difference between frames
    // s = localisation precision
    // Thus we should fit an intercept as well.
    // From the fit D = gradient / (4*exposureTime)
    double D = 0;
    double intercept = 0;
    double precision = 0;
    LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
    Optimum lvmSolution;
    double ic = 0;
    // Fit with no intercept
    try {
        final LinearFunction function = new LinearFunction(x, y, settings.fitLength);
        double[] parameters = new double[] { function.guess() };
        //@formatter:off
        LeastSquaresProblem problem = new LeastSquaresBuilder().maxEvaluations(Integer.MAX_VALUE).maxIterations(3000).start(parameters).target(function.getY()).weight(new DiagonalMatrix(function.getWeights())).model(function, new MultivariateMatrixFunction() {

            public double[][] value(double[] point) throws IllegalArgumentException {
                return function.jacobian(point);
            }
        }).build();
        //@formatter:on
        lvmSolution = optimizer.optimize(problem);
        double ss = lvmSolution.getResiduals().dotProduct(lvmSolution.getResiduals());
        //double ss = 0;
        //double[] obs = function.getY();
        //double[] exp = lvmSolution.getValue();
        //for (int i = 0; i < obs.length; i++)
        //	ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
        ic = Maths.getAkaikeInformationCriterionFromResiduals(ss, function.getY().length, 1);
        double gradient = lvmSolution.getPoint().getEntry(0);
        D = gradient / 4;
        Utils.log("Linear fit (%d points) : Gradient = %s, D = %s um^2/s, SS = %s, IC = %s (%d evaluations)", function.getY().length, Utils.rounded(gradient, 4), Utils.rounded(D, 4), Utils.rounded(ss), Utils.rounded(ic), lvmSolution.getEvaluations());
    } catch (TooManyIterationsException e) {
        Utils.log("Failed to fit : Too many iterations (%s)", e.getMessage());
    } catch (ConvergenceException e) {
        Utils.log("Failed to fit : %s", e.getMessage());
    }
    // Fit with intercept.
    // Optionally include the intercept (which is the estimated precision).
    boolean fitIntercept = true;
    try {
        final LinearFunctionWithIntercept function = new LinearFunctionWithIntercept(x, y, settings.fitLength, fitIntercept);
        //@formatter:off
        LeastSquaresProblem problem = new LeastSquaresBuilder().maxEvaluations(Integer.MAX_VALUE).maxIterations(3000).start(function.guess()).target(function.getY()).weight(new DiagonalMatrix(function.getWeights())).model(function, new MultivariateMatrixFunction() {

            public double[][] value(double[] point) throws IllegalArgumentException {
                return function.jacobian(point);
            }
        }).build();
        //@formatter:on
        lvmSolution = optimizer.optimize(problem);
        double ss = lvmSolution.getResiduals().dotProduct(lvmSolution.getResiduals());
        //double ss = 0;
        //double[] obs = function.getY();
        //double[] exp = lvmSolution.getValue();
        //for (int i = 0; i < obs.length; i++)
        //	ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
        double ic2 = Maths.getAkaikeInformationCriterionFromResiduals(ss, function.getY().length, 2);
        double gradient = lvmSolution.getPoint().getEntry(0);
        final double s = lvmSolution.getPoint().getEntry(1);
        double intercept2 = 4 * s * s;
        if (ic2 < ic || debugFitting) {
            // Convert fitted precision in um to nm
            Utils.log("Linear fit with intercept (%d points) : Gradient = %s, Intercept = %s, D = %s um^2/s, precision = %s nm, SS = %s, IC = %s (%d evaluations)", function.getY().length, Utils.rounded(gradient, 4), Utils.rounded(intercept2, 4), Utils.rounded(gradient / 4, 4), Utils.rounded(s * 1000, 4), Utils.rounded(ss), Utils.rounded(ic2), lvmSolution.getEvaluations());
        }
        if (lvmSolution == null || ic2 < ic) {
            intercept = intercept2;
            D = gradient / 4;
            precision = s;
        }
    } catch (TooManyIterationsException e) {
        Utils.log("Failed to fit with intercept : Too many iterations (%s)", e.getMessage());
    } catch (ConvergenceException e) {
        Utils.log("Failed to fit with intercept : %s", e.getMessage());
    }
    if (settings.msdCorrection) {
        // With the MSD correction the intercept is allowed to be a small negative value.
        try {
            // This function fits the jump distance (n) not the time (nt) so update x
            double[] x2 = new double[x.length];
            for (int i = 0; i < x2.length; i++) x2[i] = x[i] / exposureTime;
            final LinearFunctionWithMSDCorrectedIntercept function = new LinearFunctionWithMSDCorrectedIntercept(x2, y, settings.fitLength, fitIntercept);
            //@formatter:off
            LeastSquaresProblem problem = new LeastSquaresBuilder().maxEvaluations(Integer.MAX_VALUE).maxIterations(3000).start(function.guess()).target(function.getY()).weight(new DiagonalMatrix(function.getWeights())).model(function, new MultivariateMatrixFunction() {

                public double[][] value(double[] point) throws IllegalArgumentException {
                    return function.jacobian(point);
                }
            }).build();
            //@formatter:on
            lvmSolution = optimizer.optimize(problem);
            double ss = lvmSolution.getResiduals().dotProduct(lvmSolution.getResiduals());
            //double ss = 0;
            //double[] obs = function.getY();
            //double[] exp = lvmSolution.getValue();
            //for (int i = 0; i < obs.length; i++)
            //	ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
            double ic2 = Maths.getAkaikeInformationCriterionFromResiduals(ss, function.getY().length, 2);
            double gradient = lvmSolution.getPoint().getEntry(0);
            final double s = lvmSolution.getPoint().getEntry(1);
            double intercept2 = 4 * s * s - gradient / 3;
            // Q. Is this working?
            // Try fixed precision fitting. Is the gradient correct?
            // Revisit all the equations to see if they are wrong.
            // Try adding the x[0] datapoint using the precision.
            // Change the formula to not be linear at x[0] and to just fit the precision, i.e. the intercept2 = 4 * s * s - gradient / 3 is wrong as the 
            // equation is not linear below n=1.
            // Incorporate the exposure time into the gradient to allow comparison to other fits 
            gradient /= exposureTime;
            if (ic2 < ic || debugFitting) {
                // Convert fitted precision in um to nm
                Utils.log("Linear fit with MSD corrected intercept (%d points) : Gradient = %s, Intercept = %s, D = %s um^2/s, precision = %s nm, SS = %s, IC = %s (%d evaluations)", function.getY().length, Utils.rounded(gradient, 4), Utils.rounded(intercept2, 4), Utils.rounded(gradient / 4, 4), Utils.rounded(s * 1000, 4), Utils.rounded(ss), Utils.rounded(ic2), lvmSolution.getEvaluations());
            }
            if (lvmSolution == null || ic2 < ic) {
                intercept = intercept2;
                D = gradient / 4;
                precision = s;
            }
        } catch (TooManyIterationsException e) {
            Utils.log("Failed to fit with intercept : Too many iterations (%s)", e.getMessage());
        } catch (ConvergenceException e) {
            Utils.log("Failed to fit with intercept : %s", e.getMessage());
        }
    }
    // Add the fit to the plot
    if (D > 0) {
        plot.setColor(Color.magenta);
        plot.drawLine(0, intercept, x[x.length - 1], 4 * D * x[x.length - 1] + intercept);
        display(title, plot);
        checkTraceDistance(D);
    }
    return new double[] { D, precision };
}
Also used : LeastSquaresBuilder(org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder) Optimum(org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer.Optimum) LevenbergMarquardtOptimizer(org.apache.commons.math3.fitting.leastsquares.LevenbergMarquardtOptimizer) DiagonalMatrix(org.apache.commons.math3.linear.DiagonalMatrix) ConvergenceException(org.apache.commons.math3.exception.ConvergenceException) TooManyIterationsException(org.apache.commons.math3.exception.TooManyIterationsException) LeastSquaresProblem(org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem) MultivariateMatrixFunction(org.apache.commons.math3.analysis.MultivariateMatrixFunction)
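
The LeastSquaresBuilder / LevenbergMarquardtOptimizer pattern used for the MSD fits can be shown in isolation. A minimal sketch fitting y = m*x through the origin with an explicit model and Jacobian; the data, start value and iteration limits are illustrative only (the LinearFunction classes above are project specific):

import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
import org.apache.commons.math3.analysis.MultivariateVectorFunction;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer.Optimum;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;
import org.apache.commons.math3.fitting.leastsquares.LevenbergMarquardtOptimizer;

public class LineThroughOriginFit {
    public static void main(String[] args) {
        final double[] x = { 1, 2, 3, 4, 5 };
        final double[] y = { 2.1, 3.9, 6.2, 8.0, 9.8 };
        // Model: value[i] = m * x[i]; Jacobian column is d(value[i])/dm = x[i]
        MultivariateVectorFunction value = p -> {
            double[] v = new double[x.length];
            for (int i = 0; i < x.length; i++) v[i] = p[0] * x[i];
            return v;
        };
        MultivariateMatrixFunction jacobian = p -> {
            double[][] j = new double[x.length][1];
            for (int i = 0; i < x.length; i++) j[i][0] = x[i];
            return j;
        };
        LeastSquaresProblem problem = new LeastSquaresBuilder()
                .maxEvaluations(1000)
                .maxIterations(1000)
                .start(new double[] { 1 })   // initial gradient guess
                .target(y)
                .model(value, jacobian)
                .build();
        Optimum optimum = new LevenbergMarquardtOptimizer().optimize(problem);
        double gradient = optimum.getPoint().getEntry(0);
        double ss = optimum.getResiduals().dotProduct(optimum.getResiduals());
        System.out.printf("gradient=%.4f, SS=%.4f, evaluations=%d%n",
                gradient, ss, optimum.getEvaluations());
    }
}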

Example 4 with Line

Use of org.apache.commons.math3.geometry.euclidean.twod.Line in project GDSC-SMLM by aherbert.

The class CMOSAnalysis, method simulate.

private void simulate() {
    // Create the offset, variance and gain for each pixel
    int n = size * size;
    float[] pixelOffset = new float[n];
    float[] pixelVariance = new float[n];
    float[] pixelGain = new float[n];
    IJ.showStatus("Creating random per-pixel readout");
    long start = System.currentTimeMillis();
    RandomGenerator rg = new Well19937c();
    PoissonDistribution pd = new PoissonDistribution(rg, offset, PoissonDistribution.DEFAULT_EPSILON, PoissonDistribution.DEFAULT_MAX_ITERATIONS);
    ExponentialDistribution ed = new ExponentialDistribution(rg, variance, ExponentialDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
    totalProgress = n;
    stepProgress = Utils.getProgressInterval(totalProgress);
    for (int i = 0; i < n; i++) {
        if (i % stepProgress == 0)
            IJ.showProgress(i, n);
        // Q. Should these be clipped to a sensible range?
        pixelOffset[i] = (float) pd.sample();
        pixelVariance[i] = (float) ed.sample();
        pixelGain[i] = (float) (gain + rg.nextGaussian() * gainSD);
    }
    IJ.showProgress(1);
    // Avoid all the file saves from updating the progress bar and status line
    Utils.setShowStatus(false);
    Utils.setShowProgress(false);
    JLabel statusLine = Utils.getStatusLine();
    progressBar = Utils.getProgressBar();
    // Save to the directory as a stack
    ImageStack simulationStack = new ImageStack(size, size);
    simulationStack.addSlice("Offset", pixelOffset);
    simulationStack.addSlice("Variance", pixelVariance);
    simulationStack.addSlice("Gain", pixelGain);
    simulationImp = new ImagePlus("PerPixel", simulationStack);
    // Only the info property is saved to the TIFF file
    simulationImp.setProperty("Info", String.format("Offset=%s; Variance=%s; Gain=%s +/- %s", Utils.rounded(offset), Utils.rounded(variance), Utils.rounded(gain), Utils.rounded(gainSD)));
    IJ.save(simulationImp, new File(directory, "perPixelSimulation.tif").getPath());
    // Create thread pool and workers
    ExecutorService executor = Executors.newFixedThreadPool(getThreads());
    TurboList<Future<?>> futures = new TurboList<Future<?>>(nThreads);
    // Simulate the zero exposure input.
    // Simulate 20 - 200 photon images.
    int[] photons = new int[] { 0, 20, 50, 100, 200 };
    totalProgress = photons.length * frames;
    stepProgress = Utils.getProgressInterval(totalProgress);
    progress = 0;
    progressBar.show(0);
    // For saving stacks
    int blockSize = 10;
    int nPerThread = (int) Math.ceil((double) frames / nThreads);
    // Convert to fit the block size
    nPerThread = (int) Math.ceil((double) nPerThread / blockSize) * blockSize;
    long seed = start;
    for (int p : photons) {
        statusLine.setText("Simulating " + Utils.pleural(p, "photon"));
        // Create the directory
        File out = new File(directory, String.format("photon%03d", p));
        if (!out.exists())
            out.mkdir();
        for (int from = 0; from < frames; ) {
            int to = Math.min(from + nPerThread, frames);
            futures.add(executor.submit(new SimulationWorker(seed++, out.getPath(), simulationStack, from, to, blockSize, p)));
            from = to;
        }
        // Wait for all to finish
        for (int t = futures.size(); t-- > 0; ) {
            try {
                // The future .get() method will block until completed
                futures.get(t).get();
            } catch (Exception e) {
                // This should not happen. 
                e.printStackTrace();
            }
        }
        futures.clear();
    }
    Utils.setShowStatus(true);
    Utils.setShowProgress(true);
    IJ.showProgress(1);
    executor.shutdown();
    Utils.log("Simulation time = " + Utils.timeToString(System.currentTimeMillis() - start));
}
Also used : PoissonDistribution(org.apache.commons.math3.distribution.PoissonDistribution) TurboList(gdsc.core.utils.TurboList) ImageStack(ij.ImageStack) ExponentialDistribution(org.apache.commons.math3.distribution.ExponentialDistribution) JLabel(javax.swing.JLabel) Well19937c(org.apache.commons.math3.random.Well19937c) ImagePlus(ij.ImagePlus) PseudoRandomGenerator(gdsc.core.utils.PseudoRandomGenerator) RandomGenerator(org.apache.commons.math3.random.RandomGenerator) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) File(java.io.File)
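
The per-pixel sampling at the start of simulate() only needs the commons-math3 distributions. A minimal sketch with illustrative values for the offset, variance, gain and gain standard deviation (the field names here are hypothetical stand-ins for the plugin's settings):

import org.apache.commons.math3.distribution.ExponentialDistribution;
import org.apache.commons.math3.distribution.PoissonDistribution;
import org.apache.commons.math3.random.RandomGenerator;
import org.apache.commons.math3.random.Well19937c;

public class PerPixelCameraModelSketch {
    public static void main(String[] args) {
        final int size = 4;                  // hypothetical sensor size
        final double offset = 100;           // mean pixel offset (ADU)
        final double variance = 57.9;        // mean read variance (ADU^2)
        final double gain = 2.2, gainSD = 0.2;
        final int n = size * size;
        RandomGenerator rg = new Well19937c(42);
        // Offsets are Poisson distributed around the mean offset
        PoissonDistribution pd = new PoissonDistribution(rg, offset,
                PoissonDistribution.DEFAULT_EPSILON, PoissonDistribution.DEFAULT_MAX_ITERATIONS);
        // Variances are exponentially distributed around the mean variance
        ExponentialDistribution ed = new ExponentialDistribution(rg, variance,
                ExponentialDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
        float[] pixelOffset = new float[n];
        float[] pixelVariance = new float[n];
        float[] pixelGain = new float[n];
        for (int i = 0; i < n; i++) {
            pixelOffset[i] = pd.sample();
            pixelVariance[i] = (float) ed.sample();
            // Gains are Gaussian distributed around the mean gain
            pixelGain[i] = (float) (gain + rg.nextGaussian() * gainSD);
        }
        System.out.printf("pixel 0: offset=%.0f, variance=%.2f, gain=%.3f%n",
                pixelOffset[0], pixelVariance[0], pixelGain[0]);
    }
}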

Example 5 with Line

Use of org.apache.commons.math3.geometry.euclidean.twod.Line in project GDSC-SMLM by aherbert.

The class MaximumLikelihoodFitter, method computeFit.

/*
	 * (non-Javadoc)
	 * 
	 * @see gdsc.smlm.fitting.nonlinear.BaseFunctionSolver#computeFit(double[], double[], double[], double[])
	 */
public FitStatus computeFit(double[] y, double[] y_fit, double[] a, double[] a_dev) {
    final int n = y.length;
    LikelihoodWrapper maximumLikelihoodFunction = createLikelihoodWrapper((NonLinearFunction) f, n, y, a);
    @SuppressWarnings("rawtypes") BaseOptimizer baseOptimiser = null;
    try {
        double[] startPoint = getInitialSolution(a);
        PointValuePair optimum = null;
        if (searchMethod == SearchMethod.POWELL || searchMethod == SearchMethod.POWELL_BOUNDED || searchMethod == SearchMethod.POWELL_ADAPTER) {
            // Non-differentiable version using Powell Optimiser
            // This is as per the method in Numerical Recipes 10.5 (Direction Set (Powell's) method)
            // I could extend the optimiser and implement bounds on the directions moved. However the mapping
            // adapter seems to work OK.
            final boolean basisConvergence = false;
            // Perhaps these thresholds should be tighter?
            // The default is to use the sqrt() of the overall tolerance
            //final double lineRel = FastMath.sqrt(relativeThreshold);
            //final double lineAbs = FastMath.sqrt(absoluteThreshold);
            //final double lineRel = relativeThreshold * 1e2;
            //final double lineAbs = absoluteThreshold * 1e2;
            // Since we are fitting only a small number of parameters then just use the same tolerance 
            // for each search direction
            final double lineRel = relativeThreshold;
            final double lineAbs = absoluteThreshold;
            CustomPowellOptimizer o = new CustomPowellOptimizer(relativeThreshold, absoluteThreshold, lineRel, lineAbs, null, basisConvergence);
            baseOptimiser = o;
            OptimizationData maxIterationData = null;
            if (getMaxIterations() > 0)
                maxIterationData = new MaxIter(getMaxIterations());
            if (searchMethod == SearchMethod.POWELL_ADAPTER) {
                // Try using the mapping adapter for a bounded Powell search
                MultivariateFunctionMappingAdapter adapter = new MultivariateFunctionMappingAdapter(new MultivariateLikelihood(maximumLikelihoodFunction), lower, upper);
                optimum = o.optimize(maxIterationData, new MaxEval(getMaxEvaluations()), new ObjectiveFunction(adapter), GoalType.MINIMIZE, new InitialGuess(adapter.boundedToUnbounded(startPoint)));
                double[] solution = adapter.unboundedToBounded(optimum.getPointRef());
                optimum = new PointValuePair(solution, optimum.getValue());
            } else {
                if (powellFunction == null) {
                    // Mirror the Python code by re-mapping the function using the sqrt of the number of photons and background.
                    if (mapGaussian) {
                        Gaussian2DFunction gf = (Gaussian2DFunction) f;
                        // Re-map signal and background using the sqrt
                        int[] indices = gf.gradientIndices();
                        int[] map = new int[indices.length];
                        int count = 0;
                        // Background is always first
                        if (indices[0] == Gaussian2DFunction.BACKGROUND) {
                            map[count++] = 0;
                        }
                        // Look for the Signal in multiple peak 2D Gaussians
                        for (int i = 1; i < indices.length; i++) if (indices[i] % 6 == Gaussian2DFunction.SIGNAL) {
                            map[count++] = i;
                        }
                        if (count > 0) {
                            powellFunction = new MappedMultivariateLikelihood(maximumLikelihoodFunction, Arrays.copyOf(map, count));
                        }
                    }
                    if (powellFunction == null) {
                        powellFunction = new MultivariateLikelihood(maximumLikelihoodFunction);
                    }
                }
                // Update the maximum likelihood function in the Powell function wrapper
                powellFunction.fun = maximumLikelihoodFunction;
                OptimizationData positionChecker = null;
                // new org.apache.commons.math3.optim.PositionChecker(relativeThreshold, absoluteThreshold);
                SimpleBounds simpleBounds = null;
                if (powellFunction.isMapped()) {
                    MappedMultivariateLikelihood adapter = (MappedMultivariateLikelihood) powellFunction;
                    if (searchMethod == SearchMethod.POWELL_BOUNDED)
                        simpleBounds = new SimpleBounds(adapter.map(lower), adapter.map(upper));
                    optimum = o.optimize(maxIterationData, new MaxEval(getMaxEvaluations()), new ObjectiveFunction(powellFunction), GoalType.MINIMIZE, new InitialGuess(adapter.map(startPoint)), positionChecker, simpleBounds);
                    double[] solution = adapter.unmap(optimum.getPointRef());
                    optimum = new PointValuePair(solution, optimum.getValue());
                } else {
                    if (searchMethod == SearchMethod.POWELL_BOUNDED)
                        simpleBounds = new SimpleBounds(lower, upper);
                    optimum = o.optimize(maxIterationData, new MaxEval(getMaxEvaluations()), new ObjectiveFunction(powellFunction), GoalType.MINIMIZE, new InitialGuess(startPoint), positionChecker, simpleBounds);
                }
            }
        } else if (searchMethod == SearchMethod.BOBYQA) {
            // Differentiable approximation using Powell's BOBYQA algorithm.
            // This is slower than the Powell optimiser and requires a high number of evaluations.
            int numberOfInterpolationPoints = this.getNumberOfFittedParameters() + 2;
            BOBYQAOptimizer o = new BOBYQAOptimizer(numberOfInterpolationPoints);
            baseOptimiser = o;
            optimum = o.optimize(new MaxEval(getMaxEvaluations()), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), GoalType.MINIMIZE, new InitialGuess(startPoint), new SimpleBounds(lower, upper));
        } else if (searchMethod == SearchMethod.CMAES) {
            // TODO - Understand why the CMAES optimiser does not fit very well on test data. It appears 
            // to converge too early and the likelihood scores are not as low as the other optimisers.
            // CMAESOptimiser based on Matlab code:
            // https://www.lri.fr/~hansen/cmaes.m
            // Take the defaults from the Matlab documentation
            //Double.NEGATIVE_INFINITY;
            double stopFitness = 0;
            boolean isActiveCMA = true;
            int diagonalOnly = 0;
            int checkFeasableCount = 1;
            RandomGenerator random = new Well19937c();
            boolean generateStatistics = false;
            // The sigma determines the search range for the variables. It should be 1/3 of the initial search region.
            double[] sigma = new double[lower.length];
            for (int i = 0; i < sigma.length; i++) sigma[i] = (upper[i] - lower[i]) / 3;
            int popSize = (int) (4 + Math.floor(3 * Math.log(sigma.length)));
            // The CMAES optimiser is random and restarting can overcome problems with quick convergence.
            // The Apache Commons documentation states that convergence should occur between 30N and 300N^2
            // function evaluations
            final int n30 = FastMath.min(sigma.length * sigma.length * 30, getMaxEvaluations() / 2);
            evaluations = 0;
            OptimizationData[] data = new OptimizationData[] { new InitialGuess(startPoint), new CMAESOptimizer.PopulationSize(popSize), new MaxEval(getMaxEvaluations()), new CMAESOptimizer.Sigma(sigma), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), GoalType.MINIMIZE, new SimpleBounds(lower, upper) };
            // Iterate to prevent early convergence
            int repeat = 0;
            while (evaluations < n30) {
                if (repeat++ > 1) {
                    // Update the start point and population size
                    data[0] = new InitialGuess(optimum.getPointRef());
                    popSize *= 2;
                    data[1] = new CMAESOptimizer.PopulationSize(popSize);
                }
                CMAESOptimizer o = new CMAESOptimizer(getMaxIterations(), stopFitness, isActiveCMA, diagonalOnly, checkFeasableCount, random, generateStatistics, new SimpleValueChecker(relativeThreshold, absoluteThreshold));
                baseOptimiser = o;
                PointValuePair result = o.optimize(data);
                iterations += o.getIterations();
                evaluations += o.getEvaluations();
                if (optimum == null || result.getValue() < optimum.getValue()) {
                    optimum = result;
                }
            }
            // Prevent incrementing the iterations again
            baseOptimiser = null;
        } else if (searchMethod == SearchMethod.BFGS) {
            // BFGS can use an approximate line search minimisation whereas Powell and conjugate gradient
            // methods require a more accurate line minimisation. The BFGS search does not do a full 
            // minimisation but takes appropriate steps in the direction of the current gradient.
            // Do not use the convergence checker on the value of the function. Use the convergence on the 
            // point coordinate and gradient
            //BFGSOptimizer o = new BFGSOptimizer(new SimpleValueChecker(rel, abs));
            BFGSOptimizer o = new BFGSOptimizer();
            baseOptimiser = o;
            // Configure maximum step length for each dimension using the bounds
            double[] stepLength = new double[lower.length];
            for (int i = 0; i < stepLength.length; i++) {
                stepLength[i] = (upper[i] - lower[i]) * 0.3333333;
                if (stepLength[i] <= 0)
                    stepLength[i] = Double.POSITIVE_INFINITY;
            }
            // The GoalType is always minimise so no need to pass this in
            OptimizationData positionChecker = null;
            //new org.apache.commons.math3.optim.PositionChecker(relativeThreshold, absoluteThreshold);
            optimum = o.optimize(new MaxEval(getMaxEvaluations()), new ObjectiveFunctionGradient(new MultivariateVectorLikelihood(maximumLikelihoodFunction)), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), new InitialGuess(startPoint), new SimpleBounds(lowerConstraint, upperConstraint), new BFGSOptimizer.GradientTolerance(relativeThreshold), positionChecker, new BFGSOptimizer.StepLength(stepLength));
        } else {
            // The line search algorithm often fails. This is due to searching into a region where the 
            // function evaluates to a negative so has been clipped. This means the upper bound of the line
            // cannot be found.
            // Note that when running it on an easy problem (200 photons with fixed fitting, no background) the algorithm
            // does sometimes produce results better than the Powell algorithm, but it is slower.
            BoundedNonLinearConjugateGradientOptimizer o = new BoundedNonLinearConjugateGradientOptimizer((searchMethod == SearchMethod.CONJUGATE_GRADIENT_FR) ? Formula.FLETCHER_REEVES : Formula.POLAK_RIBIERE, new SimpleValueChecker(relativeThreshold, absoluteThreshold));
            baseOptimiser = o;
            // Note: The gradients may become unstable at the edge of the bounds. Or they will not change 
            // direction if the true solution is on the bounds since the gradient will always continue 
            // towards the bounds. This is key to the conjugate gradient method. It searches along a vector 
            // until the direction of the gradient is in the opposite direction (using dot products, i.e. 
            // cosine of angle between them)
            // NR 10.7 states there is no advantage of the variable metric DFP or BFGS methods over
            // conjugate gradient methods. So I will try these first.
            // Try this:
            // Adapt the conjugate gradient optimiser to use the gradient to pick the search direction
            // and then for the line minimisation. However if the function is out of bounds then clip the 
            // variables at the bounds and continue. 
            // If the current point is at the bounds and the gradient is to continue out of bounds then 
            // clip the gradient too.
            // Or: just use the gradient for the search direction then use the line minimisation/rest
            // as per the Powell optimiser. The bounds should limit the search.
            // I tried a Bounded conjugate gradient optimiser with clipped variables:
            // This sometimes works. However when the variables go a long way out of the expected range the gradients
            // can have vastly different magnitudes. This results in the algorithm stalling since the gradients
            // can be close to zero and some of the parameters are no longer adjusted.
            // Perhaps this can be looked for and the algorithm then gives up and resorts to a Powell optimiser from 
            // the current point.
            // Changed the bracketing step to very small (default is 1, changed to 0.001). This improves the 
            // performance. The gradient direction is very sensitive to small changes in the coordinates so a 
            // tighter bracketing of the line search helps.
            // Tried using a non-gradient method for the line search copied from the Powell optimiser:
            // This also works when the bracketing step is small but the number of iterations is higher.
            // 24.10.2014: I have tried to get conjugate gradient to work but the gradient function 
            // must not behave suitably for the optimiser. In the current state both methods of using a 
            // Bounded Conjugate Gradient Optimiser perform poorly relative to other optimisers:
            // Simulated : n=1000, signal=200, x=0.53, y=0.47
            // LVM : n=1000, signal=171, x=0.537, y=0.471 (1.003s)
            // Powell : n=1000, signal=187, x=0.537, y=0.48 (1.238s)
            // Gradient based PR (constrained): n=858, signal=161, x=0.533, y=0.474 (2.54s)
            // Gradient based PR (bounded): n=948, signal=161, x=0.533, y=0.473 (2.67s)
            // Non-gradient based : n=1000, signal=151.47, x=0.535, y=0.474 (1.626s)
            // The conjugate optimisers are slower, under predict the signal by the most and in the case of 
            // the gradient based optimiser, fail to converge on some problems. This is worse when constrained
            // fitting is used and not tightly bounded fitting.
            // I will leave the code in as an option but would not recommend using it. I may remove it in the 
            // future.
            // Note: It is strange that the non-gradient based line minimisation is more successful.
            // It may be that the gradient function is not accurate (due to round off error) or that it is
            // simply wrong when far from the optimum. My JUnit tests only evaluate the function within the 
            // expected range of the answer.
            // Note the default step size on the Powell optimiser is 1 but the initial directions are unit vectors.
            // So our bracketing step should be a minimum of 1 / average length of the first gradient vector to prevent
            // the first step being too large when bracketing.
            final double[] gradient = new double[startPoint.length];
            maximumLikelihoodFunction.likelihood(startPoint, gradient);
            double l = 0;
            for (double d : gradient) l += d * d;
            final double bracketingStep = FastMath.min(0.001, ((l > 1) ? 1.0 / l : 1));
            //System.out.printf("Bracketing step = %f (length=%f)\n", bracketingStep, l);
            o.setUseGradientLineSearch(gradientLineMinimisation);
            optimum = o.optimize(new MaxEval(getMaxEvaluations()), new ObjectiveFunctionGradient(new MultivariateVectorLikelihood(maximumLikelihoodFunction)), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), GoalType.MINIMIZE, new InitialGuess(startPoint), new SimpleBounds(lowerConstraint, upperConstraint), new BoundedNonLinearConjugateGradientOptimizer.BracketingStep(bracketingStep));
        //maximumLikelihoodFunction.value(solution, gradient);
        //System.out.printf("Iter = %d, %g @ %s : %s\n", iterations, ll, Arrays.toString(solution),
        //		Arrays.toString(gradient));
        }
        final double[] solution = optimum.getPointRef();
        setSolution(a, solution);
        if (a_dev != null) {
            // Assume the Maximum Likelihood estimator returns the optimum fit (achieves the Cramér-Rao
            // lower bounds) and so the covariance can be obtained from the Fisher Information Matrix.
            FisherInformationMatrix m = new FisherInformationMatrix(maximumLikelihoodFunction.fisherInformation(a));
            setDeviations(a_dev, m.crlb(true));
        }
        // Reverse negative log likelihood for maximum likelihood score
        value = -optimum.getValue();
    } catch (TooManyIterationsException e) {
        //e.printStackTrace();
        return FitStatus.TOO_MANY_ITERATIONS;
    } catch (TooManyEvaluationsException e) {
        //e.printStackTrace();
        return FitStatus.TOO_MANY_EVALUATIONS;
    } catch (ConvergenceException e) {
        //System.out.printf("Singular non linear model = %s\n", e.getMessage());
        return FitStatus.SINGULAR_NON_LINEAR_MODEL;
    } catch (BFGSOptimizer.LineSearchRoundoffException e) {
        //e.printStackTrace();
        return FitStatus.FAILED_TO_CONVERGE;
    } catch (Exception e) {
        //System.out.printf("Unknown error = %s\n", e.getMessage());
        e.printStackTrace();
        return FitStatus.UNKNOWN;
    } finally {
        if (baseOptimiser != null) {
            iterations += baseOptimiser.getIterations();
            evaluations += baseOptimiser.getEvaluations();
        }
    }
    // Check this as likelihood functions can go wrong
    if (Double.isInfinite(value) || Double.isNaN(value))
        return FitStatus.INVALID_LIKELIHOOD;
    return FitStatus.OK;
}
Also used : MaxEval(org.apache.commons.math3.optim.MaxEval) InitialGuess(org.apache.commons.math3.optim.InitialGuess) BOBYQAOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.BOBYQAOptimizer) SimpleBounds(org.apache.commons.math3.optim.SimpleBounds) ObjectiveFunction(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction) Well19937c(org.apache.commons.math3.random.Well19937c) SimpleValueChecker(org.apache.commons.math3.optim.SimpleValueChecker) RandomGenerator(org.apache.commons.math3.random.RandomGenerator) BFGSOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.gradient.BFGSOptimizer) PointValuePair(org.apache.commons.math3.optim.PointValuePair) TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) Gaussian2DFunction(gdsc.smlm.function.gaussian.Gaussian2DFunction) ConvergenceException(org.apache.commons.math3.exception.ConvergenceException) BoundedNonLinearConjugateGradientOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.gradient.BoundedNonLinearConjugateGradientOptimizer) TooManyIterationsException(org.apache.commons.math3.exception.TooManyIterationsException) BaseOptimizer(org.apache.commons.math3.optim.BaseOptimizer) CMAESOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CMAESOptimizer) FisherInformationMatrix(gdsc.smlm.fitting.FisherInformationMatrix) PoissonGammaGaussianLikelihoodWrapper(gdsc.smlm.function.PoissonGammaGaussianLikelihoodWrapper) PoissonGaussianLikelihoodWrapper(gdsc.smlm.function.PoissonGaussianLikelihoodWrapper) PoissonLikelihoodWrapper(gdsc.smlm.function.PoissonLikelihoodWrapper) LikelihoodWrapper(gdsc.smlm.function.LikelihoodWrapper) ConvergenceException(org.apache.commons.math3.exception.ConvergenceException) TooManyIterationsException(org.apache.commons.math3.exception.TooManyIterationsException) TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) ObjectiveFunctionGradient(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunctionGradient) MultivariateFunctionMappingAdapter(org.apache.commons.math3.optim.nonlinear.scalar.MultivariateFunctionMappingAdapter) OptimizationData(org.apache.commons.math3.optim.OptimizationData) CustomPowellOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CustomPowellOptimizer) MaxIter(org.apache.commons.math3.optim.MaxIter)
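
For the bounded, derivative-free BOBYQA branch above, the stock BOBYQAOptimizer can be exercised on its own. A minimal sketch on a toy objective standing in for the negative log-likelihood, with made-up box bounds and start point (the likelihood wrappers are project specific):

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.SimpleBounds;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.BOBYQAOptimizer;

public class BobyqaSketch {
    public static void main(String[] args) {
        // Toy stand-in for a negative log-likelihood with its minimum at (2, 0.5)
        MultivariateFunction f = p -> Math.pow(p[0] - 2, 2) + Math.pow(p[1] - 0.5, 2);
        double[] lower = { 0, 0 };
        double[] upper = { 10, 10 };
        double[] start = { 5, 5 };
        // BOBYQA needs at least n+2 interpolation points for n parameters
        int interpolationPoints = start.length + 2;
        BOBYQAOptimizer o = new BOBYQAOptimizer(interpolationPoints);
        PointValuePair optimum = o.optimize(
                new MaxEval(2000),
                new ObjectiveFunction(f),
                GoalType.MINIMIZE,
                new InitialGuess(start),
                new SimpleBounds(lower, upper));
        System.out.printf("x=%.3f, y=%.3f, value=%.6f%n",
                optimum.getPoint()[0], optimum.getPoint()[1], optimum.getValue());
    }
}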
