Example 1 with GoalType

Use of org.apache.commons.math3.optim.nonlinear.scalar.GoalType in project GDSC-SMLM by aherbert.

From the class BinomialFitter, method fitBinomial:

/**
	 * Fit the binomial distribution (n,p) to the cumulative histogram. Performs fitting assuming a fixed n value and
	 * attempts to optimise p.
	 * 
	 * @param histogram
	 *            The input histogram
	 * @param mean
	 *            The histogram mean (used to estimate p). Calculated if NaN.
	 * @param n
	 *            The n to evaluate
	 * @param zeroTruncated
	 *            True if the model should ignore n=0 (zero-truncated binomial)
	 * @return The best fit (p), with the optimisation score as the value
	 * @throws IllegalArgumentException
	 *             If any of the input data values are negative
	 * @throws IllegalArgumentException
	 *             If fitting a zero-truncated binomial and there are no values above zero
	 */
public PointValuePair fitBinomial(double[] histogram, double mean, int n, boolean zeroTruncated) {
    if (Double.isNaN(mean))
        mean = getMean(histogram);
    if (zeroTruncated && histogram[0] > 0) {
        log("Fitting zero-truncated histogram but there are zero values - Renormalising to ignore zero");
        double cumul = 0;
        for (int i = 1; i < histogram.length; i++) cumul += histogram[i];
        if (cumul == 0)
            throw new IllegalArgumentException("Fitting zero-truncated histogram but there are no non-zero values");
        histogram[0] = 0;
        for (int i = 1; i < histogram.length; i++) histogram[i] /= cumul;
    }
    int nFittedPoints = Math.min(histogram.length, n + 1) - ((zeroTruncated) ? 1 : 0);
    if (nFittedPoints < 1) {
        log("No points to fit (%d): Histogram.length = %d, n = %d, zero-truncated = %b", nFittedPoints, histogram.length, n, zeroTruncated);
        return null;
    }
    // The model is only fitting the probability p
    // For a binomial n*p = mean => p = mean/n
    double[] initialSolution = new double[] { FastMath.min(mean / n, 1) };
    // Create the function
    BinomialModelFunction function = new BinomialModelFunction(histogram, n, zeroTruncated);
    double[] lB = new double[1];
    double[] uB = new double[] { 1 };
    SimpleBounds bounds = new SimpleBounds(lB, uB);
    // Fit
    // CMAESOptimizer or BOBYQAOptimizer support bounds
    // CMAESOptimiser based on Matlab code:
    // https://www.lri.fr/~hansen/cmaes.m
    // Take the defaults from the Matlab documentation
    int maxIterations = 2000;
    double stopFitness = 0; // (alternative: Double.NEGATIVE_INFINITY)
    boolean isActiveCMA = true;
    int diagonalOnly = 0;
    int checkFeasableCount = 1;
    RandomGenerator random = new Well19937c();
    boolean generateStatistics = false;
    ConvergenceChecker<PointValuePair> checker = new SimpleValueChecker(1e-6, 1e-10);
    // The sigma determines the search range for the variables. It should be 1/3 of the initial search region.
    OptimizationData sigma = new CMAESOptimizer.Sigma(new double[] { (uB[0] - lB[0]) / 3 });
    OptimizationData popSize = new CMAESOptimizer.PopulationSize((int) (4 + Math.floor(3 * Math.log(2))));
    try {
        PointValuePair solution = null;
        boolean noRefit = maximumLikelihood;
        if (n == 1 && zeroTruncated) {
            // No need to fit
            solution = new PointValuePair(new double[] { 1 }, 0);
            noRefit = true;
        } else {
            GoalType goalType = (maximumLikelihood) ? GoalType.MAXIMIZE : GoalType.MINIMIZE;
            // Iteratively fit
            CMAESOptimizer opt = new CMAESOptimizer(maxIterations, stopFitness, isActiveCMA, diagonalOnly, checkFeasableCount, random, generateStatistics, checker);
            for (int iteration = 0; iteration <= fitRestarts; iteration++) {
                try {
                    // Start from the initial solution
                    PointValuePair result = opt.optimize(new InitialGuess(initialSolution), new ObjectiveFunction(function), goalType, bounds, sigma, popSize, new MaxIter(maxIterations), new MaxEval(maxIterations * 2));
                    if (solution == null || result.getValue() < solution.getValue()) {
                        solution = result;
                    }
                } catch (TooManyEvaluationsException e) {
                    // Ignore and continue to the next restart
                } catch (TooManyIterationsException e) {
                    // Ignore and continue to the next restart
                }
                if (solution == null)
                    continue;
                try {
                    // Also restart from the current optimum
                    PointValuePair result = opt.optimize(new InitialGuess(solution.getPointRef()), new ObjectiveFunction(function), goalType, bounds, sigma, popSize, new MaxIter(maxIterations), new MaxEval(maxIterations * 2));
                    if (result.getValue() < solution.getValue()) {
                        solution = result;
                    }
                } catch (TooManyEvaluationsException e) {
                    // Ignore and continue to the next restart
                } catch (TooManyIterationsException e) {
                    // Ignore and continue to the next restart
                }
            }
            if (solution == null)
                return null;
        }
        if (noRefit) {
            // Although we fit the log-likelihood, return the sum-of-squares to allow 
            // comparison across different n
            double p = solution.getPointRef()[0];
            double ss = 0;
            double[] obs = function.p;
            double[] exp = function.getP(p);
            for (int i = 0; i < obs.length; i++) ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
            return new PointValuePair(solution.getPointRef(), ss);
        } else if (nFittedPoints > 1) {
            // We can do an LVM refit if the number of fitted points is more than 1.
            // Improve SS fit with a gradient based LVM optimizer
            LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
            try {
                final BinomialModelFunctionGradient gradientFunction = new BinomialModelFunctionGradient(histogram, n, zeroTruncated);
                //@formatter:off
                LeastSquaresProblem problem = new LeastSquaresBuilder()
                        .maxEvaluations(Integer.MAX_VALUE)
                        .maxIterations(3000)
                        .start(solution.getPointRef())
                        .target(gradientFunction.p)
                        .weight(new DiagonalMatrix(gradientFunction.getWeights()))
                        .model(gradientFunction, new MultivariateMatrixFunction() {
                            public double[][] value(double[] point) throws IllegalArgumentException {
                                return gradientFunction.jacobian(point);
                            }
                        })
                        .build();
                //@formatter:on
                Optimum lvmSolution = optimizer.optimize(problem);
                // Check the fitted p is within [0, 1] since the LVM search is not bounded.
                double p = lvmSolution.getPoint().getEntry(0);
                if (p <= 1 && p >= 0) {
                    // The residual dot product equals the sum-of-squares when the weights are 1
                    double ss = lvmSolution.getResiduals().dotProduct(lvmSolution.getResiduals());
                    if (ss < solution.getValue()) {
                        return new PointValuePair(lvmSolution.getPoint().toArray(), ss);
                    }
                }
            } catch (TooManyIterationsException e) {
                log("Failed to re-fit: Too many iterations: %s", e.getMessage());
            } catch (ConvergenceException e) {
                log("Failed to re-fit: %s", e.getMessage());
            } catch (Exception e) {
                // Ignore this ...
            }
        }
        return solution;
    } catch (Exception e) {
        log("Failed to fit Binomial distribution with N=%d : %s", n, e.getMessage());
    }
    return null;
}
Also used : InitialGuess(org.apache.commons.math3.optim.InitialGuess) MaxEval(org.apache.commons.math3.optim.MaxEval) SimpleBounds(org.apache.commons.math3.optim.SimpleBounds) ObjectiveFunction(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction) Well19937c(org.apache.commons.math3.random.Well19937c) SimpleValueChecker(org.apache.commons.math3.optim.SimpleValueChecker) RandomGenerator(org.apache.commons.math3.random.RandomGenerator) PointValuePair(org.apache.commons.math3.optim.PointValuePair) LeastSquaresBuilder(org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder) TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) DiagonalMatrix(org.apache.commons.math3.linear.DiagonalMatrix) ConvergenceException(org.apache.commons.math3.exception.ConvergenceException) TooManyIterationsException(org.apache.commons.math3.exception.TooManyIterationsException) LeastSquaresProblem(org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem) MultivariateMatrixFunction(org.apache.commons.math3.analysis.MultivariateMatrixFunction) CMAESOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CMAESOptimizer) GoalType(org.apache.commons.math3.optim.nonlinear.scalar.GoalType) Optimum(org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer.Optimum) LevenbergMarquardtOptimizer(org.apache.commons.math3.fitting.leastsquares.LevenbergMarquardtOptimizer) OptimizationData(org.apache.commons.math3.optim.OptimizationData) MaxIter(org.apache.commons.math3.optim.MaxIter)
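
For context, a minimal usage sketch of the method above. The fitBinomial signature is taken from the source; the no-argument BinomialFitter constructor and the histogram data are assumptions for illustration. Because the method returns the sum-of-squares as the score, fits for different n can be compared directly:

import org.apache.commons.math3.optim.PointValuePair;

// Assumes gdsc.smlm's BinomialFitter is on the classpath; the no-argument
// constructor is an assumption for illustration.
public class BinomialFitExample {
    public static void main(String[] args) {
        double[] histogram = { 0, 5, 12, 8, 3 }; // hypothetical counts for k = 0..4
        BinomialFitter fitter = new BinomialFitter();
        PointValuePair best = null;
        int bestN = 0;
        for (int n = 2; n <= 6; n++) {
            // NaN mean: the fitter estimates it (and hence the initial p) from the histogram
            PointValuePair fit = fitter.fitBinomial(histogram, Double.NaN, n, false);
            if (fit != null && (best == null || fit.getValue() < best.getValue())) {
                best = fit;
                bestN = n;
            }
        }
        if (best != null)
            System.out.printf("Best fit: n=%d, p=%f (SS=%f)%n", bestN, best.getPointRef()[0], best.getValue());
    }
}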

Example 2 with GoalType

Use of org.apache.commons.math3.optim.nonlinear.scalar.GoalType in project GDSC-SMLM by aherbert.

From the class CustomPowellOptimizer, method doOptimize:

/** {@inheritDoc} */
@Override
protected PointValuePair doOptimize() {
    final GoalType goal = getGoalType();
    final double[] guess = getStartPoint();
    final int n = guess.length;
    // Mark when we have modified the basis vectors
    boolean nonBasis = false;
    double[][] direc = createBasisVectors(n);
    final ConvergenceChecker<PointValuePair> checker = getConvergenceChecker();
    double[] x = guess;
    // Ensure the point is within bounds
    applyBounds(x);
    double fVal = computeObjectiveValue(x);
    double[] x1 = x.clone();
    while (true) {
        incrementIterationCount();
        final double fX = fVal;
        double fX2 = 0;
        double delta = 0;
        int bigInd = 0;
        for (int i = 0; i < n; i++) {
            fX2 = fVal;
            final UnivariatePointValuePair optimum = line.search(x, direc[i]);
            fVal = optimum.getValue();
            x = newPoint(x, direc[i], optimum.getPoint());
            if ((fX2 - fVal) > delta) {
                delta = fX2 - fVal;
                bigInd = i;
            }
        }
        boolean stop = false;
        if (positionChecker != null) {
            // Check for convergence on the position
            stop = positionChecker.converged(x1, x);
        }
        if (!stop) {
            // Check if we have improved from an impossible position
            if (Double.isInfinite(fX) || Double.isNaN(fX)) {
                if (Double.isInfinite(fVal) || Double.isNaN(fVal)) {
                    // Nowhere to go 
                    stop = true;
                }
            // else: this is better as we now have a value, so continue
            } else {
                stop = DoubleEquality.almostEqualRelativeOrAbsolute(fX, fVal, relativeThreshold, absoluteThreshold);
            }
        }
        final PointValuePair previous = new PointValuePair(x1, fX);
        final PointValuePair current = new PointValuePair(x, fVal);
        if (!stop && checker != null) {
            // User-defined stopping criteria.
            stop = checker.converged(getIterations(), previous, current);
        }
        boolean reset = false;
        if (stop) {
            // Only allow convergence using the basis vectors, i.e. we cannot move along any dimension
            if (basisConvergence && nonBasis) {
                // Reset to the basis vectors and continue
                reset = true;
            } else {
                final PointValuePair answer;
                if (goal == GoalType.MINIMIZE) {
                    answer = (fVal < fX) ? current : previous;
                } else {
                    answer = (fVal > fX) ? current : previous;
                }
                return answer;
            }
        }
        if (reset) {
            direc = createBasisVectors(n);
            nonBasis = false;
        }
        final double[] d = new double[n];
        final double[] x2 = new double[n];
        for (int i = 0; i < n; i++) {
            d[i] = x[i] - x1[i];
            x2[i] = x[i] + d[i];
        }
        applyBounds(x2);
        x1 = x.clone();
        fX2 = computeObjectiveValue(x2);
        // See if we can continue along the overall search direction to find a better value
        if (fX > fX2) {
            // Check if:
            // 1. The decrease along the average direction was not due to any single direction's decrease
            // 2. There is a substantial second derivative along the average direction and we are close to
            // its minimum
            double t = 2 * (fX + fX2 - 2 * fVal);
            double temp = fX - fVal - delta;
            t *= temp * temp;
            temp = fX - fX2;
            t -= delta * temp * temp;
            if (t < 0.0) {
                final UnivariatePointValuePair optimum = line.search(x, d);
                fVal = optimum.getValue();
                if (reset) {
                    x = newPoint(x, d, optimum.getPoint());
                    continue;
                } else {
                    final double[][] result = newPointAndDirection(x, d, optimum.getPoint());
                    x = result[0];
                    final int lastInd = n - 1;
                    direc[bigInd] = direc[lastInd];
                    direc[lastInd] = result[1];
                    nonBasis = true;
                }
            }
        }
    }
}
Also used : GoalType(org.apache.commons.math3.optim.nonlinear.scalar.GoalType) UnivariatePointValuePair(org.apache.commons.math3.optim.univariate.UnivariatePointValuePair) PointValuePair(org.apache.commons.math3.optim.PointValuePair)
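
CustomPowellOptimizer is a project-local variant of Powell's direction-set method. As a reference point, here is a minimal sketch using the stock Commons Math PowellOptimizer (the standard library class, not the custom one above) to minimise a simple quadratic:

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.PowellOptimizer;

public class PowellExample {
    public static void main(String[] args) {
        // f(x, y) = (x - 1)^2 + (y - 2)^2 has its minimum at (1, 2)
        MultivariateFunction f = point -> {
            double dx = point[0] - 1, dy = point[1] - 2;
            return dx * dx + dy * dy;
        };
        // Relative and absolute convergence thresholds as used elsewhere on this page
        PowellOptimizer opt = new PowellOptimizer(1e-6, 1e-10);
        PointValuePair result = opt.optimize(
                new MaxEval(1000),
                new ObjectiveFunction(f),
                GoalType.MINIMIZE,
                new InitialGuess(new double[] { 0, 0 }));
        System.out.printf("Minimum at (%f, %f), value %f%n",
                result.getPoint()[0], result.getPoint()[1], result.getValue());
    }
}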

Example 3 with GoalType

Use of org.apache.commons.math3.optim.nonlinear.scalar.GoalType in project GDSC-SMLM by aherbert.

From the class MaximumLikelihoodFitter, method computeFit:

/*
	 * (non-Javadoc)
	 * 
	 * @see gdsc.smlm.fitting.nonlinear.BaseFunctionSolver#computeFit(double[], double[], double[], double[])
	 */
public FitStatus computeFit(double[] y, double[] y_fit, double[] a, double[] a_dev) {
    final int n = y.length;
    LikelihoodWrapper maximumLikelihoodFunction = createLikelihoodWrapper((NonLinearFunction) f, n, y, a);
    @SuppressWarnings("rawtypes") BaseOptimizer baseOptimiser = null;
    try {
        double[] startPoint = getInitialSolution(a);
        PointValuePair optimum = null;
        if (searchMethod == SearchMethod.POWELL || searchMethod == SearchMethod.POWELL_BOUNDED || searchMethod == SearchMethod.POWELL_ADAPTER) {
            // Non-differentiable version using Powell Optimiser
            // This is as per the method in Numerical Recipes 10.5 (Direction Set (Powell's) method)
            // I could extend the optimiser and implement bounds on the directions moved. However the mapping
            // adapter seems to work OK.
            final boolean basisConvergence = false;
            // Perhaps these thresholds should be tighter?
            // The default is to use the sqrt() of the overall tolerance
            //final double lineRel = FastMath.sqrt(relativeThreshold);
            //final double lineAbs = FastMath.sqrt(absoluteThreshold);
            //final double lineRel = relativeThreshold * 1e2;
            //final double lineAbs = absoluteThreshold * 1e2;
            // Since we are fitting only a small number of parameters then just use the same tolerance 
            // for each search direction
            final double lineRel = relativeThreshold;
            final double lineAbs = absoluteThreshold;
            CustomPowellOptimizer o = new CustomPowellOptimizer(relativeThreshold, absoluteThreshold, lineRel, lineAbs, null, basisConvergence);
            baseOptimiser = o;
            OptimizationData maxIterationData = null;
            if (getMaxIterations() > 0)
                maxIterationData = new MaxIter(getMaxIterations());
            if (searchMethod == SearchMethod.POWELL_ADAPTER) {
                // Try using the mapping adapter for a bounded Powell search
                MultivariateFunctionMappingAdapter adapter = new MultivariateFunctionMappingAdapter(new MultivariateLikelihood(maximumLikelihoodFunction), lower, upper);
                optimum = o.optimize(maxIterationData, new MaxEval(getMaxEvaluations()), new ObjectiveFunction(adapter), GoalType.MINIMIZE, new InitialGuess(adapter.boundedToUnbounded(startPoint)));
                double[] solution = adapter.unboundedToBounded(optimum.getPointRef());
                optimum = new PointValuePair(solution, optimum.getValue());
            } else {
                if (powellFunction == null) {
                    // Map the parameters into the same range. The original MLE Python
                    // code does this by using the sqrt of the number of photons and background.
                    if (mapGaussian) {
                        Gaussian2DFunction gf = (Gaussian2DFunction) f;
                        // Re-map signal and background using the sqrt
                        int[] indices = gf.gradientIndices();
                        int[] map = new int[indices.length];
                        int count = 0;
                        // Background is always first
                        if (indices[0] == Gaussian2DFunction.BACKGROUND) {
                            map[count++] = 0;
                        }
                        // Look for the Signal in multiple peak 2D Gaussians
                        for (int i = 1; i < indices.length; i++) {
                            if (indices[i] % 6 == Gaussian2DFunction.SIGNAL) {
                                map[count++] = i;
                            }
                        }
                        if (count > 0) {
                            powellFunction = new MappedMultivariateLikelihood(maximumLikelihoodFunction, Arrays.copyOf(map, count));
                        }
                    }
                    if (powellFunction == null) {
                        powellFunction = new MultivariateLikelihood(maximumLikelihoodFunction);
                    }
                }
                // Update the maximum likelihood function in the Powell function wrapper
                powellFunction.fun = maximumLikelihoodFunction;
                OptimizationData positionChecker = null;
                // new org.apache.commons.math3.optim.PositionChecker(relativeThreshold, absoluteThreshold);
                SimpleBounds simpleBounds = null;
                if (powellFunction.isMapped()) {
                    MappedMultivariateLikelihood adapter = (MappedMultivariateLikelihood) powellFunction;
                    if (searchMethod == SearchMethod.POWELL_BOUNDED)
                        simpleBounds = new SimpleBounds(adapter.map(lower), adapter.map(upper));
                    optimum = o.optimize(maxIterationData, new MaxEval(getMaxEvaluations()), new ObjectiveFunction(powellFunction), GoalType.MINIMIZE, new InitialGuess(adapter.map(startPoint)), positionChecker, simpleBounds);
                    double[] solution = adapter.unmap(optimum.getPointRef());
                    optimum = new PointValuePair(solution, optimum.getValue());
                } else {
                    if (searchMethod == SearchMethod.POWELL_BOUNDED)
                        simpleBounds = new SimpleBounds(lower, upper);
                    optimum = o.optimize(maxIterationData, new MaxEval(getMaxEvaluations()), new ObjectiveFunction(powellFunction), GoalType.MINIMIZE, new InitialGuess(startPoint), positionChecker, simpleBounds);
                }
            }
        } else if (searchMethod == SearchMethod.BOBYQA) {
            // Differentiable approximation using Powell's BOBYQA algorithm.
            // This is slower than the Powell optimiser and requires a high number of evaluations.
            int numberOfInterpolationPoints = this.getNumberOfFittedParameters() + 2;
            BOBYQAOptimizer o = new BOBYQAOptimizer(numberOfInterpolationPoints);
            baseOptimiser = o;
            optimum = o.optimize(new MaxEval(getMaxEvaluations()), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), GoalType.MINIMIZE, new InitialGuess(startPoint), new SimpleBounds(lower, upper));
        } else if (searchMethod == SearchMethod.CMAES) {
            // TODO - Understand why the CMAES optimiser does not fit very well on test data. It appears 
            // to converge too early and the likelihood scores are not as low as the other optimisers.
            // CMAESOptimiser based on Matlab code:
            // https://www.lri.fr/~hansen/cmaes.m
            // Take the defaults from the Matlab documentation
            double stopFitness = 0; // (alternative: Double.NEGATIVE_INFINITY)
            boolean isActiveCMA = true;
            int diagonalOnly = 0;
            int checkFeasableCount = 1;
            RandomGenerator random = new Well19937c();
            boolean generateStatistics = false;
            // The sigma determines the search range for the variables. It should be 1/3 of the initial search region.
            double[] sigma = new double[lower.length];
            for (int i = 0; i < sigma.length; i++) sigma[i] = (upper[i] - lower[i]) / 3;
            int popSize = (int) (4 + Math.floor(3 * Math.log(sigma.length)));
            // The CMAES optimiser is random and restarting can overcome problems with quick convergence.
            // The Apache Commons documentation states that convergence should occur between 30N and 300N^2
            // function evaluations
            final int n30 = FastMath.min(sigma.length * sigma.length * 30, getMaxEvaluations() / 2);
            evaluations = 0;
            OptimizationData[] data = new OptimizationData[] { new InitialGuess(startPoint), new CMAESOptimizer.PopulationSize(popSize), new MaxEval(getMaxEvaluations()), new CMAESOptimizer.Sigma(sigma), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), GoalType.MINIMIZE, new SimpleBounds(lower, upper) };
            // Iterate to prevent early convergence
            int repeat = 0;
            while (evaluations < n30) {
                if (repeat++ > 1) {
                    // Update the start point and population size
                    data[0] = new InitialGuess(optimum.getPointRef());
                    popSize *= 2;
                    data[1] = new CMAESOptimizer.PopulationSize(popSize);
                }
                CMAESOptimizer o = new CMAESOptimizer(getMaxIterations(), stopFitness, isActiveCMA, diagonalOnly, checkFeasableCount, random, generateStatistics, new SimpleValueChecker(relativeThreshold, absoluteThreshold));
                baseOptimiser = o;
                PointValuePair result = o.optimize(data);
                iterations += o.getIterations();
                evaluations += o.getEvaluations();
                if (optimum == null || result.getValue() < optimum.getValue()) {
                    optimum = result;
                }
            }
            // Prevent incrementing the iterations again
            baseOptimiser = null;
        } else if (searchMethod == SearchMethod.BFGS) {
            // BFGS can use an approximate line search minimisation whereas Powell and conjugate gradient
            // methods require a more accurate line minimisation. The BFGS search does not do a full
            // minimisation but takes appropriate steps in the direction of the current gradient.
            // Do not use the convergence checker on the value of the function; use convergence on the
            // point coordinate and gradient.
            //BFGSOptimizer o = new BFGSOptimizer(new SimpleValueChecker(rel, abs));
            BFGSOptimizer o = new BFGSOptimizer();
            baseOptimiser = o;
            // Configure maximum step length for each dimension using the bounds
            double[] stepLength = new double[lower.length];
            for (int i = 0; i < stepLength.length; i++) {
                stepLength[i] = (upper[i] - lower[i]) * 0.3333333;
                if (stepLength[i] <= 0)
                    stepLength[i] = Double.POSITIVE_INFINITY;
            }
            // The GoalType is always minimise so no need to pass this in
            OptimizationData positionChecker = null;
            //new org.apache.commons.math3.optim.PositionChecker(relativeThreshold, absoluteThreshold);
            optimum = o.optimize(new MaxEval(getMaxEvaluations()), new ObjectiveFunctionGradient(new MultivariateVectorLikelihood(maximumLikelihoodFunction)), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), new InitialGuess(startPoint), new SimpleBounds(lowerConstraint, upperConstraint), new BFGSOptimizer.GradientTolerance(relativeThreshold), positionChecker, new BFGSOptimizer.StepLength(stepLength));
        } else {
            // The line search algorithm often fails. This is due to searching into a region where the 
            // function evaluates to a negative so has been clipped. This means the upper bound of the line
            // cannot be found.
            // Note that running it on an easy problem (200 photons with fixed fitting (no background)) the algorithm
            // does sometimes produce results better than the Powell algorithm but it is slower.
            BoundedNonLinearConjugateGradientOptimizer o = new BoundedNonLinearConjugateGradientOptimizer((searchMethod == SearchMethod.CONJUGATE_GRADIENT_FR) ? Formula.FLETCHER_REEVES : Formula.POLAK_RIBIERE, new SimpleValueChecker(relativeThreshold, absoluteThreshold));
            baseOptimiser = o;
            // Note: The gradients may become unstable at the edge of the bounds. Or they will not change 
            // direction if the true solution is on the bounds since the gradient will always continue 
            // towards the bounds. This is key to the conjugate gradient method. It searches along a vector 
            // until the direction of the gradient is in the opposite direction (using dot products, i.e. 
            // cosine of angle between them)
            // NR 10.7 states there is no advantage of the variable metric DFP or BFGS methods over
            // conjugate gradient methods. So I will try these first.
            // Try this:
            // Adapt the conjugate gradient optimiser to use the gradient to pick the search direction
            // and then for the line minimisation. However if the function is out of bounds then clip the 
            // variables at the bounds and continue. 
            // If the current point is at the bounds and the gradient is to continue out of bounds then 
            // clip the gradient too.
            // Or: just use the gradient for the search direction then use the line minimisation/rest
            // as per the Powell optimiser. The bounds should limit the search.
            // I tried a Bounded conjugate gradient optimiser with clipped variables:
            // This sometimes works. However when the variables go a long way out of the expected range the gradients
            // can have vastly different magnitudes. This results in the algorithm stalling since the gradients
            // can be close to zero and some of the parameters are no longer adjusted.
            // Perhaps this can be looked for and the algorithm then gives up and resorts to a Powell optimiser from 
            // the current point.
            // Changed the bracketing step to very small (default is 1, changed to 0.001). This improves the 
            // performance. The gradient direction is very sensitive to small changes in the coordinates so a 
            // tighter bracketing of the line search helps.
            // Tried using a non-gradient method for the line search copied from the Powell optimiser:
            // This also works when the bracketing step is small but the number of iterations is higher.
            // 24.10.2014: I have tried to get conjugate gradient to work but the gradient function 
            // does not appear to behave suitably for the optimiser. In the current state both methods of using a 
            // Bounded Conjugate Gradient Optimiser perform poorly relative to other optimisers:
            // Simulated : n=1000, signal=200, x=0.53, y=0.47
            // LVM : n=1000, signal=171, x=0.537, y=0.471 (1.003s)
            // Powell : n=1000, signal=187, x=0.537, y=0.48 (1.238s)
            // Gradient based PR (constrained): n=858, signal=161, x=0.533, y=0.474 (2.54s)
            // Gradient based PR (bounded): n=948, signal=161, x=0.533, y=0.473 (2.67s)
            // Non-gradient based : n=1000, signal=151.47, x=0.535, y=0.474 (1.626s)
            // The conjugate optimisers are slower, under-predict the signal the most and, in the case of 
            // the gradient based optimiser, fail to converge on some problems. This is worse when constrained
            // fitting is used and not tightly bounded fitting.
            // I will leave the code in as an option but would not recommend using it. I may remove it in the 
            // future.
            // Note: It is strange that the non-gradient based line minimisation is more successful.
            // It may be that the gradient function is not accurate (due to round off error) or that it is
            // simply wrong when far from the optimum. My JUnit tests only evaluate the function within the 
            // expected range of the answer.
            // Note the default step size on the Powell optimiser is 1 but the initial directions are unit vectors.
            // So our bracketing step should be a minimum of 1 / average length of the first gradient vector to prevent
            // the first step being too large when bracketing.
            final double[] gradient = new double[startPoint.length];
            maximumLikelihoodFunction.likelihood(startPoint, gradient);
            double l = 0;
            for (double d : gradient) l += d * d; // squared length of the gradient
            final double bracketingStep = FastMath.min(0.001, ((l > 1) ? 1.0 / l : 1));
            //System.out.printf("Bracketing step = %f (length=%f)\n", bracketingStep, l);
            o.setUseGradientLineSearch(gradientLineMinimisation);
            optimum = o.optimize(new MaxEval(getMaxEvaluations()), new ObjectiveFunctionGradient(new MultivariateVectorLikelihood(maximumLikelihoodFunction)), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), GoalType.MINIMIZE, new InitialGuess(startPoint), new SimpleBounds(lowerConstraint, upperConstraint), new BoundedNonLinearConjugateGradientOptimizer.BracketingStep(bracketingStep));
        }
        final double[] solution = optimum.getPointRef();
        setSolution(a, solution);
        if (a_dev != null) {
            // Assume the Maximum Likelihood estimator returns the optimum fit (achieves the Cramér-Rao
            // lower bound) and so the covariance can be obtained from the Fisher Information Matrix.
            FisherInformationMatrix m = new FisherInformationMatrix(maximumLikelihoodFunction.fisherInformation(a));
            setDeviations(a_dev, m.crlb(true));
        }
        // Reverse negative log likelihood for maximum likelihood score
        value = -optimum.getValue();
    } catch (TooManyIterationsException e) {
        //e.printStackTrace();
        return FitStatus.TOO_MANY_ITERATIONS;
    } catch (TooManyEvaluationsException e) {
        //e.printStackTrace();
        return FitStatus.TOO_MANY_EVALUATIONS;
    } catch (ConvergenceException e) {
        //System.out.printf("Singular non linear model = %s\n", e.getMessage());
        return FitStatus.SINGULAR_NON_LINEAR_MODEL;
    } catch (BFGSOptimizer.LineSearchRoundoffException e) {
        //e.printStackTrace();
        return FitStatus.FAILED_TO_CONVERGE;
    } catch (Exception e) {
        //System.out.printf("Unknown error = %s\n", e.getMessage());
        e.printStackTrace();
        return FitStatus.UNKNOWN;
    } finally {
        if (baseOptimiser != null) {
            iterations += baseOptimiser.getIterations();
            evaluations += baseOptimiser.getEvaluations();
        }
    }
    // Check this as likelihood functions can go wrong
    if (Double.isInfinite(value) || Double.isNaN(value))
        return FitStatus.INVALID_LIKELIHOOD;
    return FitStatus.OK;
}
Also used : MaxEval(org.apache.commons.math3.optim.MaxEval) InitialGuess(org.apache.commons.math3.optim.InitialGuess) BOBYQAOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.BOBYQAOptimizer) SimpleBounds(org.apache.commons.math3.optim.SimpleBounds) ObjectiveFunction(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction) Well19937c(org.apache.commons.math3.random.Well19937c) SimpleValueChecker(org.apache.commons.math3.optim.SimpleValueChecker) RandomGenerator(org.apache.commons.math3.random.RandomGenerator) BFGSOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.gradient.BFGSOptimizer) PointValuePair(org.apache.commons.math3.optim.PointValuePair) TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) Gaussian2DFunction(gdsc.smlm.function.gaussian.Gaussian2DFunction) ConvergenceException(org.apache.commons.math3.exception.ConvergenceException) BoundedNonLinearConjugateGradientOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.gradient.BoundedNonLinearConjugateGradientOptimizer) TooManyIterationsException(org.apache.commons.math3.exception.TooManyIterationsException) BaseOptimizer(org.apache.commons.math3.optim.BaseOptimizer) CMAESOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CMAESOptimizer) FisherInformationMatrix(gdsc.smlm.fitting.FisherInformationMatrix) PoissonGammaGaussianLikelihoodWrapper(gdsc.smlm.function.PoissonGammaGaussianLikelihoodWrapper) PoissonGaussianLikelihoodWrapper(gdsc.smlm.function.PoissonGaussianLikelihoodWrapper) PoissonLikelihoodWrapper(gdsc.smlm.function.PoissonLikelihoodWrapper) LikelihoodWrapper(gdsc.smlm.function.LikelihoodWrapper) ObjectiveFunctionGradient(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunctionGradient) MultivariateFunctionMappingAdapter(org.apache.commons.math3.optim.nonlinear.scalar.MultivariateFunctionMappingAdapter) OptimizationData(org.apache.commons.math3.optim.OptimizationData) CustomPowellOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CustomPowellOptimizer) MaxIter(org.apache.commons.math3.optim.MaxIter)
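
Of the search methods above, BOBYQA is the simplest to reproduce in isolation. A minimal sketch using the stock BOBYQAOptimizer on a bounded quadratic (the objective and bounds are illustrative only):

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.SimpleBounds;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.BOBYQAOptimizer;

public class BobyqaExample {
    public static void main(String[] args) {
        // f(x, y) = (x - 3)^2 + (y + 1)^2; the minimum (3, -1) lies inside the bounds
        MultivariateFunction f = p -> (p[0] - 3) * (p[0] - 3) + (p[1] + 1) * (p[1] + 1);
        // 2n+1 = 5 interpolation points for n = 2 (must be in [n+2, (n+1)(n+2)/2]);
        // an initial trust region radius of 1 keeps the first steps inside the bounds
        BOBYQAOptimizer opt = new BOBYQAOptimizer(5, 1.0, 1e-8);
        PointValuePair optimum = opt.optimize(
                new MaxEval(500),
                new ObjectiveFunction(f),
                GoalType.MINIMIZE,
                new InitialGuess(new double[] { 0, 0 }),
                new SimpleBounds(new double[] { -5, -5 }, new double[] { 5, 5 }));
        System.out.printf("Minimum at (%f, %f)%n", optimum.getPoint()[0], optimum.getPoint()[1]);
    }
}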

Example 4 with GoalType

Use of org.apache.commons.math3.optim.nonlinear.scalar.GoalType in project GDSC-SMLM by aherbert.

From the class BoundedNonLinearConjugateGradientOptimizer, method doOptimize:

/** {@inheritDoc} */
@Override
protected PointValuePair doOptimize() {
    final ConvergenceChecker<PointValuePair> checker = getConvergenceChecker();
    final double[] point = getStartPoint();
    final GoalType goal = getGoalType();
    final int n = point.length;
    sign = (goal == GoalType.MINIMIZE) ? -1 : 1;
    double[] unbounded = point.clone();
    applyBounds(point);
    double[] r = computeObjectiveGradient(point);
    checkGradients(r, unbounded);
    if (goal == GoalType.MINIMIZE) {
        for (int i = 0; i < n; i++) {
            r[i] = -r[i];
        }
    }
    // Initial search direction.
    double[] steepestDescent = preconditioner.precondition(point, r);
    double[] searchDirection = steepestDescent.clone();
    double delta = 0;
    for (int i = 0; i < n; ++i) {
        delta += r[i] * searchDirection[i];
    }
    // Used for non-gradient based line search
    LineSearch line = null;
    double rel = 1e-6;
    double abs = 1e-10;
    if (getConvergenceChecker() instanceof SimpleValueChecker) {
        rel = ((SimpleValueChecker) getConvergenceChecker()).getRelativeThreshold();
        abs = ((SimpleValueChecker) getConvergenceChecker()).getAbsoluteThreshold();
    }
    line = new LineSearch(Math.sqrt(rel), Math.sqrt(abs));
    PointValuePair current = null;
    int maxEval = getMaxEvaluations();
    while (true) {
        incrementIterationCount();
        final double objective = computeObjectiveValue(point);
        PointValuePair previous = current;
        current = new PointValuePair(point, objective);
        if (previous != null && checker.converged(getIterations(), previous, current)) {
            // We have found an optimum.
            return current;
        }
        double step;
        if (useGradientLineSearch) {
            // Classic code using the gradient function for the line search:
            // Find the optimal step in the search direction.
            final UnivariateFunction lsf = new LineSearchFunction(point, searchDirection);
            final double uB;
            try {
                uB = findUpperBound(lsf, 0, initialStep);
                // Check if the bracket found a minimum. Otherwise just move to the new point.
                if (noBracket)
                    step = uB;
                else {
                    // XXX Last parameters is set to a value close to zero in order to
                    // work around the divergence problem in the "testCircleFitting"
                    // unit test (see MATH-439).
                    //System.out.printf("Bracket %f - %f - %f\n", 0., 1e-15, uB);
                    step = solver.solve(maxEval, lsf, 0, uB, 1e-15);
                    // Subtract used up evaluations.
                    maxEval -= solver.getEvaluations();
                }
            } catch (MathIllegalStateException e) {
                //System.out.printf("Failed to bracket %s @ %s\n", Arrays.toString(point), Arrays.toString(searchDirection));
                // Line search without gradient (as per Powell optimiser)
                final UnivariatePointValuePair optimum = line.search(point, searchDirection);
                step = optimum.getPoint();
            //throw e;
            }
        } else {
            // Line search without gradient (as per Powell optimiser)
            final UnivariatePointValuePair optimum = line.search(point, searchDirection);
            step = optimum.getPoint();
        }
        //System.out.printf("Step = %f x %s\n", step, Arrays.toString(searchDirection));
        for (int i = 0; i < point.length; ++i) {
            point[i] += step * searchDirection[i];
        }
        unbounded = point.clone();
        applyBounds(point);
        r = computeObjectiveGradient(point);
        checkGradients(r, unbounded);
        if (goal == GoalType.MINIMIZE) {
            for (int i = 0; i < n; ++i) {
                r[i] = -r[i];
            }
        }
        // Compute beta.
        final double deltaOld = delta;
        final double[] newSteepestDescent = preconditioner.precondition(point, r);
        delta = 0;
        for (int i = 0; i < n; ++i) {
            delta += r[i] * newSteepestDescent[i];
        }
        if (delta == 0)
            return new PointValuePair(point, computeObjectiveValue(point));
        final double beta;
        switch(updateFormula) {
            case FLETCHER_REEVES:
                beta = delta / deltaOld;
                break;
            case POLAK_RIBIERE:
                double deltaMid = 0;
                for (int i = 0; i < r.length; ++i) {
                    deltaMid += r[i] * steepestDescent[i];
                }
                beta = (delta - deltaMid) / deltaOld;
                break;
            default:
                // Should never happen.
                throw new MathInternalError();
        }
        steepestDescent = newSteepestDescent;
        // Compute conjugate search direction.
        if (getIterations() % n == 0 || beta < 0) {
            // Break conjugation: reset search direction.
            searchDirection = steepestDescent.clone();
        } else {
            // Compute new conjugate search direction.
            for (int i = 0; i < n; ++i) {
                searchDirection[i] = steepestDescent[i] + beta * searchDirection[i];
            }
        }
        // The gradient has already been adjusted for the search direction
        checkGradients(searchDirection, unbounded, -sign);
    }
}
Also used : UnivariateFunction(org.apache.commons.math3.analysis.UnivariateFunction) GoalType(org.apache.commons.math3.optim.nonlinear.scalar.GoalType) UnivariatePointValuePair(org.apache.commons.math3.optim.univariate.UnivariatePointValuePair) SimpleValueChecker(org.apache.commons.math3.optim.SimpleValueChecker) MathIllegalStateException(org.apache.commons.math3.exception.MathIllegalStateException) PointValuePair(org.apache.commons.math3.optim.PointValuePair) MathInternalError(org.apache.commons.math3.exception.MathInternalError)
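
The class above adds bounds handling and a gradient-free fallback line search to the standard scheme. For comparison, a minimal sketch of the stock (unbounded) NonLinearConjugateGradientOptimizer with the same Polak-Ribière update formula and SimpleValueChecker convergence test:

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.analysis.MultivariateVectorFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.SimpleValueChecker;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunctionGradient;
import org.apache.commons.math3.optim.nonlinear.scalar.gradient.NonLinearConjugateGradientOptimizer;

public class ConjugateGradientExample {
    public static void main(String[] args) {
        // f(x, y) = x^2 + 4y^2 with gradient (2x, 8y); the minimum is at the origin
        MultivariateFunction f = p -> p[0] * p[0] + 4 * p[1] * p[1];
        MultivariateVectorFunction grad = p -> new double[] { 2 * p[0], 8 * p[1] };
        NonLinearConjugateGradientOptimizer opt = new NonLinearConjugateGradientOptimizer(
                NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
                new SimpleValueChecker(1e-6, 1e-10));
        PointValuePair optimum = opt.optimize(
                new MaxEval(1000),
                new ObjectiveFunction(f),
                new ObjectiveFunctionGradient(grad),
                GoalType.MINIMIZE,
                new InitialGuess(new double[] { 3, -2 }));
        System.out.printf("Minimum at (%f, %f)%n", optimum.getPoint()[0], optimum.getPoint()[1]);
    }
}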

Example 5 with GoalType

Use of org.apache.commons.math3.optim.nonlinear.scalar.GoalType in project GDSC-SMLM by aherbert.

From the class PCPALMFitting, method runBoundedOptimiser:

private PointValuePair runBoundedOptimiser(double[][] gr, double[] initialSolution, double[] lB, double[] uB, SumOfSquaresModelFunction function) {
    // Create the functions to optimise
    ObjectiveFunction objective = new ObjectiveFunction(new SumOfSquaresMultivariateFunction(function));
    ObjectiveFunctionGradient gradient = new ObjectiveFunctionGradient(new SumOfSquaresMultivariateVectorFunction(function));
    final boolean debug = false;
    // Try a BFGS optimiser since this will produce a deterministic solution and can respect bounds.
    PointValuePair optimum = null;
    boundedEvaluations = 0;
    final MaxEval maxEvaluations = new MaxEval(2000);
    MultivariateOptimizer opt = null;
    for (int iteration = 0; iteration <= fitRestarts; iteration++) {
        try {
            opt = new BFGSOptimizer();
            final double relativeThreshold = 1e-6;
            // Configure maximum step length for each dimension using the bounds
            double[] stepLength = new double[lB.length];
            for (int i = 0; i < stepLength.length; i++) stepLength[i] = (uB[i] - lB[i]) * 0.3333333;
            // The GoalType is always minimise so no need to pass this in
            optimum = opt.optimize(maxEvaluations, gradient, objective, new InitialGuess((optimum == null) ? initialSolution : optimum.getPointRef()), new SimpleBounds(lB, uB), new BFGSOptimizer.GradientTolerance(relativeThreshold), new BFGSOptimizer.StepLength(stepLength));
            if (debug)
                System.out.printf("BFGS Iter %d = %g (%d)\n", iteration, optimum.getValue(), opt.getEvaluations());
        } catch (TooManyEvaluationsException e) {
            // No need to restart
            break;
        } catch (RuntimeException e) {
            // No need to restart
            break;
        } finally {
            boundedEvaluations += opt.getEvaluations();
        }
    }
    // Try a CMAES optimiser which is non-deterministic. To overcome this we perform restarts.
    // CMAESOptimiser based on Matlab code:
    // https://www.lri.fr/~hansen/cmaes.m
    // Take the defaults from the Matlab documentation
    double stopFitness = 0; // (alternative: Double.NEGATIVE_INFINITY)
    boolean isActiveCMA = true;
    int diagonalOnly = 0;
    int checkFeasableCount = 1;
    RandomGenerator random = new Well44497b(); // (previously Well19937c)
    boolean generateStatistics = false;
    ConvergenceChecker<PointValuePair> checker = new SimpleValueChecker(1e-6, 1e-10);
    // The sigma determines the search range for the variables. It should be 1/3 of the initial search region.
    double[] range = new double[lB.length];
    for (int i = 0; i < lB.length; i++) range[i] = (uB[i] - lB[i]) / 3;
    OptimizationData sigma = new CMAESOptimizer.Sigma(range);
    OptimizationData popSize = new CMAESOptimizer.PopulationSize((int) (4 + Math.floor(3 * Math.log(initialSolution.length))));
    SimpleBounds bounds = new SimpleBounds(lB, uB);
    opt = new CMAESOptimizer(maxEvaluations.getMaxEval(), stopFitness, isActiveCMA, diagonalOnly, checkFeasableCount, random, generateStatistics, checker);
    // Restart the optimiser several times and take the best answer.
    for (int iteration = 0; iteration <= fitRestarts; iteration++) {
        try {
            // Start from the initial solution
            PointValuePair constrainedSolution = opt.optimize(new InitialGuess(initialSolution), objective, GoalType.MINIMIZE, bounds, sigma, popSize, maxEvaluations);
            if (debug)
                System.out.printf("CMAES Iter %d initial = %g (%d)\n", iteration, constrainedSolution.getValue(), opt.getEvaluations());
            boundedEvaluations += opt.getEvaluations();
            if (optimum == null || constrainedSolution.getValue() < optimum.getValue()) {
                optimum = constrainedSolution;
            }
        } catch (TooManyEvaluationsException e) {
            // Ignore and continue to the next restart
        } catch (TooManyIterationsException e) {
            // Ignore and continue to the next restart
        } finally {
            boundedEvaluations += maxEvaluations.getMaxEval();
        }
        if (optimum == null)
            continue;
        try {
            // Also restart from the current optimum
            PointValuePair constrainedSolution = opt.optimize(new InitialGuess(optimum.getPointRef()), objective, GoalType.MINIMIZE, bounds, sigma, popSize, maxEvaluations);
            if (debug)
                System.out.printf("CMAES Iter %d restart = %g (%d)\n", iteration, constrainedSolution.getValue(), opt.getEvaluations());
            if (constrainedSolution.getValue() < optimum.getValue()) {
                optimum = constrainedSolution;
            }
        } catch (TooManyEvaluationsException e) {
            // Ignore and continue to the next restart
        } catch (TooManyIterationsException e) {
            // Ignore and continue to the next restart
        } finally {
            boundedEvaluations += maxEvaluations.getMaxEval();
        }
    }
    return optimum;
}
Also used : MultivariateOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer) MaxEval(org.apache.commons.math3.optim.MaxEval) InitialGuess(org.apache.commons.math3.optim.InitialGuess) SimpleBounds(org.apache.commons.math3.optim.SimpleBounds) ObjectiveFunction(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction) SimpleValueChecker(org.apache.commons.math3.optim.SimpleValueChecker) BFGSOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.gradient.BFGSOptimizer) RandomGenerator(org.apache.commons.math3.random.RandomGenerator) PointValuePair(org.apache.commons.math3.optim.PointValuePair) TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) TooManyIterationsException(org.apache.commons.math3.exception.TooManyIterationsException) CMAESOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CMAESOptimizer) ObjectiveFunctionGradient(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunctionGradient) Well44497b(org.apache.commons.math3.random.Well44497b) OptimizationData(org.apache.commons.math3.optim.OptimizationData)
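
The CMAES configuration repeated in these examples (sigma set to one third of each bound range, population size 4 + floor(3 ln N) for N parameters) reduces to the following minimal sketch; the quadratic objective and bounds are illustrative only:

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.SimpleBounds;
import org.apache.commons.math3.optim.SimpleValueChecker;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CMAESOptimizer;
import org.apache.commons.math3.random.Well19937c;

public class CmaesExample {
    public static void main(String[] args) {
        MultivariateFunction f = p -> (p[0] - 1) * (p[0] - 1) + (p[1] - 2) * (p[1] - 2);
        double[] lower = { -5, -5 };
        double[] upper = { 5, 5 };
        // Sigma: 1/3 of the search range in each dimension
        double[] sigma = { (upper[0] - lower[0]) / 3, (upper[1] - lower[1]) / 3 };
        // Population size: 4 + floor(3 * ln(N)) with N = 2 parameters
        int popSize = (int) (4 + Math.floor(3 * Math.log(2)));
        // Constructor arguments follow the pattern used throughout this page
        CMAESOptimizer opt = new CMAESOptimizer(2000, 0, true, 0, 1,
                new Well19937c(), false, new SimpleValueChecker(1e-6, 1e-10));
        PointValuePair optimum = opt.optimize(
                new MaxEval(4000),
                new ObjectiveFunction(f),
                GoalType.MINIMIZE,
                new InitialGuess(new double[] { 0, 0 }),
                new SimpleBounds(lower, upper),
                new CMAESOptimizer.Sigma(sigma),
                new CMAESOptimizer.PopulationSize(popSize));
        System.out.printf("Minimum at (%f, %f)%n", optimum.getPoint()[0], optimum.getPoint()[1]);
    }
}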

Aggregations

PointValuePair (org.apache.commons.math3.optim.PointValuePair): 5 uses
SimpleValueChecker (org.apache.commons.math3.optim.SimpleValueChecker): 4 uses
TooManyEvaluationsException (org.apache.commons.math3.exception.TooManyEvaluationsException): 3 uses
TooManyIterationsException (org.apache.commons.math3.exception.TooManyIterationsException): 3 uses
InitialGuess (org.apache.commons.math3.optim.InitialGuess): 3 uses
MaxEval (org.apache.commons.math3.optim.MaxEval): 3 uses
OptimizationData (org.apache.commons.math3.optim.OptimizationData): 3 uses
SimpleBounds (org.apache.commons.math3.optim.SimpleBounds): 3 uses
GoalType (org.apache.commons.math3.optim.nonlinear.scalar.GoalType): 3 uses
ObjectiveFunction (org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction): 3 uses
CMAESOptimizer (org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CMAESOptimizer): 3 uses
RandomGenerator (org.apache.commons.math3.random.RandomGenerator): 3 uses
ConvergenceException (org.apache.commons.math3.exception.ConvergenceException): 2 uses
MaxIter (org.apache.commons.math3.optim.MaxIter): 2 uses
ObjectiveFunctionGradient (org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunctionGradient): 2 uses
BFGSOptimizer (org.apache.commons.math3.optim.nonlinear.scalar.gradient.BFGSOptimizer): 2 uses
UnivariatePointValuePair (org.apache.commons.math3.optim.univariate.UnivariatePointValuePair): 2 uses
Well19937c (org.apache.commons.math3.random.Well19937c): 2 uses
FisherInformationMatrix (gdsc.smlm.fitting.FisherInformationMatrix): 1 use
LikelihoodWrapper (gdsc.smlm.function.LikelihoodWrapper): 1 use