Search in sources:

Example 1 with FastMath.sqrt

use of org.apache.commons.math3.util.FastMath.sqrt in project EnrichmentMapApp by BaderLab.

the class MannWhitneyUTestSided method calculateAsymptoticPValue.

/**
     * @param Umin smallest of the two Mann-Whitney U values
     * @param U1 Mann-Whitney U value for the first sample
     * @param U2 Mann-Whitney U value for the second sample
     * @param n1 number of subjects in first sample
     * @param n2 number of subjects in second sample
     * @param side the type of test (two-sided, less or greater)
     * @return asymptotic p-value for the requested side
     * @throws ConvergenceException if the p-value cannot be computed
     * due to a convergence error
     * @throws MaxCountExceededException if the maximum number of
     * iterations is exceeded
     */
private double calculateAsymptoticPValue(final double Umin, final double U1, final double U2, final int n1, final int n2, final Type side) throws ConvergenceException, MaxCountExceededException {
    /* Long multiplication to avoid overflow (a double is not used, both for
     * efficiency and to avoid precision loss).
     */
    final long n1n2prod = (long) n1 * n2;
    // http://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U#Normal_approximation
    final double EU = n1n2prod / 2.0;
    final double VarU = n1n2prod * (n1 + n2 + 1) / 12.0;
    final double z = (Umin - EU) / FastMath.sqrt(VarU);
    // No try-catch or advertised exception because args are valid
    final NormalDistribution standardNormal = new NormalDistribution(0, 1);
    double p = 2 * standardNormal.cumulativeProbability(z);
    if (side == Type.TWO_SIDED) {
        return p;
    }
    if (side == Type.LESS) {
        if (U1 < U2) {
            return 0.5 * p;
        } else {
            return 1.0 - (0.5 * p);
        }
    } else {
        if (U1 > U2) {
            return 0.5 * p;
        } else {
            return 1.0 - (0.5 * p);
        }
    }
}
Also used : NormalDistribution(org.apache.commons.math3.distribution.NormalDistribution)
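
For comparison, here is a minimal, self-contained sketch of the same normal approximation built only from stock Commons Math classes (the sample arrays are made-up data). Note that the stock MannWhitneyUTest.mannWhitneyU returns the larger of U1 and U2, so Umin is recovered as n1 * n2 - U.

import org.apache.commons.math3.distribution.NormalDistribution;
import org.apache.commons.math3.stat.inference.MannWhitneyUTest;
import org.apache.commons.math3.util.FastMath;

public class MannWhitneySketch {
    public static void main(String[] args) {
        double[] x = { 1.2, 3.4, 2.2, 5.1 };
        double[] y = { 4.5, 6.7, 5.9, 8.0 };
        long n1 = x.length, n2 = y.length;
        // Commons Math returns max(U1, U2); U1 + U2 = n1 * n2
        double u = new MannWhitneyUTest().mannWhitneyU(x, y);
        double umin = (double) (n1 * n2) - u;
        // Normal approximation, mirroring calculateAsymptoticPValue above
        double eu = n1 * n2 / 2.0;
        double varU = n1 * n2 * (n1 + n2 + 1) / 12.0;
        double z = (umin - eu) / FastMath.sqrt(varU);
        double p = 2 * new NormalDistribution(0, 1).cumulativeProbability(z);
        System.out.println("two-sided asymptotic p = " + p);
    }
}

For the two-sided case alone, MannWhitneyUTest.mannWhitneyUTest(x, y) computes this p-value directly; the sided variant above is EnrichmentMap's extension.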

Example 2 with FastMath.sqrt

use of org.apache.commons.math3.util.FastMath.sqrt in project GDSC-SMLM by aherbert.

the class MaximumLikelihoodFitter method computeFit.

/*
	 * (non-Javadoc)
	 * 
	 * @see gdsc.smlm.fitting.nonlinear.BaseFunctionSolver#computeFit(double[], double[], double[], double[])
	 */
public FitStatus computeFit(double[] y, double[] y_fit, double[] a, double[] a_dev) {
    final int n = y.length;
    LikelihoodWrapper maximumLikelihoodFunction = createLikelihoodWrapper((NonLinearFunction) f, n, y, a);
    @SuppressWarnings("rawtypes") BaseOptimizer baseOptimiser = null;
    try {
        double[] startPoint = getInitialSolution(a);
        PointValuePair optimum = null;
        if (searchMethod == SearchMethod.POWELL || searchMethod == SearchMethod.POWELL_BOUNDED || searchMethod == SearchMethod.POWELL_ADAPTER) {
            // Non-differentiable version using Powell Optimiser
            // This is as per the method in Numerical Recipes 10.5 (Direction Set (Powell's) method)
            // I could extend the optimiser and implement bounds on the directions moved. However the mapping
            // adapter seems to work OK.
            final boolean basisConvergence = false;
            // Perhaps these thresholds should be tighter?
            // The default is to use the sqrt() of the overall tolerance
            //final double lineRel = FastMath.sqrt(relativeThreshold);
            //final double lineAbs = FastMath.sqrt(absoluteThreshold);
            //final double lineRel = relativeThreshold * 1e2;
            //final double lineAbs = absoluteThreshold * 1e2;
            // Since we are fitting only a small number of parameters then just use the same tolerance 
            // for each search direction
            final double lineRel = relativeThreshold;
            final double lineAbs = absoluteThreshold;
            CustomPowellOptimizer o = new CustomPowellOptimizer(relativeThreshold, absoluteThreshold, lineRel, lineAbs, null, basisConvergence);
            baseOptimiser = o;
            OptimizationData maxIterationData = null;
            if (getMaxIterations() > 0)
                maxIterationData = new MaxIter(getMaxIterations());
            if (searchMethod == SearchMethod.POWELL_ADAPTER) {
                // Try using the mapping adapter for a bounded Powell search
                MultivariateFunctionMappingAdapter adapter = new MultivariateFunctionMappingAdapter(new MultivariateLikelihood(maximumLikelihoodFunction), lower, upper);
                optimum = o.optimize(maxIterationData, new MaxEval(getMaxEvaluations()), new ObjectiveFunction(adapter), GoalType.MINIMIZE, new InitialGuess(adapter.boundedToUnbounded(startPoint)));
                double[] solution = adapter.unboundedToBounded(optimum.getPointRef());
                optimum = new PointValuePair(solution, optimum.getValue());
            } else {
                if (powellFunction == null) {
                    // The parameters should be mapped into the same range; the Mortensen MLE
                    // Python code does this by using the sqrt of the number of photons and background.
                    if (mapGaussian) {
                        Gaussian2DFunction gf = (Gaussian2DFunction) f;
                        // Re-map signal and background using the sqrt
                        int[] indices = gf.gradientIndices();
                        int[] map = new int[indices.length];
                        int count = 0;
                        // Background is always first
                        if (indices[0] == Gaussian2DFunction.BACKGROUND) {
                            map[count++] = 0;
                        }
                        // Look for the Signal in multiple peak 2D Gaussians
                        for (int i = 1; i < indices.length; i++) {
                            if (indices[i] % 6 == Gaussian2DFunction.SIGNAL) {
                                map[count++] = i;
                            }
                        }
                        if (count > 0) {
                            powellFunction = new MappedMultivariateLikelihood(maximumLikelihoodFunction, Arrays.copyOf(map, count));
                        }
                    }
                    if (powellFunction == null) {
                        powellFunction = new MultivariateLikelihood(maximumLikelihoodFunction);
                    }
                }
                // Update the maximum likelihood function in the Powell function wrapper
                powellFunction.fun = maximumLikelihoodFunction;
                OptimizationData positionChecker = null;
                // new org.apache.commons.math3.optim.PositionChecker(relativeThreshold, absoluteThreshold);
                SimpleBounds simpleBounds = null;
                if (powellFunction.isMapped()) {
                    MappedMultivariateLikelihood adapter = (MappedMultivariateLikelihood) powellFunction;
                    if (searchMethod == SearchMethod.POWELL_BOUNDED)
                        simpleBounds = new SimpleBounds(adapter.map(lower), adapter.map(upper));
                    optimum = o.optimize(maxIterationData, new MaxEval(getMaxEvaluations()), new ObjectiveFunction(powellFunction), GoalType.MINIMIZE, new InitialGuess(adapter.map(startPoint)), positionChecker, simpleBounds);
                    double[] solution = adapter.unmap(optimum.getPointRef());
                    optimum = new PointValuePair(solution, optimum.getValue());
                } else {
                    if (searchMethod == SearchMethod.POWELL_BOUNDED)
                        simpleBounds = new SimpleBounds(lower, upper);
                    optimum = o.optimize(maxIterationData, new MaxEval(getMaxEvaluations()), new ObjectiveFunction(powellFunction), GoalType.MINIMIZE, new InitialGuess(startPoint), positionChecker, simpleBounds);
                }
            }
        } else if (searchMethod == SearchMethod.BOBYQA) {
            // Differentiable approximation using Powell's BOBYQA algorithm.
            // This is slower than the Powell optimiser and requires a high number of evaluations.
            int numberOfInterpolationPoints = this.getNumberOfFittedParameters() + 2;
            BOBYQAOptimizer o = new BOBYQAOptimizer(numberOfInterpolationPoints);
            baseOptimiser = o;
            optimum = o.optimize(new MaxEval(getMaxEvaluations()), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), GoalType.MINIMIZE, new InitialGuess(startPoint), new SimpleBounds(lower, upper));
        } else if (searchMethod == SearchMethod.CMAES) {
            // TODO - Understand why the CMAES optimiser does not fit very well on test data. It appears 
            // to converge too early and the likelihood scores are not as low as the other optimisers.
            // CMAESOptimiser based on Matlab code:
            // https://www.lri.fr/~hansen/cmaes.m
            // Take the defaults from the Matlab documentation
            double stopFitness = 0; //Double.NEGATIVE_INFINITY;
            boolean isActiveCMA = true;
            int diagonalOnly = 0;
            int checkFeasableCount = 1;
            RandomGenerator random = new Well19937c();
            boolean generateStatistics = false;
            // The sigma determines the search range for the variables. It should be 1/3 of the initial search region.
            double[] sigma = new double[lower.length];
            for (int i = 0; i < sigma.length; i++) sigma[i] = (upper[i] - lower[i]) / 3;
            int popSize = (int) (4 + Math.floor(3 * Math.log(sigma.length)));
            // The CMAES optimiser is random and restarting can overcome problems with quick convergence.
            // The Apache Commons documentation states that convergence should occur between 30N and 300N^2
            // function evaluations
            final int n30 = FastMath.min(sigma.length * sigma.length * 30, getMaxEvaluations() / 2);
            evaluations = 0;
            OptimizationData[] data = new OptimizationData[] { new InitialGuess(startPoint), new CMAESOptimizer.PopulationSize(popSize), new MaxEval(getMaxEvaluations()), new CMAESOptimizer.Sigma(sigma), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), GoalType.MINIMIZE, new SimpleBounds(lower, upper) };
            // Iterate to prevent early convergence
            int repeat = 0;
            while (evaluations < n30) {
                if (repeat++ > 1) {
                    // Update the start point and population size
                    data[0] = new InitialGuess(optimum.getPointRef());
                    popSize *= 2;
                    data[1] = new CMAESOptimizer.PopulationSize(popSize);
                }
                CMAESOptimizer o = new CMAESOptimizer(getMaxIterations(), stopFitness, isActiveCMA, diagonalOnly, checkFeasableCount, random, generateStatistics, new SimpleValueChecker(relativeThreshold, absoluteThreshold));
                baseOptimiser = o;
                PointValuePair result = o.optimize(data);
                iterations += o.getIterations();
                evaluations += o.getEvaluations();
                if (optimum == null || result.getValue() < optimum.getValue()) {
                    optimum = result;
                }
            }
            // Prevent incrementing the iterations again
            baseOptimiser = null;
        } else if (searchMethod == SearchMethod.BFGS) {
            // BFGS can use an approximate line search minimisation whereas Powell and conjugate gradient
            // methods require a more accurate line minimisation. The BFGS search does not do a full 
            // minimisation but takes appropriate steps in the direction of the current gradient.
            // Do not use the convergence checker on the value of the function. Use the convergence on the 
            // point coordinate and gradient
            //BFGSOptimizer o = new BFGSOptimizer(new SimpleValueChecker(rel, abs));
            BFGSOptimizer o = new BFGSOptimizer();
            baseOptimiser = o;
            // Configure maximum step length for each dimension using the bounds
            double[] stepLength = new double[lower.length];
            for (int i = 0; i < stepLength.length; i++) {
                stepLength[i] = (upper[i] - lower[i]) * 0.3333333;
                if (stepLength[i] <= 0)
                    stepLength[i] = Double.POSITIVE_INFINITY;
            }
            // The GoalType is always minimise so no need to pass this in
            OptimizationData positionChecker = null;
            //new org.apache.commons.math3.optim.PositionChecker(relativeThreshold, absoluteThreshold);
            optimum = o.optimize(new MaxEval(getMaxEvaluations()), new ObjectiveFunctionGradient(new MultivariateVectorLikelihood(maximumLikelihoodFunction)), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), new InitialGuess(startPoint), new SimpleBounds(lowerConstraint, upperConstraint), new BFGSOptimizer.GradientTolerance(relativeThreshold), positionChecker, new BFGSOptimizer.StepLength(stepLength));
        } else {
            // The line search algorithm often fails. This is due to searching into a region where the
            // function evaluates to a negative value and so has been clipped. This means the upper bound
            // of the line cannot be found.
            // Note that running it on an easy problem (200 photons with fixed fitting (no background)) the
            // algorithm does sometimes produce results better than the Powell algorithm, but it is slower.
            BoundedNonLinearConjugateGradientOptimizer o = new BoundedNonLinearConjugateGradientOptimizer((searchMethod == SearchMethod.CONJUGATE_GRADIENT_FR) ? Formula.FLETCHER_REEVES : Formula.POLAK_RIBIERE, new SimpleValueChecker(relativeThreshold, absoluteThreshold));
            baseOptimiser = o;
            // Note: The gradients may become unstable at the edge of the bounds. Or they will not change 
            // direction if the true solution is on the bounds since the gradient will always continue 
            // towards the bounds. This is key to the conjugate gradient method. It searches along a vector 
            // until the direction of the gradient is in the opposite direction (using dot products, i.e. 
            // cosine of angle between them)
            // NR 10.7 states there is no advantage of the variable metric DFP or BFGS methods over
            // conjugate gradient methods. So I will try these first.
            // Try this:
            // Adapt the conjugate gradient optimiser to use the gradient to pick the search direction
            // and then for the line minimisation. However if the function is out of bounds then clip the 
            // variables at the bounds and continue. 
            // If the current point is at the bounds and the gradient is to continue out of bounds then 
            // clip the gradient too.
            // Or: just use the gradient for the search direction then use the line minimisation/rest
            // as per the Powell optimiser. The bounds should limit the search.
            // I tried a Bounded conjugate gradient optimiser with clipped variables:
            // This sometimes works. However when the variables go a long way out of the expected range the gradients
            // can have vastly different magnitudes. This results in the algorithm stalling since the gradients
            // can be close to zero and some of the parameters are no longer adjusted.
            // Perhaps this can be looked for and the algorithm then gives up and resorts to a Powell optimiser from 
            // the current point.
            // Changed the bracketing step to very small (default is 1, changed to 0.001). This improves the 
            // performance. The gradient direction is very sensitive to small changes in the coordinates so a 
            // tighter bracketing of the line search helps.
            // Tried using a non-gradient method for the line search copied from the Powell optimiser:
            // This also works when the bracketing step is small but the number of iterations is higher.
            // 24.10.2014: I have tried to get conjugate gradient to work but the gradient function
            // does not appear to behave suitably for the optimiser. In the current state both methods of using a
            // Bounded Conjugate Gradient Optimiser perform poorly relative to other optimisers:
            // Simulated : n=1000, signal=200, x=0.53, y=0.47
            // LVM : n=1000, signal=171, x=0.537, y=0.471 (1.003s)
            // Powell : n=1000, signal=187, x=0.537, y=0.48 (1.238s)
            // Gradient based PR (constrained): n=858, signal=161, x=0.533, y=0.474 (2.54s)
            // Gradient based PR (bounded): n=948, signal=161, x=0.533, y=0.473 (2.67s)
            // Non-gradient based : n=1000, signal=151.47, x=0.535, y=0.474 (1.626s)
            // The conjugate optimisers are slower, under-predict the signal the most and, in the case of
            // the gradient based optimiser, fail to converge on some problems. This is worse when constrained
            // fitting is used rather than tightly bounded fitting.
            // I will leave the code in as an option but would not recommend using it. I may remove it in the 
            // future.
            // Note: It is strange that the non-gradient based line minimisation is more successful.
            // It may be that the gradient function is not accurate (due to round off error) or that it is
            // simply wrong when far from the optimum. My JUnit tests only evaluate the function within the 
            // expected range of the answer.
            // Note the default step size on the Powell optimiser is 1 but the initial directions are unit vectors.
            // So our bracketing step should be a minimum of 1 / average length of the first gradient vector to prevent
            // the first step being too large when bracketing.
            final double[] gradient = new double[startPoint.length];
            maximumLikelihoodFunction.likelihood(startPoint, gradient);
            double l = 0;
            for (double d : gradient) l += d * d;
            final double bracketingStep = FastMath.min(0.001, ((l > 1) ? 1.0 / l : 1));
            //System.out.printf("Bracketing step = %f (length=%f)\n", bracketingStep, l);
            o.setUseGradientLineSearch(gradientLineMinimisation);
            optimum = o.optimize(new MaxEval(getMaxEvaluations()), new ObjectiveFunctionGradient(new MultivariateVectorLikelihood(maximumLikelihoodFunction)), new ObjectiveFunction(new MultivariateLikelihood(maximumLikelihoodFunction)), GoalType.MINIMIZE, new InitialGuess(startPoint), new SimpleBounds(lowerConstraint, upperConstraint), new BoundedNonLinearConjugateGradientOptimizer.BracketingStep(bracketingStep));
        //maximumLikelihoodFunction.value(solution, gradient);
        //System.out.printf("Iter = %d, %g @ %s : %s\n", iterations, ll, Arrays.toString(solution),
        //		Arrays.toString(gradient));
        }
        final double[] solution = optimum.getPointRef();
        setSolution(a, solution);
        if (a_dev != null) {
            // Assume the Maximum Likelihood estimator returns the optimum fit (achieves the Cramér-Rao
            // lower bound) and so the covariance can be obtained from the Fisher Information Matrix.
            FisherInformationMatrix m = new FisherInformationMatrix(maximumLikelihoodFunction.fisherInformation(a));
            setDeviations(a_dev, m.crlb(true));
        }
        // Reverse negative log likelihood for maximum likelihood score
        value = -optimum.getValue();
    } catch (TooManyIterationsException e) {
        //e.printStackTrace();
        return FitStatus.TOO_MANY_ITERATIONS;
    } catch (TooManyEvaluationsException e) {
        //e.printStackTrace();
        return FitStatus.TOO_MANY_EVALUATIONS;
    } catch (ConvergenceException e) {
        //System.out.printf("Singular non linear model = %s\n", e.getMessage());
        return FitStatus.SINGULAR_NON_LINEAR_MODEL;
    } catch (BFGSOptimizer.LineSearchRoundoffException e) {
        //e.printStackTrace();
        return FitStatus.FAILED_TO_CONVERGE;
    } catch (Exception e) {
        //System.out.printf("Unknown error = %s\n", e.getMessage());
        e.printStackTrace();
        return FitStatus.UNKNOWN;
    } finally {
        if (baseOptimiser != null) {
            iterations += baseOptimiser.getIterations();
            evaluations += baseOptimiser.getEvaluations();
        }
    }
    // Check this as likelihood functions can go wrong
    if (Double.isInfinite(value) || Double.isNaN(value))
        return FitStatus.INVALID_LIKELIHOOD;
    return FitStatus.OK;
}
Also used : MaxEval(org.apache.commons.math3.optim.MaxEval) InitialGuess(org.apache.commons.math3.optim.InitialGuess) BOBYQAOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.BOBYQAOptimizer) SimpleBounds(org.apache.commons.math3.optim.SimpleBounds) ObjectiveFunction(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction) Well19937c(org.apache.commons.math3.random.Well19937c) SimpleValueChecker(org.apache.commons.math3.optim.SimpleValueChecker) RandomGenerator(org.apache.commons.math3.random.RandomGenerator) BFGSOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.gradient.BFGSOptimizer) PointValuePair(org.apache.commons.math3.optim.PointValuePair) TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) Gaussian2DFunction(gdsc.smlm.function.gaussian.Gaussian2DFunction) ConvergenceException(org.apache.commons.math3.exception.ConvergenceException) BoundedNonLinearConjugateGradientOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.gradient.BoundedNonLinearConjugateGradientOptimizer) TooManyIterationsException(org.apache.commons.math3.exception.TooManyIterationsException) BaseOptimizer(org.apache.commons.math3.optim.BaseOptimizer) CMAESOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CMAESOptimizer) FisherInformationMatrix(gdsc.smlm.fitting.FisherInformationMatrix) PoissonGammaGaussianLikelihoodWrapper(gdsc.smlm.function.PoissonGammaGaussianLikelihoodWrapper) PoissonGaussianLikelihoodWrapper(gdsc.smlm.function.PoissonGaussianLikelihoodWrapper) PoissonLikelihoodWrapper(gdsc.smlm.function.PoissonLikelihoodWrapper) LikelihoodWrapper(gdsc.smlm.function.LikelihoodWrapper) ConvergenceException(org.apache.commons.math3.exception.ConvergenceException) TooManyIterationsException(org.apache.commons.math3.exception.TooManyIterationsException) TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) ObjectiveFunctionGradient(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunctionGradient) MultivariateFunctionMappingAdapter(org.apache.commons.math3.optim.nonlinear.scalar.MultivariateFunctionMappingAdapter) OptimizationData(org.apache.commons.math3.optim.OptimizationData) CustomPowellOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CustomPowellOptimizer) MaxIter(org.apache.commons.math3.optim.MaxIter)
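
The CMA-ES configuration above (sigma set to a third of each search range, population size 4 + floor(3 ln N), restarts with a doubled population) can be exercised on its own. A minimal sketch, with made-up bounds and tolerances and a simple quadratic standing in for the likelihood:

import org.apache.commons.math3.optim.*;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CMAESOptimizer;
import org.apache.commons.math3.random.Well19937c;

public class CmaesSketch {
    public static void main(String[] args) {
        double[] lower = { -5, -5 };
        double[] upper = { 5, 5 };
        double[] start = { 4, 4 };
        // Sigma should be about 1/3 of the initial search region, as in computeFit above
        double[] sigma = new double[lower.length];
        for (int i = 0; i < sigma.length; i++)
            sigma[i] = (upper[i] - lower[i]) / 3;
        int popSize = (int) (4 + Math.floor(3 * Math.log(sigma.length)));
        CMAESOptimizer o = new CMAESOptimizer(1000, 0, true, 0, 1, new Well19937c(), false,
                new SimpleValueChecker(1e-6, 1e-10));
        PointValuePair optimum = o.optimize(
                new MaxEval(20000),
                new ObjectiveFunction(point -> point[0] * point[0] + point[1] * point[1]),
                GoalType.MINIMIZE,
                new InitialGuess(start),
                new SimpleBounds(lower, upper),
                new CMAESOptimizer.Sigma(sigma),
                new CMAESOptimizer.PopulationSize(popSize));
        System.out.println(java.util.Arrays.toString(optimum.getPoint()) + " -> " + optimum.getValue());
    }
}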

Example 3 with FastMath.sqrt

use of org.apache.commons.math3.util.FastMath.sqrt in project nd4j by deeplearning4j.

the class BaseDistribution method inverseCumulativeProbability.

/**
 * {@inheritDoc}
 * <p/>
 * The default implementation returns
 * <ul>
 * <li>{@link #getSupportLowerBound()} for {@code p = 0},</li>
 * <li>{@link #getSupportUpperBound()} for {@code p = 1}.</li>
 * </ul>
 */
@Override
public double inverseCumulativeProbability(final double p) throws OutOfRangeException {
    /*
         * IMPLEMENTATION NOTES
         * --------------------
         * Where applicable, use is made of the one-sided Chebyshev inequality
         * to bracket the root. This inequality states that
         * P(X - mu >= k * sig) <= 1 / (1 + k^2),
         * mu: mean, sig: standard deviation. Equivalently
         * 1 - P(X < mu + k * sig) <= 1 / (1 + k^2),
         * F(mu + k * sig) >= k^2 / (1 + k^2).
         *
         * For k = sqrt(p / (1 - p)), we find
         * F(mu + k * sig) >= p,
         * and (mu + k * sig) is an upper-bound for the root.
         *
         * Then, introducing Y = -X, mean(Y) = -mu, sd(Y) = sig, and
         * P(Y >= -mu + k * sig) <= 1 / (1 + k^2),
         * P(-X >= -mu + k * sig) <= 1 / (1 + k^2),
         * P(X <= mu - k * sig) <= 1 / (1 + k^2),
         * F(mu - k * sig) <= 1 / (1 + k^2).
         *
         * For k = sqrt((1 - p) / p), we find
         * F(mu - k * sig) <= p,
         * and (mu - k * sig) is a lower-bound for the root.
         *
         * In cases where the Chebyshev inequality does not apply, geometric
         * progressions 1, 2, 4, ... and -1, -2, -4, ... are used to bracket
         * the root.
         */
    if (p < 0.0 || p > 1.0) {
        throw new OutOfRangeException(p, 0, 1);
    }
    double lowerBound = getSupportLowerBound();
    if (p == 0.0) {
        return lowerBound;
    }
    double upperBound = getSupportUpperBound();
    if (p == 1.0) {
        return upperBound;
    }
    final double mu = getNumericalMean();
    final double sig = FastMath.sqrt(getNumericalVariance());
    final boolean chebyshevApplies;
    chebyshevApplies = !(Double.isInfinite(mu) || Double.isNaN(mu) || Double.isInfinite(sig) || Double.isNaN(sig));
    if (lowerBound == Double.NEGATIVE_INFINITY) {
        if (chebyshevApplies) {
            lowerBound = mu - sig * FastMath.sqrt((1. - p) / p);
        } else {
            lowerBound = -1.0;
            while (cumulativeProbability(lowerBound) >= p) {
                lowerBound *= 2.0;
            }
        }
    }
    if (upperBound == Double.POSITIVE_INFINITY) {
        if (chebyshevApplies) {
            upperBound = mu + sig * FastMath.sqrt(p / (1. - p));
        } else {
            upperBound = 1.0;
            while (cumulativeProbability(upperBound) < p) {
                upperBound *= 2.0;
            }
        }
    }
    final UnivariateFunction toSolve = new UnivariateFunction() {

        public double value(final double x) {
            return cumulativeProbability(x) - p;
        }
    };
    double x = UnivariateSolverUtils.solve(toSolve, lowerBound, upperBound, getSolverAbsoluteAccuracy());
    if (!isSupportConnected()) {
        /* Test for plateau. */
        final double dx = getSolverAbsoluteAccuracy();
        if (x - dx >= getSupportLowerBound()) {
            double px = cumulativeProbability(x);
            if (cumulativeProbability(x - dx) == px) {
                upperBound = x;
                while (upperBound - lowerBound > dx) {
                    final double midPoint = 0.5 * (lowerBound + upperBound);
                    if (cumulativeProbability(midPoint) < px) {
                        lowerBound = midPoint;
                    } else {
                        upperBound = midPoint;
                    }
                }
                return upperBound;
            }
        }
    }
    return x;
}
Also used : UnivariateFunction(org.apache.commons.math3.analysis.UnivariateFunction) OutOfRangeException(org.apache.commons.math3.exception.OutOfRangeException)
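
The Chebyshev bracketing in the implementation notes is easy to verify numerically. A small sketch (the distribution parameters and p are arbitrary): for p = 0.9, the bounds mu - sig*sqrt((1-p)/p) and mu + sig*sqrt(p/(1-p)) must bracket the true quantile.

import org.apache.commons.math3.distribution.NormalDistribution;
import org.apache.commons.math3.util.FastMath;

public class ChebyshevBracketSketch {
    public static void main(String[] args) {
        NormalDistribution d = new NormalDistribution(10, 2);
        double p = 0.9;
        double mu = d.getNumericalMean();
        double sig = FastMath.sqrt(d.getNumericalVariance());
        // One-sided Chebyshev bounds, as derived in the implementation notes above
        double lower = mu - sig * FastMath.sqrt((1 - p) / p);
        double upper = mu + sig * FastMath.sqrt(p / (1 - p));
        double x = d.inverseCumulativeProbability(p);
        // Prints bracket [9.333, 16.000] around quantile ~12.563
        System.out.printf("bracket [%.3f, %.3f] contains quantile %.3f%n", lower, upper, x);
    }
}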

Example 4 with FastMath.sqrt

use of org.apache.commons.math3.util.FastMath.sqrt in project ffx by mjschnie.

the class Crystal method minDistOverSymOps.

/**
 * Minimum distance between two coordinates over all symmetry operators.
 *
 * @param xyzA Coordinate A
 * @param xyzB Coordinate B
 * @return Minimum distance in crystal
 */
public double minDistOverSymOps(double[] xyzA, double[] xyzB) {
    double dist = 0;
    for (int i = 0; i < 3; i++) {
        double dx = xyzA[i] - xyzB[i];
        dist += (dx * dx);
    }
    double[] symB = new double[3];
    for (SymOp symOp : spaceGroup.symOps) {
        applySymOp(xyzB, symB, symOp);
        for (int i = 0; i < 3; i++) {
            symB[i] -= xyzA[i];
        }
        double d = image(symB);
        dist = d < dist ? d : dist;
    }
    return FastMath.sqrt(dist);
}
Also used : FastMath.rint(org.apache.commons.math3.util.FastMath.rint)
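
The method accumulates squared distances and defers the single FastMath.sqrt to the return, avoiding a sqrt per symmetry operator. The same pattern in isolation (a minimal sketch; the coordinates and the findClosest helper are made up for illustration):

import org.apache.commons.math3.util.FastMath;

public class MinDistSketch {
    // Hypothetical helper: index of the row in points closest to target
    static int findClosest(double[][] points, double[] target) {
        int best = -1;
        double bestSq = Double.POSITIVE_INFINITY;
        for (int j = 0; j < points.length; j++) {
            double sq = 0;
            for (int i = 0; i < 3; i++) {
                double dx = points[j][i] - target[i];
                sq += dx * dx;
            }
            // Compare squared distances; take the sqrt only once at the end
            if (sq < bestSq) {
                bestSq = sq;
                best = j;
            }
        }
        return best;
    }

    public static void main(String[] args) {
        double[][] points = { { 0, 0, 0 }, { 1, 2, 2 }, { 3, 3, 3 } };
        double[] target = { 1, 1, 1 };
        int j = findClosest(points, target);
        double dx = points[j][0] - target[0];
        double dy = points[j][1] - target[1];
        double dz = points[j][2] - target[2];
        System.out.println("closest index " + j + ", distance " + FastMath.sqrt(dx * dx + dy * dy + dz * dz));
    }
}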

Example 5 with FastMath.sqrt

use of org.apache.commons.math3.util.FastMath.sqrt in project narchy by automenta.

the class MyCMAESOptimizerTest method testRastrigin.

@Test
public void testRastrigin() {
    double[] startPoint = point(DIM, 0.1);
    double[] insigma = point(DIM, 0.1);
    double[][] boundaries = null;
    PointValuePair expected = new PointValuePair(point(DIM, 0.0), 0.0);
    doTest(new Rastrigin(), startPoint, insigma, boundaries, GoalType.MINIMIZE, (int) (200 * FastMath.sqrt(DIM)), true, 0, 1e-13, 1e-13, 1e-6, 200000, expected);
    doTest(new Rastrigin(), startPoint, insigma, boundaries, GoalType.MINIMIZE, (int) (200 * FastMath.sqrt(DIM)), false, 0, 1e-13, 1e-13, 1e-6, 200000, expected);
}
Also used : PointValuePair(org.apache.commons.math3.optim.PointValuePair) Test(org.junit.jupiter.api.Test)
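
In the stock Commons Math CMAESOptimizerTest this mirrors, the int argument after GoalType is the CMA-ES population size (lambda), so (int) (200 * FastMath.sqrt(DIM)) scales the population with the square root of the dimension rather than using the logarithmic default, while 200000 is the evaluation budget.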

Aggregations

PointValuePair (org.apache.commons.math3.optim.PointValuePair)2 FisherInformationMatrix (gdsc.smlm.fitting.FisherInformationMatrix)1 LikelihoodWrapper (gdsc.smlm.function.LikelihoodWrapper)1 PoissonGammaGaussianLikelihoodWrapper (gdsc.smlm.function.PoissonGammaGaussianLikelihoodWrapper)1 PoissonGaussianLikelihoodWrapper (gdsc.smlm.function.PoissonGaussianLikelihoodWrapper)1 PoissonLikelihoodWrapper (gdsc.smlm.function.PoissonLikelihoodWrapper)1 Gaussian2DFunction (gdsc.smlm.function.gaussian.Gaussian2DFunction)1 UnivariateFunction (org.apache.commons.math3.analysis.UnivariateFunction)1 NormalDistribution (org.apache.commons.math3.distribution.NormalDistribution)1 ConvergenceException (org.apache.commons.math3.exception.ConvergenceException)1 OutOfRangeException (org.apache.commons.math3.exception.OutOfRangeException)1 TooManyEvaluationsException (org.apache.commons.math3.exception.TooManyEvaluationsException)1 TooManyIterationsException (org.apache.commons.math3.exception.TooManyIterationsException)1 BaseOptimizer (org.apache.commons.math3.optim.BaseOptimizer)1 InitialGuess (org.apache.commons.math3.optim.InitialGuess)1 MaxEval (org.apache.commons.math3.optim.MaxEval)1 MaxIter (org.apache.commons.math3.optim.MaxIter)1 OptimizationData (org.apache.commons.math3.optim.OptimizationData)1 SimpleBounds (org.apache.commons.math3.optim.SimpleBounds)1 SimpleValueChecker (org.apache.commons.math3.optim.SimpleValueChecker)1