
Example 11 with MultivariateOptimizer

use of org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer in project narchy by automenta.

the class MyCMAESOptimizer method doOptimize.

// /**
// * {@inheritDoc}
// *
// * @param optData Optimization data. In addition to those documented in
// * {@link MultivariateOptimizer#parseOptimizationData(OptimizationData[])
// * MultivariateOptimizer}, this method will register the following data:
// * <ul>
// *  <li>{@link MyCMAESOptimizer.Sigma}</li>
// *  <li>{@link MyCMAESOptimizer.PopulationSize}</li>
// * </ul>
// * @return {@inheritDoc}
// * @throws TooManyEvaluationsException if the maximal number of
// * evaluations is exceeded.
// * @throws DimensionMismatchException if the initial guess, target, and weight
// * arguments have inconsistent dimensions.
// */
// @Override
// public PointValuePair optimize(OptimizationData... optData)
// throws TooManyEvaluationsException,
// DimensionMismatchException {
// // Set up base class and perform computation.
// return super.optimize(optData);
// }
/**
 * {@inheritDoc}
 */
@Override
protected PointValuePair doOptimize() {
    // -------------------- Initialization --------------------------------
    isMinimize = getGoalType().equals(GoalType.MINIMIZE);
    final MyCMAESOptimizer.FitnessFunction fitfun = new MyCMAESOptimizer.FitnessFunction();
    final double[] guess = getStartPoint();
    // number of objective variables/problem dimension
    dimension = guess.length;
    initializeCMA(guess);
    iterations = 0;
    MyCMAESOptimizer.ValuePenaltyPair valuePenalty = fitfun.value(guess);
    double bestValue = valuePenalty.value + valuePenalty.penalty;
    push(fitnessHistory, bestValue);
    PointValuePair optimum = new PointValuePair(guess, isMinimize ? bestValue : -bestValue);
    PointValuePair lastResult = null;
    final double[] lB = MyCMAESOptimizer.this.getLowerBound();
    final double[] uB = MyCMAESOptimizer.this.getUpperBound();
    generationLoop: for (iterations = 1; iterations <= maxIterations; iterations++) {
        incrementIterationCount();
        // Generate and evaluate lambda offspring
        final RealMatrix arz = randn1(dimension, lambda);
        final RealMatrix arx = zeros(dimension, lambda);
        final double[] fitness = new double[lambda];
        final MyCMAESOptimizer.ValuePenaltyPair[] valuePenaltyPairs = new MyCMAESOptimizer.ValuePenaltyPair[lambda];
        // generate random offspring
        for (int k = 0; k < lambda; k++) {
            RealMatrix arzK = arz.getColumnMatrix(k);
            RealMatrix arxk = null;
            for (int i = 0; i < checkFeasableCount + 1; i++) {
                if (diagonalOnly <= 0) {
                    // m + sig * Normal(0,C)
                    arxk = xmean.add(BD.multiply(arzK).scalarMultiply(sigma));
                } else {
                    arxk = xmean.add(times(diagD, arzK).scalarMultiply(sigma));
                }
                if (i >= checkFeasableCount || fitfun.isFeasible(arxk.getColumn(0), lB, uB)) {
                    break;
                }
                // regenerate random arguments for row
                arz.setColumn(k, randn(dimension));
            }
            copyColumn(arxk, 0, arx, k);
            try {
                // compute fitness
                valuePenaltyPairs[k] = fitfun.value(arx.getColumn(k));
            } catch (TooManyEvaluationsException e) {
                break generationLoop;
            }
        }
        // Compute fitness as the objective value plus the penalty scaled by the value range.
        double valueRange = valueRange(valuePenaltyPairs);
        for (int iValue = 0; iValue < valuePenaltyPairs.length; iValue++) {
            fitness[iValue] = valuePenaltyPairs[iValue].value + valuePenaltyPairs[iValue].penalty * valueRange;
        }
        // Sort by fitness and compute weighted mean into xmean
        final int[] arindex = sortedIndices(fitness);
        // Calculate new xmean, this is selection and recombination
        // for speed up of Eq. (2) and (3)
        final RealMatrix xold = xmean;
        int[] arMu = MathArrays.copyOf(arindex, mu);
        final RealMatrix bestArx = selectColumns(arx, arMu);
        xmean = bestArx.multiply(weights);
        final RealMatrix bestArz = selectColumns(arz, arMu);
        final RealMatrix zmean = bestArz.multiply(weights);
        final boolean hsig = updateEvolutionPaths(zmean, xold);
        if (diagonalOnly <= 0) {
            updateCovariance(hsig, bestArx, arz, arindex, xold);
        } else {
            updateCovarianceDiagonalOnly(hsig, bestArz);
        }
        // Adapt step size sigma - Eq. (5)
        sigma *= Math.exp(Math.min(1, (normps / chiN - 1) * cs / damps));
        final double bestFitness = fitness[arindex[0]];
        final double worstFitness = fitness[arindex[arindex.length - 1]];
        if (bestValue > bestFitness) {
            bestValue = bestFitness;
            lastResult = optimum;
            optimum = new PointValuePair(fitfun.repair(bestArx.getColumn(0)), isMinimize ? bestFitness : -bestFitness);
            if (getConvergenceChecker() != null && getConvergenceChecker().converged(iterations, optimum, lastResult)) {
                break generationLoop;
            }
        }
        // Break if the fitness is good enough; "stopFitness == stopFitness" is a NaN guard (it is false only when stopFitness is NaN)
        if (stopFitness == stopFitness && bestFitness < (isMinimize ? stopFitness : -stopFitness)) {
            break generationLoop;
        }
        final double[] sqrtDiagC = sqrt(diagC).getColumn(0);
        final double[] pcCol = pc.getColumn(0);
        for (int i = 0; i < dimension; i++) {
            if (sigma * Math.max(Math.abs(pcCol[i]), sqrtDiagC[i]) > stopTolX) {
                break;
            }
            if (i >= dimension - 1) {
                break generationLoop;
            }
        }
        for (int i = 0; i < dimension; i++) {
            if (sigma * sqrtDiagC[i] > stopTolUpX) {
                break generationLoop;
            }
        }
        final double historyBest = min(fitnessHistory);
        final double historyWorst = max(fitnessHistory);
        if (iterations > 2 && Math.max(historyWorst, worstFitness) - Math.min(historyBest, bestFitness) < stopTolFun) {
            break generationLoop;
        }
        if (iterations > fitnessHistory.length && historyWorst - historyBest < stopTolHistFun) {
            break generationLoop;
        }
        // stop if the covariance matrix becomes too ill-conditioned: diagD holds square roots of the eigenvalues, so this 1e7 ratio corresponds to a condition number of roughly 1e14
        if (max(diagD) / min(diagD) > tENmILLION) {
            break generationLoop;
        }
        // user defined termination
        if (getConvergenceChecker() != null) {
            final PointValuePair current = new PointValuePair(bestArx.getColumn(0), isMinimize ? bestFitness : -bestFitness);
            if (lastResult != null && getConvergenceChecker().converged(iterations, current, lastResult)) {
                break generationLoop;
            }
            lastResult = current;
        }
        // Adjust step size in case of equal function values (flat fitness)
        if (bestValue == fitness[arindex[(int) (0.1 + lambda / 4.0)]]) {
            sigma *= Math.exp(0.2 + cs / damps);
        }
        if (iterations > 2 && Math.max(historyWorst, bestFitness) - Math.min(historyBest, bestFitness) == 0) {
            sigma *= Math.exp(0.2 + cs / damps);
        }
        // store best in history
        push(fitnessHistory, bestFitness);
        if (generateStatistics) {
            statisticsSigmaHistory.add(sigma);
            statisticsFitnessHistory.add(bestFitness);
            statisticsMeanHistory.add(xmean.transpose());
            statisticsDHistory.add(diagD.transpose().scalarMultiply(hUNDreDtHOUSAND));
        }
    }
    return optimum;
}
Also used : TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) Array2DRowRealMatrix(org.apache.commons.math3.linear.Array2DRowRealMatrix) RealMatrix(org.apache.commons.math3.linear.RealMatrix) PointValuePair(org.apache.commons.math3.optim.PointValuePair)
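
MyCMAESOptimizer is a project-local fork, so its construction details are not shown on this page. As a point of reference, the following is a minimal sketch of how the stock Commons Math CMAESOptimizer (whose doOptimize the method above mirrors) is driven with the Sigma and PopulationSize data mentioned in the commented-out Javadoc. The sphere objective, bounds, seed, and tolerance values are illustrative assumptions, not values taken from narchy.

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.SimpleBounds;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CMAESOptimizer;
import org.apache.commons.math3.random.MersenneTwister;

public class CmaesUsageSketch {
    public static void main(String[] args) {
        // Sphere function: a smooth convex objective with its minimum at the origin.
        MultivariateFunction sphere = point -> {
            double sum = 0;
            for (double x : point) {
                sum += x * x;
            }
            return sum;
        };

        CMAESOptimizer optimizer = new CMAESOptimizer(
                30000,                    // maxIterations
                0,                        // stopFitness (0 disables the fitness-based stop)
                true,                     // isActiveCMA
                0,                        // diagonalOnly (0 = full covariance update)
                0,                        // checkFeasableCount
                new MersenneTwister(42),  // random generator
                false,                    // generateStatistics
                null);                    // no extra convergence checker

        PointValuePair optimum = optimizer.optimize(
                new MaxEval(100000),
                new ObjectiveFunction(sphere),
                GoalType.MINIMIZE,
                new InitialGuess(new double[] {3, -2}),
                new SimpleBounds(new double[] {-5, -5}, new double[] {5, 5}),
                new CMAESOptimizer.Sigma(new double[] {0.5, 0.5}),  // initial step sizes per coordinate
                new CMAESOptimizer.PopulationSize(16));             // lambda

        System.out.println(java.util.Arrays.toString(optimum.getPoint()) + " -> " + optimum.getValue());
    }
}

The Sigma and PopulationSize entries correspond to the sigma and lambda fields consumed inside doOptimize above.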

Example 12 with MultivariateOptimizer

use of org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer in project tetrad by cmu-phil.

the class LogisticRegression2 method regress.

// I am going to try to maximize the likelihood function directly using the Powell optimizer.
public void regress(int[] target, int numValues, double[][] regressors) {
    try {
        int numParams = regressors.length + 1;
        double[] coefficients = new double[(numValues - 1) * numParams];
        // Apparently this needs to be fairly loose.
        int tolerance = 250;
        MultivariateOptimizer search = new PowellOptimizer(tolerance, tolerance);
        PointValuePair pair = search.optimize(new InitialGuess(coefficients), new ObjectiveFunction(new FittingFunction(target, regressors)), GoalType.MAXIMIZE, new MaxEval(1000000));
        this.likelihood = pair.getValue();
    } catch (TooManyEvaluationsException e) {
        e.printStackTrace();
        this.likelihood = Double.NaN;
    }
}
Also used : MultivariateOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer) InitialGuess(org.apache.commons.math3.optim.InitialGuess) MaxEval(org.apache.commons.math3.optim.MaxEval) TooManyEvaluationsException(org.apache.commons.math3.exception.TooManyEvaluationsException) ObjectiveFunction(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction) PowellOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.PowellOptimizer) PointValuePair(org.apache.commons.math3.optim.PointValuePair)
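
The pattern above, wrap a function in ObjectiveFunction, supply an InitialGuess, and let Powell's derivative-free method search, works with any MultivariateFunction. The sketch below substitutes a simple concave quadratic for tetrad's FittingFunction (which is not reproduced here) and uses tighter, more conventional tolerances than the deliberately loose value of 250 above; all names and values in it are illustrative.

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.PowellOptimizer;

public class PowellMaximizeSketch {
    public static void main(String[] args) {
        // Concave stand-in for a log-likelihood surface, peaked at (1, -2).
        MultivariateFunction pseudoLogLikelihood =
                p -> -((p[0] - 1) * (p[0] - 1) + (p[1] + 2) * (p[1] + 2));

        // PowellOptimizer(rel, abs): relative and absolute convergence tolerances.
        MultivariateOptimizer search = new PowellOptimizer(1e-8, 1e-8);
        PointValuePair pair = search.optimize(
                new InitialGuess(new double[2]),   // start from all-zero coefficients
                new ObjectiveFunction(pseudoLogLikelihood),
                GoalType.MAXIMIZE,
                new MaxEval(1000000));

        System.out.println("argmax: " + java.util.Arrays.toString(pair.getPoint()));
        System.out.println("max value: " + pair.getValue());
    }
}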

Example 13 with MultivariateOptimizer

use of org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer in project tetrad by cmu-phil.

the class Mimbuild2 method optimizeMeasureVariancesConditionally.

private void optimizeMeasureVariancesConditionally(TetradMatrix measurescov, TetradMatrix latentscov, double[][] loadings, int[][] indicatorIndices, double[] delta) {
    // Copy delta into a fresh array to use as the optimizer's starting point; count ends up equal to delta.length.
    double[] values2 = new double[delta.length];
    int count = 0;
    for (int i = 0; i < delta.length; i++) {
        values2[count++] = delta[i];
    }
    Function2 function2 = new Function2(indicatorIndices, measurescov, loadings, latentscov, delta, count);
    MultivariateOptimizer search = new PowellOptimizer(1e-7, 1e-7);
    PointValuePair pair = search.optimize(new InitialGuess(values2), new ObjectiveFunction(function2), GoalType.MINIMIZE, new MaxEval(100000));
    minimum = pair.getValue();
}
Also used : MultivariateOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer) InitialGuess(org.apache.commons.math3.optim.InitialGuess) MaxEval(org.apache.commons.math3.optim.MaxEval) ObjectiveFunction(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction) PowellOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.PowellOptimizer) PointValuePair(org.apache.commons.math3.optim.PointValuePair)
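
Function2 is a tetrad-internal class and is not shown on this page. The only contract ObjectiveFunction imposes on it is MultivariateFunction: accept the flat parameter vector handed over from InitialGuess and return the value Powell should minimize. The class below is a purely hypothetical stand-in that illustrates that shape; the real Mimbuild2 discrepancy computation over measurescov, latentscov, and loadings is not reproduced.

import org.apache.commons.math3.analysis.MultivariateFunction;

// Hypothetical stand-in with the same shape as Function2: the optimizer hands it the
// candidate delta values as a flat array, and it returns the discrepancy to minimize.
public class MeasureVarianceObjective implements MultivariateFunction {

    private final double[] observed;  // fixed data captured at construction time

    public MeasureVarianceObjective(double[] observed) {
        this.observed = observed;
    }

    @Override
    public double value(double[] delta) {
        // Placeholder discrepancy: sum of squared differences from the observed values.
        // The real objective would compare implied and observed covariance matrices.
        double sum = 0;
        for (int i = 0; i < delta.length; i++) {
            double diff = delta[i] - observed[i];
            sum += diff * diff;
        }
        return sum;
    }
}

Any such implementation can be dropped into the same optimize(new InitialGuess(...), new ObjectiveFunction(...), GoalType.MINIMIZE, new MaxEval(...)) call shown above.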

Example 14 with MultivariateOptimizer

use of org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer in project tetrad by cmu-phil.

the class Mimbuild2 method optimizeAllParamsSimultaneously.

private void optimizeAllParamsSimultaneously(Node[][] indicators, TetradMatrix measurescov, TetradMatrix latentscov, double[][] loadings, int[][] indicatorIndices, double[] delta) {
    double[] values = getAllParams(indicators, latentscov, loadings, delta);
    Function4 function = new Function4(indicatorIndices, measurescov, loadings, latentscov, delta);
    MultivariateOptimizer search = new PowellOptimizer(1e-7, 1e-7);
    PointValuePair pair = search.optimize(new InitialGuess(values), new ObjectiveFunction(function), GoalType.MINIMIZE, new MaxEval(100000));
    minimum = pair.getValue();
}
Also used : MultivariateOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer) InitialGuess(org.apache.commons.math3.optim.InitialGuess) MaxEval(org.apache.commons.math3.optim.MaxEval) ObjectiveFunction(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction) PowellOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.PowellOptimizer) PointValuePair(org.apache.commons.math3.optim.PointValuePair)

Example 15 with MultivariateOptimizer

use of org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer in project tetrad by cmu-phil.

the class MimbuildTrek method optimizeAllParamsSimultaneously.

private void optimizeAllParamsSimultaneously(Node[][] indicators, TetradMatrix measurescov, TetradMatrix latentscov, double[][] loadings, int[][] indicatorIndices, double[] delta) {
    double[] values = getAllParams(indicators, latentscov, loadings, delta);
    Function4 function = new Function4(indicatorIndices, measurescov, loadings, latentscov, delta);
    MultivariateOptimizer search = new PowellOptimizer(1e-7, 1e-7);
    PointValuePair pair = search.optimize(new InitialGuess(values), new ObjectiveFunction(function), GoalType.MINIMIZE, new MaxEval(100000));
    minimum = pair.getValue();
}
Also used : MultivariateOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer) InitialGuess(org.apache.commons.math3.optim.InitialGuess) MaxEval(org.apache.commons.math3.optim.MaxEval) ObjectiveFunction(org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction) PowellOptimizer(org.apache.commons.math3.optim.nonlinear.scalar.noderiv.PowellOptimizer) PointValuePair(org.apache.commons.math3.optim.PointValuePair)
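
The last three examples use the two-argument PowellOptimizer constructor, so convergence is governed only by the rel/abs tolerances and the MaxEval budget. Commons Math also provides a constructor that accepts an additional ConvergenceChecker, for example a SimpleValueChecker, which appears in the aggregation list below. The sketch here is illustrative: the quadratic objective and every threshold are assumptions rather than values from tetrad.

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.SimpleValueChecker;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.PowellOptimizer;

public class PowellWithCheckerSketch {
    public static void main(String[] args) {
        // Quadratic bowl with its minimum at (2, 3); stands in for Function4.
        MultivariateFunction bowl =
                p -> (p[0] - 2) * (p[0] - 2) + (p[1] - 3) * (p[1] - 3);

        // The third constructor argument is a user-supplied convergence test on
        // successive best points, applied in addition to the line-search tolerances.
        MultivariateOptimizer search =
                new PowellOptimizer(1e-7, 1e-7, new SimpleValueChecker(1e-10, 1e-10));

        PointValuePair pair = search.optimize(
                new InitialGuess(new double[] {0, 0}),
                new ObjectiveFunction(bowl),
                GoalType.MINIMIZE,
                new MaxEval(100000));

        double minimum = pair.getValue();
        System.out.println("minimum " + minimum + " at "
                + java.util.Arrays.toString(pair.getPoint()));
    }
}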

Aggregations

PointValuePair (org.apache.commons.math3.optim.PointValuePair): 16
MultivariateOptimizer (org.apache.commons.math3.optim.nonlinear.scalar.MultivariateOptimizer): 16
ObjectiveFunction (org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction): 16
InitialGuess (org.apache.commons.math3.optim.InitialGuess): 15
MaxEval (org.apache.commons.math3.optim.MaxEval): 15
PowellOptimizer (org.apache.commons.math3.optim.nonlinear.scalar.noderiv.PowellOptimizer): 13
TooManyEvaluationsException (org.apache.commons.math3.exception.TooManyEvaluationsException): 4
MultivariateFunction (org.apache.commons.math3.analysis.MultivariateFunction): 3
TooManyIterationsException (org.apache.commons.math3.exception.TooManyIterationsException): 2
OptimizationData (org.apache.commons.math3.optim.OptimizationData): 2
SimpleBounds (org.apache.commons.math3.optim.SimpleBounds): 2
SimpleValueChecker (org.apache.commons.math3.optim.SimpleValueChecker): 2
ObjectiveFunctionGradient (org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunctionGradient): 2
CMAESOptimizer (org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CMAESOptimizer): 2
RandomGenerator (org.apache.commons.math3.random.RandomGenerator): 2
Context (edu.cmu.tetrad.calculator.expression.Context): 1
Expression (edu.cmu.tetrad.calculator.expression.Expression): 1
Array2DRowRealMatrix (org.apache.commons.math3.linear.Array2DRowRealMatrix): 1
RealMatrix (org.apache.commons.math3.linear.RealMatrix): 1
BFGSOptimizer (org.apache.commons.math3.optim.nonlinear.scalar.gradient.BFGSOptimizer): 1