Use of org.apache.commons.math3.linear.DiagonalMatrix in project GDSC-SMLM by aherbert.
The class PcPalmFitting, method fitClusteredModel.
/**
* Fits the correlation curve with r>0 to the clustered model using the estimated density and
* precision. Parameters must be fit within a tolerance of the starting values.
*
* @param gr the correlation curve
* @param sigmaS The estimated precision
* @param proteinDensity The estimated protein density
* @param resultColour the result colour
* @return The fitted parameters [precision, density, clusterRadius, clusterDensity]
*/
@Nullable
private double[] fitClusteredModel(double[][] gr, double sigmaS, double proteinDensity, String resultColour) {
final ClusteredModelFunctionGradient function = new ClusteredModelFunctionGradient();
clusteredModel = function;
ImageJUtils.log("Fitting %s: Estimated precision = %f nm, estimated protein density = %g um^-2", clusteredModel.getName(), sigmaS, proteinDensity * 1e6);
clusteredModel.setLogging(true);
for (int i = offset; i < gr[0].length; i++) {
// Only fit the curve above a multiple of the estimated precision (points below are subject to error)
if (gr[0][i] > sigmaS * settings.fitAboveEstimatedPrecision) {
clusteredModel.addPoint(gr[0][i], gr[1][i]);
}
}
double[] parameters;
// The model is: sigma, density, range, amplitude
final double[] initialSolution = new double[] { sigmaS, proteinDensity, sigmaS * 5, 1 };
// Constrain the fitting to be close to the estimated precision (sigmaS) and protein density.
// LVM fitting does not support constrained fitting so use a bounded optimiser.
final SumOfSquaresModelFunction clusteredModelMulti = new SumOfSquaresModelFunction(clusteredModel);
final double[] x = clusteredModelMulti.x;
// Put some bounds around the initial guess. Use the fitting tolerance (in %) if provided.
final double limit = (settings.fittingTolerance > 0) ? 1 + settings.fittingTolerance / 100 : 2;
final double[] lB = new double[] { initialSolution[0] / limit, initialSolution[1] / limit, 0, 0 };
// The amplitude and range should not extend beyond the limits of the g(r) curve.
final double[] uB = new double[] { initialSolution[0] * limit, initialSolution[1] * limit, MathUtils.max(x), MathUtils.max(gr[1]) };
ImageJUtils.log("Fitting %s using a bounded search: %s < precision < %s & %s < density < %s", clusteredModel.getName(), MathUtils.rounded(lB[0], 4), MathUtils.rounded(uB[0], 4), MathUtils.rounded(lB[1] * 1e6, 4), MathUtils.rounded(uB[1] * 1e6, 4));
final PointValuePair constrainedSolution = runBoundedOptimiser(initialSolution, lB, uB, clusteredModelMulti);
if (constrainedSolution == null) {
return null;
}
parameters = constrainedSolution.getPointRef();
int evaluations = boundedEvaluations;
// Refit using an LVM
if (settings.refitWithGradients) {
ImageJUtils.log("Re-fitting %s using a gradient optimisation", clusteredModel.getName());
final LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
Optimum lvmSolution;
try {
// @formatter:off
final LeastSquaresProblem problem = new LeastSquaresBuilder()
    .maxEvaluations(Integer.MAX_VALUE)
    .maxIterations(3000)
    .start(parameters)
    .target(function.getY())
    .weight(new DiagonalMatrix(function.getWeights()))
    .model(function, new MultivariateMatrixFunction() {
      @Override
      public double[][] value(double[] point) {
        return function.jacobian(point);
      }
    })
    .build();
// @formatter:on
lvmSolution = optimizer.optimize(problem);
evaluations += lvmSolution.getEvaluations();
final double ss = lvmSolution.getResiduals().dotProduct(lvmSolution.getResiduals());
if (ss < constrainedSolution.getValue()) {
ImageJUtils.log("Re-fitting %s improved the SS from %s to %s (-%s%%)", clusteredModel.getName(), MathUtils.rounded(constrainedSolution.getValue(), 4), MathUtils.rounded(ss, 4), MathUtils.rounded(100 * (constrainedSolution.getValue() - ss) / constrainedSolution.getValue(), 4));
parameters = lvmSolution.getPoint().toArray();
}
} catch (final TooManyIterationsException ex) {
ImageJUtils.log("Failed to re-fit %s: Too many iterations (%s)", clusteredModel.getName(), ex.getMessage());
} catch (final ConvergenceException ex) {
ImageJUtils.log("Failed to re-fit %s: %s", clusteredModel.getName(), ex.getMessage());
}
}
clusteredModel.setLogging(false);
// Ensure the width is positive
parameters[0] = Math.abs(parameters[0]);
double ss = 0;
final double[] obs = clusteredModel.getY();
final double[] exp = clusteredModel.value(parameters);
for (int i = 0; i < obs.length; i++) {
ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
}
final double totalSumSquares = MathUtils.getTotalSumOfSquares(clusteredModel.getY());
final double adjustedR2 = MathUtils.getAdjustedCoefficientOfDetermination(ss, totalSumSquares, clusteredModel.size(), parameters.length);
final double fitSigmaS = parameters[0];
final double fitProteinDensity = parameters[1];
// The radius of the cluster domain
final double domainRadius = parameters[2];
// The density of the cluster domain
final double domainDensity = parameters[3];
// This is from the PC-PALM paper. However, that paper fits the g(r)protein exponential convolved
// in 2D with the g(r)PSF. In this method we have fit only the exponential.
final double nCluster = 2 * domainDensity * Math.PI * domainRadius * domainRadius * fitProteinDensity;
final double e1 = parameterDrift(sigmaS, fitSigmaS);
final double e2 = parameterDrift(proteinDensity, fitProteinDensity);
ImageJUtils.log(" %s fit: SS = %f. Adj.R^2 = %f. %d evaluations", clusteredModel.getName(), ss, adjustedR2, evaluations);
ImageJUtils.log(" %s parameters:", clusteredModel.getName());
ImageJUtils.log(" Average precision = %s nm (%s%%)", MathUtils.rounded(fitSigmaS, 4), MathUtils.rounded(e1, 4));
ImageJUtils.log(" Average protein density = %s um^-2 (%s%%)", MathUtils.rounded(fitProteinDensity * 1e6, 4), MathUtils.rounded(e2, 4));
ImageJUtils.log(" Domain radius = %s nm", MathUtils.rounded(domainRadius, 4));
ImageJUtils.log(" Domain density = %s", MathUtils.rounded(domainDensity, 4));
ImageJUtils.log(" nCluster = %s", MathUtils.rounded(nCluster, 4));
// Check the fitted parameters are within tolerance of the initial estimates
valid2 = true;
if (settings.fittingTolerance > 0 && (Math.abs(e1) > settings.fittingTolerance || Math.abs(e2) > settings.fittingTolerance)) {
ImageJUtils.log(" Failed to fit %s within tolerance (%s%%): Average precision = %f nm (%s%%)," + " average protein density = %g um^-2 (%s%%)", clusteredModel.getName(), MathUtils.rounded(settings.fittingTolerance, 4), fitSigmaS, MathUtils.rounded(e1, 4), fitProteinDensity * 1e6, MathUtils.rounded(e2, 4));
valid2 = false;
}
// The domain radius should be larger than the fitted precision and the domain density should be positive
if (domainRadius < fitSigmaS) {
ImageJUtils.log(" Failed to fit %s: Domain radius is smaller than the average precision (%s < %s)", clusteredModel.getName(), MathUtils.rounded(domainRadius, 4), MathUtils.rounded(fitSigmaS, 4));
valid2 = false;
}
if (domainDensity < 0) {
ImageJUtils.log(" Failed to fit %s: Domain density is negative (%s)", clusteredModel.getName(), MathUtils.rounded(domainDensity, 4));
valid2 = false;
}
if (adjustedR2 <= randomModelAdjustedR2) {
ImageJUtils.log(" Failed to fit %s - Adjusted r^2 has decreased %s%%", clusteredModel.getName(), MathUtils.rounded((100 * (randomModelAdjustedR2 - adjustedR2) / randomModelAdjustedR2), 4));
valid2 = false;
}
addResult(clusteredModel.getName(), resultColour, valid2, fitSigmaS, fitProteinDensity, domainRadius, domainDensity, nCluster, -1, adjustedR2);
return parameters;
}
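The re-fit above follows the standard Commons Math 3 pattern used throughout this page: build a LeastSquaresProblem with the observed values as the target, a DiagonalMatrix of per-point weights, and the model plus its Jacobian, then run the LevenbergMarquardtOptimizer. Below is a minimal, self-contained sketch of that pattern; the single-exponential model y = a*exp(-x/b), the toy data and the class name are illustrative assumptions, not GDSC-SMLM code.
import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
import org.apache.commons.math3.analysis.MultivariateVectorFunction;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer.Optimum;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;
import org.apache.commons.math3.fitting.leastsquares.LevenbergMarquardtOptimizer;
import org.apache.commons.math3.linear.DiagonalMatrix;

public class WeightedLvmSketch {
  public static void main(String[] args) {
    // Toy data from y = a * exp(-x / b) with a = 2, b = 5 (hypothetical values)
    final double[] x = {1, 2, 3, 4, 5, 6, 7, 8};
    final double[] y = new double[x.length];
    for (int i = 0; i < x.length; i++) {
      y[i] = 2 * Math.exp(-x[i] / 5.0);
    }
    final double[] weights = new double[x.length];
    java.util.Arrays.fill(weights, 1.0); // unit weights => unweighted least squares

    // Model value and Jacobian with respect to the parameters [a, b]
    final MultivariateVectorFunction value = point -> {
      final double[] v = new double[x.length];
      for (int i = 0; i < x.length; i++) {
        v[i] = point[0] * Math.exp(-x[i] / point[1]);
      }
      return v;
    };
    final MultivariateMatrixFunction jacobian = point -> {
      final double[][] j = new double[x.length][2];
      for (int i = 0; i < x.length; i++) {
        final double e = Math.exp(-x[i] / point[1]);
        j[i][0] = e; // d/da
        j[i][1] = point[0] * e * x[i] / (point[1] * point[1]); // d/db
      }
      return j;
    };

    final LeastSquaresProblem problem = new LeastSquaresBuilder()
        .maxEvaluations(Integer.MAX_VALUE)
        .maxIterations(3000)
        .start(new double[] {1, 3})
        .target(y)
        .weight(new DiagonalMatrix(weights))
        .model(value, jacobian)
        .build();

    final Optimum optimum = new LevenbergMarquardtOptimizer().optimize(problem);
    System.out.printf("a=%f b=%f ss=%f (%d evaluations)%n",
        optimum.getPoint().getEntry(0), optimum.getPoint().getEntry(1),
        optimum.getResiduals().dotProduct(optimum.getResiduals()),
        optimum.getEvaluations());
  }
}
With unit weights the residual dot product equals the plain sum of squares, which is why the methods on this page describe it as the SS of an unweighted fit.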
Use of org.apache.commons.math3.linear.DiagonalMatrix in project GDSC-SMLM by aherbert.
The class PcPalmFitting, method fitEmulsionModel.
/**
* Fits the correlation curve with r>0 to the emulsion model using the estimated density and
* precision. Parameters must be fit within a tolerance of the starting values.
*
* @param gr the correlation curve
* @param sigmaS The estimated precision
* @param proteinDensity The estimated protein density
* @param resultColour the result colour
* @return The fitted parameters [precision, density, domainRadius, amplitude, coherence]
*/
@Nullable
private double[] fitEmulsionModel(double[][] gr, double sigmaS, double proteinDensity, String resultColour) {
final EmulsionModelFunctionGradient function = new EmulsionModelFunctionGradient();
emulsionModel = function;
ImageJUtils.log("Fitting %s: Estimated precision = %f nm, estimated protein density = %g um^-2", emulsionModel.getName(), sigmaS, proteinDensity * 1e6);
emulsionModel.setLogging(true);
for (int i = offset; i < gr[0].length; i++) {
// Only fit the curve above a multiple of the estimated precision (points below are subject to error)
if (gr[0][i] > sigmaS * settings.fitAboveEstimatedPrecision) {
emulsionModel.addPoint(gr[0][i], gr[1][i]);
}
}
// The model is: sigma, density, range, amplitude, alpha
final double[] initialSolution = new double[] { sigmaS, proteinDensity, sigmaS * 5, 1, sigmaS * 5 };
// Constrain the fitting to be close to the estimated precision (sigmaS) and protein density.
// LVM fitting does not support constrained fitting so use a bounded optimiser.
final SumOfSquaresModelFunction emulsionModelMulti = new SumOfSquaresModelFunction(emulsionModel);
final double[] x = emulsionModelMulti.x;
final double[] y = emulsionModelMulti.y;
// Range should be equal to the first time the g(r) curve crosses 1
for (int i = 0; i < x.length; i++) {
if (y[i] < 1) {
initialSolution[4] = initialSolution[2] = (i > 0) ? (x[i - 1] + x[i]) * 0.5 : x[i];
break;
}
}
// Put some bounds around the initial guess. Use the fitting tolerance (in %) if provided.
final double limit = (settings.fittingTolerance > 0) ? 1 + settings.fittingTolerance / 100 : 2;
final double[] lB = new double[] { initialSolution[0] / limit, initialSolution[1] / limit, 0, 0, 0 };
// The amplitude and range should not extend beyond the limits of the g(r) curve.
// TODO - Find out the expected range for the alpha parameter.
final double[] uB = new double[] { initialSolution[0] * limit, initialSolution[1] * limit, MathUtils.max(x), MathUtils.max(gr[1]), MathUtils.max(x) * 2 };
ImageJUtils.log("Fitting %s using a bounded search: %s < precision < %s & %s < density < %s", emulsionModel.getName(), MathUtils.rounded(lB[0], 4), MathUtils.rounded(uB[0], 4), MathUtils.rounded(lB[1] * 1e6, 4), MathUtils.rounded(uB[1] * 1e6, 4));
final PointValuePair constrainedSolution = runBoundedOptimiser(initialSolution, lB, uB, emulsionModelMulti);
if (constrainedSolution == null) {
return null;
}
double[] parameters = constrainedSolution.getPointRef();
int evaluations = boundedEvaluations;
// Refit using an LVM
if (settings.refitWithGradients) {
ImageJUtils.log("Re-fitting %s using a gradient optimisation", emulsionModel.getName());
final LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
Optimum lvmSolution;
try {
// @formatter:off
final LeastSquaresProblem problem = new LeastSquaresBuilder()
    .maxEvaluations(Integer.MAX_VALUE)
    .maxIterations(3000)
    .start(parameters)
    .target(function.getY())
    .weight(new DiagonalMatrix(function.getWeights()))
    .model(function, function::jacobian)
    .build();
// @formatter:on
lvmSolution = optimizer.optimize(problem);
evaluations += lvmSolution.getEvaluations();
final double ss = lvmSolution.getResiduals().dotProduct(lvmSolution.getResiduals());
if (ss < constrainedSolution.getValue()) {
ImageJUtils.log("Re-fitting %s improved the SS from %s to %s (-%s%%)", emulsionModel.getName(), MathUtils.rounded(constrainedSolution.getValue(), 4), MathUtils.rounded(ss, 4), MathUtils.rounded(100 * (constrainedSolution.getValue() - ss) / constrainedSolution.getValue(), 4));
parameters = lvmSolution.getPoint().toArray();
}
} catch (final TooManyIterationsException ex) {
ImageJUtils.log("Failed to re-fit %s: Too many iterations (%s)", emulsionModel.getName(), ex.getMessage());
} catch (final ConvergenceException ex) {
ImageJUtils.log("Failed to re-fit %s: %s", emulsionModel.getName(), ex.getMessage());
}
}
emulsionModel.setLogging(false);
// Ensure the width is positive
parameters[0] = Math.abs(parameters[0]);
double ss = 0;
final double[] obs = emulsionModel.getY();
final double[] exp = emulsionModel.value(parameters);
for (int i = 0; i < obs.length; i++) {
ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
}
final double totalSumSquares = MathUtils.getTotalSumOfSquares(emulsionModel.getY());
final double adjustedR2 = MathUtils.getAdjustedCoefficientOfDetermination(ss, totalSumSquares, emulsionModel.size(), parameters.length);
final double fitSigmaS = parameters[0];
final double fitProteinDensity = parameters[1];
// The radius of the cluster domain
final double domainRadius = parameters[2];
final double amplitutde = parameters[3];
// The coherence length between circles
final double coherence = parameters[4];
final double e1 = parameterDrift(sigmaS, fitSigmaS);
final double e2 = parameterDrift(proteinDensity, fitProteinDensity);
ImageJUtils.log(" %s fit: SS = %f. Adj.R^2 = %f. %d evaluations", emulsionModel.getName(), ss, adjustedR2, evaluations);
ImageJUtils.log(" %s parameters:", emulsionModel.getName());
ImageJUtils.log(" Average precision = %s nm (%s%%)", MathUtils.rounded(fitSigmaS, 4), MathUtils.rounded(e1, 4));
ImageJUtils.log(" Average protein density = %s um^-2 (%s%%)", MathUtils.rounded(fitProteinDensity * 1e6, 4), MathUtils.rounded(e2, 4));
ImageJUtils.log(" Domain radius = %s nm", MathUtils.rounded(domainRadius, 4));
ImageJUtils.log(" Domain density = %s", MathUtils.rounded(amplitutde, 4));
ImageJUtils.log(" Domain coherence = %s", MathUtils.rounded(coherence, 4));
// Check the fitted parameters are within tolerance of the initial estimates
valid2 = true;
if (settings.fittingTolerance > 0 && (Math.abs(e1) > settings.fittingTolerance || Math.abs(e2) > settings.fittingTolerance)) {
ImageJUtils.log(" Failed to fit %s within tolerance (%s%%): Average precision = %f nm (%s%%)," + " average protein density = %g um^-2 (%s%%)", emulsionModel.getName(), MathUtils.rounded(settings.fittingTolerance, 4), fitSigmaS, MathUtils.rounded(e1, 4), fitProteinDensity * 1e6, MathUtils.rounded(e2, 4));
valid2 = false;
}
// The domain radius should be larger than the fitted precision and the domain density (amplitude) should be positive
if (domainRadius < fitSigmaS) {
ImageJUtils.log(" Failed to fit %s: Domain radius is smaller than the average precision (%s < %s)", emulsionModel.getName(), MathUtils.rounded(domainRadius, 4), MathUtils.rounded(fitSigmaS, 4));
valid2 = false;
}
if (amplitutde < 0) {
ImageJUtils.log(" Failed to fit %s: Domain density is negative (%s)", emulsionModel.getName(), MathUtils.rounded(amplitutde, 4));
valid2 = false;
}
if (adjustedR2 <= randomModelAdjustedR2) {
ImageJUtils.log(" Failed to fit %s - Adjusted r^2 has decreased %s%%", clusteredModel.getName(), MathUtils.rounded((100 * (randomModelAdjustedR2 - adjustedR2) / randomModelAdjustedR2), 4));
valid2 = false;
}
addResult(emulsionModel.getName(), resultColour, valid2, fitSigmaS, fitProteinDensity, domainRadius, amplitutde, -1, coherence, adjustedR2);
return parameters;
}
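Both fits above validate the result by checking how far the fitted precision and density drift from the starting estimates (e1, e2) against settings.fittingTolerance. parameterDrift is project code; the sketch below assumes a simple signed-percentage implementation purely to illustrate the check, and the numbers are made up.
// Hypothetical stand-in for PcPalmFitting.parameterDrift; the project's helper may differ in detail.
public class DriftCheckSketch {
  static double parameterDrift(double estimate, double fitted) {
    return 100.0 * (fitted - estimate) / estimate; // signed % change from the starting estimate
  }

  public static void main(String[] args) {
    double fittingTolerance = 20;             // settings.fittingTolerance (%)
    double e1 = parameterDrift(30.0, 33.0);   // precision: +10%
    double e2 = parameterDrift(5e-4, 3.5e-4); // density: -30%
    boolean valid = fittingTolerance <= 0
        || (Math.abs(e1) <= fittingTolerance && Math.abs(e2) <= fittingTolerance);
    System.out.printf("e1=%.1f%% e2=%.1f%% valid=%b%n", e1, e2, valid); // e1=10.0% e2=-30.0% valid=false
  }
}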
Use of org.apache.commons.math3.linear.DiagonalMatrix in project GDSC-SMLM by aherbert.
The class PcPalmMolecules, method optimiseLeastSquares.
private double[] optimiseLeastSquares(float[] x, float[] y, double[] initialSolution) {
// Least-squares optimisation using numerical gradients
final SkewNormalDifferentiableFunction function = new SkewNormalDifferentiableFunction(initialSolution);
function.addData(x, y);
final LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
// @formatter:off
final LeastSquaresProblem problem = new LeastSquaresBuilder()
    .maxEvaluations(Integer.MAX_VALUE)
    .maxIterations(3000)
    .start(initialSolution)
    .target(function.calculateTarget())
    .weight(new DiagonalMatrix(function.calculateWeights()))
    .model(function, function::jacobian)
    .build();
// @formatter:on
final Optimum optimum = optimizer.optimize(problem);
return optimum.getPoint().toArray();
}
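optimiseLeastSquares returns only the optimised point. If more diagnostics are wanted, the Optimum returned by the optimiser also exposes the residuals, RMS and evaluation counts. A small hedged sketch (the class and method names are illustrative, not project code):
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer.Optimum;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;
import org.apache.commons.math3.fitting.leastsquares.LevenbergMarquardtOptimizer;

public class LvmDiagnostics {
  // Runs the same optimiser but also logs the fit diagnostics available on Optimum.
  static double[] fitAndReport(LeastSquaresProblem problem) {
    final Optimum optimum = new LevenbergMarquardtOptimizer().optimize(problem);
    final double ss = optimum.getResiduals().dotProduct(optimum.getResiduals());
    System.out.printf("RMS=%f, SS=%f, %d evaluations, %d iterations%n",
        optimum.getRMS(), ss, optimum.getEvaluations(), optimum.getIterations());
    return optimum.getPoint().toArray();
  }
}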
Use of org.apache.commons.math3.linear.DiagonalMatrix in project GDSC-SMLM by aherbert.
The class BinomialFitter, method fitBinomial.
/**
* Fit the binomial distribution (n,p) to the cumulative histogram. Performs fitting assuming a
* fixed n value and attempts to optimise p.
*
* @param histogram The input histogram
* @param mean The histogram mean (used to estimate p). Calculated if NaN.
* @param n The n to evaluate
* @param zeroTruncated True if the model should ignore n=0 (zero-truncated binomial)
* @return The best fit (n, p)
* @throws IllegalArgumentException If any of the input data values are negative
* @throws IllegalArgumentException If fitting a zero-truncated binomial and there are no
* values above zero
*/
public PointValuePair fitBinomial(double[] histogram, double mean, int n, boolean zeroTruncated) {
if (Double.isNaN(mean)) {
mean = getMean(histogram);
}
if (zeroTruncated && histogram[0] > 0) {
log("Fitting zero-truncated histogram but there are zero values - " + "Renormalising to ignore zero");
double cumul = 0;
for (int i = 1; i < histogram.length; i++) {
cumul += histogram[i];
}
if (cumul == 0) {
throw new IllegalArgumentException("Fitting zero-truncated histogram but there are no non-zero values");
}
histogram[0] = 0;
for (int i = 1; i < histogram.length; i++) {
histogram[i] /= cumul;
}
}
final int nFittedPoints = Math.min(histogram.length, n + 1) - ((zeroTruncated) ? 1 : 0);
if (nFittedPoints < 1) {
log("No points to fit (%d): Histogram.length = %d, n = %d, zero-truncated = %b", nFittedPoints, histogram.length, n, zeroTruncated);
return null;
}
// The model is only fitting the probability p
// For a binomial n*p = mean => p = mean/n
final double[] initialSolution = new double[] { Math.min(mean / n, 1) };
// Create the function
final BinomialModelFunction function = new BinomialModelFunction(histogram, n, zeroTruncated);
final double[] lB = new double[1];
final double[] uB = new double[] { 1 };
final SimpleBounds bounds = new SimpleBounds(lB, uB);
// Fit
// CMAESOptimizer or BOBYQAOptimizer support bounds
// CMAESOptimiser based on Matlab code:
// https://www.lri.fr/~hansen/cmaes.m
// Take the defaults from the Matlab documentation
final int maxIterations = 2000;
final double stopFitness = 0;
final boolean isActiveCma = true;
final int diagonalOnly = 0;
final int checkFeasableCount = 1;
final RandomGenerator random = new RandomGeneratorAdapter(UniformRandomProviders.create());
final boolean generateStatistics = false;
final ConvergenceChecker<PointValuePair> checker = new SimpleValueChecker(1e-6, 1e-10);
// The sigma determines the search range for the variables. It should be 1/3 of the initial
// search region.
final OptimizationData sigma = new CMAESOptimizer.Sigma(new double[] { (uB[0] - lB[0]) / 3 });
final OptimizationData popSize = new CMAESOptimizer.PopulationSize((int) (4 + Math.floor(3 * Math.log(2))));
try {
PointValuePair solution = null;
boolean noRefit = maximumLikelihood;
if (n == 1 && zeroTruncated) {
// No need to fit
solution = new PointValuePair(new double[] { 1 }, 0);
noRefit = true;
} else {
final GoalType goalType = (maximumLikelihood) ? GoalType.MAXIMIZE : GoalType.MINIMIZE;
// Iteratively fit
final CMAESOptimizer opt = new CMAESOptimizer(maxIterations, stopFitness, isActiveCma, diagonalOnly, checkFeasableCount, random, generateStatistics, checker);
for (int iteration = 0; iteration <= fitRestarts; iteration++) {
try {
// Start from the initial solution
final PointValuePair result = opt.optimize(new InitialGuess(initialSolution), new ObjectiveFunction(function), goalType, bounds, sigma, popSize, new MaxIter(maxIterations), new MaxEval(maxIterations * 2));
if (solution == null || result.getValue() < solution.getValue()) {
solution = result;
}
} catch (final TooManyEvaluationsException | TooManyIterationsException ex) {
// No solution
}
if (solution == null) {
continue;
}
try {
// Also restart from the current optimum
final PointValuePair result = opt.optimize(new InitialGuess(solution.getPointRef()), new ObjectiveFunction(function), goalType, bounds, sigma, popSize, new MaxIter(maxIterations), new MaxEval(maxIterations * 2));
if (result.getValue() < solution.getValue()) {
solution = result;
}
} catch (final TooManyEvaluationsException | TooManyIterationsException ex) {
// No solution
}
}
if (solution == null) {
return null;
}
}
if (noRefit) {
// Although we fit the log-likelihood, return the sum-of-squares to allow
// comparison across different n
final double p = solution.getPointRef()[0];
double ss = 0;
final double[] obs = function.pvalues;
final double[] exp = function.getP(p);
for (int i = 0; i < obs.length; i++) {
ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
}
return new PointValuePair(solution.getPointRef(), ss);
// We can do an LVM refit if the number of fitted points is more than 1.
} else if (nFittedPoints > 1) {
// Improve SS fit with a gradient based LVM optimizer
final LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
try {
final BinomialModelFunctionGradient gradientFunction = new BinomialModelFunctionGradient(histogram, n, zeroTruncated);
// @formatter:off
final LeastSquaresProblem problem = new LeastSquaresBuilder()
    .maxEvaluations(Integer.MAX_VALUE)
    .maxIterations(3000)
    .start(solution.getPointRef())
    .target(gradientFunction.pvalues)
    .weight(new DiagonalMatrix(gradientFunction.getWeights()))
    .model(gradientFunction, gradientFunction::jacobian)
    .build();
// @formatter:on
final Optimum lvmSolution = optimizer.optimize(problem);
// Check the pValue is valid since the LVM is not bounded.
final double p = lvmSolution.getPoint().getEntry(0);
if (p <= 1 && p >= 0) {
// True if the weights are 1
final double ss = lvmSolution.getResiduals().dotProduct(lvmSolution.getResiduals());
// ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
if (ss < solution.getValue()) {
return new PointValuePair(lvmSolution.getPoint().toArray(), ss);
}
}
} catch (final TooManyIterationsException ex) {
log("Failed to re-fit: Too many iterations: %s", ex.getMessage());
} catch (final ConvergenceException ex) {
log("Failed to re-fit: %s", ex.getMessage());
} catch (final Exception ex) {
// Ignore this ...
}
}
return solution;
} catch (final RuntimeException ex) {
log("Failed to fit Binomial distribution with N=%d : %s", n, ex.getMessage());
}
return null;
}
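The CMAES configuration above (sigma set to one third of each bounded search range, population size 4 + floor(3*ln(dimension))) is the usual Commons Math 3 recipe for bounded, derivative-free minimisation. A minimal sketch on a toy one-dimensional objective; the objective, bounds and class name are illustrative assumptions, not GDSC-SMLM code.
import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.SimpleBounds;
import org.apache.commons.math3.optim.SimpleValueChecker;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.CMAESOptimizer;
import org.apache.commons.math3.random.MersenneTwister;

public class CmaesSketch {
  public static void main(String[] args) {
    // Toy objective: minimise (p - 0.3)^2 on [0, 1]
    final MultivariateFunction f = point -> (point[0] - 0.3) * (point[0] - 0.3);
    final double[] lB = {0};
    final double[] uB = {1};
    // Sigma = 1/3 of the search range; population size from the CMA-ES default formula
    final double[] s = {(uB[0] - lB[0]) / 3};
    final int popSize = (int) (4 + Math.floor(3 * Math.log(lB.length)));

    final CMAESOptimizer opt = new CMAESOptimizer(2000, 0, true, 0, 1,
        new MersenneTwister(), false, new SimpleValueChecker(1e-6, 1e-10));
    final PointValuePair solution = opt.optimize(new MaxEval(4000),
        new ObjectiveFunction(f), GoalType.MINIMIZE, new InitialGuess(new double[] {0.5}),
        new SimpleBounds(lB, uB), new CMAESOptimizer.Sigma(s),
        new CMAESOptimizer.PopulationSize(popSize));
    System.out.printf("p=%f value=%g (%d evaluations)%n",
        solution.getPoint()[0], solution.getValue(), opt.getEvaluations());
  }
}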
Use of org.apache.commons.math3.linear.DiagonalMatrix in project GDSC-SMLM by aherbert.
The class JumpDistanceAnalysis, method doFitJumpDistanceHistogram.
/**
* Fit the jump distance histogram using a cumulative sum with the given number of species.
*
* <p>Results are sorted by the diffusion coefficient ascending.
*
* @param jdHistogram The cumulative jump distance histogram. X-axis is um^2, Y-axis is cumulative
* probability. Must be monotonically ascending.
* @param estimatedD The estimated diffusion coefficient
* @param n The number of species in the mixed population
* @return Array containing: { D (um^2), Fractions }. Can be null if no fit was made.
*/
private double[][] doFitJumpDistanceHistogram(double[][] jdHistogram, double estimatedD, int n) {
calibrated = isCalibrated();
if (n == 1) {
// Fit using a single population model
final LevenbergMarquardtOptimizer lvmOptimizer = new LevenbergMarquardtOptimizer();
try {
final JumpDistanceCumulFunction function = new JumpDistanceCumulFunction(jdHistogram[0], jdHistogram[1], estimatedD);
// @formatter:off
final LeastSquaresProblem problem = new LeastSquaresBuilder()
    .maxEvaluations(Integer.MAX_VALUE)
    .maxIterations(3000)
    .start(function.guess())
    .target(function.getY())
    .weight(new DiagonalMatrix(function.getWeights()))
    .model(function, function::jacobian)
    .build();
// @formatter:on
final Optimum lvmSolution = lvmOptimizer.optimize(problem);
final double[] fitParams = lvmSolution.getPoint().toArray();
// True for an unweighted fit
ss = lvmSolution.getResiduals().dotProduct(lvmSolution.getResiduals());
// ss = calculateSumOfSquares(function.getY(), function.value(fitParams));
lastFitValue = fitValue = MathUtils.getAdjustedCoefficientOfDetermination(ss, MathUtils.getTotalSumOfSquares(function.getY()), function.x.length, 1);
final double[] coefficients = fitParams;
final double[] fractions = new double[] { 1 };
LoggerUtils.log(logger, Level.INFO, "Fit Jump distance (N=1) : %s, SS = %s, Adjusted R^2 = %s (%d evaluations)", formatD(fitParams[0]), MathUtils.rounded(ss, 4), MathUtils.rounded(fitValue, 4), lvmSolution.getEvaluations());
return new double[][] { coefficients, fractions };
} catch (final TooManyIterationsException ex) {
LoggerUtils.log(logger, Level.INFO, "LVM optimiser failed to fit (N=1) : Too many iterations : %s", ex.getMessage());
} catch (final ConvergenceException ex) {
LoggerUtils.log(logger, Level.INFO, "LVM optimiser failed to fit (N=1) : %s", ex.getMessage());
}
}
// Uses a weighted sum of n exponential functions, each function models a fraction of the
// particles.
// An LVM fit cannot constrain the parameters to keep the fractions from going below zero.
// Use the CustomPowell/CMAESOptimizer which supports bounded fitting.
final MixedJumpDistanceCumulFunctionMultivariate function = new MixedJumpDistanceCumulFunctionMultivariate(jdHistogram[0], jdHistogram[1], estimatedD, n);
final double[] lB = function.getLowerBounds();
int evaluations = 0;
PointValuePair constrainedSolution = null;
final MaxEval maxEval = new MaxEval(20000);
final CustomPowellOptimizer powellOptimizer = createCustomPowellOptimizer();
try {
// The Powell algorithm can use more general bounds: 0 - Infinity
constrainedSolution = powellOptimizer.optimize(maxEval, new ObjectiveFunction(function), new InitialGuess(function.guess()), new SimpleBounds(lB, function.getUpperBounds(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY)), new CustomPowellOptimizer.BasisStep(function.step()), GoalType.MINIMIZE);
evaluations = powellOptimizer.getEvaluations();
LoggerUtils.log(logger, Level.FINE, "Powell optimiser fit (N=%d) : SS = %f (%d evaluations)", n, constrainedSolution.getValue(), evaluations);
} catch (final TooManyEvaluationsException ex) {
LoggerUtils.log(logger, Level.INFO, "Powell optimiser failed to fit (N=%d) : Too many evaluations (%d)", n, powellOptimizer.getEvaluations());
} catch (final TooManyIterationsException ex) {
LoggerUtils.log(logger, Level.INFO, "Powell optimiser failed to fit (N=%d) : Too many iterations (%d)", n, powellOptimizer.getIterations());
} catch (final ConvergenceException ex) {
LoggerUtils.log(logger, Level.INFO, "Powell optimiser failed to fit (N=%d) : %s", n, ex.getMessage());
}
if (constrainedSolution == null) {
LoggerUtils.log(logger, Level.INFO, "Trying CMAES optimiser with restarts ...");
final double[] uB = function.getUpperBounds();
final SimpleBounds bounds = new SimpleBounds(lB, uB);
// The sigma determines the search range for the variables. It should be 1/3 of the initial
// search region.
final double[] s = new double[lB.length];
for (int i = 0; i < s.length; i++) {
s[i] = (uB[i] - lB[i]) / 3;
}
final OptimizationData sigma = new CMAESOptimizer.Sigma(s);
final OptimizationData popSize = new CMAESOptimizer.PopulationSize((int) (4 + Math.floor(3 * Math.log(function.x.length))));
// Iterate this for stability in the initial guess
final CMAESOptimizer cmaesOptimizer = createCmaesOptimizer();
for (int i = 0; i <= fitRestarts; i++) {
// Try from the initial guess
try {
final PointValuePair solution = cmaesOptimizer.optimize(new InitialGuess(function.guess()), new ObjectiveFunction(function), GoalType.MINIMIZE, bounds, sigma, popSize, maxEval);
if (constrainedSolution == null || solution.getValue() < constrainedSolution.getValue()) {
evaluations = cmaesOptimizer.getEvaluations();
constrainedSolution = solution;
LoggerUtils.log(logger, Level.FINE, "CMAES optimiser [%da] fit (N=%d) : SS = %f (%d evaluations)", i, n, solution.getValue(), evaluations);
}
} catch (final TooManyEvaluationsException ex) {
// No solution
}
if (constrainedSolution == null) {
continue;
}
// Try from the current optimum
try {
final PointValuePair solution = cmaesOptimizer.optimize(new InitialGuess(constrainedSolution.getPointRef()), new ObjectiveFunction(function), GoalType.MINIMIZE, bounds, sigma, popSize, maxEval);
if (solution.getValue() < constrainedSolution.getValue()) {
evaluations = cmaesOptimizer.getEvaluations();
constrainedSolution = solution;
LoggerUtils.log(logger, Level.FINE, "CMAES optimiser [%db] fit (N=%d) : SS = %f (%d evaluations)", i, n, solution.getValue(), evaluations);
}
} catch (final TooManyEvaluationsException ex) {
// No solution
}
}
if (constrainedSolution != null) {
// Re-optimise with Powell?
try {
final PointValuePair solution = powellOptimizer.optimize(maxEval, new ObjectiveFunction(function), new InitialGuess(constrainedSolution.getPointRef()), new SimpleBounds(lB, function.getUpperBounds(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY)), new CustomPowellOptimizer.BasisStep(function.step()), GoalType.MINIMIZE);
if (solution.getValue() < constrainedSolution.getValue()) {
evaluations = cmaesOptimizer.getEvaluations();
constrainedSolution = solution;
LoggerUtils.log(logger, Level.INFO, "Powell optimiser re-fit (N=%d) : SS = %f (%d evaluations)", n, constrainedSolution.getValue(), evaluations);
}
} catch (final TooManyEvaluationsException ex) {
// No solution
} catch (final TooManyIterationsException ex) {
// No solution
} catch (final ConvergenceException ex) {
// No solution
}
}
}
if (constrainedSolution == null) {
LoggerUtils.log(logger, Level.INFO, "Failed to fit N=%d", n);
return null;
}
double[] fitParams = constrainedSolution.getPointRef();
ss = constrainedSolution.getValue();
// TODO - Try a bounded BFGS optimiser
// Try and improve using a LVM fit
final MixedJumpDistanceCumulFunctionGradient functionGradient = new MixedJumpDistanceCumulFunctionGradient(jdHistogram[0], jdHistogram[1], estimatedD, n);
Optimum lvmSolution;
final LevenbergMarquardtOptimizer lvmOptimizer = new LevenbergMarquardtOptimizer();
try {
// @formatter:off
final LeastSquaresProblem problem = new LeastSquaresBuilder()
    .maxEvaluations(Integer.MAX_VALUE)
    .maxIterations(3000)
    .start(fitParams)
    .target(functionGradient.getY())
    .weight(new DiagonalMatrix(functionGradient.getWeights()))
    .model(functionGradient, functionGradient::jacobian)
    .build();
// @formatter:on
lvmSolution = lvmOptimizer.optimize(problem);
// True for an unweighted fit
final double ss = lvmSolution.getResiduals().dotProduct(lvmSolution.getResiduals());
// All fitted parameters must be above zero
if (ss < this.ss && MathUtils.min(lvmSolution.getPoint().toArray()) > 0) {
LoggerUtils.log(logger, Level.INFO, " Re-fitting improved the SS from %s to %s (-%s%%)", MathUtils.rounded(this.ss, 4), MathUtils.rounded(ss, 4), MathUtils.rounded(100 * (this.ss - ss) / this.ss, 4));
fitParams = lvmSolution.getPoint().toArray();
this.ss = ss;
evaluations += lvmSolution.getEvaluations();
}
} catch (final TooManyIterationsException ex) {
LoggerUtils.log(logger, Level.WARNING, "Failed to re-fit : Too many iterations : %s", ex.getMessage());
} catch (final ConvergenceException ex) {
LoggerUtils.log(logger, Level.WARNING, "Failed to re-fit : %s", ex.getMessage());
}
// Since the fractions must sum to one we subtract 1 degree of freedom from the number of
// parameters
fitValue = MathUtils.getAdjustedCoefficientOfDetermination(ss, MathUtils.getTotalSumOfSquares(function.getY()), function.x.length, fitParams.length - 1);
final double[] d = new double[n];
final double[] f = new double[n];
double sum = 0;
for (int i = 0; i < d.length; i++) {
f[i] = fitParams[i * 2];
sum += f[i];
d[i] = fitParams[i * 2 + 1];
}
for (int i = 0; i < f.length; i++) {
f[i] /= sum;
}
// Sort by coefficient size
sort(d, f);
final double[] coefficients = d;
final double[] fractions = f;
LoggerUtils.log(logger, Level.INFO, "Fit Jump distance (N=%d) : %s (%s), SS = %s, Adjusted R^2 = %s (%d evaluations)", n, formatD(d), format(f), MathUtils.rounded(ss, 4), MathUtils.rounded(fitValue, 4), evaluations);
if (isValid(d, f)) {
lastFitValue = fitValue;
return new double[][] { coefficients, fractions };
}
return null;
}
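The PC-PALM and jump-distance fits above all rank candidate models by the adjusted coefficient of determination. MathUtils.getAdjustedCoefficientOfDetermination is GDSC project code; the sketch below assumes the conventional definition (R^2 penalised by the number of fitted parameters), which may differ in detail from the project's implementation.
public class AdjustedR2Sketch {
  // Assumed conventional adjusted R^2: 1 - (SS_res / SS_tot) * (n - 1) / (n - p - 1)
  static double adjustedR2(double residualSs, double totalSs, int numPoints, int numParameters) {
    return 1 - (residualSs / totalSs) * ((numPoints - 1.0) / (numPoints - numParameters - 1.0));
  }

  public static void main(String[] args) {
    // Example: 50 points, residual SS 2.0, total SS 10.0, 3 fitted parameters
    System.out.println(adjustedR2(2.0, 10.0, 50, 3)); // ~0.787
  }
}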