Use of org.apache.commons.math3.optim.univariate.UnivariatePointValuePair in project GDSC-SMLM by aherbert.
The class CustomPowellOptimizer, method doOptimize:
/** {@inheritDoc} */
@Override
protected PointValuePair doOptimize() {
final GoalType goal = getGoalType();
final double[] guess = getStartPoint();
final int n = guess.length;
// Mark when we have modified the basis vectors
boolean nonBasis = false;
double[][] direc = createBasisVectors(n);
final ConvergenceChecker<PointValuePair> checker = getConvergenceChecker();
//int resets = 0;
//PointValuePair solution = null;
//PointValuePair finalSolution = null;
//int solutionIter = 0, solutionEval = 0;
//double startValue = 0;
//try
//{
double[] x = guess;
// Ensure the point is within bounds
applyBounds(x);
double fVal = computeObjectiveValue(x);
//startValue = fVal;
double[] x1 = x.clone();
while (true) {
incrementIterationCount();
final double fX = fVal;
double fX2 = 0;
double delta = 0;
int bigInd = 0;
for (int i = 0; i < n; i++) {
fX2 = fVal;
final UnivariatePointValuePair optimum = line.search(x, direc[i]);
fVal = optimum.getValue();
x = newPoint(x, direc[i], optimum.getPoint());
if ((fX2 - fVal) > delta) {
delta = fX2 - fVal;
bigInd = i;
}
}
boolean stop = false;
if (positionChecker != null) {
// Check for convergence on the position
stop = positionChecker.converged(x1, x);
}
if (!stop) {
// Check if we have improved from an impossible position
if (Double.isInfinite(fX) || Double.isNaN(fX)) {
if (Double.isInfinite(fVal) || Double.isNaN(fVal)) {
// Nowhere to go
stop = true;
}
// else: this is better as we now have a value, so continue
} else {
stop = DoubleEquality.almostEqualRelativeOrAbsolute(fX, fVal, relativeThreshold, absoluteThreshold);
}
}
final PointValuePair previous = new PointValuePair(x1, fX);
final PointValuePair current = new PointValuePair(x, fVal);
if (!stop && checker != null) {
// User-defined stopping criteria.
stop = checker.converged(getIterations(), previous, current);
}
boolean reset = false;
if (stop) {
// Only allow convergence using the basis vectors, i.e. we cannot move along any dimension
if (basisConvergence && nonBasis) {
// Reset to the basis vectors and continue
reset = true;
//resets++;
} else {
//System.out.printf("Resets = %d\n", resets);
final PointValuePair answer;
if (goal == GoalType.MINIMIZE) {
answer = (fVal < fX) ? current : previous;
} else {
answer = (fVal > fX) ? current : previous;
}
return answer;
// XXX Debugging
// Continue the algorithm to see how far it goes
//if (solution == null)
//{
// solution = answer;
// solutionIter = getIterations();
// solutionEval = getEvaluations();
//}
//finalSolution = answer;
}
}
if (reset) {
direc = createBasisVectors(n);
nonBasis = false;
}
final double[] d = new double[n];
final double[] x2 = new double[n];
for (int i = 0; i < n; i++) {
d[i] = x[i] - x1[i];
x2[i] = x[i] + d[i];
}
applyBounds(x2);
x1 = x.clone();
fX2 = computeObjectiveValue(x2);
// See if we can continue along the overall search direction to find a better value
if (fX > fX2) {
// Check if:
// 1. The decrease along the average direction was not due to any single direction's decrease
// 2. There is a substantial second derivative along the average direction and we are close to
// its minimum
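// In symbols, the test computed below is the standard Powell extrapolation
// criterion (as in the Numerical Recipes powell routine): the average direction
// is searched, and substituted for the direction of largest decrease, only if
// 2*(fX + fX2 - 2*fVal) * (fX - fVal - delta)^2 < delta * (fX - fX2)^2
// where fX is the value before the sweep, fVal the value after it, fX2 the
// value at the extrapolated point and delta the largest single-direction drop.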
double t = 2 * (fX + fX2 - 2 * fVal);
double temp = fX - fVal - delta;
t *= temp * temp;
temp = fX - fX2;
t -= delta * temp * temp;
if (t < 0.0) {
final UnivariatePointValuePair optimum = line.search(x, d);
fVal = optimum.getValue();
if (reset) {
x = newPoint(x, d, optimum.getPoint());
continue;
} else {
final double[][] result = newPointAndDirection(x, d, optimum.getPoint());
x = result[0];
final int lastInd = n - 1;
direc[bigInd] = direc[lastInd];
direc[lastInd] = result[1];
nonBasis = true;
}
}
}
}
//}
//catch (RuntimeException e)
//{
// if (solution != null)
// {
// System.out.printf("Start %f : Initial %f (%d,%d) : Final %f (%d,%d) : %f\n", startValue,
// solution.getValue(), solutionIter, solutionEval, finalSolution.getValue(), getIterations(),
// getEvaluations(), DoubleEquality.relativeError(finalSolution.getValue(), solution.getValue()));
// return finalSolution;
// }
// throw e;
//}
}
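For context, a minimal self-contained sketch (not from GDSC-SMLM; the quadratic objective is an illustrative assumption) of how the stock Commons Math PowellOptimizer is driven. The custom optimizer above layers bound clipping, a position-based convergence check and the basis-vector reset option on top of this basic optimize() pattern.

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.PowellOptimizer;

public class PowellExample {
  public static void main(String[] args) {
    // Simple convex objective: (x - 3)^2 + (y + 1)^2
    MultivariateFunction f = point -> {
      double dx = point[0] - 3;
      double dy = point[1] + 1;
      return dx * dx + dy * dy;
    };
    // Relative and absolute tolerances for the internal Brent line search
    PowellOptimizer optimizer = new PowellOptimizer(1e-8, 1e-10);
    PointValuePair result = optimizer.optimize(
        new MaxEval(10000),
        new ObjectiveFunction(f),
        GoalType.MINIMIZE,
        new InitialGuess(new double[] { 0, 0 }));
    System.out.printf("Minimum %f at %s%n",
        result.getValue(), java.util.Arrays.toString(result.getPoint()));
  }
}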
Use of org.apache.commons.math3.optim.univariate.UnivariatePointValuePair in project GDSC-SMLM by aherbert.
The class FIRE, method findMin:
private UnivariatePointValuePair findMin(UnivariatePointValuePair current, SimplexOptimizer o, MultivariateFunction f, double qValue) {
try {
NelderMeadSimplex simplex = new NelderMeadSimplex(1);
double[] initialSolution = { qValue };
PointValuePair solution = o.optimize(new MaxEval(1000), new InitialGuess(initialSolution), simplex, new ObjectiveFunction(f), GoalType.MINIMIZE);
UnivariatePointValuePair next = (solution == null) ? null : new UnivariatePointValuePair(solution.getPointRef()[0], solution.getValue());
if (next == null)
return current;
//System.out.printf("Simplex [%f] %f = %f\n", qValue, next.getPoint(), next.getValue());
if (current != null)
return (next.getValue() < current.getValue()) ? next : current;
return next;
} catch (Exception e) {
return current;
}
}
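A self-contained version of the 1-D Nelder-Mead pattern used above (illustrative only; the toy cost stands in for the Plateauness/MultiPlateauness cost that FIRE actually minimises):

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.NelderMeadSimplex;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer;
import org.apache.commons.math3.optim.univariate.UnivariatePointValuePair;

public class SimplexQExample {
  public static void main(String[] args) {
    // Toy 1-D cost with its minimum at q = 2
    MultivariateFunction f = p -> (p[0] - 2) * (p[0] - 2) + 0.5;
    SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
    PointValuePair solution = opt.optimize(
        new MaxEval(1000),
        new InitialGuess(new double[] { 0.5 }),
        new NelderMeadSimplex(1),
        new ObjectiveFunction(f),
        GoalType.MINIMIZE);
    // Convert to the univariate result type used throughout FIRE
    UnivariatePointValuePair next = new UnivariatePointValuePair(
        solution.getPointRef()[0], solution.getValue());
    System.out.printf("q = %f, cost = %f%n", next.getPoint(), next.getValue());
  }
}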
Use of org.apache.commons.math3.optim.univariate.UnivariatePointValuePair in project GDSC-SMLM by aherbert.
The class FIRE, method findMin:
private UnivariatePointValuePair findMin(UnivariatePointValuePair current, UnivariateOptimizer o, UnivariateFunction f, double qValue, double factor) {
try {
BracketFinder bracket = new BracketFinder();
bracket.search(f, GoalType.MINIMIZE, qValue * factor, qValue / factor);
UnivariatePointValuePair next = o.optimize(GoalType.MINIMIZE, new MaxEval(3000), new SearchInterval(bracket.getLo(), bracket.getHi(), bracket.getMid()), new UnivariateObjectiveFunction(f));
if (next == null)
return current;
//System.out.printf("LineMin [%.1f] %f = %f\n", factor, next.getPoint(), next.getValue());
if (current != null)
return (next.getValue() < current.getValue()) ? next : current;
return next;
} catch (Exception e) {
return current;
}
}
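The same guarded bracket-then-Brent pattern as a runnable sketch (illustrative toy cost; the tolerances and start points mirror the snippet above):

import org.apache.commons.math3.analysis.UnivariateFunction;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.univariate.BracketFinder;
import org.apache.commons.math3.optim.univariate.BrentOptimizer;
import org.apache.commons.math3.optim.univariate.SearchInterval;
import org.apache.commons.math3.optim.univariate.UnivariateObjectiveFunction;
import org.apache.commons.math3.optim.univariate.UnivariatePointValuePair;

public class BracketedBrentExample {
  public static void main(String[] args) {
    // Toy 1-D cost with its minimum at q = 2
    UnivariateFunction f = q -> (q - 2) * (q - 2) + 0.5;
    double qValue = 1.0, factor = 0.5;
    // Walk downhill from two start points to bracket the minimum
    BracketFinder bracket = new BracketFinder();
    bracket.search(f, GoalType.MINIMIZE, qValue * factor, qValue / factor);
    // Refine inside the bracket with Brent's method
    BrentOptimizer brent = new BrentOptimizer(1e-3, 1e-6);
    UnivariatePointValuePair optimum = brent.optimize(
        GoalType.MINIMIZE,
        new MaxEval(3000),
        new SearchInterval(bracket.getLo(), bracket.getHi(), bracket.getMid()),
        new UnivariateObjectiveFunction(f));
    System.out.printf("q = %f, cost = %f%n", optimum.getPoint(), optimum.getValue());
  }
}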
Use of org.apache.commons.math3.optim.univariate.UnivariatePointValuePair in project GDSC-SMLM by aherbert.
The class BoundedNonLinearConjugateGradientOptimizer, method doOptimize:
/** {@inheritDoc} */
@Override
protected PointValuePair doOptimize() {
final ConvergenceChecker<PointValuePair> checker = getConvergenceChecker();
final double[] point = getStartPoint();
final GoalType goal = getGoalType();
final int n = point.length;
sign = (goal == GoalType.MINIMIZE) ? -1 : 1;
double[] unbounded = point.clone();
applyBounds(point);
double[] r = computeObjectiveGradient(point);
checkGradients(r, unbounded);
if (goal == GoalType.MINIMIZE) {
for (int i = 0; i < n; i++) {
r[i] = -r[i];
}
}
// Initial search direction.
double[] steepestDescent = preconditioner.precondition(point, r);
double[] searchDirection = steepestDescent.clone();
double delta = 0;
for (int i = 0; i < n; ++i) {
delta += r[i] * searchDirection[i];
}
// Used for non-gradient based line search
LineSearch line = null;
double rel = 1e-6;
double abs = 1e-10;
if (getConvergenceChecker() instanceof SimpleValueChecker) {
rel = ((SimpleValueChecker) getConvergenceChecker()).getRelativeThreshold();
abs = ((SimpleValueChecker) getConvergenceChecker()).getAbsoluteThreshold();
}
line = new LineSearch(Math.sqrt(rel), Math.sqrt(abs));
PointValuePair current = null;
int maxEval = getMaxEvaluations();
while (true) {
incrementIterationCount();
final double objective = computeObjectiveValue(point);
PointValuePair previous = current;
current = new PointValuePair(point, objective);
if (previous != null && checker.converged(getIterations(), previous, current)) {
// We have found an optimum.
return current;
}
double step;
if (useGradientLineSearch) {
// Classic code using the gradient function for the line search:
// Find the optimal step in the search direction.
final UnivariateFunction lsf = new LineSearchFunction(point, searchDirection);
final double uB;
try {
uB = findUpperBound(lsf, 0, initialStep);
// Check if the bracket found a minimum. Otherwise just move to the new point.
if (noBracket)
step = uB;
else {
// XXX The last parameter is set to a value close to zero in order to
// work around the divergence problem in the "testCircleFitting"
// unit test (see MATH-439).
//System.out.printf("Bracket %f - %f - %f\n", 0., 1e-15, uB);
step = solver.solve(maxEval, lsf, 0, uB, 1e-15);
// Subtract used up evaluations.
maxEval -= solver.getEvaluations();
}
} catch (MathIllegalStateException e) {
//System.out.printf("Failed to bracket %s @ %s\n", Arrays.toString(point), Arrays.toString(searchDirection));
// Line search without gradient (as per Powell optimiser)
final UnivariatePointValuePair optimum = line.search(point, searchDirection);
step = optimum.getPoint();
//throw e;
}
} else {
// Line search without gradient (as per Powell optimiser)
final UnivariatePointValuePair optimum = line.search(point, searchDirection);
step = optimum.getPoint();
}
//System.out.printf("Step = %f x %s\n", step, Arrays.toString(searchDirection));
for (int i = 0; i < point.length; ++i) {
point[i] += step * searchDirection[i];
}
unbounded = point.clone();
applyBounds(point);
r = computeObjectiveGradient(point);
checkGradients(r, unbounded);
if (goal == GoalType.MINIMIZE) {
for (int i = 0; i < n; ++i) {
r[i] = -r[i];
}
}
// Compute beta.
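// Both formulas below use the preconditioned steepest descent vectors s = P*r:
// Fletcher-Reeves: beta = (r_new . s_new) / (r_old . s_old) = delta / deltaOld
// Polak-Ribiere: beta = (r_new . (s_new - s_old)) / (r_old . s_old) = (delta - deltaMid) / deltaOld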
final double deltaOld = delta;
final double[] newSteepestDescent = preconditioner.precondition(point, r);
delta = 0;
for (int i = 0; i < n; ++i) {
delta += r[i] * newSteepestDescent[i];
}
if (delta == 0)
return new PointValuePair(point, computeObjectiveValue(point));
final double beta;
switch(updateFormula) {
case FLETCHER_REEVES:
beta = delta / deltaOld;
break;
case POLAK_RIBIERE:
double deltaMid = 0;
for (int i = 0; i < r.length; ++i) {
deltaMid += r[i] * steepestDescent[i];
}
beta = (delta - deltaMid) / deltaOld;
break;
default:
// Should never happen.
throw new MathInternalError();
}
steepestDescent = newSteepestDescent;
// Compute conjugate search direction.
if (getIterations() % n == 0 || beta < 0) {
// Break conjugation: reset search direction.
searchDirection = steepestDescent.clone();
} else {
// Compute new conjugate search direction.
for (int i = 0; i < n; ++i) {
searchDirection[i] = steepestDescent[i] + beta * searchDirection[i];
}
}
// The gradient has already been adjusted for the search direction
checkGradients(searchDirection, unbounded, -sign);
}
}
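For context, a minimal sketch (not project code; the objective and its analytic gradient are illustrative assumptions) of the stock Commons Math conjugate gradient optimizer that the bounded variant above closely mirrors, minus the bound clipping and the gradient-free fall-back line search:

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.analysis.MultivariateVectorFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.SimpleValueChecker;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunctionGradient;
import org.apache.commons.math3.optim.nonlinear.scalar.gradient.NonLinearConjugateGradientOptimizer;

public class ConjugateGradientExample {
  public static void main(String[] args) {
    // Objective (x - 3)^2 + (y + 1)^2 and its analytic gradient
    MultivariateFunction f = p -> Math.pow(p[0] - 3, 2) + Math.pow(p[1] + 1, 2);
    MultivariateVectorFunction grad = p -> new double[] { 2 * (p[0] - 3), 2 * (p[1] + 1) };
    NonLinearConjugateGradientOptimizer optimizer = new NonLinearConjugateGradientOptimizer(
        NonLinearConjugateGradientOptimizer.Formula.POLAK_RIBIERE,
        new SimpleValueChecker(1e-6, 1e-10));
    PointValuePair result = optimizer.optimize(
        new MaxEval(10000),
        new ObjectiveFunction(f),
        new ObjectiveFunctionGradient(grad),
        GoalType.MINIMIZE,
        new InitialGuess(new double[] { 0, 0 }));
    System.out.printf("Minimum %f at (%f, %f)%n",
        result.getValue(), result.getPoint()[0], result.getPoint()[1]);
  }
}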
Use of org.apache.commons.math3.optim.univariate.UnivariatePointValuePair in project GDSC-SMLM by aherbert.
The class FIRE, method runQEstimation:
private void runQEstimation() {
IJ.showStatus(TITLE + " ...");
if (!showQEstimationInputDialog())
return;
MemoryPeakResults results = ResultsManager.loadInputResults(inputOption, false);
if (results == null || results.size() == 0) {
IJ.error(TITLE, "No results could be loaded");
return;
}
if (results.getCalibration() == null) {
IJ.error(TITLE, "The results are not calibrated");
return;
}
results = cropToRoi(results);
if (results.size() < 2) {
IJ.error(TITLE, "No results within the crop region");
return;
}
initialise(results, null);
// We need localisation precision.
// Build a histogram of the localisation precision.
// Get the initial mean and SD and plot as a Gaussian.
PrecisionHistogram histogram = calculatePrecisionHistogram();
if (histogram == null) {
IJ.error(TITLE, "No localisation precision available.\n \nPlease choose " + PrecisionMethod.FIXED + " and enter a precision mean and SD.");
return;
}
StoredDataStatistics precision = histogram.precision;
//String name = results.getName();
double fourierImageScale = SCALE_VALUES[imageScaleIndex];
int imageSize = IMAGE_SIZE_VALUES[imageSizeIndex];
// Create the image and compute the numerator of FRC.
// Do not use the signal so results.size() is the number of localisations.
IJ.showStatus("Computing FRC curve ...");
FireImages images = createImages(fourierImageScale, imageSize, false);
// DEBUGGING - Save the two images to disk. Load the images into the Matlab
// code that calculates the Q-estimation and make this plugin match the functionality.
//IJ.save(new ImagePlus("i1", images.ip1), "/scratch/i1.tif");
//IJ.save(new ImagePlus("i2", images.ip2), "/scratch/i2.tif");
FRC frc = new FRC();
frc.progress = progress;
frc.setFourierMethod(fourierMethod);
frc.setSamplingMethod(samplingMethod);
frc.setPerimeterSamplingFactor(perimeterSamplingFactor);
FRCCurve frcCurve = frc.calculateFrcCurve(images.ip1, images.ip2, images.nmPerPixel);
if (frcCurve == null) {
IJ.error(TITLE, "Failed to compute FRC curve");
return;
}
IJ.showStatus("Running Q-estimation ...");
// Note:
// The method implemented here is based on Matlab code provided by Bernd Rieger.
// The idea is to compute the spurious correlation component of the FRC Numerator
// using an initial estimate of the distribution of the localisation precision (assumed
// to be Gaussian). This component is the contribution of repeat localisations of
// the same molecule to the numerator and is modelled as an exponential decay
// (exp_decay). The component is scaled by the Q-value which
// is the average number of times a molecule is seen in addition to the first time.
// At large spatial frequencies the scaled component should match the numerator,
// i.e. at high resolution (low FIRE number) the numerator is made up of repeat
// localisations of the same molecule and not actual structure in the image.
// The best fit is where the numerator equals the scaled component, i.e. num / (Q*exp_decay) == 1.
// The FRC Numerator is plotted and Q can be determined by
// adjusting Q and the precision mean and SD to maximise the cost function.
// This can be done interactively by the user with the effect on the FRC curve
// dynamically updated and displayed.
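// In symbols (with the precision assumed Gaussian), the spurious component is
// Q * H(q) with H(q) = sinc^2(pi*q) * < exp(-4*pi^2*sigma^2*q^2) >
// (sigma in super-resolution pixels, <> averaged over the precision distribution),
// and Q is chosen so that frcnum(q) / (Q * H(q)) flattens towards 1 at high
// spatial frequency.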
// Compute the scaled FRC numerator
double qNorm = (1 / frcCurve.mean1 + 1 / frcCurve.mean2);
double[] frcnum = new double[frcCurve.getSize()];
for (int i = 0; i < frcnum.length; i++) {
FRCCurveResult r = frcCurve.get(i);
frcnum[i] = qNorm * r.getNumerator() / r.getNumberOfSamples();
}
// Compute the spatial frequency and the region for curve fitting
double[] q = FRC.computeQ(frcCurve, false);
int low = 0, high = q.length;
while (high > 0 && q[high - 1] > maxQ) high--;
while (low < q.length && q[low] < minQ) low++;
// Require we fit at least 10% of the curve
if (high - low < q.length * 0.1) {
IJ.error(TITLE, "Not enough points for Q estimation");
return;
}
// Obtain initial estimate of Q plateau height and decay.
// This can be done by fitting the precision histogram and then fixing the mean and sigma.
// Or it can be done by allowing the precision to be sampled and the mean and sigma
// become parameters for fitting.
// Check if we can sample precision values
boolean sampleDecay = precision != null && FIRE.sampleDecay;
double[] exp_decay;
if (sampleDecay) {
// Random sample of precision values from the distribution is used to
// construct the decay curve
int[] sample = Random.sample(10000, precision.getN(), new Well19937c());
final double four_pi2 = 4 * Math.PI * Math.PI;
double[] pre = new double[q.length];
for (int i = 1; i < q.length; i++) pre[i] = -four_pi2 * q[i] * q[i];
// Sample
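// For each sampled precision s (converted to SR pixels below) accumulate
// exp(-4*pi^2*q^2*s^2) into hq; dividing by n afterwards gives the average
// decay term < exp(-4*pi^2*sigma^2*q^2) > used to build exp_decay.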
final int n = sample.length;
double[] hq = new double[n];
for (int j = 0; j < n; j++) {
// Scale to SR pixels
double s2 = precision.getValue(sample[j]) / images.nmPerPixel;
s2 *= s2;
for (int i = 1; i < q.length; i++) hq[i] += FastMath.exp(pre[i] * s2);
}
for (int i = 1; i < q.length; i++) hq[i] /= n;
exp_decay = new double[q.length];
exp_decay[0] = 1;
for (int i = 1; i < q.length; i++) {
double sinc_q = sinc(Math.PI * q[i]);
exp_decay[i] = sinc_q * sinc_q * hq[i];
}
} else {
// Note: The precision mean and SD should be in units of super-resolution
// pixels, so scale to SR pixels
exp_decay = computeExpDecay(histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, q);
}
// Smoothing
double[] smooth;
if (loessSmoothing) {
// Note: This computes the log then smooths it
double bandwidth = 0.1;
int robustness = 0;
double[] l = new double[exp_decay.length];
for (int i = 0; i < l.length; i++) {
// Original Matlab code computes the log for each array.
// This is equivalent to a single log on the fraction of the two.
// Perhaps the two log method is more numerically stable.
//l[i] = Math.log(Math.abs(frcnum[i])) - Math.log(exp_decay[i]);
l[i] = Math.log(Math.abs(frcnum[i] / exp_decay[i]));
}
try {
LoessInterpolator loess = new LoessInterpolator(bandwidth, robustness);
smooth = loess.smooth(q, l);
} catch (Exception e) {
IJ.error(TITLE, "LOESS smoothing failed");
return;
}
} else {
// Note: This smooths the curve before computing the log
double[] norm = new double[exp_decay.length];
for (int i = 0; i < norm.length; i++) {
norm[i] = frcnum[i] / exp_decay[i];
}
// Median window of 5 == radius of 2
MedianWindow mw = new MedianWindow(norm, 2);
smooth = new double[exp_decay.length];
for (int i = 0; i < norm.length; i++) {
smooth[i] = Math.log(Math.abs(mw.getMedian()));
mw.increment();
}
}
// Fit with quadratic to find the initial guess.
// Note: example Matlab code frc_Qcorrection7.m identifies regions of the
// smoothed log curve with low derivative and only fits those. The fit is
// used for the final estimate. Fitting a subset with low derivative is not
// implemented here since the initial estimate is subsequently optimised
// to maximise a cost function.
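// Over the fitted region the smoothed log ratio should be roughly log(Q) plus
// a residual term proportional to q^2 (any mismatch in the assumed precision),
// so the constant term of the quadratic approximates log(Q) and the initial
// estimate is recovered below as qValue = exp(estimate[0]).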
Quadratic curve = new Quadratic();
SimpleCurveFitter fit = SimpleCurveFitter.create(curve, new double[2]);
WeightedObservedPoints points = new WeightedObservedPoints();
for (int i = low; i < high; i++) points.add(q[i], smooth[i]);
double[] estimate = fit.fit(points.toList());
double qValue = FastMath.exp(estimate[0]);
//System.out.printf("Initial q-estimate = %s => %.3f\n", Arrays.toString(estimate), qValue);
// This could be made an option. Just use for debugging
boolean debug = false;
if (debug) {
// Plot the initial fit and the fit curve
double[] qScaled = FRC.computeQ(frcCurve, true);
double[] line = new double[q.length];
for (int i = 0; i < q.length; i++) line[i] = curve.value(q[i], estimate);
String title = TITLE + " Initial fit";
Plot2 plot = new Plot2(title, "Spatial Frequency (nm^-1)", "FRC Numerator");
String label = String.format("Q = %.3f", qValue);
plot.addPoints(qScaled, smooth, Plot.LINE);
plot.setColor(Color.red);
plot.addPoints(qScaled, line, Plot.LINE);
plot.setColor(Color.black);
plot.addLabel(0, 0, label);
Utils.display(title, plot, Utils.NO_TO_FRONT);
}
if (fitPrecision) {
// Q - Should this be optional?
if (sampleDecay) {
// If a sample of the precision was used to construct the data for the initial fit
// then update the estimate using the fit result since it will be a better start point.
histogram.sigma = precision.getStandardDeviation();
// Normalise sum-of-squares to the SR pixel size
double meanSumOfSquares = (precision.getSumOfSquares() / (images.nmPerPixel * images.nmPerPixel)) / precision.getN();
histogram.mean = images.nmPerPixel * Math.sqrt(meanSumOfSquares - estimate[1] / (4 * Math.PI * Math.PI));
}
// Do a multivariate fit ...
SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
PointValuePair p = null;
MultiPlateauness f = new MultiPlateauness(frcnum, q, low, high);
double[] initial = new double[] { histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, qValue };
p = findMin(p, opt, f, scale(initial, 0.1));
p = findMin(p, opt, f, scale(initial, 0.5));
p = findMin(p, opt, f, initial);
p = findMin(p, opt, f, scale(initial, 2));
p = findMin(p, opt, f, scale(initial, 10));
if (p != null) {
double[] point = p.getPointRef();
histogram.mean = point[0] * images.nmPerPixel;
histogram.sigma = point[1] * images.nmPerPixel;
qValue = point[2];
}
} else {
// If so then this should be optional.
if (sampleDecay) {
if (precisionMethod != PrecisionMethod.FIXED) {
histogram.sigma = precision.getStandardDeviation();
// Normalise sum-of-squares to the SR pixel size
double meanSumOfSquares = (precision.getSumOfSquares() / (images.nmPerPixel * images.nmPerPixel)) / precision.getN();
histogram.mean = images.nmPerPixel * Math.sqrt(meanSumOfSquares - estimate[1] / (4 * Math.PI * Math.PI));
}
exp_decay = computeExpDecay(histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, q);
}
// Estimate spurious component by promoting plateauness.
// The Matlab code used random initial points for a Simplex optimiser.
// A Brent line search should be pretty deterministic so do simple repeats.
// However it will proceed downhill so if the initial point is wrong then
// it will find a sub-optimal result.
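// Each call below starts the bracket from a different pair of points around the
// current qValue; findMin keeps whichever bracket + Brent run returns the lowest
// cost, guarding against a single poor start point.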
UnivariateOptimizer o = new BrentOptimizer(1e-3, 1e-6);
Plateauness f = new Plateauness(frcnum, exp_decay, low, high);
UnivariatePointValuePair p = null;
p = findMin(p, o, f, qValue, 0.1);
p = findMin(p, o, f, qValue, 0.2);
p = findMin(p, o, f, qValue, 0.333);
p = findMin(p, o, f, qValue, 0.5);
// Do some Simplex repeats as well
SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
p = findMin(p, opt, f, qValue * 0.1);
p = findMin(p, opt, f, qValue * 0.5);
p = findMin(p, opt, f, qValue);
p = findMin(p, opt, f, qValue * 2);
p = findMin(p, opt, f, qValue * 10);
if (p != null)
qValue = p.getPoint();
}
QPlot qplot = new QPlot(frcCurve, qValue, low, high);
// Interactive dialog to estimate Q (blinking events per fluorophore) using
// sliders for the mean and standard deviation of the localisation precision.
showQEstimationDialog(histogram, qplot, frcCurve, images.nmPerPixel);
IJ.showStatus(TITLE + " complete");
}