Use of org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer in project tetrad by cmu-phil.
The class GeneralizedSemEstimator, method optimize.
private double[] optimize(MultivariateFunction function, double[] values, int optimizer) {
    PointValuePair pair;
    if (optimizer == 1) {
        // 0.01, 0.000001
        // 2.0D * FastMath.ulp(1.0D), 1e-8
        MultivariateOptimizer search = new PowellOptimizer(1e-7, 1e-7);
        pair = search.optimize(new InitialGuess(values), new ObjectiveFunction(function), GoalType.MINIMIZE, new MaxEval(100000));
    } else if (optimizer == 2) {
        MultivariateOptimizer search = new SimplexOptimizer(1e-7, 1e-7);
        pair = search.optimize(new InitialGuess(values), new ObjectiveFunction(function), GoalType.MINIMIZE, new MaxEval(100000), new NelderMeadSimplex(values.length));
    } else if (optimizer == 3) {
        int dim = values.length;
        int additionalInterpolationPoints = 0;
        final int numIterpolationPoints = 2 * dim + 1 + additionalInterpolationPoints;
        BOBYQAOptimizer search = new BOBYQAOptimizer(numIterpolationPoints);
        pair = search.optimize(new MaxEval(100000), new ObjectiveFunction(function), GoalType.MINIMIZE, new InitialGuess(values), SimpleBounds.unbounded(dim));
    } else if (optimizer == 4) {
        MultivariateOptimizer search = new CMAESOptimizer(3000000, .05, false, 0, 0, new MersenneTwister(), false, new SimplePointChecker<PointValuePair>(0.5, 0.5));
        pair = search.optimize(new MaxEval(30000), new ObjectiveFunction(function), GoalType.MINIMIZE, new InitialGuess(values), new CMAESOptimizer.Sigma(new double[values.length]), new CMAESOptimizer.PopulationSize(1000));
    } else if (optimizer == 5) {
        // 0.01, 0.000001
        // 2.0D * FastMath.ulp(1.0D), 1e-8
        MultivariateOptimizer search = new PowellOptimizer(.05, .05);
        pair = search.optimize(new InitialGuess(values), new ObjectiveFunction(function), GoalType.MINIMIZE, new MaxEval(100000));
    } else if (optimizer == 6) {
        MultivariateOptimizer search = new PowellOptimizer(1e-7, 1e-7);
        pair = search.optimize(new InitialGuess(values), new ObjectiveFunction(function), GoalType.MAXIMIZE, new MaxEval(10000));
    } else {
        throw new IllegalStateException();
    }
    return pair.getPoint();
}
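The second branch above shows the standard SimplexOptimizer call pattern in Commons Math 3: the optimizer constructor only takes convergence tolerances, while the objective, goal, starting point, evaluation budget and the simplex itself are passed as OptimizationData to optimize(). A minimal standalone sketch of that pattern follows; the class name and the quadratic objective are purely illustrative and not part of either project.

import org.apache.commons.math3.analysis.MultivariateFunction;
import org.apache.commons.math3.optim.InitialGuess;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.PointValuePair;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.nonlinear.scalar.ObjectiveFunction;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.NelderMeadSimplex;
import org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer;

public class SimplexExample {
    public static void main(String[] args) {
        // Illustrative objective: a simple quadratic with its minimum at (1, -2).
        MultivariateFunction f = point -> {
            double dx = point[0] - 1;
            double dy = point[1] + 2;
            return dx * dx + dy * dy;
        };
        // Relative and absolute convergence tolerances, as in the snippet above.
        SimplexOptimizer optimizer = new SimplexOptimizer(1e-7, 1e-7);
        PointValuePair result = optimizer.optimize(
                new MaxEval(100000),
                new ObjectiveFunction(f),
                GoalType.MINIMIZE,
                new InitialGuess(new double[] { 0, 0 }),
                // The simplex dimension must match the problem dimension; the
                // derivative-free SimplexOptimizer requires one to be supplied.
                new NelderMeadSimplex(2));
        System.out.println(java.util.Arrays.toString(result.getPoint()) + " -> " + result.getValue());
    }
}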
Use of org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer in project GDSC-SMLM by aherbert.
The class Fire, method findMin.
private static UnivariatePointValuePair findMin(UnivariatePointValuePair current, SimplexOptimizer optimiser, MultivariateFunction func, double qvalue) {
    try {
        final NelderMeadSimplex simplex = new NelderMeadSimplex(1);
        final double[] initialSolution = { qvalue };
        final PointValuePair solution = optimiser.optimize(new MaxEval(1000), new InitialGuess(initialSolution), simplex, new ObjectiveFunction(func), GoalType.MINIMIZE);
        final UnivariatePointValuePair next = (solution == null) ? null : new UnivariatePointValuePair(solution.getPointRef()[0], solution.getValue());
        if (next == null) {
            return current;
        }
        if (current != null) {
            return (next.getValue() < current.getValue()) ? next : current;
        }
        return next;
    } catch (final Exception ex) {
        return current;
    }
}
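This helper treats a one-dimensional search as a degenerate multivariate problem (NelderMeadSimplex(1)), and it returns whichever of the previous best and the new solution is lower, falling back to the previous best on failure. That makes it chain naturally into a multi-start search, which is how runQEstimation below drives it. A hedged sketch of that calling pattern, assuming it sits in the same class so the private helper and its imports are visible; the placeholder cost is illustrative, not the plugin's real objective:

private static UnivariatePointValuePair multiStartMin(MultivariateFunction cost, double initialQ) {
    // Tolerances mirror those used by the plugin elsewhere.
    final SimplexOptimizer optimiser = new SimplexOptimizer(1e-6, 1e-10);
    UnivariatePointValuePair best = null;
    // Restart from several scaled guesses and keep the best result found.
    for (final double scale : new double[] { 0.1, 0.5, 1, 2, 10 }) {
        best = findMin(best, optimiser, cost, initialQ * scale);
    }
    return best;
}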
Use of org.apache.commons.math3.optim.nonlinear.scalar.noderiv.SimplexOptimizer in project GDSC-SMLM by aherbert.
The class Fire, method runQEstimation.
@SuppressWarnings("null")
private void runQEstimation() {
    IJ.showStatus(pluginTitle + " ...");
    if (!showQEstimationInputDialog()) {
        return;
    }
    MemoryPeakResults inputResults = ResultsManager.loadInputResults(settings.inputOption, false, null, null);
    if (MemoryPeakResults.isEmpty(inputResults)) {
        IJ.error(pluginTitle, "No results could be loaded");
        return;
    }
    if (inputResults.getCalibration() == null) {
        IJ.error(pluginTitle, "The results are not calibrated");
        return;
    }
    inputResults = cropToRoi(inputResults);
    if (inputResults.size() < 2) {
        IJ.error(pluginTitle, "No results within the crop region");
        return;
    }
    initialise(inputResults, null);
    // We need localisation precision.
    // Build a histogram of the localisation precision.
    // Get the initial mean and SD and plot as a Gaussian.
    final PrecisionHistogram histogram = calculatePrecisionHistogram();
    if (histogram == null) {
        IJ.error(pluginTitle, "No localisation precision available.\n \nPlease choose " + PrecisionMethod.FIXED + " and enter a precision mean and SD.");
        return;
    }
    final StoredDataStatistics precision = histogram.precision;
    final double fourierImageScale = Settings.scaleValues[settings.imageScaleIndex];
    final int imageSize = Settings.imageSizeValues[settings.imageSizeIndex];
    // Create the image and compute the numerator of FRC.
    // Do not use the signal so results.size() is the number of localisations.
    IJ.showStatus("Computing FRC curve ...");
    final FireImages images = createImages(fourierImageScale, imageSize, false);
    // DEBUGGING - Save the two images to disk. Load the images into the Matlab
    // code that calculates the Q-estimation and make this plugin match the functionality.
    // IJ.save(new ImagePlus("i1", images.ip1), "/scratch/i1.tif");
    // IJ.save(new ImagePlus("i2", images.ip2), "/scratch/i2.tif");
    final Frc frc = new Frc();
    frc.setTrackProgress(progress);
    frc.setFourierMethod(fourierMethod);
    frc.setSamplingMethod(samplingMethod);
    frc.setPerimeterSamplingFactor(settings.perimeterSamplingFactor);
    final FrcCurve frcCurve = frc.calculateFrcCurve(images.ip1, images.ip2, images.nmPerPixel);
    if (frcCurve == null) {
        IJ.error(pluginTitle, "Failed to compute FRC curve");
        return;
    }
    IJ.showStatus("Running Q-estimation ...");
    // Note:
    // The method implemented here is based on Matlab code provided by Bernd Rieger.
    // The idea is to compute the spurious correlation component of the FRC Numerator
    // using an initial estimate of the distribution of the localisation precision
    // (assumed to be Gaussian). This component is the contribution of repeat
    // localisations of the same molecule to the numerator and is modelled as an
    // exponential decay (exp_decay). The component is scaled by the Q-value which
    // is the average number of times a molecule is seen in addition to the first time.
    // At large spatial frequencies the scaled component should match the numerator,
    // i.e. at high resolution (low FIRE number) the numerator is made up of repeat
    // localisations of the same molecule and not actual structure in the image.
    // The best fit is where the numerator equals the scaled component, i.e.
    // num / (q*exp_decay) == 1.
    // The FRC Numerator is plotted and Q can be determined by
    // adjusting Q and the precision mean and SD to maximise the cost function.
    // This can be done interactively by the user with the effect on the FRC curve
    // dynamically updated and displayed.
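    // Restated compactly (informal gloss of the note above; the q in "q*exp_decay"
    // there is presumably the Q-value, not the spatial frequency): at high spatial
    // frequency the model treats the numerator as Q * exp_decay, so the best Q is
    // the one that makes numerator / (Q * exp_decay) sit flat at 1 over the fitted
    // range. The cost functions optimised below reward exactly that flatness.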
    // Compute the scaled FRC numerator
    final double qNorm = (1 / frcCurve.mean1 + 1 / frcCurve.mean2);
    final double[] frcnum = new double[frcCurve.getSize()];
    for (int i = 0; i < frcnum.length; i++) {
        final FrcCurveResult r = frcCurve.get(i);
        frcnum[i] = qNorm * r.getNumerator() / r.getNumberOfSamples();
    }
    // Compute the spatial frequency and the region for curve fitting
    final double[] q = Frc.computeQ(frcCurve, false);
    int low = 0;
    int high = q.length;
    while (high > 0 && q[high - 1] > settings.maxQ) {
        high--;
    }
    while (low < q.length && q[low] < settings.minQ) {
        low++;
    }
    // Require we fit at least 10% of the curve
    if (high - low < q.length * 0.1) {
        IJ.error(pluginTitle, "Not enough points for Q estimation");
        return;
    }
    // Obtain initial estimate of Q plateau height and decay.
    // This can be done by fitting the precision histogram and then fixing the mean and sigma.
    // Or it can be done by allowing the precision to be sampled and the mean and sigma
    // become parameters for fitting.
    // Check if we can sample precision values
    final boolean sampleDecay = precision != null && settings.sampleDecay;
    double[] expDecay;
    if (sampleDecay) {
        // Random sample of precision values from the distribution is used to
        // construct the decay curve
        final int[] sample = RandomUtils.sample(10000, precision.getN(), UniformRandomProviders.create());
        final double four_pi2 = 4 * Math.PI * Math.PI;
        final double[] pre = new double[q.length];
        for (int i = 1; i < q.length; i++) {
            pre[i] = -four_pi2 * q[i] * q[i];
        }
        // Sample
        final int n = sample.length;
        final double[] hq = new double[n];
        for (int j = 0; j < n; j++) {
            // Scale to SR pixels
            double s2 = precision.getValue(sample[j]) / images.nmPerPixel;
            s2 *= s2;
            for (int i = 1; i < q.length; i++) {
                hq[i] += StdMath.exp(pre[i] * s2);
            }
        }
        for (int i = 1; i < q.length; i++) {
            hq[i] /= n;
        }
        expDecay = new double[q.length];
        expDecay[0] = 1;
        for (int i = 1; i < q.length; i++) {
            final double sinc_q = sinc(Math.PI * q[i]);
            expDecay[i] = sinc_q * sinc_q * hq[i];
        }
    } else {
        // Note: The sigma mean and std should be in the units of super-resolution
        // pixels so scale to SR pixels
        expDecay = computeExpDecay(histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, q);
    }
    // Smoothing
    double[] smooth;
    if (settings.loessSmoothing) {
        // Note: This computes the log then smooths it
        final double bandwidth = 0.1;
        final int robustness = 0;
        final double[] l = new double[expDecay.length];
        for (int i = 0; i < l.length; i++) {
            // Original Matlab code computes the log for each array.
            // This is equivalent to a single log on the fraction of the two.
            // Perhaps the two log method is more numerically stable.
            // l[i] = Math.log(Math.abs(frcnum[i])) - Math.log(exp_decay[i]);
            l[i] = Math.log(Math.abs(frcnum[i] / expDecay[i]));
        }
        try {
            final LoessInterpolator loess = new LoessInterpolator(bandwidth, robustness);
            smooth = loess.smooth(q, l);
        } catch (final Exception ex) {
            IJ.error(pluginTitle, "LOESS smoothing failed");
            return;
        }
    } else {
        // Note: This smooths the curve before computing the log
        final double[] norm = new double[expDecay.length];
        for (int i = 0; i < norm.length; i++) {
            norm[i] = frcnum[i] / expDecay[i];
        }
        // Median window of 5 == radius of 2
        final DoubleMedianWindow mw = DoubleMedianWindow.wrap(norm, 2);
        smooth = new double[expDecay.length];
        for (int i = 0; i < norm.length; i++) {
            smooth[i] = Math.log(Math.abs(mw.getMedian()));
            mw.increment();
        }
    }
    // Fit with quadratic to find the initial guess.
    // Note: example Matlab code frc_Qcorrection7.m identifies regions of the
    // smoothed log curve with low derivative and only fits those. The fit is
    // used for the final estimate. Fitting a subset with low derivative is not
    // implemented here since the initial estimate is subsequently optimised
    // to maximise a cost function.
    final Quadratic curve = new Quadratic();
    final SimpleCurveFitter fit = SimpleCurveFitter.create(curve, new double[2]);
    final WeightedObservedPoints points = new WeightedObservedPoints();
    for (int i = low; i < high; i++) {
        points.add(q[i], smooth[i]);
    }
    final double[] estimate = fit.fit(points.toList());
    double qvalue = StdMath.exp(estimate[0]);
    // This could be made an option. Just use for debugging
    final boolean debug = false;
    if (debug) {
        // Plot the initial fit and the fit curve
        final double[] qScaled = Frc.computeQ(frcCurve, true);
        final double[] line = new double[q.length];
        for (int i = 0; i < q.length; i++) {
            line[i] = curve.value(q[i], estimate);
        }
        final String title = pluginTitle + " Initial fit";
        final Plot plot = new Plot(title, "Spatial Frequency (nm^-1)", "FRC Numerator");
        final String label = String.format("Q = %.3f", qvalue);
        plot.addPoints(qScaled, smooth, Plot.LINE);
        plot.setColor(Color.red);
        plot.addPoints(qScaled, line, Plot.LINE);
        plot.setColor(Color.black);
        plot.addLabel(0, 0, label);
        ImageJUtils.display(title, plot, ImageJUtils.NO_TO_FRONT);
    }
    if (settings.fitPrecision) {
        // Q - Should this be optional?
        if (sampleDecay) {
            // If a sample of the precision was used to construct the data for the initial fit
            // then update the estimate using the fit result since it will be a better start point.
            histogram.sigma = precision.getStandardDeviation();
            // Normalise sum-of-squares to the SR pixel size
            final double meanSumOfSquares = (precision.getSumOfSquares() / (images.nmPerPixel * images.nmPerPixel)) / precision.getN();
            histogram.mean = images.nmPerPixel * Math.sqrt(meanSumOfSquares - estimate[1] / (4 * Math.PI * Math.PI));
        }
        // Do a multivariate fit ...
        final SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
        PointValuePair pair = null;
        final MultiPlateauness f = new MultiPlateauness(frcnum, q, low, high);
        final double[] initial = new double[] { histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, qvalue };
        pair = findMin(pair, opt, f, scale(initial, 0.1));
        pair = findMin(pair, opt, f, scale(initial, 0.5));
        pair = findMin(pair, opt, f, initial);
        pair = findMin(pair, opt, f, scale(initial, 2));
        pair = findMin(pair, opt, f, scale(initial, 10));
        if (pair != null) {
            final double[] point = pair.getPointRef();
            histogram.mean = point[0] * images.nmPerPixel;
            histogram.sigma = point[1] * images.nmPerPixel;
            qvalue = point[2];
        }
    } else {
        // If so then this should be optional.
        if (sampleDecay) {
            if (precisionMethod != PrecisionMethod.FIXED) {
                histogram.sigma = precision.getStandardDeviation();
                // Normalise sum-of-squares to the SR pixel size
                final double meanSumOfSquares = (precision.getSumOfSquares() / (images.nmPerPixel * images.nmPerPixel)) / precision.getN();
                histogram.mean = images.nmPerPixel * Math.sqrt(meanSumOfSquares - estimate[1] / (4 * Math.PI * Math.PI));
            }
            expDecay = computeExpDecay(histogram.mean / images.nmPerPixel, histogram.sigma / images.nmPerPixel, q);
        }
        // Estimate spurious component by promoting plateauness.
        // The Matlab code used random initial points for a Simplex optimiser.
        // A Brent line search should be pretty deterministic so do simple repeats.
        // However it will proceed downhill so if the initial point is wrong then
        // it will find a sub-optimal result.
        final UnivariateOptimizer o = new BrentOptimizer(1e-3, 1e-6);
        final Plateauness f = new Plateauness(frcnum, expDecay, low, high);
        UnivariatePointValuePair result = null;
        result = findMin(result, o, f, qvalue, 0.1);
        result = findMin(result, o, f, qvalue, 0.2);
        result = findMin(result, o, f, qvalue, 0.333);
        result = findMin(result, o, f, qvalue, 0.5);
        // Do some Simplex repeats as well
        final SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
        result = findMin(result, opt, f, qvalue * 0.1);
        result = findMin(result, opt, f, qvalue * 0.5);
        result = findMin(result, opt, f, qvalue);
        result = findMin(result, opt, f, qvalue * 2);
        result = findMin(result, opt, f, qvalue * 10);
        if (result != null) {
            qvalue = result.getPoint();
        }
    }
    final QPlot qplot = new QPlot(frcCurve, qvalue, low, high);
    // Interactive dialog to estimate Q (blinking events per fluorophore) using
    // sliders for the mean and standard deviation of the localisation precision.
    showQEstimationDialog(histogram, qplot, images.nmPerPixel);
    IJ.showStatus(pluginTitle + " complete");
}
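The Plateauness and MultiPlateauness cost classes optimised above are not reproduced on this page. As a rough illustration only (not the GDSC-SMLM implementation), a cost that "promotes plateauness" in the sense of the comments, i.e. penalises deviation of frcnum / (Q * exp_decay) from 1 over the fitted range, could look like the sketch below; it implements MultivariateFunction so it can be passed to findMin or directly to a SimplexOptimizer with GoalType.MINIMIZE.

import org.apache.commons.math3.analysis.MultivariateFunction;

/**
 * Illustrative only: a plateau-promoting cost in the spirit of the comments above.
 * For a candidate Q it returns the mean squared deviation of
 * frcnum[i] / (Q * expDecay[i]) from 1 over the fitted index range [low, high).
 * The real Plateauness class in GDSC-SMLM may differ in form and scaling.
 */
class PlateauCostSketch implements MultivariateFunction {
    private final double[] frcnum;
    private final double[] expDecay;
    private final int low;
    private final int high;

    PlateauCostSketch(double[] frcnum, double[] expDecay, int low, int high) {
        this.frcnum = frcnum;
        this.expDecay = expDecay;
        this.low = low;
        this.high = high;
    }

    @Override
    public double value(double[] point) {
        final double qvalue = point[0];
        double sum = 0;
        for (int i = low; i < high; i++) {
            final double d = frcnum[i] / (qvalue * expDecay[i]) - 1;
            sum += d * d;
        }
        // Lower is better: a flat ratio of 1 over the fit window gives zero cost.
        return sum / (high - low);
    }
}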