Use of uk.ac.sussex.gdsc.core.utils.StoredDataStatistics in project GDSC-SMLM by aherbert.
The class BenchmarkFit, method runFit.
private void runFit() {
  // Initialise the answer.
  answer[Gaussian2DFunction.BACKGROUND] = benchmarkParameters.getBackground();
  answer[Gaussian2DFunction.SIGNAL] = benchmarkParameters.getSignal();
  answer[Gaussian2DFunction.X_POSITION] = benchmarkParameters.x;
  answer[Gaussian2DFunction.Y_POSITION] = benchmarkParameters.y;
  answer[Gaussian2DFunction.Z_POSITION] = benchmarkParameters.z;
  answer[Gaussian2DFunction.X_SD] = benchmarkParameters.sd / benchmarkParameters.pixelPitch;
  answer[Gaussian2DFunction.Y_SD] = benchmarkParameters.sd / benchmarkParameters.pixelPitch;

  // Set up the fit region. Always round down since 0.5 is the centre of the pixel.
  final int x = (int) benchmarkParameters.x;
  final int y = (int) benchmarkParameters.y;
  region = new Rectangle(x - regionSize, y - regionSize, 2 * regionSize + 1, 2 * regionSize + 1);
  if (!new Rectangle(0, 0, imp.getWidth(), imp.getHeight()).contains(region)) {
    // Check if the region is too large by only 1 pixel
    if (region.width <= imp.getWidth() + 1 && region.height <= imp.getHeight() + 1) {
      ImageJUtils.log("Adjusting region %s to fit within image bounds (%dx%d)", region.toString(),
          imp.getWidth(), imp.getHeight());
      region = new Rectangle(0, 0, imp.getWidth(), imp.getHeight());
    } else {
      IJ.error(TITLE, "Fit region does not fit within the image");
      return;
    }
  }

  // Adjust the centre & account for 0.5 pixel offset during fitting
  answer[Gaussian2DFunction.X_POSITION] -= (region.x + 0.5);
  answer[Gaussian2DFunction.Y_POSITION] -= (region.y + 0.5);

  // Configure for fitting
  fitConfig.setBackgroundFitting(backgroundFitting);
  fitConfig.setNotSignalFitting(!signalFitting);
  fitConfig.setComputeDeviations(false);

  // Create the camera model
  CameraModel cameraModel = fitConfig.getCameraModel();
  // Crop for speed. Reset origin first so the region is within the model.
  cameraModel.setOrigin(0, 0);
  cameraModel = cameraModel.crop(region, false);

  final ImageStack stack = imp.getImageStack();
  final int totalFrames = benchmarkParameters.frames;

  // Create a pool of workers
  final int nThreads = Prefs.getThreads();
  final BlockingQueue<Integer> jobs = new ArrayBlockingQueue<>(nThreads * 2);
  final List<Worker> workers = new LinkedList<>();
  final List<Thread> threads = new LinkedList<>();
  final Ticker ticker = ImageJUtils.createTicker(totalFrames, nThreads, "Fitting frames ...");
  for (int i = 0; i < nThreads; i++) {
    final Worker worker = new Worker(jobs, stack, region, fitConfig, cameraModel, ticker);
    final Thread t = new Thread(worker);
    workers.add(worker);
    threads.add(t);
    t.start();
  }

  // Store all the fitting results
  results = new double[totalFrames * startPoints.length][];
  resultsTime = new long[results.length];

  // Fit the frames
  for (int i = 0; i < totalFrames; i++) {
    // Only fit if there were simulated photons
    if (benchmarkParameters.framePhotons[i] > 0) {
      put(jobs, i);
    }
  }

  // Finish all the worker threads by passing in a poison-pill job (-1)
  for (int i = 0; i < threads.size(); i++) {
    put(jobs, -1);
  }

  // Wait for all to finish
  for (int i = 0; i < threads.size(); i++) {
    try {
      threads.get(i).join();
    } catch (final InterruptedException ex) {
      Thread.currentThread().interrupt();
      throw new ConcurrentRuntimeException(ex);
    }
  }
  threads.clear();

  if (hasOffsetXy()) {
    ImageJUtils.log(TITLE + ": CoM within start offset = %d / %d (%s%%)", comValid.intValue(),
        totalFrames, MathUtils.rounded((100.0 * comValid.intValue()) / totalFrames));
  }

  ImageJUtils.finished("Collecting results ...");

  // Collect the results
  Statistics[] stats = null;
  for (int i = 0; i < workers.size(); i++) {
    final Statistics[] next = workers.get(i).stats;
    if (stats == null) {
      stats = next;
      continue;
    }
    for (int j = 0; j < next.length; j++) {
      stats[j].add(next[j]);
    }
  }
  workers.clear();
  Objects.requireNonNull(stats, "No statistics were computed");

  // Show a table of the results
  summariseResults(stats, cameraModel);

  // Optionally show histograms
  if (showHistograms) {
    IJ.showStatus("Calculating histograms ...");
    final WindowOrganiser windowOrganiser = new WindowOrganiser();
    final double[] convert = getConversionFactors();
    final HistogramPlotBuilder builder =
        new HistogramPlotBuilder(TITLE).setNumberOfBins(histogramBins);
    for (int i = 0; i < NAMES.length; i++) {
      if (displayHistograms[i] && convert[i] != 0) {
        // We will have to convert the values...
        final double[] tmp = ((StoredDataStatistics) stats[i]).getValues();
        for (int j = 0; j < tmp.length; j++) {
          tmp[j] *= convert[i];
        }
        final StoredDataStatistics tmpStats = StoredDataStatistics.create(tmp);
        builder.setData(tmpStats).setName(NAMES[i])
            .setPlotLabel(String.format("%s +/- %s", MathUtils.rounded(tmpStats.getMean()),
                MathUtils.rounded(tmpStats.getStandardDeviation())))
            .show(windowOrganiser);
      }
    }
    windowOrganiser.tile();
  }

  if (saveRawData) {
    final String dir = ImageJUtils.getDirectory("Data_directory", rawDataDirectory);
    if (dir != null) {
      saveData(stats, dir);
    }
  }

  IJ.showStatus("");
}
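The shutdown protocol above is the standard bounded-queue, poison-pill pattern: each worker consumes jobs until it takes the sentinel value -1, and the producer submits exactly one sentinel per worker before joining the threads. A minimal, self-contained sketch of the same pattern (PoisonPillDemo and process are illustrative names, not GDSC-SMLM API):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class PoisonPillDemo {
  public static void main(String[] args) throws InterruptedException {
    final int nThreads = Runtime.getRuntime().availableProcessors();
    // Bound the queue so the producer blocks instead of queueing all jobs up front.
    final BlockingQueue<Integer> jobs = new ArrayBlockingQueue<>(nThreads * 2);
    final List<Thread> threads = new ArrayList<>();
    for (int i = 0; i < nThreads; i++) {
      final Thread t = new Thread(() -> {
        try {
          for (;;) {
            final int job = jobs.take();
            if (job < 0) {
              break; // Poison pill: stop this worker.
            }
            process(job);
          }
        } catch (final InterruptedException ex) {
          Thread.currentThread().interrupt();
        }
      });
      threads.add(t);
      t.start();
    }
    // Submit the work.
    for (int i = 0; i < 100; i++) {
      jobs.put(i);
    }
    // One pill per worker, then wait for all to finish.
    for (int i = 0; i < threads.size(); i++) {
      jobs.put(-1);
    }
    for (final Thread t : threads) {
      t.join();
    }
  }

  private static void process(int job) {
    // Placeholder for the per-frame fitting work.
  }
}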
Use of uk.ac.sussex.gdsc.core.utils.StoredDataStatistics in project GDSC-SMLM by aherbert.
The class CreateData, method createPhotonDistribution.
/**
 * Creates the photon distribution.
 *
 * @return A photon distribution loaded from a file of floating-point values with the specified
 *         population mean.
 */
private RealDistribution createPhotonDistribution() {
  if (PHOTON_DISTRIBUTION[PHOTON_CUSTOM].equals(settings.getPhotonDistribution())) {
    // Get the distribution file
    final String filename =
        ImageJUtils.getFilename("Photon_distribution", settings.getPhotonDistributionFile());
    if (filename != null) {
      settings.setPhotonDistributionFile(filename);
      try (BufferedReader in = new BufferedReader(new UnicodeReader(
          new FileInputStream(new File(settings.getPhotonDistributionFile())), null))) {
        final StoredDataStatistics stats = new StoredDataStatistics();
        String str = in.readLine();
        while (str != null) {
          stats.add(Double.parseDouble(str));
          str = in.readLine();
        }
        if (stats.getSum() > 0) {
          // Update the statistics to the desired mean.
          final double scale = settings.getPhotonsPerSecond() / stats.getMean();
          final double[] values = stats.getValues();
          for (int i = 0; i < values.length; i++) {
            values[i] *= scale;
          }

          // TODO - Investigate the limits of this distribution.
          // How far above and below the input data will values be generated?

          // Create the distribution using the recommended number of bins
          final int binCount = stats.getN() / 10;
          final EmpiricalDistribution dist = new EmpiricalDistribution(binCount,
              new RandomGeneratorAdapter(createRandomGenerator()));
          dist.load(values);
          return dist;
        }
      } catch (final IOException | NullArgumentException | NumberFormatException ex) {
        // Ignore and fall through to the failure log below
      }
    }
    ImageJUtils.log("Failed to load custom photon distribution from file: %s. Default to fixed.",
        settings.getPhotonDistributionFile());
  } else if (PHOTON_DISTRIBUTION[PHOTON_UNIFORM].equals(settings.getPhotonDistribution())) {
    if (settings.getPhotonsPerSecond() < settings.getPhotonsPerSecondMaximum()) {
      return new UniformRealDistribution(new RandomGeneratorAdapter(createRandomGenerator()),
          settings.getPhotonsPerSecond(), settings.getPhotonsPerSecondMaximum());
    }
  } else if (PHOTON_DISTRIBUTION[PHOTON_GAMMA].equals(settings.getPhotonDistribution())) {
    final double scaleParameter = settings.getPhotonsPerSecond() / settings.getPhotonShape();
    return new GammaDistribution(new RandomGeneratorAdapter(createRandomGenerator()),
        settings.getPhotonShape(), scaleParameter,
        ExponentialDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
  } else if (PHOTON_DISTRIBUTION[PHOTON_CORRELATED].equals(settings.getPhotonDistribution())) {
    // No distribution required
    return null;
  }
  settings.setPhotonDistribution(PHOTON_DISTRIBUTION[PHOTON_FIXED]);
  return null;
}
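The custom-distribution branch rescales the loaded sample so its mean matches the configured photon rate, then bins it into an EmpiricalDistribution using roughly n/10 bins. A minimal sketch of that rescale-and-load step using only Commons Math (the sample values and target mean here are made up, and the GDSC random-generator adapter is omitted):

import org.apache.commons.math3.random.EmpiricalDistribution;

public class RescaledDistributionDemo {
  public static void main(String[] args) {
    // Hypothetical sample of photon counts and a desired population mean.
    final double[] values = {80, 95, 100, 110, 120, 130, 90, 105, 115, 125};
    final double targetMean = 500;

    // Scale the sample so its mean matches the target, as done above.
    double sum = 0;
    for (final double v : values) {
      sum += v;
    }
    final double scale = targetMean / (sum / values.length);
    for (int i = 0; i < values.length; i++) {
      values[i] *= scale;
    }

    // Load an empirical distribution using ~n/10 bins (minimum 1).
    final EmpiricalDistribution dist = new EmpiricalDistribution(Math.max(1, values.length / 10));
    dist.load(values);
    System.out.printf("Sampled value: %f%n", dist.sample());
  }
}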
Use of uk.ac.sussex.gdsc.core.utils.StoredDataStatistics in project GDSC-SMLM by aherbert.
The class BaseFunctionSolverTest, method canFitSingleGaussianBetter.
void canFitSingleGaussianBetter(RandomSeed seed, FunctionSolver solver, boolean applyBounds,
    FunctionSolver solver2, boolean applyBounds2, String name, String name2,
    NoiseModel noiseModel) {
  final double[] noise = getNoise(seed, noiseModel);
  if (solver.isWeighted()) {
    solver.setWeights(getWeights(seed, noiseModel));
  }
  final int loops = 5;
  final UniformRandomProvider rg = RngUtils.create(seed.getSeed());
  final StoredDataStatistics[] stats = new StoredDataStatistics[6];
  final String[] statName = {"Signal", "X", "Y"};

  final int[] betterPrecision = new int[3];
  final int[] totalPrecision = new int[3];
  final int[] betterAccuracy = new int[3];
  final int[] totalAccuracy = new int[3];

  final String msg = "%s vs %s : %.1f (%s) %s %f +/- %f vs %f +/- %f (N=%d) %b %s";
  int i1 = 0;
  int i2 = 0;
  for (final double s : signal) {
    final double[] expected = createParams(1, s, 0, 0, 1);
    double[] lower = null;
    double[] upper = null;
    if (applyBounds || applyBounds2) {
      lower = createParams(0, s * 0.5, -0.3, -0.3, 0.8);
      upper = createParams(3, s * 2, 0.3, 0.3, 1.2);
    }
    if (applyBounds) {
      solver.setBounds(lower, upper);
    }
    if (applyBounds2) {
      solver2.setBounds(lower, upper);
    }
    for (int loop = loops; loop-- > 0;) {
      final double[] data = drawGaussian(expected, noise, noiseModel, rg);
      for (int i = 0; i < stats.length; i++) {
        stats[i] = new StoredDataStatistics();
      }
      for (final double db : base) {
        for (final double dx : shift) {
          for (final double dy : shift) {
            for (final double dsx : factor) {
              final double[] p = createParams(db, s, dx, dy, dsx);
              final double[] fp = fitGaussian(solver, data, p, expected);
              i1 += solver.getEvaluations();
              final double[] fp2 = fitGaussian(solver2, data, p, expected);
              i2 += solver2.getEvaluations();

              // Get the mean and sd (the fit precision)
              compare(fp, expected, fp2, expected, Gaussian2DFunction.SIGNAL, stats[0], stats[1]);
              compare(fp, expected, fp2, expected, Gaussian2DFunction.X_POSITION, stats[2],
                  stats[3]);
              compare(fp, expected, fp2, expected, Gaussian2DFunction.Y_POSITION, stats[4],
                  stats[5]);

              // Use the distance
              // stats[2].add(distance(fp, expected));
              // stats[3].add(distance(fp2, expected2));
            }
          }
        }
      }

      // Two-sided t-test at the 5% significance level
      final double alpha = 0.05;
      for (int i = 0; i < stats.length; i += 2) {
        double u1 = stats[i].getMean();
        double u2 = stats[i + 1].getMean();
        final double sd1 = stats[i].getStandardDeviation();
        final double sd2 = stats[i + 1].getStandardDeviation();
        final TTest tt = new TTest();
        final boolean diff = tt.tTest(stats[i].getValues(), stats[i + 1].getValues(), alpha);
        final int index = i / 2;
        final Object[] args = new Object[] {name2, name, s, noiseModel, statName[index], u2, sd2,
            u1, sd1, stats[i].getN(), diff, ""};
        if (diff) {
          // Different means. Check they are roughly the same.
          if (DoubleEquality.almostEqualRelativeOrAbsolute(u1, u2, 0.1, 0)) {
            // Basically the same. Check which is more precise.
            if (!DoubleEquality.almostEqualRelativeOrAbsolute(sd1, sd2, 0.05, 0)) {
              if (sd2 < sd1) {
                betterPrecision[index]++;
                args[args.length - 1] = "P*";
                logger.log(TestLogUtils.getRecord(Level.FINE, msg, args));
              } else {
                args[args.length - 1] = "P";
                logger.log(TestLogUtils.getRecord(Level.FINE, msg, args));
              }
              totalPrecision[index]++;
            }
          } else {
            // Check which is more accurate (closer to zero)
            u1 = Math.abs(u1);
            u2 = Math.abs(u2);
            if (u2 < u1) {
              betterAccuracy[index]++;
              args[args.length - 1] = "A*";
              logger.log(TestLogUtils.getRecord(Level.FINE, msg, args));
            } else {
              args[args.length - 1] = "A";
              logger.log(TestLogUtils.getRecord(Level.FINE, msg, args));
            }
            totalAccuracy[index]++;
          }
          // The means are the same. Check if one is more precise.
        } else if (!DoubleEquality.almostEqualRelativeOrAbsolute(sd1, sd2, 0.05, 0)) {
          if (sd2 < sd1) {
            betterPrecision[index]++;
            args[args.length - 1] = "P*";
            logger.log(TestLogUtils.getRecord(Level.FINE, msg, args));
          } else {
            args[args.length - 1] = "P";
            logger.log(TestLogUtils.getRecord(Level.FINE, msg, args));
          }
          totalPrecision[index]++;
        }
      }
    }
  }
  int better = 0;
  int total = 0;
  for (int index = 0; index < statName.length; index++) {
    better += betterPrecision[index] + betterAccuracy[index];
    total += totalPrecision[index] + totalAccuracy[index];
    test(name2, name, statName[index] + " P", betterPrecision[index], totalPrecision[index],
        Level.FINE);
    test(name2, name, statName[index] + " A", betterAccuracy[index], totalAccuracy[index],
        Level.FINE);
  }
  test(name2, name, String.format("All (eval [%d] [%d]) : ", i1, i2), better, total, Level.INFO);
}
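The comparison above relies on Commons Math's two-sample t-test, which returns true when the null hypothesis of equal means can be rejected at the given significance level. A minimal sketch with hypothetical error samples for two solvers:

import org.apache.commons.math3.stat.inference.TTest;

public class TTestDemo {
  public static void main(String[] args) {
    // Hypothetical fit errors from two solvers on the same data.
    final double[] errors1 = {0.11, 0.09, 0.12, 0.10, 0.08, 0.13, 0.10, 0.09};
    final double[] errors2 = {0.10, 0.08, 0.09, 0.09, 0.07, 0.11, 0.08, 0.09};

    // Two-sided t-test: true if the means differ at the 5% significance level.
    final boolean different = new TTest().tTest(errors1, errors2, 0.05);
    System.out.println("Means significantly different: " + different);
  }
}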
Use of uk.ac.sussex.gdsc.core.utils.StoredDataStatistics in project GDSC-SMLM by aherbert.
The class BoundedFunctionSolverTest, method fitSingleGaussianWithoutBias.
private void fitSingleGaussianWithoutBias(RandomSeed seed, boolean applyBounds, int clamping) {
  final double bias = 100;

  final NonLinearFit solver = getLvm((applyBounds) ? 2 : 1, clamping, false);
  final NonLinearFit solver2 = getLvm((applyBounds) ? 2 : 1, clamping, false);

  final String name = getLvmName(applyBounds, clamping, false);

  final int loops = 5;
  final UniformRandomProvider rg = RngUtils.create(seed.getSeed());
  final StoredDataStatistics[] stats = new StoredDataStatistics[6];

  for (final double s : signal) {
    final double[] expected = createParams(1, s, 0, 0, 1);
    double[] lower = null;
    double[] upper = null;
    if (applyBounds) {
      lower = createParams(0, s * 0.5, -0.2, -0.2, 0.8);
      upper = createParams(3, s * 2, 0.2, 0.2, 1.2);
      solver.setBounds(lower, upper);
    }

    final double[] expected2 = addBiasToParams(expected, bias);
    if (applyBounds) {
      final double[] lower2 = addBiasToParams(lower, bias);
      final double[] upper2 = addBiasToParams(upper, bias);
      solver2.setBounds(lower2, upper2);
    }

    for (int loop = loops; loop-- > 0;) {
      final double[] data = drawGaussian(expected, rg);
      final double[] data2 = data.clone();
      for (int i = 0; i < data.length; i++) {
        data2[i] += bias;
      }

      for (int i = 0; i < stats.length; i++) {
        stats[i] = new StoredDataStatistics();
      }

      for (final double db : base) {
        for (final double dx : shift) {
          for (final double dy : shift) {
            for (final double dsx : factor) {
              final double[] p = createParams(db, s, dx, dy, dsx);
              final double[] p2 = addBiasToParams(p, bias);
              final double[] fp = fitGaussian(solver, data, p, expected);
              final double[] fp2 = fitGaussian(solver2, data2, p2, expected2);

              // The result should be the same without a bias
              Assertions.assertEquals(solver.getEvaluations(), solver2.getEvaluations(),
                  () -> name + " Iterations");
              fp2[0] -= bias;
              Assertions.assertArrayEquals(fp, fp2, 1e-6, () -> name + " Solution");
            }
          }
        }
      }
    }
  }
}
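The test above checks bias invariance: fitting data offset by a constant, with the bounds and start point offset to match, must give the same solution once the bias is subtracted from the background parameter. A minimal sketch of the same check against a toy "fit" function (all names illustrative, using JUnit 5 assertions):

import org.junit.jupiter.api.Assertions;

public class BiasInvarianceDemo {
  // Hypothetical linear "fit" used only to illustrate the invariance check.
  static double[] fit(double[] data, double background) {
    double sum = 0;
    for (final double d : data) {
      sum += d - background;
    }
    return new double[] {background, sum};
  }

  public static void main(String[] args) {
    final double bias = 100;
    final double[] data = {3, 5, 4, 6};
    final double[] data2 = data.clone();
    for (int i = 0; i < data2.length; i++) {
      data2[i] += bias;
    }
    final double[] fp = fit(data, 1);
    final double[] fp2 = fit(data2, 1 + bias);
    // Remove the bias from the background parameter; the solutions should then match.
    fp2[0] -= bias;
    Assertions.assertArrayEquals(fp, fp2, 1e-6, "Solution");
  }
}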
Use of uk.ac.sussex.gdsc.core.utils.StoredDataStatistics in project GDSC-SMLM by aherbert.
The class Fire, method runQEstimation.
@SuppressWarnings("null")
private void runQEstimation() {
  IJ.showStatus(pluginTitle + " ...");
  if (!showQEstimationInputDialog()) {
    return;
  }

  MemoryPeakResults inputResults = ResultsManager.loadInputResults(settings.inputOption, false,
      null, null);
  if (MemoryPeakResults.isEmpty(inputResults)) {
    IJ.error(pluginTitle, "No results could be loaded");
    return;
  }
  if (inputResults.getCalibration() == null) {
    IJ.error(pluginTitle, "The results are not calibrated");
    return;
  }

  inputResults = cropToRoi(inputResults);
  if (inputResults.size() < 2) {
    IJ.error(pluginTitle, "No results within the crop region");
    return;
  }

  initialise(inputResults, null);

  // We need localisation precision.
  // Build a histogram of the localisation precision.
  // Get the initial mean and SD and plot as a Gaussian.
  final PrecisionHistogram histogram = calculatePrecisionHistogram();
  if (histogram == null) {
    IJ.error(pluginTitle, "No localisation precision available.\n \nPlease choose "
        + PrecisionMethod.FIXED + " and enter a precision mean and SD.");
    return;
  }
  final StoredDataStatistics precision = histogram.precision;

  final double fourierImageScale = Settings.scaleValues[settings.imageScaleIndex];
  final int imageSize = Settings.imageSizeValues[settings.imageSizeIndex];

  // Create the image and compute the numerator of FRC.
  // Do not use the signal so results.size() is the number of localisations.
  IJ.showStatus("Computing FRC curve ...");
  final FireImages images = createImages(fourierImageScale, imageSize, false);

  // DEBUGGING - Save the two images to disk. Load the images into the Matlab
  // code that calculates the Q-estimation and make this plugin match the functionality.
  // IJ.save(new ImagePlus("i1", images.ip1), "/scratch/i1.tif");
  // IJ.save(new ImagePlus("i2", images.ip2), "/scratch/i2.tif");

  final Frc frc = new Frc();
  frc.setTrackProgress(progress);
  frc.setFourierMethod(fourierMethod);
  frc.setSamplingMethod(samplingMethod);
  frc.setPerimeterSamplingFactor(settings.perimeterSamplingFactor);
  final FrcCurve frcCurve = frc.calculateFrcCurve(images.ip1, images.ip2, images.nmPerPixel);
  if (frcCurve == null) {
    IJ.error(pluginTitle, "Failed to compute FRC curve");
    return;
  }

  IJ.showStatus("Running Q-estimation ...");

  // Note:
  // The method implemented here is based on Matlab code provided by Bernd Rieger.
  // The idea is to compute the spurious correlation component of the FRC numerator
  // using an initial estimate of the distribution of the localisation precision
  // (assumed to be Gaussian). This component is the contribution of repeat
  // localisations of the same molecule to the numerator and is modelled as an
  // exponential decay (exp_decay). The component is scaled by the Q-value, which is
  // the average number of times a molecule is seen in addition to the first time.
  // At large spatial frequencies the scaled component should match the numerator,
  // i.e. at high resolution (low FIRE number) the numerator is made up of repeat
  // localisations of the same molecule and not actual structure in the image.
  // The best fit is where the numerator equals the scaled component,
  // i.e. num / (q * exp_decay) == 1.
  // The FRC numerator is plotted and Q can be determined by adjusting Q and the
  // precision mean and SD to maximise the cost function. This can be done
  // interactively by the user with the effect on the FRC curve dynamically updated
  // and displayed.

  // Compute the scaled FRC numerator
  final double qNorm = (1 / frcCurve.mean1 + 1 / frcCurve.mean2);
  final double[] frcnum = new double[frcCurve.getSize()];
  for (int i = 0; i < frcnum.length; i++) {
    final FrcCurveResult r = frcCurve.get(i);
    frcnum[i] = qNorm * r.getNumerator() / r.getNumberOfSamples();
  }

  // Compute the spatial frequency and the region for curve fitting
  final double[] q = Frc.computeQ(frcCurve, false);
  int low = 0;
  int high = q.length;
  while (high > 0 && q[high - 1] > settings.maxQ) {
    high--;
  }
  while (low < q.length && q[low] < settings.minQ) {
    low++;
  }
  // Require that we fit at least 10% of the curve
  if (high - low < q.length * 0.1) {
    IJ.error(pluginTitle, "Not enough points for Q estimation");
    return;
  }

  // Obtain an initial estimate of the Q plateau height and decay.
  // This can be done by fitting the precision histogram and then fixing the mean
  // and sigma. Or it can be done by allowing the precision to be sampled and the
  // mean and sigma become parameters for fitting.

  // Check if we can sample precision values
  final boolean sampleDecay = precision != null && settings.sampleDecay;

  double[] expDecay;
  if (sampleDecay) {
    // A random sample of precision values from the distribution is used to
    // construct the decay curve
    final int[] sample = RandomUtils.sample(10000, precision.getN(),
        UniformRandomProviders.create());

    final double fourPi2 = 4 * Math.PI * Math.PI;
    final double[] pre = new double[q.length];
    for (int i = 1; i < q.length; i++) {
      pre[i] = -fourPi2 * q[i] * q[i];
    }

    // Sample
    final int n = sample.length;
    final double[] hq = new double[q.length];
    for (int j = 0; j < n; j++) {
      // Scale to SR pixels
      double s2 = precision.getValue(sample[j]) / images.nmPerPixel;
      s2 *= s2;
      for (int i = 1; i < q.length; i++) {
        hq[i] += StdMath.exp(pre[i] * s2);
      }
    }
    for (int i = 1; i < q.length; i++) {
      hq[i] /= n;
    }

    expDecay = new double[q.length];
    expDecay[0] = 1;
    for (int i = 1; i < q.length; i++) {
      final double sincQ = sinc(Math.PI * q[i]);
      expDecay[i] = sincQ * sincQ * hq[i];
    }
  } else {
    // Note: The sigma mean and std should be in the units of super-resolution
    // pixels so scale to SR pixels
    expDecay = computeExpDecay(histogram.mean / images.nmPerPixel,
        histogram.sigma / images.nmPerPixel, q);
  }

  // Smoothing
  double[] smooth;
  if (settings.loessSmoothing) {
    // Note: This computes the log then smooths it
    final double bandwidth = 0.1;
    final int robustness = 0;
    final double[] l = new double[expDecay.length];
    for (int i = 0; i < l.length; i++) {
      // The original Matlab code computes the log for each array.
      // This is equivalent to a single log on the fraction of the two.
      // Perhaps the two-log method is more numerically stable.
      // l[i] = Math.log(Math.abs(frcnum[i])) - Math.log(exp_decay[i]);
      l[i] = Math.log(Math.abs(frcnum[i] / expDecay[i]));
    }
    try {
      final LoessInterpolator loess = new LoessInterpolator(bandwidth, robustness);
      smooth = loess.smooth(q, l);
    } catch (final Exception ex) {
      IJ.error(pluginTitle, "LOESS smoothing failed");
      return;
    }
  } else {
    // Note: This smooths the curve before computing the log
    final double[] norm = new double[expDecay.length];
    for (int i = 0; i < norm.length; i++) {
      norm[i] = frcnum[i] / expDecay[i];
    }
    // Median window of 5 == radius of 2
    final DoubleMedianWindow mw = DoubleMedianWindow.wrap(norm, 2);
    smooth = new double[expDecay.length];
    for (int i = 0; i < norm.length; i++) {
      smooth[i] = Math.log(Math.abs(mw.getMedian()));
      mw.increment();
    }
  }

  // Fit with a quadratic to find the initial guess.
  // Note: The example Matlab code frc_Qcorrection7.m identifies regions of the
  // smoothed log curve with low derivative and only fits those. The fit is
  // used for the final estimate. Fitting a subset with low derivative is not
  // implemented here since the initial estimate is subsequently optimised
  // to maximise a cost function.
  final Quadratic curve = new Quadratic();
  final SimpleCurveFitter fit = SimpleCurveFitter.create(curve, new double[2]);
  final WeightedObservedPoints points = new WeightedObservedPoints();
  for (int i = low; i < high; i++) {
    points.add(q[i], smooth[i]);
  }
  final double[] estimate = fit.fit(points.toList());
  double qvalue = StdMath.exp(estimate[0]);

  // This could be made an option. Just use for debugging.
  final boolean debug = false;
  if (debug) {
    // Plot the initial fit and the fit curve
    final double[] qScaled = Frc.computeQ(frcCurve, true);
    final double[] line = new double[q.length];
    for (int i = 0; i < q.length; i++) {
      line[i] = curve.value(q[i], estimate);
    }
    final String title = pluginTitle + " Initial fit";
    final Plot plot = new Plot(title, "Spatial Frequency (nm^-1)", "FRC Numerator");
    final String label = String.format("Q = %.3f", qvalue);
    plot.addPoints(qScaled, smooth, Plot.LINE);
    plot.setColor(Color.red);
    plot.addPoints(qScaled, line, Plot.LINE);
    plot.setColor(Color.black);
    plot.addLabel(0, 0, label);
    ImageJUtils.display(title, plot, ImageJUtils.NO_TO_FRONT);
  }

  if (settings.fitPrecision) {
    // Q - Should this be optional?
    if (sampleDecay) {
      // If a sample of the precision was used to construct the data for the initial fit
      // then update the estimate using the fit result since it will be a better start point.
      histogram.sigma = precision.getStandardDeviation();
      // Normalise the sum-of-squares to the SR pixel size
      final double meanSumOfSquares = (precision.getSumOfSquares()
          / (images.nmPerPixel * images.nmPerPixel)) / precision.getN();
      histogram.mean = images.nmPerPixel
          * Math.sqrt(meanSumOfSquares - estimate[1] / (4 * Math.PI * Math.PI));
    }

    // Do a multivariate fit ...
    final SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
    PointValuePair pair = null;
    final MultiPlateauness f = new MultiPlateauness(frcnum, q, low, high);
    final double[] initial = new double[] {histogram.mean / images.nmPerPixel,
        histogram.sigma / images.nmPerPixel, qvalue};
    pair = findMin(pair, opt, f, scale(initial, 0.1));
    pair = findMin(pair, opt, f, scale(initial, 0.5));
    pair = findMin(pair, opt, f, initial);
    pair = findMin(pair, opt, f, scale(initial, 2));
    pair = findMin(pair, opt, f, scale(initial, 10));
    if (pair != null) {
      final double[] point = pair.getPointRef();
      histogram.mean = point[0] * images.nmPerPixel;
      histogram.sigma = point[1] * images.nmPerPixel;
      qvalue = point[2];
    }
  } else {
    // If so then this should be optional.
    if (sampleDecay) {
      if (precisionMethod != PrecisionMethod.FIXED) {
        histogram.sigma = precision.getStandardDeviation();
        // Normalise the sum-of-squares to the SR pixel size
        final double meanSumOfSquares = (precision.getSumOfSquares()
            / (images.nmPerPixel * images.nmPerPixel)) / precision.getN();
        histogram.mean = images.nmPerPixel
            * Math.sqrt(meanSumOfSquares - estimate[1] / (4 * Math.PI * Math.PI));
      }
      expDecay = computeExpDecay(histogram.mean / images.nmPerPixel,
          histogram.sigma / images.nmPerPixel, q);
    }

    // Estimate the spurious component by promoting plateauness.
    // The Matlab code used random initial points for a Simplex optimiser.
    // A Brent line search should be pretty deterministic so do simple repeats.
    // However it will proceed downhill so if the initial point is wrong then
    // it will find a sub-optimal result.
    final UnivariateOptimizer o = new BrentOptimizer(1e-3, 1e-6);
    final Plateauness f = new Plateauness(frcnum, expDecay, low, high);
    UnivariatePointValuePair result = null;
    result = findMin(result, o, f, qvalue, 0.1);
    result = findMin(result, o, f, qvalue, 0.2);
    result = findMin(result, o, f, qvalue, 0.333);
    result = findMin(result, o, f, qvalue, 0.5);
    // Do some Simplex repeats as well
    final SimplexOptimizer opt = new SimplexOptimizer(1e-6, 1e-10);
    result = findMin(result, opt, f, qvalue * 0.1);
    result = findMin(result, opt, f, qvalue * 0.5);
    result = findMin(result, opt, f, qvalue);
    result = findMin(result, opt, f, qvalue * 2);
    result = findMin(result, opt, f, qvalue * 10);
    if (result != null) {
      qvalue = result.getPoint();
    }
  }

  final QPlot qplot = new QPlot(frcCurve, qvalue, low, high);

  // Interactive dialog to estimate Q (blinking events per fluorophore) using
  // sliders for the mean and standard deviation of the localisation precision.
  showQEstimationDialog(histogram, qplot, images.nmPerPixel);

  IJ.showStatus(pluginTitle + " complete");
}
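For a single localisation precision s (in super-resolution pixels) the decay term built above is sinc^2(pi q) * exp(-4 pi^2 s^2 q^2); the sampling branch averages the exponential over many sampled s values. A minimal sketch for one fixed precision (names illustrative; the plugin's computeExpDecay, which takes the precision mean and SD, is not reproduced here):

public class ExpDecayDemo {
  // sinc(x) = sin(x) / x, with the limit value 1 at x = 0.
  static double sinc(double x) {
    return x == 0 ? 1 : Math.sin(x) / x;
  }

  // H(q) = sinc^2(pi q) * exp(-4 pi^2 s^2 q^2), with s in SR pixels.
  static double[] computeExpDecay(double s, double[] q) {
    final double fourPi2 = 4 * Math.PI * Math.PI;
    final double s2 = s * s;
    final double[] expDecay = new double[q.length];
    expDecay[0] = 1;
    for (int i = 1; i < q.length; i++) {
      final double sincQ = sinc(Math.PI * q[i]);
      expDecay[i] = sincQ * sincQ * Math.exp(-fourPi2 * s2 * q[i] * q[i]);
    }
    return expDecay;
  }

  public static void main(String[] args) {
    final double[] q = {0, 0.05, 0.1, 0.2, 0.4};
    final double[] decay = computeExpDecay(0.5, q);
    for (int i = 0; i < q.length; i++) {
      System.out.printf("q=%.2f decay=%.4f%n", q[i], decay[i]);
    }
  }
}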