Use of uk.ac.sussex.gdsc.core.logging.Ticker in project GDSC-SMLM by aherbert.
The class ResultsMatchCalculator, method showPairs.
/**
* Show the match pairs in a results table.
*
* <p>Adds an ROI painter for the original image source of results set 1 (if it is visible) to the
* table.
*
* @param results1 the first set of results
* @param pairs the pairs
* @param resultsWindow the results window
*/
private void showPairs(MemoryPeakResults results1, final List<PointPair> pairs, final TextWindow resultsWindow) {
if (!settings.showPairs) {
return;
}
final TextWindow pairsWindow = createPairsWindow(resultsWindow, results1.getSource());
pairsWindow.getTextPanel().clear();
final Ticker ticker = ImageJUtils.createTicker(pairs.size(), 0, "Writing pairs table");
try (final BufferedTextWindow bw = new BufferedTextWindow(pairsWindow)) {
final StringBuilder sb = new StringBuilder();
for (final PointPair pair : pairs) {
bw.append(addPairResult(sb, pair));
ticker.tick();
}
}
ImageJUtils.finished();
}
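This method shows the simplest Ticker pattern in the codebase: size the ticker to the work, tick once per item written, then reset the ImageJ progress reporting. A minimal sketch of that pattern follows; the item list and the per-item work are hypothetical placeholders, and the ImageJUtils import path is assumed.
import java.util.List;

import uk.ac.sussex.gdsc.core.ij.ImageJUtils;
import uk.ac.sussex.gdsc.core.logging.Ticker;

/** Sketch only: the single-threaded Ticker progress pattern used by showPairs. */
class TickerPatternSketch {
  static void processAll(List<String> items) {
    // Mirrors the call above: total work, thread count (0 here) and a status message.
    final Ticker ticker = ImageJUtils.createTicker(items.size(), 0, "Processing items");
    for (final String item : items) {
      // ... per-item work would go here (placeholder) ...
      ticker.tick();
    }
    // Clear the ImageJ progress/status once all items are done.
    ImageJUtils.finished();
  }
}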
Use of uk.ac.sussex.gdsc.core.logging.Ticker in project GDSC-SMLM by aherbert.
The class CmosAnalysis, method runAnalysis.
private void runAnalysis() {
final long start = System.currentTimeMillis();
// Create thread pool and workers. The system is likely to be IO limited
// so reduce the computation threads to allow the reading thread in the
// SeriesImageSource to run.
// If the images are small enough to fit into memory then 3 threads are used,
// otherwise it is 1.
final int nThreads = Math.max(1, getThreads() - 3);
final ExecutorService executor = Executors.newFixedThreadPool(nThreads);
final LocalList<Future<?>> futures = new LocalList<>(nThreads);
final LocalList<ImageWorker> workers = new LocalList<>(nThreads);
final double[][] data = new double[subDirs.size() * 2][];
double[] pixelOffset = null;
double[] pixelVariance = null;
Statistics statsOffset = null;
Statistics statsVariance = null;
// For each sub-directory compute the mean and variance
final int nSubDirs = subDirs.size();
boolean error = false;
int width = 0;
int height = 0;
for (int n = 0; n < nSubDirs; n++) {
ImageJUtils.showSlowProgress(n, nSubDirs);
final SubDir sd = subDirs.unsafeGet(n);
ImageJUtils.showStatus(() -> "Analysing " + sd.name);
final StopWatch sw = StopWatch.createStarted();
// Option to reuse data
final File file = new File(settings.directory, "perPixel" + sd.name + ".tif");
boolean found = false;
if (settings.reuseProcessedData && file.exists()) {
final Opener opener = new Opener();
opener.setSilentMode(true);
final ImagePlus imp = opener.openImage(file.getPath());
if (imp != null && imp.getStackSize() == 2 && imp.getBitDepth() == 32) {
if (n == 0) {
width = imp.getWidth();
height = imp.getHeight();
} else if (width != imp.getWidth() || height != imp.getHeight()) {
error = true;
IJ.error(TITLE, "Image width/height mismatch in image series: " + file.getPath() + String.format("\n \nExpected %dx%d, Found %dx%d", width, height, imp.getWidth(), imp.getHeight()));
break;
}
final ImageStack stack = imp.getImageStack();
data[2 * n] = SimpleArrayUtils.toDouble((float[]) stack.getPixels(1));
data[2 * n + 1] = SimpleArrayUtils.toDouble((float[]) stack.getPixels(2));
found = true;
}
}
if (!found) {
// Open the series
final SeriesImageSource source = new SeriesImageSource(sd.name, sd.path.getPath());
if (!source.open()) {
error = true;
IJ.error(TITLE, "Failed to open image series: " + sd.path.getPath());
break;
}
if (n == 0) {
width = source.getWidth();
height = source.getHeight();
} else if (width != source.getWidth() || height != source.getHeight()) {
error = true;
IJ.error(TITLE, "Image width/height mismatch in image series: " + sd.path.getPath() + String.format("\n \nExpected %dx%d, Found %dx%d", width, height, source.getWidth(), source.getHeight()));
break;
}
// Use frames + 1 so the progress bar remains at 99% when the workers have finished
final Ticker ticker = ImageJUtils.createTicker(source.getFrames() + 1L, nThreads);
// Open the first frame to get the bit depth.
// Assume the first pixels are not empty as the source is open.
Object pixels = source.nextRaw();
final int bitDepth = ImageJUtils.getBitDepth(pixels);
ArrayMoment moment;
if (settings.rollingAlgorithm) {
moment = new RollingArrayMoment();
// We assume at most a 16-bit camera
} else if (bitDepth <= 16 && IntegerArrayMoment.isValid(IntegerType.UNSIGNED_16, source.getFrames())) {
moment = new IntegerArrayMoment();
} else {
moment = new SimpleArrayMoment();
}
final BlockingQueue<Object> jobs = new ArrayBlockingQueue<>(nThreads * 2);
for (int i = 0; i < nThreads; i++) {
final ImageWorker worker = new ImageWorker(ticker, jobs, moment);
workers.add(worker);
futures.add(executor.submit(worker));
}
// Process the raw pixel data
long lastTime = 0;
while (pixels != null) {
final long time = System.currentTimeMillis();
if (time - lastTime > 150) {
if (ImageJUtils.isInterrupted()) {
error = true;
break;
}
lastTime = time;
IJ.showStatus("Analysing " + sd.name + " Frame " + source.getStartFrameNumber());
}
put(jobs, pixels);
pixels = source.nextRaw();
}
source.close();
if (error) {
// Kill the workers
workers.stream().forEach(worker -> worker.finished = true);
// Clear the queue
jobs.clear();
// Signal any waiting workers
workers.stream().forEach(worker -> jobs.add(ImageWorker.STOP_SIGNAL));
// Cancel by interruption. We set the finished flag so the ImageWorker should
// ignore the interrupt.
futures.stream().forEach(future -> future.cancel(true));
break;
}
// Finish all the worker threads cleanly
workers.stream().forEach(worker -> jobs.add(ImageWorker.STOP_SIGNAL));
// Wait for all to finish
ConcurrencyUtils.waitForCompletionUnchecked(futures);
// Create the final aggregate statistics
for (final ImageWorker w : workers) {
moment.add(w.moment);
}
data[2 * n] = moment.getMean();
data[2 * n + 1] = moment.getVariance();
// Get the processing speed.
sw.stop();
// ticker holds the number of frames processed
final double bits = (double) bitDepth * source.getFrames() * source.getWidth() * source.getHeight();
final double bps = bits / sw.getTime(TimeUnit.SECONDS);
final SiPrefix prefix = SiPrefix.getSiPrefix(bps);
ImageJUtils.log("Processed %d frames. Time = %s. Rate = %s %sbits/s", moment.getN(), sw.toString(), MathUtils.rounded(prefix.convert(bps)), prefix.getPrefix());
// Reset
futures.clear();
workers.clear();
final ImageStack stack = new ImageStack(width, height);
stack.addSlice("Mean", SimpleArrayUtils.toFloat(data[2 * n]));
stack.addSlice("Variance", SimpleArrayUtils.toFloat(data[2 * n + 1]));
IJ.save(new ImagePlus("PerPixel", stack), file.getPath());
}
final Statistics s = Statistics.create(data[2 * n]);
if (pixelOffset != null) {
// Compute mean ADU
final Statistics signal = new Statistics();
final double[] mean = data[2 * n];
for (int i = 0; i < pixelOffset.length; i++) {
signal.add(mean[i] - pixelOffset[i]);
}
ImageJUtils.log("%s Mean = %s +/- %s. Signal = %s +/- %s ADU", sd.name, MathUtils.rounded(s.getMean()), MathUtils.rounded(s.getStandardDeviation()), MathUtils.rounded(signal.getMean()), MathUtils.rounded(signal.getStandardDeviation()));
} else {
// Set the offset assuming the first sub-directory is the bias image
pixelOffset = data[0];
pixelVariance = data[1];
statsOffset = s;
statsVariance = Statistics.create(pixelVariance);
ImageJUtils.log("%s Offset = %s +/- %s. Variance = %s +/- %s", sd.name, MathUtils.rounded(s.getMean()), MathUtils.rounded(s.getStandardDeviation()), MathUtils.rounded(statsVariance.getMean()), MathUtils.rounded(statsVariance.getStandardDeviation()));
}
IJ.showProgress(1);
}
ImageJUtils.clearSlowProgress();
if (error) {
executor.shutdownNow();
IJ.showStatus(TITLE + " cancelled");
return;
}
executor.shutdown();
if (pixelOffset == null || pixelVariance == null) {
IJ.showStatus(TITLE + " error: no bias image");
return;
}
// Compute the gain
ImageJUtils.showStatus("Computing gain");
final double[] pixelGain = new double[pixelOffset.length];
final double[] bibiT = new double[pixelGain.length];
final double[] biaiT = new double[pixelGain.length];
// Ignore first as this is the 0 exposure image
for (int n = 1; n < nSubDirs; n++) {
// Use equation 2.5 from the Huang et al paper.
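// For each pixel i and non-bias exposure n: bi = mean[i] - pixelOffset[i] and
// ai = variance[i] - pixelVariance[i]. The loops accumulate sum(bi*bi) and
// sum(bi*ai) over n so that the gain computed below is the least-squares estimate
// pixelGain[i] = sum(bi*ai) / sum(bi*bi).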
final double[] b = data[2 * n];
final double[] a = data[2 * n + 1];
for (int i = 0; i < pixelGain.length; i++) {
final double bi = b[i] - pixelOffset[i];
final double ai = a[i] - pixelVariance[i];
bibiT[i] += bi * bi;
biaiT[i] += bi * ai;
}
}
for (int i = 0; i < pixelGain.length; i++) {
pixelGain[i] = biaiT[i] / bibiT[i];
}
final Statistics statsGain = Statistics.create(pixelGain);
ImageJUtils.log("Gain Mean = %s +/- %s", MathUtils.rounded(statsGain.getMean()), MathUtils.rounded(statsGain.getStandardDeviation()));
// Histogram of offset, variance and gain
final int bins = 2 * HistogramPlot.getBinsSturgesRule(pixelGain.length);
final WindowOrganiser wo = new WindowOrganiser();
showHistogram("Offset (ADU)", pixelOffset, bins, statsOffset, wo);
showHistogram("Variance (ADU^2)", pixelVariance, bins, statsVariance, wo);
showHistogram("Gain (ADU/e)", pixelGain, bins, statsGain, wo);
wo.tile();
// Save
final float[] bias = SimpleArrayUtils.toFloat(pixelOffset);
final float[] variance = SimpleArrayUtils.toFloat(pixelVariance);
final float[] gain = SimpleArrayUtils.toFloat(pixelGain);
measuredStack = new ImageStack(width, height);
measuredStack.addSlice("Offset", bias);
measuredStack.addSlice("Variance", variance);
measuredStack.addSlice("Gain", gain);
final ExtendedGenericDialog egd = new ExtendedGenericDialog(TITLE);
egd.addMessage("Save the sCMOS camera model?");
if (settings.modelDirectory == null) {
settings.modelDirectory = settings.directory;
settings.modelName = "sCMOS Camera";
}
egd.addStringField("Model_name", settings.modelName, 30);
egd.addDirectoryField("Model_directory", settings.modelDirectory);
egd.showDialog();
if (!egd.wasCanceled()) {
settings.modelName = egd.getNextString();
settings.modelDirectory = egd.getNextString();
saveCameraModel(width, height, bias, gain, variance);
}
// Remove the status from the ij.io.ImageWriter class
IJ.showStatus("");
ImageJUtils.log("Analysis time = " + TextUtils.millisToString(System.currentTimeMillis() - start));
}
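The per-directory analysis above also illustrates the thread-safe variant: ImageJUtils.createTicker is given the worker-thread count, the resulting ticker is shared by all ImageWorker instances reading jobs from a bounded queue, and each worker ticks once per processed frame. A reduced sketch of that producer/consumer shape follows; the job type, queue capacity and stop sentinel are illustrative stand-ins rather than the real ImageWorker, and the ImageJUtils import path is assumed.
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import uk.ac.sussex.gdsc.core.ij.ImageJUtils;
import uk.ac.sussex.gdsc.core.logging.Ticker;

/** Sketch only: a shared thread-safe Ticker updated by multiple queue workers. */
class SharedTickerSketch {
  private static final Object STOP = new Object();

  static void processFrames(int frames, int nThreads) throws InterruptedException {
    // Pass the worker count so the ticker uses a concurrent implementation (as above).
    final Ticker ticker = ImageJUtils.createTicker(frames, nThreads);
    final BlockingQueue<Object> jobs = new ArrayBlockingQueue<>(nThreads * 2);
    final ExecutorService executor = Executors.newFixedThreadPool(nThreads);
    for (int i = 0; i < nThreads; i++) {
      executor.submit(() -> {
        try {
          for (Object job = jobs.take(); job != STOP; job = jobs.take()) {
            // ... process the job (placeholder for the raw pixel data above) ...
            ticker.tick();
          }
        } catch (final InterruptedException ex) {
          Thread.currentThread().interrupt();
        }
      });
    }
    for (int f = 0; f < frames; f++) {
      jobs.put(new Object()); // placeholder job
    }
    for (int i = 0; i < nThreads; i++) {
      jobs.put(STOP); // one stop signal per worker
    }
    executor.shutdown();
    ImageJUtils.finished();
  }
}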
Use of uk.ac.sussex.gdsc.core.logging.Ticker in project GDSC-SMLM by aherbert.
The class CmosAnalysis, method simulate.
private void simulate() throws IOException {
// Create the offset, variance and gain for each pixel
final int n = settings.size * settings.size;
final float[] pixelOffset = new float[n];
final float[] pixelVariance = new float[n];
final float[] pixelGain = new float[n];
IJ.showStatus("Creating random per-pixel readout");
final long start = System.currentTimeMillis();
final UniformRandomProvider rg = UniformRandomProviders.create();
final DiscreteSampler pd = PoissonSamplerUtils.createPoissonSampler(rg, settings.offset);
final ContinuousSampler ed = SamplerUtils.createExponentialSampler(rg, settings.variance);
final SharedStateContinuousSampler gauss = SamplerUtils.createGaussianSampler(rg, settings.gain, settings.gainStdDev);
Ticker ticker = ImageJUtils.createTicker(n, 0);
for (int i = 0; i < n; i++) {
// Q. Should these be clipped to a sensible range?
pixelOffset[i] = pd.sample();
pixelVariance[i] = (float) ed.sample();
pixelGain[i] = (float) gauss.sample();
ticker.tick();
}
IJ.showProgress(1);
// Save to the directory as a stack
final ImageStack simulationStack = new ImageStack(settings.size, settings.size);
simulationStack.addSlice("Offset", pixelOffset);
simulationStack.addSlice("Variance", pixelVariance);
simulationStack.addSlice("Gain", pixelGain);
simulationImp = new ImagePlus("PerPixel", simulationStack);
// Only the info property is saved to the TIFF file
simulationImp.setProperty("Info", String.format("Offset=%s; Variance=%s; Gain=%s +/- %s", MathUtils.rounded(settings.offset), MathUtils.rounded(settings.variance), MathUtils.rounded(settings.gain), MathUtils.rounded(settings.gainStdDev)));
IJ.save(simulationImp, new File(settings.directory, "perPixelSimulation.tif").getPath());
// Create thread pool and workers
final int threadCount = getThreads();
final ExecutorService executor = Executors.newFixedThreadPool(threadCount);
final LocalList<Future<?>> futures = new LocalList<>(threadCount);
// Simulate the exposure input.
final int[] photons = settings.getPhotons();
// For saving stacks
final int blockSize = 10;
int numberPerThread = (int) Math.ceil((double) settings.frames / threadCount);
// Convert to fit the block size
numberPerThread = (int) Math.ceil((double) numberPerThread / blockSize) * blockSize;
final Pcg32 rng = Pcg32.xshrs(start);
// Note: the bias (0 photon) series uses 3x the frames, so add 2 to the photons length for the total tick count
ticker = Ticker.createStarted(new ImageJTrackProgress(true), (long) (photons.length + 2) * settings.frames, threadCount > 1);
for (final int p : photons) {
ImageJUtils.showStatus(() -> "Simulating " + TextUtils.pleural(p, "photon"));
// Create the directory
final Path out = Paths.get(settings.directory, String.format("photon%03d", p));
Files.createDirectories(out);
// Increase frames for bias image
final int frames = settings.frames * (p == 0 ? 3 : 1);
for (int from = 0; from < frames; ) {
final int to = Math.min(from + numberPerThread, frames);
futures.add(executor.submit(new SimulationWorker(ticker, rng.split(), out.toString(), simulationStack, from, to, blockSize, p)));
from = to;
}
ConcurrencyUtils.waitForCompletionUnchecked(futures);
futures.clear();
}
final String msg = "Simulation time = " + TextUtils.millisToString(System.currentTimeMillis() - start);
IJ.showStatus(msg);
ImageJUtils.clearSlowProgress();
executor.shutdown();
ImageJUtils.log(msg);
}
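simulate() builds its ticker directly with Ticker.createStarted rather than through the ImageJUtils helper, wrapping an ImageJTrackProgress so ticks drive the ImageJ progress display, and passing a flag for a thread-safe ticker when more than one thread is used. A minimal sketch of that construction, assuming the ImageJTrackProgress import path and a placeholder amount of work:
import uk.ac.sussex.gdsc.core.ij.ImageJTrackProgress;
import uk.ac.sussex.gdsc.core.logging.Ticker;

/** Sketch only: direct construction of a started Ticker from a TrackProgress. */
class DirectTickerSketch {
  static void run() {
    final long totalWork = 1000; // placeholder total
    // Mirrors the simulate() call: progress sink, total work, thread-safe flag.
    final Ticker ticker = Ticker.createStarted(new ImageJTrackProgress(true), totalWork, false);
    for (long i = 0; i < totalWork; i++) {
      // ... one unit of work (placeholder) ...
      ticker.tick();
    }
  }
}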
Use of uk.ac.sussex.gdsc.core.logging.Ticker in project GDSC-SMLM by aherbert.
The class DriftCalculator, method calculateDriftUsingImageStack.
/**
* Calculate the drift of images to the reference image. If no reference is provided then produce
* a combined z-projection. Update the current drift parameters.
*
* @param reference the reference
* @param images The images to align
* @param fhtImages The images to align (pre-transformed to a FHT)
* @param blockT The frame number for each image
* @param dx The X drift
* @param dy The Y drift
* @param originalDriftTimePoints Non-zero when the frame number refers to an aligned image frame
* @param smoothing the smoothing
* @param iterations the iterations
* @return The change in the drift (NaN if an error occurred)
*/
private double calculateDriftUsingImageStack(FloatProcessor reference, ImageProcessor[] images, Fht[] fhtImages, int[] blockT, double[] dx, double[] dy, double[] originalDriftTimePoints, double smoothing, int iterations) {
Ticker ticker = Ticker.createStarted(tracker, images.length, true);
if (reference == null) {
// Construct images using the current drift
tracker.status("Constructing reference image");
// Build an image using the current drift
final List<Future<?>> futures = new LinkedList<>();
ticker = Ticker.createStarted(tracker, images.length * 2L, true);
final ImageProcessor[] blockIp = new ImageProcessor[images.length];
final double[] threadDx = new double[images.length];
final double[] threadDy = new double[images.length];
for (int i = 0; i < images.length; i++) {
threadDx[i] = dx[blockT[i]];
threadDy[i] = dy[blockT[i]];
}
final int imagesPerThread = getImagesPerThread(images);
for (int i = 0; i < images.length; i += imagesPerThread) {
futures.add(executor.submit(new ImageTranslator(images, blockIp, threadDx, threadDy, i, i + imagesPerThread, ticker, settings.interpolationMethod)));
}
ConcurrencyUtils.waitForCompletionUnchecked(futures);
// Build an image with all results.
reference = new FloatProcessor(blockIp[0].getWidth(), blockIp[0].getHeight());
for (final ImageProcessor ip : blockIp) {
reference.copyBits(ip, 0, 0, Blitter.ADD);
}
}
// Ensure the reference is windowed
AlignImagesFft.applyWindowSeparable(reference, WindowMethod.TUKEY);
return calculateDrift(blockT, 1f, dx, dy, originalDriftTimePoints, smoothing, iterations, fhtImages, reference, false, ticker);
}
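The drift calculators use a third shape: the image array is split into contiguous chunks, one task is submitted per chunk, and every task advances the same thread-safe ticker before the caller waits on the futures. A reduced sketch of that chunking pattern is below; processImage is a hypothetical stand-in for the ImageTranslator/ImageFhtInitialiser work, and the import paths for TrackProgress and ConcurrencyUtils are assumptions.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

import uk.ac.sussex.gdsc.core.logging.Ticker;
import uk.ac.sussex.gdsc.core.logging.TrackProgress;
import uk.ac.sussex.gdsc.core.utils.concurrent.ConcurrencyUtils;

/** Sketch only: chunked submission of array work sharing one thread-safe Ticker. */
class ChunkedTickerSketch {
  static void alignAll(Object[] images, TrackProgress tracker, ExecutorService executor,
      int imagesPerThread) {
    final List<Future<?>> futures = new ArrayList<>();
    // One tick per image; the final 'true' requests a thread-safe ticker (as above).
    final Ticker ticker = Ticker.createStarted(tracker, images.length, true);
    for (int i = 0; i < images.length; i += imagesPerThread) {
      final int from = i;
      final int to = Math.min(i + imagesPerThread, images.length);
      futures.add(executor.submit(() -> {
        for (int j = from; j < to; j++) {
          // ... processImage(images[j]) would go here (hypothetical) ...
          ticker.tick();
        }
      }));
    }
    // Block until every chunk task has completed.
    ConcurrencyUtils.waitForCompletionUnchecked(futures);
  }
}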
Use of uk.ac.sussex.gdsc.core.logging.Ticker in project GDSC-SMLM by aherbert.
The class DriftCalculator, method calculateUsingImageStack.
/**
* Calculates drift using images from a reference stack aligned to the overall z-projection.
*
* @param stack the stack
* @param limits the limits
* @return the drift { dx[], dy[] }
*/
@Nullable
private double[][] calculateUsingImageStack(ImageStack stack, int[] limits) {
// Update the limits using the stack size
final int upperT = settings.startFrame + settings.frameSpacing * (stack.getSize() - 1);
limits[1] = Math.max(limits[1], upperT);
// TODO - Truncate the stack if there are far too many frames for the localisation limits
tracker.status("Constructing images");
executor = Executors.newFixedThreadPool(Prefs.getThreads());
// Build an image and FHT image for each slice
final ImageProcessor[] images = new ImageProcessor[stack.getSize()];
final Fht[] fhtImages = new Fht[stack.getSize()];
final List<Future<?>> futures = new LinkedList<>();
final Ticker ticker = Ticker.createStarted(tracker, images.length, true);
final int imagesPerThread = getImagesPerThread(images);
final AlignImagesFft aligner = new AlignImagesFft();
final FloatProcessor referenceIp = stack.getProcessor(1).toFloat(0, null);
// We do not care about the window method because this processor will not
// actually be used for alignment; it is only a reference for the FHT size
aligner.initialiseReference(referenceIp, WindowMethod.NONE, false);
for (int i = 0; i < images.length; i += imagesPerThread) {
futures.add(executor.submit(new ImageFhtInitialiser(stack, images, aligner, fhtImages, i, i + imagesPerThread, ticker)));
}
ConcurrencyUtils.waitForCompletionUnchecked(futures);
tracker.progress(1);
if (tracker.isEnded()) {
return null;
}
final double[] dx = new double[limits[1] + 1];
final double[] dy = new double[dx.length];
final double[] originalDriftTimePoints = new double[dx.length];
final int[] blockT = new int[stack.getSize()];
for (int i = 0, t = settings.startFrame; i < stack.getSize(); i++, t += settings.frameSpacing) {
originalDriftTimePoints[t] = 1;
blockT[i] = t;
}
final double smoothing = updateSmoothingParameter(originalDriftTimePoints);
lastdx = null;
// For the first iteration calculate drift to the first image in the stack
// (since the average projection may have a large drift blurring the image)
double change = calculateDriftUsingImageStack(referenceIp, images, fhtImages, blockT, dx, dy, originalDriftTimePoints, smoothing, settings.iterations);
if (Double.isNaN(change) || tracker.isEnded()) {
return null;
}
plotDrift(limits, dx, dy);
ImageJUtils.log("Drift Calculator : Initial drift " + MathUtils.rounded(change));
for (int i = 1; i <= settings.maxIterations; i++) {
change = calculateDriftUsingImageStack(null, images, fhtImages, blockT, dx, dy, originalDriftTimePoints, smoothing, settings.iterations);
if (Double.isNaN(change)) {
return null;
}
plotDrift(limits, dx, dy);
if (converged(i, change, getTotalDrift(dx, dy, originalDriftTimePoints))) {
break;
}
}
if (tracker.isEnded()) {
return null;
}
plotDrift(limits, dx, dy);
return new double[][] { dx, dy };
}