Use of gdsc.smlm.ij.plugins.ResultsMatchCalculator.PeakResultPoint in project GDSC-SMLM by aherbert.
From the class BenchmarkSmartSpotRanking, method run().
private void run() {
    // Extract all the results in memory into a list per frame. This can be cached.
    boolean refresh = false;
    if (lastId != simulationParameters.id) {
        // Do not get integer coordinates.
        // The Coordinate objects will be PeakResultPoint objects that store the original
        // PeakResult from the MemoryPeakResults.
        actualCoordinates = ResultsMatchCalculator.getCoordinates(results.getResults(), false);
        lastId = simulationParameters.id;
        refresh = true;
    }
    // Extract all the candidates into a list per frame. This can be cached if the settings
    // have not changed.
    if (refresh || lastFilterId != BenchmarkSpotFilter.filterResult.id ||
            lastFractionPositives != fractionPositives ||
            lastFractionNegativesAfterAllPositives != fractionNegativesAfterAllPositives ||
            lastNegativesAfterAllPositives != negativesAfterAllPositives) {
        filterCandidates = subsetFilterResults(BenchmarkSpotFilter.filterResult.filterResults);
        lastFilterId = BenchmarkSpotFilter.filterResult.id;
        lastFractionPositives = fractionPositives;
        lastFractionNegativesAfterAllPositives = fractionNegativesAfterAllPositives;
        lastNegativesAfterAllPositives = negativesAfterAllPositives;
    }
    final ImageStack stack = imp.getImageStack();
    // Create a pool of workers
    final int nThreads = Prefs.getThreads();
    final BlockingQueue<Integer> jobs = new ArrayBlockingQueue<Integer>(nThreads * 2);
    List<Worker> workers = new LinkedList<Worker>();
    List<Thread> threads = new LinkedList<Thread>();
    for (int i = 0; i < nThreads; i++) {
        Worker worker = new Worker(jobs, stack, actualCoordinates, filterCandidates);
        Thread t = new Thread(worker);
        workers.add(worker);
        threads.add(t);
        t.start();
    }
    // Process the frames
    totalProgress = filterCandidates.size();
    stepProgress = Utils.getProgressInterval(totalProgress);
    progress = 0;
    filterCandidates.forEachKey(new TIntProcedure() {
        public boolean execute(int value) {
            put(jobs, value);
            return true;
        }
    });
    // Finish all the worker threads by passing in a -1 job as the termination signal
    for (int i = 0; i < threads.size(); i++) {
        put(jobs, -1);
    }
    // Wait for all to finish
    for (int i = 0; i < threads.size(); i++) {
        try {
            threads.get(i).join();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
    threads.clear();
    IJ.showProgress(1);
    if (Utils.isInterrupted()) {
        IJ.showStatus("Aborted");
        return;
    }
    IJ.showStatus("Collecting results ...");
    rankResultsId++;
    rankResults = new TIntObjectHashMap<RankResults>();
    for (Worker w : workers) {
        rankResults.putAll(w.results);
    }
    summariseResults(rankResults);
    IJ.showStatus("");
}
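Both run() methods on this page share the same worker-pool shutdown idiom: job indices are pushed onto a bounded BlockingQueue, one worker thread is started per available CPU, and one -1 sentinel per thread is queued so every worker terminates. The following self-contained sketch isolates that idiom; FrameWorker, processFrame and the frame count of 100 are hypothetical stand-ins for the plugin's Worker class and its per-frame processing, not part of GDSC-SMLM.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

/**
 * Minimal sketch of the worker-pool pattern used by the run() methods above.
 * FrameWorker and processFrame(...) are hypothetical stand-ins, not GDSC-SMLM classes.
 */
public class WorkerPoolSketch {
    /** Worker that consumes frame indices until it receives the -1 sentinel. */
    static class FrameWorker implements Runnable {
        private final BlockingQueue<Integer> jobs;

        FrameWorker(BlockingQueue<Integer> jobs) {
            this.jobs = jobs;
        }

        @Override
        public void run() {
            try {
                for (;;) {
                    final int frame = jobs.take();
                    if (frame < 0) {
                        // Poison pill: stop this worker
                        break;
                    }
                    processFrame(frame);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }

        private void processFrame(int frame) {
            // Placeholder for the per-frame work (spot ranking / fitting in the plugins above)
            System.out.println("Processed frame " + frame);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        final int nThreads = Runtime.getRuntime().availableProcessors();
        // Bounded queue so the producer blocks instead of building an unbounded backlog
        final BlockingQueue<Integer> jobs = new ArrayBlockingQueue<>(nThreads * 2);
        final List<Thread> threads = new ArrayList<>();
        for (int i = 0; i < nThreads; i++) {
            final Thread t = new Thread(new FrameWorker(jobs));
            threads.add(t);
            t.start();
        }
        // Submit the jobs (frame indices)
        for (int frame = 1; frame <= 100; frame++) {
            jobs.put(frame);
        }
        // One -1 sentinel per worker so every thread terminates
        for (int i = 0; i < threads.size(); i++) {
            jobs.put(-1);
        }
        for (Thread t : threads) {
            t.join();
        }
    }
}

One sentinel per thread is needed because each worker stops after consuming a single -1; a lone sentinel would shut down only one of the workers.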
Use of gdsc.smlm.ij.plugins.ResultsMatchCalculator.PeakResultPoint in project GDSC-SMLM by aherbert.
From the class BenchmarkSpotFit, method run().
private void run() {
    // Extract all the results in memory into a list per frame. This can be cached.
    boolean refresh = false;
    if (lastId != simulationParameters.id) {
        // Do not get integer coordinates.
        // The Coordinate objects will be PeakResultPoint objects that store the original
        // PeakResult from the MemoryPeakResults.
        actualCoordinates = ResultsMatchCalculator.getCoordinates(results.getResults(), false);
        lastId = simulationParameters.id;
        refresh = true;
    }
    // Extract all the candidates into a list per frame. This can be cached if the settings
    // have not changed.
    final int width = (config.isIncludeNeighbours()) ? config.getRelativeFitting() : 0;
    final Settings settings = new Settings(BenchmarkSpotFilter.filterResult.id, fractionPositives,
            fractionNegativesAfterAllPositives, negativesAfterAllPositives, width);
    if (refresh || !settings.equals(lastSettings)) {
        filterCandidates = subsetFilterResults(BenchmarkSpotFilter.filterResult.filterResults, width);
        lastSettings = settings;
        lastFilterId = BenchmarkSpotFilter.filterResult.id;
    }
    stopWatch = StopWatch.createStarted();
    final ImageStack stack = imp.getImageStack();
    clearFitResults();
    // Save results to memory
    MemoryPeakResults peakResults = new MemoryPeakResults();
    peakResults.copySettings(this.results);
    peakResults.setName(TITLE);
    MemoryPeakResults.addResults(peakResults);
    // Create a pool of workers
    final int nThreads = Prefs.getThreads();
    BlockingQueue<Integer> jobs = new ArrayBlockingQueue<Integer>(nThreads * 2);
    List<Worker> workers = new LinkedList<Worker>();
    List<Thread> threads = new LinkedList<Thread>();
    for (int i = 0; i < nThreads; i++) {
        Worker worker = new Worker(jobs, stack, actualCoordinates, filterCandidates, peakResults);
        Thread t = new Thread(worker);
        workers.add(worker);
        threads.add(t);
        t.start();
    }
    // Fit the frames
    long runTime = System.nanoTime();
    totalProgress = stack.getSize();
    stepProgress = Utils.getProgressInterval(totalProgress);
    progress = 0;
    for (int i = 1; i <= totalProgress; i++) {
        put(jobs, i);
    }
    // Finish all the worker threads by passing in a -1 job as the termination signal
    for (int i = 0; i < threads.size(); i++) {
        put(jobs, -1);
    }
    // Wait for all to finish
    for (int i = 0; i < threads.size(); i++) {
        try {
            threads.get(i).join();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
    threads.clear();
    IJ.showProgress(1);
    runTime = System.nanoTime() - runTime;
    if (Utils.isInterrupted()) {
        return;
    }
    stopWatch.stop();
    final String timeString = stopWatch.toString();
    IJ.log("Spot fit time : " + timeString);
    IJ.showStatus("Collecting results ...");
    fitResultsId++;
    fitResults = new TIntObjectHashMap<FilterCandidates>();
    for (Worker w : workers) {
        fitResults.putAll(w.results);
    }
    // Assign a unique ID to each result
    int count = 0;
    // Materialise into an array since we use it twice
    FilterCandidates[] candidates = fitResults.values(new FilterCandidates[fitResults.size()]);
    for (FilterCandidates result : candidates) {
        for (int i = 0; i < result.fitResult.length; i++) {
            final MultiPathFitResult fitResult = result.fitResult[i];
            count += count(fitResult.getSingleFitResult());
            count += count(fitResult.getMultiFitResult());
            count += count(fitResult.getDoubletFitResult());
            count += count(fitResult.getMultiDoubletFitResult());
        }
    }
    PreprocessedPeakResult[] preprocessedPeakResults = new PreprocessedPeakResult[count];
    count = 0;
    for (FilterCandidates result : candidates) {
        for (int i = 0; i < result.fitResult.length; i++) {
            final MultiPathFitResult fitResult = result.fitResult[i];
            count = store(fitResult.getSingleFitResult(), count, preprocessedPeakResults);
            count = store(fitResult.getMultiFitResult(), count, preprocessedPeakResults);
            count = store(fitResult.getDoubletFitResult(), count, preprocessedPeakResults);
            count = store(fitResult.getMultiDoubletFitResult(), count, preprocessedPeakResults);
        }
    }
    summariseResults(fitResults, runTime, preprocessedPeakResults, count);
    IJ.showStatus("");
}
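The tail of BenchmarkSpotFit.run() flattens the nested fit results into a single PreprocessedPeakResult[] with a two-pass count-then-store scheme: a first pass counts the results so the output array can be allocated exactly once, and a second pass copies them in while threading the write offset through each call. The sketch below shows the same scheme on plain arrays, assuming hypothetical countItems and storeItems helpers in place of the plugin's count(...) and store(...) methods.

/**
 * Sketch of the two-pass count-then-store flattening used at the end of
 * BenchmarkSpotFit.run(). The nested int[][] stands in for the per-candidate
 * fit results; countItems/storeItems are hypothetical analogues of the
 * plugin's count(...) and store(...) helpers.
 */
public class CountThenStoreSketch {
    private static int countItems(int[] block) {
        return (block == null) ? 0 : block.length;
    }

    private static int storeItems(int[] block, int offset, int[] output) {
        if (block == null) {
            return offset;
        }
        System.arraycopy(block, 0, output, offset, block.length);
        return offset + block.length;
    }

    public static void main(String[] args) {
        final int[][] nestedResults = { { 1, 2 }, null, { 3 }, { 4, 5, 6 } };

        // Pass 1: count so the output array can be allocated exactly once
        int count = 0;
        for (int[] block : nestedResults) {
            count += countItems(block);
        }

        // Pass 2: copy into the pre-allocated array, threading the offset through
        final int[] flattened = new int[count];
        int offset = 0;
        for (int[] block : nestedResults) {
            offset = storeItems(block, offset, flattened);
        }

        System.out.println(java.util.Arrays.toString(flattened)); // [1, 2, 3, 4, 5, 6]
    }
}

Returning the updated offset from storeItems, as the plugin's store(...) does with count, keeps the fill position explicit without relying on a shared mutable index.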