Use of gnu.trove.procedure.TIntProcedure in project GDSC-SMLM by aherbert:
class ResultsMatchCalculator, method getTimepoints.
/**
 * Merges the time points from each map into a single sorted array of unique time points.
 *
 * @param actualCoordinates the actual coordinates, keyed by time point
 * @param predictedCoordinates the predicted coordinates, keyed by time point
 * @return the sorted array of unique time points present in either map
 */
private static int[] getTimepoints(TIntObjectHashMap<ArrayList<Coordinate>> actualCoordinates, TIntObjectHashMap<ArrayList<Coordinate>> predictedCoordinates) {
    // Collect the keys inline to avoid materialising intermediate key arrays.
    // Size the set for the worst case (disjoint key sets) to avoid rehashing.
    final TIntHashSet hashset = new TIntHashSet(actualCoordinates.size() + predictedCoordinates.size());
    final TIntProcedure p = new TIntProcedure() {
        public boolean execute(int value) {
            hashset.add(value);
            // Returning true continues the key iteration over the whole map
            return true;
        }
    };
    actualCoordinates.forEachKey(p);
    predictedCoordinates.forEachKey(p);
    final int[] set = hashset.toArray();
    Arrays.sort(set);
    return set;
}
Use of gnu.trove.procedure.TIntProcedure in project GDSC-SMLM by aherbert:
class BlinkEstimatorTest, method findOptimalFittedPoints.
@Test
public void findOptimalFittedPoints() {
    final int nParticles = 1000;
    final double fixedFraction = 1;
    for (boolean timeAtLowerBound : new boolean[] { false }) {
        // Histogram of how often each fitted-point count was optimal
        final int[] histogram = new int[MAX_FITTED_POINTS + 1];
        int nTests = 0;
        for (int repeat = 0; repeat < 3; repeat++) {
            for (double meanBlinks : nBlinks) {
                for (int index = 0; index < tOn.length; index++) {
                    nTests++;
                    final TIntHashSet result = estimateBlinking(meanBlinks, tOn[index], tOff[index], nParticles, fixedFraction, timeAtLowerBound, false);
                    // Tally every optimal point count reported for this run
                    result.forEach(new TIntProcedure() {
                        public boolean execute(int v) {
                            histogram[v]++;
                            return true;
                        }
                    });
                }
            }
        }
        System.out.printf("Time@LowerBound = %b\n", timeAtLowerBound);
        // Render the histogram as a simple text bar chart
        for (int points = MIN_FITTED_POINTS; points <= MAX_FITTED_POINTS; points++) {
            System.out.printf("%2d = %2d/%2d |", points, histogram[points], nTests);
            for (int bar = 0; bar < histogram[points]; bar++) {
                System.out.printf("-");
            }
            System.out.printf("\n");
        }
    }
}
Use of gnu.trove.procedure.TIntProcedure in project GDSC-SMLM by aherbert:
class BenchmarkSmartSpotRanking, method run.
/**
 * Runs the spot-ranking benchmark: caches the simulated results and filter candidates,
 * processes each frame on a pool of worker threads, then collects and summarises the
 * per-frame ranking results.
 */
private void run() {
    // Extract all the results in memory into a list per frame. This can be cached
    boolean refresh = false;
    if (lastId != simulationParameters.id) {
        // Do not get integer coordinates
        // The Coordinate objects will be PeakResultPoint objects that store the original PeakResult
        // from the MemoryPeakResults
        actualCoordinates = ResultsMatchCalculator.getCoordinates(results.getResults(), false);
        lastId = simulationParameters.id;
        refresh = true;
    }
    // Extract all the candidates into a list per frame. This can be cached if the settings have not changed
    if (refresh || lastFilterId != BenchmarkSpotFilter.filterResult.id || lastFractionPositives != fractionPositives || lastFractionNegativesAfterAllPositives != fractionNegativesAfterAllPositives || lastNegativesAfterAllPositives != negativesAfterAllPositives) {
        filterCandidates = subsetFilterResults(BenchmarkSpotFilter.filterResult.filterResults);
        lastFilterId = BenchmarkSpotFilter.filterResult.id;
        lastFractionPositives = fractionPositives;
        lastFractionNegativesAfterAllPositives = fractionNegativesAfterAllPositives;
        lastNegativesAfterAllPositives = negativesAfterAllPositives;
    }
    final ImageStack stack = imp.getImageStack();
    // Create a pool of workers
    final int nThreads = Prefs.getThreads();
    final BlockingQueue<Integer> jobs = new ArrayBlockingQueue<Integer>(nThreads * 2);
    List<Worker> workers = new LinkedList<Worker>();
    List<Thread> threads = new LinkedList<Thread>();
    for (int i = 0; i < nThreads; i++) {
        Worker worker = new Worker(jobs, stack, actualCoordinates, filterCandidates);
        Thread t = new Thread(worker);
        workers.add(worker);
        threads.add(t);
        t.start();
    }
    // Process the frames
    totalProgress = filterCandidates.size();
    stepProgress = Utils.getProgressInterval(totalProgress);
    progress = 0;
    filterCandidates.forEachKey(new TIntProcedure() {
        public boolean execute(int value) {
            put(jobs, value);
            return true;
        }
    });
    // Finish all the worker threads by passing in a poison-pill job (-1)
    for (int i = 0; i < threads.size(); i++) {
        put(jobs, -1);
    }
    // Wait for all to finish
    for (int i = 0; i < threads.size(); i++) {
        try {
            threads.get(i).join();
        } catch (InterruptedException e) {
            e.printStackTrace();
            // Restore the interrupt status so callers can observe the interruption
            Thread.currentThread().interrupt();
        }
    }
    threads.clear();
    IJ.showProgress(1);
    if (Utils.isInterrupted()) {
        IJ.showStatus("Aborted");
        return;
    }
    IJ.showStatus("Collecting results ...");
    rankResultsId++;
    rankResults = new TIntObjectHashMap<RankResults>();
    for (Worker w : workers) {
        rankResults.putAll(w.results);
    }
    summariseResults(rankResults);
    IJ.showStatus("");
}
Use of gnu.trove.procedure.TIntProcedure in project GDSC-SMLM by aherbert:
class BenchmarkSpotFilter, method run.
/**
 * Runs the spot-filter benchmark for the given fit configuration.
 * <p>
 * Caches the per-frame coordinates (recomputing PSF overlap on a worker pool when the
 * simulation changes), then filters every frame of the image stack on a second worker
 * pool and summarises the filter results.
 *
 * @param config the fit engine configuration used to create the spot filter
 * @param relativeDistances true to use distances relative to the PSF width
 * @param batchSummary true to log a summary suitable for batch mode
 * @return the benchmark filter result, or null if interrupted
 */
private BenchmarkFilterResult run(FitEngineConfiguration config, boolean relativeDistances, boolean batchSummary) {
    if (Utils.isInterrupted())
        return null;
    MaximaSpotFilter spotFilter = config.createSpotFilter(relativeDistances);
    // Extract all the results in memory into a list per frame. This can be cached
    if (// || lastRelativeDistances != relativeDistances)
    lastId != simulationParameters.id) {
        // Always use float coordinates.
        // The Worker adds a pixel offset for the spot coordinates.
        TIntObjectHashMap<ArrayList<Coordinate>> coordinates = ResultsMatchCalculator.getCoordinates(results.getResults(), false);
        actualCoordinates = new TIntObjectHashMap<PSFSpot[]>();
        lastId = simulationParameters.id;
        //lastRelativeDistances = relativeDistances;
        // Store these so we can reset them
        final int total = totalProgress;
        final String prefix = progressPrefix;
        // Spot PSFs may overlap so we must determine the amount of signal overlap and amplitude effect
        // for each spot...
        IJ.showStatus("Computing PSF overlap ...");
        final int nThreads = Prefs.getThreads();
        final BlockingQueue<Integer> jobs = new ArrayBlockingQueue<Integer>(nThreads * 2);
        List<OverlapWorker> workers = new LinkedList<OverlapWorker>();
        List<Thread> threads = new LinkedList<Thread>();
        for (int i = 0; i < nThreads; i++) {
            OverlapWorker worker = new OverlapWorker(jobs, coordinates);
            Thread t = new Thread(worker);
            workers.add(worker);
            threads.add(t);
            t.start();
        }
        // Process the frames
        totalProgress = coordinates.size();
        stepProgress = Utils.getProgressInterval(totalProgress);
        progress = 0;
        coordinates.forEachKey(new TIntProcedure() {
            public boolean execute(int value) {
                put(jobs, value);
                return true;
            }
        });
        // Finish all the worker threads by passing in a poison-pill job (-1)
        for (int i = 0; i < threads.size(); i++) {
            put(jobs, -1);
        }
        // Wait for all to finish, collecting each worker's coordinates as it completes
        for (int i = 0; i < threads.size(); i++) {
            try {
                threads.get(i).join();
            } catch (InterruptedException e) {
                e.printStackTrace();
                // Restore the interrupt status so callers can observe the interruption
                Thread.currentThread().interrupt();
            }
            actualCoordinates.putAll(workers.get(i).coordinates);
        }
        threads.clear();
        IJ.showProgress(-1);
        IJ.showStatus("");
        setupProgress(total, prefix);
    }
    if (!batchMode)
        IJ.showStatus("Computing results ...");
    final ImageStack stack = imp.getImageStack();
    float background = 0;
    if (spotFilter.isAbsoluteIntensity()) {
        // To allow the signal factor to be computed we need to lower the image by the background so
        // that the intensities correspond to the results amplitude.
        // Just assume the background is uniform.
        double sum = 0;
        for (PeakResult r : results) sum += r.getBackground();
        background = (float) (sum / results.size());
    }
    // Create a pool of workers
    final int nThreads = Prefs.getThreads();
    BlockingQueue<Integer> jobs = new ArrayBlockingQueue<Integer>(nThreads * 2);
    List<Worker> workers = new LinkedList<Worker>();
    List<Thread> threads = new LinkedList<Thread>();
    for (int i = 0; i < nThreads; i++) {
        Worker worker = new Worker(jobs, stack, spotFilter, background);
        Thread t = new Thread(worker);
        workers.add(worker);
        threads.add(t);
        t.start();
    }
    // Fit the frames
    for (int i = 1; i <= stack.getSize(); i++) {
        put(jobs, i);
    }
    // Finish all the worker threads by passing in a poison-pill job (-1)
    for (int i = 0; i < threads.size(); i++) {
        put(jobs, -1);
    }
    // Wait for all to finish
    for (int i = 0; i < threads.size(); i++) {
        try {
            threads.get(i).join();
        } catch (InterruptedException e) {
            e.printStackTrace();
            // Restore the interrupt status so callers can observe the interruption
            Thread.currentThread().interrupt();
        }
    }
    threads.clear();
    if (Utils.isInterrupted())
        return null;
    if (!batchMode) {
        IJ.showProgress(-1);
        IJ.showStatus("Collecting results ...");
    }
    TIntObjectHashMap<FilterResult> filterResults = new TIntObjectHashMap<FilterResult>();
    time = 0;
    for (Worker w : workers) {
        time += w.time;
        filterResults.putAll(w.results);
    }
    // Show a table of the results
    BenchmarkFilterResult filterResult = summariseResults(filterResults, config, spotFilter, relativeDistances, batchSummary);
    if (!batchMode)
        IJ.showStatus("");
    return filterResult;
}
Use of gnu.trove.procedure.TIntProcedure in project cogcomp-nlp by CogComp:
class CoreferenceView, method getCanonicalEntities.
/**
 * Returns the set of canonical entity constituents.
 * <p>
 * Note: the previous implementation returned {@code cc.add(...)} from the
 * {@link TIntProcedure}; trove's {@code forEachKey} stops iterating as soon as the
 * procedure returns {@code false}, so the first duplicate constituent silently
 * truncated the result. The procedure now always returns {@code true} so every key
 * is visited.
 *
 * @return the set of canonical entities
 */
@Deprecated
public Set<Constituent> getCanonicalEntities() {
    findCanonicalEntries();
    final Set<Constituent> cc = new HashSet<>();
    this.canonicalEntitiesMap.forEachKey(new TIntProcedure() {
        @Override
        public boolean execute(int value) {
            cc.add(constituents.get(value));
            // Always continue the iteration; a false return would abort it early
            return true;
        }
    });
    return cc;
}
Aggregations