Use of uk.ac.sussex.gdsc.smlm.data.config.UnitProtos.IntensityUnit in project GDSC-SMLM by aherbert.
The class CreateData, method setNoise.
/**
* Sets the noise in the results if missing.
*
* @param results the results
* @param imp the image
*/
private static void setNoise(MemoryPeakResults results, ImagePlus imp) {
  // Loaded results do not have noise
  if (results.hasNoise()) {
    return;
  }

  IJ.showStatus("Estimating noise ...");

  // Compute noise per frame
  final ImageStack stack = imp.getImageStack();
  final int width = stack.getWidth();
  final int height = stack.getHeight();
  final IJImageSource source = new IJImageSource(imp);
  final float[] noise = new float[source.getFrames() + 1];
  source.setReadHint(ReadHint.SEQUENTIAL);
  source.open();
  for (int slice = 1; slice < noise.length; slice++) {
    final float[] data = source.next();
    // Use the trimmed method as there may be a lot of spots in the frame
    noise[slice] = FitWorker.estimateNoise(data, width, height,
        NoiseEstimatorMethod.QUICK_RESIDUALS_LEAST_TRIMMED_OF_SQUARES);
  }

  // Statistics stats = Statistics.create(Arrays.copyOfRange(noise, 1, noise.length));
  // System.out.printf("Noise = %.3f +/- %.3f (%d)\n", stats.getMean(),
  //     stats.getStandardDeviation(), stats.getN());

  // Convert noise units from counts to the result format
  final TypeConverter<IntensityUnit> c = results.getIntensityConverter(IntensityUnit.COUNT);
  for (int i = 0; i < noise.length; i++) {
    noise[i] = c.convertBack(noise[i]);
  }

  results.forEach((PeakResultProcedure) result -> {
    if (result.getFrame() < noise.length) {
      result.setNoise(noise[result.getFrame()]);
    }
  });
}
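As a clarification of the conversion step above, the sketch below (standalone code, not part of GDSC-SMLM; the gain value and the linear count-to-photon model are assumptions) shows why the per-frame noise, which is estimated in camera counts, is mapped back into the intensity unit of the results before it is stored.

// Minimal standalone sketch, assuming a linear camera gain model. In the plugin
// this mapping is performed by the TypeConverter returned from
// results.getIntensityConverter(IntensityUnit.COUNT) via convertBack(...).
public class NoiseUnitSketch {
  public static void main(String[] args) {
    final double countPerPhoton = 45.0; // hypothetical camera gain (counts per photon)
    final float[] noiseInCounts = {12.5f, 13.1f, 11.8f}; // per-frame noise estimates

    final float[] noiseInPhotons = new float[noiseInCounts.length];
    for (int i = 0; i < noiseInCounts.length; i++) {
      // A zero-mean noise level scales by the gain only (no bias offset).
      noiseInPhotons[i] = (float) (noiseInCounts[i] / countPerPhoton);
    }
    System.out.println(java.util.Arrays.toString(noiseInPhotons));
  }
}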
Use of uk.ac.sussex.gdsc.smlm.data.config.UnitProtos.IntensityUnit in project GDSC-SMLM by aherbert.
The class CreateData, method showSimulationParametersDialog.
private SimulationParameters showSimulationParametersDialog(ImagePlus imp,
    MemoryPeakResults results) {
  final int molecules = results.size();

  // Get the missing parameters from the user
  boolean fullSimulation = false;
  double sd = -1;

  if (!results.convertToPreferredUnits()) {
    IJ.error(TITLE, String.format("Results should be in the preferred units (%s,%s)",
        UnitHelper.getName(MemoryPeakResults.PREFERRED_DISTANCE_UNIT),
        UnitHelper.getName(MemoryPeakResults.PREFERRED_INTENSITY_UNIT)));
    return null;
  }

  // Get these from the data
  final RawResultProcedure sp = new RawResultProcedure(results);
  sp.getBixyz();
  final float[] signal = sp.intensity;
  float[] limits = MathUtils.limits(signal);
  final double minSignal = limits[0];
  final double maxSignal = limits[1];
  final double signalPerFrame = MathUtils.sum(signal) / molecules;

  final float[] depths = sp.z;
  limits = MathUtils.limits(depths);
  float depth = Math.max(Math.abs(limits[0]), Math.abs(limits[1]));
  final boolean fixedDepth = Double.compare(limits[0], limits[1]) == 0;

  final CalibrationWriter cal = results.getCalibrationWriter();
  final String iUnits = " " + UnitHelper.getName(cal.getIntensityUnit());
  final String zUnits = " " + UnitHelper.getName(cal.getDistanceUnit());

  // Get this from the user
  double background = -1;

  // Use last simulation parameters for missing settings.
  // This is good if we are re-running the plugin to load data.
  Rectangle lastCameraBounds = null;
  if (simulationParameters != null && simulationParameters.isLoaded()) {
    fullSimulation = simulationParameters.fullSimulation;
    sd = simulationParameters.sd;
    background = simulationParameters.background;
    if (!cal.hasBias()) {
      cal.setBias(simulationParameters.bias);
    }
    if (!cal.hasCountPerPhoton()) {
      cal.setCountPerPhoton(simulationParameters.gain);
    }
    if (!cal.hasQuantumEfficiency()) {
      cal.setQuantumEfficiency(simulationParameters.qe);
    }
    if (!cal.hasReadNoise()) {
      cal.setReadNoise(simulationParameters.readNoise);
    }
    if (!cal.hasCameraType()) {
      cal.setCameraType(simulationParameters.cameraType);
    }
    if (!cal.hasNmPerPixel()) {
      cal.setNmPerPixel(simulationParameters.pixelPitch);
    }
    if (!cal.hasCameraModelName()) {
      cal.setCameraModelName(simulationParameters.cameraModelName);
    }
    lastCameraBounds = simulationParameters.cameraBounds;
  }

  // Show a dialog to confirm settings
  final ExtendedGenericDialog gd = new ExtendedGenericDialog(TITLE);
  final StringBuilder sb = new StringBuilder();
  sb.append("Results contain ").append(TextUtils.pleural(molecules, "molecule")).append('\n');
  sb.append("Min signal = ").append(MathUtils.rounded(minSignal)).append(iUnits).append('\n');
  sb.append("Max signal = ").append(MathUtils.rounded(maxSignal)).append(iUnits).append('\n');
  sb.append("Av signal = ").append(MathUtils.rounded(signalPerFrame)).append(iUnits).append('\n');
  if (fixedDepth) {
    sb.append("Fixed depth = ").append(MathUtils.rounded(depth)).append(zUnits).append('\n');
  }
  gd.addMessage(sb.toString());
  gd.addCheckbox("Flourophore_simulation", fullSimulation);
  gd.addNumericField("Gaussian_SD", sd, 3, 8, "nm");
  gd.addNumericField("Pixel_pitch", cal.getNmPerPixel(), 3, 8, "nm");
  gd.addNumericField("Background", background, 3, 8, "photon");

  // Camera type does not need the full simulation settings. Plus the units are different
  // so just re-implement.
  gd.addChoice("Camera_type", SettingsManager.getCameraTypeNames(),
      CalibrationProtosHelper.getName(cal.getCameraType()), new OptionListener<Integer>() {
        @Override
        public boolean collectOptions(Integer field) {
          cal.setCameraType(SettingsManager.getCameraTypeValues()[field]);
          return collectOptions(false);
        }

        @Override
        public boolean collectOptions() {
          return collectOptions(true);
        }

        private boolean collectOptions(boolean silent) {
          final CameraType cameraType = cal.getCameraType();
          final boolean isCcd = CalibrationProtosHelper.isCcdCameraType(cameraType);
          final ExtendedGenericDialog egd = new ExtendedGenericDialog(TITLE, null);
          if (isCcd) {
            egd.addNumericField("Total_gain", cal.getCountPerPhoton(), 3, 8, "count/photon");
            egd.addNumericField("Quantum_efficiency", cal.getQuantumEfficiency(), 3, 8,
                "e-/photon");
            egd.addNumericField("Read_noise", cal.getReadNoise(), 3, 8, "count");
            egd.addNumericField("Bias", cal.getBias(), 3, 8, "count");
          } else if (cameraType == CameraType.SCMOS) {
            final String[] models = CameraModelManager.listCameraModels(true);
            egd.addChoice("Camera_model_name", models, cal.getCameraModelName());
            egd.addNumericField("Quantum_efficiency", cal.getQuantumEfficiency(), 2, 6,
                "electron/photon");
          } else {
            IJ.error("Unsupported camera type " + CalibrationProtosHelper.getName(cameraType));
            return false;
          }
          egd.setSilent(silent);
          egd.showDialog(true, gd);
          if (egd.wasCanceled()) {
            return false;
          }
          if (isCcd) {
            cal.setCountPerPhoton(egd.getNextNumber());
            cal.setQuantumEfficiency(egd.getNextNumber());
            cal.setReadNoise(egd.getNextNumber());
            cal.setBias(egd.getNextNumber());
          } else if (cameraType == CameraType.SCMOS) {
            cal.setCameraModelName(egd.getNextChoice());
            cal.setQuantumEfficiency(Math.abs(egd.getNextNumber()));
          }
          return true;
        }
      });
  if (!fixedDepth) {
    gd.addNumericField("Depth", depth, 3, 8, "pixel");
  }
  gd.addHelp(HelpUrls.getUrl("load-benchmark-data"));
  gd.showDialog();
  if (gd.wasCanceled()) {
    return null;
  }

  fullSimulation = gd.getNextBoolean();
  sd = gd.getNextNumber();
  cal.setNmPerPixel(gd.getNextNumber());
  background = gd.getNextNumber();
  cal.setCameraType(SettingsManager.getCameraTypeValues()[gd.getNextChoiceIndex()]);

  float myDepth = depth;
  if (!fixedDepth) {
    myDepth = (float) gd.getNextNumber();
    if (myDepth < depth) {
      IJ.error(TITLE, String.format(
          "Input depth is smaller than the depth guessed from the data: %f < %f", myDepth, depth));
      return null;
    }
    depth = myDepth;
  }

  gd.collectOptions();

  // Validate settings
  Rectangle modelBounds = null;
  try {
    ParameterUtils.isAboveZero("Gaussian SD", sd);
    ParameterUtils.isAboveZero("Pixel pitch", cal.getNmPerPixel());
    ParameterUtils.isPositive("Background", background);
    ParameterUtils.isAboveZero("Quantum efficiency", cal.getQuantumEfficiency());
    ParameterUtils.isEqualOrBelow("Quantum efficiency", cal.getQuantumEfficiency(), 1);
    if (cal.isCcdCamera()) {
      ParameterUtils.isAboveZero("Total gain", cal.getCountPerPhoton());
      ParameterUtils.isPositive("Read noise", cal.getReadNoise());
      ParameterUtils.isPositive("Bias", cal.getBias());
    } else if (cal.isScmos()) {
      // Load the model
      cameraModel = CameraModelManager.load(cal.getCameraModelName());
      if (cameraModel == null) {
        IJ.error(TITLE, "Unknown camera model for name: " + cal.getCameraModelName());
        return null;
      }
      int ox = 0;
      int oy = 0;
      if (lastCameraBounds != null) {
        ox = lastCameraBounds.x;
        oy = lastCameraBounds.y;
      }
      cameraModel = PeakFit.cropCameraModel(cameraModel,
          new Rectangle(ox, oy, imp.getWidth(), imp.getHeight()), null, false);
      modelBounds = cameraModel.getBounds();
      final IJImageSource imageSource = (IJImageSource) results.getSource();
      imageSource.setOrigin(modelBounds.x, modelBounds.y);
      cal.clearGlobalCameraSettings();
    } else {
      IJ.error(TITLE, "Unknown camera type: " + cal.getCameraType());
      return null;
    }
  } catch (final IllegalArgumentException ex) {
    IJ.error(TITLE, ex.getMessage());
    return null;
  }

  // Store calibration
  results.setCalibration(cal.getCalibration());

  final double a = cal.getNmPerPixel();
  final double bias = cal.getBias();
  final double gain = cal.getCountPerPhoton();
  final double readNoise = cal.getReadNoise();
  final double qe = cal.getQuantumEfficiency();

  // Note: The calibration will throw an exception if the converter cannot be created.
  // This is OK as the data will be invalid.

  // Convert +/- depth to total depth in nm
  depth = cal.getDistanceConverter(DistanceUnit.NM).convert(depth * 2);

  // Compute total background variance in photons
  final double backgroundVariance = background;
  // Do not add EM-CCD noise factor. The Mortensen formula also includes this factor
  // so this is "double-counting" the EM-CCD.
  // if (emCCD)
  //   backgroundVariance *= 2;

  // Read noise is in ADUs. Convert to Photons to get contribution to background variance
  final double readNoiseInPhotons = readNoise / gain;

  // Get the expected value at each pixel in photons. Assuming a Poisson distribution this
  // is equal to the total variance at the pixel.
  final double b2 = backgroundVariance + readNoiseInPhotons * readNoiseInPhotons;

  // Convert values to photons
  final TypeConverter<IntensityUnit> ic = cal.getIntensityConverter(IntensityUnit.PHOTON);

  final SimulationParameters p = new SimulationParameters(molecules, fullSimulation, sd, a,
      ic.convert(minSignal), ic.convert(maxSignal), ic.convert(signalPerFrame), depth, fixedDepth,
      bias, gain, qe, readNoise, cal.getCameraType(), cal.getCameraModelName(), modelBounds,
      background, b2, createPsf(sd / a));
  p.loaded = true;
  return p;
}
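The background-variance arithmetic near the end of the method is compact; the following standalone sketch (hypothetical calibration numbers, not GDSC-SMLM code) works the same calculation by hand: read noise in counts is divided by the gain to express it in photons, and the Poisson background contributes its mean as its variance.

// Minimal worked example of the b^2 computation above, using assumed values.
public class BackgroundVarianceSketch {
  public static void main(String[] args) {
    final double background = 5.0; // photons/pixel/frame; Poisson so variance == mean
    final double readNoise = 7.0;  // camera read noise in counts (ADU)
    final double gain = 45.0;      // counts per photon

    final double readNoiseInPhotons = readNoise / gain;
    final double b2 = background + readNoiseInPhotons * readNoiseInPhotons;
    System.out.printf("b^2 = %.4f photons^2 per pixel%n", b2);
  }
}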
Use of uk.ac.sussex.gdsc.smlm.data.config.UnitProtos.IntensityUnit in project GDSC-SMLM by aherbert.
The class PcPalmMolecules, method traceMolecules.
/**
* Trace localisations.
*
* @param results The results
* @param precisions the precisions
* @param distance The distance threshold (nm)
* @param time The time threshold (frames)
* @param singles a list of the singles (not grouped into molecules)
* @return a list of molecules
*/
private static ArrayList<Molecule> traceMolecules(MemoryPeakResults results, double[] precisions,
    double distance, int time, ArrayList<Molecule> singles) {
  // These plugins are not really supported so just leave them to throw an exception if
  // the data cannot be handled
  final TypeConverter<IntensityUnit> ic =
      results.getCalibrationReader().getIntensityConverter(IntensityUnit.PHOTON);
  final TypeConverter<DistanceUnit> dc =
      results.getCalibrationReader().getDistanceConverter(DistanceUnit.NM);

  // Create a new dataset with the precision
  final MemoryPeakResults results2 = new MemoryPeakResults(results.size());
  for (int i = 0, size = results.size(); i < size; i++) {
    final AttributePeakResult peak2 = new AttributePeakResult(results.get(i));
    peak2.setPrecision(precisions[i]);
    results2.add(peak2);
  }

  final TraceManager tm = new TraceManager(results2);
  final double distanceThreshold = dc.convertBack(distance);
  tm.traceMolecules(distanceThreshold, time);
  final Trace[] traces = tm.getTraces();

  final ArrayList<Molecule> molecules = new ArrayList<>(traces.length);
  for (final Trace t : traces) {
    final double p = t.getLocalisationPrecision(dc);
    final float[] centroid = t.getCentroid();
    final List<Molecule> list = t.size() == 1 ? singles : molecules;
    list.add(new Molecule(dc.convert(centroid[0]), dc.convert(centroid[1]), p,
        ic.convert(t.getSignal())));
  }

  log(" %d localisations traced to %d molecules (%d singles, %d traces) using d=%.2f nm,"
      + " t=%d frames (%s s)", results.size(), molecules.size() + singles.size(), singles.size(),
      molecules.size(), distance, time,
      MathUtils.rounded(time * results.getCalibrationReader().getExposureTime() / 1000.0));
  return molecules;
}
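The two converters above run in opposite directions: the user-supplied nm threshold is converted back into the native distance unit of the results before tracing, while the traced centroids and signals are converted forward into nm and photons. A minimal sketch of that pattern, assuming results stored in pixels with a hypothetical pixel pitch, is given below (standalone code, not the GDSC-SMLM API).

// Standalone illustration of convert vs convertBack for distances.
public class DistanceConversionSketch {
  public static void main(String[] args) {
    final double nmPerPixel = 107.0; // hypothetical calibration
    final double distanceNm = 50.0;  // user threshold in nm

    // Equivalent of dc.convertBack(distance): nm -> native unit (pixels here)
    final double distanceThresholdPx = distanceNm / nmPerPixel;

    // Equivalent of dc.convert(centroid): native unit (pixels) -> nm
    final double centroidPx = 12.3;
    final double centroidNm = centroidPx * nmPerPixel;

    System.out.printf("threshold = %.3f px, centroid = %.1f nm%n",
        distanceThresholdPx, centroidNm);
  }
}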
Use of uk.ac.sussex.gdsc.smlm.data.config.UnitProtos.IntensityUnit in project GDSC-SMLM by aherbert.
The class FitEngineConfiguration, method configureOutputUnits.
/**
* Configure the output units from fitting using the current calibration and fit solver settings.
*
* <p>This method should be called before the calibration is passed to any object that will handle
* the fitting output.
*
* <p>It will update the calibration units and the precision method to match that used by
* the precision filter.
*/
public void configureOutputUnits() {
  final FitConfiguration fitConfig = getFitConfiguration();
  // If there is no calibration then the writer will just have the defaults
  final CalibrationWriter calibration = fitConfig.getCalibrationWriterReference();

  // Fitting is always done in pixels and radians
  calibration.setDistanceUnit(DistanceUnit.PIXEL);
  calibration.setAngleUnit(AngleUnit.RADIAN);

  // Most fitters fit in photons unless we have no calibration.
  IntensityUnit intensityUnit = IntensityUnit.PHOTON;
  if (fitConfig.isFitCameraCounts()) {
    intensityUnit = IntensityUnit.COUNT;
  }
  calibration.setIntensityUnit(intensityUnit);

  // This initialises the calibration precision method
  fitConfig.getFilterPrecisionMethod();
}
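The practical consequence of the intensity-unit choice is easiest to see with numbers. The sketch below (standalone, with an assumed linear gain; not the GDSC-SMLM API) shows how a downstream consumer would interpret a fitted intensity depending on whether the calibration was marked COUNT or PHOTON.

// Minimal sketch of why the output unit must match how the fitter worked.
public class OutputUnitSketch {
  public static void main(String[] args) {
    final double countPerPhoton = 45.0;      // hypothetical calibration
    final double fittedIntensity = 22500.0;  // value produced by the fitter
    final boolean fitCameraCounts = true;    // mirrors fitConfig.isFitCameraCounts()

    // If the fitter worked on raw camera counts the calibration must say COUNT,
    // so a later COUNT -> PHOTON conversion divides by the gain; if it already
    // worked in photons no rescaling is required.
    final double photons = fitCameraCounts ? fittedIntensity / countPerPhoton : fittedIntensity;
    System.out.printf("intensity = %.1f photons%n", photons);
  }
}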
Use of uk.ac.sussex.gdsc.smlm.data.config.UnitProtos.IntensityUnit in project GDSC-SMLM by aherbert.
The class IntensityUnitTest, method check.
private static void check(double offset, double countPerPhoton,
    ExpectedUnit<IntensityUnit>... expectedUnits) {
  final int n = expectedUnits.length;
  TypeConverter<IntensityUnit> conv;
  for (int i = 0; i < n; i++) {
    final IntensityUnit u1 = expectedUnits[i].unit;
    final double v1 = expectedUnits[i].value;
    for (int j = 0; j < n; j++) {
      final IntensityUnit u2 = expectedUnits[j].unit;
      conv = UnitConverterUtils.createConverter(u1, u2, offset, countPerPhoton);
      final double o = conv.convert(v1);
      Assertions.assertEquals(expectedUnits[j].value, o, 1e-5, () -> u1 + " to " + u2);
    }
  }
}
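A round-trip check like the one above can be reproduced without the test harness. The sketch below assumes the linear relationship count = photon * countPerPhoton + offset (the exact role of the offset in the GDSC-SMLM converters is an assumption here) and verifies that converting forwards and then inverting recovers the original value.

// Standalone round-trip sketch using an assumed linear intensity model.
public class IntensityRoundTripSketch {
  public static void main(String[] args) {
    final double offset = 100.0;        // assumed camera offset in counts
    final double countPerPhoton = 45.0; // assumed gain
    final double photons = 10.0;

    final double counts = photons * countPerPhoton + offset; // photon -> count
    final double back = (counts - offset) / countPerPhoton;  // count -> photon

    System.out.printf("counts = %.1f, round-trip photons = %.6f%n", counts, back);
  }
}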