Use of uk.ac.sussex.gdsc.smlm.fitting.linear.EjmlLinearSolver in project GDSC-SMLM by aherbert.
From the class FisherInformationMatrix, method invert.
/**
 * Invert the Fisher information matrix and cache the diagonal of the inverse as the
 * Cramer-Rao lower bound (CRLB). The result of the attempt (YES/NO) is cached so the
 * inversion is only ever performed once.
 */
private void invert() {
  // Already attempted: reuse the cached outcome.
  if (inverted != UNKNOWN) {
    return;
  }

  // An empty matrix trivially inverts to an empty CRLB array.
  if (matrix.numCols == 0) {
    crlb = ArrayUtils.EMPTY_DOUBLE_ARRAY;
    inverted = YES;
    return;
  }

  // Assume failure; flipped to YES only when a valid inverse diagonal is obtained.
  inverted = NO;

  final EjmlLinearSolver solver = EjmlLinearSolver.createForInversion(inversionTolerance);
  // invertDiagonal does not modify the input matrix.
  final double[] diagonal = solver.invertDiagonal(matrix);
  if (diagonal == null) {
    return;
  }

  // The diagonal of the inverse must be zero or above (these are variances).
  if (inversionTolerance > 0) {
    // The solver has already validated against the tolerance; clip any residual
    // values just below zero up to zero.
    for (int i = 0; i < matrix.numCols; i++) {
      if (diagonal[i] < 0) {
        diagonal[i] = 0;
      }
    }
  } else {
    // No positive user tolerance was supplied: permit only a small negative error
    // judged against the default tolerance.
    for (int i = 0; i < matrix.numCols; i++) {
      final double value = diagonal[i];
      if (value <= 0) {
        if (value <= -DEFAULT_INVERSION_TOLERANCE) {
          // Outside the default tolerance: the inversion is rejected.
          return;
        }
        diagonal[i] = 0;
      }
    }
  }

  inverted = YES;
  this.crlb = diagonal;
}
Use of uk.ac.sussex.gdsc.smlm.fitting.linear.EjmlLinearSolver in project GDSC-SMLM by aherbert.
From the class LsqLvmGradientProcedureTest, method gradientProcedureComputesSameOutputWithBias.
// Verifies that adding a constant bias b to both the data and the background parameter
// leaves the computed alpha matrix, beta vector and the solved LVM step unchanged:
// the least-squares gradients should be invariant to a constant offset.
@SeededTest
void gradientProcedureComputesSameOutputWithBias(RandomSeed seed) {
final ErfGaussian2DFunction func = new SingleFreeCircularErfGaussian2DFunction(blockWidth, blockWidth);
final int nparams = func.getNumberOfGradients();
final int iter = 100;
final Level logLevel = Level.FINER;
final boolean debug = logger.isLoggable(logLevel);
final ArrayList<double[]> paramsList = new ArrayList<>(iter);
final ArrayList<double[]> yList = new ArrayList<>(iter);
// Reference results computed without bias, captured per iteration for later comparison.
final ArrayList<double[]> alphaList = new ArrayList<>(iter);
final ArrayList<double[]> betaList = new ArrayList<>(iter);
final ArrayList<double[]> xList = new ArrayList<>(iter);
// Manipulate the background (a shared test-class field) and restore it in the finally block.
final double defaultBackground = background;
try {
// Use a near-zero background so the additive bias dominates the offset term.
background = 1e-2;
createData(RngUtils.create(seed.getSeed()), 1, iter, paramsList, yList, true);
final EjmlLinearSolver solver = new EjmlLinearSolver(1e-5, 1e-6);
// First pass: record the unbiased alpha, beta and solved x for each data set.
for (int i = 0; i < paramsList.size(); i++) {
final double[] y = yList.get(i);
final double[] a = paramsList.get(i);
final BaseLsqLvmGradientProcedure p = LsqLvmGradientProcedureUtils.create(y, func);
p.gradient(a);
final double[] beta = p.beta;
alphaList.add(p.getAlphaLinear());
// Clone beta: the solver below overwrites it in place with the solution.
betaList.add(beta.clone());
for (int j = 0; j < nparams; j++) {
// Flag near-zero gradients which could make the solve numerically fragile.
if (Math.abs(beta[j]) < 1e-6) {
logger.log(TestLogUtils.getRecord(Level.INFO, "[%d] Tiny beta %s %g", i, func.getGradientParameterName(j), beta[j]));
}
}
// Solve
if (!solver.solve(p.getAlphaMatrix(), beta)) {
throw new AssertionError();
}
// beta now holds the solution x.
xList.add(beta);
// System.out.println(Arrays.toString(beta));
}
// Second pass: repeat with the bias b added to both the data and the background parameter.
// for (int b = 1; b < 1000; b *= 2)
for (final double b : new double[] { -500, -100, -10, -1, -0.1, 0, 0.1, 1, 10, 100, 500 }) {
final Statistics[] rel = new Statistics[nparams];
final Statistics[] abs = new Statistics[nparams];
if (debug) {
for (int i = 0; i < nparams; i++) {
rel[i] = new Statistics();
abs[i] = new Statistics();
}
}
for (int i = 0; i < paramsList.size(); i++) {
final double[] y = add(yList.get(i), b);
final double[] a = paramsList.get(i).clone();
// a[0] is the background parameter: shift it by the same bias as the data.
a[0] += b;
final BaseLsqLvmGradientProcedure p = LsqLvmGradientProcedureUtils.create(y, func);
p.gradient(a);
final double[] beta = p.beta;
final double[] alpha2 = alphaList.get(i);
final double[] beta2 = betaList.get(i);
final double[] x2 = xList.get(i);
// The gradients must match the unbiased reference to within a tight tolerance.
Assertions.assertArrayEquals(beta2, beta, 1e-10, "Beta");
Assertions.assertArrayEquals(alpha2, p.getAlphaLinear(), 1e-10, "Alpha");
// Solve
solver.solve(p.getAlphaMatrix(), beta);
Assertions.assertArrayEquals(x2, beta, 1e-10, "X");
if (debug) {
// Accumulate relative/absolute error statistics per parameter for the debug report.
for (int j = 0; j < nparams; j++) {
rel[j].add(DoubleEquality.relativeError(x2[j], beta[j]));
abs[j].add(Math.abs(x2[j] - beta[j]));
}
}
}
if (debug) {
for (int i = 0; i < nparams; i++) {
logger.log(TestLogUtils.getRecord(logLevel, "Bias = %.2f : %s : Rel %g +/- %g: Abs %g +/- %g", b, func.getGradientParameterName(i), rel[i].getMean(), rel[i].getStandardDeviation(), abs[i].getMean(), abs[i].getStandardDeviation()));
}
}
}
} finally {
// Restore the shared background field for subsequent tests.
background = defaultBackground;
}
}
Use of uk.ac.sussex.gdsc.smlm.fitting.linear.EjmlLinearSolver in project GDSC-SMLM by aherbert.
From the class LseBaseFunctionSolver, method variance.
/**
 * Compute the variance of the parameters of the function assuming a least squares fit of a
 * Poisson process.
 *
 * <p>Uses the Mortensen formula (Mortensen, et al (2010) Nature Methods 7, 377-383), equation 25.
 *
 * <p>The method involves inversion of a matrix and may fail. Note that on success the input
 * matrix {@code I} is inverted in place.
 *
 * <pre>
 * I = sum_i { Ei,a * Ei,b }
 * E = sum_i { Ei * Ei,a * Ei,b }
 * with
 * i the number of data points fit using least squares using a function of n variable parameters
 * Ei the expected value of the function at i
 * Ei,a the gradient of the function at i with respect to parameter a
 * Ei,b the gradient of the function at i with respect to parameter b
 * </pre>
 *
 * @param I the Iab matrix
 * @param E the Ei_Eia_Eib matrix
 * @return the variance (or null)
 */
public static double[] variance(double[][] I, double[][] E) {
  final int n = I.length;
  // Invert the matrix (in place). A failed inversion means no variance can be computed.
  final EjmlLinearSolver solver = EjmlLinearSolver.createForInversion(1e-2);
  if (!solver.invert(I)) {
    return null;
  }
  // From here I holds I^-1 in the Mortensen notation.
  // Only the diagonal of the covariance matrix is required: var[a] = (I^-1 E I^-1)[a][a].
  final double[] variances = new double[n];
  for (int a = 0; a < n; a++) {
    double sum = 0;
    for (int ap = 0; ap < n; ap++) {
      final double iaap = I[a][ap];
      final double[] erow = E[ap];
      for (int bp = 0; bp < n; bp++) {
        sum += iaap * erow[bp] * I[bp][a];
      }
    }
    variances[a] = sum;
  }
  return variances;
}
Use of uk.ac.sussex.gdsc.smlm.fitting.linear.EjmlLinearSolver in project GDSC-SMLM by aherbert.
From the class GradientCalculatorSpeedTest, method gradientCalculatorComputesSameOutputWithBias.
// Verifies that adding a constant bias b to both the data and the background parameter
// leaves the linearised alpha matrix, beta vector and the solved step unchanged:
// the least-squares gradient calculation should be invariant to a constant offset.
@SeededTest
void gradientCalculatorComputesSameOutputWithBias(RandomSeed seed) {
final Gaussian2DFunction func = new SingleEllipticalGaussian2DFunction(blockWidth, blockWidth);
final int nparams = func.getNumberOfGradients();
final GradientCalculator calc = new GradientCalculator(nparams);
final int n = func.size();
final int iter = 50;
final ArrayList<double[]> paramsList = new ArrayList<>(iter);
final ArrayList<double[]> yList = new ArrayList<>(iter);
// Reference results computed without bias, captured per iteration for later comparison.
final ArrayList<double[][]> alphaList = new ArrayList<>(iter);
final ArrayList<double[]> betaList = new ArrayList<>(iter);
final ArrayList<double[]> xList = new ArrayList<>(iter);
// Manipulate the background (a shared test-class field) and restore it in the finally block.
final double defaultBackground = background;
final boolean report = logger.isLoggable(Level.INFO);
try {
// Use a near-zero background so the additive bias dominates the offset term.
background = 1e-2;
createData(RngUtils.create(seed.getSeed()), 1, iter, paramsList, yList, true);
final EjmlLinearSolver solver = new EjmlLinearSolver(1e-5, 1e-6);
// First pass: record the unbiased alpha, beta and solved x for each data set.
for (int i = 0; i < paramsList.size(); i++) {
final double[] y = yList.get(i);
final double[] a = paramsList.get(i);
final double[][] alpha = new double[nparams][nparams];
final double[] beta = new double[nparams];
calc.findLinearised(n, y, a, alpha, beta, func);
alphaList.add(alpha);
// Clone beta: the solver below overwrites it in place with the solution.
betaList.add(beta.clone());
for (int j = 0; j < nparams; j++) {
// Flag near-zero gradients which could make the solve numerically fragile.
if (Math.abs(beta[j]) < 1e-6) {
logger.info(FunctionUtils.getSupplier("[%d] Tiny beta %s %g", i, func.getGradientParameterName(j), beta[j]));
}
}
// Solve
if (!solver.solve(alpha, beta)) {
throw new AssertionError();
}
// beta now holds the solution x.
xList.add(beta);
// System.out.println(Arrays.toString(beta));
}
// Working storage reused across all bias values in the second pass.
final double[][] alpha = new double[nparams][nparams];
final double[] beta = new double[nparams];
final Statistics[] rel = new Statistics[nparams];
final Statistics[] abs = new Statistics[nparams];
for (int i = 0; i < nparams; i++) {
rel[i] = new Statistics();
abs[i] = new Statistics();
}
final DoubleDoubleBiPredicate predicate = TestHelper.doublesAreClose(1e-10, 0);
// Second pass: repeat with the bias b added to both the data and the background parameter.
// for (double b : new double[] { -500, -100, -10, -1, -0.1, 0.1, 1, 10, 100, 500 })
for (final double b : new double[] { -10, -1, -0.1, 0.1, 1, 10 }) {
if (report) {
for (int i = 0; i < nparams; i++) {
rel[i].reset();
abs[i].reset();
}
}
for (int i = 0; i < paramsList.size(); i++) {
final double[] y = add(yList.get(i), b);
final double[] a = paramsList.get(i).clone();
// a[0] is the background parameter: shift it by the same bias as the data.
a[0] += b;
calc.findLinearised(n, y, a, alpha, beta, func);
final double[][] alpha2 = alphaList.get(i);
final double[] beta2 = betaList.get(i);
final double[] x2 = xList.get(i);
// The gradients must match the unbiased reference to within a tight tolerance.
TestAssertions.assertArrayTest(beta2, beta, predicate, "Beta");
TestAssertions.assertArrayTest(alpha2, alpha, predicate, "Alpha");
// Solve
solver.solve(alpha, beta);
Assertions.assertArrayEquals(x2, beta, 1e-10, "X");
if (report) {
// Accumulate relative/absolute error statistics per parameter for the report.
for (int j = 0; j < nparams; j++) {
rel[j].add(DoubleEquality.relativeError(x2[j], beta[j]));
abs[j].add(Math.abs(x2[j] - beta[j]));
}
}
}
if (report) {
for (int i = 0; i < nparams; i++) {
logger.info(FunctionUtils.getSupplier("Bias = %.2f : %s : Rel %g +/- %g: Abs %g +/- %g", b, func.getGradientParameterName(i), rel[i].getMean(), rel[i].getStandardDeviation(), abs[i].getMean(), abs[i].getStandardDeviation()));
}
}
}
} finally {
// Restore the shared background field for subsequent tests.
background = defaultBackground;
}
}
Use of uk.ac.sussex.gdsc.smlm.fitting.linear.EjmlLinearSolver in project GDSC-SMLM by aherbert.
From the class LseBaseFunctionSolver, method covariance.
// Allow I and E as they have special meaning in the formula.
// CHECKSTYLE.OFF: ParameterName
/**
 * Compute the covariance matrix of the parameters of the function assuming a least squares fit of
 * a Poisson process.
 *
 * <p>Uses the Mortensen formula (Mortensen, et al (2010) Nature Methods 7, 377-383), equation 25.
 *
 * <p>The method involves inversion of a matrix and may fail. Note that on success the input
 * matrix {@code I} is inverted in place.
 *
 * <pre>
 * I = sum_i { Ei,a * Ei,b }
 * E = sum_i { Ei * Ei,a * Ei,b }
 * with
 * i the number of data points fit using least squares using a function of n variable parameters
 * Ei the expected value of the function at i
 * Ei,a the gradient of the function at i with respect to parameter a
 * Ei,b the gradient of the function at i with respect to parameter b
 * </pre>
 *
 * @param I the Iab matrix
 * @param E the Ei_Eia_Eib matrix
 * @return the covariance matrix (or null)
 */
public static double[][] covariance(double[][] I, double[][] E) {
  final int n = I.length;
  // Invert the matrix (in place). A failed inversion means no covariance can be computed.
  final EjmlLinearSolver solver = EjmlLinearSolver.createForInversion(1e-2);
  if (!solver.invert(I)) {
    return null;
  }
  // From here I holds I^-1 in the Mortensen notation.
  // Full matrix product: covar = I^-1 E I^-1, computed element by element.
  final double[][] covar = new double[n][n];
  for (int a = 0; a < n; a++) {
    for (int b = 0; b < n; b++) {
      double sum = 0;
      for (int ap = 0; ap < n; ap++) {
        final double iaap = I[a][ap];
        final double[] erow = E[ap];
        for (int bp = 0; bp < n; bp++) {
          sum += iaap * erow[bp] * I[bp][b];
        }
      }
      covar[a][b] = sum;
    }
  }
  return covar;
}
Aggregations