Use of gdsc.smlm.fitting.linear.EJMLLinearSolver in the project GDSC-SMLM by aherbert.
From the class FisherInformationMatrix, method invert.
/**
 * Invert the Fisher information matrix and cache the diagonal of the inverse
 * (the Cramer-Rao lower bound). Sets {@code inverted} to YES on success, NO on
 * failure; does nothing if an inversion has already been attempted.
 */
private void invert() {
  if (inverted != UNKNOWN) {
    // An inversion attempt has already been made
    return;
  }

  // An empty matrix trivially inverts to an empty bound vector
  if (m.numCols == 0) {
    crlb = new double[0];
    inverted = YES;
    return;
  }

  // Assume failure until the inversion succeeds and passes validation
  inverted = NO;

  // Matrix inversion using the configured tolerance.
  // The solver does not modify the input matrix.
  final EJMLLinearSolver solver = EJMLLinearSolver.createForInversion(inversionTolerance);
  final double[] diagonal = solver.invertDiagonal(m);
  if (diagonal == null) {
    return;
  }

  // Validate: all diagonal values of the inverse must be zero or above
  if (inversionTolerance > 0) {
    // The solver already applied the tolerance check, so just clip values
    // marginally below zero
    for (int i = 0; i < m.numCols; i++) {
      if (diagonal[i] < 0) {
        diagonal[i] = 0;
      }
    }
  } else {
    // No tolerance was configured: permit only a small negative error,
    // otherwise leave the result marked as not inverted
    for (int i = 0; i < m.numCols; i++) {
      if (diagonal[i] < 0) {
        if (diagonal[i] <= -DEFAULT_INVERSION_TOLERANCE) {
          return;
        }
        diagonal[i] = 0;
      }
    }
  }

  inverted = YES;
  this.crlb = diagonal;
}
Use of gdsc.smlm.fitting.linear.EJMLLinearSolver in the project GDSC-SMLM by aherbert.
From the class LSQLVMGradientProcedureTest, method gradientProcedureComputesSameOutputWithBias.
/**
 * Check that the LVM gradient procedure computes the same alpha, beta and solved
 * parameter step when a constant bias is added to both the data and the background
 * parameter, i.e. the output is invariant to a shift shared by data and model.
 */
@Test
public void gradientProcedureComputesSameOutputWithBias() {
  ErfGaussian2DFunction func = new SingleFreeCircularErfGaussian2DFunction(blockWidth, blockWidth);
  int nparams = func.getNumberOfGradients();
  int iter = 100;
  rdg = new RandomDataGenerator(new Well19937c(30051977));
  ArrayList<double[]> paramsList = new ArrayList<double[]>(iter);
  ArrayList<double[]> yList = new ArrayList<double[]>(iter);
  ArrayList<double[]> alphaList = new ArrayList<double[]>(iter);
  ArrayList<double[]> betaList = new ArrayList<double[]>(iter);
  ArrayList<double[]> xList = new ArrayList<double[]>(iter);
  // Manipulate the background (shared static state; restored in the finally block)
  double defaultBackground = Background;
  try {
    Background = 1e-2;
    createData(1, iter, paramsList, yList, true);
    EJMLLinearSolver solver = new EJMLLinearSolver(1e-5, 1e-6);
    // Compute the reference alpha/beta and solved step for the unbiased data
    for (int i = 0; i < paramsList.size(); i++) {
      double[] y = yList.get(i);
      double[] a = paramsList.get(i);
      BaseLSQLVMGradientProcedure p = LSQLVMGradientProcedureFactory.create(y, func);
      p.gradient(a);
      double[] beta = p.beta;
      alphaList.add(p.getAlphaLinear());
      betaList.add(beta.clone());
      for (int j = 0; j < nparams; j++) {
        if (Math.abs(beta[j]) < 1e-6)
          System.out.printf("[%d] Tiny beta %s %g\n", i, func.getName(j), beta[j]);
      }
      // Solve (overwrites beta with the solution x)
      if (!solver.solve(p.getAlphaMatrix(), beta))
        throw new AssertionError();
      xList.add(beta);
    }
    // Repeat with a bias added to both the data and the background parameter;
    // the procedure output must be unchanged
    for (double b : new double[] { -500, -100, -10, -1, -0.1, 0, 0.1, 1, 10, 100, 500 }) {
      Statistics[] rel = new Statistics[nparams];
      Statistics[] abs = new Statistics[nparams];
      for (int i = 0; i < nparams; i++) {
        rel[i] = new Statistics();
        abs[i] = new Statistics();
      }
      for (int i = 0; i < paramsList.size(); i++) {
        double[] y = add(yList.get(i), b);
        double[] a = paramsList.get(i).clone();
        a[0] += b;
        BaseLSQLVMGradientProcedure p = LSQLVMGradientProcedureFactory.create(y, func);
        p.gradient(a);
        double[] beta = p.beta;
        double[] alpha2 = alphaList.get(i);
        double[] beta2 = betaList.get(i);
        double[] x2 = xList.get(i);
        Assert.assertArrayEquals("Beta", beta2, beta, 1e-10);
        Assert.assertArrayEquals("Alpha", alpha2, p.getAlphaLinear(), 1e-10);
        // Solve. Fail fast if the solver cannot invert: previously the return
        // value was ignored, leaving beta unsolved and producing a misleading
        // "X" assertion failure instead of flagging the solver.
        if (!solver.solve(p.getAlphaMatrix(), beta))
          throw new AssertionError("Solver failed with bias " + b);
        Assert.assertArrayEquals("X", x2, beta, 1e-10);
        for (int j = 0; j < nparams; j++) {
          rel[j].add(DoubleEquality.relativeError(x2[j], beta[j]));
          abs[j].add(Math.abs(x2[j] - beta[j]));
        }
      }
      for (int i = 0; i < nparams; i++)
        System.out.printf("Bias = %.2f : %s : Rel %g +/- %g: Abs %g +/- %g\n", b, func.getName(i),
            rel[i].getMean(), rel[i].getStandardDeviation(), abs[i].getMean(),
            abs[i].getStandardDeviation());
    }
  } finally {
    // Restore the static background so other tests are unaffected
    Background = defaultBackground;
  }
}
Use of gdsc.smlm.fitting.linear.EJMLLinearSolver in the project GDSC-SMLM by aherbert.
From the class GradientCalculatorSpeedTest, method gradientCalculatorComputesSameOutputWithBias.
/**
 * Check that the gradient calculator computes the same alpha, beta and solved
 * parameter step when a constant bias is added to both the data and the background
 * parameter, i.e. the output is invariant to a shift shared by data and model.
 */
@Test
public void gradientCalculatorComputesSameOutputWithBias() {
  Gaussian2DFunction func = new SingleEllipticalGaussian2DFunction(blockWidth, blockWidth);
  int nparams = func.getNumberOfGradients();
  GradientCalculator calc = new GradientCalculator(nparams);
  int n = func.size();
  int iter = 100;
  rdg = new RandomDataGenerator(new Well19937c(30051977));
  ArrayList<double[]> paramsList = new ArrayList<double[]>(iter);
  ArrayList<double[]> yList = new ArrayList<double[]>(iter);
  ArrayList<double[][]> alphaList = new ArrayList<double[][]>(iter);
  ArrayList<double[]> betaList = new ArrayList<double[]>(iter);
  ArrayList<double[]> xList = new ArrayList<double[]>(iter);
  // Manipulate the background (shared static state; restored in the finally block)
  double defaultBackground = Background;
  try {
    Background = 1e-2;
    createData(1, iter, paramsList, yList, true);
    EJMLLinearSolver solver = new EJMLLinearSolver(1e-5, 1e-6);
    // Compute the reference alpha/beta and solved step for the unbiased data
    for (int i = 0; i < paramsList.size(); i++) {
      double[] y = yList.get(i);
      double[] a = paramsList.get(i);
      double[][] alpha = new double[nparams][nparams];
      double[] beta = new double[nparams];
      calc.findLinearised(n, y, a, alpha, beta, func);
      alphaList.add(alpha);
      betaList.add(beta.clone());
      for (int j = 0; j < nparams; j++) {
        if (Math.abs(beta[j]) < 1e-6)
          System.out.printf("[%d] Tiny beta %s %g\n", i, func.getName(j), beta[j]);
      }
      // Solve (overwrites beta with the solution x)
      if (!solver.solve(alpha, beta))
        throw new AssertionError();
      xList.add(beta);
    }
    // Working arrays reused for each biased evaluation
    double[][] alpha = new double[nparams][nparams];
    double[] beta = new double[nparams];
    // Repeat with a bias added to both the data and the background parameter;
    // the calculator output must be unchanged
    for (double b : new double[] { -500, -100, -10, -1, -0.1, 0, 0.1, 1, 10, 100, 500 }) {
      Statistics[] rel = new Statistics[nparams];
      Statistics[] abs = new Statistics[nparams];
      for (int i = 0; i < nparams; i++) {
        rel[i] = new Statistics();
        abs[i] = new Statistics();
      }
      for (int i = 0; i < paramsList.size(); i++) {
        double[] y = add(yList.get(i), b);
        double[] a = paramsList.get(i).clone();
        a[0] += b;
        calc.findLinearised(n, y, a, alpha, beta, func);
        double[][] alpha2 = alphaList.get(i);
        double[] beta2 = betaList.get(i);
        double[] x2 = xList.get(i);
        Assert.assertArrayEquals("Beta", beta2, beta, 1e-10);
        for (int j = 0; j < nparams; j++) {
          Assert.assertArrayEquals("Alpha", alpha2[j], alpha[j], 1e-10);
        }
        // Solve. Fail fast if the solver cannot invert: previously the return
        // value was ignored, leaving beta unsolved and producing a misleading
        // "X" assertion failure instead of flagging the solver.
        if (!solver.solve(alpha, beta))
          throw new AssertionError("Solver failed with bias " + b);
        Assert.assertArrayEquals("X", x2, beta, 1e-10);
        for (int j = 0; j < nparams; j++) {
          rel[j].add(DoubleEquality.relativeError(x2[j], beta[j]));
          abs[j].add(Math.abs(x2[j] - beta[j]));
        }
      }
      for (int i = 0; i < nparams; i++)
        System.out.printf("Bias = %.2f : %s : Rel %g +/- %g: Abs %g +/- %g\n", b, func.getName(i),
            rel[i].getMean(), rel[i].getStandardDeviation(), abs[i].getMean(),
            abs[i].getStandardDeviation());
    }
  } finally {
    // Restore the static background so other tests are unaffected
    Background = defaultBackground;
  }
}
Aggregations