Use of org.apache.commons.math3.stat.descriptive.moment.Mean in project deeplearning4j by deeplearning4j.
From the class TestReconstructionDistributions, method testBernoulliLogProb.
@Test
public void testBernoulliLogProb() {
    Nd4j.getRandom().setSeed(12345);
    int inputSize = 4;
    int[] mbs = new int[] { 1, 2, 5 };
    Random r = new Random(12345);
    for (boolean average : new boolean[] { true, false }) {
        for (int minibatch : mbs) {
            // Random binary observations (Bernoulli outcomes are 0 or 1)
            INDArray x = Nd4j.zeros(minibatch, inputSize);
            for (int i = 0; i < minibatch; i++) {
                for (int j = 0; j < inputSize; j++) {
                    x.putScalar(i, j, r.nextInt(2));
                }
            }
            //i.e., pre-sigmoid prob
            INDArray distributionParams = Nd4j.rand(minibatch, inputSize).muli(2).subi(1);
            INDArray prob = Transforms.sigmoid(distributionParams, true);
            ReconstructionDistribution dist = new BernoulliReconstructionDistribution("sigmoid");
            double negLogProb = dist.negLogProbability(x, distributionParams, average);
            INDArray exampleNegLogProb = dist.exampleNegLogProbability(x, distributionParams);
            assertArrayEquals(new int[] { minibatch, 1 }, exampleNegLogProb.shape());
            //Calculate the same thing, but using Apache Commons math
            double logProbSum = 0.0;
            for (int i = 0; i < minibatch; i++) {
                double exampleSum = 0.0;
                for (int j = 0; j < inputSize; j++) {
                    double p = prob.getDouble(i, j);
                    //Bernoulli is a special case of binomial with a single trial
                    BinomialDistribution binomial = new BinomialDistribution(1, p);
                    double xVal = x.getDouble(i, j);
                    double thisLogProb = binomial.logProbability((int) xVal);
                    logProbSum += thisLogProb;
                    exampleSum += thisLogProb;
                }
                assertEquals(-exampleNegLogProb.getDouble(i), exampleSum, 1e-6);
            }
            double expNegLogProb;
            if (average) {
                expNegLogProb = -logProbSum / minibatch;
            } else {
                expNegLogProb = -logProbSum;
            }
            assertEquals(expNegLogProb, negLogProb, 1e-6);
            //Also: check random sampling...
            int count = minibatch * inputSize;
            INDArray arr = Nd4j.linspace(-3, 3, count).reshape(minibatch, inputSize);
            INDArray sampleMean = dist.generateAtMean(arr);
            INDArray sampleRandom = dist.generateRandom(arr);
            for (int i = 0; i < minibatch; i++) {
                for (int j = 0; j < inputSize; j++) {
                    double d1 = sampleMean.getDouble(i, j);
                    double d2 = sampleRandom.getDouble(i, j);
                    // Mean value is a probability and must lie in [0, 1].
                    // Bug fix: the original used ||, which every double satisfies,
                    // so the assertion could never fail.
                    assertTrue(d1 >= 0.0 && d1 <= 1.0);
                    // Random samples must be exact Bernoulli outcomes: 0 or 1
                    assertTrue(d2 == 0.0 || d2 == 1.0);
                }
            }
        }
    }
}
Use of org.apache.commons.math3.stat.descriptive.moment.Mean in project deeplearning4j by deeplearning4j.
From the class TestReconstructionDistributions, method testGaussianLogProb.
@Test
public void testGaussianLogProb() {
    Nd4j.getRandom().setSeed(12345);
    final int nIn = 4;
    for (boolean average : new boolean[] { true, false }) {
        for (int mb : new int[] { 1, 2, 5 }) {
            // Draw data, means and log(sigma^2); the draw order matters because
            // all three consume the same seeded Nd4j RNG
            final INDArray data = Nd4j.rand(mb, nIn);
            final INDArray mu = Nd4j.randn(mb, nIn);
            final INDArray logVar = Nd4j.rand(mb, nIn).subi(0.5);
            // Distribution parameters are [means | log(sigma^2)] side by side
            final INDArray params = Nd4j.createUninitialized(new int[] { mb, 2 * nIn });
            params.get(NDArrayIndex.all(), NDArrayIndex.interval(0, nIn)).assign(mu);
            params.get(NDArrayIndex.all(), NDArrayIndex.interval(nIn, 2 * nIn)).assign(logVar);
            final ReconstructionDistribution dist = new GaussianReconstructionDistribution("identity");
            final double actualNegLogProb = dist.negLogProbability(data, params, average);
            final INDArray perExample = dist.exampleNegLogProbability(data, params);
            assertArrayEquals(new int[] { mb, 1 }, perExample.shape());
            // Recompute the log probability with Apache Commons Math as a reference
            double total = 0.0;
            for (int i = 0; i < mb; i++) {
                double rowSum = 0.0;
                for (int j = 0; j < nIn; j++) {
                    final double m = mu.getDouble(i, j);
                    final double s = Math.sqrt(Math.exp(logVar.getDouble(i, j)));
                    final double lp = new NormalDistribution(m, s).logDensity(data.getDouble(i, j));
                    total += lp;
                    rowSum += lp;
                }
                assertEquals(-perExample.getDouble(i), rowSum, 1e-6);
            }
            final double expected = average ? -total / mb : -total;
            assertEquals(expected, actualNegLogProb, 1e-6);
            // Smoke-test the sampling code paths.
            // NOTE(review): the samples are generated but never asserted; also
            // confirm the expected column count of the pre-output array here —
            // the distribution parameters above use 2*inputSize columns.
            final INDArray preOut = Nd4j.linspace(-3, 3, mb * nIn).reshape(mb, nIn);
            final INDArray sampleAtMean = dist.generateAtMean(preOut);
            final INDArray sampleRnd = dist.generateRandom(preOut);
        }
    }
}
Use of org.apache.commons.math3.stat.descriptive.moment.Mean in project GDSC-SMLM by aherbert.
From the class DiffusionRateTest, method run.
/*
 * (non-Javadoc)
 *
 * @see ij.plugin.PlugIn#run(java.lang.String)
 */
// Entry point for the DiffusionRateTest plugin. Simulates diffusing molecules
// (grid walk, linear walk or free movement, optionally confined to a sphere),
// stores the localisations as peak results, then plots mean-squared
// displacement (MSD) against time and jump-distance histograms, and reports
// fitted 2D/3D diffusion coefficients.
public void run(String arg) {
SMLMUsageTracker.recordPlugin(this.getClass(), arg);
// Ctrl key held down: run the simplified test instead of the full simulation
if (IJ.controlKeyDown()) {
simpleTest();
return;
}
extraOptions = Utils.isExtraOptions();
if (!showDialog())
return;
// Invalidate any cached simulation state from a previous run
lastSimulatedDataset[0] = lastSimulatedDataset[1] = "";
lastSimulatedPrecision = 0;
final int totalSteps = (int) Math.ceil(settings.seconds * settings.stepsPerSecond);
// Conversion from um^2 to pixel^2: 1e6 nm^2 per um^2 divided by (pixel pitch in nm)^2
conversionFactor = 1000000.0 / (settings.pixelPitch * settings.pixelPitch);
// Diffusion rate is um^2/sec. Convert to pixels per simulation frame.
final double diffusionRateInPixelsPerSecond = settings.diffusionRate * conversionFactor;
final double diffusionRateInPixelsPerStep = diffusionRateInPixelsPerSecond / settings.stepsPerSecond;
final double precisionInPixels = myPrecision / settings.pixelPitch;
// Localisation error is only simulated when a non-zero precision is configured
final boolean addError = myPrecision != 0;
Utils.log(TITLE + " : D = %s um^2/sec, Precision = %s nm", Utils.rounded(settings.diffusionRate, 4), Utils.rounded(myPrecision, 4));
Utils.log("Mean-displacement per dimension = %s nm/sec", Utils.rounded(1e3 * ImageModel.getRandomMoveDistance(settings.diffusionRate), 4));
if (extraOptions)
Utils.log("Step size = %s, precision = %s", Utils.rounded(ImageModel.getRandomMoveDistance(diffusionRateInPixelsPerStep)), Utils.rounded(precisionInPixels));
// Convert diffusion co-efficient into the standard deviation for the random walk
// NOTE(review): both branches of this ternary are identical — the LINEAR_WALK
// case currently uses the same step size as the default (see the author's
// inline question below); confirm whether a different value was intended.
final double diffusionSigma = (settings.getDiffusionType() == DiffusionType.LINEAR_WALK) ? // Q. What should this be? At the moment just do 1D diffusion on a random vector
ImageModel.getRandomMoveDistance(diffusionRateInPixelsPerStep) : ImageModel.getRandomMoveDistance(diffusionRateInPixelsPerStep);
Utils.log("Simulation step-size = %s nm", Utils.rounded(settings.pixelPitch * diffusionSigma, 4));
// Move the molecules and get the diffusion rate
IJ.showStatus("Simulating ...");
final long start = System.nanoTime();
final long seed = System.currentTimeMillis() + System.identityHashCode(this);
// Two independent RNG sets: random[] drives particle movement,
// random2[] drives the simulated localisation error
RandomGenerator[] random = new RandomGenerator[3];
RandomGenerator[] random2 = new RandomGenerator[3];
for (int i = 0; i < 3; i++) {
random[i] = new Well19937c(seed + i * 12436);
random2[i] = new Well19937c(seed + i * 678678 + 3);
}
// Per-step displacement statistics and per-step jump-distance samples
Statistics[] stats2D = new Statistics[totalSteps];
Statistics[] stats3D = new Statistics[totalSteps];
StoredDataStatistics jumpDistances2D = new StoredDataStatistics(totalSteps);
StoredDataStatistics jumpDistances3D = new StoredDataStatistics(totalSteps);
for (int j = 0; j < totalSteps; j++) {
stats2D[j] = new Statistics();
stats3D[j] = new Statistics();
}
// Confinement sphere (radius converted from nm to pixels)
SphericalDistribution dist = new SphericalDistribution(settings.confinementRadius / settings.pixelPitch);
Statistics asymptote = new Statistics();
// Save results to memory
MemoryPeakResults results = new MemoryPeakResults(totalSteps);
Calibration cal = new Calibration(settings.pixelPitch, 1, 1000.0 / settings.stepsPerSecond);
results.setCalibration(cal);
results.setName(TITLE);
int peak = 0;
// Store raw coordinates
ArrayList<Point> points = new ArrayList<Point>(totalSteps);
// Final squared displacement of each particle in 1, 2 and 3 dimensions
StoredData totalJumpDistances1D = new StoredData(settings.particles);
StoredData totalJumpDistances2D = new StoredData(settings.particles);
StoredData totalJumpDistances3D = new StoredData(settings.particles);
for (int i = 0; i < settings.particles; i++) {
if (i % 16 == 0) {
IJ.showProgress(i, settings.particles);
if (Utils.isInterrupted())
return;
}
// Increment the frame so that tracing analysis can distinguish traces
peak++;
double[] origin = new double[3];
final int id = i + 1;
MoleculeModel m = new MoleculeModel(id, origin.clone());
if (addError)
origin = addError(origin, precisionInPixels, random);
if (useConfinement) {
// Note: When using confinement the average displacement should asymptote
// at the average distance of a point from the centre of a ball. This is 3r/4.
// See: http://answers.yahoo.com/question/index?qid=20090131162630AAMTUfM
// The equivalent in 2D is 2r/3. However although we are plotting 2D distance
// this is a projection of the 3D position onto the plane and so the particles
// will not be evenly spread (there will be clustering at centre caused by the
// poles)
final double[] axis = (settings.getDiffusionType() == DiffusionType.LINEAR_WALK) ? nextVector() : null;
for (int j = 0; j < totalSteps; j++) {
double[] xyz = m.getCoordinates();
double[] originalXyz = xyz.clone();
// Retry the move up to confinementAttempts times until it lands inside the sphere
for (int n = confinementAttempts; n-- > 0; ) {
if (settings.getDiffusionType() == DiffusionType.GRID_WALK)
m.walk(diffusionSigma, random);
else if (settings.getDiffusionType() == DiffusionType.LINEAR_WALK)
m.slide(diffusionSigma, axis, random[0]);
else
m.move(diffusionSigma, random);
if (!dist.isWithin(m.getCoordinates())) {
// Reset position
for (int k = 0; k < 3; k++) xyz[k] = originalXyz[k];
} else {
// The move was allowed
break;
}
}
points.add(new Point(id, xyz));
if (addError)
xyz = addError(xyz, precisionInPixels, random2);
peak = record(xyz, id, peak, stats2D[j], stats3D[j], jumpDistances2D, jumpDistances3D, origin, results);
}
asymptote.add(distance(m.getCoordinates()));
} else {
// Unconfined diffusion: select the walk type once, then iterate every step
if (settings.getDiffusionType() == DiffusionType.GRID_WALK) {
for (int j = 0; j < totalSteps; j++) {
m.walk(diffusionSigma, random);
double[] xyz = m.getCoordinates();
points.add(new Point(id, xyz));
if (addError)
xyz = addError(xyz, precisionInPixels, random2);
peak = record(xyz, id, peak, stats2D[j], stats3D[j], jumpDistances2D, jumpDistances3D, origin, results);
}
} else if (settings.getDiffusionType() == DiffusionType.LINEAR_WALK) {
final double[] axis = nextVector();
for (int j = 0; j < totalSteps; j++) {
m.slide(diffusionSigma, axis, random[0]);
double[] xyz = m.getCoordinates();
points.add(new Point(id, xyz));
if (addError)
xyz = addError(xyz, precisionInPixels, random2);
peak = record(xyz, id, peak, stats2D[j], stats3D[j], jumpDistances2D, jumpDistances3D, origin, results);
}
} else {
for (int j = 0; j < totalSteps; j++) {
m.move(diffusionSigma, random);
double[] xyz = m.getCoordinates();
points.add(new Point(id, xyz));
if (addError)
xyz = addError(xyz, precisionInPixels, random2);
peak = record(xyz, id, peak, stats2D[j], stats3D[j], jumpDistances2D, jumpDistances3D, origin, results);
}
}
}
// Debug: record all the particles so they can be analysed
// System.out.printf("%f %f %f\n", m.getX(), m.getY(), m.getZ());
final double[] xyz = m.getCoordinates();
// Accumulate the final squared displacement per dimension (x; then +y; then +z)
double d2 = 0;
totalJumpDistances1D.add(d2 = xyz[0] * xyz[0]);
totalJumpDistances2D.add(d2 += xyz[1] * xyz[1]);
totalJumpDistances3D.add(d2 += xyz[2] * xyz[2]);
}
// Elapsed simulation time in milliseconds
final double time = (System.nanoTime() - start) / 1000000.0;
IJ.showProgress(1);
MemoryPeakResults.addResults(results);
lastSimulatedDataset[0] = results.getName();
lastSimulatedPrecision = myPrecision;
// Convert pixels^2/step to um^2/sec
final double msd2D = (jumpDistances2D.getMean() / conversionFactor) / (results.getCalibration().getExposureTime() / 1000);
final double msd3D = (jumpDistances3D.getMean() / conversionFactor) / (results.getCalibration().getExposureTime() / 1000);
Utils.log("Raw data D=%s um^2/s, Precision = %s nm, N=%d, step=%s s, mean2D=%s um^2, MSD 2D = %s um^2/s, mean3D=%s um^2, MSD 3D = %s um^2/s", Utils.rounded(settings.diffusionRate), Utils.rounded(myPrecision), jumpDistances2D.getN(), Utils.rounded(results.getCalibration().getExposureTime() / 1000), Utils.rounded(jumpDistances2D.getMean() / conversionFactor), Utils.rounded(msd2D), Utils.rounded(jumpDistances3D.getMean() / conversionFactor), Utils.rounded(msd3D));
aggregateIntoFrames(points, addError, precisionInPixels, random2);
IJ.showStatus("Analysing results ...");
if (showDiffusionExample) {
showExample(totalSteps, diffusionSigma, random);
}
// Plot a graph of mean squared distance
double[] xValues = new double[stats2D.length];
double[] yValues2D = new double[stats2D.length];
double[] yValues3D = new double[stats3D.length];
double[] upper2D = new double[stats2D.length];
double[] lower2D = new double[stats2D.length];
double[] upper3D = new double[stats3D.length];
double[] lower3D = new double[stats3D.length];
// Regression of MSD against time with the intercept suppressed (false)
SimpleRegression r2D = new SimpleRegression(false);
SimpleRegression r3D = new SimpleRegression(false);
// When confined, only fit the initial (pre-asymptote) part of the curve
final int firstN = (useConfinement) ? fitN : totalSteps;
for (int j = 0; j < totalSteps; j++) {
// Convert steps to seconds
xValues[j] = (double) (j + 1) / settings.stepsPerSecond;
// Convert values in pixels^2 to um^2
final double mean2D = stats2D[j].getMean() / conversionFactor;
final double mean3D = stats3D[j].getMean() / conversionFactor;
final double sd2D = stats2D[j].getStandardDeviation() / conversionFactor;
final double sd3D = stats3D[j].getStandardDeviation() / conversionFactor;
yValues2D[j] = mean2D;
yValues3D[j] = mean3D;
upper2D[j] = mean2D + sd2D;
lower2D[j] = mean2D - sd2D;
upper3D[j] = mean3D + sd3D;
lower3D[j] = mean3D - sd3D;
if (j < firstN) {
r2D.addData(xValues[j], yValues2D[j]);
r3D.addData(xValues[j], yValues3D[j]);
}
}
// TODO - Fit using the equation for 2D confined diffusion:
// MSD = 4s^2 + R^2 (1 - 0.99e^(-1.84^2 Dt / R^2)
// s = localisation precision
// R = confinement radius
// D = 2D diffusion coefficient
// t = time
final PolynomialFunction fitted2D, fitted3D;
if (r2D.getN() > 0) {
// Do linear regression to get diffusion rate
final double[] best2D = new double[] { r2D.getIntercept(), r2D.getSlope() };
fitted2D = new PolynomialFunction(best2D);
final double[] best3D = new double[] { r3D.getIntercept(), r3D.getSlope() };
fitted3D = new PolynomialFunction(best3D);
// For 2D diffusion: d^2 = 4D
// where: d^2 = mean-square displacement
double D = best2D[1] / 4.0;
String msg = "2D Diffusion rate = " + Utils.rounded(D, 4) + " um^2 / sec (" + Utils.timeToString(time) + ")";
IJ.showStatus(msg);
Utils.log(msg);
// In 3D the slope is divided by 6 instead of 4
D = best3D[1] / 6.0;
Utils.log("3D Diffusion rate = " + Utils.rounded(D, 4) + " um^2 / sec (" + Utils.timeToString(time) + ")");
} else {
fitted2D = fitted3D = null;
}
// Create plots
plotMSD(totalSteps, xValues, yValues2D, lower2D, upper2D, fitted2D, 2);
plotMSD(totalSteps, xValues, yValues3D, lower3D, upper3D, fitted3D, 3);
plotJumpDistances(TITLE, jumpDistances2D, 2, 1);
plotJumpDistances(TITLE, jumpDistances3D, 3, 1);
if (idCount > 0)
new WindowOrganiser().tileWindows(idList);
// Expected asymptote for a confined sphere is 3r/4 (see note above)
if (useConfinement)
Utils.log("3D asymptote distance = %s nm (expected %.2f)", Utils.rounded(asymptote.getMean() * settings.pixelPitch, 4), 3 * settings.confinementRadius / 4);
}
Use of org.apache.commons.math3.stat.descriptive.moment.Mean in project GDSC-SMLM by aherbert.
From the class BinomialFitter, method fitBinomial.
/**
 * Fit the binomial distribution (n,p) to the input data. Performs fitting assuming a fixed n value and attempts to
 * optimise p. All n from minN to maxN are evaluated. If maxN is zero then all possible n from minN are evaluated
 * until the fit is worse.
 *
 * @param data
 *            The input data (all values must be positive)
 * @param minN
 *            The minimum n to evaluate
 * @param maxN
 *            The maximum n to evaluate. Set to zero to evaluate all possible values.
 * @param zeroTruncated
 *            True if the model should ignore n=0 (zero-truncated binomial)
 * @return The best fit (n, p), or null if no fit succeeded
 * @throws IllegalArgumentException
 *             If any of the input data values are negative
 */
public double[] fitBinomial(int[] data, int minN, int maxN, boolean zeroTruncated) {
    double[] histogram = getHistogram(data, false);
    final double initialSS = Double.POSITIVE_INFINITY;
    double bestSS = initialSS;
    double[] parameters = null;
    int worse = 0;
    // Largest observed count (histogram.length is already an int; the
    // original redundant (int) cast has been removed)
    int N = histogram.length - 1;
    if (minN < 1)
        minN = 1;
    if (maxN > 0) {
        if (N > maxN) {
            // Limit the number fitted to maximum
            N = maxN;
        } else if (N < maxN) {
            // Expand the histogram to the maximum
            histogram = Arrays.copyOf(histogram, maxN + 1);
            N = maxN;
        }
    }
    if (minN > N)
        minN = N;
    final double mean = getMean(histogram);
    String name = (zeroTruncated) ? "Zero-truncated Binomial distribution" : "Binomial distribution";
    log("Mean cluster size = %s", Utils.rounded(mean));
    log("Fitting cumulative " + name);
    // Evaluate each candidate n in turn. The underlying optimiser is stochastic
    // so allow a few consecutive worse fits before stopping the search.
    for (int n = minN; n <= N; n++) {
        PointValuePair solution = fitBinomial(histogram, mean, n, zeroTruncated);
        if (solution == null)
            continue;
        double p = solution.getPointRef()[0];
        log("Fitted %s : N=%d, p=%s. SS=%g", name, n, Utils.rounded(p), solution.getValue());
        if (bestSS > solution.getValue()) {
            // New best fit: record it and reset the worse-fit counter
            bestSS = solution.getValue();
            parameters = new double[] { n, p };
            worse = 0;
        } else if (bestSS != initialSS) {
            // Stop after 3 consecutive fits that did not improve on the best
            if (++worse >= 3)
                break;
        }
    }
    return parameters;
}
Use of org.apache.commons.math3.stat.descriptive.moment.Mean in project GDSC-SMLM by aherbert.
From the class BinomialFitter, method fitBinomial (histogram overload).
/**
 * Fit the binomial distribution (n,p) to the cumulative histogram. Performs fitting assuming a fixed n value and
 * attempts to optimise p.
 *
 * @param histogram
 *            The input histogram (may be renormalised in place when zero-truncated fitting finds zero values)
 * @param mean
 *            The histogram mean (used to estimate p). Calculated if NaN.
 * @param n
 *            The n to evaluate
 * @param zeroTruncated
 *            True if the model should ignore n=0 (zero-truncated binomial)
 * @return The best fit (n, p), or null if fitting failed
 * @throws IllegalArgumentException
 *             If any of the input data values are negative
 * @throws IllegalArgumentException
 *             If fitting a zero-truncated binomial and there are no values above zero
 */
public PointValuePair fitBinomial(double[] histogram, double mean, int n, boolean zeroTruncated) {
if (Double.isNaN(mean))
mean = getMean(histogram);
if (zeroTruncated && histogram[0] > 0) {
log("Fitting zero-truncated histogram but there are zero values - Renormalising to ignore zero");
double cumul = 0;
for (int i = 1; i < histogram.length; i++) cumul += histogram[i];
if (cumul == 0)
throw new IllegalArgumentException("Fitting zero-truncated histogram but there are no non-zero values");
// NOTE(review): this renormalisation mutates the caller's histogram array in place
histogram[0] = 0;
for (int i = 1; i < histogram.length; i++) histogram[i] /= cumul;
}
// Zero-truncation removes one fittable point (the n=0 bin)
int nFittedPoints = Math.min(histogram.length, n + 1) - ((zeroTruncated) ? 1 : 0);
if (nFittedPoints < 1) {
log("No points to fit (%d): Histogram.length = %d, n = %d, zero-truncated = %b", nFittedPoints, histogram.length, n, zeroTruncated);
return null;
}
// The model is only fitting the probability p
// For a binomial n*p = mean => p = mean/n
double[] initialSolution = new double[] { FastMath.min(mean / n, 1) };
// Create the function
BinomialModelFunction function = new BinomialModelFunction(histogram, n, zeroTruncated);
// p is bounded to [0, 1]
double[] lB = new double[1];
double[] uB = new double[] { 1 };
SimpleBounds bounds = new SimpleBounds(lB, uB);
// Fit
// CMAESOptimizer or BOBYQAOptimizer support bounds
// CMAESOptimiser based on Matlab code:
// https://www.lri.fr/~hansen/cmaes.m
// Take the defaults from the Matlab documentation
int maxIterations = 2000;
//Double.NEGATIVE_INFINITY;
double stopFitness = 0;
boolean isActiveCMA = true;
int diagonalOnly = 0;
int checkFeasableCount = 1;
RandomGenerator random = new Well19937c();
boolean generateStatistics = false;
ConvergenceChecker<PointValuePair> checker = new SimpleValueChecker(1e-6, 1e-10);
// The sigma determines the search range for the variables. It should be 1/3 of the initial search region.
OptimizationData sigma = new CMAESOptimizer.Sigma(new double[] { (uB[0] - lB[0]) / 3 });
// NOTE(review): the CMA-ES default population size is 4 + floor(3*ln(N)) for N
// problem dimensions; only one variable (p) is fitted here yet Math.log(2) is
// used — TODO confirm this constant is intentional
OptimizationData popSize = new CMAESOptimizer.PopulationSize((int) (4 + Math.floor(3 * Math.log(2))));
try {
PointValuePair solution = null;
boolean noRefit = maximumLikelihood;
if (n == 1 && zeroTruncated) {
// No need to fit: zero-truncated with a single trial forces the outcome, so p=1
solution = new PointValuePair(new double[] { 1 }, 0);
noRefit = true;
} else {
GoalType goalType = (maximumLikelihood) ? GoalType.MAXIMIZE : GoalType.MINIMIZE;
// Iteratively fit
CMAESOptimizer opt = new CMAESOptimizer(maxIterations, stopFitness, isActiveCMA, diagonalOnly, checkFeasableCount, random, generateStatistics, checker);
for (int iteration = 0; iteration <= fitRestarts; iteration++) {
try {
// Start from the initial solution
PointValuePair result = opt.optimize(new InitialGuess(initialSolution), new ObjectiveFunction(function), goalType, bounds, sigma, popSize, new MaxIter(maxIterations), new MaxEval(maxIterations * 2));
// opt.getEvaluations());
if (solution == null || result.getValue() < solution.getValue()) {
solution = result;
}
} catch (TooManyEvaluationsException e) {
// Deliberately ignored: try again on the next restart iteration
} catch (TooManyIterationsException e) {
// Deliberately ignored: try again on the next restart iteration
}
if (solution == null)
continue;
try {
// Also restart from the current optimum
PointValuePair result = opt.optimize(new InitialGuess(solution.getPointRef()), new ObjectiveFunction(function), goalType, bounds, sigma, popSize, new MaxIter(maxIterations), new MaxEval(maxIterations * 2));
// opt.getEvaluations());
if (result.getValue() < solution.getValue()) {
solution = result;
}
} catch (TooManyEvaluationsException e) {
// Deliberately ignored: keep the best solution found so far
} catch (TooManyIterationsException e) {
// Deliberately ignored: keep the best solution found so far
}
}
if (solution == null)
return null;
}
if (noRefit) {
// Although we fit the log-likelihood, return the sum-of-squares to allow
// comparison across different n
double p = solution.getPointRef()[0];
double ss = 0;
double[] obs = function.p;
double[] exp = function.getP(p);
for (int i = 0; i < obs.length; i++) ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
return new PointValuePair(solution.getPointRef(), ss);
} else // We can do a LVM refit if the number of fitted points is more than 1
if (nFittedPoints > 1) {
// Improve SS fit with a gradient based LVM optimizer
LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
try {
final BinomialModelFunctionGradient gradientFunction = new BinomialModelFunctionGradient(histogram, n, zeroTruncated);
//@formatter:off
LeastSquaresProblem problem = new LeastSquaresBuilder().maxEvaluations(Integer.MAX_VALUE).maxIterations(3000).start(solution.getPointRef()).target(gradientFunction.p).weight(new DiagonalMatrix(gradientFunction.getWeights())).model(gradientFunction, new MultivariateMatrixFunction() {
public double[][] value(double[] point) throws IllegalArgumentException {
return gradientFunction.jacobian(point);
}
}).build();
//@formatter:on
Optimum lvmSolution = optimizer.optimize(problem);
// Check the pValue is valid since the LVM is not bounded.
double p = lvmSolution.getPoint().getEntry(0);
if (p <= 1 && p >= 0) {
// True if the weights are 1
double ss = lvmSolution.getResiduals().dotProduct(lvmSolution.getResiduals());
// ss += (obs[i] - exp[i]) * (obs[i] - exp[i]);
// Only accept the refit if it improved the sum-of-squares
if (ss < solution.getValue()) {
// Utils.rounded(100 * (solution.getValue() - ss) / solution.getValue(), 4));
return new PointValuePair(lvmSolution.getPoint().toArray(), ss);
}
}
} catch (TooManyIterationsException e) {
log("Failed to re-fit: Too many iterations: %s", e.getMessage());
} catch (ConvergenceException e) {
log("Failed to re-fit: %s", e.getMessage());
} catch (Exception e) {
// Ignore this ... fall through and return the bounded CMA-ES solution
}
}
return solution;
} catch (Exception e) {
log("Failed to fit Binomial distribution with N=%d : %s", n, e.getMessage());
}
return null;
}
Aggregations