use of org.apache.commons.math3.stat.descriptive.summary.Sum in project tetrad by cmu-phil.
the class SemBicScoreImages3 method logdet2.
private double logdet2(TetradMatrix m) {
    if (m.rows() == 0)
        return 0.0;

    RealMatrix M = m.getRealMatrix();
    final LUDecomposition luDecomposition = new LUDecomposition(M);
    RealMatrix L = luDecomposition.getL();
    RealMatrix U = luDecomposition.getU();
    RealMatrix P = luDecomposition.getP();

    // System.out.println(new TetradMatrix(L.multiply(U)));
    // System.out.println(new TetradMatrix(m));
    // System.out.println(new TetradMatrix(L));
    // System.out.println(new TetradMatrix(U));
    // System.out.println(new TetradMatrix(P));

    // log|M| = log|L| + log|U|; L has a unit diagonal, so its terms contribute zero.
    double sum = 0.0;
    for (int i = 0; i < L.getRowDimension(); i++) {
        sum += FastMath.log(L.getEntry(i, i));
    }
    for (int i = 0; i < U.getRowDimension(); i++) {
        sum += FastMath.log(U.getEntry(i, i));
    }
    return sum;
}
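For a symmetric positive-definite covariance matrix the determinant is positive, so summing the logs of the LU diagonal entries recovers the log-determinant directly. A minimal standalone sketch of the same idea (the 2x2 matrix and class name are made up for illustration), cross-checked against the determinant Commons Math itself reports:

import org.apache.commons.math3.linear.LUDecomposition;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.util.FastMath;

public class LogDetLUDemo {
    public static void main(String[] args) {
        // A small symmetric positive-definite matrix (made up for illustration).
        RealMatrix m = MatrixUtils.createRealMatrix(new double[][] {
                { 4.0, 1.0 },
                { 1.0, 3.0 }
        });

        LUDecomposition lu = new LUDecomposition(m);

        // getL() returns a unit-diagonal lower-triangular factor, so only U's
        // diagonal contributes to the sum of logs.
        double logDet = 0.0;
        RealMatrix u = lu.getU();
        for (int i = 0; i < u.getRowDimension(); i++) {
            logDet += FastMath.log(u.getEntry(i, i));
        }

        // Cross-check against the determinant computed by Commons Math.
        System.out.println(logDet);                             // ~ log(11)
        System.out.println(FastMath.log(lu.getDeterminant()));  // same value
    }
}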
use of org.apache.commons.math3.stat.descriptive.summary.Sum in project tetrad by cmu-phil.
the class SemLikelihood2 method logdet.
private double logdet(TetradMatrix m) {
    if (m.rows() == 0)
        return 0;

    RealMatrix M = m.getRealMatrix();
    final double tol = 1e-9;
    // For symmetric positive-definite M = L * L^T, log|M| = 2 * sum(log(diag(L))).
    RealMatrix LT = new org.apache.commons.math3.linear.CholeskyDecomposition(M, tol, tol).getLT();

    double sum = 0.0;
    for (int i = 0; i < LT.getRowDimension(); i++) {
        sum += FastMath.log(LT.getEntry(i, i));
    }
    return 2.0 * sum;
}
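Unlike the LU variant above, the Cholesky route only applies to symmetric positive-definite matrices; the two tolerance arguments control the symmetry and positivity checks, and the decomposition throws if either fails. A short sketch reusing the made-up matrix from the previous example:

import org.apache.commons.math3.linear.CholeskyDecomposition;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.util.FastMath;

public class LogDetCholeskyDemo {
    public static void main(String[] args) {
        // Same made-up SPD matrix as above; M = L * L^T.
        RealMatrix m = MatrixUtils.createRealMatrix(new double[][] {
                { 4.0, 1.0 },
                { 1.0, 3.0 }
        });

        RealMatrix lt = new CholeskyDecomposition(m, 1e-9, 1e-9).getLT();

        double sum = 0.0;
        for (int i = 0; i < lt.getRowDimension(); i++) {
            sum += FastMath.log(lt.getEntry(i, i));
        }

        System.out.println(2.0 * sum); // ~ log(11), matching the LU result above
    }
}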
use of org.apache.commons.math3.stat.descriptive.summary.Sum in project chordatlas by twak.
the class Prof method findProfileLines.
/**
* We find an initial base offset. Then we cluster the start point of all
* (clean) profiles. If any are a good distance from the initial base, we
* add those as their own profile lines.
*
* The original line is offset by the remaining data.
*/
public static List<SuperLine> findProfileLines(Collection<Prof> profiles, Line3d line) {
    List<SuperLine> out = new ArrayList<>();
    // PaintThing.debug.clear();
    SuperLine superLine = new SuperLine(line.start.x, line.start.z, line.end.x, line.end.z);
    double outLen = superLine.length();
    double min = Double.MAX_VALUE, max = -Double.MAX_VALUE;

    Cache<Prof, Double> vLength = new Cache<Prof, Double>() {

        @Override
        public Double create(Prof i) {
            return i.verticalLength(0.5);
        }
    };

    double vLen = profiles.stream().mapToDouble(p -> vLength.get(p)).sum();
    boolean useVertical = vLen / profiles.size() > 1;

    class Wrapper implements Clusterable {

        double[] pt;

        public Wrapper(Point2d pt) {
            this.pt = new double[] { pt.x, pt.y };
        }

        @Override
        public double[] getPoint() {
            return pt;
        }
    }

    List<Wrapper> toCluster = new ArrayList<>();
    List<Double> baseLineOffset = new ArrayList<>();

    for (Prof p : profiles) {
        if (useVertical && vLength.get(p) < 1) // vLen / (5*profiles.size())
            continue;

        Prof clean = p.parameterize();
        Point2d pt = clean.get(0);
        Point3d pt3 = clean.to3d(pt);
        double ppram = superLine.findPPram(new Point2d(pt3.x, pt3.z));

        baseLineOffset.add(pt.x);
        toCluster.add(new Wrapper(new Point2d(pt.x, ppram * outLen)));

        min = Math.min(min, ppram);
        max = Math.max(max, ppram);
    }

    if (min == max || toCluster.isEmpty())
        return out;

    if (true) {
        baseLineOffset.sort(Double::compareTo);
        double modeBaselineOffset = baseLineOffset.get(baseLineOffset.size() / 2);

        DBSCANClusterer<Wrapper> cr = new DBSCANClusterer<>(1.5, 0);
        List<Cluster<Wrapper>> results = cr.cluster(toCluster);

        // Merge under-populated clusters into the nearest remaining cluster
        // (nearest by mean position along the line).
        Iterator<Cluster<Wrapper>> cit = results.iterator();
        while (cit.hasNext()) {
            Cluster<Wrapper> cw = cit.next();
            if (cw.getPoints().size() < 2 / TweedSettings.settings.profileHSampleDist) {
                cit.remove();

                double cMeanY = cw.getPoints().stream().mapToDouble(x -> x.pt[1]).average().getAsDouble();
                double bestDist = Double.MAX_VALUE;
                Cluster<Wrapper> bestWrapper = null;

                for (Cluster<Wrapper> near : results) {
                    double meanY = near.getPoints().stream().mapToDouble(x -> x.pt[1]).average().getAsDouble();
                    double dist = Math.abs(meanY - cMeanY);
                    if (dist < bestDist) {
                        bestDist = dist;
                        bestWrapper = near;
                    }
                }

                if (bestWrapper != null)
                    bestWrapper.getPoints().addAll(cw.getPoints());
            }
        }

        {
            baseLineOffset.clear();
            int c = 0;

            for (Cluster<Wrapper> cw : results) {
                double[] minMax = cw.getPoints().stream().map(p -> new double[] { p.pt[1] }).collect(new InAxDoubleArray());
                double[] offsetA = cw.getPoints().stream().mapToDouble(p -> p.pt[0]).sorted().toArray();
                double offset = offsetA[offsetA.length / 2];

                if (offset - modeBaselineOffset < 1) {
                    for (Wrapper w : cw.getPoints())
                        baseLineOffset.add(w.pt[0]);
                    continue;
                }

                SuperLine sl = new SuperLine(superLine.fromPPram(minMax[0] / outLen), superLine.fromPPram(minMax[1] / outLen));
                sl.moveLeft(offset);
                out.add(sl);

                List<Point2d> pts = cw.getPoints().stream().map(w -> new Point2d(w.pt[0], w.pt[1])).collect(Collectors.toList());
                PaintThing.debug(Rainbow.getColour(c++), 1, pts);
            }
        }
    }

    Point2d nStart = superLine.fromPPram(min), nEnd = superLine.fromPPram(max);
    superLine.start = nStart;
    superLine.end = nEnd;

    baseLineOffset.sort(Double::compare);
    if (!baseLineOffset.isEmpty())
        superLine.moveLeft(baseLineOffset.get(baseLineOffset.size() / 2));

    out.add(0, superLine);
    return out;
}
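The library technique at the heart of this method is Commons Math's DBSCANClusterer over Clusterable points. A minimal, self-contained sketch of just that step, using the library's DoublePoint instead of the project's Wrapper class and made-up (offset, position-along-line) values:

import java.util.Arrays;
import java.util.List;

import org.apache.commons.math3.ml.clustering.Cluster;
import org.apache.commons.math3.ml.clustering.DBSCANClusterer;
import org.apache.commons.math3.ml.clustering.DoublePoint;

public class DbscanDemo {
    public static void main(String[] args) {
        // Made-up 2-D points, analogous to the Wrapper points above.
        List<DoublePoint> points = Arrays.asList(
                new DoublePoint(new double[] { 0.1, 0.0 }),
                new DoublePoint(new double[] { 0.2, 0.8 }),
                new DoublePoint(new double[] { 0.0, 1.4 }),
                new DoublePoint(new double[] { 5.0, 10.0 }),
                new DoublePoint(new double[] { 5.3, 10.6 }));

        // eps = 1.5, minPts = 0: the same parameters as findProfileLines, so every
        // point joins some cluster rather than being discarded as noise.
        DBSCANClusterer<DoublePoint> clusterer = new DBSCANClusterer<>(1.5, 0);
        List<Cluster<DoublePoint>> clusters = clusterer.cluster(points);

        for (Cluster<DoublePoint> c : clusters) {
            System.out.println(c.getPoints());
        }
        // Expect two clusters: the three points near the origin, and the two near (5, 10).
    }
}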
use of org.apache.commons.math3.stat.descriptive.summary.Sum in project druid by druid-io.
the class BenchmarkColumnValueGenerator method initDistribution.
private void initDistribution() {
    BenchmarkColumnSchema.ValueDistribution distributionType = schema.getDistributionType();
    ValueType type = schema.getType();
    List<Object> enumeratedValues = schema.getEnumeratedValues();
    List<Double> enumeratedProbabilities = schema.getEnumeratedProbabilities();
    List<Pair<Object, Double>> probabilities = new ArrayList<>();

    switch (distributionType) {
        case SEQUENTIAL:
            // not random, just cycle through numbers from start to end, or cycle through enumerated values if provided
            distribution = new SequentialDistribution(schema.getStartInt(), schema.getEndInt(), schema.getEnumeratedValues());
            break;
        case UNIFORM:
            distribution = new UniformRealDistribution(schema.getStartDouble(), schema.getEndDouble());
            break;
        case DISCRETE_UNIFORM:
            if (enumeratedValues == null) {
                enumeratedValues = new ArrayList<>();
                for (int i = schema.getStartInt(); i < schema.getEndInt(); i++) {
                    Object val = convertType(i, type);
                    enumeratedValues.add(val);
                }
            }
            // give them all equal probability, the library will normalize probabilities to sum to 1.0
            for (int i = 0; i < enumeratedValues.size(); i++) {
                probabilities.add(new Pair<>(enumeratedValues.get(i), 0.1));
            }
            distribution = new EnumeratedTreeDistribution<>(probabilities);
            break;
        case NORMAL:
            distribution = new NormalDistribution(schema.getMean(), schema.getStandardDeviation());
            break;
        case ROUNDED_NORMAL:
            NormalDistribution normalDist = new NormalDistribution(schema.getMean(), schema.getStandardDeviation());
            distribution = new RealRoundingDistribution(normalDist);
            break;
        case ZIPF:
            int cardinality;
            if (enumeratedValues == null) {
                Integer startInt = schema.getStartInt();
                cardinality = schema.getEndInt() - startInt;
                ZipfDistribution zipf = new ZipfDistribution(cardinality, schema.getZipfExponent());
                for (int i = 0; i < cardinality; i++) {
                    probabilities.add(new Pair<>((Object) (i + startInt), zipf.probability(i)));
                }
            } else {
                cardinality = enumeratedValues.size();
                ZipfDistribution zipf = new ZipfDistribution(enumeratedValues.size(), schema.getZipfExponent());
                for (int i = 0; i < cardinality; i++) {
                    probabilities.add(new Pair<>(enumeratedValues.get(i), zipf.probability(i)));
                }
            }
            distribution = new EnumeratedTreeDistribution<>(probabilities);
            break;
        case ENUMERATED:
            for (int i = 0; i < enumeratedValues.size(); i++) {
                probabilities.add(new Pair<>(enumeratedValues.get(i), enumeratedProbabilities.get(i)));
            }
            distribution = new EnumeratedTreeDistribution<>(probabilities);
            break;
        default:
            throw new UnsupportedOperationException("Unknown distribution type: " + distributionType);
    }

    if (distribution instanceof AbstractIntegerDistribution) {
        ((AbstractIntegerDistribution) distribution).reseedRandomGenerator(seed);
    } else if (distribution instanceof AbstractRealDistribution) {
        ((AbstractRealDistribution) distribution).reseedRandomGenerator(seed);
    } else if (distribution instanceof EnumeratedDistribution) {
        ((EnumeratedDistribution) distribution).reseedRandomGenerator(seed);
    }
}
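EnumeratedTreeDistribution is Druid's own class, but the reseeding branch above treats it as a Commons Math EnumeratedDistribution, which already accepts unnormalized weights and normalizes them to sum to 1.0 (this is why the DISCRETE_UNIFORM case can pass a constant 0.1 for every value). A small standalone sketch of that behavior, using the plain Commons Math class and made-up values:

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.math3.distribution.EnumeratedDistribution;
import org.apache.commons.math3.util.Pair;

public class EnumeratedDistributionDemo {
    public static void main(String[] args) {
        // Equal weights; the library normalizes them, so each value has probability 1/3.
        List<Pair<String, Double>> probabilities = new ArrayList<>();
        probabilities.add(new Pair<>("a", 0.1));
        probabilities.add(new Pair<>("b", 0.1));
        probabilities.add(new Pair<>("c", 0.1));

        EnumeratedDistribution<String> distribution = new EnumeratedDistribution<>(probabilities);

        // Fix the seed so repeated runs draw the same sequence, as initDistribution does.
        distribution.reseedRandomGenerator(42L);

        for (int i = 0; i < 5; i++) {
            System.out.println(distribution.sample());
        }
    }
}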
use of org.apache.commons.math3.stat.descriptive.summary.Sum in project GDSC-SMLM by aherbert.
the class PeakResult method computeI1.
/**
 * Compute the function I1 using numerical integration. See Mortensen, et al. (2010) Nature Methods 7, 377-383, SI
 * equation 43.
 *
 * <pre>
 * I1 = 1 + sum [ ln(t) / (1 + t/rho) ] dt
 *    = - sum [ t * ln(t) / (t + rho) ] dt
 * </pre>
 *
 * Where sum is the integral between 0 and 1. In the case of rho=0 the function returns 1.
 *
 * @param rho
 *            the rho parameter of the integrand
 * @param integrationPoints
 *            the number of integration points for the LegendreGaussIntegrator
 * @return the I1 value
 */
private static double computeI1(final double rho, int integrationPoints) {
    if (rho == 0)
        return 1;

    final double relativeAccuracy = 1e-4;
    final double absoluteAccuracy = 1e-8;
    final int minimalIterationCount = 3;
    final int maximalIterationCount = 32;

    // Use an integrator that does not use the boundary since log(0) is undefined.
    UnivariateIntegrator i = new IterativeLegendreGaussIntegrator(integrationPoints, relativeAccuracy, absoluteAccuracy, minimalIterationCount, maximalIterationCount);

    // Specify the function to integrate
    UnivariateFunction f = new UnivariateFunction() {

        public double value(double x) {
            return x * Math.log(x) / (x + rho);
        }
    };
    final double i1 = -i.integrate(2000, f, 0, 1);
    return i1;
}
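For rho = 1 the integral has a closed form, since t/(t+1) = 1 - 1/(t+1) gives I1 = 1 - pi^2/12, which makes it easy to sanity-check the integrator setup. A standalone sketch (the class name and the choice of 10 integration points are illustrative, not taken from the project):

import org.apache.commons.math3.analysis.UnivariateFunction;
import org.apache.commons.math3.analysis.integration.IterativeLegendreGaussIntegrator;
import org.apache.commons.math3.analysis.integration.UnivariateIntegrator;

public class ComputeI1Check {
    public static void main(String[] args) {
        final double rho = 1.0;

        // Gauss-Legendre nodes lie strictly inside (0, 1), so the integrand is never
        // evaluated at x = 0, where log(x) diverges.
        UnivariateIntegrator integrator =
                new IterativeLegendreGaussIntegrator(10, 1e-4, 1e-8, 3, 32);

        UnivariateFunction f = new UnivariateFunction() {
            public double value(double x) {
                return x * Math.log(x) / (x + rho);
            }
        };

        double i1 = -integrator.integrate(2000, f, 0, 1);

        System.out.println(i1);                               // numerical estimate
        System.out.println(1.0 - Math.PI * Math.PI / 12.0);   // closed form, ~ 0.17753
    }
}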