Use of org.apache.commons.math3.stat.descriptive.summary.Sum in project uPortal by Jasig.
From the class JpaStatisticalSummaryTest, method testSummaryStatisticsJson:
// @Test
// public void testJpaStatisticalSummary() {
//     final long id = this.executeInTransaction(new Callable<Long>() {
//         @Override
//         public Long call() throws Exception {
//             final JpaStatisticalSummary jpaStatisticalSummary = new JpaStatisticalSummary();
//
//             final Random r = new Random(0);
//             for (int i = 0; i < 10; i++) {
//                 final int nextInt = r.nextInt(100000000);
//                 jpaStatisticalSummary.addValue(nextInt);
//             }
//
//             getEntityManager().persist(jpaStatisticalSummary);
//
//             System.out.println(jpaStatisticalSummary);
//
//             return jpaStatisticalSummary.getStatSummaryId();
//         }
//     });
//
//     System.out.println(id);
//
//     this.executeInTransaction(new CallableWithoutResult() {
//         @Override
//         protected void callWithoutResult() {
//             final JpaStatisticalSummary jpaStatisticalSummary =
//                     getEntityManager().find(JpaStatisticalSummary.class, id);
//
//             System.out.println(jpaStatisticalSummary);
//         }
//     });
// }
@Ignore
@Test
public void testSummaryStatisticsJson() throws Exception {
    final SecondMoment secondMoment = new SecondMoment();
    final Sum sum = new Sum();
    final SumOfSquares sumsq = new SumOfSquares();
    final Min min = new Min();
    final Max max = new Max();
    final SumOfLogs sumLog = new SumOfLogs();

    final Random r = new Random(0);
    for (int i = 0; i < 10; i++) {
        final int nextInt = r.nextInt(100000000);
        secondMoment.increment(nextInt);
        sum.increment(nextInt);
        sumsq.increment(nextInt);
        min.increment(nextInt);
        max.increment(nextInt);
        sumLog.increment(nextInt);
    }

    testStorelessUnivariateStatistic(secondMoment, 7.513432791665536E15);
    testStorelessUnivariateStatistic(sum, 6.01312177E8);
    testStorelessUnivariateStatistic(sumsq, 4.3671066212513456E16);
    testStorelessUnivariateStatistic(min, 2116447.0);
    testStorelessUnivariateStatistic(max, 8.5505948E7);
    testStorelessUnivariateStatistic(sumLog, 175.91713800250577);
}
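The testStorelessUnivariateStatistic helper is not shown in this snippet. Given the test name, it presumably round-trips each statistic through JSON and checks the result on both ends; below is a minimal sketch under that assumption. The ObjectMapper usage and the 0.1 delta are illustrative, not the project's actual helper, and in practice the Commons Math statistic classes are not JavaBeans, so real serialization typically needs custom serializers or mixins.

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.commons.math3.stat.descriptive.StorelessUnivariateStatistic;
import static org.junit.Assert.assertEquals;

// Hypothetical helper: verify the computed value, round-trip the statistic
// through JSON, and verify the deserialized copy reports the same value.
private void testStorelessUnivariateStatistic(
        final StorelessUnivariateStatistic stat, final double expected) throws Exception {
    assertEquals(expected, stat.getResult(), 0.1);
    final ObjectMapper mapper = new ObjectMapper(); // assumed JSON serializer
    final String json = mapper.writeValueAsString(stat);
    final StorelessUnivariateStatistic roundTripped =
            mapper.readValue(json, stat.getClass());
    assertEquals(expected, roundTripped.getResult(), 0.1);
}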
Use of org.apache.commons.math3.stat.descriptive.summary.Sum in project pyramid by cheng-li.
From the class Splitter, method sample:
static Optional<SplitResult> sample(List<SplitResult> splitResults) {
    if (splitResults.isEmpty()) {
        return Optional.empty();
    }
    if (splitResults.get(0).getReduction() == 0) {
        return Optional.empty();
    }
    double total = splitResults.stream().mapToDouble(SplitResult::getReduction).sum();
    double[] probs = splitResults.stream()
            .mapToDouble(splitResult -> splitResult.getReduction() / total)
            .toArray();
    int[] singletons = IntStream.range(0, splitResults.size()).toArray();
    EnumeratedIntegerDistribution distribution = new EnumeratedIntegerDistribution(singletons, probs);
    int sample = distribution.sample();
    return Optional.of(splitResults.get(sample));
}
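EnumeratedIntegerDistribution draws an index with the supplied probabilities, so sample above picks a split with probability proportional to its impurity reduction. A self-contained sketch of the same pattern (the reduction values here are made up for illustration):

import java.util.Arrays;
import java.util.stream.IntStream;
import org.apache.commons.math3.distribution.EnumeratedIntegerDistribution;

public class ProportionalSamplingDemo {
    public static void main(String[] args) {
        // Hypothetical reduction scores; index 2 should be drawn most often.
        final double[] reductions = {1.0, 2.0, 7.0};
        final double total = Arrays.stream(reductions).sum();
        final double[] probs = Arrays.stream(reductions).map(x -> x / total).toArray();
        final int[] indices = IntStream.range(0, reductions.length).toArray();

        final EnumeratedIntegerDistribution dist =
                new EnumeratedIntegerDistribution(indices, probs);

        final int[] counts = new int[reductions.length];
        for (int i = 0; i < 10000; i++) {
            counts[dist.sample()]++;
        }
        // Expect counts roughly proportional to 1 : 2 : 7.
        System.out.println(Arrays.toString(counts));
    }
}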
Use of org.apache.commons.math3.stat.descriptive.summary.Sum in project gatk by broadinstitute.
From the class AlleleFractionInitializer, method initialMinorFractions:
/**
 * Initialize minor fractions, assuming no allelic bias.
 * <p>
 * We integrate over f to get posterior probabilities (responsibilities) of alt minor vs. ref minor:
 * the responsibility of alt minor is int_{0 to 1/2} f^a (1-f)^r df and the responsibility of
 * ref minor is int_{0 to 1/2} f^r (1-f)^a df. These are proportional to I(1/2, a + 1, r + 1)
 * and I(1/2, r + 1, a + 1), respectively, where I is the regularized incomplete Beta function.
 * By construction these responsibilities sum to 1, i.e. they are already normalized.
 * <p>
 * Finally, we set each minor fraction to the responsibility-weighted count of minor-allele reads
 * divided by the total read count, ignoring outliers.
 */
private AlleleFractionState.MinorFractions initialMinorFractions(final AlleleFractionData data) {
    final int numSegments = data.getNumSegments();
    final AlleleFractionState.MinorFractions result = new AlleleFractionState.MinorFractions(numSegments);
    for (int segment = 0; segment < numSegments; segment++) {
        double responsibilityWeightedMinorAlleleReadCount = 0.0;
        double responsibilityWeightedTotalReadCount = 0.0;
        for (final AllelicCount count : data.getCountsInSegment(segment)) {
            final int a = count.getAltReadCount();
            final int r = count.getRefReadCount();
            double altMinorResponsibility;
            try {
                altMinorResponsibility = Beta.regularizedBeta(0.5, a + 1, r + 1);
            } catch (final MaxCountExceededException e) {
                // if the special function can't be computed, give an all-or-nothing responsibility
                altMinorResponsibility = a < r ? 1.0 : 0.0;
            }
            responsibilityWeightedMinorAlleleReadCount += altMinorResponsibility * a + (1 - altMinorResponsibility) * r;
            responsibilityWeightedTotalReadCount += a + r;
        }
        // we achieve a flat prior via a single pseudocount for minor and non-minor reads, hence the +1 and +2
        result.add((responsibilityWeightedMinorAlleleReadCount + 1) / (responsibilityWeightedTotalReadCount + 2));
    }
    return result;
}
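Beta.regularizedBeta(0.5, a + 1, r + 1) evaluates the regularized incomplete Beta function I(1/2, a + 1, r + 1) from org.apache.commons.math3.special.Beta. A standalone check of the normalization the javadoc relies on, I(1/2, x, y) + I(1/2, y, x) = 1 (the read counts here are made up):

import org.apache.commons.math3.special.Beta;

public class ResponsibilityDemo {
    public static void main(String[] args) {
        // Hypothetical counts: 12 alt reads, 30 ref reads.
        final int a = 12;
        final int r = 30;
        final double altMinor = Beta.regularizedBeta(0.5, a + 1, r + 1);
        final double refMinor = Beta.regularizedBeta(0.5, r + 1, a + 1);
        // I(1/2, x, y) + I(1/2, y, x) = 1, so the two responsibilities
        // are already normalized, as the javadoc above notes.
        System.out.printf("altMinor=%.4f refMinor=%.4f sum=%.4f%n",
                altMinor, refMinor, altMinor + refMinor);
    }
}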
Use of org.apache.commons.math3.stat.descriptive.summary.Sum in project gatk-protected by broadinstitute.
From the class CoverageModelEMWorkspace, method updateSampleUnexplainedVariance:
/**
* E-step update of the sample-specific unexplained variance
*
* @return a {@link SubroutineSignal} containing the update size (key: "error_norm") and the average
* number of function evaluations per sample (key: "iterations")
*/
@EvaluatesRDD
@UpdatesRDD
@CachesRDD
public SubroutineSignal updateSampleUnexplainedVariance() {
    mapWorkers(cb -> cb.cloneWithUpdatedCachesByTag(CoverageModelEMComputeBlock.CoverageModelICGCacheTag.E_STEP_GAMMA));
    cacheWorkers("after E-step for sample unexplained variance initialization");

    /* create a compound objective function for simultaneous multi-sample queries */
    final java.util.function.Function<Map<Integer, Double>, Map<Integer, Double>> objFunc = arg -> {
        if (arg.isEmpty()) {
            return Collections.emptyMap();
        }
        final int[] sampleIndices = arg.keySet().stream().mapToInt(i -> i).toArray();
        final INDArray gammaValues = Nd4j.create(
                Arrays.stream(sampleIndices).mapToDouble(arg::get).toArray(),
                new int[] { sampleIndices.length, 1 });
        final INDArray eval = mapWorkersAndReduce(
                cb -> cb.calculateSampleSpecificVarianceObjectiveFunctionMultiSample(sampleIndices, gammaValues),
                INDArray::add);
        final Map<Integer, Double> output = new HashMap<>();
        IntStream.range(0, sampleIndices.length)
                .forEach(evalIdx -> output.put(sampleIndices[evalIdx], eval.getDouble(evalIdx)));
        return output;
    };

    final java.util.function.Function<UnivariateSolverSpecifications, AbstractUnivariateSolver> solverFactory =
            spec -> new RobustBrentSolver(
                    spec.getRelativeAccuracy(), spec.getAbsoluteAccuracy(), spec.getFunctionValueAccuracy(),
                    null,
                    config.getSampleSpecificVarianceSolverNumBisections(),
                    config.getSampleSpecificVarianceSolverRefinementDepth());

    /* instantiate a synchronized multi-sample root finder and add jobs */
    final SynchronizedUnivariateSolver syncSolver = new SynchronizedUnivariateSolver(objFunc, solverFactory, numSamples);
    IntStream.range(0, numSamples).forEach(si -> {
        final double x0 = 0.5 * config.getSampleSpecificVarianceUpperLimit();
        syncSolver.add(si, 0, config.getSampleSpecificVarianceUpperLimit(), x0,
                config.getSampleSpecificVarianceAbsoluteTolerance(),
                config.getSampleSpecificVarianceRelativeTolerance(),
                config.getSampleSpecificVarianceMaximumIterations());
    });

    /* solve and collect statistics */
    final INDArray newSampleUnexplainedVariance = Nd4j.create(numSamples, 1);
    final List<Integer> numberOfEvaluations = new ArrayList<>(numSamples);
    try {
        final Map<Integer, SynchronizedUnivariateSolver.UnivariateSolverSummary> newSampleSpecificVarianceMap = syncSolver.solve();
        newSampleSpecificVarianceMap.entrySet().forEach(entry -> {
            final int sampleIndex = entry.getKey();
            final SynchronizedUnivariateSolver.UnivariateSolverSummary summary = entry.getValue();
            double val = 0;
            switch (summary.status) {
                case SUCCESS:
                    val = summary.x;
                    break;
                case TOO_MANY_EVALUATIONS:
                    logger.warn("Could not locate the root of gamma -- increase the maximum number of"
                            + " function evaluations");
                    break;
            }
            newSampleUnexplainedVariance.put(sampleIndex, 0, val);
            numberOfEvaluations.add(summary.evaluations);
        });
    } catch (final InterruptedException ex) {
        throw new RuntimeException("The update of sample unexplained variance was interrupted -- cannot continue");
    }

    /* admix the new solution with the current solution */
    final INDArray newSampleUnexplainedVarianceAdmixed = newSampleUnexplainedVariance
            .mul(config.getMeanFieldAdmixingRatio())
            .addi(sampleUnexplainedVariance.mul(1 - config.getMeanFieldAdmixingRatio()));

    /* calculate the error */
    final double errorNormInfinity = CoverageModelEMWorkspaceMathUtils.getINDArrayNormInfinity(
            newSampleUnexplainedVarianceAdmixed.sub(sampleUnexplainedVariance));

    /* update local copy */
    sampleUnexplainedVariance.assign(newSampleUnexplainedVarianceAdmixed);

    /* push to workers */
    pushToWorkers(newSampleUnexplainedVarianceAdmixed,
            (arr, cb) -> cb.cloneWithUpdatedPrimitive(
                    CoverageModelEMComputeBlock.CoverageModelICGCacheNode.gamma_s,
                    newSampleUnexplainedVarianceAdmixed));

    final int iterations = (int) (numberOfEvaluations.stream().mapToDouble(d -> d).sum() / numSamples);
    return SubroutineSignal.builder()
            .put(StandardSubroutineSignals.RESIDUAL_ERROR_NORM, errorNormInfinity)
            .put(StandardSubroutineSignals.ITERATIONS, iterations)
            .build();
}
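The admixing step at the end is a damped mean-field update: each new variance is the convex combination alpha * solved + (1 - alpha) * current, and convergence is tracked by the infinity norm of the change. A plain-Java sketch of that arithmetic without the Nd4j machinery (the array contents and the ratio alpha are illustrative):

public class AdmixDemo {
    public static void main(String[] args) {
        // Hypothetical current and freshly solved per-sample variances.
        final double[] current = {0.10, 0.40, 0.25};
        final double[] solved  = {0.12, 0.30, 0.25};
        final double alpha = 0.75; // assumed mean-field admixing ratio

        double errorNormInfinity = 0.0;
        final double[] admixed = new double[current.length];
        for (int s = 0; s < current.length; s++) {
            admixed[s] = alpha * solved[s] + (1 - alpha) * current[s];
            // infinity norm of the update = largest absolute per-sample change
            errorNormInfinity = Math.max(errorNormInfinity, Math.abs(admixed[s] - current[s]));
        }
        System.out.println("error_norm = " + errorNormInfinity);
    }
}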
Use of org.apache.commons.math3.stat.descriptive.summary.Sum in project gatk by broadinstitute.
From the class CoverageModelEMWorkspace, method updateSampleUnexplainedVariance:
(The javadoc and method body are identical, line for line, to the gatk-protected snippet above: gatk and gatk-protected share this implementation of updateSampleUnexplainedVariance.)