Use of org.apache.commons.math3.fraction.Fraction in project GDSC-SMLM by aherbert.
The class PCPALMMolecules, method performDistanceAnalysis.
private void performDistanceAnalysis(double[][] intraHist, int p99) {
// We want to know the fraction of distances between molecules at the 99th percentile
// that are intra- rather than inter-molecule.
// Perform single-linkage clustering of the closest pairs up to this distance and count
// the number of links that are inter- and intra-molecule.
// Convert molecules for clustering
ArrayList<ClusterPoint> points = new ArrayList<ClusterPoint>(molecules.size());
for (Molecule m : molecules) // Precision was used to store the molecule ID
points.add(ClusterPoint.newClusterPoint((int) m.precision, m.x, m.y, m.photons));
ClusteringEngine engine = new ClusteringEngine(Prefs.getThreads(), ClusteringAlgorithm.PARTICLE_SINGLE_LINKAGE, new IJTrackProgress());
IJ.showStatus("Clustering to check inter-molecule distances");
engine.setTrackJoins(true);
ArrayList<Cluster> clusters = engine.findClusters(points, intraHist[0][p99]);
IJ.showStatus("");
if (clusters != null) {
double[] intraIdDistances = engine.getIntraIdDistances();
double[] interIdDistances = engine.getInterIdDistances();
int all = interIdDistances.length + intraIdDistances.length;
log(" * Fraction of inter-molecule particle linkage @ %s nm = %s %%", Utils.rounded(intraHist[0][p99], 4), (all > 0) ? Utils.rounded(100.0 * interIdDistances.length / all, 4) : "0");
// Show a double cumulative histogram plot
double[][] intraIdHist = Maths.cumulativeHistogram(intraIdDistances, false);
double[][] interIdHist = Maths.cumulativeHistogram(interIdDistances, false);
// Plot
String title = TITLE + " molecule linkage distance";
Plot2 plot = new Plot2(title, "Distance", "Frequency", intraIdHist[0], intraIdHist[1]);
double max = (intraIdHist[1].length > 0) ? intraIdHist[1][intraIdHist[1].length - 1] : 0;
if (interIdHist[1].length > 0)
max = FastMath.max(max, interIdHist[1][interIdHist[1].length - 1]);
plot.setLimits(0, intraIdHist[0][intraIdHist[0].length - 1], 0, max);
plot.setColor(Color.blue);
plot.addPoints(interIdHist[0], interIdHist[1], Plot2.LINE);
plot.setColor(Color.black);
Utils.display(title, plot);
} else {
log("Aborted clustering to check inter-molecule distances");
}
}
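The fraction logged by this method is just a ratio of link counts once joins are tracked, and the cumulative histograms are sorted-distance/running-count pairs. The sketch below is a standalone illustration, not part of PCPALMMolecules; the cumulative histogram mimics what the project's Maths.cumulativeHistogram utility appears to return.

import java.util.Arrays;

class LinkageFractionSketch {
    /** Fraction (%) of particle-linkage joins that connected different molecule IDs. */
    static double interFraction(double[] intraIdDistances, double[] interIdDistances) {
        final int all = intraIdDistances.length + interIdDistances.length;
        return (all > 0) ? 100.0 * interIdDistances.length / all : 0;
    }

    /** Cumulative histogram: x = sorted distances, y = running count 1..n. */
    static double[][] cumulativeHistogram(double[] distances) {
        final double[] x = distances.clone();
        Arrays.sort(x);
        final double[] y = new double[x.length];
        for (int i = 0; i < x.length; i++) {
            y[i] = i + 1;
        }
        return new double[][] { x, y };
    }
}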
Use of org.apache.commons.math3.fraction.Fraction in project GDSC-SMLM by aherbert.
The class PCPALMMolecules, method runSimulation.
private void runSimulation(boolean resultsAvailable) {
if (resultsAvailable && !showSimulationDialog())
return;
startLog();
log("Simulation parameters");
if (blinkingDistribution == 3) {
log(" - Clusters = %d", nMolecules);
log(" - Simulation size = %s um", Utils.rounded(simulationSize, 4));
log(" - Molecules/cluster = %s", Utils.rounded(blinkingRate, 4));
log(" - Blinking distribution = %s", BLINKING_DISTRIBUTION[blinkingDistribution]);
log(" - p-Value = %s", Utils.rounded(p, 4));
} else {
log(" - Molecules = %d", nMolecules);
log(" - Simulation size = %s um", Utils.rounded(simulationSize, 4));
log(" - Blinking rate = %s", Utils.rounded(blinkingRate, 4));
log(" - Blinking distribution = %s", BLINKING_DISTRIBUTION[blinkingDistribution]);
}
log(" - Average precision = %s nm", Utils.rounded(sigmaS, 4));
log(" - Clusters simulation = " + CLUSTER_SIMULATION[clusterSimulation]);
if (clusterSimulation > 0) {
log(" - Cluster number = %s +/- %s", Utils.rounded(clusterNumber, 4), Utils.rounded(clusterNumberSD, 4));
log(" - Cluster radius = %s nm", Utils.rounded(clusterRadius, 4));
}
final double nmPerPixel = 100;
double width = simulationSize * 1000.0;
// Allow a border of 3 x sigma for +/- precision
//if (blinkingRate > 1)
width -= 3 * sigmaS;
RandomGenerator randomGenerator = new Well19937c(System.currentTimeMillis() + System.identityHashCode(this));
RandomDataGenerator dataGenerator = new RandomDataGenerator(randomGenerator);
UniformDistribution dist = new UniformDistribution(null, new double[] { width, width, 0 }, randomGenerator.nextInt());
molecules = new ArrayList<Molecule>(nMolecules);
// Create some dummy results since the calibration is required for later analysis
results = new MemoryPeakResults();
results.setCalibration(new gdsc.smlm.results.Calibration(nmPerPixel, 1, 100));
results.setSource(new NullSource("Molecule Simulation"));
results.begin();
int count = 0;
// Generate a sequence of coordinates
ArrayList<double[]> xyz = new ArrayList<double[]>((int) (nMolecules * 1.1));
Statistics statsRadius = new Statistics();
Statistics statsSize = new Statistics();
String maskTitle = TITLE + " Cluster Mask";
ByteProcessor bp = null;
double maskScale = 0;
if (clusterSimulation > 0) {
// Simulate clusters.
// Note: In the Veatch et al. paper (PLoS ONE, e31457) correlation functions are built using circles
// with small radii of 4-8 Arbitrary Units (AU) or large radii of 10-30 AU. A fluctuations model is
// created at T = 1.075 Tc. It is not clear exactly how the particles are distributed.
// It may be that a mask is created first using the model and the particles are then placed on the
// mask at a specified density. This simulation produces a figure to show either a damped cosine
// function (circles) or an exponential (fluctuations). The number of particles in each circle may
// be determined by density alone; the paper does not describe how the cluster size statistic is
// derived.
//
// If this plugin simulation is run with a uniform distribution and blinking rate of 1 then the damped
// cosine function is reproduced. The curve crosses g(r)=1 at a value equivalent to the average
// distance to the centre-of-mass of each drawn cluster, not the input cluster radius parameter (which
// is a hard upper limit on the distance to centre).
final int maskSize = lowResolutionImageSize;
int[] mask = null;
// scale is in nm/pixel
maskScale = width / maskSize;
ArrayList<double[]> clusterCentres = new ArrayList<double[]>();
int totalSteps = 1 + (int) Math.ceil(nMolecules / clusterNumber);
if (clusterSimulation == 2 || clusterSimulation == 3) {
// Clusters are non-overlapping circles
// Ensure the circles do not overlap by using an exclusion mask: each accepted cluster
// (plus a border) is drawn onto an image to accumulate excluded pixels. When no
// more pixels are available, stop generating cluster centres.
// This is done by cumulatively filling a mask and using the MaskDistribution to select
// a new point. This may be slow but it works.
// TODO - Allow clusters of different sizes...
mask = new int[maskSize * maskSize];
Arrays.fill(mask, 255);
MaskDistribution maskDistribution = new MaskDistribution(mask, maskSize, maskSize, 0, maskScale, maskScale, randomGenerator);
double[] centre;
IJ.showStatus("Computing clusters mask");
int roiRadius = (int) Math.round((clusterRadius * 2) / maskScale);
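// Exclude a full diameter (2 x clusterRadius) around each accepted centre so that the next
// centre is at least 2 x clusterRadius away and circles of radius clusterRadius cannot overlap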
if (clusterSimulation == 3) {
// Generate a mask of circles then sample from that.
// If we want to fill the mask completely then adjust the total steps to be the number of
// circles that can fit inside the mask.
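// (i.e. the mask area divided by the area of one circle, with clusterRadius in mask pixels)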
totalSteps = (int) (maskSize * maskSize / (Math.PI * Math.pow(clusterRadius / maskScale, 2)));
}
while ((centre = maskDistribution.next()) != null && clusterCentres.size() < totalSteps) {
IJ.showProgress(clusterCentres.size(), totalSteps);
// The mask returns the coordinates with the centre of the image at 0,0
centre[0] += width / 2;
centre[1] += width / 2;
clusterCentres.add(centre);
// Fill in the mask around the centre to exclude any more circles that could overlap
double cx = centre[0] / maskScale;
double cy = centre[1] / maskScale;
fillMask(mask, maskSize, (int) cx, (int) cy, roiRadius, 0);
//Utils.display("Mask", new ColorProcessor(maskSize, maskSize, mask));
try {
maskDistribution = new MaskDistribution(mask, maskSize, maskSize, 0, maskScale, maskScale, randomGenerator);
} catch (IllegalArgumentException e) {
// This can happen when there are no more non-zero pixels
log("WARNING: No more room for clusters on the mask area (created %d of estimated %d)", clusterCentres.size(), totalSteps);
break;
}
}
IJ.showProgress(1);
IJ.showStatus("");
} else {
// Pick centres randomly from the distribution
while (clusterCentres.size() < totalSteps) clusterCentres.add(dist.next());
}
if (showClusterMask || clusterSimulation == 3) {
// Show the mask for the clusters
if (mask == null)
mask = new int[maskSize * maskSize];
else
Arrays.fill(mask, 0);
int roiRadius = (int) Math.round((clusterRadius) / maskScale);
for (double[] c : clusterCentres) {
double cx = c[0] / maskScale;
double cy = c[1] / maskScale;
fillMask(mask, maskSize, (int) cx, (int) cy, roiRadius, 1);
}
if (clusterSimulation == 3) {
// We have the mask. Now pick points at random from the mask.
MaskDistribution maskDistribution = new MaskDistribution(mask, maskSize, maskSize, 0, maskScale, maskScale, randomGenerator);
// Allocate each molecule position to a parent circle, thus defining clusters.
int[][] clusters = new int[clusterCentres.size()][];
int[] clusterSize = new int[clusters.length];
for (int i = 0; i < nMolecules; i++) {
double[] centre = maskDistribution.next();
// The mask returns the coordinates with the centre of the image at 0,0
centre[0] += width / 2;
centre[1] += width / 2;
xyz.add(centre);
// Output statistics on cluster size and number.
// TODO - Finding the closest cluster could be done better than an all-vs-all comparison
double min = distance2(centre, clusterCentres.get(0));
int cluster = 0;
for (int j = 1; j < clusterCentres.size(); j++) {
double d2 = distance2(centre, clusterCentres.get(j));
if (d2 < min) {
min = d2;
cluster = j;
}
}
// Assign point i to cluster
centre[2] = cluster;
if (clusterSize[cluster] == 0) {
clusters[cluster] = new int[10];
}
if (clusters[cluster].length <= clusterSize[cluster]) {
clusters[cluster] = Arrays.copyOf(clusters[cluster], (int) (clusters[cluster].length * 1.5));
}
clusters[cluster][clusterSize[cluster]++] = i;
}
// Generate real cluster size statistics
for (int j = 0; j < clusterSize.length; j++) {
final int size = clusterSize[j];
if (size == 0)
continue;
statsSize.add(size);
if (size == 1) {
statsRadius.add(0);
continue;
}
// Find centre of cluster and add the distance to each point
double[] com = new double[2];
for (int n = 0; n < size; n++) {
double[] xy = xyz.get(clusters[j][n]);
for (int k = 0; k < 2; k++) com[k] += xy[k];
}
for (int k = 0; k < 2; k++) com[k] /= size;
for (int n = 0; n < size; n++) {
double dx = xyz.get(clusters[j][n])[0] - com[0];
double dy = xyz.get(clusters[j][n])[1] - com[1];
statsRadius.add(Math.sqrt(dx * dx + dy * dy));
}
}
}
if (showClusterMask) {
bp = new ByteProcessor(maskSize, maskSize);
for (int i = 0; i < mask.length; i++) if (mask[i] != 0)
bp.set(i, 128);
Utils.display(maskTitle, bp);
}
}
// Use the simulated cluster centres to create clusters of the desired size
if (clusterSimulation == 1 || clusterSimulation == 2) {
for (double[] clusterCentre : clusterCentres) {
int clusterN = (int) Math.round((clusterNumberSD > 0) ? dataGenerator.nextGaussian(clusterNumber, clusterNumberSD) : clusterNumber);
if (clusterN < 1)
continue;
//double[] clusterCentre = dist.next();
if (clusterN == 1) {
// No need for a cluster around a point
xyz.add(clusterCentre);
statsRadius.add(0);
statsSize.add(1);
} else {
// Generate N random points within a circle of the chosen cluster radius.
// Locate the centre-of-mass and the average distance to the centre.
double[] com = new double[3];
int j = 0;
while (j < clusterN) {
// Generate a random point within a circle uniformly
// http://stackoverflow.com/questions/5837572/generate-a-random-point-within-a-circle-uniformly
double t = 2.0 * Math.PI * randomGenerator.nextDouble();
double u = randomGenerator.nextDouble() + randomGenerator.nextDouble();
double r = clusterRadius * ((u > 1) ? 2 - u : u);
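// u = u1 + u2 has a triangular density on [0, 2]; folding values above 1 back to 2 - u gives a
// density proportional to r, matching the disk's linearly growing area element, so the points
// are uniform over the circle without needing a square root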
double x = r * Math.cos(t);
double y = r * Math.sin(t);
double[] xy = new double[] { clusterCentre[0] + x, clusterCentre[1] + y };
xyz.add(xy);
for (int k = 0; k < 2; k++) com[k] += xy[k];
j++;
}
// Add the distance of the points from the centre of the cluster.
// Note this does not account for the movement due to precision.
statsSize.add(j);
if (j == 1) {
statsRadius.add(0);
} else {
for (int k = 0; k < 2; k++) com[k] /= j;
while (j > 0) {
double dx = xyz.get(xyz.size() - j)[0] - com[0];
double dy = xyz.get(xyz.size() - j)[1] - com[1];
statsRadius.add(Math.sqrt(dx * dx + dy * dy));
j--;
}
}
}
}
}
} else {
// Random distribution
for (int i = 0; i < nMolecules; i++) xyz.add(dist.next());
}
// The Gaussian sigma is applied per axis so that the overall displacement from the centre
// ( sqrt(dx^2+dy^2) ) has a root-mean-square of sigmaS; hence each axis uses sigmaS / sqrt(2)
final double sigma1D = sigmaS / Math.sqrt(2);
// Show optional histograms
StoredDataStatistics intraDistances = null;
StoredData blinks = null;
if (showHistograms) {
int capacity = (int) (xyz.size() * blinkingRate);
intraDistances = new StoredDataStatistics(capacity);
blinks = new StoredData(capacity);
}
Statistics statsSigma = new Statistics();
for (int i = 0; i < xyz.size(); i++) {
int nOccurrences = getBlinks(dataGenerator, blinkingRate);
if (showHistograms)
blinks.add(nOccurrences);
final int size = molecules.size();
// Get coordinates in nm
final double[] moleculeXyz = xyz.get(i);
if (bp != null && nOccurrences > 0) {
bp.putPixel((int) Math.round(moleculeXyz[0] / maskScale), (int) Math.round(moleculeXyz[1] / maskScale), 255);
}
while (nOccurrences-- > 0) {
final double[] localisationXy = Arrays.copyOf(moleculeXyz, 2);
// Add random precision
if (sigma1D > 0) {
final double dx = dataGenerator.nextGaussian(0, sigma1D);
final double dy = dataGenerator.nextGaussian(0, sigma1D);
localisationXy[0] += dx;
localisationXy[1] += dy;
if (!dist.isWithinXY(localisationXy))
continue;
// Calculate mean-squared displacement
statsSigma.add(dx * dx + dy * dy);
}
final double x = localisationXy[0];
final double y = localisationXy[1];
molecules.add(new Molecule(x, y, i, 1));
// Store in pixels
float[] params = new float[7];
params[Gaussian2DFunction.X_POSITION] = (float) (x / nmPerPixel);
params[Gaussian2DFunction.Y_POSITION] = (float) (y / nmPerPixel);
results.addf(i + 1, (int) x, (int) y, 0, 0, 0, params, null);
}
if (molecules.size() > size) {
count++;
if (showHistograms) {
int newCount = molecules.size() - size;
if (newCount == 1) {
//intraDistances.add(0);
continue;
}
// Get the distance matrix between these molecules
double[][] matrix = new double[newCount][newCount];
for (int ii = size, x = 0; ii < molecules.size(); ii++, x++) {
for (int jj = size + 1, y = 1; jj < molecules.size(); jj++, y++) {
final double d2 = molecules.get(ii).distance2(molecules.get(jj));
matrix[x][y] = matrix[y][x] = d2;
}
}
// Get the maximum distance for particle linkage clustering of this molecule
double max = 0;
for (int x = 0; x < newCount; x++) {
// Compare to all-other molecules and get the minimum distance
// needed to join at least one
double linkDistance = Double.POSITIVE_INFINITY;
for (int y = 0; y < newCount; y++) {
if (x == y)
continue;
if (matrix[x][y] < linkDistance)
linkDistance = matrix[x][y];
}
// Check if this is larger
if (max < linkDistance)
max = linkDistance;
}
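// max now holds the squared distance at which the most isolated localisation of this molecule
// first links to another localisation of the same molecule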
intraDistances.add(Math.sqrt(max));
}
}
}
results.end();
if (bp != null)
Utils.display(maskTitle, bp);
// Used for debugging
//System.out.printf(" * Molecules = %d (%d activated)\n", xyz.size(), count);
//if (clusterSimulation > 0)
// System.out.printf(" * Cluster number = %s +/- %s. Radius = %s +/- %s\n",
// Utils.rounded(statsSize.getMean(), 4), Utils.rounded(statsSize.getStandardDeviation(), 4),
// Utils.rounded(statsRadius.getMean(), 4), Utils.rounded(statsRadius.getStandardDeviation(), 4));
log("Simulation results");
log(" * Molecules = %d (%d activated)", xyz.size(), count);
log(" * Blinking rate = %s", Utils.rounded((double) molecules.size() / xyz.size(), 4));
log(" * Precision (Mean-displacement) = %s nm", (statsSigma.getN() > 0) ? Utils.rounded(Math.sqrt(statsSigma.getMean()), 4) : "0");
if (showHistograms) {
if (intraDistances.getN() == 0) {
log(" * Mean Intra-Molecule particle linkage distance = 0 nm");
log(" * Fraction of inter-molecule particle linkage @ 0 nm = 0 %%");
} else {
plot(blinks, "Blinks/Molecule", true);
double[][] intraHist = plot(intraDistances, "Intra-molecule particle linkage distance", false);
// Determine 95th and 99th percentile
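// intraHist[1] holds cumulative counts, so walk down from the last bin to find the largest
// distance at or below 99% (then 95%) of the total number of distances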
int p99 = intraHist[0].length - 1;
double limit1 = 0.99 * intraHist[1][p99];
double limit2 = 0.95 * intraHist[1][p99];
while (intraHist[1][p99] > limit1 && p99 > 0) p99--;
int p95 = p99;
while (intraHist[1][p95] > limit2 && p95 > 0) p95--;
log(" * Mean Intra-Molecule particle linkage distance = %s nm (95%% = %s, 99%% = %s, 100%% = %s)", Utils.rounded(intraDistances.getMean(), 4), Utils.rounded(intraHist[0][p95], 4), Utils.rounded(intraHist[0][p99], 4), Utils.rounded(intraHist[0][intraHist[0].length - 1], 4));
if (distanceAnalysis) {
performDistanceAnalysis(intraHist, p99);
}
}
}
if (clusterSimulation > 0) {
log(" * Cluster number = %s +/- %s", Utils.rounded(statsSize.getMean(), 4), Utils.rounded(statsSize.getStandardDeviation(), 4));
log(" * Cluster radius = %s +/- %s nm (mean distance to centre-of-mass)", Utils.rounded(statsRadius.getMean(), 4), Utils.rounded(statsRadius.getStandardDeviation(), 4));
}
}
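A note on the cluster radius statistic logged above: for points distributed uniformly over a disk of radius R, the expected distance to the centre is 2R/3, which is why the reported cluster radius (a mean distance to the centre-of-mass) is smaller than the input radius parameter. The following standalone sketch (hypothetical code, not part of the plugin) uses the same sqrt-free sampling trick to confirm this.

import java.util.Random;

class DiskSamplingSketch {
    public static void main(String[] args) {
        final double radius = 50; // nm, arbitrary example value
        final Random rng = new Random(42);
        final int n = 1_000_000;
        double sum = 0;
        for (int i = 0; i < n; i++) {
            // Same trick as runSimulation: u1 + u2 folded at 1 has density proportional to r
            final double u = rng.nextDouble() + rng.nextDouble();
            sum += radius * ((u > 1) ? 2 - u : u);
        }
        // Mean distance to the centre of a uniform disk is 2R/3
        System.out.printf("mean r = %.3f, expected 2R/3 = %.3f%n", sum / n, 2 * radius / 3);
    }
}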
Use of org.apache.commons.math3.fraction.Fraction in project gatk by broadinstitute.
The class AlleleFractionInitializer, method initialMinorFractions.
/**
* Initialize minor fractions assuming no allelic bias <p></p>
*
 * We integrate over f to get posterior probabilities (responsibilities) of alt / ref minor;
 * that is, the responsibility of alt minor is int_{0 to 1/2} f^a (1-f)^r df and the
 * responsibility of ref minor is int_{0 to 1/2} f^r (1-f)^a df.
 * These are proportional to I(1/2, a + 1, r + 1) and I(1/2, r + 1, a + 1),
 * respectively, where I is the regularized incomplete Beta function. By the symmetry
 * I_x(p, q) = 1 - I_{1-x}(q, p), these two responsibilities sum to 1, i.e. they are already normalized. <p></p>
*
* Finally, we set each minor fraction to the responsibility-weighted total count of
* reads in minor allele divided by total reads, ignoring outliers.
*/
private AlleleFractionState.MinorFractions initialMinorFractions(final AlleleFractionData data) {
final int numSegments = data.getNumSegments();
final AlleleFractionState.MinorFractions result = new AlleleFractionState.MinorFractions(numSegments);
for (int segment = 0; segment < numSegments; segment++) {
double responsibilityWeightedMinorAlleleReadCount = 0.0;
double responsibilityWeightedTotalReadCount = 0.0;
for (final AllelicCount count : data.getCountsInSegment(segment)) {
final int a = count.getAltReadCount();
final int r = count.getRefReadCount();
double altMinorResponsibility;
try {
altMinorResponsibility = Beta.regularizedBeta(0.5, a + 1, r + 1);
} catch (final MaxCountExceededException e) {
//if the special function can't be computed, give an all-or-nothing responsibility
altMinorResponsibility = a < r ? 1.0 : 0.0;
}
responsibilityWeightedMinorAlleleReadCount += altMinorResponsibility * a + (1 - altMinorResponsibility) * r;
responsibilityWeightedTotalReadCount += a + r;
}
// we achieve a flat prior via a single pseudocount for minor and non-minor reads, hence the +1 and +2
result.add((responsibilityWeightedMinorAlleleReadCount + 1) / (responsibilityWeightedTotalReadCount + 2));
}
return result;
}
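The normalization claim in the javadoc can be verified directly: commons-math3's Beta.regularizedBeta computes I_x(a, b), and the symmetry I_x(p, q) = 1 - I_{1-x}(q, p) at x = 1/2 makes the two responsibilities complementary. A minimal standalone check (the example read counts are arbitrary):

import org.apache.commons.math3.special.Beta;

class ResponsibilityCheck {
    public static void main(String[] args) {
        final int a = 7, r = 13; // arbitrary alt/ref read counts
        final double altMinor = Beta.regularizedBeta(0.5, a + 1, r + 1);
        final double refMinor = Beta.regularizedBeta(0.5, r + 1, a + 1);
        // The sum is 1 up to floating point, so no explicit normalization is needed
        System.out.printf("alt minor = %f, ref minor = %f, sum = %f%n", altMinor, refMinor, altMinor + refMinor);
    }
}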
Use of org.apache.commons.math3.fraction.Fraction in project gatk by broadinstitute.
The class StrandArtifact, method annotate.
@Override
public void annotate(final ReferenceContext ref, final VariantContext vc, final Genotype g, final GenotypeBuilder gb, final ReadLikelihoods<Allele> likelihoods) {
Utils.nonNull(gb);
Utils.nonNull(vc);
Utils.nonNull(likelihoods);
// do not annotate the genotype fields for normal
if (g.isHomRef()) {
return;
}
pi.put(NO_ARTIFACT, 0.95);
pi.put(ART_FWD, 0.025);
pi.put(ART_REV, 0.025);
// We use the allele with highest LOD score
final double[] tumorLods = GATKProtectedVariantContextUtils.getAttributeAsDoubleArray(vc, GATKVCFConstants.TUMOR_LOD_KEY, () -> null, -1);
final int indexOfMaxTumorLod = MathUtils.maxElementIndex(tumorLods);
final Allele altAllele = vc.getAlternateAllele(indexOfMaxTumorLod);
final Collection<ReadLikelihoods<Allele>.BestAllele> bestAlleles = likelihoods.bestAlleles(g.getSampleName());
final int numFwdAltReads = (int) bestAlleles.stream().filter(ba -> !ba.read.isReverseStrand() && ba.isInformative() && ba.allele.equals(altAllele)).count();
final int numRevAltReads = (int) bestAlleles.stream().filter(ba -> ba.read.isReverseStrand() && ba.isInformative() && ba.allele.equals(altAllele)).count();
final int numFwdReads = (int) bestAlleles.stream().filter(ba -> !ba.read.isReverseStrand() && ba.isInformative()).count();
final int numRevReads = (int) bestAlleles.stream().filter(ba -> ba.read.isReverseStrand() && ba.isInformative()).count();
final int numAltReads = numFwdAltReads + numRevAltReads;
final int numReads = numFwdReads + numRevReads;
final EnumMap<StrandArtifactZ, Double> unnormalized_posterior_probabilities = new EnumMap<>(StrandArtifactZ.class);
final EnumMap<StrandArtifactZ, Double> maximum_a_posteriori_allele_fraction_estimates = new EnumMap<>(StrandArtifactZ.class);
/*** Compute the posterior probability of ARTIFACT_FWD and ARTIFACT_REV; it's a double integral over f and epsilon ***/
// the integrand is a polynomial of degree n, where n is the number of reads at the locus;
// an m-point Gauss-Legendre rule is exact up to degree 2m - 1, so (n/2) + 1 points suffice
final int numIntegPointsForAlleleFraction = numReads / 2 + 1;
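// the epsilon integrand additionally carries the Beta(ALPHA, BETA) prior density, which
// (for integer ALPHA and BETA) raises the polynomial degree to n + ALPHA + BETA - 2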
final int numIntegPointsForEpsilon = (numReads + ALPHA + BETA - 2) / 2 + 1;
final double likelihoodForArtifactFwd = IntegrationUtils.integrate2d((f, epsilon) -> getIntegrandGivenArtifact(f, epsilon, numFwdReads, numRevReads, numFwdAltReads, numRevAltReads), 0.0, 1.0, numIntegPointsForAlleleFraction, 0.0, 1.0, numIntegPointsForEpsilon);
final double likelihoodForArtifactRev = IntegrationUtils.integrate2d((f, epsilon) -> getIntegrandGivenArtifact(f, epsilon, numRevReads, numFwdReads, numRevAltReads, numFwdAltReads), 0.0, 1.0, numIntegPointsForAlleleFraction, 0.0, 1.0, numIntegPointsForEpsilon);
unnormalized_posterior_probabilities.put(ART_FWD, pi.get(ART_FWD) * likelihoodForArtifactFwd);
unnormalized_posterior_probabilities.put(ART_REV, pi.get(ART_REV) * likelihoodForArtifactRev);
/*** Compute the posterior probability of NO_ARTIFACT; evaluate a single integral over the allele fraction ***/
final double likelihoodForNoArtifact = IntegrationUtils.integrate(f -> getIntegrandGivenNoArtifact(f, numFwdReads, numRevReads, numFwdAltReads, numRevAltReads), 0.0, 1.0, numIntegPointsForAlleleFraction);
unnormalized_posterior_probabilities.put(NO_ARTIFACT, pi.get(NO_ARTIFACT) * likelihoodForNoArtifact);
final double[] posterior_probabilities = MathUtils.normalizeFromRealSpace(unnormalized_posterior_probabilities.values().stream().mapToDouble(Double::doubleValue).toArray());
/*** Compute the maximum a posteriori estimate for allele fraction given strand artifact ***/
// For a fixed f, integrate the double integral over epsilon. This gives the likelihood
// p(x^+, x^- | f, z) for that f, which is proportional to the posterior p(f | x^+, x^-, z)
final int numSamplePoints = 100;
final double[] samplePoints = GATKProtectedMathUtils.createEvenlySpacedPoints(0.0, 1.0, numSamplePoints);
double[] likelihoodsGivenForwardArtifact = new double[numSamplePoints];
double[] likelihoodsGivenReverseArtifact = new double[numSamplePoints];
for (int i = 0; i < samplePoints.length; i++) {
final double f = samplePoints[i];
likelihoodsGivenForwardArtifact[i] = IntegrationUtils.integrate(epsilon -> getIntegrandGivenArtifact(f, epsilon, numFwdReads, numRevReads, numFwdAltReads, numRevAltReads), 0.0, 1.0, numIntegPointsForEpsilon);
likelihoodsGivenReverseArtifact[i] = IntegrationUtils.integrate(epsilon -> getIntegrandGivenArtifact(f, epsilon, numRevReads, numFwdReads, numRevAltReads, numFwdAltReads), 0.0, 1.0, numIntegPointsForEpsilon);
}
final int maxAlleleFractionIndexFwd = MathUtils.maxElementIndex(likelihoodsGivenForwardArtifact);
final int maxAlleleFractionIndexRev = MathUtils.maxElementIndex(likelihoodsGivenReverseArtifact);
maximum_a_posteriori_allele_fraction_estimates.put(ART_FWD, samplePoints[maxAlleleFractionIndexFwd]);
maximum_a_posteriori_allele_fraction_estimates.put(ART_REV, samplePoints[maxAlleleFractionIndexRev]);
// In the absence of strand artifact, MAP estimate for f reduces to the sample alt allele fraction
maximum_a_posteriori_allele_fraction_estimates.put(NO_ARTIFACT, (double) numAltReads / numReads);
gb.attribute(POSTERIOR_PROBABILITIES_KEY, posterior_probabilities);
gb.attribute(MAP_ALLELE_FRACTIONS_KEY, maximum_a_posteriori_allele_fraction_estimates.values().stream().mapToDouble(Double::doubleValue).toArray());
}
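The point counts above follow the standard exactness rule for Gauss-Legendre quadrature: an m-point rule integrates polynomials of degree up to 2m - 1 exactly, so m = (n/2) + 1 covers degree n. A standalone check using commons-math3's own Gauss-Legendre factory (GATK's IntegrationUtils is assumed to wrap an equivalent rule):

import org.apache.commons.math3.analysis.integration.gauss.GaussIntegrator;
import org.apache.commons.math3.analysis.integration.gauss.GaussIntegratorFactory;

class GaussLegendreCheck {
    public static void main(String[] args) {
        final int degree = 7; // polynomial degree n
        final int points = degree / 2 + 1; // (n/2) + 1 = 4 points, exact up to degree 7
        final GaussIntegrator gauss = new GaussIntegratorFactory().legendre(points, 0.0, 1.0);
        final double value = gauss.integrate(x -> Math.pow(x, degree));
        // Exact integral of x^7 on [0, 1] is 1/8
        System.out.printf("quadrature = %.12f, exact = %.12f%n", value, 1.0 / (degree + 1));
    }
}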
Use of org.apache.commons.math3.fraction.Fraction in project gatk-protected by broadinstitute.
The class AlleleFractionInitializer, method initialMinorFractions.
/**
* Initialize minor fractions assuming no allelic bias <p></p>
*
 * We integrate over f to get posterior probabilities (responsibilities) of alt / ref minor;
 * that is, the responsibility of alt minor is int_{0 to 1/2} f^a (1-f)^r df and the
 * responsibility of ref minor is int_{0 to 1/2} f^r (1-f)^a df.
 * These are proportional to I(1/2, a + 1, r + 1) and I(1/2, r + 1, a + 1),
 * respectively, where I is the regularized incomplete Beta function. By the symmetry
 * I_x(p, q) = 1 - I_{1-x}(q, p), these two responsibilities sum to 1, i.e. they are already normalized. <p></p>
*
* Finally, we set each minor fraction to the responsibility-weighted total count of
* reads in minor allele divided by total reads, ignoring outliers.
*/
private AlleleFractionState.MinorFractions initialMinorFractions(final AlleleFractionData data) {
final int numSegments = data.getNumSegments();
final AlleleFractionState.MinorFractions result = new AlleleFractionState.MinorFractions(numSegments);
for (int segment = 0; segment < numSegments; segment++) {
double responsibilityWeightedMinorAlleleReadCount = 0.0;
double responsibilityWeightedTotalReadCount = 0.0;
for (final AllelicCount count : data.getCountsInSegment(segment)) {
final int a = count.getAltReadCount();
final int r = count.getRefReadCount();
double altMinorResponsibility;
try {
altMinorResponsibility = Beta.regularizedBeta(0.5, a + 1, r + 1);
} catch (final MaxCountExceededException e) {
//if the special function can't be computed, give an all-or-nothing responsibility
altMinorResponsibility = a < r ? 1.0 : 0.0;
}
responsibilityWeightedMinorAlleleReadCount += altMinorResponsibility * a + (1 - altMinorResponsibility) * r;
responsibilityWeightedTotalReadCount += a + r;
}
// we achieve a flat prior via a single pseudocount for minor and non-minor reads, hence the +1 and +2
result.add((responsibilityWeightedMinorAlleleReadCount + 1) / (responsibilityWeightedTotalReadCount + 2));
}
return result;
}