Use of uk.ac.babraham.SeqMonk.DataTypes.Probes.ProbeSet in project SeqMonk by s-andrews.
The class DeduplicationProbeGenerator, method run.
/* (non-Javadoc)
* @see java.lang.Runnable#run()
*/
public void run() {
Chromosome[] chromosomes = collection.genome().getAllChromosomes();
boolean separateStrands = separateStrandsBox.isSelected();
int maxDistance = 0;
if (maxDistanceField.getText().length() > 0) {
maxDistance = Integer.parseInt(maxDistanceField.getText());
}
Vector<Probe> newProbes = new Vector<Probe>();
for (int c = 0; c < chromosomes.length; c++) {
// Time for an update
updateGenerationProgress("Processed " + c + " chromosomes", c, chromosomes.length);
Probe[] startingProbes = initialList.getProbesForChromosome(chromosomes[c]);
// For directional merging we use 3 probes
Probe currentForward = null;
Probe currentReverse = null;
Probe currentUnknown = null;
// For non-directional merging we use only one
Probe currentProbe = null;
// Now we can make the actual probes
for (int i = 0; i < startingProbes.length; i++) {
if (cancel) {
generationCancelled();
return;
}
if (separateStrands) {
switch(startingProbes[i].strand()) {
case (Probe.FORWARD):
if (currentForward == null) {
currentForward = new Probe(chromosomes[c], startingProbes[i].packedPosition());
} else if (startingProbes[i].start() <= currentForward.end() + maxDistance) {
if (startingProbes[i].end() > currentForward.end()) {
// Extend the current probe
currentForward = new Probe(chromosomes[c], currentForward.start(), startingProbes[i].end(), currentForward.strand(), currentForward.name());
}
} else {
newProbes.add(currentForward);
currentForward = new Probe(chromosomes[c], startingProbes[i].packedPosition());
}
break;
case (Probe.REVERSE):
if (currentReverse == null) {
currentReverse = new Probe(chromosomes[c], startingProbes[i].packedPosition());
} else if (startingProbes[i].start() <= currentReverse.end() + maxDistance) {
if (startingProbes[i].end() > currentReverse.end()) {
// Extend the current probe
currentReverse = new Probe(chromosomes[c], currentReverse.start(), startingProbes[i].end(), currentReverse.strand(), currentReverse.name());
}
} else {
newProbes.add(currentReverse);
currentReverse = new Probe(chromosomes[c], startingProbes[i].packedPosition());
}
break;
case (Probe.UNKNOWN):
if (currentUnknown == null) {
currentUnknown = new Probe(chromosomes[c], startingProbes[i].packedPosition());
} else if (startingProbes[i].start() <= currentUnknown.end() + maxDistance) {
if (startingProbes[i].end() > currentUnknown.end()) {
// Extend the current probe
currentUnknown = new Probe(chromosomes[c], currentUnknown.start(), startingProbes[i].end(), currentUnknown.strand(), currentUnknown.name());
}
} else {
newProbes.add(currentUnknown);
currentUnknown = new Probe(chromosomes[c], startingProbes[i].packedPosition());
}
break;
}
} else {
if (currentProbe == null) {
currentProbe = new Probe(chromosomes[c], startingProbes[i].packedPosition());
} else if (startingProbes[i].start() <= currentProbe.end() + maxDistance) {
if (startingProbes[i].end() > currentProbe.end() || startingProbes[i].strand() != currentProbe.strand()) {
// Update the current probe
int usedStrand = currentProbe.strand();
if (startingProbes[i].strand() != currentProbe.strand()) {
usedStrand = Probe.UNKNOWN;
}
currentProbe = new Probe(chromosomes[c], currentProbe.start(), Math.max(startingProbes[i].end(), currentProbe.end()), usedStrand, currentProbe.name());
}
} else {
newProbes.add(currentProbe);
currentProbe = new Probe(chromosomes[c], startingProbes[i].packedPosition());
}
}
}
// At the end of each chromosome we add any probes we have remaining
if (currentProbe != null)
newProbes.add(currentProbe);
if (currentForward != null)
newProbes.add(currentForward);
if (currentReverse != null)
newProbes.add(currentReverse);
if (currentUnknown != null)
newProbes.add(currentUnknown);
}
Probe[] finalList = newProbes.toArray(new Probe[0]);
ProbeSet finalSet = new ProbeSet(getDescription(), finalList);
generationComplete(finalSet);
}
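Stripped of the per-strand bookkeeping, the loop above is standard interval merging over a start-sorted list: extend the current interval while the next one starts within maxDistance of its end, otherwise emit it and start a new one. A minimal sketch of that rule on bare start/end pairs (IntervalMerger and its types are illustrative, not part of the SeqMonk API):
import java.util.ArrayList;
import java.util.List;
// Illustrative sketch of the merge rule used above, on plain coordinates.
// Assumes the input is sorted by start position, as the generator assumes
// for its per-chromosome probe lists.
public class IntervalMerger {
    public static List<int[]> merge(List<int[]> sortedIntervals, int maxDistance) {
        List<int[]> merged = new ArrayList<int[]>();
        int[] current = null;
        for (int[] iv : sortedIntervals) {
            if (current == null) {
                current = new int[] { iv[0], iv[1] };
            } else if (iv[0] <= current[1] + maxDistance) {
                // Within range: extend the current interval if this one reaches further
                current[1] = Math.max(current[1], iv[1]);
            } else {
                merged.add(current);
                current = new int[] { iv[0], iv[1] };
            }
        }
        if (current != null) merged.add(current); // flush the final interval
        return merged;
    }
}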
Use of uk.ac.babraham.SeqMonk.DataTypes.Probes.ProbeSet in project SeqMonk by s-andrews.
The class FeatureProbeGenerator, method run.
/* (non-Javadoc)
* @see java.lang.Runnable#run()
*/
public void run() {
// The problem here is that we won't get per-chromosome updates, but this
// should be quick enough that we don't care:
updateGenerationProgress("Making probes...", 0, 1);
Probe[] finalList = optionPanel.getProbes();
if (removeDuplicates) {
// System.out.println("Removing duplicates from original list of "+finalList.length+" probes");
finalList = removeDuplicates(finalList);
// System.out.println("Unique list has "+finalList.length+" probes");
}
ProbeSet finalSet = new ProbeSet(getDescription(), finalList);
generationComplete(finalSet);
}
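The removeDuplicates helper isn't shown in this excerpt. A plausible sketch of what it needs to do, keeping the first probe seen at each chromosome and packed position (the key format and method name here are assumptions, not SeqMonk's actual implementation):
// Hypothetical sketch only - not SeqMonk's removeDuplicates implementation.
private Probe[] removeDuplicatesSketch(Probe[] probes) {
    java.util.HashSet<String> seen = new java.util.HashSet<String>();
    java.util.Vector<Probe> unique = new java.util.Vector<Probe>();
    for (int i = 0; i < probes.length; i++) {
        // Key on chromosome plus packed position (start/end/strand)
        String key = probes[i].chromosome().name() + ":" + probes[i].packedPosition();
        if (seen.add(key)) {
            unique.add(probes[i]);
        }
    }
    return unique.toArray(new Probe[0]);
}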
Use of uk.ac.babraham.SeqMonk.DataTypes.Probes.ProbeSet in project SeqMonk by s-andrews.
The class MacsPeakCaller, method run.
public void run() {
// for (int i=0;i<selectedChIPStores.length;i++) {
// System.err.println("Selcted ChIP="+selectedChIPStores[i]);
// }
// for (int i=0;i<selectedInputStores.length;i++) {
// System.err.println("Selcted Input="+selectedInputStores[i]);
// }
// First find the tag offsets between the watson and crick strands
// Work out the total average coverage for all of the combined ChIP samples
long totalChIPCoverage = 0;
for (int i = 0; i < selectedChIPStores.length; i++) {
totalChIPCoverage += selectedChIPStores[i].getTotalReadLength();
}
if (cancel) {
generationCancelled();
return;
}
double averageChIPCoveragePerBase = totalChIPCoverage / (double) collection.genome().getTotalGenomeLength();
double lowerCoverage = averageChIPCoveragePerBase * minFoldEnrichment;
double upperCoverage = averageChIPCoveragePerBase * maxFoldEnrichment;
System.err.println("Coverage range for high confidence peaks is " + lowerCoverage + " - " + upperCoverage);
// Now we go through the data to find locations for our high confidence peaks so we can
// randomly select 1000 of these to use to find the offset between the two strands
Chromosome[] chromosomes = collection.genome().getAllChromosomes();
Vector<Probe> potentialHighConfidencePeaks = new Vector<Probe>();
for (int c = 0; c < chromosomes.length; c++) {
if (cancel) {
generationCancelled();
return;
}
// Time for an update
updateGenerationProgress("Finding high confidence peaks on chromosome " + chromosomes[c].name(), c, chromosomes.length);
Probe lastValidProbe = null;
for (int startPosition = 1; startPosition < chromosomes[c].length() - fragmentSize; startPosition += fragmentSize / 2) {
// See if we need to quit
if (cancel) {
generationCancelled();
return;
}
long totalLength = 0;
Probe probe = new Probe(chromosomes[c], startPosition, startPosition + fragmentSize);
for (int s = 0; s < selectedChIPStores.length; s++) {
long[] reads = selectedChIPStores[s].getReadsForProbe(probe);
for (int j = 0; j < reads.length; j++) {
totalLength += SequenceRead.length(reads[j]);
}
}
if (totalLength >= (lowerCoverage * probe.length()) && totalLength <= upperCoverage * probe.length()) {
if (lastValidProbe != null && SequenceRead.overlaps(lastValidProbe.packedPosition(), probe.packedPosition())) {
lastValidProbe = new Probe(chromosomes[c], lastValidProbe.start(), probe.end());
} else if (lastValidProbe != null) {
// Check that the overall density over the region falls within our limits
totalLength = 0;
for (int s = 0; s < selectedChIPStores.length; s++) {
long[] reads = selectedChIPStores[s].getReadsForProbe(lastValidProbe);
for (int j = 0; j < reads.length; j++) {
totalLength += SequenceRead.length(reads[j]);
}
}
if (totalLength >= (lowerCoverage * lastValidProbe.length()) && totalLength <= upperCoverage * lastValidProbe.length()) {
potentialHighConfidencePeaks.add(lastValidProbe);
}
lastValidProbe = probe;
} else {
lastValidProbe = probe;
}
}
}
if (lastValidProbe != null) {
long totalLength = 0;
for (int s = 0; s < selectedChIPStores.length; s++) {
long[] reads = selectedChIPStores[s].getReadsForProbe(lastValidProbe);
for (int j = 0; j < reads.length; j++) {
totalLength += SequenceRead.length(reads[j]);
}
}
if (totalLength >= (lowerCoverage * lastValidProbe.length()) && totalLength <= upperCoverage * lastValidProbe.length()) {
potentialHighConfidencePeaks.add(lastValidProbe);
}
}
}
if (potentialHighConfidencePeaks.size() == 0) {
JOptionPane.showMessageDialog(SeqMonkApplication.getInstance(), "No high confidence peaks found", "Quitting generator", JOptionPane.INFORMATION_MESSAGE);
generationCancelled();
return;
}
// System.err.println("Found "+potentialHighConfidencePeaks.size()+" high confidence peaks");
// Now we select 1000 random probes from this set
Probe[] highConfidencePeaks = potentialHighConfidencePeaks.toArray(new Probe[0]);
Collections.shuffle(Arrays.asList(highConfidencePeaks));
Probe[] randomHighConfidenceProbes = new Probe[Math.min(highConfidencePeaks.length, 1000)];
for (int i = 0; i < randomHighConfidenceProbes.length; i++) {
randomHighConfidenceProbes[i] = highConfidencePeaks[i];
}
// Now find the average distance between forward / reverse reads in the candidate peaks.
// The array is sized to the sampled probes rather than the full candidate list so
// the median below isn't diluted by unfilled zero entries.
int[] distances = new int[randomHighConfidenceProbes.length];
// Sort the candidates so we read through the data cache sequentially
Arrays.sort(randomHighConfidenceProbes);
for (int p = 0; p < randomHighConfidenceProbes.length; p++) {
// See if we need to quit
if (cancel) {
generationCancelled();
return;
}
distances[p] = getInterStrandDistance(randomHighConfidenceProbes[p], selectedChIPStores);
}
int medianInterStrandDistance = (int) SimpleStats.median(distances);
if (medianInterStrandDistance < 0)
medianInterStrandDistance = 0;
// System.err.println("Median inter strand difference = "+medianInterStrandDistance);
// Now we find the depth cutoff for overrepresented single tags using a binomial distribution
int totalReadCount = 0;
for (int i = 0; i < selectedChIPStores.length; i++) {
totalReadCount += selectedChIPStores[i].getTotalReadCount();
}
BinomialDistribution bin = new BinomialDistribution(totalReadCount, 1d / collection.genome().getTotalGenomeLength());
// We want to know what depth has a chance of less than 10^-5
int redundantThreshold = bin.inverseCumulativeProbability(1 - 0.00001d);
if (redundantThreshold < 1)
redundantThreshold = 1;
// System.err.println("Redundancy threshold is "+redundantThreshold);
// Now we construct a poisson distribution to work out the threshold to use for
// constructing a full candidate peak set.
updateGenerationProgress("Counting non-redundant reads", 0, 1);
// To do this we need to get the full non-redundant length from the whole set
int totalNonRedCount = getNonRedundantReadCount(selectedChIPStores, redundantThreshold);
// System.err.println("Total non-redundant sequences is "+totalNonRedCount);
// We need to know the median read length for the data
int readLength = 0;
for (int i = 0; i < selectedChIPStores.length; i++) {
readLength += selectedChIPStores[i].getTotalReadLength() / selectedChIPStores[i].getTotalReadCount();
}
readLength /= selectedChIPStores.length;
double expectedCountsPerWindow = getExpectedCountPerWindow(totalNonRedCount, collection.genome().getTotalGenomeLength(), fragmentSize, readLength);
PoissonDistribution poisson = new PoissonDistribution(expectedCountsPerWindow);
int readCountCutoff = poisson.inverseCumulativeProbability(1 - pValue);
// System.err.println("Threshold for enrichment in a window is "+readCountCutoff+" reads using a p-value of "+pValue+" and a mean of "+(totalNonRedCount/(collection.genome().getTotalGenomeLength()/(double)fragmentSize)));
// Now we go back through the whole dataset to do a search for all possible candidate probes
// We re-use the peak vector we came up with before.
potentialHighConfidencePeaks.clear();
for (int c = 0; c < chromosomes.length; c++) {
// Time for an update
updateGenerationProgress("Finding candidate peaks on chromosome " + chromosomes[c].name(), c, chromosomes.length);
Probe lastValidProbe = null;
for (int startPosition = 1; startPosition < chromosomes[c].length() - fragmentSize; startPosition += fragmentSize / 2) {
// See if we need to quit
if (cancel) {
generationCancelled();
return;
}
// We expand the region we're looking at by the inter-strand distance as we're going to
// be adjusting the read positions
Probe probe = new Probe(chromosomes[c], startPosition, (startPosition + fragmentSize - 1));
long[] mergedProbeReads = getReadsFromDataStoreCollection(probe, selectedChIPStores, medianInterStrandDistance);
mergedProbeReads = deduplicateReads(mergedProbeReads, redundantThreshold);
SequenceRead.sort(mergedProbeReads);
int thisProbeOverlapCount = 0;
for (int i = 0; i < mergedProbeReads.length; i++) {
if (SequenceRead.overlaps(mergedProbeReads[i], probe.packedPosition())) {
++thisProbeOverlapCount;
}
}
if (thisProbeOverlapCount > readCountCutoff) {
if (lastValidProbe != null && SequenceRead.overlaps(lastValidProbe.packedPosition(), probe.packedPosition())) {
lastValidProbe = new Probe(chromosomes[c], lastValidProbe.start(), probe.end());
} else if (lastValidProbe != null) {
potentialHighConfidencePeaks.add(lastValidProbe);
lastValidProbe = probe;
} else {
lastValidProbe = probe;
}
}
}
if (lastValidProbe != null) {
potentialHighConfidencePeaks.add(lastValidProbe);
}
}
// Finally we re-filter the peaks we have using local Poisson distributions with densities
// taken from either the input samples (if there are any) or the local region. The densities
// are estimated over 1, 5 and 10 kb around the peak, plus genome-wide, and the maximum of
// these is used. If there is no input then the 1 kb region is not used.
Probe[] allCandidateProbes = potentialHighConfidencePeaks.toArray(new Probe[0]);
// Work out which stores we're using to validate against.
DataStore[] validationStores;
boolean useInput = false;
double inputCorrection = 1;
int validationNonRedCount;
if (selectedInputStores != null && selectedInputStores.length > 0) {
// See if we need to quit
if (cancel) {
generationCancelled();
return;
}
validationStores = selectedInputStores;
useInput = true;
// We also need to work out the total number of non-redundant sequences
// in the input so we can work out a scaling factor so that the densities
// for input and ChIP are comparable.
validationNonRedCount = getNonRedundantReadCount(validationStores, redundantThreshold);
inputCorrection = totalNonRedCount / (double) validationNonRedCount;
System.err.println("From chip=" + totalNonRedCount + " input=" + validationNonRedCount + " correction is " + inputCorrection);
} else {
validationStores = selectedChIPStores;
validationNonRedCount = totalNonRedCount;
}
Vector<Probe> finalValidatedProbes = new Vector<Probe>();
for (int p = 0; p < allCandidateProbes.length; p++) {
// See if we need to quit
if (cancel) {
generationCancelled();
return;
}
if (p % 100 == 0) {
updateGenerationProgress("Validated " + p + " out of " + allCandidateProbes.length + " raw peaks", p, allCandidateProbes.length);
}
// System.err.println("Validating "+allCandidateProbes[p].chromosome()+":"+allCandidateProbes[p].start()+"-"+allCandidateProbes[p].end());
// We now need to find the maximum read density per 2*bandwidth against which
// we're going to validate this peak
// We're going to get all reads within 10kb of the peak, and then we can subselect from there
int midPoint = allCandidateProbes[p].middle();
Probe region10kb = new Probe(allCandidateProbes[p].chromosome(), Math.max(midPoint - 5000, 1), Math.min(midPoint + 4999, allCandidateProbes[p].chromosome().length()), allCandidateProbes[p].strand());
Probe region5kb = new Probe(allCandidateProbes[p].chromosome(), Math.max(midPoint - 2500, 1), Math.min(midPoint + 2499, allCandidateProbes[p].chromosome().length()), allCandidateProbes[p].strand());
Probe region1kb = new Probe(allCandidateProbes[p].chromosome(), Math.max(midPoint - 500, 1), Math.min(midPoint + 499, allCandidateProbes[p].chromosome().length()), allCandidateProbes[p].strand());
// Get the probes for the largest region
long[] thisRegionReads = getReadsFromDataStoreCollection(region10kb, validationStores, 0);
// Deduplicate so it's a fair comparison
// Should we recalculate the redundant threshold based on the input coverage?
thisRegionReads = deduplicateReads(thisRegionReads, redundantThreshold);
int region10kbcount = thisRegionReads.length;
int region5kbcount = 0;
int region1kbcount = 0;
// Go through the reads seeing if they fit into the 5 or 1kb regions
for (int r = 0; r < thisRegionReads.length; r++) {
if (SequenceRead.overlaps(region5kb.packedPosition(), thisRegionReads[r]))
++region5kbcount;
if (SequenceRead.overlaps(region1kb.packedPosition(), thisRegionReads[r]))
++region1kbcount;
}
// System.err.println("Input counts 10kb="+region10kbcount+" 5kb="+region5kbcount+" 1kb="+region1kbcount);
// Convert to densities per window and adjust for global coverage
double globalDensity = getExpectedCountPerWindow(validationNonRedCount, collection.genome().getTotalGenomeLength(), allCandidateProbes[p].length(), readLength) * inputCorrection;
double density10kb = getExpectedCountPerWindow(region10kbcount, region10kb.length(), allCandidateProbes[p].length(), readLength) * inputCorrection;
double density5kb = getExpectedCountPerWindow(region5kbcount, region5kb.length(), allCandidateProbes[p].length(), readLength) * inputCorrection;
double density1kb = getExpectedCountPerWindow(region1kbcount, region1kb.length(), allCandidateProbes[p].length(), readLength) * inputCorrection;
// Find the highest density to use for the validation
double highestDensity = globalDensity;
if (density10kb > highestDensity)
highestDensity = density10kb;
if (density5kb > highestDensity)
highestDensity = density5kb;
if (useInput && density1kb > highestDensity)
highestDensity = density1kb;
// System.err.println("Global="+globalDensity+" 10kb="+density10kb+" 5kb="+density5kb+" 1kb="+density1kb+" using="+highestDensity);
// Construct a poisson distribution with this density
PoissonDistribution localPoisson = new PoissonDistribution(highestDensity);
// System.err.println("Cutoff from global="+(new PoissonDistribution(globalDensity)).inverseCumulativeProbability(1-pValue)+" 10kb="+(new PoissonDistribution(density10kb)).inverseCumulativeProbability(1-pValue)+" 5kb="+(new PoissonDistribution(density5kb)).inverseCumulativeProbability(1-pValue)+" 1kb="+(new PoissonDistribution(density1kb)).inverseCumulativeProbability(1-pValue));
// Now check to see if the actual count from this peak is enough to still pass
long[] mergedProbeReads = getReadsFromDataStoreCollection(allCandidateProbes[p], selectedChIPStores, medianInterStrandDistance);
mergedProbeReads = deduplicateReads(mergedProbeReads, redundantThreshold);
SequenceRead.sort(mergedProbeReads);
int thisProbeOverlapCount = 0;
for (int i = 0; i < mergedProbeReads.length; i++) {
if (SequenceRead.overlaps(mergedProbeReads[i], allCandidateProbes[p].packedPosition())) {
++thisProbeOverlapCount;
}
}
if (thisProbeOverlapCount > localPoisson.inverseCumulativeProbability(1 - pValue)) {
finalValidatedProbes.add(allCandidateProbes[p]);
// System.err.println("Adding probe to final set");
}
}
// System.err.println("From "+allCandidateProbes.length+" candidates "+finalValidatedProbes.size()+" peaks were validated");
ProbeSet finalSet = new ProbeSet(getDescription(), finalValidatedProbes.toArray(new Probe[0]));
generationComplete(finalSet);
}
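The two statistical cutoffs above are plain Apache Commons Math calls. A self-contained sketch of the same arithmetic, with made-up inputs (the genome size, read count, window size and p-value below are illustrative, not values from SeqMonk):
import org.apache.commons.math3.distribution.BinomialDistribution;
import org.apache.commons.math3.distribution.PoissonDistribution;
// Standalone sketch of the redundancy and enrichment thresholds computed above.
public class MacsThresholdSketch {
    public static void main(String[] args) {
        long genomeLength = 3000000000L; // illustrative values only
        int totalReads = 20000000;
        int fragmentSize = 300;
        double pValue = 0.0001;
        // Depth at a single position with a chance below 1e-5 under a binomial
        // model of reads landing uniformly across the genome.
        BinomialDistribution bin = new BinomialDistribution(totalReads, 1d / genomeLength);
        int redundantThreshold = Math.max(1, bin.inverseCumulativeProbability(1 - 0.00001d));
        // Reads per window needed to call enrichment at pValue, given the
        // genome-wide expected count per fragment-sized window.
        double expectedPerWindow = totalReads / (genomeLength / (double) fragmentSize);
        PoissonDistribution poisson = new PoissonDistribution(expectedPerWindow);
        int readCountCutoff = poisson.inverseCumulativeProbability(1 - pValue);
        System.out.println("redundantThreshold=" + redundantThreshold
                + " readCountCutoff=" + readCountCutoff);
    }
}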
Use of uk.ac.babraham.SeqMonk.DataTypes.Probes.ProbeSet in project SeqMonk by s-andrews.
The class RNASeqPipeline, method startPipeline.
protected void startPipeline() {
// We first need to generate probes over all of the features listed in
// the feature types. The probes should cover the whole area of the
// feature regardless of where it splices.
Vector<Probe> probes = new Vector<Probe>();
boolean mergeTranscripts = optionsPanel.mergeTranscripts();
boolean pairedEnd = optionsPanel.pairedEnd();
boolean logTransform = optionsPanel.logTransform();
boolean applyTranscriptLengthCorrection = optionsPanel.applyTranscriptLengthCorrection();
boolean rawCounts = optionsPanel.rawCounts();
boolean noValueForZeroCounts = optionsPanel.noValueForZeroCounts();
boolean correctDNAContamination = optionsPanel.correctForDNAContamination();
boolean correctDuplication = optionsPanel.correctForDNADuplication();
if (rawCounts) {
logTransform = false;
applyTranscriptLengthCorrection = false;
noValueForZeroCounts = false;
}
Chromosome[] chrs = collection().genome().getAllChromosomes();
for (int c = 0; c < chrs.length; c++) {
// System.err.println("Processing chr "+chrs[c].name());
if (cancel) {
progressCancelled();
return;
}
progressUpdated("Making features for chr" + chrs[c].name(), c, chrs.length * 2);
Feature[] features = collection().genome().annotationCollection().getFeaturesForType(chrs[c], optionsPanel.getSelectedFeatureType());
Arrays.sort(features);
FeatureGroup[] mergedTranscripts = mergeTranscripts(features, mergeTranscripts);
for (int f = 0; f < mergedTranscripts.length; f++) {
if (cancel) {
progressCancelled();
return;
}
probes.add(new Probe(chrs[c], mergedTranscripts[f].start(), mergedTranscripts[f].end(), mergedTranscripts[f].strand(), mergedTranscripts[f].name()));
}
}
Probe[] allProbes = probes.toArray(new Probe[0]);
Arrays.sort(allProbes);
if (collection().probeSet() == null) {
collection().setProbeSet(new ProbeSet("Transcript features over " + optionsPanel.getSelectedFeatureType(), allProbes));
} else {
Probe[] existingProbes = collection().probeSet().getAllProbes();
Arrays.sort(existingProbes);
if (allProbes.length != existingProbes.length) {
collection().setProbeSet(new ProbeSet("Transcript features over " + optionsPanel.getSelectedFeatureType(), allProbes));
} else {
// Check the positions against the new ones
boolean areTheyTheSame = true;
for (int p = 0; p < allProbes.length; p++) {
if (allProbes[p].packedPosition() != existingProbes[p].packedPosition()) {
areTheyTheSame = false;
break;
}
}
if (areTheyTheSame) {
allProbes = existingProbes;
} else {
collection().setProbeSet(new ProbeSet("Transcript features over " + optionsPanel.getSelectedFeatureType(), allProbes));
}
}
}
// If we're correcting for DNA contamination we need to work out the average density of
// reads in intergenic regions
float[] dnaDensityPerKb = new float[data.length];
int[] correctedTotalCounts = new int[data.length];
if (correctDNAContamination) {
// We need to make interstitial probes between the probes we already have, ignoring gaps at the ends of chromosomes
Vector<Probe> intergenicProbes = new Vector<Probe>();
Chromosome lastChr = allProbes[0].chromosome();
for (int p = 1; p < allProbes.length; p++) {
if (allProbes[p].chromosome() != lastChr) {
lastChr = allProbes[p].chromosome();
continue;
}
// See if there's a gap back to the last probe
if (allProbes[p].start() > allProbes[p - 1].end()) {
if (allProbes[p].start() - allProbes[p - 1].end() < 1000) {
// Don't bother with really short probes
continue;
}
intergenicProbes.add(new Probe(lastChr, allProbes[p - 1].end() + 1, allProbes[p].start() - 1));
}
}
Probe[] allIntergenicProbes = intergenicProbes.toArray(new Probe[0]);
for (int d = 0; d < data.length; d++) {
progressUpdated("Quantitating DNA contamination", 1, 2);
float[] densities = new float[allIntergenicProbes.length];
for (int p = 0; p < allIntergenicProbes.length; p++) {
densities[p] = data[d].getReadsForProbe(allIntergenicProbes[p]).length / (allIntergenicProbes[p].length() / 1000f);
}
dnaDensityPerKb[d] = SimpleStats.median(densities);
}
// Work out adjusted total counts having subtracted the DNA contamination
for (int d = 0; d < data.length; d++) {
int predictedContamination = (int) (dnaDensityPerKb[d] * (SeqMonkApplication.getInstance().dataCollection().genome().getTotalGenomeLength() / 1000));
int correctedTotalReadCount = data[d].getTotalReadCount() - predictedContamination;
correctedTotalCounts[d] = correctedTotalReadCount;
}
// Halve the density if they're doing a directional quantitation
if (optionsPanel.isDirectional()) {
for (int i = 0; i < dnaDensityPerKb.length; i++) {
dnaDensityPerKb[i] /= 2;
}
}
// Halve the density if the libraries are paired end
if (pairedEnd) {
for (int i = 0; i < dnaDensityPerKb.length; i++) {
dnaDensityPerKb[i] /= 2;
}
}
}
// If we're correcting for duplication we need to work out the modal count depth
// across the probes we've measured
int[] modalDuplicationLevels = new int[data.length];
if (correctDuplication) {
for (int d = 0; d < data.length; d++) {
progressUpdated("Quantitating DNA duplication", 1, 2);
// We're not going to look at depths which are > 200. If it's that duplicated
// then there's no point continuing anyway.
int[] depthCount = new int[200];
for (int p = 0; p < allProbes.length; p++) {
long[] reads = data[d].getReadsForProbe(allProbes[p]);
int currentCount = 0;
for (int r = 1; r < reads.length; r++) {
if (reads[r] == reads[r - 1]) {
++currentCount;
} else {
if (currentCount > 0 && currentCount < 200) {
++depthCount[currentCount];
}
currentCount = 1;
}
}
}
// Find the modal duplication depth across the measured probes. This is the best
// estimate of the overall duplication level.
// Since unique reads turn up all over the place, even in duplicated data, we say
// that if the count of unique reads is higher than the summed counts for depths
// 2-10 then there is no duplication.
int twoTenSum = 0;
for (int i = 2; i <= 10; i++) {
twoTenSum += depthCount[i];
}
if (depthCount[1] > twoTenSum) {
modalDuplicationLevels[d] = 1;
} else {
int highestDepth = 0;
int bestDupGuess = 1;
for (int i = 2; i < depthCount.length; i++) {
// System.err.println("For depth "+i+" count was "+depthCount[i]);
if (depthCount[i] > highestDepth) {
bestDupGuess = i;
highestDepth = depthCount[i];
}
}
modalDuplicationLevels[d] = bestDupGuess;
}
}
}
// Having made probes we now need to quantitate them. We'll fetch the
// reads overlapping each sub-feature and then aggregate these together
// to get the final quantitation.
QuantitationStrandType readFilter = optionsPanel.readFilter();
int currentIndex = 0;
for (int c = 0; c < chrs.length; c++) {
if (cancel) {
progressCancelled();
return;
}
progressUpdated("Quantitating features on chr" + chrs[c].name(), chrs.length + c, chrs.length * 2);
Feature[] features = collection().genome().annotationCollection().getFeaturesForType(chrs[c], optionsPanel.getSelectedFeatureType());
Arrays.sort(features);
FeatureGroup[] mergedTranscripts = mergeTranscripts(features, mergeTranscripts);
int[] readLengths = new int[data.length];
for (int d = 0; d < data.length; d++) {
readLengths[d] = data[d].getMaxReadLength();
// For paired end data each fragment is represented by two reads, so we double the read length to approximate the actual fragment length.
if (pairedEnd) {
readLengths[d] *= 2;
}
}
for (int f = 0; f < mergedTranscripts.length; f++) {
Location[] subLocations = mergedTranscripts[f].getSubLocations();
int totalLength = 0;
// Find the total length of all of the exons
for (int s = 0; s < subLocations.length; s++) {
totalLength += subLocations[s].length();
}
for (int d = 0; d < data.length; d++) {
if (cancel) {
progressCancelled();
return;
}
long totalCount = 0;
for (int s = 0; s < subLocations.length; s++) {
long[] reads = data[d].getReadsForProbe(new Probe(chrs[c], subLocations[s].start(), subLocations[s].end()));
for (int r = 0; r < reads.length; r++) {
if (!readFilter.useRead(subLocations[s], reads[r])) {
continue;
}
int overlap = (Math.min(subLocations[s].end(), SequenceRead.end(reads[r])) - Math.max(subLocations[s].start(), SequenceRead.start(reads[r]))) + 1;
totalCount += overlap;
}
}
// Now we correct the count by the total length of reads in the data and by
// the length of the split parts of the probe, and assign this to the probe.
// As we're correcting for read length then we work out the whole number of
// reads which this count could comprise, rounding down to a whole number.
totalCount /= readLengths[d];
// We can now subtract the DNA contamination prediction.
if (correctDNAContamination) {
int predictedContamination = (int) ((totalLength / 1000f) * dnaDensityPerKb[d]);
totalCount -= predictedContamination;
// Makes no sense to have negative counts
if (totalCount < 0)
totalCount = 0;
}
// ..and we can divide by the duplication level if we know it.
if (correctDuplication) {
totalCount /= modalDuplicationLevels[d];
}
// System.err.println("Total read count for "+mergedTranscripts[f].name+" is "+totalCount);
float value = totalCount;
if (value == 0 && noValueForZeroCounts) {
value = Float.NaN;
}
// If we're log transforming then we need to set zero values to 0.9
if (logTransform && value == 0 && !noValueForZeroCounts) {
value = 0.9f;
}
// Correct by the transcript length (per kb) if we've been asked to.
if (applyTranscriptLengthCorrection) {
value /= (totalLength / 1000f);
}
// We also correct by the total read count
if (!rawCounts) {
// System.err.println("True total is "+data[d].getTotalReadCount()+" corrected total is "+correctedTotalCounts[d]);
// If these libraries are paired end then the total number of
// reads is also effectively halved.
float totalReadCount;
// If we corrected for DNA contamination we'll have calculated this already, but otherwise we'll take the total count (total length/read length)
if (correctDNAContamination) {
totalReadCount = correctedTotalCounts[d];
} else {
totalReadCount = data[d].getTotalReadLength() / readLengths[d];
}
// If we're correcting for duplication we divide by the duplication level.
if (correctDuplication) {
totalReadCount /= modalDuplicationLevels[d];
}
// Finally we work out millions of reads (single end) or fragments (paired end)
if (pairedEnd) {
totalReadCount /= 2000000f;
} else {
totalReadCount /= 1000000f;
}
// Lastly we divide the value by the total millions of reads to get the globally corrected count.
value /= totalReadCount;
}
// Finally we do the log transform if we've been asked to
if (logTransform) {
value = (float) Math.log(value) / log2;
}
data[d].setValueForProbe(allProbes[currentIndex], value);
}
currentIndex++;
}
}
collection().probeSet().setCurrentQuantitation(getQuantitationDescription(mergeTranscripts, applyTranscriptLengthCorrection, correctDNAContamination, logTransform, rawCounts));
// If we estimated any parameters let's report them.
if (correctDNAContamination || correctDuplication) {
float[] dna = null;
if (correctDNAContamination) {
dna = dnaDensityPerKb;
}
int[] dup = null;
if (correctDuplication) {
dup = modalDuplicationLevels;
}
RNASeqParametersModel model = new RNASeqParametersModel(data, dna, dup);
ReportTableDialog report = new ReportTableDialog(SeqMonkApplication.getInstance(), new Report(null, null) {
@Override
public void run() {
}
@Override
public String name() {
return "RNA-Seq parameter";
}
@Override
public boolean isReady() {
return true;
}
@Override
public JPanel getOptionsPanel() {
return null;
}
@Override
public void generateReport() {
}
}, model);
}
quantitatonComplete();
}
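The per-probe arithmetic in the quantitation loop boils down to an RPKM-style calculation. A compact sketch under simplifying assumptions (single-end reads, no contamination or duplication correction; the method name is illustrative):
// Sketch of the normalisation arithmetic only, under the assumptions above.
public static float rpkmStyleValue(long totalOverlapBases, int readLength,
        int transcriptLengthBp, long totalReadsInLibrary, boolean logTransform) {
    final float LOG2 = (float) Math.log(2);
    // Whole reads represented by the overlapping bases (integer division rounds down)
    float value = totalOverlapBases / readLength;
    if (logTransform && value == 0) {
        value = 0.9f; // avoid log(0), matching the pipeline above
    }
    value /= transcriptLengthBp / 1000f;       // reads per kb of transcript
    value /= totalReadsInLibrary / 1000000f;   // per million reads in the library
    if (logTransform) {
        value = (float) (Math.log(value) / LOG2);
    }
    return value;
}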
Use of uk.ac.babraham.SeqMonk.DataTypes.Probes.ProbeSet in project SeqMonk by s-andrews.
The class BisulphiteFeaturePipeline, method startPipeline.
protected void startPipeline() {
// We first need to generate probes over all of the features listed in
// the feature types. The probes should cover the whole area of the
// feature regardless of where it splices.
Vector<Probe> probes = new Vector<Probe>();
int minCount = optionsPanel.getMinCount();
int minFeatureCountValue = optionsPanel.getMinFeatureCount();
String combineString = optionsPanel.getCombineType();
int combineType = 0;
if (combineString.equals("Mean")) {
combineType = MEAN;
} else if (combineString.equals("Median")) {
combineType = MEDIAN;
} else if (combineString.equals("Max")) {
combineType = MAX;
} else if (combineString.equals("Min")) {
combineType = MIN;
} else {
throw new IllegalStateException("Unknown combine type '" + combineString + "' found in methylation pipeline options");
}
Chromosome[] chrs = collection().genome().getAllChromosomes();
// Compare string contents rather than object references
if (!optionsPanel.getSelectedFeatureType().equals("[Existing Probes]")) {
for (int c = 0; c < chrs.length; c++) {
if (cancel) {
progressCancelled();
return;
}
progressUpdated("Making features for chr" + chrs[c].name(), c, chrs.length * 2);
Feature[] features = collection().genome().annotationCollection().getFeaturesForType(chrs[c], optionsPanel.getSelectedFeatureType());
Arrays.sort(features);
for (int f = 0; f < features.length; f++) {
if (cancel) {
progressCancelled();
return;
}
probes.add(new Probe(chrs[c], features[f].location().start(), features[f].location().end(), features[f].location().strand(), features[f].name()));
}
}
Probe[] allProbes = probes.toArray(new Probe[0]);
collection().setProbeSet(new ProbeSet("Features over " + optionsPanel.getSelectedFeatureType(), allProbes));
}
for (int c = 0; c < chrs.length; c++) {
if (cancel) {
progressCancelled();
return;
}
progressUpdated("Quantitating features on chr" + chrs[c].name(), chrs.length + c, chrs.length * 2);
Probe[] thisChrProbes = collection().probeSet().getProbesForChromosome(chrs[c]);
for (int p = 0; p < thisChrProbes.length; p++) {
// This data structure is going to hold the counts. The int array will hold
// pairs of values for the different data stores (meth and unmeth) so the length
// of this array will be data store length * 2.
Hashtable<Integer, int[]> counts = new Hashtable<Integer, int[]>();
for (int d = 0; d < data.length; d++) {
if (cancel) {
progressCancelled();
return;
}
long[] reads = data[d].getReadsForProbe(thisChrProbes[p]);
for (int r = 0; r < reads.length; r++) {
for (int pos = SequenceRead.start(reads[r]); pos <= SequenceRead.end(reads[r]); pos++) {
if (pos < thisChrProbes[p].start())
continue;
if (pos > thisChrProbes[p].end())
break;
if (!counts.containsKey(pos)) {
counts.put(pos, new int[data.length * 2]);
}
if (SequenceRead.strand(reads[r]) == Location.FORWARD) {
counts.get(pos)[2 * d]++;
} else if (SequenceRead.strand(reads[r]) == Location.REVERSE) {
counts.get(pos)[(2 * d) + 1]++;
}
}
}
}
if (optionsPanel.applyMinCountAcrossAllStores()) {
Enumeration<Integer> en = counts.keys();
OUTER: while (en.hasMoreElements()) {
Integer i = en.nextElement();
int[] values = counts.get(i);
for (int d = 0; d < data.length; d++) {
if (values[2 * d] + values[(2 * d) + 1] < minCount) {
counts.remove(i);
continue OUTER;
}
}
}
}
for (int d = 0; d < data.length; d++) {
Enumeration<Integer> en = counts.keys();
Vector<Float> percentages = new Vector<Float>();
while (en.hasMoreElements()) {
Integer pos = en.nextElement();
int total = counts.get(pos)[2 * d] + counts.get(pos)[(2 * d) + 1];
if (total < minCount)
continue;
float percent = 100f * (counts.get(pos)[2 * d] / (float) total);
percentages.add(percent);
}
// Now combine the methylation percentages we have in the way they
// wanted.
float[] validMeasures = new float[percentages.size()];
float finalMeasure = Float.NaN;
for (int i = 0; i < validMeasures.length; i++) {
validMeasures[i] = percentages.elementAt(i).floatValue();
}
if (validMeasures.length < minFeatureCountValue) {
finalMeasure = Float.NaN;
} else if (validMeasures.length == 0) {
finalMeasure = Float.NaN;
} else if (validMeasures.length == 1) {
finalMeasure = validMeasures[0];
} else {
Arrays.sort(validMeasures);
switch(combineType) {
case MEAN:
finalMeasure = SimpleStats.mean(validMeasures);
break;
case MEDIAN:
finalMeasure = SimpleStats.median(validMeasures);
break;
case MIN:
finalMeasure = validMeasures[0];
break;
case MAX:
finalMeasure = validMeasures[validMeasures.length - 1];
break;
}
}
data[d].setValueForProbe(thisChrProbes[p], finalMeasure);
}
}
}
if (!optionsPanel.getSelectedFeatureType().equals("[Existing Probes]")) {
collection().probeSet().setDescription("Probes over " + optionsPanel.getSelectedFeatureType() + " features");
}
StringBuffer quantitationDescription = new StringBuffer();
quantitationDescription.append("Methylation feature pipeline quantitation using ");
quantitationDescription.append(optionsPanel.getSelectedFeatureType());
quantitationDescription.append(" features with min count ");
if (optionsPanel.applyMinCountAcrossAllStores()) {
quantitationDescription.append(" in all stores ");
}
quantitationDescription.append("of ");
quantitationDescription.append(optionsPanel.getMinCount());
quantitationDescription.append(" per position, and at least ");
quantitationDescription.append(optionsPanel.getMinFeatureCount());
quantitationDescription.append(" observations per feature, then combining using the ");
quantitationDescription.append(optionsPanel.getCombineType());
collection().probeSet().setCurrentQuantitation(quantitationDescription.toString());
quantitatonComplete();
}
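Per covered position, the quantitation above reduces to a percentage of forward-strand (methylated) calls over all calls, gated by the minimum count. A minimal sketch of that step (the method name is illustrative):
// Forward-strand counts are treated as methylated and reverse-strand as
// unmethylated, following the convention in the pipeline above.
public static float methylationPercent(int methCount, int unmethCount, int minCount) {
    int total = methCount + unmethCount;
    if (total < minCount) return Float.NaN; // below the minimum count; the pipeline skips such positions
    return 100f * (methCount / (float) total);
}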