Use of uk.ac.babraham.SeqMonk.SeqMonkException in project SeqMonk by s-andrews.
The class MultiBoxWhiskerDialog, method run.
public void run() {
    whiskers = new BoxWhisker[probes.length][stores.length];
    try {
        for (int p = 0; p < probes.length; p++) {
            for (int s = 0; s < stores.length; s++) {
                whiskers[p][s] = new BoxWhisker(stores[s], probes[p]);

                // Seed the shared axis range from the first whisker plot,
                // then widen it whenever a later plot falls outside it
                if (p == 0 && s == 0) {
                    min = (float) whiskers[p][s].minValue();
                    max = (float) whiskers[p][s].maxValue();
                }
                if (whiskers[p][s].minValue() < min) {
                    min = (float) whiskers[p][s].minValue();
                }
                if (whiskers[p][s].maxValue() > max) {
                    max = (float) whiskers[p][s].maxValue();
                }
            }
        }
        updateGraphs();
        setVisible(true);
    } catch (SeqMonkException sme) {
        throw new IllegalStateException(sme);
    }
}
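The only numeric work here is tracking one shared y-axis range across every probe-list/store combination so all panels are drawn to the same scale. Below is a minimal standalone sketch of that pattern; the GlobalRangeSketch class and its CellSummary helper are hypothetical stand-ins, not part of the SeqMonk API, which exposes the equivalent values through BoxWhisker.minValue()/maxValue().

// Sketch: track a global min/max across a grid of per-cell summaries,
// mirroring how MultiBoxWhiskerDialog scales all its panels identically.
public class GlobalRangeSketch {

    // Hypothetical per-cell summary standing in for SeqMonk's BoxWhisker
    static class CellSummary {
        final double min, max;
        CellSummary(double[] values) {
            double lo = Double.POSITIVE_INFINITY, hi = Double.NEGATIVE_INFINITY;
            for (double v : values) {
                if (v < lo) lo = v;
                if (v > hi) hi = v;
            }
            min = lo;
            max = hi;
        }
    }

    public static void main(String[] args) {
        double[][][] grid = {
            {{1.2, 3.4, 0.5}, {2.0, 2.5}},
            {{-1.0, 4.0}, {0.0, 0.1, 0.2}}
        };
        float globalMin = 0, globalMax = 0;
        boolean first = true;
        for (double[][] row : grid) {
            for (double[] cell : row) {
                CellSummary s = new CellSummary(cell);
                if (first) {               // seed from the first cell
                    globalMin = (float) s.min;
                    globalMax = (float) s.max;
                    first = false;
                }
                if (s.min < globalMin) globalMin = (float) s.min;
                if (s.max > globalMax) globalMax = (float) s.max;
            }
        }
        System.out.println("Shared axis range: " + globalMin + " to " + globalMax);
    }
}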
Use of uk.ac.babraham.SeqMonk.SeqMonkException in project SeqMonk by s-andrews.
The class SubsetNormalisationQuantitation, method run.
/* (non-Javadoc)
 * @see java.lang.Runnable#run()
 */
public void run() {
    if (!isReady()) {
        progressExceptionReceived(new SeqMonkException("Options weren't set correctly"));
    }
    Probe[] allProbes = application.dataCollection().probeSet().getAllProbes();
    Probe[] calculateProbes = ((ProbeList) calculateFromProbeList.getSelectedItem()).getAllProbes();
    float[] summedValues = new float[data.length];

    // Sum the values over the calculation probes for each data store,
    // since we need that whether or not they're taking the mean.
    for (int d = 0; d < data.length; d++) {
        progressUpdated("Calculating correction for " + data[d].name(), d, data.length);
        for (int p = 0; p < calculateProbes.length; p++) {
            try {
                summedValues[d] += data[d].getValueForProbe(calculateProbes[p]);
            } catch (SeqMonkException e) {
                progressExceptionReceived(e);
            }
        }
    }
    if (summationAction == VALUE_MEAN) {
        for (int i = 0; i < summedValues.length; i++) {
            summedValues[i] /= calculateProbes.length;
        }
    }
    float targetValue = 1;
    if (targetAction == TARGET_LARGEST) {
        targetValue = summedValues[0];
        for (int i = 1; i < summedValues.length; i++) {
            if (summedValues[i] > targetValue)
                targetValue = summedValues[i];
        }
    }
    for (int d = 0; d < data.length; d++) {
        // See if we need to quit
        if (cancel) {
            progressCancelled();
            return;
        }
        progressUpdated(d, data.length);

        // Get the correction value
        float correctionFactor = 1;
        if (correctionAction == ADD) {
            correctionFactor = targetValue - summedValues[d];
        } else if (correctionAction == MULTIPLY) {
            correctionFactor = targetValue / summedValues[d];
        }
        System.err.println("For " + data[d].name() + " correction factor is " + correctionFactor);

        // Apply the correction to every probe in the probe set
        try {
            for (int p = 0; p < allProbes.length; p++) {
                // See if we need to quit
                if (cancel) {
                    progressCancelled();
                    return;
                }
                if (correctionAction == ADD) {
                    data[d].setValueForProbe(allProbes[p], data[d].getValueForProbe(allProbes[p]) + correctionFactor);
                } else if (correctionAction == MULTIPLY) {
                    data[d].setValueForProbe(allProbes[p], data[d].getValueForProbe(allProbes[p]) * correctionFactor);
                }
            }
        } catch (SeqMonkException e) {
            progressExceptionReceived(e);
        }
    }
    quantitatonComplete();
}
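The normalisation itself reduces to a small amount of arithmetic: summarise each store over the chosen subset of probes, pick a target (here the largest summary), and shift or scale every probe value so the summaries line up. The sketch below applies that arithmetic to plain arrays; the array-based data model is a hypothetical stand-in for SeqMonk's DataStore and Probe objects.

// Sketch: subset normalisation on plain arrays. Each row is one data store,
// each column one probe; the "subset" is a list of column indices.
public class SubsetNormalisationSketch {
    public static void main(String[] args) {
        double[][] values = {
            {1.0, 2.0, 3.0, 4.0},
            {2.0, 4.0, 6.0, 8.0}
        };
        int[] subset = {1, 2};          // probes used to calculate the correction
        boolean multiply = true;        // false would apply an additive shift

        // Summarise each store over the subset (a mean here)
        double[] summary = new double[values.length];
        for (int d = 0; d < values.length; d++) {
            for (int p : subset) summary[d] += values[d][p];
            summary[d] /= subset.length;
        }

        // Target is the largest summary, as with TARGET_LARGEST
        double target = summary[0];
        for (double s : summary) if (s > target) target = s;

        // Apply the correction to every probe in every store
        for (int d = 0; d < values.length; d++) {
            double factor = multiply ? target / summary[d] : target - summary[d];
            for (int p = 0; p < values[d].length; p++) {
                values[d][p] = multiply ? values[d][p] * factor : values[d][p] + factor;
            }
            System.out.println("Store " + d + " correction factor " + factor);
        }
    }
}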
Use of uk.ac.babraham.SeqMonk.SeqMonkException in project SeqMonk by s-andrews.
The class DataStoreSummaryReport, method run.
/* (non-Javadoc)
 * @see java.lang.Runnable#run()
 */
public void run() {
    summaries = new StoreSummary[storesToAnnotate.length];
    Probe[] probes;
    if (dataCollection().probeSet() != null) {
        probes = dataCollection().probeSet().getActiveList().getAllProbes();
    } else {
        probes = new Probe[0];
    }
    for (int s = 0; s < storesToAnnotate.length; s++) {
        summaries[s] = new StoreSummary(storesToAnnotate[s]);
        summaries[s].forwardReads = storesToAnnotate[s].getReadCountForStrand(Location.FORWARD);
        summaries[s].reverseReads = storesToAnnotate[s].getReadCountForStrand(Location.REVERSE);
        summaries[s].unknownReads = storesToAnnotate[s].getReadCountForStrand(Location.UNKNOWN);
        summaries[s].totalReads = storesToAnnotate[s].getTotalReadCount();
        summaries[s].totalReadLength = storesToAnnotate[s].getTotalReadLength();
        progressUpdated("Processing " + storesToAnnotate[s].name(), s, storesToAnnotate.length);

        // Now work out the mean read length
        summaries[s].medianReadLength = (int) (summaries[s].totalReadLength / summaries[s].totalReads);

        // ..and the fold coverage
        summaries[s].foldCoverage = ((double) summaries[s].totalReadLength) / dataCollection().genome().getTotalGenomeLength();

        // If we don't have any probes then we don't need to do this
        if (probes.length > 0) {
            float[] values = new float[probes.length];
            for (int p = 0; p < probes.length; p++) {
                if (cancel) {
                    progressCancelled();
                    return;
                }
                try {
                    values[p] = storesToAnnotate[s].getValueForProbe(probes[p]);
                    if (!(Float.isNaN(values[p]) || Float.isInfinite(values[p]))) {
                        summaries[s].totalQuantitation += values[p];
                        summaries[s].validQuantiationCount++;
                    }
                } catch (SeqMonkException e) {
                    values[p] = Float.NaN;
                }
            }
            // Now work out the stats
            summaries[s].meanQuantitation = SimpleStats.mean(values);
            summaries[s].medianQuantitation = SimpleStats.median(values);
        } else {
            summaries[s].meanQuantitation = 0;
            summaries[s].medianQuantitation = 0;
        }
    }
    TableModel model = new AnnotationTableModel(summaries);
    reportComplete(model);
}
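Two of the summary figures are simple ratios: average read length is total read length over read count, and fold coverage is total read length over genome length. The quantitation stats additionally skip NaN and infinite probe values when accumulating the running total. A small sketch of those calculations follows, using made-up numbers rather than a real SeqMonk DataStore.

import java.util.Arrays;

// Sketch: the ratio-style summary stats from DataStoreSummaryReport,
// computed on hypothetical numbers instead of a SeqMonk DataStore.
public class StoreSummarySketch {
    public static void main(String[] args) {
        long totalReads = 1_000_000L;
        long totalReadLength = 50_000_000L;   // bases
        long genomeLength = 3_000_000_000L;   // bases

        int averageReadLength = (int) (totalReadLength / totalReads);
        double foldCoverage = (double) totalReadLength / genomeLength;

        // Quantitation stats: ignore NaN/Infinite values when summing,
        // as the report does when it builds totalQuantitation.
        float[] probeValues = {1.5f, Float.NaN, 2.5f, Float.POSITIVE_INFINITY, 3.0f};
        double total = 0;
        int validCount = 0;
        for (float v : probeValues) {
            if (!(Float.isNaN(v) || Float.isInfinite(v))) {
                total += v;
                validCount++;
            }
        }

        // Median over the raw array (NaNs sort to the end in Arrays.sort)
        float[] sorted = Arrays.copyOf(probeValues, probeValues.length);
        Arrays.sort(sorted);
        float median = sorted[sorted.length / 2];

        System.out.println("Average read length: " + averageReadLength);
        System.out.println("Fold coverage: " + foldCoverage);
        System.out.println("Mean quantitation (valid only): " + total / validCount);
        System.out.println("Median (raw): " + median);
    }
}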
Use of uk.ac.babraham.SeqMonk.SeqMonkException in project SeqMonk by s-andrews.
The class EnrichmentNormalisationQuantitation, method run.
/* (non-Javadoc)
 * @see java.lang.Runnable#run()
 */
public void run() {
    if (!isReady()) {
        progressExceptionReceived(new SeqMonkException("Options weren't set correctly"));
    }
    Probe[] allProbes = application.dataCollection().probeSet().getAllProbes();
    Probe[] calculateProbes = ((ProbeList) calculateFromProbeList.getSelectedItem()).getAllProbes();
    float[] lowerPercentileValues = new float[data.length];
    float[] upperPercentileValues = new float[data.length];

    // Work out the value at the appropriate percentile
    for (int d = 0; d < data.length; d++) {
        progressUpdated("Calculating correction for " + data[d].name(), d, data.length);
        float[] theseValues = new float[calculateProbes.length];
        for (int p = 0; p < calculateProbes.length; p++) {
            try {
                theseValues[p] = data[d].getValueForProbe(calculateProbes[p]);
            } catch (SeqMonkException e) {
                progressExceptionReceived(e);
            }
        }
        Arrays.sort(theseValues);
        lowerPercentileValues[d] = getPercentileValue(theseValues, lowerPercentile);
        upperPercentileValues[d] = getPercentileValue(theseValues, upperPercentile);
        System.err.println("Lower percentile for " + data[d].name() + " is " + lowerPercentileValues[d] + " from " + theseValues.length + " values");
    }
    float lowerMedian = SimpleStats.median(Arrays.copyOf(lowerPercentileValues, lowerPercentileValues.length));
    float upperMedian = SimpleStats.median(Arrays.copyOf(upperPercentileValues, upperPercentileValues.length));
    for (int d = 0; d < data.length; d++) {
        System.err.println("Looking at " + data[d].name());

        // See if we need to quit
        if (cancel) {
            progressCancelled();
            return;
        }
        progressUpdated(d, data.length);

        // Now we work out the correction factor we're actually going to use.
        // The additive factor moves this store's lower percentile onto the
        // cross-store median; the divisive factor then rescales about that
        // anchor so the upper percentile lands on the cross-store median too.
        float lowerCorrectionFactor = lowerMedian - lowerPercentileValues[d];
        float upperCorrectionFactor = ((upperPercentileValues[d] + lowerCorrectionFactor) - lowerMedian) / (upperMedian - lowerMedian);
        System.err.println("Lower percentile = " + lowerPercentileValues[d]);
        System.err.println("Upper percentile = " + upperPercentileValues[d]);
        System.err.println("Lower median = " + lowerMedian);
        System.err.println("Upper median = " + upperMedian);
        System.err.println("Lower correction = " + lowerCorrectionFactor);
        System.err.println("Upper correction = " + upperCorrectionFactor);
        System.err.println("\n\n");

        // Apply the correction to all probes
        try {
            for (int p = 0; p < allProbes.length; p++) {
                // See if we need to quit
                if (cancel) {
                    progressCancelled();
                    return;
                }
                float probeValue = data[d].getValueForProbe(allProbes[p]);
                probeValue += lowerCorrectionFactor;
                probeValue -= lowerMedian;
                probeValue /= upperCorrectionFactor;
                probeValue += lowerMedian;
                data[d].setValueForProbe(allProbes[p], probeValue);
            }
        } catch (SeqMonkException e) {
            progressExceptionReceived(e);
        }
    }
    quantitatonComplete();
}
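The correction is a two-point linear rescaling: each store's lower and upper percentile values are mapped onto the median lower and upper percentile across all stores, shifting first and then scaling about the lower anchor. The sketch below applies that mapping to a plain array of values; the percentile helper is a simplified stand-in (no interpolation) and the target medians are just example numbers, not values taken from SeqMonk.

import java.util.Arrays;

// Sketch: two-point percentile normalisation. A store whose 10th/90th
// percentiles are (lower, upper) is linearly mapped so they land on the
// cross-store targets (lowerMedian, upperMedian), following the same
// shift-then-scale steps as EnrichmentNormalisationQuantitation.
public class PercentileRescaleSketch {

    static float percentile(float[] sortedValues, double pct) {
        int index = (int) ((sortedValues.length - 1) * (pct / 100.0));
        return sortedValues[index];   // simplified: no interpolation
    }

    public static void main(String[] args) {
        float[] values = {0.5f, 1.0f, 2.0f, 3.5f, 5.0f, 8.0f};
        float[] sorted = Arrays.copyOf(values, values.length);
        Arrays.sort(sorted);

        float lower = percentile(sorted, 10);
        float upper = percentile(sorted, 90);

        // Targets: in the real quantitation these are the medians of the
        // per-store percentiles; here they are just example numbers.
        float lowerMedian = 1.0f;
        float upperMedian = 6.0f;

        float lowerCorrection = lowerMedian - lower;
        float upperCorrection = ((upper + lowerCorrection) - lowerMedian) / (upperMedian - lowerMedian);

        for (int i = 0; i < values.length; i++) {
            float v = values[i];
            v += lowerCorrection;   // shift so the lower percentile hits lowerMedian
            v -= lowerMedian;       // scale about the lower anchor...
            v /= upperCorrection;   // ...so the upper percentile hits upperMedian
            v += lowerMedian;
            values[i] = v;
        }
        System.out.println(Arrays.toString(values));
    }
}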
Use of uk.ac.babraham.SeqMonk.SeqMonkException in project SeqMonk by s-andrews.
The class FourCEnrichmentQuantitation, method run.
/* (non-Javadoc)
 * @see java.lang.Runnable#run()
 */
public void run() {
    // Start off by finding the right HiC data for each 4C dataset
    HiCDataStore[] parentHiC = new HiCDataStore[data.length];
    Probe[] parentProbes = new Probe[data.length];
    ProbeList[] significantLists = new ProbeList[data.length];
    for (int d = 0; d < data.length; d++) {
        String filename = data[d].fileName();
        filename = filename.replaceAll("HiC other end of ", "");
        filename = filename.replaceAll(" for region.*", "");
        System.err.println("Looking for HiC match to " + filename);
        for (int h = 0; h < hiCData.length; h++) {
            if (((DataStore) hiCData[h]).name().equals(filename)) {
                parentHiC[d] = hiCData[h];
            }
        }
        if (parentHiC[d] == null) {
            progressWarningReceived(new SeqMonkException("Failed to find HiC dataset '" + filename + "' for 4C dataset " + data[d].name()));
            continue;
        }
        significantLists[d] = new ProbeList(application.dataCollection().probeSet(), "4C p<0.05 " + data[d].name(), "4C pipeline significance < 0.05", "P-value");

        // Also make up a probe to represent the region from which
        // the dataset was made
        filename = data[d].fileName();
        filename = filename.replaceAll("^.*for region ", "");
        String[] locationSections = filename.split("[-:]");
        if (locationSections.length != 3) {
            progressWarningReceived(new SeqMonkException("Couldn't extract location from " + filename));
            continue;
        }
        try {
            parentProbes[d] = new Probe(application.dataCollection().genome().getChromosome(locationSections[0]).chromosome(), Integer.parseInt(locationSections[1]), Integer.parseInt(locationSections[2]));
        } catch (Exception e) {
            progressExceptionReceived(e);
            return;
        }
    }

    // Make strength calculators for each HiC set
    HiCInteractionStrengthCalculator[] strengthCalculators = new HiCInteractionStrengthCalculator[parentHiC.length];
    for (int i = 0; i < parentHiC.length; i++) {
        strengthCalculators[i] = new HiCInteractionStrengthCalculator(parentHiC[i], true);
    }

    // Get the cis/trans counts for the parent probes
    int[] parentProbeCisCounts = new int[parentHiC.length];
    int[] parentProbeTransCounts = new int[parentHiC.length];
    for (int p = 0; p < parentHiC.length; p++) {
        if (parentHiC[p] == null)
            continue;
        HiCHitCollection hits = parentHiC[p].getHiCReadsForProbe(parentProbes[p]);
        String[] chrNames = hits.getChromosomeNamesWithHits();
        for (int c = 0; c < chrNames.length; c++) {
            if (chrNames[c].equals(hits.getSourceChromosomeName())) {
                parentProbeCisCounts[p] = hits.getSourcePositionsForChromosome(chrNames[c]).length;
            } else {
                parentProbeTransCounts[p] += hits.getSourcePositionsForChromosome(chrNames[c]).length;
            }
        }
    }
    Probe[] probes = application.dataCollection().probeSet().getAllProbes();
    for (int p = 0; p < probes.length; p++) {
        // See if we need to quit
        if (cancel) {
            progressCancelled();
            return;
        }
        progressUpdated(p, probes.length);
        for (int d = 0; d < data.length; d++) {
            if (parentHiC[d] == null)
                continue;
            int thisProbeCisCount = 0;
            int thisProbeTransCount = 0;
            HiCHitCollection hiCHits = parentHiC[d].getHiCReadsForProbe(probes[p]);
            String[] chrNames = hiCHits.getChromosomeNamesWithHits();
            for (int c = 0; c < chrNames.length; c++) {
                if (chrNames[c].equals(hiCHits.getSourceChromosomeName())) {
                    thisProbeCisCount = hiCHits.getSourcePositionsForChromosome(chrNames[c]).length;
                } else {
                    thisProbeTransCount += hiCHits.getSourcePositionsForChromosome(chrNames[c]).length;
                }
            }
            strengthCalculators[d].calculateInteraction(data[d].getReadsForProbe(probes[p]).length, thisProbeCisCount, thisProbeTransCount, parentProbeCisCounts[d], parentProbeTransCounts[d], probes[p], parentProbes[d]);
            float obsExp = (float) strengthCalculators[d].obsExp();
            data[d].setValueForProbe(probes[p], obsExp);

            // Multiple-testing correction: scale the raw p-value by the number of probes tested
            float pValue = (float) strengthCalculators[d].rawPValue() * probes.length;
            if (pValue < 0.05) {
                significantLists[d].addProbe(probes[p], pValue);
            }
        }
    }
    quantitatonComplete();
}
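The significance filter at the end is a Bonferroni-style correction: each raw p-value is multiplied by the number of probes tested and the probe is kept only if the corrected value is still below 0.05. A minimal sketch of that step follows; the p-values and test count are invented numbers, not output from the 4C pipeline.

import java.util.ArrayList;
import java.util.List;

// Sketch: Bonferroni-style filtering as used when FourCEnrichmentQuantitation
// builds its "4C p<0.05" probe lists. The raw p-values here are made up.
public class BonferroniSketch {
    public static void main(String[] args) {
        double[] rawPValues = {1e-7, 0.0004, 0.01, 0.2};
        int numberOfTests = 10_000;     // stands in for probes.length

        List<Integer> significant = new ArrayList<>();
        for (int i = 0; i < rawPValues.length; i++) {
            double corrected = rawPValues[i] * numberOfTests;
            if (corrected < 0.05) {
                significant.add(i);     // survives the corrected 0.05 threshold
            }
        }
        System.out.println("Significant indices after correction: " + significant);
    }
}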