Use of org.apache.spark.api.java.JavaRDD in project gatk-protected by broadinstitute.
The class CNLOHCaller, method calcNewRhos.
private double[] calcNewRhos(final List<ACNVModeledSegment> segments,
                             final List<double[][][]> responsibilitiesBySeg,
                             final double lambda, final double[] rhos,
                             final int[] mVals, final int[] nVals,
                             final JavaSparkContext ctx) {
    // Since we pass in the entire responsibilities matrix, we need the correct index for each rho.
    // That, and the fact that this is a univariate objective function, means we need to create an
    // instance for each rho, and then we blast across Spark.
    final List<Pair<? extends Function<Double, Double>, SearchInterval>> objectives =
            IntStream.range(0, rhos.length)
                    .mapToObj(i -> new Pair<>(new Function<Double, Double>() {
                        @Override
                        public Double apply(Double rho) {
                            return calculateESmnObjective(rho, segments, responsibilitiesBySeg,
                                    mVals, nVals, lambda, i);
                        }
                    }, new SearchInterval(0.0, 1.0, rhos[i])))
                    .collect(Collectors.toList());
    final JavaRDD<Pair<? extends Function<Double, Double>, SearchInterval>> objectivesRDD =
            ctx.parallelize(objectives);
    final List<Double> resultsAsDouble = objectivesRDD
            .map(objective -> optimizeIt(objective.getFirst(), objective.getSecond()))
            .collect();
    return resultsAsDouble.stream().mapToDouble(Double::doubleValue).toArray();
}
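The pattern worth noting here: each rho gets its own univariate objective and search interval, the list of (objective, interval) pairs is parallelized, and a univariate optimizer (optimizeIt) runs on each element inside the Spark map. Below is a minimal, self-contained sketch of the same pattern under stated assumptions — the class name PerIndexBrentOnSpark and the toy quadratic objective are illustrative, and Apache Commons Math's BrentOptimizer merely stands in for whatever optimizeIt uses internally:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.math3.optim.MaxEval;
import org.apache.commons.math3.optim.nonlinear.scalar.GoalType;
import org.apache.commons.math3.optim.univariate.BrentOptimizer;
import org.apache.commons.math3.optim.univariate.SearchInterval;
import org.apache.commons.math3.optim.univariate.UnivariateObjectiveFunction;
import org.apache.spark.api.java.JavaSparkContext;

public final class PerIndexBrentOnSpark {
    // Runs one independent Brent optimization per index on Spark: minimize the toy
    // objective f_i(rho) = (rho - targets[i])^2 over [0, 1] and return the optima.
    public static double[] optimizeAll(final JavaSparkContext ctx, final double[] targets) {
        final List<Integer> indices =
                IntStream.range(0, targets.length).boxed().collect(Collectors.toList());
        final List<Double> optima = ctx.parallelize(indices).map(i -> {
            // Capture only primitives/arrays so the Spark closure stays serializable.
            final double target = targets[i];
            final BrentOptimizer optimizer = new BrentOptimizer(1e-10, 1e-12);
            return optimizer.optimize(
                    new MaxEval(200),
                    new UnivariateObjectiveFunction(rho -> (rho - target) * (rho - target)),
                    GoalType.MINIMIZE,
                    new SearchInterval(0.0, 1.0)).getPoint();
        }).collect();
        return optima.stream().mapToDouble(Double::doubleValue).toArray();
    }
}

Because the lambda captures only an index and a double[], Spark's Java serialization never has to drag along an enclosing object — the same concern that the SparkGenomeReadCounts snippet below calls out explicitly.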
Use of org.apache.spark.api.java.JavaRDD in project gatk by broadinstitute.
The class SparkGenomeReadCounts, method collectReads.
private void collectReads() {
    if (readArguments.getReadFilesNames().size() != 1) {
        throw new UserException("This tool only accepts a single bam/sam/cram as input");
    }
    final SampleCollection sampleCollection = new SampleCollection(getHeaderForReads());
    if (sampleCollection.sampleCount() > 1) {
        throw new UserException.BadInput("We do not support bams with more than one sample.");
    }
    final String sampleName = sampleCollection.sampleIds().get(0);
    final String[] commentsForRawCoverage = {
            "##fileFormat = tsv",
            "##commandLine = " + getCommandLine(),
            String.format("##title = Coverage counts in %d base bins for WGS", binsize) };
    final ReadFilter filter = makeGenomeReadFilter();
    final SAMSequenceDictionary sequenceDictionary = getReferenceSequenceDictionary();

    logger.info("Starting Spark coverage collection...");
    final long coverageCollectionStartTime = System.currentTimeMillis();
    final JavaRDD<GATKRead> rawReads = getReads();
    final JavaRDD<GATKRead> reads = rawReads.filter(read -> filter.test(read));
    // Note: using a field inside a closure will pull the whole enclosing object into serialization,
    // which hurts performance and can blow up if some of the objects in the fields are not
    // Serializable (closures always use Java serialization, never Kryo).
    // The solution here is to copy binsize into a temp variable, since it's just an int.
    final int binsize_tmp = binsize;
    final JavaRDD<SimpleInterval> readIntervals = reads
            .filter(read -> sequenceDictionary.getSequence(read.getContig()) != null)
            .map(read -> SparkGenomeReadCounts.createKey(read, sequenceDictionary, binsize_tmp));
    final Map<SimpleInterval, Long> byKey = readIntervals.countByValue();
    final Set<SimpleInterval> readIntervalKeySet = byKey.keySet();
    final long totalReads = byKey.values().stream().mapToLong(v -> v).sum();
    final long coverageCollectionEndTime = System.currentTimeMillis();
    logger.info(String.format("Finished the spark coverage collection with %d targets and %d reads. Elapse of %d seconds",
            readIntervalKeySet.size(), totalReads, (coverageCollectionEndTime - coverageCollectionStartTime) / 1000));

    final String[] commentsForProportionalCoverage = {
            commentsForRawCoverage[0], commentsForRawCoverage[1],
            String.format("##title = Proportional coverage counts in %d base bins for WGS (total reads: %d)",
                    binsize, totalReads) };

    logger.info("Creating full genome bins...");
    final long createGenomeBinsStartTime = System.currentTimeMillis();
    final List<SimpleInterval> fullGenomeBins = createFullGenomeBins(binsize);
    List<Target> fullGenomeTargetCollection = createTargetListFromSimpleInterval(fullGenomeBins);
    TargetWriter.writeTargetsToFile(new File(outputFile.getAbsolutePath() + ".targets.tsv"), fullGenomeTargetCollection);
    final long createGenomeBinsEndTime = System.currentTimeMillis();
    logger.info(String.format("Finished creating genome bins. Elapse of %d seconds",
            (createGenomeBinsEndTime - createGenomeBinsStartTime) / 1000));

    logger.info("Creating missing genome bins...");
    final long createMissingGenomeBinsStartTime = System.currentTimeMillis();
    logger.info("Creating missing genome bins: Creating a mutable mapping...");
    final Map<SimpleInterval, Long> byKeyMutable = new HashMap<>();
    byKeyMutable.putAll(byKey);
    logger.info("Creating missing genome bins: Populating mutable mapping with zero counts for empty regions...");
    fullGenomeBins.stream().forEach(b -> byKeyMutable.putIfAbsent(b, 0L));
    final long createMissingGenomeBinsEndTime = System.currentTimeMillis();
    logger.info(String.format("Finished creating missing genome bins. Elapse of %d seconds",
            (createMissingGenomeBinsEndTime - createMissingGenomeBinsStartTime) / 1000));

    logger.info("Creating final map...");
    final long createFinalMapStartTime = System.currentTimeMillis();
    final SortedMap<SimpleInterval, Long> byKeySorted = new TreeMap<>(IntervalUtils.LEXICOGRAPHICAL_ORDER_COMPARATOR);
    byKeySorted.putAll(byKeyMutable);
    final long createFinalMapEndTime = System.currentTimeMillis();
    logger.info(String.format("Finished creating final map. Elapse of %d seconds",
            (createFinalMapEndTime - createFinalMapStartTime) / 1000));

    logger.info("Creating proportional coverage... ");
    final long pCovFileStartTime = System.currentTimeMillis();
    final SortedMap<SimpleInterval, Double> byKeyProportionalSorted = new TreeMap<>(IntervalUtils.LEXICOGRAPHICAL_ORDER_COMPARATOR);
    byKeySorted.entrySet().stream().forEach(e -> byKeyProportionalSorted.put(e.getKey(), (double) e.getValue() / totalReads));
    final long pCovFileEndTime = System.currentTimeMillis();
    logger.info(String.format("Finished creating proportional coverage map. Elapse of %d seconds",
            (pCovFileEndTime - pCovFileStartTime) / 1000));

    logger.info("Writing raw coverage file ...");
    final long writingCovFileStartTime = System.currentTimeMillis();
    ReadCountCollectionUtils.writeReadCountsFromSimpleInterval(
            new File(outputFile.getAbsolutePath() + RAW_COV_OUTPUT_EXTENSION), sampleName, byKeySorted, commentsForRawCoverage);
    final long writingCovFileEndTime = System.currentTimeMillis();
    logger.info(String.format("Finished writing coverage file. Elapse of %d seconds",
            (writingCovFileEndTime - writingCovFileStartTime) / 1000));

    logger.info("Writing proportional coverage file ...");
    final long writingPCovFileStartTime = System.currentTimeMillis();
    ReadCountCollectionUtils.writeReadCountsFromSimpleInterval(outputFile, sampleName, byKeyProportionalSorted, commentsForProportionalCoverage);
    final long writingPCovFileEndTime = System.currentTimeMillis();
    logger.info(String.format("Finished writing proportional coverage file. Elapse of %d seconds",
            (writingPCovFileEndTime - writingPCovFileStartTime) / 1000));
}
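The serialization note in the middle of this method is the main lesson: referencing the binsize field directly inside an RDD lambda would capture this (the whole tool object) for Java serialization, so the code copies the field into a local int first. A tiny hedged illustration of that local-copy pattern — BinningExample and binStarts are hypothetical names, and the bin arithmetic only approximates what createKey might do:

import org.apache.spark.api.java.JavaRDD;

final class BinningExample {
    private int binsize = 10_000;          // field on a (possibly non-serializable) tool object

    JavaRDD<Integer> binStarts(final JavaRDD<Integer> readStarts) {
        final int binsizeLocal = binsize;  // local copy: the lambda captures only this int, not 'this'
        // Map each 1-based read start position to the 1-based start of its bin.
        return readStarts.map(start -> ((start - 1) / binsizeLocal) * binsizeLocal + 1);
    }
}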
Use of org.apache.spark.api.java.JavaRDD in project gatk by broadinstitute.
The class ReadsSparkSinkUnitTest, method readsSinkShardedTest.
@Test(dataProvider = "loadReadsBAM", groups = "spark")
public void readsSinkShardedTest(String inputBam, String outputFileName, String referenceFile, String outputFileExtension) throws IOException {
    final File outputFile = createTempFile(outputFileName, outputFileExtension);
    JavaSparkContext ctx = SparkContextFactory.getTestSparkContext();
    ReadsSparkSource readSource = new ReadsSparkSource(ctx);
    JavaRDD<GATKRead> rddParallelReads = readSource.getParallelReads(inputBam, referenceFile);
    // ensure that the output is in two shards
    rddParallelReads = rddParallelReads.repartition(2);
    SAMFileHeader header = readSource.getHeader(inputBam, referenceFile);
    ReadsSparkSink.writeReads(ctx, outputFile.getAbsolutePath(), referenceFile, rddParallelReads, header, ReadsWriteFormat.SHARDED);
    int shards = outputFile.listFiles((dir, name) -> !name.startsWith(".") && !name.startsWith("_")).length;
    Assert.assertEquals(shards, 2);
    // check that no local .crc files are created
    int crcs = outputFile.listFiles((dir, name) -> name.startsWith(".") && name.endsWith(".crc")).length;
    Assert.assertEquals(crcs, 0);
    JavaRDD<GATKRead> rddParallelReads2 = readSource.getParallelReads(outputFile.getAbsolutePath(), referenceFile);
    // reads are not globally sorted, so don't test that
    Assert.assertEquals(rddParallelReads.count(), rddParallelReads2.count());
}
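The two file-count assertions rest on a simple observation: a sharded write produces one visible part file per partition, while Hadoop adds hidden helpers such as _SUCCESS and .*.crc that must be filtered out. A small hedged sketch of that counting idea, with the hypothetical helper ShardCounting (not part of the test):

import java.io.File;

final class ShardCounting {
    // Counts the visible shard files in an output directory, skipping hidden
    // Hadoop artifacts (names starting with '.' or '_', e.g. _SUCCESS, .part-00000.crc).
    static int countVisibleShards(final File outputDir) {
        final File[] parts = outputDir.listFiles(
                (dir, name) -> !name.startsWith(".") && !name.startsWith("_"));
        return parts == null ? 0 : parts.length;
    }
}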
Use of org.apache.spark.api.java.JavaRDD in project gatk by broadinstitute.
The class SparkSharderUnitTest, method testSingleContig.
@Test
public void testSingleContig() throws IOException {
    JavaSparkContext ctx = SparkContextFactory.getTestSparkContext();
    // Consider the following reads (divided into four partitions), and intervals.
    // This test counts the number of reads that overlap each interval.
    //
    //                   1                   2
    // 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7
    // ---------------------------------------------------------
    // Reads in partition 0
    // [-----]
    //         [-----]
    //             [-----]
    // ---------------------------------------------------------
    // Reads in partition 1
    //             [-----]
    //             [-----]
    //             [-----]
    // ---------------------------------------------------------
    // Reads in partition 2
    //             [-----]
    //                     [-----]
    //                       [-----]
    // ---------------------------------------------------------
    // Reads in partition 3
    //                                 [-----]
    //                                         [-----]
    //                                                 [-----]
    // ---------------------------------------------------------
    // Per-partition read extents
    // [-----------------]
    //             [-----]
    //             [---------------]
    //                                 [---------------------]
    // ---------------------------------------------------------
    // Intervals
    //   [-----]
    //               [---------]
    //                     [-----------------------]
    //
    //                   1                   2
    // ---------------------------------------------------------
    // 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7
    JavaRDD<TestRead> reads = ctx.parallelize(ImmutableList.of(
            new TestRead(1, 3), new TestRead(5, 7), new TestRead(7, 9),
            new TestRead(7, 9), new TestRead(7, 9), new TestRead(7, 9),
            new TestRead(7, 9), new TestRead(11, 13), new TestRead(12, 14),
            new TestRead(17, 19), new TestRead(21, 23), new TestRead(25, 27)), 4);
    List<SimpleInterval> intervals = ImmutableList.of(
            new SimpleInterval("1", 2, 4), new SimpleInterval("1", 8, 12), new SimpleInterval("1", 11, 22));
    List<ShardBoundary> shardBoundaries = intervals.stream().map(si -> new ShardBoundary(si, si)).collect(Collectors.toList());
    ImmutableMap<SimpleInterval, Integer> expectedReadsPerInterval =
            ImmutableMap.of(intervals.get(0), 1, intervals.get(1), 7, intervals.get(2), 4);
    JavaPairRDD<Locatable, Integer> readsPerInterval =
            SparkSharder.shard(ctx, reads, TestRead.class, sequenceDictionary, shardBoundaries, STANDARD_READ_LENGTH, false)
                    .flatMapToPair(new CountOverlappingReadsFunction());
    assertEquals(readsPerInterval.collectAsMap(), expectedReadsPerInterval);
    JavaPairRDD<Locatable, Integer> readsPerIntervalShuffle =
            SparkSharder.shard(ctx, reads, TestRead.class, sequenceDictionary, shardBoundaries, STANDARD_READ_LENGTH, true)
                    .flatMapToPair(new CountOverlappingReadsFunction());
    assertEquals(readsPerIntervalShuffle.collectAsMap(), expectedReadsPerInterval);
    try {
        // max read length less than actual causes exception
        int maxReadLength = STANDARD_READ_LENGTH - 1;
        SparkSharder.shard(ctx, reads, TestRead.class, sequenceDictionary, shardBoundaries, maxReadLength, true)
                .flatMapToPair(new CountOverlappingReadsFunction()).collect();
    } catch (Exception e) {
        assertEquals(e.getCause().getClass(), UserException.class);
    }
}
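The expected values in the test (1, 7 and 4 overlapping reads for the three intervals) follow from plain closed-interval overlap logic, independent of the sharding machinery. A self-contained sketch that reproduces those numbers — OverlapCounts is a hypothetical helper, not part of the GATK test:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

final class OverlapCounts {
    // Counts how many closed-interval reads [start, end] overlap each interval.
    static Map<int[], Long> countOverlaps(final List<int[]> reads, final List<int[]> intervals) {
        final Map<int[], Long> counts = new LinkedHashMap<>();
        for (final int[] interval : intervals) {
            // Overlap iff read.start <= interval.end && read.end >= interval.start.
            counts.put(interval, reads.stream()
                    .filter(r -> r[0] <= interval[1] && r[1] >= interval[0])
                    .count());
        }
        return counts;
    }

    public static void main(String[] args) {
        final List<int[]> reads = Arrays.asList(
                new int[]{1, 3}, new int[]{5, 7}, new int[]{7, 9}, new int[]{7, 9},
                new int[]{7, 9}, new int[]{7, 9}, new int[]{7, 9}, new int[]{11, 13},
                new int[]{12, 14}, new int[]{17, 19}, new int[]{21, 23}, new int[]{25, 27});
        final List<int[]> intervals = Arrays.asList(
                new int[]{2, 4}, new int[]{8, 12}, new int[]{11, 22});
        // Prints 1, 7 and 4 -- the same values as expectedReadsPerInterval above.
        countOverlaps(reads, intervals).forEach((i, n) ->
                System.out.printf("[%d, %d] -> %d%n", i[0], i[1], n));
    }
}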
Use of org.apache.spark.api.java.JavaRDD in project gatk-protected by broadinstitute.
The class SparkGenomeReadCounts, method collectReads (the snippet is identical to the gatk version shown above).