Use of org.broadinstitute.hellbender.engine.filters.ReadFilter in project gatk-protected by broadinstitute.
The class SparkGenomeReadCounts, method collectReads:
private void collectReads() {
    if (readArguments.getReadFilesNames().size() != 1) {
        throw new UserException("This tool only accepts a single bam/sam/cram as input");
    }
    final SampleCollection sampleCollection = new SampleCollection(getHeaderForReads());
    if (sampleCollection.sampleCount() > 1) {
        throw new UserException.BadInput("We do not support bams with more than one sample.");
    }
    final String sampleName = sampleCollection.sampleIds().get(0);
    final String[] commentsForRawCoverage = {
            "##fileFormat = tsv",
            "##commandLine = " + getCommandLine(),
            String.format("##title = Coverage counts in %d base bins for WGS", binsize)
    };
    final ReadFilter filter = makeGenomeReadFilter();
    final SAMSequenceDictionary sequenceDictionary = getReferenceSequenceDictionary();

    logger.info("Starting Spark coverage collection...");
    final long coverageCollectionStartTime = System.currentTimeMillis();
    final JavaRDD<GATKRead> rawReads = getReads();
    final JavaRDD<GATKRead> reads = rawReads.filter(read -> filter.test(read));
    // Note: using a field inside a closure will pull in the whole enclosing object to serialization
    // (which leads to bad performance and can blow up if some objects in the fields are not
    // Serializable - closures always use java Serializable and not Kryo)
    // Solution here is to use a temp variable for binsize because it's just an int.
    final int binsize_tmp = binsize;
    final JavaRDD<SimpleInterval> readIntervals = reads
            .filter(read -> sequenceDictionary.getSequence(read.getContig()) != null)
            .map(read -> SparkGenomeReadCounts.createKey(read, sequenceDictionary, binsize_tmp));
    final Map<SimpleInterval, Long> byKey = readIntervals.countByValue();
    final Set<SimpleInterval> readIntervalKeySet = byKey.keySet();
    final long totalReads = byKey.values().stream().mapToLong(v -> v).sum();
    final long coverageCollectionEndTime = System.currentTimeMillis();
    logger.info(String.format("Finished the spark coverage collection with %d targets and %d reads. Elapse of %d seconds",
            readIntervalKeySet.size(), totalReads, (coverageCollectionEndTime - coverageCollectionStartTime) / 1000));

    final String[] commentsForProportionalCoverage = {
            commentsForRawCoverage[0],
            commentsForRawCoverage[1],
            String.format("##title = Proportional coverage counts in %d base bins for WGS (total reads: %d)", binsize, totalReads)
    };

    logger.info("Creating full genome bins...");
    final long createGenomeBinsStartTime = System.currentTimeMillis();
    final List<SimpleInterval> fullGenomeBins = createFullGenomeBins(binsize);
    List<Target> fullGenomeTargetCollection = createTargetListFromSimpleInterval(fullGenomeBins);
    TargetWriter.writeTargetsToFile(new File(outputFile.getAbsolutePath() + ".targets.tsv"), fullGenomeTargetCollection);
    final long createGenomeBinsEndTime = System.currentTimeMillis();
    logger.info(String.format("Finished creating genome bins. Elapse of %d seconds",
            (createGenomeBinsEndTime - createGenomeBinsStartTime) / 1000));

    logger.info("Creating missing genome bins...");
    final long createMissingGenomeBinsStartTime = System.currentTimeMillis();
    logger.info("Creating missing genome bins: Creating a mutable mapping...");
    final Map<SimpleInterval, Long> byKeyMutable = new HashMap<>();
    byKeyMutable.putAll(byKey);
    logger.info("Creating missing genome bins: Populating mutable mapping with zero counts for empty regions...");
    fullGenomeBins.stream().forEach(b -> byKeyMutable.putIfAbsent(b, 0L));
    final long createMissingGenomeBinsEndTime = System.currentTimeMillis();
    logger.info(String.format("Finished creating missing genome bins. Elapse of %d seconds",
            (createMissingGenomeBinsEndTime - createMissingGenomeBinsStartTime) / 1000));

    logger.info("Creating final map...");
    final long createFinalMapStartTime = System.currentTimeMillis();
    final SortedMap<SimpleInterval, Long> byKeySorted = new TreeMap<>(IntervalUtils.LEXICOGRAPHICAL_ORDER_COMPARATOR);
    byKeySorted.putAll(byKeyMutable);
    final long createFinalMapEndTime = System.currentTimeMillis();
    logger.info(String.format("Finished creating final map. Elapse of %d seconds",
            (createFinalMapEndTime - createFinalMapStartTime) / 1000));

    logger.info("Creating proportional coverage... ");
    final long pCovFileStartTime = System.currentTimeMillis();
    final SortedMap<SimpleInterval, Double> byKeyProportionalSorted = new TreeMap<>(IntervalUtils.LEXICOGRAPHICAL_ORDER_COMPARATOR);
    byKeySorted.entrySet().stream().forEach(e -> byKeyProportionalSorted.put(e.getKey(), (double) e.getValue() / totalReads));
    final long pCovFileEndTime = System.currentTimeMillis();
    logger.info(String.format("Finished creating proportional coverage map. Elapse of %d seconds",
            (pCovFileEndTime - pCovFileStartTime) / 1000));

    logger.info("Writing raw coverage file ...");
    final long writingCovFileStartTime = System.currentTimeMillis();
    ReadCountCollectionUtils.writeReadCountsFromSimpleInterval(new File(outputFile.getAbsolutePath() + RAW_COV_OUTPUT_EXTENSION), sampleName, byKeySorted, commentsForRawCoverage);
    final long writingCovFileEndTime = System.currentTimeMillis();
    logger.info(String.format("Finished writing coverage file. Elapse of %d seconds",
            (writingCovFileEndTime - writingCovFileStartTime) / 1000));

    logger.info("Writing proportional coverage file ...");
    final long writingPCovFileStartTime = System.currentTimeMillis();
    ReadCountCollectionUtils.writeReadCountsFromSimpleInterval(outputFile, sampleName, byKeyProportionalSorted, commentsForProportionalCoverage);
    final long writingPCovFileEndTime = System.currentTimeMillis();
    logger.info(String.format("Finished writing proportional coverage file. Elapse of %d seconds",
            (writingPCovFileEndTime - writingPCovFileStartTime) / 1000));
}
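The comment in the middle of collectReads points at a general Spark pitfall: referencing an instance field inside a lambda captures the whole enclosing object, which must then be Java-serialized with each task, whereas copying the field into a local variable keeps the closure small. Below is a minimal sketch of that pattern in isolation; the class, field, and method names are hypothetical, and only GATKRead.getStart() plus the standard Spark RDD API are taken as given.

import org.apache.spark.api.java.JavaRDD;
import org.broadinstitute.hellbender.utils.read.GATKRead;

public class BinningExample {                    // hypothetical tool, not part of GATK
    private int binSize = 10_000;                // instance field we want to use inside a Spark lambda

    JavaRDD<Integer> binStarts(final JavaRDD<GATKRead> reads) {
        // Capturing 'binSize' directly would drag 'this' (the whole tool) into the closure.
        // An effectively-final local copy keeps the serialized task small.
        final int binSizeLocal = binSize;
        return reads.map(read -> (read.getStart() / binSizeLocal) * binSizeLocal);
    }
}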
Use of org.broadinstitute.hellbender.engine.filters.ReadFilter in project gatk by broadinstitute.
The class PileupSpark, method getDefaultReadFilters:
@Override
public List<ReadFilter> getDefaultReadFilters() {
    List<ReadFilter> filterList = new ArrayList<>(5);
    filterList.add(ReadFilterLibrary.MAPPED);
    filterList.add(ReadFilterLibrary.NOT_DUPLICATE);
    filterList.add(ReadFilterLibrary.PASSES_VENDOR_QUALITY_CHECK);
    filterList.add(ReadFilterLibrary.PRIMARY_ALIGNMENT);
    filterList.add(new WellformedReadFilter());
    return filterList;
}
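PileupSpark only declares its defaults here; elsewhere on this page (for example in CollectMultipleMetricsSpark below) the same kind of list is collapsed into a single predicate with ReadFilter.fromList and applied to an RDD. A hedged sketch of that usage, assuming a SAMFileHeader named header and a JavaRDD<GATKRead> named reads are already in scope:

final ReadFilter combinedDefaults = ReadFilter.fromList(getDefaultReadFilters(), header);
final JavaRDD<GATKRead> filteredReads = reads.filter(read -> combinedDefaults.test(read));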
Use of org.broadinstitute.hellbender.engine.filters.ReadFilter in project gatk by broadinstitute.
The class BaseRecalibratorSparkSharded, method runPipeline:
@Override
protected void runPipeline(JavaSparkContext ctx) {
    if (readArguments.getReadFilesNames().size() != 1) {
        throw new UserException("Sorry, we only support a single reads input for now.");
    }
    final String bam = readArguments.getReadFilesNames().get(0);
    final String referenceURL = referenceArguments.getReferenceFileName();
    auth = getAuthHolder();
    final ReferenceMultiSource rds = new ReferenceMultiSource(auth, referenceURL, BaseRecalibrationEngine.BQSR_REFERENCE_WINDOW_FUNCTION);
    SAMFileHeader readsHeader = new ReadsSparkSource(ctx, readArguments.getReadValidationStringency()).getHeader(bam, referenceURL);
    final SAMSequenceDictionary readsDictionary = readsHeader.getSequenceDictionary();
    final SAMSequenceDictionary refDictionary = rds.getReferenceSequenceDictionary(readsDictionary);
    final ReadFilter readFilterToApply = ReadFilter.fromList(BaseRecalibrator.getStandardBQSRReadFilterList(), readsHeader);
    SequenceDictionaryUtils.validateDictionaries("reference", refDictionary, "reads", readsDictionary);
    Broadcast<SAMFileHeader> readsHeaderBcast = ctx.broadcast(readsHeader);
    Broadcast<SAMSequenceDictionary> refDictionaryBcast = ctx.broadcast(refDictionary);
    List<SimpleInterval> intervals = intervalArgumentCollection.intervalsSpecified()
            ? intervalArgumentCollection.getIntervals(readsHeader.getSequenceDictionary())
            : IntervalUtils.getAllIntervalsForReference(readsHeader.getSequenceDictionary());
    List<String> localVariants = knownVariants;
    localVariants = hackilyCopyFromGCSIfNecessary(localVariants);
    List<GATKVariant> variants = VariantsSource.getVariantsList(localVariants);
    // get reads, reference, variants
    JavaRDD<ContextShard> readsWithContext = AddContextDataToReadSparkOptimized.add(ctx, intervals, bam, variants, readFilterToApply, rds);
    // run BaseRecalibratorEngine.
    BaseRecalibratorEngineSparkWrapper recal = new BaseRecalibratorEngineSparkWrapper(readsHeaderBcast, refDictionaryBcast, bqsrArgs);
    JavaRDD<RecalibrationTables> tables = readsWithContext.mapPartitions(s -> recal.apply(s));
    final RecalibrationTables emptyRecalibrationTable = new RecalibrationTables(new StandardCovariateList(bqsrArgs, readsHeader));
    final RecalibrationTables table = tables.treeAggregate(emptyRecalibrationTable,
            RecalibrationTables::inPlaceCombine, RecalibrationTables::inPlaceCombine,
            Math.max(1, (int) (Math.log(tables.partitions().size()) / Math.log(2))));
    BaseRecalibrationEngine.finalizeRecalibrationTables(table);
    try {
        BaseRecalibratorEngineSparkWrapper.saveTextualReport(outputTablesPath, readsHeader, table, bqsrArgs, auth);
    } catch (IOException e) {
        throw new UserException.CouldNotCreateOutputFile(new File(outputTablesPath), e);
    }
}
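The depth argument passed to treeAggregate above grows with the base-2 logarithm of the partition count, clamped to at least 1, so the partial RecalibrationTables are combined in a tree rather than all at once on the driver. A small sketch of just that arithmetic; the helper name is chosen here for illustration and is not part of the tool:

// Hypothetical helper mirroring the depth expression used above.
static int treeAggregateDepth(final int numPartitions) {
    // log2(numPartitions), truncated to an int; never less than 1 (a single partition would otherwise give 0).
    return Math.max(1, (int) (Math.log(numPartitions) / Math.log(2)));
}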
Use of org.broadinstitute.hellbender.engine.filters.ReadFilter in project gatk by broadinstitute.
The class CollectMultipleMetricsSpark, method runTool:
@Override
protected void runTool(final JavaSparkContext ctx) {
    final JavaRDD<GATKRead> unFilteredReads = getUnfilteredReads();
    List<SparkCollectorProvider> collectorsToRun = getCollectorsToRun();
    if (collectorsToRun.size() > 1) {
        // if there is more than one collector to run, cache the
        // unfiltered RDD so we don't recompute it
        unFilteredReads.cache();
    }
    for (final SparkCollectorProvider provider : collectorsToRun) {
        MetricsCollectorSpark<? extends MetricsArgumentCollection> metricsCollector =
                provider.createCollector(outputBaseName, metricAccumulationLevel.accumulationLevels, getDefaultHeaders(), getHeaderForReads());
        validateCollector(metricsCollector, collectorsToRun.get(collectorsToRun.indexOf(provider)).getClass().getName());
        // Execute the collector's lifecycle.
        // Bypass the framework merging of command line filters and just apply the default
        // ones specified by the collector.
        ReadFilter readFilter = ReadFilter.fromList(metricsCollector.getDefaultReadFilters(), getHeaderForReads());
        metricsCollector.collectMetrics(unFilteredReads.filter(r -> readFilter.test(r)), getHeaderForReads());
        metricsCollector.saveMetrics(getReadSourceName(), getAuthHolder());
    }
}
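Because each collector triggers its own Spark action over the same input, the snippet caches unFilteredReads before the loop so the RDD is not recomputed per collector. A hedged sketch of that reuse pattern in isolation, assuming an RDD named unFilteredReads and using only the ReadFilterLibrary filters shown earlier on this page:

unFilteredReads.cache();    // materialize once, reuse across multiple passes
final long mappedCount = unFilteredReads.filter(r -> ReadFilterLibrary.MAPPED.test(r)).count();
final long nonDuplicateCount = unFilteredReads.filter(r -> ReadFilterLibrary.NOT_DUPLICATE.test(r)).count();
unFilteredReads.unpersist();    // release the cached blocks once all passes are done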
Use of org.broadinstitute.hellbender.engine.filters.ReadFilter in project gatk by broadinstitute.
The class ReadsPipelineSpark, method runTool:
@Override
protected void runTool(final JavaSparkContext ctx) {
    if (joinStrategy == JoinStrategy.BROADCAST && !getReference().isCompatibleWithSparkBroadcast()) {
        throw new UserException.Require2BitReferenceForBroadcast();
    }
    // TODO: should this use getUnfilteredReads? getReads will apply default and command line filters
    final JavaRDD<GATKRead> initialReads = getReads();
    final JavaRDD<GATKRead> markedReadsWithOD = MarkDuplicatesSpark.mark(initialReads, getHeaderForReads(), duplicatesScoringStrategy, new OpticalDuplicateFinder(), getRecommendedNumReducers());
    final JavaRDD<GATKRead> markedReads = MarkDuplicatesSpark.cleanupTemporaryAttributes(markedReadsWithOD);
    // The markedReads have already had the WellformedReadFilter applied to them, which
    // is all the filtering that MarkDupes and ApplyBQSR want. BQSR itself wants additional
    // filtering performed, so we do that here.
    // NOTE: this doesn't honor enabled/disabled commandline filters
    final ReadFilter bqsrReadFilter = ReadFilter.fromList(BaseRecalibrator.getBQSRSpecificReadFilterList(), getHeaderForReads());
    final JavaRDD<GATKRead> markedFilteredReadsForBQSR = markedReads.filter(read -> bqsrReadFilter.test(read));
    VariantsSparkSource variantsSparkSource = new VariantsSparkSource(ctx);
    JavaRDD<GATKVariant> bqsrKnownVariants = variantsSparkSource.getParallelVariants(baseRecalibrationKnownVariants, getIntervals());
    JavaPairRDD<GATKRead, ReadContextData> rddReadContext = AddContextDataToReadSpark.add(ctx, markedFilteredReadsForBQSR, getReference(), bqsrKnownVariants, joinStrategy, getReferenceSequenceDictionary(), readShardSize, readShardPadding);
    final RecalibrationReport bqsrReport = BaseRecalibratorSparkFn.apply(rddReadContext, getHeaderForReads(), getReferenceSequenceDictionary(), bqsrArgs);
    final Broadcast<RecalibrationReport> reportBroadcast = ctx.broadcast(bqsrReport);
    final JavaRDD<GATKRead> finalReads = ApplyBQSRSparkFn.apply(markedReads, reportBroadcast, getHeaderForReads(), applyBqsrArgs.toApplyBQSRArgumentCollection(bqsrArgs.PRESERVE_QSCORES_LESS_THAN));
    writeReads(ctx, output, finalReads);
}
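The comments note that markedReads have already passed the WellformedReadFilter, and that BQSR needs additional filtering on top of it. One way to express the complete BQSR filter set as a single predicate, sketched here with the same ReadFilter.fromList call used above; composing the list this way is an assumption for illustration, not the tool's actual code:

final List<ReadFilter> bqsrFilters = new ArrayList<>(BaseRecalibrator.getBQSRSpecificReadFilterList());
bqsrFilters.add(new WellformedReadFilter());    // add the well-formedness check explicitly
final ReadFilter fullBqsrFilter = ReadFilter.fromList(bqsrFilters, getHeaderForReads());
final JavaRDD<GATKRead> readsForBQSR = initialReads.filter(read -> fullBqsrFilter.test(read));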