use of org.broadinstitute.hellbender.tools.exome.ReadCountCollection in project gatk by broadinstitute.
the class TitanFileConverter method convertCRToTitanCovFile.
/**
 * Create a target file that is compatible with TITAN.
 *
 * @param tnFile Readable file from {@link org.broadinstitute.hellbender.tools.exome.NormalizeSomaticReadCounts}
 * @param outputFile Not {@code null}
 */
public static void convertCRToTitanCovFile(final File tnFile, final File outputFile) {
    IOUtils.canReadFile(tnFile);
    try {
        final ReadCountCollection rcc = ReadCountCollectionUtils.parse(tnFile);
        final TitanCopyRatioEstimateWriter titanCopyRatioEstimateWriter = new TitanCopyRatioEstimateWriter(outputFile);
        titanCopyRatioEstimateWriter.writeAllRecords(rcc.records());
        titanCopyRatioEstimateWriter.close();
    } catch (final IOException ioe) {
        throw new UserException.BadInput("Bad output file: " + outputFile);
    }
}
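For context, a minimal sketch of how this converter might be driven. The file names below are hypothetical, the only project call used is convertCRToTitanCovFile shown above, and the input is expected to be a tangent-normalized read count file.

// Hypothetical driver; assumes TitanFileConverter (shown above) is imported from the GATK codebase.
import java.io.File;

public class TitanCovConversionSketch {
    public static void main(final String[] args) {
        // Hypothetical paths; in practice these would come from tool arguments.
        final File tangentNormalizedCounts = new File("sample.tn.tsv");
        final File titanCoverageFile = new File("sample.titan.cov.tsv");
        // Delegates to the converter above; throws UserException.BadInput on an I/O failure.
        TitanFileConverter.convertCRToTitanCovFile(tangentNormalizedCounts, titanCoverageFile);
    }
}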
use of org.broadinstitute.hellbender.tools.exome.ReadCountCollection in project gatk-protected by broadinstitute.
the class CoverageModelParameters method adaptModelToReadCountCollection.
/**
 * This method "adapts" a model to a read count collection in the following sense:
 *
 * - removes targets that are not included in the model from the read count collection
 * - removes targets that are not included in the read count collection from the model
 * - rearranges model targets in the same order as read count collection targets
 *
 * The modifications are not done in-place and the original input parameters remain intact.
 *
 * @param model a model
 * @param readCounts a read count collection
 * @return a pair of the adapted model and the subsetted read count collection
 */
public static ImmutablePair<CoverageModelParameters, ReadCountCollection> adaptModelToReadCountCollection(@Nonnull final CoverageModelParameters model, @Nonnull final ReadCountCollection readCounts, @Nonnull final Logger logger) {
    logger.info("Adapting model to read counts...");
    Utils.nonNull(model, "The model parameters must be non-null");
    Utils.nonNull(readCounts, "The read count collection must be non-null");
    Utils.nonNull(logger, "The logger must be non-null");
    final List<Target> modelTargetList = model.getTargetList();
    final List<Target> readCountsTargetList = readCounts.targets();
    final Set<Target> mutualTargetSet = Sets.intersection(new HashSet<>(modelTargetList), new HashSet<>(readCountsTargetList));
    final List<Target> mutualTargetList = readCountsTargetList.stream()
            .filter(mutualTargetSet::contains)
            .collect(Collectors.toList());
    logger.info("Number of mutual targets: " + mutualTargetList.size());
    Utils.validateArg(mutualTargetList.size() > 0, "The intersection between model targets and targets from the read count" +
            " collection is empty. Please check that the model is compatible with the given read count" +
            " collection.");
    if (modelTargetList.size() > mutualTargetList.size()) {
        logger.info("The following targets were dropped from the model: " + Sets.difference(new HashSet<>(modelTargetList), mutualTargetSet).stream()
                .map(Target::getName).collect(Collectors.joining(", ", "[", "]")));
    }
    if (readCountsTargetList.size() > mutualTargetList.size()) {
        logger.info("The following targets were dropped from the read counts: " + Sets.difference(new HashSet<>(readCountsTargetList), mutualTargetSet).stream()
                .map(Target::getName).collect(Collectors.joining(", ", "[", "]")));
    }
    /* the targets in {@code subsetReadCounts} follow the original order of targets in {@code readCounts} */
    final ReadCountCollection subsetReadCounts = readCounts.subsetTargets(mutualTargetSet);
    /* fetch original model parameters */
    final INDArray originalModelTargetMeanBias = model.getTargetMeanLogBias();
    final INDArray originalModelTargetUnexplainedVariance = model.getTargetUnexplainedVariance();
    final INDArray originalModelMeanBiasCovariates = model.getMeanBiasCovariates();
    /* re-arrange targets, mean log bias, and target-specific unexplained variance */
    final Map<Target, Integer> modelTargetsToIndexMap = IntStream.range(0, modelTargetList.size())
            .mapToObj(ti -> ImmutablePair.of(modelTargetList.get(ti), ti))
            .collect(Collectors.toMap(Pair<Target, Integer>::getLeft, Pair<Target, Integer>::getRight));
    final int[] newTargetIndicesInOriginalModel = mutualTargetList.stream()
            .mapToInt(modelTargetsToIndexMap::get)
            .toArray();
    final INDArray newModelTargetMeanBias = Nd4j.create(new int[] { 1, mutualTargetList.size() });
    final INDArray newModelTargetUnexplainedVariance = Nd4j.create(new int[] { 1, mutualTargetList.size() });
    IntStream.range(0, mutualTargetList.size()).forEach(ti -> {
        newModelTargetMeanBias.put(0, ti, originalModelTargetMeanBias.getDouble(0, newTargetIndicesInOriginalModel[ti]));
        newModelTargetUnexplainedVariance.put(0, ti, originalModelTargetUnexplainedVariance.getDouble(0, newTargetIndicesInOriginalModel[ti]));
    });
    /* if the model has bias covariates and/or ARD, re-arrange the mean/var of bias covariates as well */
    final INDArray newModelMeanBiasCovariates;
    if (model.isBiasCovariatesEnabled()) {
        newModelMeanBiasCovariates = Nd4j.create(new int[] { mutualTargetList.size(), model.getNumLatents() });
        IntStream.range(0, mutualTargetList.size()).forEach(ti -> {
            newModelMeanBiasCovariates.get(NDArrayIndex.point(ti), NDArrayIndex.all())
                    .assign(originalModelMeanBiasCovariates.get(NDArrayIndex.point(newTargetIndicesInOriginalModel[ti]), NDArrayIndex.all()));
        });
    } else {
        newModelMeanBiasCovariates = null;
    }
    return ImmutablePair.of(
            new CoverageModelParameters(mutualTargetList, newModelTargetMeanBias, newModelTargetUnexplainedVariance, newModelMeanBiasCovariates, model.getBiasCovariateARDCoefficients()),
            subsetReadCounts);
}
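The essential moves here are an intersection of the two target lists (keeping the read-count ordering) followed by a permutation of the model's per-target parameters through an index map. Below is a simplified, self-contained illustration of that reordering logic, using plain strings in place of Target and a double[] in place of an INDArray row; the names and data are made up.

import java.util.*;
import java.util.stream.*;

// Simplified illustration of the target-intersection and reordering logic above.
public class ModelAdaptationSketch {
    public static void main(final String[] args) {
        final List<String> modelTargets = Arrays.asList("t1", "t2", "t3", "t4");
        final double[] modelMeanBias = {0.1, 0.2, 0.3, 0.4};      // one value per model target
        final List<String> readCountTargets = Arrays.asList("t3", "t1", "t5");

        // Mutual targets, kept in the read-count collection's order.
        final Set<String> mutual = new HashSet<>(modelTargets);
        mutual.retainAll(new HashSet<>(readCountTargets));
        final List<String> mutualOrdered = readCountTargets.stream()
                .filter(mutual::contains)
                .collect(Collectors.toList());

        // Map each model target to its original column index.
        final Map<String, Integer> modelIndex = IntStream.range(0, modelTargets.size())
                .boxed()
                .collect(Collectors.toMap(modelTargets::get, i -> i));

        // Permute the model parameters into the read-count target order.
        final double[] reorderedMeanBias = mutualOrdered.stream()
                .mapToDouble(t -> modelMeanBias[modelIndex.get(t)])
                .toArray();

        System.out.println(mutualOrdered);                      // [t3, t1]
        System.out.println(Arrays.toString(reorderedMeanBias)); // [0.3, 0.1]
    }
}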
use of org.broadinstitute.hellbender.tools.exome.ReadCountCollection in project gatk by broadinstitute.
the class CoveragePoNQCUtilsUnitTest method testIdentifySamplesWithSuspiciousContigsNoSuspiciousSamples.
@Test
public void testIdentifySamplesWithSuspiciousContigsNoSuspiciousSamples() {
    ReadCountCollection allCoverageProfiles = null;
    try {
        allCoverageProfiles = ReadCountCollectionUtils.parse(TEST_NO_SUSPICIOUS_SAMPLES_FILE);
    } catch (final IOException ioe) {
        Assert.fail("Could not load test file: " + TEST_NO_SUSPICIOUS_SAMPLES_FILE, ioe);
    }
    final List<ReadCountCollection> singleSampleTangentNormalizedReadCounts = CoveragePoNQCUtils.createIndividualReadCountCollections(allCoverageProfiles);
    // By the time we are here, input is assumed to have been tangent normalized.
    final List<String> blacklistSamples = CoveragePoNQCUtils.identifySamplesWithSuspiciousContigs(singleSampleTangentNormalizedReadCounts, CoveragePoNQCUtils.getContigToMedianCRMap(allCoverageProfiles));
    Assert.assertEquals(blacklistSamples.size(), 0);
}
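The QC check above compares each sample against a contig-level baseline obtained from getContigToMedianCRMap. Below is a simplified, self-contained sketch of that per-contig median idea, with made-up copy-ratio values standing in for a real ReadCountCollection.

import java.util.*;
import java.util.stream.*;

// Simplified sketch: group copy ratios by contig and take the per-contig median as the reference level.
public class ContigMedianSketch {
    static double median(final List<Double> values) {
        final List<Double> sorted = values.stream().sorted().collect(Collectors.toList());
        final int n = sorted.size();
        return n % 2 == 1 ? sorted.get(n / 2) : (sorted.get(n / 2 - 1) + sorted.get(n / 2)) / 2.0;
    }

    public static void main(final String[] args) {
        final Map<String, List<Double>> copyRatiosByContig = new LinkedHashMap<>();
        copyRatiosByContig.put("1", Arrays.asList(1.02, 0.98, 1.01));
        copyRatiosByContig.put("2", Arrays.asList(0.51, 0.49, 0.50));   // looks like a deleted contig

        copyRatiosByContig.forEach((contig, ratios) ->
                System.out.println(contig + " -> median CR " + median(ratios)));
    }
}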
use of org.broadinstitute.hellbender.tools.exome.ReadCountCollection in project gatk by broadinstitute.
the class CoveragePoNQCUtilsUnitTest method testIdentifySamplesWithSuspiciousContigsDelsWithSpark.
@Test
public void testIdentifySamplesWithSuspiciousContigsDelsWithSpark() {
    final JavaSparkContext ctx = SparkContextFactory.getTestSparkContext();
    final Set<String> gtBlacklistSamples = new HashSet<>();
    gtBlacklistSamples.add("sample_1");
    gtBlacklistSamples.add("sample_2");
    gtBlacklistSamples.add("sample_3");
    ReadCountCollection allCoverageProfiles = null;
    try {
        allCoverageProfiles = ReadCountCollectionUtils.parse(TEST_FILE_DEL);
    } catch (final IOException ioe) {
        Assert.fail("Could not load test file: " + TEST_FILE_DEL, ioe);
    }
    final JavaRDD<ReadCountCollection> allSampleTangentNormalizedReadCounts = CoveragePoNQCUtils.createParallelIndividualReadCountCollections(allCoverageProfiles, ctx);
    // By the time we are here, input is assumed to have been tangent normalized.
    final List<String> blacklistSamples = CoveragePoNQCUtils.identifySamplesWithSuspiciousContigs(allSampleTangentNormalizedReadCounts, ctx, CoveragePoNQCUtils.getContigToMedianCRMap(allCoverageProfiles));
    final Set<String> resultSamples = new HashSet<>(blacklistSamples);
    Assert.assertEquals(resultSamples.size(), gtBlacklistSamples.size());
    Assert.assertEquals(Sets.difference(resultSamples, gtBlacklistSamples).size(), 0);
}
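The Spark variant differs from the previous test only in that the single-sample collections are distributed as a JavaRDD before the per-sample check runs. Below is a minimal sketch of that distribute-filter-collect pattern, using strings in place of single-sample ReadCountCollections and a hypothetical predicate in place of the contig-level check; note the test above obtains its context from SparkContextFactory rather than building one.

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import java.util.Arrays;
import java.util.List;

// Minimal sketch of the per-sample QC pattern: distribute samples, evaluate each independently,
// and collect the names that fail the check as a blacklist.
public class PerSampleQcSketch {
    public static void main(final String[] args) {
        final SparkConf conf = new SparkConf().setAppName("per-sample-qc").setMaster("local[2]");
        try (final JavaSparkContext ctx = new JavaSparkContext(conf)) {
            final List<String> samples = Arrays.asList("sample_1", "sample_2", "sample_3");
            final JavaRDD<String> perSample = ctx.parallelize(samples);
            final List<String> blacklist = perSample
                    .filter(s -> s.endsWith("_2"))   // hypothetical stand-in for the contig-level check
                    .collect();
            System.out.println(blacklist);           // [sample_2]
        }
    }
}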