Usage of org.broadinstitute.hellbender.tools.coveragemodel.CoverageModelEMWorkspace in the gatk-protected project by broadinstitute: the runPipeline method of the GermlineCNVCaller class.
/**
 * The main routine: reads every input table, assembles the coverage-model EM
 * workspace, runs the EM algorithm according to the requested job type, and
 * writes the results to disk.
 *
 * @param ctx a nullable Spark context
 */
@Override
protected void runPipeline(@Nullable JavaSparkContext ctx) {
    // Targets are optional; a null collection means every target in the read counts table is used.
    final TargetCollection<Target> targets = optionalTargets.readTargetCollection(true);
    if (targets == null) {
        logger.info("No target file was provided: using all targets in the combined read counts table");
    }

    logger.info("Parsing the read counts table...");
    final ReadCountCollection counts = loadReadCountCollection(targets);

    logger.info("Parsing the sample sex genotypes table...");
    final SexGenotypeDataCollection sexGenotypes = loadSexGenotypeDataCollection();

    logger.info("Parsing the germline contig ploidy annotation table...");
    final GermlinePloidyAnnotatedTargetCollection ploidyAnnotations =
            loadGermlinePloidyAnnotatedTargetCollection(counts);

    logger.info("Parsing the copy number transition prior table and initializing the caches...");
    final IntegerCopyNumberTransitionProbabilityCacheCollection transitionCaches =
            createIntegerCopyNumberTransitionProbabilityCacheCollection();
    final IntegerCopyNumberExpectationsCalculator copyNumberCalculator =
            new IntegerCopyNumberExpectationsCalculator(transitionCaches, params.getMinLearningReadCount());

    // A pre-trained model is mandatory for CALL_ONLY; otherwise it may be null and is learned from the data.
    final CoverageModelParameters model = getCoverageModelParameters();
    Utils.validateArg(model != null || !jobType.equals(JobType.CALL_ONLY),
            "Model parameters are not given; can not run the tool in the CALL_ONLY mode.");

    logger.info("Initializing the EM algorithm workspace...");
    final IntegerCopyNumberReferenceStateFactory referenceStateFactory =
            new IntegerCopyNumberReferenceStateFactory(ploidyAnnotations);
    final CoverageModelEMWorkspace<IntegerCopyNumberState> workspace = new CoverageModelEMWorkspace<>(
            counts, ploidyAnnotations, sexGenotypes, copyNumberCalculator, params, model,
            referenceStateFactory, ctx);
    final CoverageModelEMAlgorithm<IntegerCopyNumberState> algo =
            new CoverageModelEMAlgorithm<>(params, workspace);

    switch (jobType) {
        case LEARN_AND_CALL:
            // Full run: learn model parameters via EM, then persist the learned model.
            algo.runExpectationMaximization();
            logger.info("Saving the model to disk...");
            workspace.writeModel(new File(outputPath, FINAL_MODEL_SUBDIR).getAbsolutePath());
            break;
        case CALL_ONLY:
            // Calling with a previously learned model: E-step only.
            algo.runExpectation();
            break;
        default:
            throw new UnsupportedOperationException(
                    String.format("\"%s\" is not recognized as a supported job type", jobType.name()));
    }

    // Posteriors are written for both job types.
    logger.info("Saving posteriors to disk...");
    workspace.writePosteriors(new File(outputPath, FINAL_POSTERIORS_SUBDIR).getAbsolutePath(),
            CoverageModelEMWorkspace.PosteriorVerbosityLevel.EXTENDED);
}
Usage of org.broadinstitute.hellbender.tools.coveragemodel.CoverageModelEMWorkspace in the gatk project by broadinstitute: the runPipeline method of the GermlineCNVCaller class.
/**
 * The main routine. Parses the input tables (read counts, sex genotypes,
 * ploidy annotations, copy-number transition priors), builds the EM
 * workspace, and dispatches on the job type: LEARN_AND_CALL fits the model
 * and saves it, CALL_ONLY performs calling with a given model; posteriors
 * are saved in either case.
 *
 * @param ctx a nullable Spark context
 */
@Override
protected void runPipeline(@Nullable JavaSparkContext ctx) {
    final TargetCollection<Target> targetCollection = optionalTargets.readTargetCollection(true);
    // A null target collection is legal — all targets in the read counts table will be used.
    if (targetCollection == null) {
        logger.info("No target file was provided: using all targets in the combined read counts table");
    }
    logger.info("Parsing the read counts table...");
    final ReadCountCollection readCountCollection = loadReadCountCollection(targetCollection);
    logger.info("Parsing the sample sex genotypes table...");
    final SexGenotypeDataCollection sexGenotypeData = loadSexGenotypeDataCollection();
    logger.info("Parsing the germline contig ploidy annotation table...");
    final GermlinePloidyAnnotatedTargetCollection ploidyAnnotatedTargets =
            loadGermlinePloidyAnnotatedTargetCollection(readCountCollection);
    logger.info("Parsing the copy number transition prior table and initializing the caches...");
    final IntegerCopyNumberTransitionProbabilityCacheCollection transitionProbabilityCaches =
            createIntegerCopyNumberTransitionProbabilityCacheCollection();
    final IntegerCopyNumberExpectationsCalculator expectationsCalculator =
            new IntegerCopyNumberExpectationsCalculator(transitionProbabilityCaches,
                    params.getMinLearningReadCount());
    final CoverageModelParameters coverageModel = getCoverageModelParameters();
    // CALL_ONLY requires a pre-trained model; fail fast if none was supplied.
    Utils.validateArg(coverageModel != null || !jobType.equals(JobType.CALL_ONLY),
            "Model parameters are not given; can not run the tool in the CALL_ONLY mode.");
    logger.info("Initializing the EM algorithm workspace...");
    final IntegerCopyNumberReferenceStateFactory refStateFactory =
            new IntegerCopyNumberReferenceStateFactory(ploidyAnnotatedTargets);
    final CoverageModelEMWorkspace<IntegerCopyNumberState> emWorkspace =
            new CoverageModelEMWorkspace<>(readCountCollection, ploidyAnnotatedTargets,
                    sexGenotypeData, expectationsCalculator, params, coverageModel,
                    refStateFactory, ctx);
    final CoverageModelEMAlgorithm<IntegerCopyNumberState> emAlgorithm =
            new CoverageModelEMAlgorithm<>(params, emWorkspace);
    switch (jobType) {
        case LEARN_AND_CALL: {
            emAlgorithm.runExpectationMaximization();
            logger.info("Saving the model to disk...");
            final String modelPath = new File(outputPath, FINAL_MODEL_SUBDIR).getAbsolutePath();
            emWorkspace.writeModel(modelPath);
            break;
        }
        case CALL_ONLY: {
            emAlgorithm.runExpectation();
            break;
        }
        default: {
            throw new UnsupportedOperationException(
                    String.format("\"%s\" is not recognized as a supported job type", jobType.name()));
        }
    }
    logger.info("Saving posteriors to disk...");
    final String posteriorsPath = new File(outputPath, FINAL_POSTERIORS_SUBDIR).getAbsolutePath();
    emWorkspace.writePosteriors(posteriorsPath,
            CoverageModelEMWorkspace.PosteriorVerbosityLevel.EXTENDED);
}
Aggregations