Use of ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission in project irida by phac-nml.
Shown below: the getProjectAnalysesByType method of the RESTProjectAnalysisController class.
/**
 * Get the list of {@link AnalysisSubmission}s for this {@link Project} by
 * type of analysis.
 *
 * @param projectId
 *            The {@link Project} to search.
 * @param type
 *            The analysis type to search for.
 * @return A list of {@link AnalysisSubmission}s for the given
 *         {@link Project} by the given type.
 * @throws IridaWorkflowNotFoundException
 *             If the {@link AnalysisSubmission} is linked to a workflow not
 *             found in IRIDA.
 */
@RequestMapping(value = "/api/projects/{projectId}/analyses/{type}", method = RequestMethod.GET)
public ModelMap getProjectAnalysesByType(@PathVariable Long projectId, @PathVariable String type)
		throws IridaWorkflowNotFoundException {
	logger.debug("Loading analyses for project [" + projectId + "] by type [" + type + "]");

	// Reject unknown type strings up front with a 404-style error.
	if (!RESTAnalysisSubmissionController.ANALYSIS_TYPES.containsKey(type)) {
		throw new EntityNotFoundException("Analysis type [" + type + "] not found");
	}
	AnalysisType requestedType = RESTAnalysisSubmissionController.ANALYSIS_TYPES.get(type);

	Project project = projectService.read(projectId);
	Collection<AnalysisSubmission> submissions = analysisSubmissionService
			.getAnalysisSubmissionsSharedToProject(project);

	// Keep only the submissions whose workflow matches the requested analysis type,
	// attaching a self link to each one kept.
	ResourceCollection<AnalysisSubmission> resources = new ResourceCollection<>(submissions.size());
	for (AnalysisSubmission submission : submissions) {
		IridaWorkflow workflow = iridaWorkflowsService.getIridaWorkflow(submission.getWorkflowId());
		AnalysisType workflowType = workflow.getWorkflowDescription().getAnalysisType();
		if (!requestedType.equals(workflowType)) {
			continue;
		}
		submission.add(linkTo(methodOn(RESTAnalysisSubmissionController.class, Long.class)
				.getResource(submission.getId())).withSelfRel());
		resources.add(submission);
	}

	// Collection-level links: back to the owning project, plus this endpoint itself.
	resources.add(linkTo(methodOn(RESTProjectsController.class, Long.class).getResource(projectId))
			.withRel(PROJECT_REL));
	resources.add(linkTo(methodOn(RESTProjectAnalysisController.class, Long.class)
			.getProjectAnalysesByType(projectId, type)).withSelfRel());

	ModelMap modelMap = new ModelMap();
	modelMap.addAttribute(ANALYSIS_RESOURCES, resources);
	return modelMap;
}
Use of ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission in project irida by phac-nml.
Shown below: the testSNVPhylSuccess method of the SNVPhylAnalysisIT class.
/**
 * Tests out successfully executing the SNVPhyl pipeline.
 * <p>
 * Runs the full pipeline against three paired-end samples and a reference
 * genome, then verifies: the final analysis state, that all 8 expected output
 * files are produced with tool provenance, and that the provenance chain can
 * be walked back to both the reads and reference upload tools.
 *
 * @throws Exception if any pipeline stage or verification step fails
 */
@Test
@WithMockUser(username = "aaron", roles = "ADMIN")
public void testSNVPhylSuccess() throws Exception {
// Register three paired-end sequence file sets against samples 1-3.
SequenceFilePair sequenceFilePairA = databaseSetupGalaxyITService.setupSampleSequenceFileInDatabase(1L, sequenceFilePathsA1List, sequenceFilePathsA2List).get(0);
SequenceFilePair sequenceFilePairB = databaseSetupGalaxyITService.setupSampleSequenceFileInDatabase(2L, sequenceFilePathsB1List, sequenceFilePathsB2List).get(0);
SequenceFilePair sequenceFilePairC = databaseSetupGalaxyITService.setupSampleSequenceFileInDatabase(3L, sequenceFilePathsC1List, sequenceFilePathsC2List).get(0);
// Pipeline parameters; expected outputs below (outputSnvMatrix1 etc.) were
// pre-computed for exactly these settings.
Map<String, String> parameters = ImmutableMap.of("snv-abundance-ratio", "0.75", "minimum-read-coverage", "2", "filter-density-threshold", "2", "filter-density-window-size", "3");
waitForFilesToSettle(sequenceFilePairA, sequenceFilePairB, sequenceFilePairC);
AnalysisSubmission submission = databaseSetupGalaxyITService.setupPairSubmissionInDatabase(Sets.newHashSet(sequenceFilePairA, sequenceFilePairB, sequenceFilePairC), referenceFilePath, parameters, snvPhylWorkflow.getWorkflowIdentifier());
// Drive the submission through all execution stages, then re-read it to get
// the persisted final state.
completeSubmittedAnalyses(submission.getId());
submission = analysisSubmissionRepository.findOne(submission.getId());
assertEquals("analysis state should be completed.", AnalysisState.COMPLETED, submission.getAnalysisState());
Analysis analysisPhylogenomics = submission.getAnalysis();
assertEquals("Should have generated a phylogenomics pipeline analysis type.", AnalysisType.PHYLOGENOMICS, analysisPhylogenomics.getAnalysisType());
assertEquals("the phylogenomics pipeline should have 8 output files.", 8, analysisPhylogenomics.getAnalysisOutputFiles().size());
// Deterministic outputs are compared byte-for-byte against reference copies;
// the Scanner reads (delimiter "\\Z" = whole file) are only used to include
// the actual content in failure messages.
// NOTE(review): the Scanners are never closed (@SuppressWarnings("resource"));
// tolerable in a test, but try-with-resources would be cleaner.
@SuppressWarnings("resource") String matrixContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(MATRIX_KEY).getFile().toFile()).useDelimiter("\\Z").next();
assertTrue("snpMatrix should be the same but is \"" + matrixContent + "\"", com.google.common.io.Files.equal(outputSnvMatrix1.toFile(), analysisPhylogenomics.getAnalysisOutputFile(MATRIX_KEY).getFile().toFile()));
assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(MATRIX_KEY).getCreatedByTool());
@SuppressWarnings("resource") String snpTableContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(TABLE_KEY).getFile().toFile()).useDelimiter("\\Z").next();
assertTrue("snpTable should be the same but is \"" + snpTableContent + "\"", com.google.common.io.Files.equal(outputSnvTable1.toFile(), analysisPhylogenomics.getAnalysisOutputFile(TABLE_KEY).getFile().toFile()));
assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(TABLE_KEY).getCreatedByTool());
@SuppressWarnings("resource") String vcf2coreContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(CORE_KEY).getFile().toFile()).useDelimiter("\\Z").next();
assertTrue("vcf2core should be the same but is \"" + vcf2coreContent + "\"", com.google.common.io.Files.equal(vcf2core1.toFile(), analysisPhylogenomics.getAnalysisOutputFile(CORE_KEY).getFile().toFile()));
assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(CORE_KEY).getCreatedByTool());
// only check size of mapping quality file due to samples output in random order
assertTrue("the mapping quality file should not be empty.", Files.size(analysisPhylogenomics.getAnalysisOutputFile(QUALITY_KEY).getFile()) > 0);
@SuppressWarnings("resource") String filterStatsContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(STATS_KEY).getFile().toFile()).useDelimiter("\\Z").next();
assertTrue("filterStats should be the same but is \"" + filterStatsContent + "\"", com.google.common.io.Files.equal(filterStats1.toFile(), analysisPhylogenomics.getAnalysisOutputFile(STATS_KEY).getFile().toFile()));
assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(STATS_KEY).getCreatedByTool());
@SuppressWarnings("resource") String snvAlignContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(ALIGN_KEY).getFile().toFile()).useDelimiter("\\Z").next();
assertTrue("snvAlign should be the same but is \"" + snvAlignContent + "\"", com.google.common.io.Files.equal(snvAlign1.toFile(), analysisPhylogenomics.getAnalysisOutputFile(ALIGN_KEY).getFile().toFile()));
assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(ALIGN_KEY).getCreatedByTool());
// only test to make sure the files have a valid size since PhyML uses a
// random seed to generate the tree (and so changes results)
assertTrue("the phylogenetic tree file should not be empty.", Files.size(analysisPhylogenomics.getAnalysisOutputFile(TREE_KEY).getFile()) > 0);
// NOTE(review): this assertion reads TREE_KEY again although the message
// refers to the tree *stats* file — looks like a copy-paste slip; verify
// whether a separate tree-stats output key was intended here.
assertTrue("the phylogenetic tree stats file should not be empty.", Files.size(analysisPhylogenomics.getAnalysisOutputFile(TREE_KEY).getFile()) > 0);
// try to follow the phylogenomics provenance all the way back to the
// upload tools
final List<ToolExecution> toolsToVisit = Lists.newArrayList(analysisPhylogenomics.getAnalysisOutputFile(TREE_KEY).getCreatedByTool());
assertFalse("file should have tool provenance attached.", toolsToVisit.isEmpty());
boolean foundReadsInputTool = false;
boolean foundReferenceInputTool = false;
// one where you upload the reads.
// Breadth-first walk over the provenance graph via each tool's previous steps.
while (!toolsToVisit.isEmpty()) {
final ToolExecution ex = toolsToVisit.remove(0);
toolsToVisit.addAll(ex.getPreviousSteps());
if (ex.isInputTool()) {
final Map<String, String> params = ex.getExecutionTimeParameters();
logger.debug("Input tool has " + params);
// Parameter keys ("files.NAME", "file_type") come from the Galaxy upload
// tool's recorded execution-time parameters.
// NOTE(review): params.get("file_type") would NPE if the key is absent —
// presumably always present for input tools; confirm.
foundReferenceInputTool |= params.containsKey("files.NAME") && params.get("files.NAME").contains("reference") && params.get("file_type").contains("fasta");
foundReadsInputTool |= params.get("file_type").contains("fastq");
}
}
assertTrue("Should have found both reads and reference input tools.", foundReadsInputTool && foundReferenceInputTool);
}
Use of ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission in project irida by phac-nml.
Shown below: the testSNVPhylSuccessRemoveSNVDensity method of the SNVPhylAnalysisIT class.
/**
 * Tests out successfully executing the SNVPhyl pipeline and passing a lower value for SNV density threshold to filter out SNVs.
 * <p>
 * Same setup as {@code testSNVPhylSuccess} but with a density-filter window
 * size of 4 (expected outputs are the "...3" reference copies). Also walks the
 * tool provenance to confirm the submitted parameters actually reached the
 * "Consolidate VCFs" and "Verify Mapping Quality" Galaxy tools.
 *
 * @throws Exception if any pipeline stage or verification step fails
 */
@Test
@WithMockUser(username = "aaron", roles = "ADMIN")
public void testSNVPhylSuccessRemoveSNVDensity() throws Exception {
// Register three paired-end sequence file sets against samples 1-3.
SequenceFilePair sequenceFilePairA = databaseSetupGalaxyITService.setupSampleSequenceFileInDatabase(1L, sequenceFilePathsA1List, sequenceFilePathsA2List).get(0);
SequenceFilePair sequenceFilePairB = databaseSetupGalaxyITService.setupSampleSequenceFileInDatabase(2L, sequenceFilePathsB1List, sequenceFilePathsB2List).get(0);
SequenceFilePair sequenceFilePairC = databaseSetupGalaxyITService.setupSampleSequenceFileInDatabase(3L, sequenceFilePathsC1List, sequenceFilePathsC2List).get(0);
// Window size "4" (vs "3" in the basic success test) is the variable under test.
Map<String, String> parameters = ImmutableMap.of("snv-abundance-ratio", "0.75", "minimum-read-coverage", "2", "filter-density-threshold", "2", "filter-density-window-size", "4");
AnalysisSubmission submission = databaseSetupGalaxyITService.setupPairSubmissionInDatabase(Sets.newHashSet(sequenceFilePairA, sequenceFilePairB, sequenceFilePairC), referenceFilePath, parameters, snvPhylWorkflow.getWorkflowIdentifier());
// Drive the submission through all execution stages, then re-read the
// persisted final state.
completeSubmittedAnalyses(submission.getId());
submission = analysisSubmissionRepository.findOne(submission.getId());
assertEquals("analysis state should be completed.", AnalysisState.COMPLETED, submission.getAnalysisState());
Analysis analysisPhylogenomics = submission.getAnalysis();
assertEquals("Should have generated a phylogenomics pipeline analysis type.", AnalysisType.PHYLOGENOMICS, analysisPhylogenomics.getAnalysisType());
assertEquals("the phylogenomics pipeline should have 8 output files.", 8, analysisPhylogenomics.getAnalysisOutputFiles().size());
// Deterministic outputs compared byte-for-byte against the "...3" reference
// copies; Scanner reads only feed the failure messages.
// NOTE(review): the Scanners are never closed (@SuppressWarnings("resource"));
// tolerable in a test, but try-with-resources would be cleaner.
@SuppressWarnings("resource") String matrixContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(MATRIX_KEY).getFile().toFile()).useDelimiter("\\Z").next();
assertTrue("snpMatrix should be the same but is \"" + matrixContent + "\"", com.google.common.io.Files.equal(outputSnvMatrix3.toFile(), analysisPhylogenomics.getAnalysisOutputFile(MATRIX_KEY).getFile().toFile()));
assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(MATRIX_KEY).getCreatedByTool());
@SuppressWarnings("resource") String snpTableContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(TABLE_KEY).getFile().toFile()).useDelimiter("\\Z").next();
assertTrue("snpTable should be the same but is \"" + snpTableContent + "\"", com.google.common.io.Files.equal(outputSnvTable3.toFile(), analysisPhylogenomics.getAnalysisOutputFile(TABLE_KEY).getFile().toFile()));
assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(TABLE_KEY).getCreatedByTool());
@SuppressWarnings("resource") String vcf2coreContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(CORE_KEY).getFile().toFile()).useDelimiter("\\Z").next();
assertTrue("vcf2core should be the same but is \"" + vcf2coreContent + "\"", com.google.common.io.Files.equal(vcf2core3.toFile(), analysisPhylogenomics.getAnalysisOutputFile(CORE_KEY).getFile().toFile()));
assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(CORE_KEY).getCreatedByTool());
// only check size of mapping quality file due to samples output in random order
assertTrue("the mapping quality file should not be empty.", Files.size(analysisPhylogenomics.getAnalysisOutputFile(QUALITY_KEY).getFile()) > 0);
@SuppressWarnings("resource") String filterStatsContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(STATS_KEY).getFile().toFile()).useDelimiter("\\Z").next();
assertTrue("filterStats should be the same but is \"" + filterStatsContent + "\"", com.google.common.io.Files.equal(filterStats3.toFile(), analysisPhylogenomics.getAnalysisOutputFile(STATS_KEY).getFile().toFile()));
assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(STATS_KEY).getCreatedByTool());
@SuppressWarnings("resource") String snvAlignContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(ALIGN_KEY).getFile().toFile()).useDelimiter("\\Z").next();
assertTrue("snvAlign should be the same but is \"" + snvAlignContent + "\"", com.google.common.io.Files.equal(snvAlign3.toFile(), analysisPhylogenomics.getAnalysisOutputFile(ALIGN_KEY).getFile().toFile()));
assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(ALIGN_KEY).getCreatedByTool());
// only test to make sure the files have a valid size since PhyML uses a
// random seed to generate the tree (and so changes results)
assertTrue("the phylogenetic tree file should not be empty.", Files.size(analysisPhylogenomics.getAnalysisOutputFile(TREE_KEY).getFile()) > 0);
// NOTE(review): this assertion reads TREE_KEY again although the message
// refers to the tree *stats* file — looks like a copy-paste slip; verify
// whether a separate tree-stats output key was intended here.
assertTrue("the phylogenetic tree stats file should not be empty.", Files.size(analysisPhylogenomics.getAnalysisOutputFile(TREE_KEY).getFile()) > 0);
// try to follow the phylogenomics provenance all the way back to the
// upload tools
List<ToolExecution> toolsToVisit = Lists.newArrayList(analysisPhylogenomics.getAnalysisOutputFile(TREE_KEY).getCreatedByTool());
assertFalse("file should have tool provenance attached.", toolsToVisit.isEmpty());
// Execution-time parameter values recovered from provenance; remain null if
// the corresponding tool is never found (the assertEquals below then fails).
String minVcf2AlignCov = null;
String altAlleleFraction = null;
String minimumPercentCoverage = null;
String minimumDepthVerify = null;
String filterDensityThreshold = null;
String filterDensityWindowSize = null;
// one where you upload the reads.
// Breadth-first walk of the provenance graph; stop at the first
// "Consolidate VCFs" step and capture its parameters.
while (!toolsToVisit.isEmpty()) {
final ToolExecution ex = toolsToVisit.remove(0);
toolsToVisit.addAll(ex.getPreviousSteps());
if (ex.getToolName().contains("Consolidate VCFs")) {
final Map<String, String> params = ex.getExecutionTimeParameters();
minVcf2AlignCov = params.get("coverage");
altAlleleFraction = params.get("snv_abundance_ratio");
filterDensityThreshold = params.get("use_density_filter.threshold");
filterDensityWindowSize = params.get("use_density_filter.window_size");
break;
}
}
// try to follow the mapping quality provenance all the way back to the
// upload tools
toolsToVisit = Lists.newArrayList(analysisPhylogenomics.getAnalysisOutputFile(QUALITY_KEY).getCreatedByTool());
assertFalse("file should have tool provenance attached.", toolsToVisit.isEmpty());
// Second walk (no break): captures the last "Verify Mapping Quality" step seen.
while (!toolsToVisit.isEmpty()) {
final ToolExecution ex = toolsToVisit.remove(0);
toolsToVisit.addAll(ex.getPreviousSteps());
if (ex.getToolName().contains("Verify Mapping Quality")) {
final Map<String, String> params = ex.getExecutionTimeParameters();
minimumPercentCoverage = params.get("minmap");
minimumDepthVerify = params.get("mindepth");
}
}
// Some Galaxy tools record values quoted ("\"2\"") and others unquoted ("2"),
// hence the mixed expected forms below.
assertEquals("incorrect minimum vcf 2 align coverage", "\"2\"", minVcf2AlignCov);
assertEquals("incorrect alternative allele fraction", "\"0.75\"", altAlleleFraction);
assertEquals("incorrect minimum depth for verify map", "\"2\"", minimumDepthVerify);
assertEquals("incorrect min percent coverage for verify map", "\"80\"", minimumPercentCoverage);
assertEquals("incorrect filter density threshold", "2", filterDensityThreshold);
assertEquals("incorrect filter density window size", "4", filterDensityWindowSize);
}
Use of ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission in project irida by phac-nml.
Shown below: the completeSubmittedAnalyses method of the SNVPhylAnalysisIT class.
/**
 * Drives the analysis submission with the given id through every scheduled
 * execution stage, blocking until the pipeline has finished and its results
 * have been transferred back into IRIDA.
 *
 * @param submissionId id of the {@link AnalysisSubmission} to complete
 * @throws Exception if any execution stage fails or times out
 */
private void completeSubmittedAnalyses(Long submissionId) throws Exception {
	// Push the submission through preparation and execution in Galaxy.
	waitUntilAnalysisStageComplete(analysisExecutionScheduledTask.prepareAnalyses());
	waitUntilAnalysisStageComplete(analysisExecutionScheduledTask.executeAnalyses());
	// Block until Galaxy reports the submitted workflow itself has finished.
	databaseSetupGalaxyITService.waitUntilSubmissionComplete(analysisSubmissionRepository.findOne(submissionId));
	// Let the scheduler observe completion and pull the results back.
	waitUntilAnalysisStageComplete(analysisExecutionScheduledTask.monitorRunningAnalyses());
	waitUntilAnalysisStageComplete(analysisExecutionScheduledTask.transferAnalysesResults());
}
Use of ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission in project irida by phac-nml.
Shown below: the testBuildWithNamedParameters method of the AnalysisSubmissionTest class.
@Test
public void testBuildWithNamedParameters() {
	// Build a submission from named parameters rather than a raw parameter map.
	final AnalysisSubmission submission = AnalysisSubmission.builder(workflowId)
			.withNamedParameters(namedParameters)
			.inputFiles(Sets.newHashSet(singleEndFile))
			.build();
	// The named parameters should be unpacked into the submission's input parameters.
	assertEquals("analysis submission should have a reference to the specified named parameters.",
			inputParameters, submission.getInputParameters());
}
Aggregations