
Example 6 with Analysis

use of ca.corefacility.bioinformatics.irida.model.workflow.analysis.Analysis in project irida by phac-nml.

the class AnalysisController method getSistrAnalysis.

/**
 * Get the SISTR analysis information to display
 *
 * @param id ID of the analysis submission
 * @return Json results for the SISTR analysis
 */
@SuppressWarnings("resource")
@RequestMapping("/ajax/sistr/{id}")
@ResponseBody
public Map<String, Object> getSistrAnalysis(@PathVariable Long id) {
    AnalysisSubmission submission = analysisSubmissionService.read(id);
    Collection<Sample> samples = sampleService.getSamplesForAnalysisSubmission(submission);
    Map<String, Object> result = ImmutableMap.of("parse_results_error", true);
    final String sistrFileKey = "sistr-predictions";
    // Get details about the workflow
    UUID workflowUUID = submission.getWorkflowId();
    IridaWorkflow iridaWorkflow;
    try {
        iridaWorkflow = workflowsService.getIridaWorkflow(workflowUUID);
    } catch (IridaWorkflowNotFoundException e) {
        logger.error("Error finding workflow, ", e);
        throw new EntityNotFoundException("Couldn't find workflow for submission " + submission.getId(), e);
    }
    AnalysisType analysisType = iridaWorkflow.getWorkflowDescription().getAnalysisType();
    if (analysisType.equals(AnalysisType.SISTR_TYPING)) {
        Analysis analysis = submission.getAnalysis();
        Path path = analysis.getAnalysisOutputFile(sistrFileKey).getFile();
        try {
            String json = new Scanner(new BufferedReader(new FileReader(path.toFile()))).useDelimiter("\\Z").next();
            // verify file is proper json file
            ObjectMapper mapper = new ObjectMapper();
            List<Map<String, Object>> sistrResults = mapper.readValue(json, new TypeReference<List<Map<String, Object>>>() {
            });
            if (sistrResults.size() > 0) {
                // should only ever be one sample for these results
                if (samples.size() == 1) {
                    Sample sample = samples.iterator().next();
                    result = sistrResults.get(0);
                    result.put("parse_results_error", false);
                    result.put("sample_name", sample.getSampleName());
                } else {
                    logger.error("Invalid number of associated samles for submission " + submission);
                }
            } else {
                logger.error("SISTR results for file [" + path + "] are not correctly formatted");
            }
        } catch (FileNotFoundException e) {
            logger.error("File [" + path + "] not found", e);
        } catch (JsonParseException | JsonMappingException e) {
            logger.error("Error attempting to parse file [" + path + "] as JSON", e);
        } catch (IOException e) {
            logger.error("Error reading file [" + path + "]", e);
        }
    }
    return result;
}
Also used : AnalysisType(ca.corefacility.bioinformatics.irida.model.enums.AnalysisType) IridaWorkflow(ca.corefacility.bioinformatics.irida.model.workflow.IridaWorkflow) AnalysisSubmission(ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission) JsonParseException(com.fasterxml.jackson.core.JsonParseException) JsonMappingException(com.fasterxml.jackson.databind.JsonMappingException) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Path(java.nio.file.Path) Sample(ca.corefacility.bioinformatics.irida.model.sample.Sample) EntityNotFoundException(ca.corefacility.bioinformatics.irida.exceptions.EntityNotFoundException) IridaWorkflowNotFoundException(ca.corefacility.bioinformatics.irida.exceptions.IridaWorkflowNotFoundException) Analysis(ca.corefacility.bioinformatics.irida.model.workflow.analysis.Analysis) ImmutableMap(com.google.common.collect.ImmutableMap)
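
The method's core pattern is simple: slurp the whole output file into a string, then let Jackson map the JSON array onto List<Map<String, Object>>. Below is a minimal, self-contained sketch of that pattern with nothing IRIDA-specific; the file path, class name, and "serovar" key are illustrative assumptions, and only the Jackson calls mirror the controller above.

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.Map;

public class SistrJsonSketch {
    public static void main(String[] args) throws IOException {
        // hypothetical path to a SISTR predictions file
        Path path = Paths.get("sistr-predictions.json");
        // Files.readAllBytes avoids the Scanner "\\Z" trick and its resource warning
        String json = new String(Files.readAllBytes(path), StandardCharsets.UTF_8);
        ObjectMapper mapper = new ObjectMapper();
        // SISTR writes one result object per sample, so the top level is a JSON array
        List<Map<String, Object>> results = mapper.readValue(json,
                new TypeReference<List<Map<String, Object>>>() {
                });
        if (!results.isEmpty()) {
            // "serovar" is an illustrative key, not a guaranteed field name
            System.out.println(results.get(0).get("serovar"));
        }
    }
}

The same readValue call is what raises JsonParseException or JsonMappingException on malformed input, which is exactly what the controller's catch blocks guard against.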

Example 7 with Analysis

use of ca.corefacility.bioinformatics.irida.model.workflow.analysis.Analysis in project irida by phac-nml.

the class AnalysisController method getNewickForAnalysis.

/**
 * Get a newick file associated with a specific {@link AnalysisSubmission}.
 *
 * @param submissionId {@link Long} id for an {@link AnalysisSubmission}
 * @return {@link Map} containing the newick file contents.
 * @throws IOException if the newick file cannot be read
 */
@RequestMapping("/ajax/{submissionId}/newick")
@ResponseBody
public Map<String, Object> getNewickForAnalysis(@PathVariable Long submissionId) throws IOException {
    final String treeFileKey = "tree";
    AnalysisSubmission submission = analysisSubmissionService.read(submissionId);
    Analysis analysis = submission.getAnalysis();
    AnalysisOutputFile file = analysis.getAnalysisOutputFile(treeFileKey);
    List<String> lines = Files.readAllLines(file.getFile());
    return ImmutableMap.of("newick", lines.get(0));
}
Also used : Analysis(ca.corefacility.bioinformatics.irida.model.workflow.analysis.Analysis) AnalysisSubmission(ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission) AnalysisOutputFile(ca.corefacility.bioinformatics.irida.model.workflow.analysis.AnalysisOutputFile)
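
Since the method returns only lines.get(0), it assumes the tree is written as single-line newick; an empty file would actually throw an IndexOutOfBoundsException rather than the documented IOException. A standalone sketch of the same read with that edge case handled (the file name and class are hypothetical):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;

public class NewickSketch {
    public static void main(String[] args) throws IOException {
        // hypothetical tree file; SNVPhyl writes one newick string on one line
        Path tree = Paths.get("snp_tree.tree");
        List<String> lines = Files.readAllLines(tree);
        if (lines.isEmpty()) {
            throw new IOException("Empty newick file: " + tree);
        }
        // the first line is the entire tree for single-line newick output
        System.out.println(lines.get(0));
    }
}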

Example 8 with Analysis

use of ca.corefacility.bioinformatics.irida.model.workflow.analysis.Analysis in project irida by phac-nml.

the class SNVPhylAnalysisIT method testSNVPhylSuccessHigherSNVReadProportion.

/**
 * Tests successfully executing the SNVPhyl pipeline while passing a higher
 * value for the fraction of reads required to call a SNV.
 *
 * @throws Exception if an error occurs while running the pipeline
 */
@Test
@WithMockUser(username = "aaron", roles = "ADMIN")
public void testSNVPhylSuccessHigherSNVReadProportion() throws Exception {
    SequenceFilePair sequenceFilePairA = databaseSetupGalaxyITService.setupSampleSequenceFileInDatabase(1L, sequenceFilePathsA1List, sequenceFilePathsA2List).get(0);
    SequenceFilePair sequenceFilePairB = databaseSetupGalaxyITService.setupSampleSequenceFileInDatabase(2L, sequenceFilePathsB1List, sequenceFilePathsB2List).get(0);
    SequenceFilePair sequenceFilePairC = databaseSetupGalaxyITService.setupSampleSequenceFileInDatabase(3L, sequenceFilePathsC1List, sequenceFilePathsC2List).get(0);
    waitForFilesToSettle(sequenceFilePairA, sequenceFilePairB, sequenceFilePairC);
    Map<String, String> parameters = ImmutableMap.<String, String>builder().put("snv-abundance-ratio", "0.90").put("minimum-read-coverage", "2").put("minimum-percent-coverage", "75").put("minimum-mean-mapping-quality", "20").put("filter-density-threshold", "3").put("filter-density-window-size", "30").build();
    AnalysisSubmission submission = databaseSetupGalaxyITService.setupPairSubmissionInDatabase(Sets.newHashSet(sequenceFilePairA, sequenceFilePairB, sequenceFilePairC), referenceFilePath, parameters, snvPhylWorkflow.getWorkflowIdentifier());
    completeSubmittedAnalyses(submission.getId());
    submission = analysisSubmissionRepository.findOne(submission.getId());
    assertEquals("analysis state should be completed.", AnalysisState.COMPLETED, submission.getAnalysisState());
    Analysis analysisPhylogenomics = submission.getAnalysis();
    assertEquals("Should have generated a phylogenomics pipeline analysis type.", AnalysisType.PHYLOGENOMICS, analysisPhylogenomics.getAnalysisType());
    assertEquals("the phylogenomics pipeline should have 8 output files.", 8, analysisPhylogenomics.getAnalysisOutputFiles().size());
    @SuppressWarnings("resource") String matrixContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(MATRIX_KEY).getFile().toFile()).useDelimiter("\\Z").next();
    assertTrue("snpMatrix should be the same but is \"" + matrixContent + "\"", com.google.common.io.Files.equal(outputSnvMatrix2.toFile(), analysisPhylogenomics.getAnalysisOutputFile(MATRIX_KEY).getFile().toFile()));
    assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(MATRIX_KEY).getCreatedByTool());
    @SuppressWarnings("resource") String snpTableContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(TABLE_KEY).getFile().toFile()).useDelimiter("\\Z").next();
    assertTrue("snpTable should be the same but is \"" + snpTableContent + "\"", com.google.common.io.Files.equal(outputSnvTable2.toFile(), analysisPhylogenomics.getAnalysisOutputFile(TABLE_KEY).getFile().toFile()));
    assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(TABLE_KEY).getCreatedByTool());
    @SuppressWarnings("resource") String vcf2coreContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(CORE_KEY).getFile().toFile()).useDelimiter("\\Z").next();
    assertTrue("vcf2core should be the same but is \"" + vcf2coreContent + "\"", com.google.common.io.Files.equal(vcf2core2.toFile(), analysisPhylogenomics.getAnalysisOutputFile(CORE_KEY).getFile().toFile()));
    assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(CORE_KEY).getCreatedByTool());
    // only check size of mapping quality file due to samples output in random order
    assertTrue("the mapping quality file should not be empty.", Files.size(analysisPhylogenomics.getAnalysisOutputFile(QUALITY_KEY).getFile()) > 0);
    @SuppressWarnings("resource") String filterStatsContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(STATS_KEY).getFile().toFile()).useDelimiter("\\Z").next();
    assertTrue("filterStats should be the same but is \"" + filterStatsContent + "\"", com.google.common.io.Files.equal(filterStats2.toFile(), analysisPhylogenomics.getAnalysisOutputFile(STATS_KEY).getFile().toFile()));
    assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(STATS_KEY).getCreatedByTool());
    @SuppressWarnings("resource") String snvAlignContent = new Scanner(analysisPhylogenomics.getAnalysisOutputFile(ALIGN_KEY).getFile().toFile()).useDelimiter("\\Z").next();
    assertTrue("snvAlign should be the same but is \"" + snvAlignContent + "\"", com.google.common.io.Files.equal(snvAlign2.toFile(), analysisPhylogenomics.getAnalysisOutputFile(ALIGN_KEY).getFile().toFile()));
    assertNotNull("file should have tool provenance attached.", analysisPhylogenomics.getAnalysisOutputFile(ALIGN_KEY).getCreatedByTool());
    // only test to make sure the files have a valid size since PhyML uses a
    // random seed to generate the tree (and so changes results)
    assertTrue("the phylogenetic tree file should not be empty.", Files.size(analysisPhylogenomics.getAnalysisOutputFile(TREE_KEY).getFile()) > 0);
    assertTrue("the phylogenetic tree stats file should not be empty.", Files.size(analysisPhylogenomics.getAnalysisOutputFile(TREE_KEY).getFile()) > 0);
    // try to follow the phylogenomics provenance all the way back to the
    // upload tools
    List<ToolExecution> toolsToVisit = Lists.newArrayList(analysisPhylogenomics.getAnalysisOutputFile(TREE_KEY).getCreatedByTool());
    assertFalse("file should have tool provenance attached.", toolsToVisit.isEmpty());
    String minVcf2AlignCov = null;
    String altAlleleFraction = null;
    String minimumPercentCoverage = null;
    String minimumDepthVerify = null;
    String filterDensityThreshold = null;
    String filterDensityWindowSize = null;
    // navigate through the tree of tool executions until we find the
    // one where you upload the reads.
    while (!toolsToVisit.isEmpty()) {
        final ToolExecution ex = toolsToVisit.remove(0);
        toolsToVisit.addAll(ex.getPreviousSteps());
        if (ex.getToolName().contains("Consolidate VCFs")) {
            final Map<String, String> params = ex.getExecutionTimeParameters();
            minVcf2AlignCov = params.get("coverage");
            altAlleleFraction = params.get("snv_abundance_ratio");
            filterDensityThreshold = params.get("use_density_filter.threshold");
            filterDensityWindowSize = params.get("use_density_filter.window_size");
            break;
        }
    }
    // try to follow the mapping quality provenance all the way back to the
    // upload tools
    toolsToVisit = Lists.newArrayList(analysisPhylogenomics.getAnalysisOutputFile(QUALITY_KEY).getCreatedByTool());
    assertFalse("file should have tool provenance attached.", toolsToVisit.isEmpty());
    while (!toolsToVisit.isEmpty()) {
        final ToolExecution ex = toolsToVisit.remove(0);
        toolsToVisit.addAll(ex.getPreviousSteps());
        if (ex.getToolName().contains("Verify Mapping Quality")) {
            final Map<String, String> params = ex.getExecutionTimeParameters();
            minimumPercentCoverage = params.get("minmap");
            minimumDepthVerify = params.get("mindepth");
        }
    }
    assertEquals("incorrect minimum vcf 2 align coverage", "\"2\"", minVcf2AlignCov);
    assertEquals("incorrect alternative allele fraction", "\"0.90\"", altAlleleFraction);
    assertEquals("incorrect minimum depth for verify map", "\"2\"", minimumDepthVerify);
    assertEquals("incorrect min percent coverage for verify map", "\"75\"", minimumPercentCoverage);
    assertEquals("incorrect filter density threshold", "3", filterDensityThreshold);
    assertEquals("incorrect filter density window size", "30", filterDensityWindowSize);
}
Also used : SequenceFilePair(ca.corefacility.bioinformatics.irida.model.sequenceFile.SequenceFilePair) Scanner(java.util.Scanner) ToolExecution(ca.corefacility.bioinformatics.irida.model.workflow.analysis.ToolExecution) Analysis(ca.corefacility.bioinformatics.irida.model.workflow.analysis.Analysis) AnalysisSubmission(ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission) WithMockUser(org.springframework.security.test.context.support.WithMockUser) Test(org.junit.Test)
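
The two provenance walks in this test are the same breadth-first traversal, differing only in the tool-name predicate. A hedged sketch of how that traversal could be factored into a helper (the class and method names are ours, not IRIDA's; only getToolName() and getPreviousSteps() are assumed from ToolExecution):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Optional;

import ca.corefacility.bioinformatics.irida.model.workflow.analysis.ToolExecution;

public class ProvenanceWalk {
    /**
     * Walk back through a tool-execution provenance tree, breadth first, and
     * return the first execution whose tool name contains the given fragment.
     */
    public static Optional<ToolExecution> findTool(ToolExecution start, String nameFragment) {
        Deque<ToolExecution> toVisit = new ArrayDeque<>();
        toVisit.add(start);
        while (!toVisit.isEmpty()) {
            ToolExecution ex = toVisit.remove();
            if (ex.getToolName().contains(nameFragment)) {
                return Optional.of(ex);
            }
            // getPreviousSteps() moves one level further back toward the upload tools
            toVisit.addAll(ex.getPreviousSteps());
        }
        return Optional.empty();
    }
}

With such a helper, each block above reduces to a findTool(...) call followed by getExecutionTimeParameters() lookups.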

Example 9 with Analysis

use of ca.corefacility.bioinformatics.irida.model.workflow.analysis.Analysis in project irida by phac-nml.

the class SISTRSampleUpdaterTest method testUpdaterPassed.

@SuppressWarnings("unchecked")
@Test
public void testUpdaterPassed() throws PostProcessingException, AnalysisAlreadySetException {
    ImmutableMap<String, String> expectedResults = ImmutableMap.of("SISTR serovar", "Enteritidis", "SISTR cgMLST Subspecies", "enterica", "SISTR QC Status", "PASS");
    Path outputPath = Paths.get("src/test/resources/files/sistr-predictions-pass.json");
    AnalysisOutputFile outputFile = new AnalysisOutputFile(outputPath, null, null, null);
    Analysis analysis = new Analysis(null, ImmutableMap.of("sistr-predictions", outputFile), null, null);
    AnalysisSubmission submission = AnalysisSubmission.builder(UUID.randomUUID()).inputFiles(ImmutableSet.of(new SingleEndSequenceFile(null))).build();
    submission.setAnalysis(analysis);
    Sample sample = new Sample();
    sample.setId(1L);
    ImmutableMap<MetadataTemplateField, MetadataEntry> metadataMap = ImmutableMap.of(new MetadataTemplateField("SISTR Field", "text"), new MetadataEntry("Value1", "text"));
    when(metadataTemplateService.getMetadataMap(any(Map.class))).thenReturn(metadataMap);
    updater.update(Lists.newArrayList(sample), submission);
    ArgumentCaptor<Map> mapCaptor = ArgumentCaptor.forClass(Map.class);
    // this is the important bit: ensures the correct values got pulled from the file
    verify(metadataTemplateService).getMetadataMap(mapCaptor.capture());
    Map<String, MetadataEntry> metadata = mapCaptor.getValue();
    int found = 0;
    for (Map.Entry<String, MetadataEntry> e : metadata.entrySet()) {
        if (expectedResults.containsKey(e.getKey())) {
            String expected = expectedResults.get(e.getKey());
            MetadataEntry value = e.getValue();
            assertEquals("metadata values should match", expected, value.getValue());
            found++;
        }
    }
    assertEquals("should have found the same number of results", expectedResults.keySet().size(), found);
    // this bit just ensures the merged data got saved
    verify(sampleService).updateFields(eq(sample.getId()), mapCaptor.capture());
    Map<MetadataTemplateField, MetadataEntry> value = (Map<MetadataTemplateField, MetadataEntry>) mapCaptor.getValue().get("metadata");
    assertEquals(metadataMap.keySet().iterator().next(), value.keySet().iterator().next());
}
Also used : Path(java.nio.file.Path) Sample(ca.corefacility.bioinformatics.irida.model.sample.Sample) AnalysisSubmission(ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission) SingleEndSequenceFile(ca.corefacility.bioinformatics.irida.model.sequenceFile.SingleEndSequenceFile) Analysis(ca.corefacility.bioinformatics.irida.model.workflow.analysis.Analysis) MetadataEntry(ca.corefacility.bioinformatics.irida.model.sample.metadata.MetadataEntry) MetadataTemplateField(ca.corefacility.bioinformatics.irida.model.sample.MetadataTemplateField) AnalysisOutputFile(ca.corefacility.bioinformatics.irida.model.workflow.analysis.AnalysisOutputFile) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) Test(org.junit.Test)
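
The captor pattern used twice in this test (verify the mock, capture the Map it received, then assert on its entries) is plain Mockito. A pared-down sketch with a hypothetical collaborator interface standing in for metadataTemplateService:

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import java.util.Map;

import org.junit.Test;
import org.mockito.ArgumentCaptor;

import com.google.common.collect.ImmutableMap;

public class CaptorSketchTest {
    // hypothetical collaborator, standing in for metadataTemplateService
    interface Collaborator {
        void accept(Map<String, String> values);
    }

    @SuppressWarnings("unchecked")
    @Test
    public void capturesArgument() {
        Collaborator collaborator = mock(Collaborator.class);
        collaborator.accept(ImmutableMap.of("SISTR serovar", "Enteritidis"));

        // same raw-typed captor style as the test above
        ArgumentCaptor<Map> captor = ArgumentCaptor.forClass(Map.class);
        verify(collaborator).accept(captor.capture());
        assertEquals("Enteritidis", captor.getValue().get("SISTR serovar"));
    }
}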

Example 10 with Analysis

use of ca.corefacility.bioinformatics.irida.model.workflow.analysis.Analysis in project irida by phac-nml.

the class SISTRSampleUpdaterTest method testUpdaterBadFile.

@Test(expected = PostProcessingException.class)
public void testUpdaterBadFile() throws PostProcessingException, AnalysisAlreadySetException {
    ImmutableMap<String, String> expectedResults = ImmutableMap.of("SISTR serovar", "Enteritidis", "SISTR cgMLST Subspecies", "enterica", "SISTR QC Status", "PASS");
    Path outputPath = Paths.get("src/test/resources/files/snp_tree.tree");
    AnalysisOutputFile outputFile = new AnalysisOutputFile(outputPath, null, null, null);
    Analysis analysis = new Analysis(null, ImmutableMap.of("sistr-predictions", outputFile), null, null);
    AnalysisSubmission submission = AnalysisSubmission.builder(UUID.randomUUID()).inputFiles(ImmutableSet.of(new SingleEndSequenceFile(null))).build();
    submission.setAnalysis(analysis);
    Sample sample = new Sample();
    sample.setId(1L);
    updater.update(Lists.newArrayList(sample), submission);
}
Also used : Path(java.nio.file.Path) Analysis(ca.corefacility.bioinformatics.irida.model.workflow.analysis.Analysis) Sample(ca.corefacility.bioinformatics.irida.model.sample.Sample) AnalysisSubmission(ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission) AnalysisOutputFile(ca.corefacility.bioinformatics.irida.model.workflow.analysis.AnalysisOutputFile) SingleEndSequenceFile(ca.corefacility.bioinformatics.irida.model.sequenceFile.SingleEndSequenceFile) Test(org.junit.Test)

Aggregations

Analysis (ca.corefacility.bioinformatics.irida.model.workflow.analysis.Analysis): 41 usages
AnalysisSubmission (ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission): 30 usages
Test (org.junit.Test): 28 usages
AnalysisOutputFile (ca.corefacility.bioinformatics.irida.model.workflow.analysis.AnalysisOutputFile): 21 usages
WithMockUser (org.springframework.security.test.context.support.WithMockUser): 19 usages
Path (java.nio.file.Path): 17 usages
SequenceFilePair (ca.corefacility.bioinformatics.irida.model.sequenceFile.SequenceFilePair): 14 usages
IridaWorkflow (ca.corefacility.bioinformatics.irida.model.workflow.IridaWorkflow): 12 usages
ToolExecution (ca.corefacility.bioinformatics.irida.model.workflow.analysis.ToolExecution): 12 usages
SingleEndSequenceFile (ca.corefacility.bioinformatics.irida.model.sequenceFile.SingleEndSequenceFile): 11 usages
Sample (ca.corefacility.bioinformatics.irida.model.sample.Sample): 9 usages
HistoriesClient (com.github.jmchilton.blend4j.galaxy.HistoriesClient): 7 usages
ToolsClient (com.github.jmchilton.blend4j.galaxy.ToolsClient): 7 usages
WorkflowsClient (com.github.jmchilton.blend4j.galaxy.WorkflowsClient): 7 usages
History (com.github.jmchilton.blend4j.galaxy.beans.History): 7 usages
Workflow (com.github.jmchilton.blend4j.galaxy.beans.Workflow): 7 usages
EntityNotFoundException (ca.corefacility.bioinformatics.irida.exceptions.EntityNotFoundException): 6 usages
AnalysisType (ca.corefacility.bioinformatics.irida.model.enums.AnalysisType): 5 usages
Scanner (java.util.Scanner): 5 usages
ExecutionManagerException (ca.corefacility.bioinformatics.irida.exceptions.ExecutionManagerException): 4 usages