Use of ca.corefacility.bioinformatics.irida.model.sequenceFile.SingleEndSequenceFile in project irida by phac-nml.
In class PipelineController, the method ajaxStartPipeline:
// ************************************************************************************************
// AJAX
// ************************************************************************************************

/**
 * Launch a pipeline.
 *
 * @param locale     the locale that the browser is using for the current request.
 * @param parameters DTO of pipeline start parameters.
 * @return a JSON response with the status and any messages.
 */
@RequestMapping(value = "/ajax/start", method = RequestMethod.POST)
@ResponseBody
public Map<String, Object> ajaxStartPipeline(Locale locale,
        @RequestBody final PipelineStartParameters parameters) {
    try {
        IridaWorkflow flow = workflowsService.getIridaWorkflow(parameters.getWorkflowId());
        IridaWorkflowDescription description = flow.getWorkflowDescription();

        // The pipeline needs to have a name.
        String name = parameters.getName();
        if (Strings.isNullOrEmpty(name)) {
            return ImmutableMap.of("error",
                    messageSource.getMessage("workflow.no-name-provided", null, locale));
        }

        // Check to see if a reference file is required.
        Long ref = parameters.getRef();
        if (description.requiresReference() && ref == null) {
            return ImmutableMap.of("error",
                    messageSource.getMessage("pipeline.error.no-reference.pipeline-start", null, locale));
        }

        // Get a list of the files to submit.
        List<SingleEndSequenceFile> singleEndFiles = new ArrayList<>();
        List<SequenceFilePair> sequenceFilePairs = new ArrayList<>();

        List<Long> single = parameters.getSingle();
        if (single != null) {
            Iterable<SequencingObject> readMultiple = sequencingObjectService.readMultiple(single);
            readMultiple.forEach(f -> {
                if (!(f instanceof SingleEndSequenceFile)) {
                    throw new IllegalArgumentException("file " + f.getId() + " not a SingleEndSequenceFile");
                }
                singleEndFiles.add((SingleEndSequenceFile) f);
            });

            // Check the single-end files for duplicates in a sample; throws DuplicateSampleException.
            sequencingObjectService.getUniqueSamplesForSequencingObjects(Sets.newHashSet(singleEndFiles));
        }

        List<Long> paired = parameters.getPaired();
        if (paired != null) {
            Iterable<SequencingObject> readMultiple = sequencingObjectService.readMultiple(paired);
            readMultiple.forEach(f -> {
                if (!(f instanceof SequenceFilePair)) {
                    throw new IllegalArgumentException("file " + f.getId() + " not a SequenceFilePair");
                }
                sequenceFilePairs.add((SequenceFilePair) f);
            });

            // Check the paired files for duplicates in a sample; throws DuplicateSampleException.
            sequencingObjectService.getUniqueSamplesForSequencingObjects(Sets.newHashSet(sequenceFilePairs));
        }

        // Get the pipeline parameters.
        Map<String, String> params = new HashMap<>();
        IridaWorkflowNamedParameters namedParameters = null;
        Map<String, Object> selectedParameters = parameters.getSelectedParameters();
        if (selectedParameters != null) {
            try {
                final String selectedParametersId = selectedParameters.get("id").toString();
                if (!DEFAULT_WORKFLOW_PARAMETERS_ID.equals(selectedParametersId)
                        && !CUSTOM_UNSAVED_WORKFLOW_PARAMETERS_ID.equals(selectedParametersId)) {
                    // A named parameter set was selected and left unmodified, so load
                    // up that named parameter set to pass along.
                    namedParameters = namedParameterService.read(Long.valueOf(selectedParametersId));
                } else {
                    @SuppressWarnings("unchecked")
                    final List<Map<String, String>> unnamedParameters =
                            (List<Map<String, String>>) selectedParameters.get("parameters");
                    for (final Map<String, String> parameter : unnamedParameters) {
                        params.put(parameter.get("name"), parameter.get("value"));
                    }
                }
            } catch (Exception e) {
                return ImmutableMap.of("parameterError",
                        messageSource.getMessage("pipeline.parameters.error", null, locale));
            }
        }

        List<Project> projectsToShare = new ArrayList<>();
        List<Long> sharedProjects = parameters.getSharedProjects();
        if (sharedProjects != null && !sharedProjects.isEmpty()) {
            projectsToShare = Lists.newArrayList(projectService.readMultiple(sharedProjects));
        }

        String analysisDescription = parameters.getDescription();
        Boolean writeResultsToSamples = parameters.getWriteResultsToSamples();

        if (description.getInputs().requiresSingleSample()) {
            analysisSubmissionService.createSingleSampleSubmission(flow, ref, singleEndFiles, sequenceFilePairs,
                    params, namedParameters, name, analysisDescription, projectsToShare, writeResultsToSamples);
        } else {
            analysisSubmissionService.createMultipleSampleSubmission(flow, ref, singleEndFiles, sequenceFilePairs,
                    params, namedParameters, name, analysisDescription, projectsToShare, writeResultsToSamples);
        }
    } catch (IridaWorkflowNotFoundException e) {
        logger.error("Cannot find IridaWorkflow [" + parameters.getWorkflowId() + "]", e);
        return ImmutableMap.of("pipelineError",
                messageSource.getMessage("pipeline.error.invalid-pipeline", null, locale));
    } catch (DuplicateSampleException e) {
        logger.error("Multiple files for Sample found", e);
        return ImmutableMap.of("pipelineError",
                messageSource.getMessage("pipeline.error.duplicate-samples", null, locale));
    }

    return ImmutableMap.of("success", true);
}
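The shape of the PipelineStartParameters DTO can be read off the getters the controller calls. Below is a minimal sketch of what Jackson would bind the JSON body to; every field name and type is inferred from those getter calls rather than taken from the actual IRIDA class (the UUID type for workflowId in particular is an assumption):

import java.util.List;
import java.util.Map;
import java.util.UUID;

// Hypothetical reconstruction of the request DTO; the real class may differ.
public class PipelineStartParameters {
    private UUID workflowId;                        // workflow to launch (UUID is an assumption)
    private String name;                            // required pipeline name
    private Long ref;                               // optional reference file id
    private List<Long> single;                      // SingleEndSequenceFile ids
    private List<Long> paired;                      // SequenceFilePair ids
    private Map<String, Object> selectedParameters; // an "id" plus a "parameters" list of name/value maps
    private List<Long> sharedProjects;              // projects to share results with
    private String description;                     // free-text analysis description
    private Boolean writeResultsToSamples;          // write results back to samples?

    // Getters and setters omitted for brevity; Jackson binds the JSON body to these fields.
}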
Use of ca.corefacility.bioinformatics.irida.model.sequenceFile.SingleEndSequenceFile in project irida by phac-nml.
In class GzipFileProcessorTest, the method testExceptionBehaviours:
@Test(expected = FileProcessorException.class)
public void testExceptionBehaviours() throws IOException {
    final SequenceFile sf = constructSequenceFile();

    // Compress the file and update the sequence file reference.
    Path uncompressed = sf.getFile();
    Path compressed = Files.createTempFile(null, ".gz");
    GZIPOutputStream out = new GZIPOutputStream(Files.newOutputStream(compressed));
    Files.copy(uncompressed, out);
    out.close();
    sf.setFile(compressed);

    SingleEndSequenceFile so = new SingleEndSequenceFile(sf);
    when(sequenceFileRepository.save(any(SequenceFile.class))).thenThrow(new RuntimeException());

    fileProcessor.process(so);
}
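This test and the next one repeat the same compress-to-a-temp-file steps. A small helper using only java.nio.file and java.util.zip could absorb that duplication; this is a refactoring sketch, not a method that exists in GzipFileProcessorTest:

// Compresses the given file into a fresh temporary ".gz" file and returns its path.
// try-with-resources closes the stream even if the copy fails.
private static Path gzipToTempFile(Path uncompressed) throws IOException {
    Path compressed = Files.createTempFile(null, ".gz");
    try (GZIPOutputStream out = new GZIPOutputStream(Files.newOutputStream(compressed))) {
        Files.copy(uncompressed, out);
    }
    return compressed;
}

Each test's setup would then reduce to sf.setFile(gzipToTempFile(sf.getFile())).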
Use of ca.corefacility.bioinformatics.irida.model.sequenceFile.SingleEndSequenceFile in project irida by phac-nml.
In class GzipFileProcessorTest, the method handleCompressedFileWithGzExtension:
@Test
public void handleCompressedFileWithGzExtension() throws IOException {
    // The file processor should decompress the file, then update the
    // sequence file in the database.
    SequenceFile sf = constructSequenceFile();
    SequenceFile sfUpdated = new SequenceFile();
    sfUpdated.setFile(sf.getFile());
    final Long id = 1L;
    sf.setId(id);

    // Compress the file and update the sequence file reference.
    Path uncompressed = sf.getFile();
    Path compressed = Files.createTempFile(null, ".gz");
    GZIPOutputStream out = new GZIPOutputStream(Files.newOutputStream(compressed));
    Files.copy(uncompressed, out);
    out.close();

    when(sequenceFileRepository.save(sf)).thenReturn(sfUpdated);

    SingleEndSequenceFile so = new SingleEndSequenceFile(sf);
    sf.setFile(compressed);

    fileProcessor.process(so);

    ArgumentCaptor<SequenceFile> argument = ArgumentCaptor.forClass(SequenceFile.class);
    verify(sequenceFileRepository).save(argument.capture());
    SequenceFile modified = argument.getValue();
    verify(sequenceFileRepository).save(sf);

    String uncompressedFileContents = new String(Files.readAllBytes(modified.getFile()));
    assertEquals("uncompressed file and file in database should be the same.", FILE_CONTENTS,
            uncompressedFileContents);

    Files.delete(uncompressed);
    assertTrue("Original compressed file should not have been deleted.", Files.exists(compressed));
    Files.delete(compressed);
}
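Read together, the two tests pin down the processor's contract: unzip the .gz payload into a new file, persist the updated SequenceFile through sequenceFileRepository.save, surface any failure as a FileProcessorException, and leave the original compressed file on disk. A rough sketch consistent with that contract follows; it is an inference from the tests, not the actual GzipFileProcessor source, and the getFiles() accessor on SequencingObject is assumed:

// Sketch only: inferred from the test expectations above.
public void process(SequencingObject seqObj) {
    for (SequenceFile file : seqObj.getFiles()) {
        try {
            Path source = file.getFile();
            if (source.toString().endsWith(".gz")) {
                // Decompress into a new file; the original .gz is left untouched.
                Path target = Files.createTempFile(null, null);
                try (GZIPInputStream in = new GZIPInputStream(Files.newInputStream(source))) {
                    Files.copy(in, target, StandardCopyOption.REPLACE_EXISTING);
                }
                file.setFile(target);
                // A RuntimeException here is what testExceptionBehaviours provokes.
                sequenceFileRepository.save(file);
            }
        } catch (Exception e) {
            throw new FileProcessorException("Unable to decompress " + file, e);
        }
    }
}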
Use of ca.corefacility.bioinformatics.irida.model.sequenceFile.SingleEndSequenceFile in project irida by phac-nml.
In class AnalysisSubmissionRepositoryIT, the method setup:
/**
 * Sets up objects for the tests.
 *
 * @throws IOException if an error occurs while setting up the test objects.
 */
@Before
public void setup() throws IOException {
    singleEndFile = (SingleEndSequenceFile) objectRepository.findOne(2L);
    sequenceFile = singleEndFile.getFileWithId(1L);
    assertNotNull(sequenceFile);
    Set<SequencingObject> singleFiles = Sets.newHashSet(singleEndFile);

    SingleEndSequenceFile singleEndFile2 = (SingleEndSequenceFile) objectRepository.findOne(3L);
    SequenceFile sequenceFile2 = singleEndFile2.getFileWithId(2L);
    assertNotNull(sequenceFile2);
    Set<SequencingObject> singleFiles2 = Sets.newHashSet(singleEndFile2);

    sequenceFilePair = (SequenceFilePair) objectRepository.findOne(1L);
    assertNotNull(sequenceFilePair);

    referenceFile = referenceFileRepository.findOne(1L);
    assertNotNull(referenceFile);
    referenceFile2 = referenceFileRepository.findOne(2L);

    submitter1 = userRepository.findOne(1L);
    submitter2 = userRepository.findOne(2L);

    analysisSubmission = AnalysisSubmission.builder(workflowId)
            .name(analysisName)
            .inputFiles(singleFiles)
            .referenceFile(referenceFile)
            .build();
    analysisSubmission.setRemoteAnalysisId(analysisId);
    analysisSubmission.setAnalysisState(AnalysisState.SUBMITTING);
    analysisSubmission.setSubmitter(submitter1);
    analysisSubmission.setAnalysisCleanedState(AnalysisCleanedState.NOT_CLEANED);

    analysisSubmission2 = AnalysisSubmission.builder(workflowId)
            .name(analysisName2)
            .inputFiles(singleFiles2)
            .referenceFile(referenceFile)
            .build();
    analysisSubmission2.setRemoteAnalysisId(analysisId2);
    analysisSubmission2.setAnalysisState(AnalysisState.SUBMITTING);
    analysisSubmission2.setSubmitter(submitter2);
    analysisSubmission2.setAnalysisCleanedState(AnalysisCleanedState.NOT_CLEANED);

    analysisSubmission2b = AnalysisSubmission.builder(workflowId)
            .name(analysisName2)
            .inputFiles(singleFiles2)
            .referenceFile(referenceFile2)
            .build();
    analysisSubmission2b.setRemoteAnalysisId(analysisId2);
    analysisSubmission2b.setAnalysisState(AnalysisState.SUBMITTING);
    analysisSubmission2b.setSubmitter(submitter2);
    analysisSubmission2b.setAnalysisCleanedState(AnalysisCleanedState.NOT_CLEANED);
}
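With these fixtures in place, a test in the class can persist a submission and read it back. The sketch below assumes analysisSubmissionRepository follows the same older Spring Data API as the findOne calls above, and that an authenticated user is required; the @WithMockUser annotation and test name are assumptions, not taken from the actual IT class:

@Test
@WithMockUser(username = "user1", roles = "ADMIN") // assumption: repository methods are secured
public void testSaveAndLoadSubmission() {
    AnalysisSubmission saved = analysisSubmissionRepository.save(analysisSubmission);
    AnalysisSubmission loaded = analysisSubmissionRepository.findOne(saved.getId());
    assertEquals(analysisName, loaded.getName());
    assertEquals(AnalysisState.SUBMITTING, loaded.getAnalysisState());
}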
Use of ca.corefacility.bioinformatics.irida.model.sequenceFile.SingleEndSequenceFile in project irida by phac-nml.
In class SequencingObjectConcatenatorFactoryTest, the method testGetConcatenatorSingleCollection:
@Test
public void testGetConcatenatorSingleCollection() {
    Set<SingleEndSequenceFile> fileSet = Sets.newHashSet(new SingleEndSequenceFile(null));
    SequencingObjectConcatenator<?> concatenator = SequencingObjectConcatenatorFactory.getConcatenator(fileSet);
    assertTrue(concatenator instanceof SingleEndSequenceFileConcatenator);
}
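A paired-end counterpart would follow the same pattern. The sketch below assumes the factory maps Set<SequenceFilePair> to a SequenceFilePairConcatenator (the paired analogue of the class asserted above) and that SequenceFilePair has a no-argument constructor:

@Test
public void testGetConcatenatorPairCollection() {
    // Assumption: SequenceFilePairConcatenator is the paired-end analogue
    // of SingleEndSequenceFileConcatenator.
    Set<SequenceFilePair> pairSet = Sets.newHashSet(new SequenceFilePair());
    SequencingObjectConcatenator<?> concatenator = SequencingObjectConcatenatorFactory.getConcatenator(pairSet);
    assertTrue(concatenator instanceof SequenceFilePairConcatenator);
}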