Use of ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission in project irida by phac-nml.
Class AnalysisSubmissionServiceImpl, method getPercentCompleteForAnalysisSubmission.
/**
* {@inheritDoc}
*/
@Override
@PreAuthorize("hasRole('ROLE_ADMIN') or hasPermission(#id, 'canReadAnalysisSubmission')")
public float getPercentCompleteForAnalysisSubmission(Long id)
		throws EntityNotFoundException, ExecutionManagerException, NoPercentageCompleteException {
	AnalysisSubmission analysisSubmission = read(id);
	AnalysisState analysisState = analysisSubmission.getAnalysisState();
	switch (analysisState) {
	case NEW:
	case PREPARING:
	case PREPARED:
	case SUBMITTING:
		return STATE_PERCENTAGE.get(analysisState);

	/**
	 * If the analysis is in a state of {@link AnalysisState#RUNNING} then we
	 * are able to ask Galaxy for the proportion of jobs that are complete.
	 * We scale this value between RUNNING_PERCENT (10%) and
	 * FINISHED_RUNNING_PERCENT (90%) so that after all jobs are complete we
	 * are only at 90%. The remaining 10% covers transferring results back
	 * from Galaxy.
	 *
	 * For example, if 10 out of 20 jobs are finished on Galaxy, the
	 * proportion of jobs complete is 10/20 = 0.5, so the percent complete
	 * for the overall analysis is: percentComplete = 10 + (90 - 10) * 0.5 = 50%.
	 *
	 * If 20 out of 20 jobs are finished on Galaxy, the percent complete is:
	 * percentComplete = 10 + (90 - 10) * 1.0 = 90%.
	 */
	case RUNNING:
		String workflowHistoryId = analysisSubmission.getRemoteAnalysisId();
		GalaxyWorkflowStatus workflowStatus = galaxyHistoriesService.getStatusForHistory(workflowHistoryId);
		return RUNNING_PERCENT + (FINISHED_RUNNING_PERCENT - RUNNING_PERCENT) * workflowStatus.getProportionComplete();
	case FINISHED_RUNNING:
	case COMPLETING:
	case TRANSFERRED:
	case POST_PROCESSING:
	case COMPLETED:
		return STATE_PERCENTAGE.get(analysisState);
	default:
		throw new NoPercentageCompleteException("No valid percent complete for state " + analysisState);
	}
}
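To sanity-check the scaling described in the Javadoc above, the arithmetic of the RUNNING case can be reproduced in isolation. The sketch below is illustrative only: the class name is hypothetical, and the 10 and 90 constants are taken from the percentages quoted in the comment rather than from the real STATE_PERCENTAGE map.

// Minimal sketch of the RUNNING-state scaling; class name and constants are assumptions.
public class PercentCompleteSketch {
	private static final float RUNNING_PERCENT = 10.0f;
	private static final float FINISHED_RUNNING_PERCENT = 90.0f;

	// Scales the proportion of finished Galaxy jobs (0.0 to 1.0) into the
	// overall analysis percentage between 10% and 90%.
	static float scaleRunningPercent(float proportionComplete) {
		return RUNNING_PERCENT + (FINISHED_RUNNING_PERCENT - RUNNING_PERCENT) * proportionComplete;
	}

	public static void main(String[] args) {
		System.out.println(scaleRunningPercent(0.5f)); // 50.0, the 10-of-20-jobs example
		System.out.println(scaleRunningPercent(1.0f)); // 90.0, the 20-of-20-jobs example
	}
}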
Use of ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission in project irida by phac-nml.
Class AnalysisSubmissionServiceImpl, method createSingleSampleSubmission.
/**
* {@inheritDoc}
*/
@Override
@Transactional
@PreAuthorize("hasRole('ROLE_USER')")
public Collection<AnalysisSubmission> createSingleSampleSubmission(IridaWorkflow workflow, Long ref,
		List<SingleEndSequenceFile> sequenceFiles, List<SequenceFilePair> sequenceFilePairs,
		Map<String, String> params, IridaWorkflowNamedParameters namedParameters, String name,
		String analysisDescription, List<Project> projectsToShare, boolean writeResultsToSamples) {
	final Collection<AnalysisSubmission> createdSubmissions = new HashSet<AnalysisSubmission>();

	// Single end reads
	IridaWorkflowDescription description = workflow.getWorkflowDescription();
	if (description.acceptsSingleSequenceFiles()) {
		final Map<Sample, SingleEndSequenceFile> samplesMap = sequencingObjectService
				.getUniqueSamplesForSequencingObjects(Sets.newHashSet(sequenceFiles));
		for (final Map.Entry<Sample, SingleEndSequenceFile> entry : samplesMap.entrySet()) {
			Sample s = entry.getKey();
			SingleEndSequenceFile file = entry.getValue();

			// Build the analysis submission
			AnalysisSubmission.Builder builder = AnalysisSubmission.builder(workflow.getWorkflowIdentifier());
			builder.name(name + "_" + s.getSampleName());
			builder.inputFiles(ImmutableSet.of(file));
			builder.updateSamples(writeResultsToSamples);
			builder.priority(AnalysisSubmission.Priority.MEDIUM);

			// Add reference file
			if (ref != null && description.requiresReference()) {
				// Note: when submitted through the UI, this cannot be empty
				// if the pipeline requires a reference file.
				ReferenceFile referenceFile = referenceFileRepository.findOne(ref);
				builder.referenceFile(referenceFile);
			}

			if (description.acceptsParameters()) {
				if (namedParameters != null) {
					builder.withNamedParameters(namedParameters);
				} else {
					if (!params.isEmpty()) {
						// Note: when submitted through the UI, this cannot be
						// empty if the pipeline requires parameters.
						builder.inputParameters(params);
					}
				}
			}

			// Create the submission
			createdSubmissions.add(create(builder.build()));
		}
	}

	// Paired end reads
	if (description.acceptsPairedSequenceFiles()) {
		final Map<Sample, SequenceFilePair> samplesMap = sequencingObjectService
				.getUniqueSamplesForSequencingObjects(Sets.newHashSet(sequenceFilePairs));
		for (final Map.Entry<Sample, SequenceFilePair> entry : samplesMap.entrySet()) {
			Sample s = entry.getKey();
			SequenceFilePair filePair = entry.getValue();

			// Build the analysis submission
			AnalysisSubmission.Builder builder = AnalysisSubmission.builder(workflow.getWorkflowIdentifier());
			builder.name(name + "_" + s.getSampleName());
			builder.inputFiles(ImmutableSet.of(filePair));
			builder.updateSamples(writeResultsToSamples);

			// Add reference file
			if (ref != null && description.requiresReference()) {
				ReferenceFile referenceFile = referenceFileRepository.findOne(ref);
				builder.referenceFile(referenceFile);
			}

			if (description.acceptsParameters()) {
				if (namedParameters != null) {
					builder.withNamedParameters(namedParameters);
				} else {
					if (!params.isEmpty()) {
						// Note: when submitted through the UI, this cannot be
						// empty if the pipeline requires parameters.
						builder.inputParameters(params);
					}
				}
			}

			// Add description to submission, can be null
			builder.analysisDescription(analysisDescription);

			// Create the submission
			createdSubmissions.add(create(builder.build()));
		}
	}

	// Share with the required projects
	for (AnalysisSubmission submission : createdSubmissions) {
		for (Project project : projectsToShare) {
			pasRepository.save(new ProjectAnalysisSubmissionJoin(project, submission));
		}
	}

	return createdSubmissions;
}
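A caller might invoke createSingleSampleSubmission roughly as follows. This is a hedged sketch: the helper method, variable names, and literal arguments are assumptions, and only the service method's signature comes from the code above. One submission is created per sample, with the sample name appended to the base name.

// Hypothetical helper showing one call; everything except the service method signature is an assumption.
public Collection<AnalysisSubmission> launchPerSampleAnalyses(AnalysisSubmissionService service,
		IridaWorkflow workflow, Long referenceFileId, List<SingleEndSequenceFile> singleEndFiles,
		List<SequenceFilePair> filePairs, Map<String, String> parameters, List<Project> projectsToShare) {
	return service.createSingleSampleSubmission(workflow, referenceFileId, singleEndFiles, filePairs,
			parameters, null /* no named parameters */, "my-analysis", "optional description",
			projectsToShare, true /* write results back to samples */);
}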
Use of ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission in project irida by phac-nml.
Class AnalysisSubmissionServiceImpl, method delete.
/**
* {@inheritDoc}
*/
@Override
@PreAuthorize("hasRole('ROLE_ADMIN') or hasPermission(#id, 'canUpdateAnalysisSubmission')")
@Transactional
public void delete(Long id) throws EntityNotFoundException {
	final AnalysisSubmission submission = read(id);

	if (AnalysisCleanedState.NOT_CLEANED.equals(submission.getAnalysisCleanedState())) {
		// We're "CLEANING" it right now!
		submission.setAnalysisCleanedState(AnalysisCleanedState.CLEANING);
		try {
			analysisExecutionService.cleanupSubmission(submission).get();
		} catch (final ExecutionManagerException e) {
			logger.error("Failed to cleanup analysis submission before deletion," + " but proceeding with deletion anyway.", e);
		} catch (final Throwable e) {
			logger.error("An unexpected exception happened when cleaning the analysis submission," + " but proceeding with deletion anyway.", e);
		}
	} else {
		logger.debug("Not cleaning submission [" + id + "] when deleting, it's already cleaned.");
	}

	super.delete(id);
}
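Deleting a submission is a single service call; the Galaxy cleanup shown above happens inside delete before the record is removed. A hedged caller sketch, where the submission id and the surrounding error handling are assumptions:

// Hypothetical caller; submissionId and the catch block are assumptions.
try {
	analysisSubmissionService.delete(submissionId);
} catch (EntityNotFoundException e) {
	// No submission with that id exists, so there is nothing to clean up or delete.
	logger.warn("Analysis submission " + submissionId + " was not found when deleting.", e);
}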
Use of ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission in project irida by phac-nml.
Class AnalysisSubmissionServiceImpl, method createMultipleSampleSubmission.
/**
* {@inheritDoc}
*/
@Override
@Transactional
@PreAuthorize("hasRole('ROLE_USER')")
public AnalysisSubmission createMultipleSampleSubmission(IridaWorkflow workflow, Long ref,
		List<SingleEndSequenceFile> sequenceFiles, List<SequenceFilePair> sequenceFilePairs,
		Map<String, String> params, IridaWorkflowNamedParameters namedParameters, String name,
		String newAnalysisDescription, List<Project> projectsToShare, boolean writeResultsToSamples) {
	AnalysisSubmission.Builder builder = AnalysisSubmission.builder(workflow.getWorkflowIdentifier());
	builder.name(name);
	builder.priority(AnalysisSubmission.Priority.MEDIUM);
	builder.updateSamples(writeResultsToSamples);

	IridaWorkflowDescription description = workflow.getWorkflowDescription();

	// Add reference file
	if (ref != null && description.requiresReference()) {
		ReferenceFile referenceFile = referenceFileRepository.findOne(ref);
		builder.referenceFile(referenceFile);
	}

	// Add any single end sequencing files.
	if (description.acceptsSingleSequenceFiles()) {
		if (!sequenceFiles.isEmpty()) {
			builder.inputFiles(Sets.newHashSet(sequenceFiles));
		}
	}

	// Add any paired end sequencing files.
	if (description.acceptsPairedSequenceFiles()) {
		if (!sequenceFilePairs.isEmpty()) {
			builder.inputFiles(Sets.newHashSet(sequenceFilePairs));
		}
	}

	if (description.acceptsParameters()) {
		if (namedParameters != null) {
			builder.withNamedParameters(namedParameters);
		} else {
			if (!params.isEmpty()) {
				// Note: when submitted through the UI, this cannot be empty
				// if the pipeline requires parameters.
				builder.inputParameters(params);
			}
		}
	}

	// Add description to submission, can be null
	builder.analysisDescription(newAnalysisDescription);

	// Create the submission
	AnalysisSubmission submission = create(builder.build());

	// Share with the required projects
	for (Project project : projectsToShare) {
		pasRepository.save(new ProjectAnalysisSubmissionJoin(project, submission));
	}

	return submission;
}
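In contrast to the per-sample variant above, createMultipleSampleSubmission pools all input files into a single submission. A hedged caller sketch, with the same caveat that everything except the method signature is an assumption:

// Hypothetical helper; only the createMultipleSampleSubmission signature comes from the code above.
public AnalysisSubmission launchPooledAnalysis(AnalysisSubmissionService service, IridaWorkflow workflow,
		Long referenceFileId, List<SingleEndSequenceFile> singleEndFiles, List<SequenceFilePair> filePairs,
		Map<String, String> parameters, List<Project> projectsToShare) {
	return service.createMultipleSampleSubmission(workflow, referenceFileId, singleEndFiles, filePairs,
			parameters, null /* no named parameters */, "my-pooled-analysis", "optional description",
			projectsToShare, false /* do not write results back to samples */);
}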
Use of ca.corefacility.bioinformatics.irida.model.workflow.submission.AnalysisSubmission in project irida by phac-nml.
Class RESTAnalysisSubmissionController, method getAnalysisInputUnpairedFiles.
/**
 * Get the {@link SequenceFile}s not in {@link SequenceFilePair}s used for
 * the {@link AnalysisSubmission}.
 *
 * @param identifier
 *            the {@link AnalysisSubmission} id
 * @return list of {@link SequenceFile}s
 */
@RequestMapping("/{identifier}/sequenceFiles/unpaired")
public ModelMap getAnalysisInputUnpairedFiles(@PathVariable Long identifier) {
	ModelMap map = new ModelMap();
	AnalysisSubmission analysisSubmission = analysisSubmissionService.read(identifier);
	Set<SingleEndSequenceFile> inputFilesSingleEnd = sequencingObjectService
			.getSequencingObjectsOfTypeForAnalysisSubmission(analysisSubmission, SingleEndSequenceFile.class);

	ResourceCollection<SequencingObject> resources = new ResourceCollection<>(inputFilesSingleEnd.size());
	for (SingleEndSequenceFile file : inputFilesSingleEnd) {
		SampleSequencingObjectJoin join = sampleService.getSampleForSequencingObject(file);

		if (join != null) {
			SequencingObject sequencingObject = join.getObject();
			RESTSampleSequenceFilesController.addSequencingObjectLinks(sequencingObject, join.getSubject().getId());
			resources.add(sequencingObject);
		}
	}

	resources.add(linkTo(methodOn(RESTAnalysisSubmissionController.class).getAnalysisInputUnpairedFiles(identifier))
			.withSelfRel());
	map.addAttribute(RESTGenericController.RESOURCE_NAME, resources);

	return map;
}
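A client can exercise this endpoint with any HTTP library; the sketch below uses Spring's RestTemplate. The host and the controller's base mapping are assumptions, as only the "/{identifier}/sequenceFiles/unpaired" path segment appears in the @RequestMapping above.

// Hypothetical client call; the host and base path are assumptions.
RestTemplate restTemplate = new RestTemplate();
String url = "https://irida.example.org/api/analysisSubmissions/{identifier}/sequenceFiles/unpaired";
String body = restTemplate.getForObject(url, String.class, 5L);
System.out.println(body); // JSON containing the unpaired sequencing objects and a self link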