Use of ca.corefacility.bioinformatics.irida.model.workflow.analysis.AnalysisOutputFile in project irida by phac-nml.
The class FileUtilities, method createAnalysisOutputFileZippedResponse.
/**
 * Utility method for downloading a zip file containing all output files from
 * an analysis.
 *
 * @param response
 *            {@link HttpServletResponse}
 * @param fileName
 *            Name of the file to create
 * @param files
 *            Set of {@link AnalysisOutputFile}
 */
public static void createAnalysisOutputFileZippedResponse(HttpServletResponse response, String fileName,
        Set<AnalysisOutputFile> files) {
    /*
     * Replace spaces and commas, as they cause issues with the
     * Content-Disposition response header.
     */
    fileName = fileName.replaceAll(" ", "_");
    fileName = fileName.replaceAll(",", "");
    logger.debug("Creating zipped file response. [" + fileName + "]");

    // Set the response headers before we do *ANYTHING* so that the filename
    // actually appears in the download dialog.
    response.setHeader(CONTENT_DISPOSITION, ATTACHMENT_FILENAME + fileName + EXTENSION_ZIP);
    // Content type for a zip file.
    response.setContentType(CONTENT_TYPE_APPLICATION_ZIP);

    try (ServletOutputStream responseStream = response.getOutputStream();
            ZipOutputStream outputStream = new ZipOutputStream(responseStream)) {
        for (AnalysisOutputFile file : files) {
            if (!Files.exists(file.getFile())) {
                response.setStatus(404);
                throw new FileNotFoundException();
            }
            // 1) Build a folder/file name.
            fileName = formatName(fileName);
            StringBuilder zipEntryName = new StringBuilder(fileName);
            zipEntryName.append("/").append(file.getLabel());
            // 2) Tell the zip stream that we are starting a new entry in
            // the archive.
            outputStream.putNextEntry(new ZipEntry(zipEntryName.toString()));
            // 3) Copy all of the bytes from the file to the output stream.
            Files.copy(file.getFile(), outputStream);
            // 4) Close the current entry in the archive in preparation for
            // the next entry.
            outputStream.closeEntry();

            // Also write the AnalysisOutputFile serialized as JSON, as a "-prov.json" entry.
            ObjectMapper objectMapper = new ObjectMapper();
            byte[] bytes = objectMapper.writeValueAsBytes(file);
            outputStream.putNextEntry(new ZipEntry(zipEntryName.toString() + "-prov.json"));
            outputStream.write(bytes);
            outputStream.closeEntry();
        }
        // Tell the output stream that we are finished writing the archive.
        outputStream.finish();
        outputStream.close();
    } catch (IOException e) {
        // This generally means that the user has cancelled the download
        // from their web browser; we can safely ignore this.
        logger.debug("This *probably* means that the user cancelled the download, "
                + "but it might be something else, see the stack trace below for more information.", e);
    } catch (Exception e) {
        logger.error("Download failed...", e);
    }
}
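A caller passes the raw HttpServletResponse straight through so the headers are written before any zip bytes. Purely as an illustration (the controller name, request mapping, service, and accessor used below are assumptions, not taken from the snippet above), a Spring MVC handler could delegate to this utility roughly like so:

// Hypothetical controller method; the mapping, service, and accessor names are assumptions.
@RequestMapping("/analysis/ajax/download/{analysisSubmissionId}")
public void downloadAnalysisOutputs(@PathVariable Long analysisSubmissionId, HttpServletResponse response) {
    AnalysisSubmission submission = analysisSubmissionService.read(analysisSubmissionId); // assumed service
    Set<AnalysisOutputFile> files = submission.getAnalysis().getAnalysisOutputFiles();    // assumed accessor
    // The utility sets the headers and streams the zip directly into the response.
    FileUtilities.createAnalysisOutputFileZippedResponse(response, submission.getName(), files);
}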
Use of ca.corefacility.bioinformatics.irida.model.workflow.analysis.AnalysisOutputFile in project irida by phac-nml.
The class SISTRSampleUpdater, method update.
/**
* Add SISTR results to the metadata of the given {@link Sample}s
*
* @param samples The samples to update.
* @param analysis the {@link AnalysisSubmission} to apply to the samples
* @throws PostProcessingException if the method cannot read the "sistr-predictions" output file
*/
@Override
public void update(Collection<Sample> samples, AnalysisSubmission analysis) throws PostProcessingException {
    AnalysisOutputFile sistrFile = analysis.getAnalysis().getAnalysisOutputFile(SISTR_FILE);
    Path filePath = sistrFile.getFile();
    Map<String, MetadataEntry> stringEntries = new HashMap<>();
    try {
        // Read the JSON file from the SISTR output.
        @SuppressWarnings("resource")
        String jsonFile = new Scanner(new BufferedReader(new FileReader(filePath.toFile()))).useDelimiter("\\Z")
                .next();

        // Map the results into a Map.
        ObjectMapper mapper = new ObjectMapper();
        List<Map<String, Object>> sistrResults = mapper.readValue(jsonFile,
                new TypeReference<List<Map<String, Object>>>() {
                });

        if (sistrResults.size() > 0) {
            Map<String, Object> result = sistrResults.get(0);

            // Loop through each of the requested fields and save the entries.
            SISTR_FIELDS.entrySet().forEach(e -> {
                if (result.containsKey(e.getKey()) && result.get(e.getKey()) != null) {
                    String value = result.get(e.getKey()).toString();
                    PipelineProvidedMetadataEntry metadataEntry = new PipelineProvidedMetadataEntry(value, "text",
                            analysis);
                    stringEntries.put(e.getValue(), metadataEntry);
                }
            });

            // Convert the string map into metadata fields.
            Map<MetadataTemplateField, MetadataEntry> metadataMap = metadataTemplateService
                    .getMetadataMap(stringEntries);

            // Save the metadata back to each sample.
            samples.forEach(s -> {
                s.mergeMetadata(metadataMap);
                sampleService.updateFields(s.getId(), ImmutableMap.of("metadata", s.getMetadata()));
            });
        } else {
            throw new PostProcessingException("SISTR results for file are not correctly formatted");
        }
    } catch (IOException e) {
        throw new PostProcessingException("Error parsing JSON from SISTR results", e);
    }
}
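The snippet relies on a SISTR_FIELDS constant that maps keys in the sistr-predictions JSON to IRIDA metadata field labels; its definition is not shown here. A minimal sketch of the shape such a map takes (the specific keys and labels below are illustrative assumptions, not copied from the project):

// Illustrative only: the real SISTR_FIELDS mapping is defined in SISTRSampleUpdater, not here.
private static final Map<String, String> SISTR_FIELDS = ImmutableMap.<String, String>builder()
        .put("serovar", "SISTR serovar")                      // assumed JSON key -> metadata label
        .put("cgmlst_subspecies", "SISTR cgMLST Subspecies")  // assumed JSON key -> metadata label
        .build();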
Use of ca.corefacility.bioinformatics.irida.model.workflow.analysis.AnalysisOutputFile in project irida by phac-nml.
The class CSVView, method renderMergedOutputModel.
/**
* {@inheritDoc}
*/
@Override
protected void renderMergedOutputModel(Map<String, Object> model, HttpServletRequest request,
        HttpServletResponse response) throws Exception {
    AnalysisOutputFile sfr = (AnalysisOutputFile) model.get(RESTGenericController.RESOURCE_NAME);
    Path fileContent = sfr.getFile();
    String filename = fileContent.getFileName().toString();
    logger.trace("Sending file to client [" + filename + "]");
    response.setHeader(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + filename + "\"");
    response.setHeader(HttpHeaders.CONTENT_TYPE, DEFAULT_CONTENT_TYPE);
    response.setHeader(HttpHeaders.CONTENT_LENGTH, String.valueOf(Files.size(fileContent)));
    OutputStream os = response.getOutputStream();
    Files.copy(fileContent, os);
    os.flush();
    os.close();
}
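To trigger this view, a caller places the AnalysisOutputFile in the model under RESTGenericController.RESOURCE_NAME. A minimal sketch of such a call site (assuming a no-arg CSVView constructor; in the project the view may instead be picked by a view resolver, so treat this only as an illustration):

// Hypothetical usage; not taken from the snippet above.
ModelAndView modelAndView = new ModelAndView(new CSVView());
modelAndView.addObject(RESTGenericController.RESOURCE_NAME, analysisOutputFile); // the file to stream
return modelAndView;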
Use of ca.corefacility.bioinformatics.irida.model.workflow.analysis.AnalysisOutputFile in project irida by phac-nml.
The class NewickFileView, method renderMergedOutputModel.
/**
* {@inheritDoc}
*/
@Override
protected void renderMergedOutputModel(Map<String, Object> model, HttpServletRequest request,
        HttpServletResponse response) throws Exception {
    AnalysisOutputFile sfr = (AnalysisOutputFile) model.get(RESTGenericController.RESOURCE_NAME);
    Path fileContent = sfr.getFile();
    String filename = fileContent.getFileName().toString();
    logger.trace("Sending file to client [" + filename + "]");
    response.setHeader(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + filename + "\"");
    response.setHeader(HttpHeaders.CONTENT_TYPE, DEFAULT_CONTENT_TYPE);
    response.setHeader(HttpHeaders.CONTENT_LENGTH, String.valueOf(Files.size(fileContent)));
    OutputStream os = response.getOutputStream();
    Files.copy(fileContent, os);
    os.flush();
    os.close();
}
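NewickFileView's render method is identical to CSVView's apart from the DEFAULT_CONTENT_TYPE constant each class defines. Purely as a refactoring sketch (not code from the project), the shared logic could live in one helper that also closes the response stream via try-with-resources:

// Hypothetical shared helper; not part of the IRIDA codebase.
private static void writeFileAttachment(HttpServletResponse response, Path fileContent, String contentType)
        throws IOException {
    String filename = fileContent.getFileName().toString();
    response.setHeader(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + filename + "\"");
    response.setHeader(HttpHeaders.CONTENT_TYPE, contentType);
    response.setHeader(HttpHeaders.CONTENT_LENGTH, String.valueOf(Files.size(fileContent)));
    try (OutputStream os = response.getOutputStream()) {
        Files.copy(fileContent, os);
        os.flush();
    }
}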
Use of ca.corefacility.bioinformatics.irida.model.workflow.analysis.AnalysisOutputFile in project irida by phac-nml.
The class RESTAnalysisSubmissionController, method getAnalysisOutputFileContents.
/**
 * Get the actual file contents for an analysis output file.
 *
 * @param submissionId
 *            The {@link AnalysisSubmission} id
 * @param fileType
 *            The {@link AnalysisOutputFile} type as defined in the
 *            {@link Analysis} subclass
 * @return a {@link FileSystemResource} containing the contents of the
 *         {@link AnalysisOutputFile}.
 */
@RequestMapping(value = "/{submissionId}/analysis/file/{fileType}", produces = MediaType.TEXT_PLAIN_VALUE)
@ResponseBody
public FileSystemResource getAnalysisOutputFileContents(@PathVariable Long submissionId,
        @PathVariable String fileType) {
    AnalysisSubmission read = analysisSubmissionService.read(submissionId);
    if (read.getAnalysisState() != AnalysisState.COMPLETED) {
        throw new EntityNotFoundException("Analysis is not completed");
    }
    AnalysisOutputFile analysisOutputFile = read.getAnalysis().getAnalysisOutputFile(fileType);
    return new FileSystemResource(analysisOutputFile.getFile().toFile());
}
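Because the handler produces text/plain, a client can fetch the raw file contents with a plain GET against the mapped path. A sketch using the JDK HTTP client follows; the base URL, submission id, and fileType are placeholders, and a real IRIDA server would additionally require authentication:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class FetchAnalysisOutput {
    public static void main(String[] args) throws Exception {
        // Illustrative request only; the host, id, and file type are placeholders.
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("https://irida.example.org/api/analysisSubmissions/1/analysis/file/tree"))
                .header("Accept", "text/plain")
                .build();
        HttpResponse<String> fileContents = client.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(fileContents.body());
    }
}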