Use of java.io.PipedOutputStream in project dhis2-core by dhis2.
From the class DefaultAdxDataService, method saveDataValueSetInternal:
private ImportSummary saveDataValueSetInternal(InputStream in, ImportOptions importOptions, TaskId id) {
    notifier.clear(id).notify(id, "ADX parsing process started");

    ImportOptions adxImportOptions = ObjectUtils.firstNonNull(importOptions, ImportOptions.getDefaultImportOptions())
        .instance().setNotificationLevel(NotificationLevel.OFF);

    // Get import options
    IdScheme dataSetIdScheme = importOptions.getIdSchemes().getDataSetIdScheme();
    IdScheme dataElementIdScheme = importOptions.getIdSchemes().getDataElementIdScheme();

    // Create metadata maps
    CachingMap<String, DataSet> dataSetMap = new CachingMap<>();
    CachingMap<String, DataElement> dataElementMap = new CachingMap<>();

    // Create metadata callables
    IdentifiableObjectCallable<DataSet> dataSetCallable =
        new IdentifiableObjectCallable<>(identifiableObjectManager, DataSet.class, dataSetIdScheme, null);
    IdentifiableObjectCallable<DataElement> dataElementCallable =
        new IdentifiableObjectCallable<>(identifiableObjectManager, DataElement.class, dataElementIdScheme, null);

    // Preheat caches if requested
    if (importOptions.isPreheatCacheDefaultFalse()) {
        dataSetMap.load(identifiableObjectManager.getAll(DataSet.class), o -> o.getPropertyValue(dataSetIdScheme));
        dataElementMap.load(identifiableObjectManager.getAll(DataElement.class), o -> o.getPropertyValue(dataElementIdScheme));
    }

    XMLReader adxReader = XMLFactory.getXMLReader(in);

    ImportSummary importSummary;

    adxReader.moveToStartElement(AdxDataService.ROOT, AdxDataService.NAMESPACE);

    ExecutorService executor = Executors.newSingleThreadExecutor();

    // Give the DXF import a different notification task ID so it doesn't conflict with notifications from this level.
    TaskId dxfTaskId = new TaskId(TaskCategory.DATAVALUE_IMPORT_INTERNAL, id.getUser());

    int groupCount = 0;

    try (PipedOutputStream pipeOut = new PipedOutputStream()) {
        Future<ImportSummary> futureImportSummary =
            executor.submit(new AdxPipedImporter(dataValueSetService, adxImportOptions, dxfTaskId, pipeOut, sessionFactory));
        XMLOutputFactory factory = XMLOutputFactory.newInstance();
        XMLStreamWriter dxfWriter = factory.createXMLStreamWriter(pipeOut);

        List<ImportConflict> adxConflicts = new LinkedList<>();

        dxfWriter.writeStartDocument("1.0");
        dxfWriter.writeStartElement("dataValueSet");
        dxfWriter.writeDefaultNamespace("http://dhis2.org/schema/dxf/2.0");

        notifier.notify(id, "Starting to import ADX data groups.");

        while (adxReader.moveToStartElement(AdxDataService.GROUP, AdxDataService.NAMESPACE)) {
            notifier.update(id, "Importing ADX data group: " + groupCount);

            // Note: this returns conflicts which are detected at the ADX level
            adxConflicts.addAll(parseAdxGroupToDxf(adxReader, dxfWriter, adxImportOptions,
                dataSetMap, dataSetCallable, dataElementMap, dataElementCallable));
            groupCount++;
        }

        // end dataValueSet
        dxfWriter.writeEndElement();
        dxfWriter.writeEndDocument();

        pipeOut.flush();

        importSummary = futureImportSummary.get(TOTAL_MINUTES_TO_WAIT, TimeUnit.MINUTES);
        importSummary.getConflicts().addAll(adxConflicts);
        importSummary.getImportCount().incrementIgnored(adxConflicts.size());
    } catch (AdxException ex) {
        importSummary = new ImportSummary();
        importSummary.setStatus(ImportStatus.ERROR);
        importSummary.setDescription("Data set import failed within group number: " + groupCount);
        importSummary.getConflicts().add(ex.getImportConflict());
        notifier.update(id, NotificationLevel.ERROR, "ADX data import done", true);
        log.warn("Import failed: " + DebugUtils.getStackTrace(ex));
    } catch (IOException | XMLStreamException | InterruptedException | ExecutionException | TimeoutException ex) {
        importSummary = new ImportSummary();
        importSummary.setStatus(ImportStatus.ERROR);
        importSummary.setDescription("Data set import failed within group number: " + groupCount);
        notifier.update(id, NotificationLevel.ERROR, "ADX data import done", true);
        log.warn("Import failed: " + DebugUtils.getStackTrace(ex));
    }

    executor.shutdown();

    notifier.update(id, INFO, "ADX data import done", true).addTaskSummary(id, importSummary);

    ImportCount c = importSummary.getImportCount();
    log.info("ADX data import done, imported: " + c.getImported() + ", updated: " + c.getUpdated()
        + ", deleted: " + c.getDeleted() + ", ignored: " + c.getIgnored());

    return importSummary;
}
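The method above converts ADX to DXF on the fly: the calling thread writes DXF XML into a PipedOutputStream while an AdxPipedImporter task, submitted to a single-thread executor, parses the connected PipedInputStream and hands back its ImportSummary through a Future. Below is a minimal, self-contained sketch of that producer/consumer shape using only the JDK; the class and helper names (PipedXmlTransformSketch, drain) are illustrative and not part of dhis2-core.

import java.io.IOException;
import java.io.InputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import javax.xml.stream.XMLOutputFactory;
import javax.xml.stream.XMLStreamWriter;

public class PipedXmlTransformSketch {

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<Long> consumed;
        try (PipedOutputStream pipeOut = new PipedOutputStream()) {
            // Connect the reading end and hand it to a worker before writing anything,
            // otherwise the writer would block once the pipe buffer fills up.
            PipedInputStream pipeIn = new PipedInputStream(pipeOut);
            consumed = executor.submit(() -> drain(pipeIn));

            // Produce a small XML document into the writing end, like the DXF writer above.
            XMLStreamWriter dxfWriter = XMLOutputFactory.newInstance().createXMLStreamWriter(pipeOut);
            dxfWriter.writeStartDocument("1.0");
            dxfWriter.writeStartElement("dataValueSet");
            dxfWriter.writeEndElement();
            dxfWriter.writeEndDocument();
            dxfWriter.flush();
            pipeOut.flush();
        } // closing pipeOut here signals end-of-stream to the reading thread

        System.out.println("bytes consumed by worker: " + consumed.get());
        executor.shutdown();
    }

    // Reads the pipe to exhaustion, standing in for the DXF import done by AdxPipedImporter.
    private static long drain(InputStream in) throws IOException {
        long count = 0;
        try (InputStream closeable = in) {
            while (closeable.read() != -1) {
                count++;
            }
        }
        return count;
    }
}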
Use of java.io.PipedOutputStream in project accumulo by apache.
From the class PasswordConverterTest, method setup:
@Before
public void setup() throws IOException {
    argv = new String[] { "--password", "" };
    password = new Password();

    // Replace System.in with the reading end of a pipe pre-loaded with "secret".
    PipedInputStream in = new PipedInputStream();
    PipedOutputStream out = new PipedOutputStream(in);
    OutputStreamWriter osw = new OutputStreamWriter(out);
    osw.write("secret");
    osw.close();
    System.setIn(in);
}
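The setup above pre-loads a pipe with the text "secret" and installs its reading end as System.in, so the Password converter under test reads a canned value instead of a real console. A compact sketch of the same trick, assuming nothing from Accumulo (FakeStdinSketch and its Scanner-based consumer are illustrative):

import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.util.Scanner;

public class FakeStdinSketch {

    public static void main(String[] args) throws Exception {
        InputStream originalIn = System.in;

        // Pre-load the pipe with the fake console input; a few bytes fit comfortably
        // in the pipe's default 1024-byte buffer, so no second thread is needed here.
        PipedInputStream in = new PipedInputStream();
        try (OutputStreamWriter osw = new OutputStreamWriter(new PipedOutputStream(in))) {
            osw.write("secret");
        } // closing the writer closes the pipe, so readers later see end-of-stream

        System.setIn(in);
        try (Scanner scanner = new Scanner(System.in)) {
            System.out.println("read from fake stdin: " + scanner.next());
        } finally {
            System.setIn(originalIn); // restore stdin so other code/tests are unaffected
        }
    }
}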
Use of java.io.PipedOutputStream in project smarthome by eclipse.
From the class JSONResponse, method createResponse:
private Response createResponse(Status status, Object entity) {
    ResponseBuilder rp = responseBuilder(status);
    if (entity == null) {
        return rp.build();
    }

    // The PipedOutputStream will only be closed by the writing thread,
    // since closing it during this method call would be too early.
    // The receiver of the response will read from the pipe after this method returns.
    PipedOutputStream out = new PipedOutputStream();
    try {
        // We will not actively close the PipedInputStream, since it is read by the receiving end
        // and will be GC'ed once the response is consumed.
        PipedInputStream in = new PipedInputStream(out);
        rp.entity(in);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    Thread writerThread = new Thread(() -> {
        try (JsonWriter jsonWriter = new JsonWriter(new BufferedWriter(new OutputStreamWriter(out)))) {
            if (entity != null) {
                gson.toJson(entity, entity.getClass(), jsonWriter);
                jsonWriter.flush();
            }
        } catch (IOException | JsonIOException e) {
            logger.error("Error streaming JSON through PipedInputStream/PipedOutputStream: ", e);
        }
    });

    // Daemonize the thread to permit JVM shutdown even while we are streaming JSON.
    writerThread.setDaemon(true);
    writerThread.start();

    return rp.build();
}
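Here the response entity handed to JAX-RS is the reading end of a pipe; a daemon thread serializes the object with Gson into the writing end and is the only party that closes it, so streaming continues after createResponse returns. Below is a stripped-down sketch of that hand-off using only the JDK, with a plain string standing in for the Gson serialization (StreamedEntitySketch and streamedJson are illustrative names, not Eclipse SmartHome API):

import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.nio.charset.StandardCharsets;

public class StreamedEntitySketch {

    // Returns the reading end of a pipe; a daemon thread streams the JSON text into the
    // writing end and is the only one to close it, mirroring the method above.
    static InputStream streamedJson(String json) throws IOException {
        PipedOutputStream out = new PipedOutputStream();
        PipedInputStream in = new PipedInputStream(out);
        Thread writerThread = new Thread(() -> {
            try (BufferedWriter writer =
                    new BufferedWriter(new OutputStreamWriter(out, StandardCharsets.UTF_8))) {
                writer.write(json); // stands in for gson.toJson(entity, entity.getClass(), jsonWriter)
            } catch (IOException e) {
                e.printStackTrace(); // the real code logs the failure instead
            }
        });
        writerThread.setDaemon(true); // do not keep the JVM alive for a slow or absent consumer
        writerThread.start();
        return in;
    }

    public static void main(String[] args) throws IOException {
        try (InputStream entity = streamedJson("{\"status\":\"ok\"}")) {
            int b;
            while ((b = entity.read()) != -1) {
                System.out.print((char) b);
            }
            System.out.println();
        }
    }
}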
Use of java.io.PipedOutputStream in project data-prep by Talend.
From the class DataSetService, method updateRawDataSet:
/**
 * Updates a data set content and metadata. If no data set exists for the given id, the data set is silently created.
 *
 * @param dataSetId The id of the data set to be updated.
 * @param name The new name for the data set. An empty (or <code>null</code>) name does not update the dataset name.
 * @param dataSetContent The new content for the data set. If empty, existing content will <b>not</b> be replaced.
 * For the delete operation, look at {@link #delete(String)}.
 */
@RequestMapping(value = "/datasets/{id}/raw", method = PUT)
@ApiOperation(value = "Update a data set by id", notes = "Update a data set content based on provided id and PUT body. Id should be a UUID returned by the list operation. Not valid or non existing data set id returns empty content. For documentation purposes, body is typed as 'text/plain' but operation accepts binary content too.")
@Timed
@VolumeMetered
public String updateRawDataSet(//
        @PathVariable(value = "id") @ApiParam(name = "id", value = "Id of the data set to update") String dataSetId, //
        @RequestParam(value = "name", required = false) @ApiParam(name = "name", value = "New value for the data set name") String name, //
        @RequestParam(value = "size", required = false) @ApiParam(name = "size", value = "The size of the dataSet") Long size,
        @ApiParam(value = "content") InputStream dataSetContent) {
    LOG.debug("updating dataset content #{}", dataSetId);

    if (name != null) {
        checkDataSetName(name);
    }

    DataSetMetadata currentDataSetMetadata = dataSetMetadataRepository.get(dataSetId);
    if (currentDataSetMetadata == null) {
        return create(name, null, size, TEXT_PLAIN_VALUE, dataSetContent);
    } else {
        // Just like the creation, let's make sure an invalid size forbids dataset creation
        if (size != null && size < 0) {
            LOG.warn("invalid size provided {}", size);
            throw new TDPException(UNSUPPORTED_CONTENT);
        }

        final UpdateDataSetCacheKey cacheKey = new UpdateDataSetCacheKey(currentDataSetMetadata.getId());
        final DistributedLock lock = dataSetMetadataRepository.createDatasetMetadataLock(currentDataSetMetadata.getId());
        try {
            lock.lock();

            // Check the size if it's available (quick win)
            if (size != null && size > 0) {
                quotaService.checkIfAddingSizeExceedsAvailableStorage(Math.abs(size - currentDataSetMetadata.getDataSetSize()));
            }

            final DataSetMetadataBuilder datasetBuilder = metadataBuilder.metadata().id(currentDataSetMetadata.getId());
            datasetBuilder.copyNonContentRelated(currentDataSetMetadata);
            datasetBuilder.modified(System.currentTimeMillis());
            if (!StringUtils.isEmpty(name)) {
                datasetBuilder.name(name);
            }
            final DataSetMetadata updatedDataSetMetadata = datasetBuilder.build();

            // Save the data set content into the cache to make sure there's enough space in the content store
            final long maxDataSetSizeAllowed = getMaxDataSetSizeAllowed();
            final StrictlyBoundedInputStream sizeCalculator = new StrictlyBoundedInputStream(dataSetContent, maxDataSetSizeAllowed);
            try (OutputStream cacheEntry = cacheManager.put(cacheKey, TimeToLive.DEFAULT)) {
                IOUtils.copy(sizeCalculator, cacheEntry);
            }

            // Once fully copied to the cache, we know for sure that the content store has enough space,
            // so let's copy from the cache to the content store
            PipedInputStream toContentStore = new PipedInputStream();
            PipedOutputStream fromCache = new PipedOutputStream(toContentStore);
            Runnable r = () -> {
                try (final InputStream input = cacheManager.get(cacheKey)) {
                    IOUtils.copy(input, fromCache);
                    // It's important to close this stream, otherwise the piped stream will never close
                    fromCache.close();
                } catch (IOException e) {
                    throw new TDPException(UNABLE_TO_CREATE_OR_UPDATE_DATASET, e);
                }
            };
            executor.execute(r);
            contentStore.storeAsRaw(updatedDataSetMetadata, toContentStore);

            // Update the dataset metadata with its new size
            updatedDataSetMetadata.setDataSetSize(sizeCalculator.getTotal());
            dataSetMetadataRepository.save(updatedDataSetMetadata);

            // Publish the update event
            publisher.publishEvent(new DatasetUpdatedEvent(updatedDataSetMetadata));
        } catch (StrictlyBoundedInputStream.InputStreamTooLargeException e) {
            LOG.warn("Dataset update {} cannot be done, new content is too big", currentDataSetMetadata.getId());
            throw new TDPException(MAX_STORAGE_MAY_BE_EXCEEDED, e, build().put("limit", e.getMaxSize()));
        } catch (IOException e) {
            LOG.error("Error updating the dataset", e);
            throw new TDPException(UNABLE_TO_CREATE_OR_UPDATE_DATASET, e);
        } finally {
            dataSetContentToNull(dataSetContent);
            // Whatever the outcome, the cache needs to be cleaned
            if (cacheManager.has(cacheKey)) {
                cacheManager.evict(cacheKey);
            }
            lock.unlock();
        }

        // Content was changed, so queue events (format analysis, content indexing for search...)
        analyzeDataSet(currentDataSetMetadata.getId(), true, emptyList());
        return currentDataSetMetadata.getId();
    }
}
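In this update path the content is first copied into a size-checked cache entry, then pumped from the cache into the content store through a pipe: a background Runnable copies cacheManager.get(cacheKey) into the PipedOutputStream while contentStore.storeAsRaw consumes the connected PipedInputStream on the calling thread. A reduced sketch of that hand-off follows; it needs Java 11+ (InputStream.transferTo, OutputStream.nullOutputStream), and CacheToStoreSketch with its storeAsRaw helper is a hypothetical stand-in, not Talend data-prep API:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CacheToStoreSketch {

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();

        // Stands in for cacheManager.get(cacheKey): the content already staged in the cache.
        InputStream cachedContent =
                new ByteArrayInputStream("id;name\n1;alice\n".getBytes(StandardCharsets.UTF_8));

        PipedInputStream toContentStore = new PipedInputStream();
        PipedOutputStream fromCache = new PipedOutputStream(toContentStore);

        // Background pump, like the Runnable handed to executor.execute(r) above.
        executor.execute(() -> {
            try (InputStream input = cachedContent; OutputStream out = fromCache) {
                input.transferTo(out); // closing 'out' is what lets the reading side reach end-of-stream
            } catch (IOException e) {
                throw new RuntimeException(e); // the real code wraps this in a TDPException
            }
        });

        storeAsRaw(toContentStore); // stands in for contentStore.storeAsRaw(updatedDataSetMetadata, toContentStore)
        executor.shutdown();
    }

    // Hypothetical consumer that, like a content store, simply drains the stream it is given.
    private static void storeAsRaw(InputStream in) throws IOException {
        try (InputStream closeable = in) {
            long stored = closeable.transferTo(OutputStream.nullOutputStream());
            System.out.println("stored " + stored + " bytes");
        }
    }
}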
Use of java.io.PipedOutputStream in project org.csstudio.display.builder by kasemir.
From the class WorkspaceResourceHelperImpl, method writeWorkspaceResource:
@Override
public OutputStream writeWorkspaceResource(final String resource_name) throws Exception {
    final IWorkspaceRoot root = ResourcesPlugin.getWorkspace().getRoot();
    final IFile file = root.getFile(new Path(resource_name));

    // The IFile API requires an InputStream for the content.
    // That content, however, doesn't exist at this time, because
    // it's about to be written to an OutputStream by the caller
    // of this function.
    // -> Provide a pipe, with a background job to read from the pipe and write the file.
    final PipedOutputStream buf = new PipedOutputStream();
    final PipedInputStream input = new PipedInputStream(buf);

    final IJobFunction writer = monitor -> {
        try {
            if (file.exists())
                file.setContents(input, true, false, monitor);
            else
                file.create(input, true, monitor);
        } catch (Exception ex) {
            logger.log(Level.WARNING, "Cannot write to " + resource_name, ex);
        }
        return Status.OK_STATUS;
    };
    Job.create("Workspace Writer", writer).schedule();

    // Provide the caller with the output end of the pipe to fill
    return buf;
}
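writeWorkspaceResource inverts the usual direction: the caller receives the writing end of the pipe as an OutputStream to fill, while a scheduled Eclipse job reads the connected PipedInputStream and persists it through the IFile API. The sketch below shows the same shape with a plain thread and java.nio.file in place of the workspace API (WorkspaceWriterSketch is an illustrative name, not part of the csstudio code base):

import java.io.IOException;
import java.io.OutputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class WorkspaceWriterSketch {

    public static void main(String[] args) throws Exception {
        Path target = Files.createTempFile("resource", ".txt");

        // Pipe plus background writer, standing in for the IJobFunction scheduled above.
        PipedOutputStream buf = new PipedOutputStream();
        PipedInputStream input = new PipedInputStream(buf);
        Thread writer = new Thread(() -> {
            try (PipedInputStream in = input) {
                Files.copy(in, target, StandardCopyOption.REPLACE_EXISTING); // like file.create(input, true, monitor)
            } catch (IOException ex) {
                ex.printStackTrace(); // the real code logs a WARNING instead
            }
        }, "Workspace Writer");
        writer.start();

        // The caller gets the output end of the pipe to fill, exactly like the returned 'buf' above.
        try (OutputStream out = buf) {
            out.write("hello workspace".getBytes(StandardCharsets.UTF_8));
        } // closing the pipe lets the background copy reach end-of-stream

        writer.join();
        System.out.println(Files.readString(target));
    }
}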