Example use of org.jumpmind.symmetric.io.stage.IStagingManager in the project symmetric-ds by JumpMind.
The sendFiles method of the FileSyncService class.
/**
 * Extracts pending file sync batches for the target node into a zip in the staging
 * area (re-using a previously staged extraction when one exists) and streams that
 * zip to the target node over the given transport.
 *
 * @param processInfo       progress tracker; batch count, current batch id, and
 *                          status are updated as batches are processed
 * @param targetNode        node the file sync batches are being sent to
 * @param outgoingTransport transport whose output stream receives the staged zip
 * @return the batches actually extracted and streamed by this call; may be fewer
 *         than the batches pending when the byte limit is reached or a batch is
 *         still waiting on extraction
 */
public List<OutgoingBatch> sendFiles(ProcessInfo processInfo, Node targetNode, IOutgoingTransport outgoingTransport) {
    List<OutgoingBatch> batchesToProcess = getBatchesToProcess(targetNode);
    if (batchesToProcess.isEmpty()) {
        return batchesToProcess;
    }
    IStagingManager stagingManager = engine.getStagingManager();
    long maxBytesToSync = parameterService.getLong(ParameterConstants.TRANSPORT_MAX_BYTES_TO_SYNC);
    List<OutgoingBatch> processedBatches = new ArrayList<OutgoingBatch>();
    OutgoingBatch currentBatch = null;
    IStagedResource stagedResource = null;
    IStagedResource previouslyStagedResource = null;
    FileSyncZipDataWriter dataWriter = null;
    try {
        long syncedBytes = 0;
        try {
            for (int i = 0; i < batchesToProcess.size(); i++) {
                currentBatch = batchesToProcess.get(i);
                previouslyStagedResource = getStagedResource(currentBatch);
                if (isWaitForExtractionRequired(currentBatch, previouslyStagedResource)
                        || isFlushBatchesRequired(currentBatch, processedBatches, previouslyStagedResource)) {
                    // previously staged batch will have to wait for the next push/pull.
                    break;
                }
                if (previouslyStagedResource != null) {
                    log.debug("Using existing extraction for file sync batch {}", currentBatch.getNodeBatchId());
                    stagedResource = previouslyStagedResource;
                } else {
                    if (dataWriter == null) {
                        // Lazily create a single staging zip that accumulates all
                        // freshly extracted batches for this send.
                        stagedResource = stagingManager.create(Constants.STAGING_CATEGORY_OUTGOING,
                                processInfo.getSourceNodeId(), targetNode.getNodeId(), "filesync.zip");
                        dataWriter = new FileSyncZipDataWriter(maxBytesToSync, this, engine.getNodeService(), stagedResource);
                    }
                    log.debug("Extracting batch {} for filesync.", currentBatch.getNodeBatchId());
                    ((DataExtractorService) engine.getDataExtractorService()).extractOutgoingBatch(processInfo,
                            targetNode, dataWriter, currentBatch, false, true,
                            DataExtractorService.ExtractMode.FOR_SYM_CLIENT);
                }
                processedBatches.add(currentBatch);
                syncedBytes += stagedResource.getSize();
                processInfo.incrementBatchCount();
                processInfo.setCurrentBatchId(currentBatch.getBatchId());
                log.debug("Processed file sync batch {}. syncedBytes={}, maxBytesToSync={}", currentBatch, syncedBytes, maxBytesToSync);
                /*
                 * A previously staged extraction is sent by itself; otherwise stop
                 * accumulating once the writer reports that the max bytes to sync
                 * has been reached.
                 */
                if (previouslyStagedResource != null || dataWriter.readyToSend()) {
                    break;
                }
            }
        } finally {
            if (dataWriter != null) {
                dataWriter.finish();
            }
        }
        processInfo.setStatus(ProcessInfo.Status.TRANSFERRING);
        for (OutgoingBatch outgoingBatch : processedBatches) {
            outgoingBatch.setStatus(Status.SE);
        }
        engine.getOutgoingBatchService().updateOutgoingBatches(processedBatches);
        try {
            if (stagedResource != null && stagedResource.exists()) {
                InputStream is = stagedResource.getInputStream();
                try {
                    OutputStream os = outgoingTransport.openStream();
                    IOUtils.copy(is, os);
                    os.flush();
                } catch (IOException e) {
                    throw new IoException(e);
                }
                // NOTE(review): 'is' is not closed here; stagedResource.close() in
                // the finally below is presumed to release the underlying stream.
            }
            /*
             * Only the batches that were actually extracted and streamed advance
             * to LD.  Batches skipped by the breaks above (still waiting on
             * extraction, or past the byte limit) were never sent and must stay
             * in their current status for the next push/pull.
             */
            for (OutgoingBatch outgoingBatch : processedBatches) {
                outgoingBatch.setStatus(Status.LD);
            }
            engine.getOutgoingBatchService().updateOutgoingBatches(processedBatches);
        } finally {
            if (stagedResource != null) {
                stagedResource.close();
            }
        }
    } catch (RuntimeException e) {
        if (stagedResource == previouslyStagedResource) {
            // on error, don't let the previously staged load extract be deleted;
            // nulling the reference skips the delete in the outer finally.
            stagedResource = null;
        }
        if (currentBatch != null) {
            engine.getStatisticManager().incrementDataExtractedErrors(currentBatch.getChannelId(), 1);
            currentBatch.setSqlMessage(getRootMessage(e));
            currentBatch.revertStatsOnError();
            if (currentBatch.getStatus() != Status.IG) {
                currentBatch.setStatus(Status.ER);
            }
            currentBatch.setErrorFlag(true);
            engine.getOutgoingBatchService().updateOutgoingBatch(currentBatch);
            if (isStreamClosedByClient(e)) {
                log.warn("Failed to extract file sync batch {}. The stream was closed by the client. The error was: {}", currentBatch, getRootMessage(e));
            } else {
                log.error("Failed to extract file sync batch " + currentBatch, e);
            }
        } else {
            log.error("Could not log the outgoing batch status because the batch was null", e);
        }
        throw e;
    } finally {
        // Clean up the freshly created staging zip.  A previously staged extract
        // was nulled out above on error so it survives for the next attempt.
        if (stagedResource != null) {
            stagedResource.delete();
        }
    }
    return processedBatches;
}
Aggregations