Use of org.jumpmind.symmetric.io.stage.IStagedResource in project symmetric-ds by JumpMind.
In the class AcknowledgeService, the method ack:
public BatchAckResult ack(final BatchAck batch) {
    IRegistrationService registrationService = engine.getRegistrationService();
    IStagingManager stagingManager = engine.getStagingManager();
    IOutgoingBatchService outgoingBatchService = engine.getOutgoingBatchService();
    BatchAckResult result = new BatchAckResult(batch);
    for (IAcknowledgeEventListener listener : engine.getExtensionService().getExtensionPointList(IAcknowledgeEventListener.class)) {
        listener.onAcknowledgeEvent(batch);
    }
    if (batch.getBatchId() == Constants.VIRTUAL_BATCH_FOR_REGISTRATION) {
        if (batch.getStatus() == Status.OK) {
            registrationService.markNodeAsRegistered(batch.getNodeId());
        }
    } else {
        OutgoingBatch outgoingBatch = outgoingBatchService.findOutgoingBatch(batch.getBatchId(), batch.getNodeId());
        Status status = batch.getStatus();
        if (outgoingBatch != null) {
            // Only update the status if the batch has not already been marked OK or ignored.
            if (outgoingBatch.getStatus() != Status.OK && outgoingBatch.getStatus() != Status.IG) {
                outgoingBatch.setStatus(status);
                outgoingBatch.setErrorFlag(batch.getStatus() == Status.ER);
            } else {
                // Clear the error flag in case the user set the batch status to OK.
                Status oldStatus = outgoingBatch.getStatus();
                outgoingBatch.setStatus(Status.OK);
                outgoingBatch.setErrorFlag(false);
                log.info("Batch {} for {} was set to {}. Updating the status to OK", new Object[] { batch.getBatchId(), batch.getNodeId(), oldStatus.name() });
            }
            if (batch.isIgnored()) {
                outgoingBatch.incrementIgnoreCount();
            }
            outgoingBatch.setNetworkMillis(batch.getNetworkMillis());
            outgoingBatch.setFilterMillis(batch.getFilterMillis());
            outgoingBatch.setLoadMillis(batch.getDatabaseMillis());
            outgoingBatch.setSqlCode(batch.getSqlCode());
            outgoingBatch.setSqlState(batch.getSqlState());
            outgoingBatch.setSqlMessage(batch.getSqlMessage());
            if (batch.getStatus() == Status.ER && batch.getErrorLine() != 0) {
                List<Number> ids = sqlTemplate.query(getSql("selectDataIdSql"), new NumberMapper(), outgoingBatch.getBatchId());
                if (ids.size() >= batch.getErrorLine()) {
                    outgoingBatch.setFailedDataId(ids.get((int) batch.getErrorLine() - 1).longValue());
                }
            }
            if (status == Status.ER) {
                log.error("The outgoing batch {} failed{}", outgoingBatch.getNodeBatchId(), batch.getSqlMessage() != null ? ". " + batch.getSqlMessage() : "");
                RouterStats routerStats = engine.getStatisticManager().getRouterStatsByBatch(batch.getBatchId());
                if (routerStats != null) {
                    log.info("Router stats for batch " + outgoingBatch.getBatchId() + ": " + routerStats.toString());
                }
            } else if (!outgoingBatch.isCommonFlag()) {
                IStagedResource stagingResource = stagingManager.find(Constants.STAGING_CATEGORY_OUTGOING, outgoingBatch.getNodeId(), outgoingBatch.getBatchId());
                if (stagingResource != null) {
                    stagingResource.setState(State.DONE);
                }
            }
            outgoingBatchService.updateOutgoingBatch(outgoingBatch);
            if (status == Status.OK) {
                Channel channel = engine.getConfigurationService().getChannel(outgoingBatch.getChannelId());
                if (channel != null && channel.isFileSyncFlag()) {
                    /* Acknowledge the file_sync in case the file needs to be deleted. */
                    engine.getFileSyncService().acknowledgeFiles(outgoingBatch);
                }
                engine.getStatisticManager().removeRouterStatsByBatch(batch.getBatchId());
            }
        } else {
            log.error("Could not find batch {}-{} to acknowledge as {}", new Object[] { batch.getNodeId(), batch.getBatchId(), status.name() });
            result.setOk(false);
        }
    }
    return result;
}
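The only IStagedResource interaction in ack() is flipping the staged outgoing payload to DONE once the remote node has confirmed a non-common batch. A minimal sketch of that pattern, assuming an already-initialized ISymmetricEngine named engine; the helper method name is illustrative and not part of the project:

void markStagedBatchDone(ISymmetricEngine engine, String nodeId, long batchId) {
    // Look up the staged artifact for this node/batch in the outgoing staging area.
    IStagingManager stagingManager = engine.getStagingManager();
    IStagedResource resource = stagingManager.find(Constants.STAGING_CATEGORY_OUTGOING, nodeId, batchId);
    if (resource != null) {
        // DONE marks the resource as fully delivered so the staging area can purge it later.
        resource.setState(State.DONE);
    }
}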
Use of org.jumpmind.symmetric.io.stage.IStagedResource in project symmetric-ds by JumpMind.
In the class FileSyncService, the method sendFiles:
public List<OutgoingBatch> sendFiles(ProcessInfo processInfo, Node targetNode, IOutgoingTransport outgoingTransport) {
    List<OutgoingBatch> processedBatches = new ArrayList<OutgoingBatch>();
    List<OutgoingBatch> batchesToProcess = new ArrayList<OutgoingBatch>();
    List<Channel> fileSyncChannels = engine.getConfigurationService().getFileSyncChannels();
    for (Channel channel : fileSyncChannels) {
        OutgoingBatches batches = engine.getOutgoingBatchService().getOutgoingBatches(targetNode.getNodeId(), false);
        batchesToProcess.addAll(batches.filterBatchesForChannel(channel));
    }
    OutgoingBatch currentBatch = null;
    IStagingManager stagingManager = engine.getStagingManager();
    long memoryThresholdInBytes = parameterService.getLong(ParameterConstants.STREAM_TO_FILE_THRESHOLD);
    IStagedResource stagedResource = stagingManager.create(memoryThresholdInBytes, Constants.STAGING_CATEGORY_OUTGOING, processInfo.getSourceNodeId(), targetNode.getNodeId(), "filesync.zip");
    try {
        long maxBytesToSync = parameterService.getLong(ParameterConstants.TRANSPORT_MAX_BYTES_TO_SYNC);
        FileSyncZipDataWriter dataWriter = new FileSyncZipDataWriter(maxBytesToSync, this, engine.getNodeService(), stagedResource);
        try {
            for (int i = 0; i < batchesToProcess.size(); i++) {
                currentBatch = batchesToProcess.get(i);
                processInfo.incrementBatchCount();
                processInfo.setCurrentBatchId(currentBatch.getBatchId());
                ((DataExtractorService) engine.getDataExtractorService()).extractOutgoingBatch(processInfo, targetNode, dataWriter, currentBatch, false, true, DataExtractorService.ExtractMode.FOR_SYM_CLIENT);
                processedBatches.add(currentBatch);
                /*
                 * Check to see if max bytes to sync has been reached and
                 * stop processing batches.
                 */
                if (dataWriter.readyToSend()) {
                    break;
                }
            }
        } finally {
            dataWriter.finish();
        }
        processInfo.setStatus(ProcessInfo.Status.TRANSFERRING);
        for (int i = 0; i < batchesToProcess.size(); i++) {
            batchesToProcess.get(i).setStatus(Status.SE);
        }
        engine.getOutgoingBatchService().updateOutgoingBatches(batchesToProcess);
        try {
            if (stagedResource.exists()) {
                InputStream is = stagedResource.getInputStream();
                try {
                    OutputStream os = outgoingTransport.openStream();
                    IOUtils.copy(is, os);
                    os.flush();
                } catch (IOException e) {
                    throw new IoException(e);
                }
            }
            for (int i = 0; i < batchesToProcess.size(); i++) {
                batchesToProcess.get(i).setStatus(Status.LD);
            }
            engine.getOutgoingBatchService().updateOutgoingBatches(batchesToProcess);
        } finally {
            stagedResource.close();
        }
    } catch (RuntimeException e) {
        if (currentBatch != null) {
            engine.getStatisticManager().incrementDataExtractedErrors(currentBatch.getChannelId(), 1);
            currentBatch.setSqlMessage(getRootMessage(e));
            currentBatch.revertStatsOnError();
            if (currentBatch.getStatus() != Status.IG) {
                currentBatch.setStatus(Status.ER);
            }
            currentBatch.setErrorFlag(true);
            engine.getOutgoingBatchService().updateOutgoingBatch(currentBatch);
            if (isStreamClosedByClient(e)) {
                log.warn("Failed to extract batch {}. The stream was closed by the client. The error was: {}", currentBatch, getRootMessage(e));
            } else {
                log.error("Failed to extract batch {}", currentBatch, e);
            }
        } else {
            log.error("Could not log the outgoing batch status because the batch was null", e);
        }
        throw e;
    } finally {
        if (stagedResource != null) {
            stagedResource.delete();
        }
    }
    return processedBatches;
}
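sendFiles() drives the full IStagedResource lifecycle: the zip is created in staging, streamed back out to the transport, and always deleted in the outer finally block. A condensed sketch of that lifecycle, assuming an initialized IStagingManager (stagingManager), path segments sourceNodeId/targetNodeId, and an open transport OutputStream (os) are in scope; IOUtils.closeQuietly is from Apache Commons IO and is added here for tidiness rather than taken from the method above:

IStagedResource resource = stagingManager.create(memoryThresholdInBytes, // spill from memory to disk above this size
        Constants.STAGING_CATEGORY_OUTGOING, sourceNodeId, targetNodeId, "filesync.zip");
try {
    // ... write the file-sync zip through a writer backed by the staged resource ...
    if (resource.exists()) {
        InputStream is = resource.getInputStream();
        try {
            IOUtils.copy(is, os); // stream the staged zip to the outgoing transport
            os.flush();
        } finally {
            IOUtils.closeQuietly(is);
        }
    }
} finally {
    resource.close();  // release any open readers/writers
    resource.delete(); // the temporary zip is always removed
}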