
Example 41 with OutgoingBatch

use of org.jumpmind.symmetric.model.OutgoingBatch in project symmetric-ds by JumpMind.

Class DataExtractorService, method execute.

/**
 * This is a callback method used by the NodeCommunicationService that
 * extracts an initial load in the background.
 */
public void execute(NodeCommunication nodeCommunication, RemoteNodeStatus status) {
    long ts = System.currentTimeMillis();
    List<ExtractRequest> requests = getExtractRequestsForNode(nodeCommunication.getNodeId());
    /*
     * Process extract requests until it has taken longer than 30 seconds,
     * and then allow the process to return so progress status can be seen.
     */
    for (int i = 0; i < requests.size() && (System.currentTimeMillis() - ts) <= Constants.LONG_OPERATION_THRESHOLD; i++) {
        ExtractRequest request = requests.get(i);
        Node identity = nodeService.findIdentity();
        Node targetNode = nodeService.findNode(nodeCommunication.getNodeId());
        log.info("Extracting batches for request {}. Starting at batch {}.  Ending at batch {}", new Object[] { request.getRequestId(), request.getStartBatchId(), request.getEndBatchId() });
        List<OutgoingBatch> batches = outgoingBatchService.getOutgoingBatchRange(request.getStartBatchId(), request.getEndBatchId()).getBatches();
        ProcessInfo processInfo = statisticManager.newProcessInfo(new ProcessInfoKey(identity.getNodeId(), nodeCommunication.getNodeId(), ProcessType.INITIAL_LOAD_EXTRACT_JOB));
        try {
            boolean areBatchesOk = true;
            /*
             * check to see if batches have been OK'd by another reload
             * request
             */
            for (OutgoingBatch outgoingBatch : batches) {
                if (outgoingBatch.getStatus() != Status.OK) {
                    areBatchesOk = false;
                }
            }
            if (!areBatchesOk) {
                Channel channel = configurationService.getChannel(batches.get(0).getChannelId());
                /*
                 * "Trick" the extractor to extract one reload batch, but we
                 * will split it across the N batches when writing it
                 */
                extractOutgoingBatch(processInfo, targetNode, new MultiBatchStagingWriter(identity.getNodeId(), stagingManager, batches, channel.getMaxBatchSize()), batches.get(0), false, false, ExtractMode.FOR_SYM_CLIENT);
            } else {
                log.info("Batches already had an OK status for request {}, batches {} to {}.  Not extracting", new Object[] { request.getRequestId(), request.getStartBatchId(), request.getEndBatchId() });
            }
            /*
             * re-query the batches to see if they have been OK'd while
             * extracting
             */
            List<OutgoingBatch> checkBatches = outgoingBatchService.getOutgoingBatchRange(request.getStartBatchId(), request.getEndBatchId()).getBatches();
            areBatchesOk = true;
            /*
             * check to see if batches have been OK'd by another reload
             * request while extracting
             */
            for (OutgoingBatch outgoingBatch : checkBatches) {
                if (outgoingBatch.getStatus() != Status.OK) {
                    areBatchesOk = false;
                }
            }
            ISqlTransaction transaction = null;
            try {
                transaction = sqlTemplate.startSqlTransaction();
                updateExtractRequestStatus(transaction, request.getRequestId(), ExtractStatus.OK);
                if (!areBatchesOk) {
                    for (OutgoingBatch outgoingBatch : batches) {
                        outgoingBatch.setStatus(Status.NE);
                        outgoingBatchService.updateOutgoingBatch(transaction, outgoingBatch);
                    }
                } else {
                    log.info("Batches already had an OK status for request {}, batches {} to {}.  Not updating the status to NE", new Object[] { request.getRequestId(), request.getStartBatchId(), request.getEndBatchId() });
                }
                transaction.commit();
            } catch (Error ex) {
                if (transaction != null) {
                    transaction.rollback();
                }
                throw ex;
            } catch (RuntimeException ex) {
                if (transaction != null) {
                    transaction.rollback();
                }
                throw ex;
            } finally {
                close(transaction);
            }
            processInfo.setStatus(org.jumpmind.symmetric.model.ProcessInfo.Status.OK);
        } catch (RuntimeException ex) {
            log.debug("Failed to extract batches for request {}. Starting at batch {}.  Ending at batch {}", new Object[] { request.getRequestId(), request.getStartBatchId(), request.getEndBatchId() });
            processInfo.setStatus(org.jumpmind.symmetric.model.ProcessInfo.Status.ERROR);
            throw ex;
        }
    }
}
Also used : Node(org.jumpmind.symmetric.model.Node) NodeChannel(org.jumpmind.symmetric.model.NodeChannel) Channel(org.jumpmind.symmetric.model.Channel) ProcessInfoKey(org.jumpmind.symmetric.model.ProcessInfoKey) ProcessInfo(org.jumpmind.symmetric.model.ProcessInfo) ExtractRequest(org.jumpmind.symmetric.model.ExtractRequest) TransformPoint(org.jumpmind.symmetric.io.data.transform.TransformPoint) ISqlTransaction(org.jumpmind.db.sql.ISqlTransaction) OutgoingBatch(org.jumpmind.symmetric.model.OutgoingBatch)
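The loop above caps how long one background invocation runs so progress can be reported between calls. A minimal, self-contained sketch of that time-bounded processing pattern (the class name, Consumer-based work callback, and 30-second budget are illustrative; the example above reads its budget from Constants.LONG_OPERATION_THRESHOLD):

import java.util.List;
import java.util.function.Consumer;

public class TimeBoundedWorker {

    // Illustrative budget, standing in for Constants.LONG_OPERATION_THRESHOLD.
    private static final long MAX_RUN_MILLIS = 30_000;

    /**
     * Processes as many pending items as the time budget allows, then returns
     * so the caller can surface progress and schedule another invocation.
     */
    public static <T> int processSome(List<T> pending, Consumer<T> work) {
        long start = System.currentTimeMillis();
        int processed = 0;
        for (int i = 0; i < pending.size()
                && (System.currentTimeMillis() - start) <= MAX_RUN_MILLIS; i++) {
            work.accept(pending.get(i));
            processed++;
        }
        return processed;
    }
}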

Example 42 with OutgoingBatch

use of org.jumpmind.symmetric.model.OutgoingBatch in project symmetric-ds by JumpMind.

Class AbstractService, method readAcks.

protected List<BatchAck> readAcks(List<OutgoingBatch> batches, IOutgoingWithResponseTransport transport, ITransportManager transportManager, IAcknowledgeService acknowledgeService) throws IOException {
    Set<Long> batchIds = new HashSet<Long>(batches.size());
    for (OutgoingBatch outgoingBatch : batches) {
        if (outgoingBatch.getStatus() == OutgoingBatch.Status.LD) {
            batchIds.add(outgoingBatch.getBatchId());
        }
    }
    BufferedReader reader = transport.readResponse();
    String ackString = reader.readLine();
    String ackExtendedString = reader.readLine();
    log.debug("Reading ack: {}", ackString);
    log.debug("Reading extend ack: {}", ackExtendedString);
    String line = null;
    do {
        line = reader.readLine();
        if (line != null) {
            log.info("Read another unexpected line {}", line);
        }
    } while (line != null);
    if (StringUtils.isBlank(ackString)) {
        throw new SymmetricException("Did not receive an acknowledgement for the batches sent.  " + "The 'ack string' was: '%s' and the 'extended ack string' was: '%s'", ackString, ackExtendedString);
    }
    List<BatchAck> batchAcks = transportManager.readAcknowledgement(ackString, ackExtendedString);
    long batchIdInError = Long.MAX_VALUE;
    for (BatchAck batchInfo : batchAcks) {
        batchIds.remove(batchInfo.getBatchId());
        if (batchInfo.getStatus() == Status.ER) {
            batchIdInError = batchInfo.getBatchId();
        }
        log.debug("Saving ack: {}, {}", batchInfo.getBatchId(), batchInfo.getStatus());
        acknowledgeService.ack(batchInfo);
    }
    for (Long batchId : batchIds) {
        if (batchId < batchIdInError) {
            log.error("We expected but did not receive an ack for batch {}", batchId);
        }
    }
    return batchAcks;
}
Also used : BatchAck(org.jumpmind.symmetric.model.BatchAck) SymmetricException(org.jumpmind.symmetric.SymmetricException) BufferedReader(java.io.BufferedReader) OutgoingBatch(org.jumpmind.symmetric.model.OutgoingBatch) HashSet(java.util.HashSet)
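readAcks expects two acknowledgement lines and then drains any unexpected trailing lines before failing on a blank ack string. A standalone sketch of that drain-and-validate pattern using only the JDK (the RawAck holder and the sample input are illustrative, not part of the SymmetricDS transport API):

import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;

public class AckResponseReader {

    /** Holds the two acknowledgement lines returned by the remote node. */
    public static class RawAck {
        public final String ack;
        public final String extendedAck;
        RawAck(String ack, String extendedAck) {
            this.ack = ack;
            this.extendedAck = extendedAck;
        }
    }

    /**
     * Reads the ack and extended-ack lines, then consumes (and logs) any
     * unexpected trailing lines so the response is fully drained.
     */
    public static RawAck read(BufferedReader reader) throws IOException {
        String ack = reader.readLine();
        String extendedAck = reader.readLine();
        String line;
        while ((line = reader.readLine()) != null) {
            System.out.println("Unexpected trailing line: " + line);
        }
        if (ack == null || ack.trim().isEmpty()) {
            throw new IOException("No acknowledgement received for the batches sent");
        }
        return new RawAck(ack, extendedAck);
    }

    public static void main(String[] args) throws IOException {
        BufferedReader r = new BufferedReader(new StringReader("batch=1,ok\nextended\nextra"));
        RawAck raw = read(r);
        System.out.println(raw.ack + " / " + raw.extendedAck);
    }
}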

Example 43 with OutgoingBatch

use of org.jumpmind.symmetric.model.OutgoingBatch in project symmetric-ds by JumpMind.

Class AcknowledgeService, method ack.

public BatchAckResult ack(final BatchAck batch) {
    IRegistrationService registrationService = engine.getRegistrationService();
    IStagingManager stagingManager = engine.getStagingManager();
    IOutgoingBatchService outgoingBatchService = engine.getOutgoingBatchService();
    BatchAckResult result = new BatchAckResult(batch);
    for (IAcknowledgeEventListener listener : engine.getExtensionService().getExtensionPointList(IAcknowledgeEventListener.class)) {
        listener.onAcknowledgeEvent(batch);
    }
    if (batch.getBatchId() == Constants.VIRTUAL_BATCH_FOR_REGISTRATION) {
        if (batch.getStatus() == Status.OK) {
            registrationService.markNodeAsRegistered(batch.getNodeId());
        }
    } else {
        OutgoingBatch outgoingBatch = outgoingBatchService.findOutgoingBatch(batch.getBatchId(), batch.getNodeId());
        Status status = batch.getStatus();
        if (outgoingBatch != null) {
            // only update the batch status if it has not already been marked OK or ignored
            if (outgoingBatch.getStatus() != Status.OK && outgoingBatch.getStatus() != Status.IG) {
                outgoingBatch.setStatus(status);
                outgoingBatch.setErrorFlag(batch.getStatus() == Status.ER);
            } else {
                // clearing the error flag in case the user set the batch
                // status to OK
                Status oldStatus = outgoingBatch.getStatus();
                outgoingBatch.setStatus(Status.OK);
                outgoingBatch.setErrorFlag(false);
                log.info("Batch {} for {} was set to {}.  Updating the status to OK", new Object[] { batch.getBatchId(), batch.getNodeId(), oldStatus.name() });
            }
            if (batch.isIgnored()) {
                outgoingBatch.incrementIgnoreCount();
            }
            outgoingBatch.setNetworkMillis(batch.getNetworkMillis());
            outgoingBatch.setFilterMillis(batch.getFilterMillis());
            outgoingBatch.setLoadMillis(batch.getDatabaseMillis());
            outgoingBatch.setSqlCode(batch.getSqlCode());
            outgoingBatch.setSqlState(batch.getSqlState());
            outgoingBatch.setSqlMessage(batch.getSqlMessage());
            if (batch.getStatus() == Status.ER && batch.getErrorLine() != 0) {
                List<Number> ids = sqlTemplate.query(getSql("selectDataIdSql"), new NumberMapper(), outgoingBatch.getBatchId());
                if (ids.size() >= batch.getErrorLine()) {
                    outgoingBatch.setFailedDataId(ids.get((int) batch.getErrorLine() - 1).longValue());
                }
            }
            if (status == Status.ER) {
                log.error("The outgoing batch {} failed{}", outgoingBatch.getNodeBatchId(), batch.getSqlMessage() != null ? ". " + batch.getSqlMessage() : "");
                RouterStats routerStats = engine.getStatisticManager().getRouterStatsByBatch(batch.getBatchId());
                if (routerStats != null) {
                    log.info("Router stats for batch " + outgoingBatch.getBatchId() + ": " + routerStats.toString());
                }
            } else if (!outgoingBatch.isCommonFlag()) {
                IStagedResource stagingResource = stagingManager.find(Constants.STAGING_CATEGORY_OUTGOING, outgoingBatch.getNodeId(), outgoingBatch.getBatchId());
                if (stagingResource != null) {
                    stagingResource.setState(State.DONE);
                }
            }
            outgoingBatchService.updateOutgoingBatch(outgoingBatch);
            if (status == Status.OK) {
                Channel channel = engine.getConfigurationService().getChannel(outgoingBatch.getChannelId());
                if (channel != null && channel.isFileSyncFlag()) {
                    /* Acknowledge the file_sync in case the file needs to be deleted. */
                    engine.getFileSyncService().acknowledgeFiles(outgoingBatch);
                }
                engine.getStatisticManager().removeRouterStatsByBatch(batch.getBatchId());
            }
        } else {
            log.error("Could not find batch {}-{} to acknowledge as {}", new Object[] { batch.getNodeId(), batch.getBatchId(), status.name() });
            result.setOk(false);
        }
    }
    return result;
}
Also used : Status(org.jumpmind.symmetric.model.OutgoingBatch.Status) NumberMapper(org.jumpmind.db.sql.mapper.NumberMapper) IAcknowledgeEventListener(org.jumpmind.symmetric.transport.IAcknowledgeEventListener) IRegistrationService(org.jumpmind.symmetric.service.IRegistrationService) Channel(org.jumpmind.symmetric.model.Channel) IStagingManager(org.jumpmind.symmetric.io.stage.IStagingManager) BatchAckResult(org.jumpmind.symmetric.model.BatchAckResult) RouterStats(org.jumpmind.symmetric.statistic.RouterStats) IStagedResource(org.jumpmind.symmetric.io.stage.IStagedResource) IOutgoingBatchService(org.jumpmind.symmetric.service.IOutgoingBatchService) OutgoingBatch(org.jumpmind.symmetric.model.OutgoingBatch)
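When a batch fails, the service maps the loader's one-based error line back to a data id with ids.get((int) batch.getErrorLine() - 1), guarded by a size check. A small sketch of that bounds-checked, one-based lookup (simplified types, not the SymmetricDS API):

import java.util.Arrays;
import java.util.List;

public class FailedDataResolver {

    /**
     * Returns the data id at the given one-based error line, or null when the
     * reported line falls outside the list of data ids for the batch.
     */
    public static Long resolveFailedDataId(List<Long> dataIds, long errorLine) {
        if (errorLine > 0 && dataIds.size() >= errorLine) {
            return dataIds.get((int) errorLine - 1);
        }
        return null;
    }

    public static void main(String[] args) {
        List<Long> ids = Arrays.asList(100L, 101L, 102L);
        System.out.println(resolveFailedDataId(ids, 2));  // 101
        System.out.println(resolveFailedDataId(ids, 5));  // null, line out of range
    }
}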

Example 44 with OutgoingBatch

use of org.jumpmind.symmetric.model.OutgoingBatch in project symmetric-ds by JumpMind.

Class FileSyncService, method sendFiles.

public List<OutgoingBatch> sendFiles(ProcessInfo processInfo, Node targetNode, IOutgoingTransport outgoingTransport) {
    List<OutgoingBatch> processedBatches = new ArrayList<OutgoingBatch>();
    List<OutgoingBatch> batchesToProcess = new ArrayList<OutgoingBatch>();
    List<Channel> fileSyncChannels = engine.getConfigurationService().getFileSyncChannels();
    for (Channel channel : fileSyncChannels) {
        OutgoingBatches batches = engine.getOutgoingBatchService().getOutgoingBatches(targetNode.getNodeId(), false);
        batchesToProcess.addAll(batches.filterBatchesForChannel(channel));
    }
    OutgoingBatch currentBatch = null;
    IStagingManager stagingManager = engine.getStagingManager();
    long memoryThresholdInBytes = parameterService.getLong(ParameterConstants.STREAM_TO_FILE_THRESHOLD);
    IStagedResource stagedResource = stagingManager.create(memoryThresholdInBytes, Constants.STAGING_CATEGORY_OUTGOING, processInfo.getSourceNodeId(), targetNode.getNodeId(), "filesync.zip");
    try {
        long maxBytesToSync = parameterService.getLong(ParameterConstants.TRANSPORT_MAX_BYTES_TO_SYNC);
        FileSyncZipDataWriter dataWriter = new FileSyncZipDataWriter(maxBytesToSync, this, engine.getNodeService(), stagedResource);
        try {
            for (int i = 0; i < batchesToProcess.size(); i++) {
                currentBatch = batchesToProcess.get(i);
                processInfo.incrementBatchCount();
                processInfo.setCurrentBatchId(currentBatch.getBatchId());
                ((DataExtractorService) engine.getDataExtractorService()).extractOutgoingBatch(processInfo, targetNode, dataWriter, currentBatch, false, true, DataExtractorService.ExtractMode.FOR_SYM_CLIENT);
                processedBatches.add(currentBatch);
                /*
                 * check to see if max bytes to sync has been reached and
                 * stop processing batches
                 */
                if (dataWriter.readyToSend()) {
                    break;
                }
            }
        } finally {
            dataWriter.finish();
        }
        processInfo.setStatus(ProcessInfo.Status.TRANSFERRING);
        for (int i = 0; i < batchesToProcess.size(); i++) {
            batchesToProcess.get(i).setStatus(Status.SE);
        }
        engine.getOutgoingBatchService().updateOutgoingBatches(batchesToProcess);
        try {
            if (stagedResource.exists()) {
                InputStream is = stagedResource.getInputStream();
                try {
                    OutputStream os = outgoingTransport.openStream();
                    IOUtils.copy(is, os);
                    os.flush();
                } catch (IOException e) {
                    throw new IoException(e);
                }
            }
            for (int i = 0; i < batchesToProcess.size(); i++) {
                batchesToProcess.get(i).setStatus(Status.LD);
            }
            engine.getOutgoingBatchService().updateOutgoingBatches(batchesToProcess);
        } finally {
            stagedResource.close();
        }
    } catch (RuntimeException e) {
        if (currentBatch != null) {
            engine.getStatisticManager().incrementDataExtractedErrors(currentBatch.getChannelId(), 1);
            currentBatch.setSqlMessage(getRootMessage(e));
            currentBatch.revertStatsOnError();
            if (currentBatch.getStatus() != Status.IG) {
                currentBatch.setStatus(Status.ER);
            }
            currentBatch.setErrorFlag(true);
            engine.getOutgoingBatchService().updateOutgoingBatch(currentBatch);
            if (isStreamClosedByClient(e)) {
                log.warn("Failed to extract batch {}.  The stream was closed by the client.  The error was: {}", currentBatch, getRootMessage(e));
            } else {
                log.error("Failed to extract batch {}", currentBatch, e);
            }
        } else {
            log.error("Could not log the outgoing batch status because the batch was null", e);
        }
        throw e;
    } finally {
        if (stagedResource != null) {
            stagedResource.delete();
        }
    }
    return processedBatches;
}
Also used : InputStream(java.io.InputStream) Channel(org.jumpmind.symmetric.model.Channel) OutputStream(java.io.OutputStream) ArrayList(java.util.ArrayList) IOException(java.io.IOException) FileSyncZipDataWriter(org.jumpmind.symmetric.file.FileSyncZipDataWriter) IStagingManager(org.jumpmind.symmetric.io.stage.IStagingManager) IoException(org.jumpmind.exception.IoException) IStagedResource(org.jumpmind.symmetric.io.stage.IStagedResource) OutgoingBatch(org.jumpmind.symmetric.model.OutgoingBatch) OutgoingBatches(org.jumpmind.symmetric.model.OutgoingBatches)
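sendFiles writes the zip to a staged resource first and only afterwards streams it to the transport, cleaning the staged copy up in a finally block. A self-contained sketch of that stage-then-stream pattern with plain JDK I/O (the temp-file staging and method names are stand-ins for IStagedResource and IOutgoingTransport, not the SymmetricDS interfaces):

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;

public class StageThenStream {

    /**
     * Writes the payload to a temporary staging file, then copies the staged
     * bytes to the destination stream, removing the staging file afterwards.
     */
    public static void send(byte[] payload, OutputStream destination) throws IOException {
        Path staged = Files.createTempFile("outgoing-", ".zip");
        try {
            Files.write(staged, payload);                  // stage first
            try (InputStream is = Files.newInputStream(staged)) {
                is.transferTo(destination);                // then stream to the transport
                destination.flush();
            }
        } finally {
            Files.deleteIfExists(staged);                  // always clean up the staged copy
        }
    }
}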

Example 45 with OutgoingBatch

use of org.jumpmind.symmetric.model.OutgoingBatch in project symmetric-ds by JumpMind.

Class FileSyncTest, method testManual.

protected void testManual(ISymmetricEngine rootServer, ISymmetricEngine clientServer) throws Exception {
    IFileSyncService fileSyncService = rootServer.getFileSyncService();
    FileTriggerRouter fileTriggerRouter = fileSyncService.getFileTriggerRouter("all", "server_2_client");
    fileTriggerRouter.setConflictStrategy(FileConflictStrategy.MANUAL);
    fileSyncService.saveFileTriggerRouter(fileTriggerRouter);
    pull("client");
    File allFile1 = new File(allSvrSourceDir, "manual/test.txt");
    allFile1.getParentFile().mkdirs();
    FileUtils.write(allFile1, "server value");
    File allFile1Target = new File(allClntTargetDir, allFile1.getParentFile().getName() + "/" + allFile1.getName());
    allFile1Target.getParentFile().mkdirs();
    FileUtils.write(allFile1Target, "client value");
    pullFiles();
    assertEquals("client value", FileUtils.readFileToString(allFile1Target));
    OutgoingBatches batchesInError = rootServer.getOutgoingBatchService().getOutgoingBatchErrors(10);
    List<OutgoingBatch> batches = batchesInError.getBatchesForChannel(Constants.CHANNEL_FILESYNC);
    assertEquals(1, batches.size());
    allFile1Target.delete();
    pullFiles();
    assertEquals("server value", FileUtils.readFileToString(allFile1Target));
    batchesInError = rootServer.getOutgoingBatchService().getOutgoingBatchErrors(10);
    batches = batchesInError.getBatchesForChannel(Constants.CHANNEL_FILESYNC);
    assertEquals(0, batches.size());
}
Also used : FileTriggerRouter(org.jumpmind.symmetric.model.FileTriggerRouter) IFileSyncService(org.jumpmind.symmetric.service.IFileSyncService) OutgoingBatches(org.jumpmind.symmetric.model.OutgoingBatches) OutgoingBatch(org.jumpmind.symmetric.model.OutgoingBatch) File(java.io.File)
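The test builds the expected client-side path by mirroring the source file's immediate parent directory and name under the target directory (allFile1.getParentFile().getName() + "/" + allFile1.getName()). A tiny sketch of that path mirroring with java.nio.file (directory names are illustrative):

import java.nio.file.Path;
import java.nio.file.Paths;

public class TargetPathMirror {

    /** Mirrors <parent>/<name> of the source file under the target base directory. */
    public static Path mirror(Path targetBase, Path sourceFile) {
        return targetBase.resolve(sourceFile.getParent().getFileName())
                         .resolve(sourceFile.getFileName());
    }

    public static void main(String[] args) {
        Path source = Paths.get("/tmp/server/all/manual/test.txt");
        Path target = mirror(Paths.get("/tmp/client/all"), source);
        System.out.println(target);  // /tmp/client/all/manual/test.txt
    }
}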

Aggregations

OutgoingBatch (org.jumpmind.symmetric.model.OutgoingBatch): 45
OutgoingBatches (org.jumpmind.symmetric.model.OutgoingBatches): 15
ArrayList (java.util.ArrayList): 14
Node (org.jumpmind.symmetric.model.Node): 10
ProcessInfo (org.jumpmind.symmetric.model.ProcessInfo): 10
IStagedResource (org.jumpmind.symmetric.io.stage.IStagedResource): 8
NodeChannel (org.jumpmind.symmetric.model.NodeChannel): 7
ProcessInfoKey (org.jumpmind.symmetric.model.ProcessInfoKey): 7
ProtocolDataWriter (org.jumpmind.symmetric.io.data.writer.ProtocolDataWriter): 5
BatchAck (org.jumpmind.symmetric.model.BatchAck): 5
ChannelMap (org.jumpmind.symmetric.model.ChannelMap): 5
HashSet (java.util.HashSet): 4
SymmetricException (org.jumpmind.symmetric.SymmetricException): 4
DataContext (org.jumpmind.symmetric.io.data.DataContext): 4
Channel (org.jumpmind.symmetric.model.Channel): 4
BufferedReader (java.io.BufferedReader): 3
File (java.io.File): 3
IOException (java.io.IOException): 3
List (java.util.List): 3
IoException (org.jumpmind.exception.IoException): 3