
Example 1 with IStagedResource

Use of org.jumpmind.symmetric.io.stage.IStagedResource in project symmetric-ds by JumpMind.

From the class FileSyncExtractorService, method buildMultiBatchStagingWriter, which stages each file-sync batch as a zip file:

@Override
protected MultiBatchStagingWriter buildMultiBatchStagingWriter(ExtractRequest request, final Node sourceNode, final Node targetNode, List<OutgoingBatch> batches, ProcessInfo processInfo, Channel channel) {
    MultiBatchStagingWriter multiBatchStagingWriter = new MultiBatchStagingWriter(this, request, sourceNode.getNodeId(), stagingManager, batches, channel.getMaxBatchSize(), processInfo) {

        @Override
        protected IDataWriter buildWriter(long memoryThresholdInBytes) {
            IStagedResource stagedResource = stagingManager.create(fileSyncService.getStagingPathComponents(outgoingBatch));
            log.info("Extracting file sync batch {} to resource '{}'", outgoingBatch.getNodeBatchId(), stagedResource);
            long maxBytesToSync = parameterService.getLong(ParameterConstants.TRANSPORT_MAX_BYTES_TO_SYNC);
            FileSyncZipDataWriter fileSyncWriter = new FileSyncZipDataWriter(maxBytesToSync, fileSyncService, nodeService, stagedResource) {

                @Override
                public void close() {
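                    // close() is routed to finish() so that closing this writer finalizes the staged zip output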
                    super.finish();
                }
            };
            return fileSyncWriter;
        }
    };
    return multiBatchStagingWriter;
}
Also used: FileSyncZipDataWriter(org.jumpmind.symmetric.file.FileSyncZipDataWriter) IStagedResource(org.jumpmind.symmetric.io.stage.IStagedResource)
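
To make the staging API concrete, here is a minimal sketch of the write lifecycle an IStagedResource goes through, using only calls that appear in these examples (create, getWriter, close, setState). The helper name stageBatchPayload, its arguments, and the threshold of 0 (assumed here to bypass the in-memory buffer) are illustrative, not part of symmetric-ds:

import java.io.BufferedWriter;
import java.io.IOException;

import org.jumpmind.symmetric.io.stage.IStagedResource;
import org.jumpmind.symmetric.io.stage.IStagedResource.State;
import org.jumpmind.symmetric.io.stage.IStagingManager;

public class StagingWriteSketch {

    // Hypothetical helper: stage a payload and mark it DONE so readers can pick it up.
    public static void stageBatchPayload(IStagingManager stagingManager, String category,
            String location, long batchId, String payload) throws IOException {
        IStagedResource resource = stagingManager.create(category, location, batchId);
        BufferedWriter writer = resource.getWriter(0); // assumed: a 0 threshold spools straight to disk
        try {
            writer.write(payload);
        } finally {
            resource.close(); // releases the underlying writer
        }
        resource.setState(State.DONE); // consumers only see resources once they reach DONE
    }
}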

Example 2 with IStagedResource

Use of org.jumpmind.symmetric.io.stage.IStagedResource in project symmetric-ds by JumpMind.

From the class DataExtractorService, method extract, which extracts a set of batches on a background executor while keeping the client connection alive:

protected List<OutgoingBatch> extract(final ProcessInfo processInfo, final Node targetNode, final List<OutgoingBatch> activeBatches, final IDataWriter dataWriter, final BufferedWriter writer, final ExtractMode mode) {
    if (activeBatches.size() > 0) {
        final List<OutgoingBatch> processedBatches = new ArrayList<OutgoingBatch>(activeBatches.size());
        Set<String> channelsProcessed = new HashSet<String>();
        long batchesSelectedAtMs = System.currentTimeMillis();
        OutgoingBatch currentBatch = null;
        ExecutorService executor = null;
        try {
            final boolean streamToFileEnabled = parameterService.is(ParameterConstants.STREAM_TO_FILE_ENABLED);
            long keepAliveMillis = parameterService.getLong(ParameterConstants.DATA_LOADER_SEND_ACK_KEEPALIVE);
            Node sourceNode = nodeService.findIdentity();
            final FutureExtractStatus status = new FutureExtractStatus();
            executor = Executors.newFixedThreadPool(1, new CustomizableThreadFactory(String.format("dataextractor-%s-%s", targetNode.getNodeGroupId(), targetNode.getNodeId())));
            List<Future<FutureOutgoingBatch>> futures = new ArrayList<Future<FutureOutgoingBatch>>();
            processInfo.setBatchCount(activeBatches.size());
            for (int i = 0; i < activeBatches.size(); i++) {
                currentBatch = activeBatches.get(i);
                processInfo.setCurrentLoadId(currentBatch.getLoadId());
                processInfo.setDataCount(currentBatch.getDataEventCount());
                processInfo.setCurrentBatchId(currentBatch.getBatchId());
                channelsProcessed.add(currentBatch.getChannelId());
                currentBatch = requeryIfEnoughTimeHasPassed(batchesSelectedAtMs, currentBatch);
                processInfo.setStatus(ProcessInfo.Status.EXTRACTING);
                final OutgoingBatch extractBatch = currentBatch;
                Callable<FutureOutgoingBatch> callable = new Callable<FutureOutgoingBatch>() {

                    public FutureOutgoingBatch call() throws Exception {
                        return extractBatch(extractBatch, status, processInfo, targetNode, dataWriter, mode, activeBatches);
                    }
                };
                if (status.shouldExtractSkip) {
                    break;
                }
                futures.add(executor.submit(callable));
            }
            if (parameterService.is(ParameterConstants.SYNCHRONIZE_ALL_JOBS)) {
                executor.shutdown();
                boolean isProcessed = false;
                while (!isProcessed) {
                    try {
                        isProcessed = executor.awaitTermination(keepAliveMillis, TimeUnit.MILLISECONDS);
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                    if (!isProcessed) {
                        writeKeepAliveAck(writer, sourceNode, streamToFileEnabled);
                    }
                }
            }
            Iterator<OutgoingBatch> activeBatchIter = activeBatches.iterator();
            for (Future<FutureOutgoingBatch> future : futures) {
                currentBatch = activeBatchIter.next();
                boolean isProcessed = false;
                while (!isProcessed) {
                    try {
                        FutureOutgoingBatch extractBatch = future.get(keepAliveMillis, TimeUnit.MILLISECONDS);
                        currentBatch = extractBatch.getOutgoingBatch();
                        if (extractBatch.isExtractSkipped) {
                            break;
                        }
                        if (streamToFileEnabled || mode == ExtractMode.FOR_PAYLOAD_CLIENT) {
                            processInfo.setStatus(ProcessInfo.Status.TRANSFERRING);
                            processInfo.setCurrentLoadId(currentBatch.getLoadId());
                            boolean isRetry = extractBatch.isRetry() && extractBatch.getOutgoingBatch().getStatus() != OutgoingBatch.Status.IG;
                            currentBatch = sendOutgoingBatch(processInfo, targetNode, currentBatch, isRetry, dataWriter, writer, mode);
                        }
                        processedBatches.add(currentBatch);
                        isProcessed = true;
                        if (currentBatch.getStatus() != Status.OK) {
                            currentBatch.setLoadCount(currentBatch.getLoadCount() + 1);
                            changeBatchStatus(Status.LD, currentBatch, mode);
                        }
                    } catch (ExecutionException e) {
                        if (isNotBlank(e.getMessage()) && e.getMessage().contains("string truncation")) {
                            throw new RuntimeException("There is a good chance that the truncation error you are receiving is because contains_big_lobs on the '" + currentBatch.getChannelId() + "' channel needs to be turned on.", e.getCause() != null ? e.getCause() : e);
                        }
                        throw new RuntimeException(e.getCause() != null ? e.getCause() : e);
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    } catch (TimeoutException e) {
                        writeKeepAliveAck(writer, sourceNode, streamToFileEnabled);
                    }
                }
            }
        } catch (RuntimeException e) {
            SQLException se = unwrapSqlException(e);
            if (currentBatch != null) {
                try {
                    /* Reread batch in case the ignore flag has been set */
                    currentBatch = outgoingBatchService.findOutgoingBatch(currentBatch.getBatchId(), currentBatch.getNodeId());
                    statisticManager.incrementDataExtractedErrors(currentBatch.getChannelId(), 1);
                    if (se != null) {
                        currentBatch.setSqlState(se.getSQLState());
                        currentBatch.setSqlCode(se.getErrorCode());
                        currentBatch.setSqlMessage(se.getMessage());
                    } else {
                        currentBatch.setSqlMessage(getRootMessage(e));
                    }
                    currentBatch.revertStatsOnError();
                    if (currentBatch.getStatus() != Status.IG && currentBatch.getStatus() != Status.OK) {
                        currentBatch.setStatus(Status.ER);
                        currentBatch.setErrorFlag(true);
                    }
                    outgoingBatchService.updateOutgoingBatch(currentBatch);
                } catch (Exception ex) {
                    log.error("Failed to update the outgoing batch status for failed batch {}", currentBatch, ex);
                } finally {
                    if (!isStreamClosedByClient(e)) {
                        if (e instanceof ProtocolException) {
                            IStagedResource resource = getStagedResource(currentBatch);
                            if (resource != null) {
                                resource.delete();
                            }
                        }
                        if (e.getCause() instanceof InterruptedException) {
                            log.info("Extract of batch {} was interrupted", currentBatch);
                        } else {
                            log.error("Failed to extract batch {}", currentBatch, e);
                        }
                    }
                    processInfo.setStatus(ProcessInfo.Status.ERROR);
                }
            } else {
                log.error("Could not log the outgoing batch status because the batch was null", e);
            }
        } finally {
            if (executor != null) {
                executor.shutdown();
            }
        }
        // Next, we update the node channel controls to the
        // current timestamp
        Calendar now = Calendar.getInstance();
        for (String channelProcessed : channelsProcessed) {
            NodeChannel nodeChannel = configurationService.getNodeChannel(channelProcessed, targetNode.getNodeId(), false);
            if (nodeChannel != null && nodeChannel.getExtractPeriodMillis() > 0) {
                nodeChannel.setLastExtractTime(now.getTime());
                configurationService.updateLastExtractTime(nodeChannel);
            }
        }
        return processedBatches;
    } else {
        return Collections.emptyList();
    }
}
Also used: CustomizableThreadFactory(org.jumpmind.util.CustomizableThreadFactory) SQLException(java.sql.SQLException) Node(org.jumpmind.symmetric.model.Node) ArrayList(java.util.ArrayList) Callable(java.util.concurrent.Callable) IStagedResource(org.jumpmind.symmetric.io.stage.IStagedResource) OutgoingBatch(org.jumpmind.symmetric.model.OutgoingBatch) ExecutionException(java.util.concurrent.ExecutionException) NodeChannel(org.jumpmind.symmetric.model.NodeChannel) HashSet(java.util.HashSet) TimeoutException(java.util.concurrent.TimeoutException) ProtocolException(org.jumpmind.symmetric.io.data.ProtocolException) Calendar(java.util.Calendar) TransformPoint(org.jumpmind.symmetric.io.data.transform.TransformPoint) CancellationException(java.util.concurrent.CancellationException) SymmetricException(org.jumpmind.symmetric.SymmetricException) IOException(java.io.IOException) IoException(org.jumpmind.exception.IoException) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future)
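
The important pattern in this method is the keep-alive loop: while an extract future is still running, each Future.get timeout becomes a heartbeat to the waiting client instead of a failure. A stripped-down sketch of that pattern; waitWithKeepAlive and the Runnable heartbeat are hypothetical stand-ins for the service's writeKeepAliveAck plumbing:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class KeepAliveSketch {

    // Block on a future; on every timeout, emit a heartbeat so the remote
    // end keeps the connection open while the work is still running.
    public static <T> T waitWithKeepAlive(Future<T> future, long keepAliveMillis, Runnable heartbeat)
            throws InterruptedException, ExecutionException {
        while (true) {
            try {
                return future.get(keepAliveMillis, TimeUnit.MILLISECONDS);
            } catch (TimeoutException stillRunning) {
                heartbeat.run(); // not an error: the extract simply has not finished yet
            }
        }
    }
}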

Example 3 with IStagedResource

Use of org.jumpmind.symmetric.io.stage.IStagedResource in project symmetric-ds by JumpMind.

From the class DataExtractorService, method sendOutgoingBatch, which transfers an already staged batch to the target node:

protected OutgoingBatch sendOutgoingBatch(ProcessInfo processInfo, Node targetNode, OutgoingBatch currentBatch, boolean isRetry, IDataWriter dataWriter, BufferedWriter writer, ExtractMode mode) {
    if (currentBatch.getStatus() != Status.OK || ExtractMode.EXTRACT_ONLY == mode) {
        currentBatch.setSentCount(currentBatch.getSentCount() + 1);
        long ts = System.currentTimeMillis();
        IStagedResource extractedBatch = getStagedResource(currentBatch);
        if (extractedBatch != null) {
            if (mode == ExtractMode.FOR_SYM_CLIENT && writer != null) {
                if (!isRetry && parameterService.is(ParameterConstants.OUTGOING_BATCH_COPY_TO_INCOMING_STAGING) && !parameterService.is(ParameterConstants.NODE_OFFLINE, false)) {
                    ISymmetricEngine targetEngine = AbstractSymmetricEngine.findEngineByUrl(targetNode.getSyncUrl());
                    if (targetEngine != null && extractedBatch.isFileResource()) {
                        try {
                            Node sourceNode = nodeService.findIdentity();
                            IStagedResource targetResource = targetEngine.getStagingManager().create(Constants.STAGING_CATEGORY_INCOMING, Batch.getStagedLocation(false, sourceNode.getNodeId()), currentBatch.getBatchId());
                            SymmetricUtils.copyFile(extractedBatch.getFile(), targetResource.getFile());
                            targetResource.setState(State.DONE);
                            isRetry = true;
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                }
                Channel channel = configurationService.getChannel(currentBatch.getChannelId());
                DataContext ctx = new DataContext();
                transferFromStaging(mode, BatchType.EXTRACT, currentBatch, isRetry, extractedBatch, writer, ctx, channel.getMaxKBytesPerSecond());
            } else {
                IDataReader dataReader = new ProtocolDataReader(BatchType.EXTRACT, currentBatch.getNodeId(), extractedBatch);
                DataContext ctx = new DataContext();
                ctx.put(Constants.DATA_CONTEXT_TARGET_NODE, targetNode);
                ctx.put(Constants.DATA_CONTEXT_SOURCE_NODE, nodeService.findIdentity());
                new DataProcessor(dataReader, new ProcessInfoDataWriter(dataWriter, processInfo), "send from stage").process(ctx);
                if (dataReader.getStatistics().size() > 0) {
                    Statistics stats = dataReader.getStatistics().values().iterator().next();
                    statisticManager.incrementDataSent(currentBatch.getChannelId(), stats.get(DataReaderStatistics.READ_RECORD_COUNT));
                    long byteCount = stats.get(DataReaderStatistics.READ_BYTE_COUNT);
                    statisticManager.incrementDataBytesSent(currentBatch.getChannelId(), byteCount);
                } else {
                    log.warn("Could not find recorded statistics for batch {}", currentBatch.getNodeBatchId());
                }
            }
        } else {
            throw new IllegalStateException(String.format("Could not find the staged resource for batch %s", currentBatch.getNodeBatchId()));
        }
        currentBatch = requeryIfEnoughTimeHasPassed(ts, currentBatch);
    }
    return currentBatch;
}
Also used: IDataReader(org.jumpmind.symmetric.io.data.IDataReader) Node(org.jumpmind.symmetric.model.Node) NodeChannel(org.jumpmind.symmetric.model.NodeChannel) Channel(org.jumpmind.symmetric.model.Channel) ISymmetricEngine(org.jumpmind.symmetric.ISymmetricEngine) DataProcessor(org.jumpmind.symmetric.io.data.DataProcessor) Statistics(org.jumpmind.util.Statistics) DataReaderStatistics(org.jumpmind.symmetric.io.data.reader.DataReaderStatistics) CancellationException(java.util.concurrent.CancellationException) SymmetricException(org.jumpmind.symmetric.SymmetricException) SQLException(java.sql.SQLException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) ProtocolException(org.jumpmind.symmetric.io.data.ProtocolException) TimeoutException(java.util.concurrent.TimeoutException) IoException(org.jumpmind.exception.IoException) DataContext(org.jumpmind.symmetric.io.data.DataContext) IStagedResource(org.jumpmind.symmetric.io.stage.IStagedResource) ProtocolDataReader(org.jumpmind.symmetric.io.data.reader.ProtocolDataReader) ProcessInfoDataWriter(org.jumpmind.symmetric.model.ProcessInfoDataWriter)
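
The else branch is the general recipe for replaying a staged batch: wrap the IStagedResource in a ProtocolDataReader and pump it through a DataProcessor into any IDataWriter. A sketch of just that wiring; replayStagedBatch is a hypothetical name, and all arguments are assumed to be supplied by the caller:

import org.jumpmind.symmetric.common.Constants;
import org.jumpmind.symmetric.io.data.Batch.BatchType;
import org.jumpmind.symmetric.io.data.DataContext;
import org.jumpmind.symmetric.io.data.DataProcessor;
import org.jumpmind.symmetric.io.data.IDataReader;
import org.jumpmind.symmetric.io.data.IDataWriter;
import org.jumpmind.symmetric.io.data.reader.ProtocolDataReader;
import org.jumpmind.symmetric.io.stage.IStagedResource;
import org.jumpmind.symmetric.model.Node;

public class ReplaySketch {

    // Replay a staged batch into any IDataWriter, mirroring the wiring above.
    // nodeId is the batch's node id, as in the example.
    public static void replayStagedBatch(IStagedResource stagedBatch, String nodeId,
            IDataWriter target, Node sourceNode, Node targetNode) {
        IDataReader reader = new ProtocolDataReader(BatchType.EXTRACT, nodeId, stagedBatch);
        DataContext ctx = new DataContext();
        ctx.put(Constants.DATA_CONTEXT_SOURCE_NODE, sourceNode);
        ctx.put(Constants.DATA_CONTEXT_TARGET_NODE, targetNode);
        new DataProcessor(reader, target, "replay from stage").process(ctx);
    }
}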

Example 4 with IStagedResource

Use of org.jumpmind.symmetric.io.stage.IStagedResource in project symmetric-ds by JumpMind.

From the class MultiBatchStagingWriter, method checkSend, which marks a staged batch ready to send as soon as staging completes, unless it was ignored:

public void checkSend() {
    if (this.dataExtractorService.parameterService.is(ParameterConstants.INITIAL_LOAD_EXTRACT_AND_SEND_WHEN_STAGED, false) && this.outgoingBatch.getStatus() != Status.OK) {
        IStagedResource resource = this.dataExtractorService.getStagedResource(outgoingBatch);
        if (resource != null) {
            resource.setState(State.DONE);
        }
        OutgoingBatch batchFromDatabase = this.dataExtractorService.outgoingBatchService.findOutgoingBatch(outgoingBatch.getBatchId(), outgoingBatch.getNodeId());
        if (batchFromDatabase.getIgnoreCount() == 0) {
            this.outgoingBatch.setStatus(Status.NE);
        } else {
            cancelled = true;
            throw new CancellationException();
        }
    }
    this.dataExtractorService.outgoingBatchService.updateOutgoingBatch(this.outgoingBatch);
}
Also used: CancellationException(java.util.concurrent.CancellationException) IStagedResource(org.jumpmind.symmetric.io.stage.IStagedResource) OutgoingBatch(org.jumpmind.symmetric.model.OutgoingBatch)
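
The subtle point is the re-read of the batch from the database: an operator may have set the ignore flag while the batch was being staged, and only the database copy reflects that. A sketch of the check in isolation; cancelIfIgnored is a hypothetical name:

import java.util.concurrent.CancellationException;

import org.jumpmind.symmetric.model.OutgoingBatch;
import org.jumpmind.symmetric.service.IOutgoingBatchService;

public class IgnoreCheckSketch {

    // Throw if an operator ignored the batch while it was being extracted to staging.
    public static void cancelIfIgnored(IOutgoingBatchService outgoingBatchService, OutgoingBatch batch) {
        OutgoingBatch fromDatabase = outgoingBatchService.findOutgoingBatch(batch.getBatchId(), batch.getNodeId());
        if (fromDatabase.getIgnoreCount() > 0) {
            // mirrors checkSend: an ignored batch aborts the extract with a CancellationException
            throw new CancellationException("batch " + batch.getNodeBatchId() + " was ignored while staging");
        }
    }
}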

Example 5 with IStagedResource

Use of org.jumpmind.symmetric.io.stage.IStagedResource in project symmetric-ds by JumpMind.

From the class SimpleStagingDataWriter, method process, which streams protocol CSV lines from the network into staged batch resources:

public void process() throws IOException {
    String catalogLine = null, schemaLine = null, nodeLine = null, binaryLine = null, channelLine = null;
    TableLine tableLine = null;
    Map<TableLine, TableLine> syncTableLines = new HashMap<TableLine, TableLine>();
    Map<TableLine, TableLine> batchTableLines = new HashMap<TableLine, TableLine>();
    IStagedResource resource = null;
    String line = null;
    long startTime = System.currentTimeMillis(), ts = startTime, lineCount = 0;
    while (reader.readRecord()) {
        line = reader.getRawRecord();
        if (line.startsWith(CsvConstants.CATALOG)) {
            catalogLine = line;
            writeLine(line);
        } else if (line.startsWith(CsvConstants.SCHEMA)) {
            schemaLine = line;
            writeLine(line);
        } else if (line.startsWith(CsvConstants.TABLE)) {
            tableLine = new TableLine(catalogLine, schemaLine, line);
            TableLine batchTableLine = batchTableLines.get(tableLine);
            if (batchTableLine != null) {
                tableLine = batchTableLine;
                writeLine(line);
            } else {
                TableLine syncTableLine = syncTableLines.get(tableLine);
                if (syncTableLine != null) {
                    tableLine = syncTableLine;
                    writeLine(tableLine.catalogLine);
                    writeLine(tableLine.schemaLine);
                    writeLine(line);
                    writeLine(tableLine.keysLine);
                    writeLine(tableLine.columnsLine);
                } else {
                    syncTableLines.put(tableLine, tableLine);
                    batchTableLines.put(tableLine, tableLine);
                    writeLine(line);
                }
            }
        } else if (line.startsWith(CsvConstants.KEYS)) {
            tableLine.keysLine = line;
            writeLine(line);
        } else if (line.startsWith(CsvConstants.COLUMNS)) {
            tableLine.columnsLine = line;
            writeLine(line);
        } else if (line.startsWith(CsvConstants.BATCH)) {
            batch = new Batch(batchType, Long.parseLong(getArgLine(line)), getArgLine(channelLine), getBinaryEncoding(binaryLine), getArgLine(nodeLine), targetNodeId, false);
            String location = batch.getStagedLocation();
            resource = stagingManager.find(category, location, batch.getBatchId());
            if (resource == null || resource.getState() == State.DONE) {
                log.debug("Creating staged resource for batch {}", batch.getNodeBatchId());
                resource = stagingManager.create(category, location, batch.getBatchId());
            }
            writer = resource.getWriter(memoryThresholdInBytes);
            writeLine(nodeLine);
            writeLine(binaryLine);
            writeLine(channelLine);
            writeLine(line);
            if (listeners != null) {
                for (IProtocolDataWriterListener listener : listeners) {
                    listener.start(context, batch);
                }
            }
        } else if (line.startsWith(CsvConstants.COMMIT)) {
            if (writer != null) {
                writeLine(line);
                resource.close();
                resource.setState(State.DONE);
                writer = null;
            }
            batchTableLines.clear();
            if (listeners != null) {
                for (IProtocolDataWriterListener listener : listeners) {
                    listener.end(context, batch, resource);
                }
            }
        } else if (line.startsWith(CsvConstants.RETRY)) {
            batch = new Batch(batchType, Long.parseLong(getArgLine(line)), getArgLine(channelLine), getBinaryEncoding(binaryLine), getArgLine(nodeLine), targetNodeId, false);
            String location = batch.getStagedLocation();
            resource = stagingManager.find(category, location, batch.getBatchId());
            if (resource == null || resource.getState() == State.CREATE) {
                resource = null;
                writer = null;
            }
            if (listeners != null) {
                for (IProtocolDataWriterListener listener : listeners) {
                    listener.start(context, batch);
                }
            }
        } else if (line.startsWith(CsvConstants.NODEID)) {
            nodeLine = line;
        } else if (line.startsWith(CsvConstants.BINARY)) {
            binaryLine = line;
        } else if (line.startsWith(CsvConstants.CHANNEL)) {
            channelLine = line;
        } else {
            if (writer == null) {
                throw new IllegalStateException("Invalid batch data was received: " + line);
            }
            TableLine batchLine = batchTableLines.get(tableLine);
            if (batchLine == null || batchLine.columnsLine == null) {
                TableLine syncLine = syncTableLines.get(tableLine);
                if (syncLine != null) {
                    log.debug("Injecting keys and columns to be backwards compatible");
                    if (batchLine == null) {
                        batchLine = syncLine;
                        batchTableLines.put(batchLine, batchLine);
                        writeLine(batchLine.tableLine);
                    }
                    batchLine.keysLine = syncLine.keysLine;
                    writeLine(syncLine.keysLine);
                    batchLine.columnsLine = syncLine.columnsLine;
                    writeLine(syncLine.columnsLine);
                }
            }
            int size = line.length();
            if (size > MAX_WRITE_LENGTH) {
                log.debug("Exceeded max line length with {}", size);
                for (int i = 0; i < size; i = i + MAX_WRITE_LENGTH) {
                    int end = i + MAX_WRITE_LENGTH;
                    writer.append(line, i, end < size ? end : size);
                }
                writer.append("\n");
            } else {
                writeLine(line);
            }
        }
        lineCount++;
        if (System.currentTimeMillis() - ts > 60000) {
            log.info("Batch '{}', for node '{}', for process 'transfer to stage' has been processing for {} seconds.  The following stats have been gathered: {}", new Object[] { (batch != null ? batch.getBatchId() : 0), (batch != null ? batch.getTargetNodeId() : ""), (System.currentTimeMillis() - startTime) / 1000, "LINES=" + lineCount + ", BYTES=" + ((resource == null) ? 0 : resource.getSize()) });
            ts = System.currentTimeMillis();
        }
    }
}
Also used: HashMap(java.util.HashMap) Batch(org.jumpmind.symmetric.io.data.Batch) IStagedResource(org.jumpmind.symmetric.io.stage.IStagedResource)
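
One detail worth isolating is the chunked append for oversized lines: a very long row is copied into the staged writer in fixed slices rather than in one call, so each write handles a bounded amount of data. A standalone sketch of the same loop; the 32 KB limit is illustrative, since the real MAX_WRITE_LENGTH is defined by SimpleStagingDataWriter:

import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;

public class ChunkedWriteSketch {

    // Illustrative limit; SimpleStagingDataWriter defines its own MAX_WRITE_LENGTH.
    static final int MAX_WRITE_LENGTH = 32 * 1024;

    // Append one logical line in fixed-size slices, then terminate it,
    // matching the loop in process() above.
    static void writeChunked(Writer writer, String line) throws IOException {
        int size = line.length();
        for (int i = 0; i < size; i += MAX_WRITE_LENGTH) {
            writer.append(line, i, Math.min(i + MAX_WRITE_LENGTH, size));
        }
        writer.append("\n");
    }

    public static void main(String[] args) throws IOException {
        StringWriter out = new StringWriter();
        writeChunked(out, "x".repeat(100_000)); // several full slices plus a remainder
        System.out.println(out.toString().length()); // prints 100001 (payload plus newline)
    }
}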

Aggregations

IStagedResource (org.jumpmind.symmetric.io.stage.IStagedResource): 17 uses
OutgoingBatch (org.jumpmind.symmetric.model.OutgoingBatch): 8 uses
IOException (java.io.IOException): 4 uses
ArrayList (java.util.ArrayList): 4 uses
IoException (org.jumpmind.exception.IoException): 4 uses
DataContext (org.jumpmind.symmetric.io.data.DataContext): 4 uses
Node (org.jumpmind.symmetric.model.Node): 4 uses
SQLException (java.sql.SQLException): 3 uses
CancellationException (java.util.concurrent.CancellationException): 3 uses
Batch (org.jumpmind.symmetric.io.data.Batch): 3 uses
DataProcessor (org.jumpmind.symmetric.io.data.DataProcessor): 3 uses
IDataReader (org.jumpmind.symmetric.io.data.IDataReader): 3 uses
ProtocolException (org.jumpmind.symmetric.io.data.ProtocolException): 3 uses
IStagingManager (org.jumpmind.symmetric.io.stage.IStagingManager): 3 uses
Channel (org.jumpmind.symmetric.model.Channel): 3 uses
NodeChannel (org.jumpmind.symmetric.model.NodeChannel): 3 uses
ProcessInfoDataWriter (org.jumpmind.symmetric.model.ProcessInfoDataWriter): 3 uses
Statistics (org.jumpmind.util.Statistics): 3 uses
BufferedWriter (java.io.BufferedWriter): 2 uses
Calendar (java.util.Calendar): 2 uses