Example 16 with ProcessInfo

Use of org.jumpmind.symmetric.model.ProcessInfo in project symmetric-ds by JumpMind.

Class DataGapDetector, method beforeRouting:

/**
     * Ensure sym_data_gap is up to date so that data is not routed twice
     * (dual routed).
     */
public void beforeRouting() {
    long printStats = System.currentTimeMillis();
    ProcessInfo processInfo = this.statisticManager.newProcessInfo(new ProcessInfoKey(nodeService.findIdentityNodeId(), null, ProcessType.GAP_DETECT));
    try {
        long ts = System.currentTimeMillis();
        processInfo.setStatus(Status.QUERYING);
        final List<DataGap> gaps = dataService.findDataGaps();
        long lastDataId = -1;
        final int dataIdIncrementBy = parameterService.getInt(ParameterConstants.DATA_ID_INCREMENT_BY);
        final long maxDataToSelect = parameterService.getLong(ParameterConstants.ROUTING_LARGEST_GAP_SIZE);
        final long gapTimeoutInMs = parameterService.getLong(ParameterConstants.ROUTING_STALE_DATA_ID_GAP_TIME);
        long databaseTime = symmetricDialect.getDatabaseTime();
        int idsFilled = 0;
        int newGapsInserted = 0;
        int rangeChecked = 0;
        int gapsDeleted = 0;
        Set<DataGap> gapCheck = new HashSet<DataGap>(gaps);
        boolean supportsTransactionViews = symmetricDialect.supportsTransactionViews();
        long earliestTransactionTime = 0;
        if (supportsTransactionViews) {
            Date date = symmetricDialect.getEarliestTransactionStartTime();
            if (date != null) {
                earliestTransactionTime = date.getTime() - parameterService.getLong(ParameterConstants.DBDIALECT_ORACLE_TRANSACTION_VIEW_CLOCK_SYNC_THRESHOLD_MS, 60000);
            }
        }
        for (final DataGap dataGap : gaps) {
            final boolean lastGap = dataGap.equals(gaps.get(gaps.size() - 1));
            String sql = routerService.getSql("selectDistinctDataIdFromDataEventUsingGapsSql");
            ISqlTemplate sqlTemplate = symmetricDialect.getPlatform().getSqlTemplate();
            Object[] params = new Object[] { dataGap.getStartId(), dataGap.getEndId() };
            lastDataId = -1;
            processInfo.setStatus(Status.QUERYING);
            long queryForIdsTs = System.currentTimeMillis();
            List<Number> ids = sqlTemplate.query(sql, new NumberMapper(), params);
            if (System.currentTimeMillis() - queryForIdsTs > Constants.LONG_OPERATION_THRESHOLD) {
                log.info("It took longer than {}ms to run the following sql for gap from {} to {}.  {}", new Object[] { Constants.LONG_OPERATION_THRESHOLD, dataGap.getStartId(), dataGap.getEndId(), sql });
            }
            processInfo.setStatus(Status.PROCESSING);
            idsFilled += ids.size();
            rangeChecked += dataGap.getEndId() - dataGap.getStartId();
            ISqlTransaction transaction = null;
            try {
                transaction = sqlTemplate.startSqlTransaction();
                for (Number number : ids) {
                    long dataId = number.longValue();
                    processInfo.incrementCurrentDataCount();
                    if (lastDataId == -1 && dataGap.getStartId() + dataIdIncrementBy <= dataId) {
                        // there was a new gap at the start
                        DataGap newGap = new DataGap(dataGap.getStartId(), dataId - 1);
                        if (!gapCheck.contains(newGap)) {
                            dataService.insertDataGap(transaction, newGap);
                            gapCheck.add(newGap);
                        }
                        newGapsInserted++;
                    } else if (lastDataId != -1 && lastDataId + dataIdIncrementBy != dataId && lastDataId != dataId) {
                        // found a gap somewhere in the existing gap
                        DataGap newGap = new DataGap(lastDataId + 1, dataId - 1);
                        if (!gapCheck.contains(newGap)) {
                            dataService.insertDataGap(transaction, newGap);
                            gapCheck.add(newGap);
                        }
                        newGapsInserted++;
                    }
                    lastDataId = dataId;
                }
                // if we found data in the gap
                if (lastDataId != -1) {
                    if (!lastGap && lastDataId + dataIdIncrementBy <= dataGap.getEndId()) {
                        DataGap newGap = new DataGap(lastDataId + dataIdIncrementBy, dataGap.getEndId());
                        if (!gapCheck.contains(newGap)) {
                            dataService.insertDataGap(transaction, newGap);
                            gapCheck.add(newGap);
                        }
                        newGapsInserted++;
                    }
                    dataService.deleteDataGap(transaction, dataGap);
                    gapsDeleted++;
                // if we did not find data in the gap and it was not the
                // last gap
                } else if (!lastGap) {
                    Date createTime = dataGap.getCreateTime();
                    if (supportsTransactionViews) {
                        if (createTime != null && (createTime.getTime() < earliestTransactionTime || earliestTransactionTime == 0)) {
                            if (dataService.countDataInRange(dataGap.getStartId() - 1, dataGap.getEndId() + 1) == 0) {
                                if (dataGap.getStartId() == dataGap.getEndId()) {
                                    log.info("Found a gap in data_id at {}.  Skipping it because there are no pending transactions in the database", dataGap.getStartId());
                                } else {
                                    log.info("Found a gap in data_id from {} to {}.  Skipping it because there are no pending transactions in the database", dataGap.getStartId(), dataGap.getEndId());
                                }
                                dataService.deleteDataGap(transaction, dataGap);
                                gapsDeleted++;
                            }
                        }
                    } else if (createTime != null && databaseTime - createTime.getTime() > gapTimeoutInMs) {
                        if (dataService.countDataInRange(dataGap.getStartId() - 1, dataGap.getEndId() + 1) == 0) {
                            if (dataGap.getStartId() == dataGap.getEndId()) {
                                log.info("Found a gap in data_id at {}.  Skipping it because the gap expired", dataGap.getStartId());
                            } else {
                                log.info("Found a gap in data_id from {} to {}.  Skipping it because the gap expired", dataGap.getStartId(), dataGap.getEndId());
                            }
                            dataService.deleteDataGap(transaction, dataGap);
                            gapsDeleted++;
                        }
                    }
                }
                if (System.currentTimeMillis() - printStats > 30000) {
                    log.info("The data gap detection process has been running for {}ms, detected {} rows that have been previously routed over a total gap range of {}, " + "inserted {} new gaps, and deleted {} gaps", new Object[] { System.currentTimeMillis() - ts, idsFilled, rangeChecked, newGapsInserted, gapsDeleted });
                    printStats = System.currentTimeMillis();
                }
                transaction.commit();
            } catch (Error ex) {
                if (transaction != null) {
                    transaction.rollback();
                }
                throw ex;
            } catch (RuntimeException ex) {
                if (transaction != null) {
                    transaction.rollback();
                }
                throw ex;
            } finally {
                if (transaction != null) {
                    transaction.close();
                }
            }
        }
        if (lastDataId != -1) {
            DataGap newGap = new DataGap(lastDataId + 1, lastDataId + maxDataToSelect);
            if (!gapCheck.contains(newGap)) {
                dataService.insertDataGap(newGap);
                gapCheck.add(newGap);
            }
        }
        long updateTimeInMs = System.currentTimeMillis() - ts;
        if (updateTimeInMs > 10000) {
            log.info("Detecting gaps took {} ms", updateTimeInMs);
        }
        processInfo.setStatus(Status.OK);
    } catch (RuntimeException ex) {
        processInfo.setStatus(Status.ERROR);
        throw ex;
    }
}
Also used: NumberMapper (org.jumpmind.db.sql.mapper.NumberMapper), ProcessInfoKey (org.jumpmind.symmetric.model.ProcessInfoKey), ProcessInfo (org.jumpmind.symmetric.model.ProcessInfo), Date (java.util.Date), DataGap (org.jumpmind.symmetric.model.DataGap), ISqlTemplate (org.jumpmind.db.sql.ISqlTemplate), ISqlTransaction (org.jumpmind.db.sql.ISqlTransaction), HashSet (java.util.HashSet)
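
Across these examples the ProcessInfo lifecycle is the same: obtain an instance from the statistic manager with a ProcessInfoKey, advance the status as the work proceeds, and finish with OK or ERROR. A minimal sketch of that skeleton (assuming statisticManager, sourceNodeId, and targetNodeId are in scope, as they are in the snippet above):

ProcessInfo processInfo = statisticManager.newProcessInfo(
        new ProcessInfoKey(sourceNodeId, targetNodeId, ProcessType.GAP_DETECT));
try {
    processInfo.setStatus(ProcessInfo.Status.QUERYING);
    // ... run the queries ...
    processInfo.setStatus(ProcessInfo.Status.PROCESSING);
    // ... per-row work; call processInfo.incrementCurrentDataCount() for each row ...
    processInfo.setStatus(ProcessInfo.Status.OK);
} catch (RuntimeException ex) {
    // surface the failure in the process monitor, then rethrow
    processInfo.setStatus(ProcessInfo.Status.ERROR);
    throw ex;
}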

Example 17 with ProcessInfo

Use of org.jumpmind.symmetric.model.ProcessInfo in project symmetric-ds by JumpMind.

Class DataLoaderService, method loadDataFromOfflineTransport:

public List<IncomingBatch> loadDataFromOfflineTransport(Node remote, RemoteNodeStatus status, IIncomingTransport transport) throws IOException {
    Node local = nodeService.findIdentity();
    ProcessInfo processInfo = statisticManager.newProcessInfo(new ProcessInfoKey(remote.getNodeId(), local.getNodeId(), ProcessType.OFFLINE_PULL));
    List<IncomingBatch> list = null;
    try {
        list = loadDataFromTransport(processInfo, remote, transport, null);
        if (list.size() > 0) {
            processInfo.setStatus(ProcessInfo.Status.ACKING);
            status.updateIncomingStatus(list);
        }
        if (containsError(list)) {
            processInfo.setStatus(ProcessInfo.Status.ERROR);
        } else {
            processInfo.setStatus(ProcessInfo.Status.OK);
        }
    } catch (RuntimeException e) {
        processInfo.setStatus(ProcessInfo.Status.ERROR);
        throw e;
    } catch (IOException e) {
        processInfo.setStatus(ProcessInfo.Status.ERROR);
        throw e;
    }
    return list;
}
Also used: Node (org.jumpmind.symmetric.model.Node), ProcessInfoKey (org.jumpmind.symmetric.model.ProcessInfoKey), ProcessInfo (org.jumpmind.symmetric.model.ProcessInfo), IOException (java.io.IOException), IncomingBatch (org.jumpmind.symmetric.model.IncomingBatch)
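
The containsError(list) call is a private helper that this excerpt does not show. A plausible implementation (a hypothetical sketch, not the project's actual code; it assumes IncomingBatch exposes an error status such as Status.ER) would scan the batches:

// Hypothetical sketch of the unshown containsError helper.
private boolean containsError(List<IncomingBatch> list) {
    for (IncomingBatch batch : list) {
        if (batch.getStatus() == IncomingBatch.Status.ER) { // ER assumed to mark an errored batch
            return true;
        }
    }
    return false;
}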

Example 18 with ProcessInfo

Use of org.jumpmind.symmetric.model.ProcessInfo in project symmetric-ds by JumpMind.

Class DataExtractorService, method execute:

/**
     * This is a callback method used by the NodeCommunicationService that extracts an initial load
     * in the background.
     */
public void execute(NodeCommunication nodeCommunication, RemoteNodeStatus status) {
    if (!isApplicable(nodeCommunication, status)) {
        log.debug("{} failed isApplicable check and will not run.", this);
        return;
    }
    List<ExtractRequest> requests = getExtractRequestsForNode(nodeCommunication);
    long ts = System.currentTimeMillis();
    /*
         * Process extract requests until it has taken longer than 30 seconds, and then
         * allow the process to return so progress status can be seen.
         */
    for (int i = 0; i < requests.size() && (System.currentTimeMillis() - ts) <= Constants.LONG_OPERATION_THRESHOLD; i++) {
        ExtractRequest request = requests.get(i);
        Node identity = nodeService.findIdentity();
        Node targetNode = nodeService.findNode(nodeCommunication.getNodeId());
        log.info("Extracting batches for request {}. Starting at batch {}.  Ending at batch {}", new Object[] { request.getRequestId(), request.getStartBatchId(), request.getEndBatchId() });
        List<OutgoingBatch> batches = outgoingBatchService.getOutgoingBatchRange(request.getStartBatchId(), request.getEndBatchId()).getBatches();
        ProcessInfo processInfo = statisticManager.newProcessInfo(new ProcessInfoKey(identity.getNodeId(), nodeCommunication.getQueue(), nodeCommunication.getNodeId(), getProcessType()));
        processInfo.setBatchCount(batches.size());
        try {
            boolean areBatchesOk = true;
            /*
                 * check to see if batches have been OK'd by another reload
                 * request 
                 */
            for (OutgoingBatch outgoingBatch : batches) {
                if (outgoingBatch.getStatus() != Status.OK) {
                    areBatchesOk = false;
                    break;
                }
            }
            if (!areBatchesOk) {
                Channel channel = configurationService.getChannel(batches.get(0).getChannelId());
                /*
                     * "Trick" the extractor to extract one reload batch, but we
                     * will split it across the N batches when writing it
                     */
                OutgoingBatch firstBatch = batches.get(0);
                processInfo.setCurrentLoadId(firstBatch.getLoadId());
                IStagedResource resource = getStagedResource(firstBatch);
                if (resource != null && resource.exists() && resource.getState() != State.CREATE) {
                    resource.delete();
                }
                MultiBatchStagingWriter multiBatchStagingWriter = buildMultiBatchStagingWriter(request, identity, targetNode, batches, processInfo, channel);
                extractOutgoingBatch(processInfo, targetNode, multiBatchStagingWriter, firstBatch, false, false, ExtractMode.FOR_SYM_CLIENT);
                for (OutgoingBatch outgoingBatch : batches) {
                    resource = getStagedResource(outgoingBatch);
                    if (resource != null) {
                        resource.setState(State.DONE);
                    }
                }
            } else {
                log.info("Batches already had an OK status for request {}, batches {} to {}.  Not extracting", new Object[] { request.getRequestId(), request.getStartBatchId(), request.getEndBatchId() });
            }
            /*
                 * re-query the batches to see if they have been OK'd while
                 * extracting
                 */
            List<OutgoingBatch> checkBatches = outgoingBatchService.getOutgoingBatchRange(request.getStartBatchId(), request.getEndBatchId()).getBatches();
            areBatchesOk = true;
            /*
                 * check to see if batches have been OK'd by another reload
                 * request while extracting
                 */
            for (OutgoingBatch outgoingBatch : checkBatches) {
                if (outgoingBatch.getStatus() != Status.OK) {
                    areBatchesOk = false;
                    break;
                }
            }
            ISqlTransaction transaction = null;
            try {
                transaction = sqlTemplate.startSqlTransaction();
                updateExtractRequestStatus(transaction, request.getRequestId(), ExtractStatus.OK);
                if (!areBatchesOk) {
                    for (OutgoingBatch outgoingBatch : batches) {
                        if (!parameterService.is(ParameterConstants.INITIAL_LOAD_EXTRACT_AND_SEND_WHEN_STAGED, false)) {
                            outgoingBatch.setStatus(Status.NE);
                            outgoingBatchService.updateOutgoingBatch(transaction, outgoingBatch);
                        }
                    }
                } else {
                    log.info("Batches already had an OK status for request {}, batches {} to {}.  Not updating the status to NE", new Object[] { request.getRequestId(), request.getStartBatchId(), request.getEndBatchId() });
                }
                transaction.commit();
                log.info("Done extracting {} batches for request {}", (request.getEndBatchId() - request.getStartBatchId()) + 1, request.getRequestId());
            } catch (Error ex) {
                if (transaction != null) {
                    transaction.rollback();
                }
                throw ex;
            } catch (RuntimeException ex) {
                if (transaction != null) {
                    transaction.rollback();
                }
                throw ex;
            } finally {
                close(transaction);
            }
            processInfo.setStatus(org.jumpmind.symmetric.model.ProcessInfo.Status.OK);
        } catch (CancellationException ex) {
            log.info("Cancelled extract request {}. Starting at batch {}.  Ending at batch {}", new Object[] { request.getRequestId(), request.getStartBatchId(), request.getEndBatchId() });
            processInfo.setStatus(org.jumpmind.symmetric.model.ProcessInfo.Status.OK);
        } catch (RuntimeException ex) {
            log.debug("Failed to extract batches for request {}. Starting at batch {}.  Ending at batch {}", new Object[] { request.getRequestId(), request.getStartBatchId(), request.getEndBatchId() });
            processInfo.setStatus(org.jumpmind.symmetric.model.ProcessInfo.Status.ERROR);
            throw ex;
        }
    }
}
Also used: Node (org.jumpmind.symmetric.model.Node), NodeChannel (org.jumpmind.symmetric.model.NodeChannel), Channel (org.jumpmind.symmetric.model.Channel), ProcessInfoKey (org.jumpmind.symmetric.model.ProcessInfoKey), ProcessInfo (org.jumpmind.symmetric.model.ProcessInfo), ExtractRequest (org.jumpmind.symmetric.model.ExtractRequest), TransformPoint (org.jumpmind.symmetric.io.data.transform.TransformPoint), ISqlTransaction (org.jumpmind.db.sql.ISqlTransaction), CancellationException (java.util.concurrent.CancellationException), IStagedResource (org.jumpmind.symmetric.io.stage.IStagedResource), OutgoingBatch (org.jumpmind.symmetric.model.OutgoingBatch)
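
The write phase here uses the same ISqlTransaction idiom as beforeRouting in Example 16: start the transaction, commit on success, roll back on Error or RuntimeException, and always close it in a finally block. Reduced to a reusable skeleton (a sketch; the comment stands in for the real inserts and updates, and a Java 7+ multi-catch collapses the two catch blocks):

ISqlTransaction transaction = null;
try {
    transaction = sqlTemplate.startSqlTransaction();
    // ... issue inserts/updates through the transaction ...
    transaction.commit();
} catch (Error | RuntimeException ex) {
    if (transaction != null) {
        transaction.rollback();
    }
    throw ex;
} finally {
    if (transaction != null) {
        transaction.close();
    }
}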

Example 19 with ProcessInfo

Use of org.jumpmind.symmetric.model.ProcessInfo in project symmetric-ds by JumpMind.

Class DataExtractorService, method extractBatchRange:

public boolean extractBatchRange(Writer writer, String nodeId, long startBatchId, long endBatchId) {
    boolean foundBatch = false;
    Node sourceNode = nodeService.findIdentity();
    for (long batchId = startBatchId; batchId <= endBatchId; batchId++) {
        OutgoingBatch batch = outgoingBatchService.findOutgoingBatch(batchId, nodeId);
        if (batch != null) {
            Node targetNode = nodeService.findNode(nodeId);
            if (targetNode == null && Constants.UNROUTED_NODE_ID.equals(nodeId)) {
                targetNode = new Node();
                targetNode.setNodeId("-1");
            }
            if (targetNode != null) {
                IDataReader dataReader = new ExtractDataReader(symmetricDialect.getPlatform(), new SelectFromSymDataSource(batch, sourceNode, targetNode, new ProcessInfo()));
                DataContext ctx = new DataContext();
                ctx.put(Constants.DATA_CONTEXT_TARGET_NODE, targetNode);
                ctx.put(Constants.DATA_CONTEXT_SOURCE_NODE, nodeService.findIdentity());
                new DataProcessor(dataReader, createTransformDataWriter(nodeService.findIdentity(), targetNode, new ProtocolDataWriter(nodeService.findIdentityNodeId(), writer, targetNode.requires13Compatiblity())), "extract range").process(ctx);
                foundBatch = true;
            }
        }
    }
    return foundBatch;
}
Also used: IDataReader (org.jumpmind.symmetric.io.data.IDataReader), DataContext (org.jumpmind.symmetric.io.data.DataContext), ProtocolDataWriter (org.jumpmind.symmetric.io.data.writer.ProtocolDataWriter), Node (org.jumpmind.symmetric.model.Node), OutgoingBatch (org.jumpmind.symmetric.model.OutgoingBatch), ProcessInfo (org.jumpmind.symmetric.model.ProcessInfo), DataProcessor (org.jumpmind.symmetric.io.data.DataProcessor), ExtractDataReader (org.jumpmind.symmetric.io.data.reader.ExtractDataReader)
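
Because the method writes to any java.io.Writer, a caller can capture the extracted batches in memory. A usage sketch (the service reference, node ID, and batch range are illustrative assumptions):

// Extract batches 100 through 105 destined for node "001" into a string.
StringWriter writer = new StringWriter();
boolean found = dataExtractorService.extractBatchRange(writer, "001", 100, 105);
if (found) {
    System.out.println(writer.toString());
}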

Example 20 with ProcessInfo

Use of org.jumpmind.symmetric.model.ProcessInfo in project symmetric-ds by JumpMind.

Class DataLoaderService, method loadDataFromPull:

public void loadDataFromPull(Node remote, RemoteNodeStatus status) throws IOException {
    Node local = nodeService.findIdentity();
    if (local == null) {
        local = new Node(this.parameterService, symmetricDialect);
    }
    try {
        NodeSecurity localSecurity = nodeService.findNodeSecurity(local.getNodeId(), true);
        IIncomingTransport transport = null;
        boolean isRegisterTransport = false;
        if (remote != null && localSecurity != null) {
            Map<String, String> requestProperties = new HashMap<String, String>();
            ChannelMap suspendIgnoreChannels = configurationService.getSuspendIgnoreChannelLists();
            requestProperties.put(WebConstants.SUSPENDED_CHANNELS, suspendIgnoreChannels.getSuspendChannelsAsString());
            requestProperties.put(WebConstants.IGNORED_CHANNELS, suspendIgnoreChannels.getIgnoreChannelsAsString());
            requestProperties.put(WebConstants.THREAD_CHANNEL, status.getChannelId());
            transport = transportManager.getPullTransport(remote, local, localSecurity.getNodePassword(), requestProperties, parameterService.getRegistrationUrl());
        } else {
            transport = transportManager.getRegisterTransport(local, parameterService.getRegistrationUrl());
            log.info("Using registration URL of {}", transport.getUrl());
            List<INodeRegistrationListener> registrationListeners = extensionService.getExtensionPointList(INodeRegistrationListener.class);
            for (INodeRegistrationListener l : registrationListeners) {
                l.registrationUrlUpdated(transport.getUrl());
            }
            remote = new Node();
            remote.setSyncUrl(parameterService.getRegistrationUrl());
            isRegisterTransport = true;
        }
        ProcessInfo processInfo = statisticManager.newProcessInfo(new ProcessInfoKey(remote.getNodeId(), status.getChannelId(), local.getNodeId(), ProcessType.PULL_JOB));
        try {
            List<IncomingBatch> list = loadDataFromTransport(processInfo, remote, transport, null);
            if (list.size() > 0) {
                processInfo.setStatus(ProcessInfo.Status.ACKING);
                status.updateIncomingStatus(list);
                local = nodeService.findIdentity();
                if (local != null) {
                    localSecurity = nodeService.findNodeSecurity(local.getNodeId(), !isRegisterTransport);
                    if (StringUtils.isNotBlank(transport.getRedirectionUrl())) {
                        /*
                             * We were redirected for the pull, we need to
                             * redirect for the ack
                             */
                        String url = transport.getRedirectionUrl();
                        int index = url.indexOf("/registration?");
                        if (index >= 0) {
                            url = url.substring(0, index);
                        }
                        log.info("Setting the sync url for ack to: {}", url);
                        remote.setSyncUrl(url);
                    }
                    sendAck(remote, local, localSecurity, list, transportManager);
                }
            }
            if (containsError(list)) {
                processInfo.setStatus(ProcessInfo.Status.ERROR);
            } else {
                processInfo.setStatus(ProcessInfo.Status.OK);
            }
            updateBatchToSendCount(remote, transport);
        } catch (RuntimeException e) {
            processInfo.setStatus(ProcessInfo.Status.ERROR);
            throw e;
        } catch (IOException e) {
            processInfo.setStatus(ProcessInfo.Status.ERROR);
            throw e;
        }
    } catch (RegistrationRequiredException e) {
        if (StringUtils.isBlank(remote.getSyncUrl()) || remote.getSyncUrl().equals(parameterService.getRegistrationUrl())) {
            log.warn("Node information missing on the server.  Attempting to re-register remote.getSyncUrl()={}", remote.getSyncUrl());
            loadDataFromPull(null, status);
            nodeService.findIdentity(false);
        } else {
            log.warn("Failed to pull data from node '{}'. It probably is missing a node security record for '{}'.", remote.getNodeId(), local.getNodeId());
        }
    } catch (MalformedURLException e) {
        if (remote != null) {
            log.error("Could not connect to the {} node's transport because of a bad URL: '{}' {}", remote.getNodeId(), remote.getSyncUrl(), e);
        } else {
            log.error("", e);
        }
        throw e;
    }
}
Also used: ChannelMap (org.jumpmind.symmetric.model.ChannelMap), MalformedURLException (java.net.MalformedURLException), NodeSecurity (org.jumpmind.symmetric.model.NodeSecurity), HashMap (java.util.HashMap), Node (org.jumpmind.symmetric.model.Node), ProcessInfoKey (org.jumpmind.symmetric.model.ProcessInfoKey), ProcessInfo (org.jumpmind.symmetric.model.ProcessInfo), IOException (java.io.IOException), IncomingBatch (org.jumpmind.symmetric.model.IncomingBatch), TransformPoint (org.jumpmind.symmetric.io.data.transform.TransformPoint), IIncomingTransport (org.jumpmind.symmetric.transport.IIncomingTransport), INodeRegistrationListener (org.jumpmind.symmetric.ext.INodeRegistrationListener), RegistrationRequiredException (org.jumpmind.symmetric.service.RegistrationRequiredException)
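
When the pull was redirected, the ack must be sent to the redirected node's base sync URL, so the code above cuts the redirection URL off at "/registration?". Extracted as a standalone helper (a hypothetical refactoring for illustration, not a method that exists in the project):

// Hypothetical helper mirroring the redirect handling above.
static String toAckSyncUrl(String redirectionUrl) {
    int index = redirectionUrl.indexOf("/registration?");
    return index >= 0 ? redirectionUrl.substring(0, index) : redirectionUrl;
}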

Aggregations

ProcessInfo (org.jumpmind.symmetric.model.ProcessInfo): 32 usages
ProcessInfoKey (org.jumpmind.symmetric.model.ProcessInfoKey): 22 usages
Node (org.jumpmind.symmetric.model.Node): 18 usages
IOException (java.io.IOException): 10 usages
OutgoingBatch (org.jumpmind.symmetric.model.OutgoingBatch): 10 usages
IncomingBatch (org.jumpmind.symmetric.model.IncomingBatch): 8 usages
NodeSecurity (org.jumpmind.symmetric.model.NodeSecurity): 7 usages
ArrayList (java.util.ArrayList): 5 usages
IoException (org.jumpmind.exception.IoException): 5 usages
ISymmetricEngine (org.jumpmind.symmetric.ISymmetricEngine): 5 usages
SymmetricException (org.jumpmind.symmetric.SymmetricException): 5 usages
INodeService (org.jumpmind.symmetric.service.INodeService): 5 usages
MalformedURLException (java.net.MalformedURLException): 4 usages
ISqlTransaction (org.jumpmind.db.sql.ISqlTransaction): 4 usages
Date (java.util.Date): 3 usages
ChannelMap (org.jumpmind.symmetric.model.ChannelMap): 3 usages
IOutgoingTransport (org.jumpmind.symmetric.transport.IOutgoingTransport): 3 usages
InputStream (java.io.InputStream): 2 usages
OutputStream (java.io.OutputStream): 2 usages
PipedInputStream (java.io.PipedInputStream): 2 usages