Use of org.jumpmind.symmetric.model.ProcessInfoKey in project symmetric-ds by JumpMind.
Class DataGapRouteReader, method execute().
protected void execute() {
    long maxPeekAheadSizeInBytes = (long) (Runtime.getRuntime().maxMemory() * percentOfHeapToUse);
    ISymmetricDialect symmetricDialect = engine.getSymmetricDialect();
    ISqlReadCursor<Data> cursor = null;
    processInfo = engine.getStatisticManager().newProcessInfo(
            new ProcessInfoKey(engine.getNodeService().findIdentityNodeId(), null, ProcessType.ROUTER_READER));
    processInfo.setCurrentChannelId(context.getChannel().getChannelId());
    try {
        int lastPeekAheadIndex = 0;
        int dataCount = 0;
        long maxDataToRoute = context.getChannel().getMaxDataToRoute();
        List<Data> peekAheadQueue = new ArrayList<Data>(peekAheadCount);
        boolean transactional = !context.getChannel().getBatchAlgorithm().equals(NonTransactionalBatchAlgorithm.NAME)
                && symmetricDialect.supportsTransactionId();
        processInfo.setStatus(Status.QUERYING);
        cursor = prepareCursor();
        processInfo.setStatus(Status.EXTRACTING);
        boolean moreData = true;
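        // Route rows until the channel's max data to route is reached; when routing
        // transactionally, keep reading so the current transaction is not split.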
        while (dataCount < maxDataToRoute || (lastTransactionId != null && transactional)) {
            if (moreData && (lastTransactionId != null || peekAheadQueue.size() == 0)) {
                moreData = fillPeekAheadQueue(peekAheadQueue, peekAheadCount, cursor);
            }
            int dataWithSameTransactionIdCount = 0;
            while (peekAheadQueue.size() > 0 && lastTransactionId == null && dataCount < maxDataToRoute) {
                Data data = peekAheadQueue.remove(0);
                copyToQueue(data);
                dataCount++;
                processInfo.incrementCurrentDataCount();
                processInfo.setCurrentTableName(data.getTableName());
                lastTransactionId = data.getTransactionId();
                context.addTransaction(lastTransactionId);
                dataWithSameTransactionIdCount++;
            }
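            // Drain any queued rows that belong to the transaction being finished,
            // or give up on it if too few (or too many) matching rows turn up.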
            if (lastTransactionId != null && peekAheadQueue.size() > 0) {
                Iterator<Data> datas = peekAheadQueue.iterator();
                int index = 0;
                while (datas.hasNext() && (dataCount < maxDataToRoute || transactional)) {
                    Data data = datas.next();
                    if (lastTransactionId.equals(data.getTransactionId())) {
                        dataWithSameTransactionIdCount++;
                        datas.remove();
                        copyToQueue(data);
                        dataCount++;
                        processInfo.incrementCurrentDataCount();
                        processInfo.setCurrentTableName(data.getTableName());
                        lastPeekAheadIndex = index;
                    } else {
                        context.addTransaction(data.getTransactionId());
                        index++;
                    }
                }
                if (dataWithSameTransactionIdCount == 0 || peekAheadQueue.size() - lastPeekAheadIndex > peekAheadCount) {
                    lastTransactionId = null;
                    lastPeekAheadIndex = 0;
                }
            }
            if (!moreData && peekAheadQueue.size() == 0) {
                // we've reached the end of the result set
                break;
            } else if (peekAheadSizeInBytes >= maxPeekAheadSizeInBytes) {
                log.info("The peek ahead queue has reached its max size of {} bytes. Finishing reading the current transaction", peekAheadSizeInBytes);
                finishTransactionMode = true;
                peekAheadQueue.clear();
            }
        }
        processInfo.setStatus(Status.OK);
    } catch (Throwable ex) {
        processInfo.setStatus(Status.ERROR);
        String msg = "";
        if (engine.getDatabasePlatform().getName().startsWith(DatabaseNamesConstants.FIREBIRD)
                && isNotBlank(ex.getMessage())
                && ex.getMessage().contains("arithmetic exception, numeric overflow, or string truncation")) {
            msg = "There is a good chance that the truncation error you are receiving is because contains_big_lobs on the '"
                    + context.getChannel().getChannelId()
                    + "' channel needs to be turned on. Firebird casts to varchar when this setting is not turned on and the data length has most likely exceeded the 10k row size";
        }
        log.error(msg, ex);
    } finally {
        if (cursor != null) {
            cursor.close();
        }
        copyToQueue(new EOD());
        reading = false;
    }
}
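Every snippet on this page follows the same ProcessInfo lifecycle: build a ProcessInfoKey from the node ids (plus an optional queue or channel) and a ProcessType, register it with the statistic manager, then move the resulting ProcessInfo through its status phases. A minimal sketch of that lifecycle, assuming statisticManager and nodeService fields like the ones used in the snippets:

// Minimal sketch of the ProcessInfo lifecycle used throughout this page.
// Assumes injected statisticManager and nodeService fields, as in the
// services above; the status phases mirror DataGapRouteReader.execute().
ProcessInfo info = statisticManager.newProcessInfo(
        new ProcessInfoKey(nodeService.findIdentityNodeId(), null, ProcessType.ROUTER_READER));
try {
    info.setStatus(ProcessInfo.Status.QUERYING);   // preparing and running the query
    info.setStatus(ProcessInfo.Status.EXTRACTING); // streaming rows out
    info.incrementCurrentDataCount();              // called once per row handled
    info.setStatus(ProcessInfo.Status.OK);         // terminal success state
} catch (RuntimeException ex) {
    info.setStatus(ProcessInfo.Status.ERROR);      // terminal failure state
    throw ex;
}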
Use of org.jumpmind.symmetric.model.ProcessInfoKey in project symmetric-ds by JumpMind.
Class DataGapDetector, method beforeRouting().
/**
 * Always make sure sym_data_gap is up to date so that we don't dual-route
 * data.
 */
public void beforeRouting() {
    long printStats = System.currentTimeMillis();
    ProcessInfo processInfo = this.statisticManager.newProcessInfo(
            new ProcessInfoKey(nodeService.findIdentityNodeId(), null, ProcessType.GAP_DETECT));
    try {
        long ts = System.currentTimeMillis();
        processInfo.setStatus(Status.QUERYING);
        final List<DataGap> gaps = dataService.findDataGaps();
        long lastDataId = -1;
        final int dataIdIncrementBy = parameterService.getInt(ParameterConstants.DATA_ID_INCREMENT_BY);
        final long maxDataToSelect = parameterService.getLong(ParameterConstants.ROUTING_LARGEST_GAP_SIZE);
        final long gapTimeoutInMs = parameterService.getLong(ParameterConstants.ROUTING_STALE_DATA_ID_GAP_TIME);
        long databaseTime = symmetricDialect.getDatabaseTime();
        int idsFilled = 0;
        int newGapsInserted = 0;
        int rangeChecked = 0;
        int gapsDeleted = 0;
        Set<DataGap> gapCheck = new HashSet<DataGap>(gaps);
        boolean supportsTransactionViews = symmetricDialect.supportsTransactionViews();
        long earliestTransactionTime = 0;
        if (supportsTransactionViews) {
            Date date = symmetricDialect.getEarliestTransactionStartTime();
            if (date != null) {
                earliestTransactionTime = date.getTime()
                        - parameterService.getLong(ParameterConstants.DBDIALECT_ORACLE_TRANSACTION_VIEW_CLOCK_SYNC_THRESHOLD_MS, 60000);
            }
        }
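        // Walk each recorded gap and query sym_data_event for data_ids that were
        // already routed inside it; those ids split the gap into smaller gaps.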
        for (final DataGap dataGap : gaps) {
            final boolean lastGap = dataGap.equals(gaps.get(gaps.size() - 1));
            String sql = routerService.getSql("selectDistinctDataIdFromDataEventUsingGapsSql");
            ISqlTemplate sqlTemplate = symmetricDialect.getPlatform().getSqlTemplate();
            Object[] params = new Object[] { dataGap.getStartId(), dataGap.getEndId() };
            lastDataId = -1;
            processInfo.setStatus(Status.QUERYING);
            long queryForIdsTs = System.currentTimeMillis();
            List<Number> ids = sqlTemplate.query(sql, new NumberMapper(), params);
            if (System.currentTimeMillis() - queryForIdsTs > Constants.LONG_OPERATION_THRESHOLD) {
                log.info("It took longer than {}ms to run the following sql for gap from {} to {}. {}",
                        new Object[] { Constants.LONG_OPERATION_THRESHOLD, dataGap.getStartId(), dataGap.getEndId(), sql });
            }
            processInfo.setStatus(Status.PROCESSING);
            idsFilled += ids.size();
            rangeChecked += dataGap.getEndId() - dataGap.getStartId();
            ISqlTransaction transaction = null;
            try {
                transaction = sqlTemplate.startSqlTransaction();
                for (Number number : ids) {
                    long dataId = number.longValue();
                    processInfo.incrementCurrentDataCount();
                    if (lastDataId == -1 && dataGap.getStartId() + dataIdIncrementBy <= dataId) {
                        // there was a new gap at the start
                        DataGap newGap = new DataGap(dataGap.getStartId(), dataId - 1);
                        if (!gapCheck.contains(newGap)) {
                            dataService.insertDataGap(transaction, newGap);
                            gapCheck.add(newGap);
                        }
                        newGapsInserted++;
                    } else if (lastDataId != -1 && lastDataId + dataIdIncrementBy != dataId && lastDataId != dataId) {
                        // found a gap somewhere in the existing gap
                        DataGap newGap = new DataGap(lastDataId + 1, dataId - 1);
                        if (!gapCheck.contains(newGap)) {
                            dataService.insertDataGap(transaction, newGap);
                            gapCheck.add(newGap);
                        }
                        newGapsInserted++;
                    }
                    lastDataId = dataId;
                }
                // if we found data in the gap
                if (lastDataId != -1) {
                    if (!lastGap && lastDataId + dataIdIncrementBy <= dataGap.getEndId()) {
                        DataGap newGap = new DataGap(lastDataId + dataIdIncrementBy, dataGap.getEndId());
                        if (!gapCheck.contains(newGap)) {
                            dataService.insertDataGap(transaction, newGap);
                            gapCheck.add(newGap);
                        }
                        newGapsInserted++;
                    }
                    dataService.deleteDataGap(transaction, dataGap);
                    gapsDeleted++;
                // if we did not find data in the gap and it was not the last gap
                } else if (!lastGap) {
                    Date createTime = dataGap.getCreateTime();
                    if (supportsTransactionViews) {
                        if (createTime != null && (createTime.getTime() < earliestTransactionTime || earliestTransactionTime == 0)) {
                            if (dataService.countDataInRange(dataGap.getStartId() - 1, dataGap.getEndId() + 1) == 0) {
                                if (dataGap.getStartId() == dataGap.getEndId()) {
                                    log.info("Found a gap in data_id at {}. Skipping it because there are no pending transactions in the database", dataGap.getStartId());
                                } else {
                                    log.info("Found a gap in data_id from {} to {}. Skipping it because there are no pending transactions in the database", dataGap.getStartId(), dataGap.getEndId());
                                }
                                dataService.deleteDataGap(transaction, dataGap);
                                gapsDeleted++;
                            }
                        }
                    } else if (createTime != null && databaseTime - createTime.getTime() > gapTimeoutInMs) {
                        if (dataService.countDataInRange(dataGap.getStartId() - 1, dataGap.getEndId() + 1) == 0) {
                            if (dataGap.getStartId() == dataGap.getEndId()) {
                                log.info("Found a gap in data_id at {}. Skipping it because the gap expired", dataGap.getStartId());
                            } else {
                                log.info("Found a gap in data_id from {} to {}. Skipping it because the gap expired", dataGap.getStartId(), dataGap.getEndId());
                            }
                            dataService.deleteDataGap(transaction, dataGap);
                            gapsDeleted++;
                        }
                    }
                }
                if (System.currentTimeMillis() - printStats > 30000) {
                    log.info("The data gap detection process has been running for {}ms, detected {} rows that have been previously routed over a total gap range of {}, "
                            + "inserted {} new gaps, and deleted {} gaps",
                            new Object[] { System.currentTimeMillis() - ts, idsFilled, rangeChecked, newGapsInserted, gapsDeleted });
                    printStats = System.currentTimeMillis();
                }
                transaction.commit();
            } catch (Error ex) {
                if (transaction != null) {
                    transaction.rollback();
                }
                throw ex;
            } catch (RuntimeException ex) {
                if (transaction != null) {
                    transaction.rollback();
                }
                throw ex;
            } finally {
                if (transaction != null) {
                    transaction.close();
                }
            }
        }
        if (lastDataId != -1) {
            DataGap newGap = new DataGap(lastDataId + 1, lastDataId + maxDataToSelect);
            if (!gapCheck.contains(newGap)) {
                dataService.insertDataGap(newGap);
                gapCheck.add(newGap);
            }
        }
        long updateTimeInMs = System.currentTimeMillis() - ts;
        if (updateTimeInMs > 10000) {
            log.info("Detecting gaps took {} ms", updateTimeInMs);
        }
        processInfo.setStatus(Status.OK);
    } catch (RuntimeException ex) {
        processInfo.setStatus(Status.ERROR);
        throw ex;
    }
}
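The heart of beforeRouting is interval arithmetic: the data_ids found inside a recorded gap split it into smaller sub-gaps, one before the first id, one between any two non-adjacent ids, and one after the last id when the gap is not the open-ended last gap. A standalone sketch of that splitting rule, under the assumption that the ids arrive sorted; the class and method names here are illustrative, not SymmetricDS API:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative re-statement of the gap-splitting rule in beforeRouting.
// Not SymmetricDS API; the names and the long[] pair representation are made up.
class GapSplitSketch {
    static List<long[]> splitGap(long startId, long endId, List<Long> foundIds, long incrementBy) {
        List<long[]> subGaps = new ArrayList<long[]>();
        long last = -1;
        for (long id : foundIds) {
            if (last == -1 && startId + incrementBy <= id) {
                subGaps.add(new long[] { startId, id - 1 });  // gap before the first routed id
            } else if (last != -1 && last + incrementBy != id && last != id) {
                subGaps.add(new long[] { last + 1, id - 1 }); // gap between two routed ids
            }
            last = id;
        }
        if (last != -1 && last + incrementBy <= endId) {
            subGaps.add(new long[] { last + incrementBy, endId }); // gap after the last routed id
        }
        return subGaps;
    }

    public static void main(String[] args) {
        // Gap [100, 200] with ids 105, 106 and 150 already routed splits into
        // [100, 104], [107, 149] and [151, 200].
        for (long[] g : splitGap(100, 200, Arrays.asList(105L, 106L, 150L), 1)) {
            System.out.println(g[0] + " - " + g[1]);
        }
    }
}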
Use of org.jumpmind.symmetric.model.ProcessInfoKey in project symmetric-ds by JumpMind.
Class DataLoaderService, method loadDataFromOfflineTransport().
public List<IncomingBatch> loadDataFromOfflineTransport(Node remote, RemoteNodeStatus status, IIncomingTransport transport) throws IOException {
    Node local = nodeService.findIdentity();
    ProcessInfo processInfo = statisticManager.newProcessInfo(
            new ProcessInfoKey(remote.getNodeId(), local.getNodeId(), ProcessType.OFFLINE_PULL));
    List<IncomingBatch> list = null;
    try {
        list = loadDataFromTransport(processInfo, remote, transport, null);
        if (list.size() > 0) {
            processInfo.setStatus(ProcessInfo.Status.ACKING);
            status.updateIncomingStatus(list);
        }
        if (containsError(list)) {
            processInfo.setStatus(ProcessInfo.Status.ERROR);
        } else {
            processInfo.setStatus(ProcessInfo.Status.OK);
        }
    } catch (RuntimeException e) {
        processInfo.setStatus(ProcessInfo.Status.ERROR);
        throw e;
    } catch (IOException e) {
        processInfo.setStatus(ProcessInfo.Status.ERROR);
        throw e;
    }
    return list;
}
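Note the two key shapes used on this page: a three-argument form taking source node id, target node id, and process type, and a four-argument form that adds a queue or channel id as the second argument. Side by side, based only on the calls shown in these snippets (argument meanings inferred from context):

// Both ProcessInfoKey shapes seen on this page, inferred from the calls above.
ProcessInfoKey offlinePullKey = new ProcessInfoKey(
        remote.getNodeId(), local.getNodeId(), ProcessType.OFFLINE_PULL);                    // source, target, type
ProcessInfoKey pullJobKey = new ProcessInfoKey(
        remote.getNodeId(), status.getChannelId(), local.getNodeId(), ProcessType.PULL_JOB); // source, queue/channel, target, type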
Use of org.jumpmind.symmetric.model.ProcessInfoKey in project symmetric-ds by JumpMind.
Class DataExtractorService, method execute().
/**
* This is a callback method used by the NodeCommunicationService that extracts an initial load
* in the background.
*/
public void execute(NodeCommunication nodeCommunication, RemoteNodeStatus status) {
    if (!isApplicable(nodeCommunication, status)) {
        log.debug("{} failed isApplicable check and will not run.", this);
        return;
    }
    List<ExtractRequest> requests = getExtractRequestsForNode(nodeCommunication);
    long ts = System.currentTimeMillis();
    /*
     * Process extract requests until it has taken longer than 30 seconds, and then
     * allow the process to return so progress status can be seen.
     */
    for (int i = 0; i < requests.size() && (System.currentTimeMillis() - ts) <= Constants.LONG_OPERATION_THRESHOLD; i++) {
        ExtractRequest request = requests.get(i);
        Node identity = nodeService.findIdentity();
        Node targetNode = nodeService.findNode(nodeCommunication.getNodeId());
        log.info("Extracting batches for request {}. Starting at batch {}. Ending at batch {}",
                new Object[] { request.getRequestId(), request.getStartBatchId(), request.getEndBatchId() });
        List<OutgoingBatch> batches = outgoingBatchService.getOutgoingBatchRange(request.getStartBatchId(), request.getEndBatchId()).getBatches();
        ProcessInfo processInfo = statisticManager.newProcessInfo(
                new ProcessInfoKey(identity.getNodeId(), nodeCommunication.getQueue(), nodeCommunication.getNodeId(), getProcessType()));
        processInfo.setBatchCount(batches.size());
        try {
            boolean areBatchesOk = true;
            /*
             * check to see if batches have been OK'd by another reload
             * request
             */
            for (OutgoingBatch outgoingBatch : batches) {
                if (outgoingBatch.getStatus() != Status.OK) {
                    areBatchesOk = false;
                    break;
                }
            }
            if (!areBatchesOk) {
                Channel channel = configurationService.getChannel(batches.get(0).getChannelId());
                /*
                 * "Trick" the extractor to extract one reload batch, but we
                 * will split it across the N batches when writing it
                 */
                OutgoingBatch firstBatch = batches.get(0);
                processInfo.setCurrentLoadId(firstBatch.getLoadId());
                IStagedResource resource = getStagedResource(firstBatch);
                if (resource != null && resource.exists() && resource.getState() != State.CREATE) {
                    resource.delete();
                }
                MultiBatchStagingWriter multiBatchStagingWriter = buildMultiBatchStagingWriter(request, identity, targetNode, batches, processInfo, channel);
                extractOutgoingBatch(processInfo, targetNode, multiBatchStagingWriter, firstBatch, false, false, ExtractMode.FOR_SYM_CLIENT);
                for (OutgoingBatch outgoingBatch : batches) {
                    resource = getStagedResource(outgoingBatch);
                    if (resource != null) {
                        resource.setState(State.DONE);
                    }
                }
            } else {
                log.info("Batches already had an OK status for request {}, batches {} to {}. Not extracting",
                        new Object[] { request.getRequestId(), request.getStartBatchId(), request.getEndBatchId() });
            }
            /*
             * re-query the batches to see if they have been OK'd while
             * extracting
             */
            List<OutgoingBatch> checkBatches = outgoingBatchService.getOutgoingBatchRange(request.getStartBatchId(), request.getEndBatchId()).getBatches();
            areBatchesOk = true;
            /*
             * check to see if batches have been OK'd by another reload
             * request while extracting
             */
            for (OutgoingBatch outgoingBatch : checkBatches) {
                if (outgoingBatch.getStatus() != Status.OK) {
                    areBatchesOk = false;
                    break;
                }
            }
            ISqlTransaction transaction = null;
            try {
                transaction = sqlTemplate.startSqlTransaction();
                updateExtractRequestStatus(transaction, request.getRequestId(), ExtractStatus.OK);
                if (!areBatchesOk) {
                    for (OutgoingBatch outgoingBatch : batches) {
                        if (!parameterService.is(ParameterConstants.INITIAL_LOAD_EXTRACT_AND_SEND_WHEN_STAGED, false)) {
                            outgoingBatch.setStatus(Status.NE);
                            outgoingBatchService.updateOutgoingBatch(transaction, outgoingBatch);
                        }
                    }
                } else {
                    log.info("Batches already had an OK status for request {}, batches {} to {}. Not updating the status to NE",
                            new Object[] { request.getRequestId(), request.getStartBatchId(), request.getEndBatchId() });
                }
                transaction.commit();
                log.info("Done extracting {} batches for request {}", (request.getEndBatchId() - request.getStartBatchId()) + 1, request.getRequestId());
            } catch (Error ex) {
                if (transaction != null) {
                    transaction.rollback();
                }
                throw ex;
            } catch (RuntimeException ex) {
                if (transaction != null) {
                    transaction.rollback();
                }
                throw ex;
            } finally {
                close(transaction);
            }
            processInfo.setStatus(org.jumpmind.symmetric.model.ProcessInfo.Status.OK);
        } catch (CancellationException ex) {
            log.info("Cancelled extract request {}. Starting at batch {}. Ending at batch {}",
                    new Object[] { request.getRequestId(), request.getStartBatchId(), request.getEndBatchId() });
            processInfo.setStatus(org.jumpmind.symmetric.model.ProcessInfo.Status.OK);
        } catch (RuntimeException ex) {
            log.debug("Failed to extract batches for request {}. Starting at batch {}. Ending at batch {}",
                    new Object[] { request.getRequestId(), request.getStartBatchId(), request.getEndBatchId() });
            processInfo.setStatus(org.jumpmind.symmetric.model.ProcessInfo.Status.ERROR);
            throw ex;
        }
    }
}
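Both beforeRouting above and this execute method share the same ISqlTransaction idiom: start, work, commit, roll back on Error or RuntimeException, close in finally. Reduced to its skeleton, with sqlTemplate as an assumed field and doWork as a placeholder for the real inserts and updates shown above:

// The ISqlTransaction idiom shared by the methods on this page.
// "sqlTemplate" is an assumed field; doWork(...) stands in for the real
// calls such as insertDataGap(...) or updateOutgoingBatch(...).
ISqlTransaction transaction = null;
try {
    transaction = sqlTemplate.startSqlTransaction();
    doWork(transaction);
    transaction.commit();
} catch (Error ex) {
    if (transaction != null) {
        transaction.rollback(); // never leave a dangling transaction on a JVM error
    }
    throw ex;
} catch (RuntimeException ex) {
    if (transaction != null) {
        transaction.rollback(); // same handling for ordinary runtime failures
    }
    throw ex;
} finally {
    if (transaction != null) {
        transaction.close();    // always release the connection
    }
}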
Use of org.jumpmind.symmetric.model.ProcessInfoKey in project symmetric-ds by JumpMind.
Class DataLoaderService, method loadDataFromPull().
public void loadDataFromPull(Node remote, RemoteNodeStatus status) throws IOException {
    Node local = nodeService.findIdentity();
    if (local == null) {
        local = new Node(this.parameterService, symmetricDialect);
    }
    try {
        NodeSecurity localSecurity = nodeService.findNodeSecurity(local.getNodeId(), true);
        IIncomingTransport transport = null;
        boolean isRegisterTransport = false;
        if (remote != null && localSecurity != null) {
            Map<String, String> requestProperties = new HashMap<String, String>();
            ChannelMap suspendIgnoreChannels = configurationService.getSuspendIgnoreChannelLists();
            requestProperties.put(WebConstants.SUSPENDED_CHANNELS, suspendIgnoreChannels.getSuspendChannelsAsString());
            requestProperties.put(WebConstants.IGNORED_CHANNELS, suspendIgnoreChannels.getIgnoreChannelsAsString());
            requestProperties.put(WebConstants.THREAD_CHANNEL, status.getChannelId());
            transport = transportManager.getPullTransport(remote, local, localSecurity.getNodePassword(), requestProperties, parameterService.getRegistrationUrl());
        } else {
            transport = transportManager.getRegisterTransport(local, parameterService.getRegistrationUrl());
            log.info("Using registration URL of {}", transport.getUrl());
            List<INodeRegistrationListener> registrationListeners = extensionService.getExtensionPointList(INodeRegistrationListener.class);
            for (INodeRegistrationListener l : registrationListeners) {
                l.registrationUrlUpdated(transport.getUrl());
            }
            remote = new Node();
            remote.setSyncUrl(parameterService.getRegistrationUrl());
            isRegisterTransport = true;
        }
        ProcessInfo processInfo = statisticManager.newProcessInfo(
                new ProcessInfoKey(remote.getNodeId(), status.getChannelId(), local.getNodeId(), ProcessType.PULL_JOB));
        try {
            List<IncomingBatch> list = loadDataFromTransport(processInfo, remote, transport, null);
            if (list.size() > 0) {
                processInfo.setStatus(ProcessInfo.Status.ACKING);
                status.updateIncomingStatus(list);
                local = nodeService.findIdentity();
                if (local != null) {
                    localSecurity = nodeService.findNodeSecurity(local.getNodeId(), !isRegisterTransport);
                    if (StringUtils.isNotBlank(transport.getRedirectionUrl())) {
                        /*
                         * We were redirected for the pull, we need to
                         * redirect for the ack
                         */
                        String url = transport.getRedirectionUrl();
                        int index = url.indexOf("/registration?");
                        if (index >= 0) {
                            url = url.substring(0, index);
                        }
                        log.info("Setting the sync url for ack to: {}", url);
                        remote.setSyncUrl(url);
                    }
                    sendAck(remote, local, localSecurity, list, transportManager);
                }
            }
            if (containsError(list)) {
                processInfo.setStatus(ProcessInfo.Status.ERROR);
            } else {
                processInfo.setStatus(ProcessInfo.Status.OK);
            }
            updateBatchToSendCount(remote, transport);
        } catch (RuntimeException e) {
            processInfo.setStatus(ProcessInfo.Status.ERROR);
            throw e;
        } catch (IOException e) {
            processInfo.setStatus(ProcessInfo.Status.ERROR);
            throw e;
        }
    } catch (RegistrationRequiredException e) {
        if (StringUtils.isBlank(remote.getSyncUrl()) || remote.getSyncUrl().equals(parameterService.getRegistrationUrl())) {
            log.warn("Node information missing on the server. Attempting to re-register remote.getSyncUrl()={}", remote.getSyncUrl());
            loadDataFromPull(null, status);
            nodeService.findIdentity(false);
        } else {
            log.warn("Failed to pull data from node '{}'. It probably is missing a node security record for '{}'.", remote.getNodeId(), local.getNodeId());
        }
    } catch (MalformedURLException e) {
        if (remote != null) {
            log.error("Could not connect to the {} node's transport because of a bad URL: '{}' {}", remote.getNodeId(), remote.getSyncUrl(), e);
        } else {
            log.error("", e);
        }
        throw e;
    }
}
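When the pull was redirected, the ack must target the same redirected host, so the code above trims everything from "/registration?" onward before reusing the URL as the sync URL. A tiny standalone check of that trim, with a hypothetical URL:

// Standalone check of the redirect-trimming step above; the URL is hypothetical.
String url = "https://corp.example.com:31415/sync/corp/registration?nodeId=001";
int index = url.indexOf("/registration?");
if (index >= 0) {
    url = url.substring(0, index);
}
// url is now "https://corp.example.com:31415/sync/corp", ready for remote.setSyncUrl(url)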