Use of com.orientechnologies.common.concur.OOfflineNodeException in project orientdb by orientechnologies.
The class ODistributedStorage, method createRecord:
public OStorageOperationResult<OPhysicalPosition> createRecord(final ORecordId iRecordId, final byte[] iContent, final int iRecordVersion, final byte iRecordType, final int iMode, final ORecordCallback<Long> iCallback) {
  resetLastValidBackup();
  if (OScenarioThreadLocal.INSTANCE.isRunModeDistributed()) {
    // ALREADY DISTRIBUTED
    return wrapped.createRecord(iRecordId, iContent, iRecordVersion, iRecordType, iMode, iCallback);
  }
  checkClusterRebalanceIsNotRunning();
  final String localNodeName = dManager.getLocalNodeName();
  final ODistributedConfiguration dbCfg = distributedConfiguration;
  // ASSIGN DESTINATION NODE
  final int clusterId = iRecordId.getClusterId();
  if (clusterId == ORID.CLUSTER_ID_INVALID)
    throw new IllegalArgumentException("Cluster not valid");
  checkNodeIsMaster(localNodeName, dbCfg, "Create record " + iRecordId);
  final String clusterName = getClusterNameByRID(iRecordId);
  checkWriteQuorum(dbCfg, clusterName, localNodeName);
  try {
    ODocument documentForClusterSelection = iRecordId.getRecord();
    if (documentForClusterSelection == null) {
      // DOCUMENT NOT FOUND: BUILD A TEMPORARY ONE
      documentForClusterSelection = (ODocument) ORecordInternal.fill(new ODocument(), iRecordId, iRecordVersion, iContent, false);
    }
    checkForCluster(documentForClusterSelection, localNodeName, dbCfg);
    final List<String> servers = dbCfg.getServers(clusterName, null);
    if (servers.isEmpty())
      // NO NODES: EXECUTE LOCALLY ONLY
      return wrapped.createRecord(iRecordId, iContent, iRecordVersion, iRecordType, iMode, iCallback);
    final String finalClusterName = clusterName;
    final Set<String> clusterNames = Collections.singleton(finalClusterName);
    // REMOVE CURRENT NODE BECAUSE IT HAS BEEN ALREADY EXECUTED LOCALLY
    servers.remove(localNodeName);
    Boolean executionModeSynch = dbCfg.isExecutionModeSynchronous(finalClusterName);
    if (executionModeSynch == null)
      executionModeSynch = iMode == 0;
    final boolean syncMode = executionModeSynch;
    // IN ANY CASE EXECUTE LOCALLY AND THEN DISTRIBUTE
    return (OStorageOperationResult<OPhysicalPosition>) executeRecordOperationInLock(syncMode, iRecordId, new OCallable<Object, OCallable<Void, ODistributedRequestId>>() {
      @Override
      public Object call(OCallable<Void, ODistributedRequestId> unlockCallback) {
        final OStorageOperationResult<OPhysicalPosition> localResult;
        localResult = wrapped.createRecord(iRecordId, iContent, iRecordVersion, iRecordType, iMode, iCallback);
        // UPDATE RID WITH NEW POSITION
        iRecordId.setClusterPosition(localResult.getResult().clusterPosition);
        final OPlaceholder localPlaceholder = new OPlaceholder(iRecordId, localResult.getResult().recordVersion);
        if (!servers.isEmpty()) {
          final OCreateRecordTask task = (OCreateRecordTask) dManager.getTaskFactoryManager().getFactoryByServerNames(servers).createTask(OCreateRecordTask.FACTORYID);
          task.init(iRecordId, iContent, iRecordVersion, iRecordType);
          task.setLastLSN(wrapped.getLSN());
          if (syncMode) {
            // SYNCHRONOUS CALL: REPLICATE IT
            try {
              final ODistributedResponse dResponse = dManager.sendRequest(getName(), clusterNames, servers, task, dManager.getNextMessageIdCounter(), EXECUTION_MODE.RESPONSE, localPlaceholder, unlockCallback, null);
              final Object payload = dResponse.getPayload();
              if (payload != null) {
                if (payload instanceof Exception) {
                  executeUndoOnLocalServer(dResponse.getRequestId(), task);
                  if (payload instanceof ONeedRetryException)
                    throw (ONeedRetryException) payload;
                  throw OException.wrapException(new ODistributedException("Error on execution distributed create record"), (Exception) payload);
                }
                // COPY THE CLUSTER POS -> RID
                final OPlaceholder masterPlaceholder = (OPlaceholder) payload;
                iRecordId.copyFrom(masterPlaceholder.getIdentity());
                return new OStorageOperationResult<OPhysicalPosition>(new OPhysicalPosition(masterPlaceholder.getIdentity().getClusterPosition(), masterPlaceholder.getVersion()));
              }
            } catch (RuntimeException e) {
              executeUndoOnLocalServer(null, task);
              throw e;
            } catch (Exception e) {
              executeUndoOnLocalServer(null, task);
              throw ODatabaseException.wrapException(new ODistributedException("Cannot execute distributed create record"), e);
            }
          } else {
            // ASYNCHRONOUSLY REPLICATE IT TO ALL THE OTHER NODES
            asynchronousExecution(new OAsynchDistributedOperation(getName(), Collections.singleton(finalClusterName), servers, task, dManager.getNextMessageIdCounter(), localPlaceholder, unlockCallback, null));
          }
        } else
          unlockCallback.call(null);
        return localResult;
      }
    });
  } catch (ODistributedRecordLockedException e) {
    // PASS THROUGH
    throw e;
  } catch (ONeedRetryException e) {
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord(iRecordId);
    final ORecordId lockEntireCluster = iRecordId.copy();
    lockEntireCluster.setClusterPosition(-1);
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord(lockEntireCluster);
    // PASS THROUGH
    throw e;
  } catch (HazelcastInstanceNotActiveException e) {
    throw new OOfflineNodeException("Hazelcast instance is not available");
  } catch (HazelcastException e) {
    throw new OOfflineNodeException("Hazelcast instance is not available");
  } catch (Exception e) {
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord(iRecordId);
    final ORecordId lockEntireCluster = iRecordId.copy();
    lockEntireCluster.setClusterPosition(-1);
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord(lockEntireCluster);
    handleDistributedException("Cannot route create record operation for %s to the distributed node", e, iRecordId);
    // UNREACHABLE
    return null;
  }
}
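In every error path above the storage converts Hazelcast availability failures into OOfflineNodeException and lets ONeedRetryException pass through, so callers are expected to retry. The sketch below is a hypothetical caller-side helper, not part of ODistributedStorage; only the two exception types come from the OrientDB API, while the class and method names are illustrative.

import java.util.concurrent.Callable;

import com.orientechnologies.common.concur.ONeedRetryException;
import com.orientechnologies.common.concur.OOfflineNodeException;

public final class RetryOnOfflineNode {

  // Retries an operation a bounded number of times when the distributed storage
  // reports a retriable condition (offline node or quorum/lock conflict).
  public static <T> T execute(final Callable<T> operation, final int maxRetries, final long delayMs) throws Exception {
    RuntimeException last = null;
    for (int attempt = 1; attempt <= maxRetries; ++attempt) {
      try {
        return operation.call();
      } catch (OOfflineNodeException e) {
        // Thrown above when Hazelcast is not available on the contacted node
        last = e;
      } catch (ONeedRetryException e) {
        // Distributed lock or quorum conflicts are also worth another attempt
        last = e;
      }
      Thread.sleep(delayMs);
    }
    if (last == null)
      throw new IllegalArgumentException("maxRetries must be at least 1");
    throw last;
  }
}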
Use of com.orientechnologies.common.concur.OOfflineNodeException in project orientdb by orientechnologies.
The class ODistributedStorage, method readRecordIfVersionIsNotLatest:
@Override
public OStorageOperationResult<ORawBuffer> readRecordIfVersionIsNotLatest(final ORecordId rid, final String fetchPlan, final boolean ignoreCache, final int recordVersion) throws ORecordNotFoundException {
  final ORawBuffer memCopy = localDistributedDatabase.getRecordIfLocked(rid);
  if (memCopy != null)
    return new OStorageOperationResult<ORawBuffer>(memCopy);
  try {
    final String clusterName = getClusterNameByRID(rid);
    final ODistributedConfiguration dbCfg = distributedConfiguration;
    final List<String> nodes = dbCfg.getServers(clusterName, null);
    final int availableNodes = nodes.size();
    // CHECK IF LOCAL NODE OWNS THE DATA AND READ-QUORUM = 1: GET IT LOCALLY BECAUSE IT'S FASTER
    final String localNodeName = dManager.getLocalNodeName();
    if (nodes.isEmpty() || nodes.contains(dManager.getLocalNodeName()) && dbCfg.getReadQuorum(clusterName, availableNodes, localNodeName) <= 1) {
      // DON'T REPLICATE
      return (OStorageOperationResult<ORawBuffer>) OScenarioThreadLocal.executeAsDistributed(new Callable() {
        @Override
        public Object call() throws Exception {
          return wrapped.readRecordIfVersionIsNotLatest(rid, fetchPlan, ignoreCache, recordVersion);
        }
      });
    }
    final OReadRecordIfNotLatestTask task = (OReadRecordIfNotLatestTask) dManager.getTaskFactoryManager().getFactoryByServerNames(nodes).createTask(OReadRecordIfNotLatestTask.FACTORYID);
    task.init(rid, recordVersion);
    // DISTRIBUTE IT
    final Object result = dManager.sendRequest(getName(), Collections.singleton(clusterName), nodes, task, dManager.getNextMessageIdCounter(), EXECUTION_MODE.RESPONSE, null, null, null).getPayload();
    if (result instanceof ONeedRetryException)
      throw (ONeedRetryException) result;
    else if (result instanceof Exception)
      throw OException.wrapException(new ODistributedException("Error on execution distributed read record"), (Exception) result);
    return new OStorageOperationResult<ORawBuffer>((ORawBuffer) result);
  } catch (ONeedRetryException e) {
    // PASS THROUGH
    throw e;
  } catch (HazelcastInstanceNotActiveException e) {
    throw new OOfflineNodeException("Hazelcast instance is not available");
  } catch (HazelcastException e) {
    throw new OOfflineNodeException("Hazelcast instance is not available");
  } catch (Exception e) {
    handleDistributedException("Cannot route read record operation for %s to the distributed node", e, rid);
    // UNREACHABLE
    return null;
  }
}
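The early return above skips distribution whenever the local node owns a copy of the cluster and the configured read quorum can be satisfied by a single node. A minimal sketch of that decision, extracted as a standalone predicate (the parameter names are assumptions; the real values come from ODistributedConfiguration.getServers() and getReadQuorum()):

import java.util.List;

final class LocalReadShortcut {

  // True when the read can be served by the local node alone: either no server owns
  // the cluster, or the local node owns a copy and a read quorum of 1 is enough.
  static boolean canReadLocally(final List<String> servers, final String localNodeName, final int readQuorum) {
    return servers.isEmpty() || (servers.contains(localNodeName) && readQuorum <= 1);
  }
}

Note that in the original condition && binds tighter than ||, so the quorum check only applies when the local node actually appears among the owning servers.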
Use of com.orientechnologies.common.concur.OOfflineNodeException in project orientdb by orientechnologies.
The class ODistributedStorage, method deleteRecord:
@Override
public OStorageOperationResult<Boolean> deleteRecord(final ORecordId iRecordId, final int iVersion, final int iMode, final ORecordCallback<Boolean> iCallback) {
  resetLastValidBackup();
  if (OScenarioThreadLocal.INSTANCE.isRunModeDistributed()) {
    // ALREADY DISTRIBUTED
    return wrapped.deleteRecord(iRecordId, iVersion, iMode, iCallback);
  }
  final String clusterName = getClusterNameByRID(iRecordId);
  final ODistributedConfiguration dbCfg = distributedConfiguration;
  final String localNodeName = dManager.getLocalNodeName();
  checkWriteQuorum(dbCfg, clusterName, localNodeName);
  try {
    checkNodeIsMaster(localNodeName, dbCfg, "Delete record " + iRecordId);
    final List<String> nodes = dbCfg.getServers(clusterName, null);
    if (nodes.isEmpty())
      // NO NODES: EXECUTE LOCALLY ONLY
      return wrapped.deleteRecord(iRecordId, iVersion, iMode, iCallback);
    final Set<String> clusterNames = Collections.singleton(clusterName);
    Boolean executionModeSynch = dbCfg.isExecutionModeSynchronous(clusterName);
    if (executionModeSynch == null)
      executionModeSynch = iMode == 0;
    final boolean syncMode = executionModeSynch;
    return (OStorageOperationResult<Boolean>) executeRecordOperationInLock(syncMode, iRecordId, new OCallable<Object, OCallable<Void, ODistributedRequestId>>() {
      @Override
      public Object call(OCallable<Void, ODistributedRequestId> unlockCallback) {
        final ODeleteRecordTask task = (ODeleteRecordTask) dManager.getTaskFactoryManager().getFactoryByServerNames(nodes).createTask(ODeleteRecordTask.FACTORYID);
        task.init(iRecordId, iVersion);
        final OStorageOperationResult<Boolean> localResult;
        final boolean executedLocally = nodes.contains(localNodeName);
        if (executedLocally) {
          // EXECUTE ON LOCAL NODE FIRST
          try {
            // LOAD CURRENT RECORD
            task.checkRecordExists();
            localResult = (OStorageOperationResult<Boolean>) OScenarioThreadLocal.executeAsDistributed(new Callable() {
              @Override
              public Object call() throws Exception {
                task.setLastLSN(wrapped.getLSN());
                return wrapped.deleteRecord(iRecordId, iVersion, iMode, iCallback);
              }
            });
          } catch (RuntimeException e) {
            throw e;
          } catch (Exception e) {
            throw OException.wrapException(new ODistributedException("Cannot delete record " + iRecordId), e);
          }
          nodes.remove(localNodeName);
        } else
          localResult = null;
        if (nodes.isEmpty()) {
          unlockCallback.call(null);
          if (!executedLocally)
            throw new ODistributedException("Cannot execute distributed delete on record " + iRecordId + " because no nodes are available");
        } else {
          final Boolean localResultPayload = localResult != null ? localResult.getResult() : null;
          if (syncMode || localResult == null) {
            // REPLICATE IT
            try {
              final ODistributedResponse dResponse = dManager.sendRequest(getName(), clusterNames, nodes, task, dManager.getNextMessageIdCounter(), EXECUTION_MODE.RESPONSE, localResultPayload, unlockCallback, null);
              final Object payload = dResponse.getPayload();
              if (payload instanceof Exception) {
                if (payload instanceof ORecordNotFoundException) {
                  // REPAIR THE RECORD IMMEDIATELY
                  localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord((ORecordId) ((ORecordNotFoundException) payload).getRid());
                }
                executeUndoOnLocalServer(dResponse.getRequestId(), task);
                if (payload instanceof ONeedRetryException)
                  throw (ONeedRetryException) payload;
                throw OException.wrapException(new ODistributedException("Error on execution distributed delete record"), (Exception) payload);
              }
              return new OStorageOperationResult<Boolean>(true);
            } catch (RuntimeException e) {
              executeUndoOnLocalServer(null, task);
              throw e;
            } catch (Exception e) {
              executeUndoOnLocalServer(null, task);
              throw ODatabaseException.wrapException(new ODistributedException("Cannot execute distributed delete record"), e);
            }
          }
          // ASYNCHRONOUS CALL: EXECUTE LOCALLY AND THEN DISTRIBUTE
          if (!nodes.isEmpty())
            asynchronousExecution(new OAsynchDistributedOperation(getName(), Collections.singleton(clusterName), nodes, task, dManager.getNextMessageIdCounter(), localResultPayload, unlockCallback, null));
        }
        return localResult;
      }
    });
  } catch (ONeedRetryException e) {
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord(iRecordId);
    // PASS THROUGH
    throw e;
  } catch (HazelcastInstanceNotActiveException e) {
    throw new OOfflineNodeException("Hazelcast instance is not available");
  } catch (HazelcastException e) {
    throw new OOfflineNodeException("Hazelcast instance is not available");
  } catch (Exception e) {
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord(iRecordId);
    handleDistributedException("Cannot route DELETE_RECORD operation for %s to the distributed node", e, iRecordId);
    // UNREACHABLE
    return null;
  }
}
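The pair of catch clauses mapping HazelcastInstanceNotActiveException and HazelcastException to OOfflineNodeException recurs in every method shown here. The following is a sketch of how that mapping could be centralized in one helper; it is hypothetical, since ODistributedStorage itself repeats the two clauses inline, and only the exception types come from the real APIs.

import java.util.concurrent.Callable;

import com.hazelcast.core.HazelcastException;
import com.hazelcast.core.HazelcastInstanceNotActiveException;
import com.orientechnologies.common.concur.OOfflineNodeException;

final class HazelcastAvailabilityGuard {

  // Runs the operation and re-throws any Hazelcast availability failure as the
  // OOfflineNodeException used throughout ODistributedStorage.
  static <T> T run(final Callable<T> operation) throws Exception {
    try {
      return operation.call();
    } catch (HazelcastInstanceNotActiveException e) {
      // The Hazelcast instance was shut down or has not been started on this node
      throw new OOfflineNodeException("Hazelcast instance is not available");
    } catch (HazelcastException e) {
      // Any other Hazelcast failure is treated the same way: the node is offline
      throw new OOfflineNodeException("Hazelcast instance is not available");
    }
  }
}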
Use of com.orientechnologies.common.concur.OOfflineNodeException in project orientdb by orientechnologies.
The class ODistributedStorage, method command:
public Object command(final OCommandRequestText iCommand) {
  List<String> servers = (List<String>) iCommand.getContext().getVariable("servers");
  if (servers == null) {
    servers = new ArrayList<String>();
    iCommand.getContext().setVariable("servers", servers);
  }
  final String localNodeName = dManager.getLocalNodeName();
  servers.add(localNodeName);
  if (OScenarioThreadLocal.INSTANCE.isRunModeDistributed())
    // ALREADY DISTRIBUTED
    return wrapped.command(iCommand);
  final ODistributedConfiguration dbCfg = distributedConfiguration;
  if (!dbCfg.isReplicationActive(null, localNodeName))
    // DON'T REPLICATE
    return wrapped.command(iCommand);
  final OCommandExecutor executor = OCommandManager.instance().getExecutor(iCommand);
  executor.setProgressListener(iCommand.getProgressListener());
  executor.parse(iCommand);
  final OCommandExecutor exec = executor instanceof OCommandExecutorSQLDelegate ? ((OCommandExecutorSQLDelegate) executor).getDelegate() : executor;
  if (exec.isIdempotent() && !dManager.isNodeAvailable(dManager.getLocalNodeName(), getName())) {
    // SPECIAL CASE: NODE IS OFFLINE AND THE COMMAND IS IDEMPOTENT, EXECUTE IT LOCALLY ONLY
    ODistributedServerLog.warn(this, dManager.getLocalNodeName(), null, ODistributedServerLog.DIRECTION.NONE, "Node '%s' is %s, the command '%s' against database '%s' will be executed only on local server with the possibility to have partial result", dManager.getLocalNodeName(), dManager.getDatabaseStatus(dManager.getLocalNodeName(), getName()), iCommand, wrapped.getName());
    return wrapped.command(iCommand);
  }
  if (!exec.isIdempotent())
    checkNodeIsMaster(localNodeName, dbCfg, "Command '" + iCommand + "'");
  try {
    Object result = null;
    OCommandDistributedReplicateRequest.DISTRIBUTED_EXECUTION_MODE executionMode = OCommandDistributedReplicateRequest.DISTRIBUTED_EXECUTION_MODE.LOCAL;
    OCommandDistributedReplicateRequest.DISTRIBUTED_RESULT_MGMT resultMgmt = OCommandDistributedReplicateRequest.DISTRIBUTED_RESULT_MGMT.CHECK_FOR_EQUALS;
    boolean executeOnLocalNodeFirst = true;
    if (OScenarioThreadLocal.INSTANCE.getRunMode() != RUN_MODE.RUNNING_DISTRIBUTED) {
      if (exec instanceof OCommandDistributedReplicateRequest) {
        executionMode = ((OCommandDistributedReplicateRequest) exec).getDistributedExecutionMode();
        resultMgmt = ((OCommandDistributedReplicateRequest) exec).getDistributedResultManagement();
        executeOnLocalNodeFirst = ((OCommandDistributedReplicateRequest) exec).isDistributedExecutingOnLocalNodeFirst();
      }
    }
    switch (executionMode) {
      case LOCAL:
        // CALL IN DEFAULT MODE TO LET OWN COMMAND TO REDISTRIBUTE CHANGES (LIKE INSERT)
        return wrapped.command(iCommand);
      case REPLICATE:
        // REPLICATE IT, GET ALL THE INVOLVED NODES
        final Collection<String> involvedClusters = exec.getInvolvedClusters();
        if (resultMgmt == OCommandDistributedReplicateRequest.DISTRIBUTED_RESULT_MGMT.MERGE) {
          if (!exec.isIdempotent() && dbCfg.isSharded())
            throw new ODistributedException("Cannot distribute the command '" + iCommand.getText() + "' because it is not idempotent and a map-reduce has been requested");
          final Map<String, Collection<String>> nodeClusterMap = dbCfg.getServerClusterMap(involvedClusters, localNodeName, exec.isIdempotent());
          final Map<String, Object> results;
          if (exec.isIdempotent() && nodeClusterMap.size() == 1 && nodeClusterMap.keySet().iterator().next().equals(localNodeName)) {
            // LOCAL NODE, AVOID TO DISTRIBUTE IT
            // CALL IN DEFAULT MODE TO LET OWN COMMAND TO REDISTRIBUTE CHANGES (LIKE INSERT)
            result = wrapped.command(iCommand);
            results = new HashMap<String, Object>(1);
            results.put(localNodeName, result);
          } else {
            // SELECT: SPLIT CLASSES/CLUSTER IF ANY
            results = executeOnServers(iCommand, exec, involvedClusters, nodeClusterMap);
          }
          final OCommandExecutorSQLSelect select = exec instanceof OCommandExecutorSQLSelect ? (OCommandExecutorSQLSelect) exec : null;
          if (select != null && select.isAnyFunctionAggregates() && !select.hasGroupBy()) {
            result = mergeResultByAggregation(select, results);
          } else {
            // MIX & FILTER RESULT SET AVOIDING DUPLICATES
            // TODO: ONCE OPTIMIZED (SEE ABOVE) AVOID TO FILTER HERE
            result = exec.mergeResults(results);
          }
          if (result instanceof Throwable && results.containsKey(localNodeName))
            undoCommandOnLocalServer(iCommand);
        } else {
          final OAbstractCommandTask task = iCommand instanceof OCommandScript ? new OScriptTask(iCommand) : new OSQLCommandTask(iCommand, new HashSet<String>());
          task.setResultStrategy(OAbstractRemoteTask.RESULT_STRATEGY.ANY);
          final Set<String> nodes = dbCfg.getServers(involvedClusters);
          if (iCommand instanceof ODistributedCommand)
            nodes.removeAll(((ODistributedCommand) iCommand).nodesToExclude());
          if (executeOnlyLocally(localNodeName, dbCfg, exec, involvedClusters, nodes))
            // CALL IN DEFAULT MODE TO LET OWN COMMAND TO REDISTRIBUTE CHANGES (LIKE INSERT)
            return wrapped.command(iCommand);
          final boolean executedLocally = executeOnLocalNodeFirst && nodes.contains(localNodeName);
          if (exec.involveSchema())
            // EXECUTE THE COMMAND IN LOCK
            result = dManager.executeInDistributedDatabaseLock(getName(), 20000, dManager.getDatabaseConfiguration(getName()).modify(), new OCallable<Object, OModifiableDistributedConfiguration>() {
              @Override
              public Object call(OModifiableDistributedConfiguration iArgument) {
                return executeCommand(iCommand, localNodeName, involvedClusters, task, nodes, executedLocally);
              }
            });
          else
            result = executeCommand(iCommand, localNodeName, involvedClusters, task, nodes, executedLocally);
        }
        if (exec.involveSchema())
          // UPDATE THE SCHEMA
          dManager.propagateSchemaChanges(ODatabaseRecordThreadLocal.INSTANCE.get());
        break;
    }
    if (result instanceof ONeedRetryException)
      throw (ONeedRetryException) result;
    else if (result instanceof RuntimeException)
      throw (RuntimeException) result;
    else if (result instanceof Exception)
      throw OException.wrapException(new ODistributedException("Error on execution distributed COMMAND"), (Exception) result);
    return result;
  } catch (OConcurrentModificationException e) {
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord((ORecordId) e.getRid());
    throw e;
  } catch (ONeedRetryException e) {
    // PASS THROUGH
    throw e;
  } catch (HazelcastInstanceNotActiveException e) {
    throw new OOfflineNodeException("Hazelcast instance is not available");
  } catch (HazelcastException e) {
    throw new OOfflineNodeException("Hazelcast instance is not available");
  } catch (Exception e) {
    handleDistributedException("Cannot route COMMAND operation to the distributed node", e);
    // UNREACHABLE
    return null;
  }
}
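With the MERGE result management, the per-server results are combined either by mergeResultByAggregation() (aggregate functions without GROUP BY) or by exec.mergeResults(). The sketch below is a simplified illustration of the aggregation case for a plain count(), under the assumption that each server returns its own partial count; it is not the OrientDB implementation.

import java.util.Map;

final class CountMergeExample {

  // Combines a count() that each server computed on its own shard into the
  // cluster-wide total, in the spirit of the MERGE result management above.
  static long mergeCounts(final Map<String, Long> countPerServer) {
    long total = 0;
    for (final Long partial : countPerServer.values())
      total += partial == null ? 0 : partial;
    return total;
  }
}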
Use of com.orientechnologies.common.concur.OOfflineNodeException in project orientdb by orientechnologies.
The class ODistributedStorage, method commit:
@Override
public List<ORecordOperation> commit(final OTransaction iTx, final Runnable callback) {
  resetLastValidBackup();
  if (OScenarioThreadLocal.INSTANCE.isRunModeDistributed()) {
    // ALREADY DISTRIBUTED
    try {
      return wrapped.commit(iTx, callback);
    } catch (ORecordDuplicatedException e) {
      // CHECK THE RECORD HAS THE SAME KEY IS STILL UNDER DISTRIBUTED TX
      final ODistributedDatabase dDatabase = dManager.getMessageService().getDatabase(getName());
      if (dDatabase.getRecordIfLocked(e.getRid()) != null) {
        throw new OPossibleDuplicatedRecordException(e);
      }
      throw e;
    }
  }
  final ODistributedConfiguration dbCfg = distributedConfiguration;
  final String localNodeName = dManager.getLocalNodeName();
  checkNodeIsMaster(localNodeName, dbCfg, "Transaction Commit");
  checkClusterRebalanceIsNotRunning();
  try {
    if (!dbCfg.isReplicationActive(null, localNodeName)) {
      // DON'T REPLICATE
      OScenarioThreadLocal.executeAsDistributed(new Callable() {
        @Override
        public Object call() throws Exception {
          return wrapped.commit(iTx, callback);
        }
      });
    } else {
      // EXECUTE DISTRIBUTED TX
      int maxAutoRetry = OGlobalConfiguration.DISTRIBUTED_CONCURRENT_TX_MAX_AUTORETRY.getValueAsInteger();
      if (maxAutoRetry <= 0)
        maxAutoRetry = 1;
      int autoRetryDelay = OGlobalConfiguration.DISTRIBUTED_CONCURRENT_TX_AUTORETRY_DELAY.getValueAsInteger();
      if (autoRetryDelay <= 0)
        autoRetryDelay = 1;
      Throwable lastException = null;
      for (int retry = 1; retry <= maxAutoRetry; ++retry) {
        try {
          return txManager.commit((ODatabaseDocumentTx) ODatabaseRecordThreadLocal.INSTANCE.get(), iTx, callback, eventListener);
        } catch (Throwable e) {
          lastException = e;
          if (retry >= maxAutoRetry) {
            // REACHED MAX RETRIES
            ODistributedServerLog.debug(this, localNodeName, null, ODistributedServerLog.DIRECTION.NONE, "Distributed transaction retries exceed maximum auto-retries (%d)", maxAutoRetry);
            break;
          }
          // SKIP RETRY IN CASE OF OConcurrentModificationException BECAUSE IT NEEDS A RETRY AT APPLICATION LEVEL
          if (!(e instanceof OConcurrentModificationException) && (e instanceof ONeedRetryException || e instanceof ORecordNotFoundException)) {
            // RETRY
            final long wait = autoRetryDelay / 2 + new Random().nextInt(autoRetryDelay);
            ODistributedServerLog.debug(this, localNodeName, null, ODistributedServerLog.DIRECTION.NONE, "Distributed transaction cannot be completed, wait %dms and retry again (%d/%d)", wait, retry, maxAutoRetry);
            Thread.sleep(wait);
            Orient.instance().getProfiler().updateCounter("db." + getName() + ".distributedTxRetries", "Number of retries executed in distributed transaction", +1, "db.*.distributedTxRetries");
          } else
            // SKIP RETRY LOOP
            break;
        }
      }
      if (lastException instanceof RuntimeException)
        throw (RuntimeException) lastException;
      else
        throw OException.wrapException(new ODistributedException("Error on executing distributed transaction"), lastException);
    }
  } catch (OValidationException e) {
    throw e;
  } catch (HazelcastInstanceNotActiveException e) {
    throw new OOfflineNodeException("Hazelcast instance is not available");
  } catch (HazelcastException e) {
    throw new OOfflineNodeException("Hazelcast instance is not available");
  } catch (Exception e) {
    handleDistributedException("Cannot route TX operation against distributed node", e);
  }
  return null;
}
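The commit path auto-retries only ONeedRetryException and ORecordNotFoundException, with a jittered wait of autoRetryDelay / 2 plus a random amount up to autoRetryDelay; an OConcurrentModificationException is deliberately not retried here because, as the comment notes, it needs a retry at application level where the stale record can be re-read. Below is a hedged application-side sketch of such a retry; the database URL, credentials, record id and field name are placeholders, not values from the source.

import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx;
import com.orientechnologies.orient.core.exception.OConcurrentModificationException;
import com.orientechnologies.orient.core.id.ORecordId;
import com.orientechnologies.orient.core.record.impl.ODocument;

public final class ApplicationLevelTxRetry {

  public static void main(final String[] args) {
    // Placeholder connection details for illustration only
    final ODatabaseDocumentTx db = new ODatabaseDocumentTx("remote:localhost/demo").open("admin", "admin");
    try {
      for (int attempt = 1; attempt <= 3; ++attempt) {
        try {
          db.begin();
          // Re-load inside each attempt so the document carries the latest version number
          final ODocument doc = db.load(new ORecordId("#9:1"));
          doc.field("counter", (Integer) doc.field("counter") + 1);
          doc.save();
          db.commit();
          return;
        } catch (OConcurrentModificationException e) {
          // Another node or client updated the record first: roll back and try again
          db.rollback();
        }
      }
    } finally {
      db.close();
    }
  }
}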