Use of com.orientechnologies.common.util.OCallable in project orientdb by orientechnologies.
The class ODistributedStorage, method executeRecordOperationInLock:
private Object executeRecordOperationInLock(final boolean iUnlockAtTheEnd, final ORecordId rid,
    final OCallable<Object, OCallable<Void, ODistributedRequestId>> callback) throws Exception {

  final ORecordId rid2Lock;
  if (!rid.isPersistent())
    // CREATE A COPY TO KEEP THE LOCK ON THE CLUSTER EVEN IF THE RID BECOMES PERSISTENT IN THE MEANTIME.
    // THIS ALLOWS PARALLEL TX, BECAUSE A NEW RID LOCKS THE ENTIRE CLUSTER.
    rid2Lock = new ORecordId(rid.getClusterId(), -1L);
  else
    rid2Lock = rid;

  ODistributedRequestId requestId = null;
  final OLogSequenceNumber lastLSN = wrapped.getLSN();
  final AtomicBoolean lockReleased = new AtomicBoolean(false);

  try {
    requestId = acquireRecordLock(rid2Lock);
    final ODistributedRequestId finalReqId = requestId;

    final OCallable<Void, ODistributedRequestId> unlockCallback = new OCallable<Void, ODistributedRequestId>() {
      @Override
      public Void call(final ODistributedRequestId requestId) {
        // UNLOCK AS SOON AS THE REQUEST IS SENT
        if (lockReleased.compareAndSet(false, true)) {
          releaseRecordLock(rid2Lock, finalReqId);
          lockReleased.set(true);
        }
        return null;
      }
    };

    return OScenarioThreadLocal.executeAsDistributed(new Callable() {
      @Override
      public Object call() throws Exception {
        return callback.call(unlockCallback);
      }
    });

  } finally {
    if (iUnlockAtTheEnd) {
      if (lockReleased.compareAndSet(false, true)) {
        releaseRecordLock(rid2Lock, requestId);
      }
    }

    final OLogSequenceNumber currentLSN = wrapped.getLSN();
    if (!lastLSN.equals(currentLSN))
      // SAVE LAST LSN
      try {
        localDistributedDatabase.getSyncConfiguration()
            .setLastLSN(getDistributedManager().getLocalNodeName(), ((OLocalPaginatedStorage) getUnderlying()).getLSN(), true);
      } catch (IOException e) {
        ODistributedServerLog.debug(this, dManager != null ? dManager.getLocalNodeName() : "?", null,
            ODistributedServerLog.DIRECTION.NONE, "Error on updating local LSN configuration for database '%s'", wrapped.getName());
      }
  }
}
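The double generic in the signature is easier to read in isolation: the outer OCallable is the record operation itself, and the argument handed to it is another OCallable that releases the record lock as soon as the request has been shipped, instead of waiting for the finally block. Below is a minimal, self-contained sketch of that hand-off; the Callback interface only mirrors OCallable's assumed single-method shape (RET call(PAR arg)), and runInLock, acquireLock and releaseLock are hypothetical stand-ins, not OrientDB API.

import java.util.concurrent.atomic.AtomicBoolean;

public class UnlockCallbackSketch {

  // Stand-in for com.orientechnologies.common.util.OCallable (assumed shape: RET call(PAR arg)).
  interface Callback<RET, PAR> {
    RET call(PAR arg);
  }

  // Hypothetical lock manager, used only for this illustration.
  static void acquireLock(String rid) { System.out.println("lock   " + rid); }
  static void releaseLock(String rid) { System.out.println("unlock " + rid); }

  // Same shape as executeRecordOperationInLock: the operation receives an "unlock early" callback,
  // and the finally block releases the lock only if the operation never used it.
  static Object runInLock(String rid, Callback<Object, Callback<Void, Long>> operation) {
    final AtomicBoolean released = new AtomicBoolean(false);
    acquireLock(rid);
    try {
      final Callback<Void, Long> unlockEarly = requestId -> {
        if (released.compareAndSet(false, true))
          releaseLock(rid); // drop the lock as soon as the request has been sent
        return null;
      };
      return operation.call(unlockEarly);
    } finally {
      if (released.compareAndSet(false, true))
        releaseLock(rid); // fallback: the operation never unlocked early
    }
  }

  public static void main(String[] args) {
    runInLock("#12:7", unlock -> {
      System.out.println("ship the request to the other servers...");
      unlock.call(42L); // the lock is already gone while we wait for responses
      return "done";
    });
  }
}

The AtomicBoolean guard is what lets the two release paths (early unlock and the finally fallback) coexist without a double release.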
Use of com.orientechnologies.common.util.OCallable in project orientdb by orientechnologies.
The class ODistributedStorage, method addCluster:
@Override
public int addCluster(final String iClusterName, boolean forceListBased, final Object... iParameters) {
  for (int retry = 0; retry < 10; ++retry) {
    final AtomicInteger clId = new AtomicInteger();

    if (!OScenarioThreadLocal.INSTANCE.isRunModeDistributed()) {
      final StringBuilder cmd = new StringBuilder("create cluster `");
      cmd.append(iClusterName);
      cmd.append("`");

      // EXECUTE THIS OUTSIDE LOCK TO AVOID DEADLOCKS
      Object result = null;
      try {
        result = dManager.executeInDistributedDatabaseLock(getName(), 0, dManager.getDatabaseConfiguration(getName()).modify(),
            new OCallable<Object, OModifiableDistributedConfiguration>() {
              @Override
              public Object call(OModifiableDistributedConfiguration iArgument) {
                clId.set(wrapped.addCluster(iClusterName, false, iParameters));

                final OCommandSQL commandSQL = new OCommandSQL(cmd.toString());
                commandSQL.addExcludedNode(getNodeId());
                return command(commandSQL);
              }
            });
      } catch (Exception e) {
        // RETRY
        wrapped.dropCluster(iClusterName, false);
        continue;
      }

      if (result != null && ((Integer) result).intValue() != clId.get()) {
        ODistributedServerLog.warn(this, dManager.getLocalNodeName(), null, ODistributedServerLog.DIRECTION.NONE,
            "Error on creating cluster '%s' on distributed nodes: ids are different (local=%d and remote=%d). Local clusters %s. Retrying %d/%d...",
            iClusterName, clId.get(), ((Integer) result).intValue(), getClusterNames(), retry, 10);

        wrapped.dropCluster(clId.get(), false);

        // REMOVE ON REMOTE NODES TOO
        cmd.setLength(0);
        cmd.append("drop cluster ");
        cmd.append(iClusterName);

        final OCommandSQL commandSQL = new OCommandSQL(cmd.toString());
        commandSQL.addExcludedNode(getNodeId());
        command(commandSQL);

        try {
          Thread.sleep(300);
        } catch (InterruptedException e) {
        }

        // TODO: RELOAD DOESN'T DO ANYTHING; WHAT IS NEEDED HERE IS A WAY TO CLOSE/OPEN THE DB
        wrapped.reload();
        continue;
      }
    } else
      clId.set(wrapped.addCluster(iClusterName, false, iParameters));

    return clId.get();
  }

  throw new ODistributedException(
      "Error on creating cluster '" + iClusterName + "' on distributed nodes: local and remote ids assigned are different");
}
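What the retry loop is protecting against is a cluster-id mismatch: the id assigned by the local wrapped.addCluster(...) must equal the one reported back by the other servers, otherwise the cluster is dropped everywhere and creation is attempted again, up to 10 times. A compact sketch of that convergence loop follows, with the local and remote outcomes scripted rather than real; none of these names are OrientDB API.

public class ClusterIdRetrySketch {
  static final int MAX_RETRY = 10;

  // Scripted outcomes standing in for the local addCluster(...) and the "create cluster"
  // command executed on the other servers: mismatch, mismatch, then agreement.
  static final int[] LOCAL_IDS  = {7, 8, 8};
  static final int[] REMOTE_IDS = {8, 7, 8};

  static int addCluster(String name) {
    for (int retry = 0; retry < MAX_RETRY; ++retry) {
      final int localId  = LOCAL_IDS[Math.min(retry, LOCAL_IDS.length - 1)];
      final int remoteId = REMOTE_IDS[Math.min(retry, REMOTE_IDS.length - 1)];
      if (localId == remoteId)
        return localId; // every node agreed on the same cluster id
      System.out.printf("ids differ (local=%d, remote=%d): drop the cluster and retry %d/%d%n",
          localId, remoteId, retry + 1, MAX_RETRY);
      // here ODistributedStorage drops the cluster locally and remotely before trying again
    }
    throw new IllegalStateException("local and remote cluster ids for '" + name + "' never converged");
  }

  public static void main(String[] args) {
    System.out.println("cluster id = " + addCluster("person"));
  }
}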
Use of com.orientechnologies.common.util.OCallable in project orientdb by orientechnologies.
The class ODistributedTransactionManager, method commit:
public List<ORecordOperation> commit(final ODatabaseDocumentTx database, final OTransaction iTx, final Runnable callback,
    final ODistributedStorageEventListener eventListener) {
  final String localNodeName = dManager.getLocalNodeName();

  try {
    OTransactionInternal.setStatus((OTransactionAbstract) iTx, OTransaction.TXSTATUS.BEGUN);

    final ODistributedConfiguration dbCfg = dManager.getDatabaseConfiguration(storage.getName());

    // CHECK THE LOCAL NODE IS THE OWNER OF THE CLUSTER IDS
    checkForClusterIds(iTx, localNodeName, dbCfg);

    // CREATE UNDO CONTENT FOR DISTRIBUTED 2-PHASE ROLLBACK
    final List<OAbstractRemoteTask> undoTasks = createUndoTasksFromTx(iTx);

    final int maxAutoRetry = OGlobalConfiguration.DISTRIBUTED_CONCURRENT_TX_MAX_AUTORETRY.getValueAsInteger();
    final int autoRetryDelay = OGlobalConfiguration.DISTRIBUTED_CONCURRENT_TX_AUTORETRY_DELAY.getValueAsInteger();

    Boolean executionModeSynch = dbCfg.isExecutionModeSynchronous(null);
    if (executionModeSynch == null)
      executionModeSynch = Boolean.TRUE;
    final boolean finalExecutionModeSynch = executionModeSynch;

    final ODistributedRequestId requestId = new ODistributedRequestId(dManager.getLocalNodeId(), dManager.getNextMessageIdCounter());

    final ODistributedTxContext ctx = localDistributedDatabase.registerTxContext(requestId);

    final AtomicBoolean lockReleased = new AtomicBoolean(true);
    try {
      acquireMultipleRecordLocks(iTx, maxAutoRetry, autoRetryDelay, eventListener, ctx);
      lockReleased.set(false);

      final List<ORecordOperation> uResult = (List<ORecordOperation>) OScenarioThreadLocal.executeAsDistributed(new Callable() {
        @Override
        public Object call() throws Exception {
          return storage.commit(iTx, callback);
        }
      });

      try {
        localDistributedDatabase.getSyncConfiguration()
            .setLastLSN(localNodeName, ((OLocalPaginatedStorage) storage.getUnderlying()).getLSN(), true);
      } catch (IOException e) {
        ODistributedServerLog.debug(this, dManager != null ? dManager.getLocalNodeName() : "?", null,
            ODistributedServerLog.DIRECTION.NONE, "Error on updating local LSN configuration for database '%s'", storage.getName());
      }

      // REMOVE THE TX OBJECT FROM THE DATABASE TO AVOID UNDO OPERATIONS BEING "LOST IN TRANSACTION"
      database.setDefaultTransactionMode();

      // After commit, force a clean of the dirty managers: a record copy may have skipped the clean.
      for (ORecordOperation ent : iTx.getAllRecordEntries()) {
        ORecordInternal.getDirtyManager(ent.getRecord()).clear();
      }

      final Set<String> involvedClusters = getInvolvedClusters(uResult);
      Set<String> nodes = getAvailableNodesButLocal(dbCfg, involvedClusters, localNodeName);
      if (nodes.isEmpty()) {
        // NO FURTHER NODES TO INVOLVE
        executionModeSynch = true;
        return null;
      }

      updateUndoTaskWithCreatedRecords(uResult, undoTasks);

      final OTxTaskResult localResult = createLocalTxResult(uResult);

      final OTxTask txTask = createTxTask(uResult);
      txTask.setLocalUndoTasks(undoTasks);

      try {
        txTask.setLastLSN(((OAbstractPaginatedStorage) storage.getUnderlying()).getLSN());

        OTransactionInternal.setStatus((OTransactionAbstract) iTx, OTransaction.TXSTATUS.COMMITTING);

        if (finalExecutionModeSynch) {
          // SYNCHRONOUS, AUTO-RETRY IN CASE RECORDS ARE LOCKED
          ODistributedResponse lastResult = null;
          for (int retry = 1; retry <= maxAutoRetry; ++retry) {
            boolean isLastRetry = maxAutoRetry == retry;

            if (retry > 1) {
              // REBUILD THE SERVER LIST
              nodes = getAvailableNodesButLocal(dbCfg, involvedClusters, localNodeName);
              if (nodes.isEmpty()) {
                // NO FURTHER NODES TO INVOLVE
                executionModeSynch = true;
                return null;
              }

              ODistributedServerLog.debug(this, localNodeName, null, ODistributedServerLog.DIRECTION.NONE,
                  "Retrying (%d/%d) transaction reqId=%s...", retry, maxAutoRetry, requestId);
            }

            // SYNCHRONOUS CALL: REPLICATE IT
            lastResult = dManager.sendRequest(storage.getName(), involvedClusters, nodes, txTask, requestId.getMessageId(),
                EXECUTION_MODE.RESPONSE, localResult, null);

            if (!processCommitResult(localNodeName, iTx, txTask, involvedClusters, uResult, nodes, autoRetryDelay,
                lastResult.getRequestId(), lastResult, isLastRetry)) {
              // RETRY
              Orient.instance().getProfiler().updateCounter("db." + database.getName() + ".distributedTxRetries",
                  "Number of retries executed in distributed transaction", +1, "db.*.distributedTxRetries");
              continue;
            }

            ODistributedServerLog.debug(this, localNodeName, null, ODistributedServerLog.DIRECTION.NONE,
                "Distributed transaction succeeded. Tasks: %s", txTask.getTasks());

            // OK, DISTRIBUTED COMMIT SUCCEEDED
            return null;
          }

          // ONLY REMAINING CASE: ODistributedRecordLockedException THROWN MORE TIMES THAN THE AUTO-RETRY LIMIT
          ODistributedServerLog.debug(this, localNodeName, null, ODistributedServerLog.DIRECTION.NONE,
              "Distributed transaction retries exceed maximum auto-retries (%d). Task: %s - Tasks: %s", maxAutoRetry, txTask,
              txTask.getTasks());

          // ROLLBACK TX
          storage.executeUndoOnLocalServer(requestId, txTask);
          sendTxCompleted(localNodeName, involvedClusters, nodes, lastResult.getRequestId(), false, txTask.getPartitionKey());

          throw (RuntimeException) lastResult.getPayload();

        } else {
          // ASYNC, MANAGE REPLICATION CALLBACK
          final OCallable<Void, ODistributedRequestId> unlockCallback = new OCallable<Void, ODistributedRequestId>() {
            @Override
            public Void call(final ODistributedRequestId reqId) {
              // FREE THE CONTEXT
              if (lockReleased.compareAndSet(false, true)) {
                localDistributedDatabase.popTxContext(requestId);
                ctx.destroy();
              }
              return null;
            }
          };

          executeAsyncTx(nodes, localResult, involvedClusters, txTask, requestId.getMessageId(), localNodeName, unlockCallback);
        }
      } catch (Throwable e) {
        // UNDO LOCAL TX
        storage.executeUndoOnLocalServer(requestId, txTask);

        executionModeSynch = true;

        if (e instanceof RuntimeException)
          throw (RuntimeException) e;
        else if (e instanceof InterruptedException)
          throw OException.wrapException(new ODistributedOperationException("Cannot commit transaction"), e);
        else
          throw OException.wrapException(new ODistributedException("Cannot commit transaction"), e);
      }
    } catch (RuntimeException e) {
      executionModeSynch = true;
      throw e;
    } catch (InterruptedException e) {
      executionModeSynch = true;
      throw OException.wrapException(new ODistributedOperationException("Cannot commit transaction"), e);
    } catch (Exception e) {
      executionModeSynch = true;
      throw OException.wrapException(new ODistributedException("Cannot commit transaction"), e);
    } finally {
      if (executionModeSynch) {
        if (lockReleased.compareAndSet(false, true)) {
          localDistributedDatabase.popTxContext(requestId);
          ctx.destroy();
        }
      }
    }
  } catch (OValidationException e) {
    throw e;
  } catch (ODistributedRecordLockedException e) {
    throw e;
  } catch (OConcurrentCreateException e) {
    // REQUEST A REPAIR OF THE CLUSTER BECAUSE IT IS NOT ALIGNED
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairCluster(e.getActualRid().getClusterId());
    throw e;
  } catch (OConcurrentModificationException e) {
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord((ORecordId) e.getRid());
    throw e;
  } catch (Exception e) {
    for (ORecordOperation op : iTx.getAllRecordEntries()) {
      if (iTx.hasRecordCreation()) {
        final ORecordId lockEntireCluster = (ORecordId) op.getRID().copy();
        localDistributedDatabase.getDatabaseRepairer().enqueueRepairCluster(lockEntireCluster.getClusterId());
      }
      localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord((ORecordId) op.getRID());
    }

    storage.handleDistributedException("Cannot route TX operation against distributed node", e);
  }
  return null;
}
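Stripped of the OrientDB plumbing, the synchronous branch of commit() is: commit locally first, then a bounded auto-retry loop that ships the whole transaction as one task and, if the retries are exhausted, undoes the local changes. The sketch below keeps only that control flow; the Response enum and the simulated quorum call are illustrative assumptions, not the real OTxTask/ODistributedResponse types.

public class TwoPhaseCommitSketch {
  // Simulated replication outcome: either the quorum accepted the tx or a record was locked elsewhere.
  enum Response { OK, RECORD_LOCKED }

  static int attempts = 0;

  static void commitLocally()         { System.out.println("local commit"); }
  static void undoLocally()           { System.out.println("local undo"); }
  static Response replicateToQuorum() { return ++attempts < 3 ? Response.RECORD_LOCKED : Response.OK; }

  static void commit(int maxAutoRetry, long autoRetryDelayMs) throws InterruptedException {
    commitLocally(); // phase 1: apply the tx on this node
    for (int retry = 1; retry <= maxAutoRetry; ++retry) {
      final Response r = replicateToQuorum(); // phase 2: ship the whole tx as one task
      if (r == Response.OK) {
        System.out.println("distributed commit succeeded after " + retry + " attempt(s)");
        return;
      }
      System.out.printf("records locked on another server, retrying %d/%d%n", retry, maxAutoRetry);
      Thread.sleep(autoRetryDelayMs);
    }
    undoLocally(); // retries exhausted: roll the local changes back
    throw new IllegalStateException("transaction could not reach the quorum within the retry limit");
  }

  public static void main(String[] args) throws InterruptedException {
    commit(10, 50);
  }
}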
Use of com.orientechnologies.common.util.OCallable in project orientdb by orientechnologies.
The class ODistributedStorage, method updateRecord:
@Override
public OStorageOperationResult<Integer> updateRecord(final ORecordId iRecordId, final boolean updateContent, final byte[] iContent,
    final int iVersion, final byte iRecordType, final int iMode, final ORecordCallback<Integer> iCallback) {
  resetLastValidBackup();

  if (OScenarioThreadLocal.INSTANCE.isRunModeDistributed()) {
    // ALREADY DISTRIBUTED
    return wrapped.updateRecord(iRecordId, updateContent, iContent, iVersion, iRecordType, iMode, iCallback);
  }

  checkLocalNodeIsAvailable();

  final ODistributedConfiguration dbCfg = distributedConfiguration;
  final String clusterName = getClusterNameByRID(iRecordId);
  final String localNodeName = dManager.getLocalNodeName();

  checkWriteQuorum(dbCfg, clusterName, localNodeName);

  try {
    checkNodeIsMaster(localNodeName, dbCfg);

    final List<String> nodes = dbCfg.getServers(clusterName, null);
    if (nodes.isEmpty())
      // NO REPLICATION: EXECUTE IT LOCALLY
      return wrapped.updateRecord(iRecordId, updateContent, iContent, iVersion, iRecordType, iMode, iCallback);

    final Set<String> clusterNames = Collections.singleton(clusterName);

    Boolean executionModeSynch = dbCfg.isExecutionModeSynchronous(clusterName);
    if (executionModeSynch == null)
      executionModeSynch = iMode == 0;
    final boolean syncMode = executionModeSynch;

    return (OStorageOperationResult<Integer>) executeRecordOperationInLock(syncMode, iRecordId,
        new OCallable<Object, OCallable<Void, ODistributedRequestId>>() {

          @Override
          public Object call(OCallable<Void, ODistributedRequestId> unlockCallback) {
            final OUpdateRecordTask task = new OUpdateRecordTask(iRecordId, iContent, iVersion, iRecordType);

            final OStorageOperationResult<Integer> localResult;

            final boolean executedLocally = nodes.contains(localNodeName);
            if (executedLocally) {
              // EXECUTE ON LOCAL NODE FIRST
              try {
                // LOAD CURRENT RECORD
                task.checkRecordExists();

                localResult = (OStorageOperationResult<Integer>) OScenarioThreadLocal.executeAsDistributed(new Callable() {
                  @Override
                  public Object call() throws Exception {
                    task.setLastLSN(wrapped.getLSN());
                    return wrapped.updateRecord(iRecordId, updateContent, iContent, iVersion, iRecordType, iMode, iCallback);
                  }
                });
              } catch (RuntimeException e) {
                throw e;
              } catch (Exception e) {
                throw OException.wrapException(new ODistributedException("Cannot update record " + iRecordId), e);
              }

              nodes.remove(localNodeName);
            } else
              localResult = null;

            if (nodes.isEmpty()) {
              unlockCallback.call(null);

              if (!executedLocally)
                throw new ODistributedException(
                    "Cannot execute distributed update on record " + iRecordId + " because no nodes are available");
            } else {
              final Integer localResultPayload = localResult != null ? localResult.getResult() : null;

              if (syncMode || localResult == null) {
                // REPLICATE IT
                try {
                  final ODistributedResponse dResponse = dManager.sendRequest(getName(), clusterNames, nodes, task,
                      dManager.getNextMessageIdCounter(), EXECUTION_MODE.RESPONSE, localResultPayload, unlockCallback);
                  final Object payload = dResponse.getPayload();

                  if (payload instanceof Exception) {
                    if (payload instanceof ORecordNotFoundException) {
                      // REPAIR THE RECORD IMMEDIATELY
                      localDistributedDatabase.getDatabaseRepairer()
                          .enqueueRepairRecord((ORecordId) ((ORecordNotFoundException) payload).getRid());
                    }

                    executeUndoOnLocalServer(dResponse.getRequestId(), task);

                    if (payload instanceof ONeedRetryException)
                      throw (ONeedRetryException) payload;

                    throw OException.wrapException(new ODistributedException("Error on execution distributed update record"),
                        (Exception) payload);
                  }

                  // UPDATE LOCALLY
                  return new OStorageOperationResult<Integer>((Integer) payload);

                } catch (RuntimeException e) {
                  executeUndoOnLocalServer(null, task);
                  throw e;
                } catch (Exception e) {
                  executeUndoOnLocalServer(null, task);
                  throw ODatabaseException.wrapException(new ODistributedException("Cannot execute distributed update record"), e);
                }
              }

              // ASYNCHRONOUS CALL: EXECUTE LOCALLY AND THEN DISTRIBUTE
              asynchronousExecution(new OAsynchDistributedOperation(getName(), Collections.singleton(clusterName), nodes, task,
                  dManager.getNextMessageIdCounter(), localResultPayload, unlockCallback, null));
            }

            return localResult;
          }
        });

  } catch (ONeedRetryException e) {
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord(iRecordId);

    // PASS THROUGH
    throw e;

  } catch (HazelcastInstanceNotActiveException e) {
    throw new OOfflineNodeException("Hazelcast instance is not available");

  } catch (HazelcastException e) {
    throw new OOfflineNodeException("Hazelcast instance is not available");

  } catch (Exception e) {
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord(iRecordId);

    handleDistributedException("Cannot route UPDATE_RECORD operation for %s to the distributed node", e, iRecordId);
    // UNREACHABLE
    return null;
  }
}
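The single-record path follows the same recipe: execute on the local node first, remove the local server from the target list, and replicate the task to whoever is left; if the distributed payload comes back as an exception, the local change is undone before the exception is rethrown. A minimal sketch of that response handling follows, with simulated payloads and hypothetical helper names rather than the real tasks and response classes.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ReplicateAfterLocalSketch {
  static int applyLocally(String rid) { System.out.println("update " + rid + " locally"); return 2; }
  static void undoLocally(String rid) { System.out.println("undo local update of " + rid); }

  // Simulated distributed call: the payload is either the version agreed by the quorum or an exception.
  static Object sendToNodes(List<String> nodes) {
    if (nodes.contains("europe1"))
      return new RuntimeException("record locked on europe1"); // simulated failure payload
    return 3;                                                  // simulated agreed version
  }

  static int updateRecord(String rid, List<String> servers, String localName) {
    final int localVersion = applyLocally(rid);   // 1. execute on the local node first
    final List<String> others = new ArrayList<>(servers);
    others.remove(localName);                     // 2. don't send the task back to ourselves
    if (others.isEmpty())
      return localVersion;                        // nothing left to replicate to
    final Object payload = sendToNodes(others);   // 3. replicate and wait for the quorum
    if (payload instanceof RuntimeException) {
      undoLocally(rid);                           // 4. quorum failed: take the local change back
      throw (RuntimeException) payload;
    }
    return (Integer) payload;                     // version agreed by the quorum
  }

  public static void main(String[] args) {
    System.out.println("new version = " + updateRecord("#12:7", Arrays.asList("usa1", "usa2"), "usa1"));
  }
}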
Use of com.orientechnologies.common.util.OCallable in project orientdb by orientechnologies.
The class ODistributedStorage, method createRecord:
public OStorageOperationResult<OPhysicalPosition> createRecord(final ORecordId iRecordId, final byte[] iContent,
    final int iRecordVersion, final byte iRecordType, final int iMode, final ORecordCallback<Long> iCallback) {
  resetLastValidBackup();

  if (OScenarioThreadLocal.INSTANCE.isRunModeDistributed()) {
    // ALREADY DISTRIBUTED
    return wrapped.createRecord(iRecordId, iContent, iRecordVersion, iRecordType, iMode, iCallback);
  }

  checkLocalNodeIsAvailable();
  checkClusterRebalanceIsNotRunning();

  final String localNodeName = dManager.getLocalNodeName();

  final ODistributedConfiguration dbCfg = distributedConfiguration;

  // ASSIGN DESTINATION NODE
  final int clusterId = iRecordId.getClusterId();
  if (clusterId == ORID.CLUSTER_ID_INVALID)
    throw new IllegalArgumentException("Cluster not valid");

  checkNodeIsMaster(localNodeName, dbCfg);

  final String clusterName = getClusterNameByRID(iRecordId);

  checkWriteQuorum(dbCfg, clusterName, localNodeName);

  try {
    ODocument documentForClusterSelection = iRecordId.getRecord();
    if (documentForClusterSelection == null) {
      // DOCUMENT NOT FOUND: BUILD A TEMPORARY ONE
      documentForClusterSelection = (ODocument) ORecordInternal.fill(new ODocument(), iRecordId, iRecordVersion, iContent, false);
    }

    checkForCluster(documentForClusterSelection, localNodeName, dbCfg);

    final List<String> servers = dbCfg.getServers(clusterName, null);

    if (servers.isEmpty())
      // NO NODES: EXECUTE LOCALLY ONLY
      return wrapped.createRecord(iRecordId, iContent, iRecordVersion, iRecordType, iMode, iCallback);

    final String finalClusterName = clusterName;

    final Set<String> clusterNames = Collections.singleton(finalClusterName);

    // REMOVE CURRENT NODE BECAUSE IT HAS BEEN ALREADY EXECUTED LOCALLY
    servers.remove(localNodeName);

    Boolean executionModeSynch = dbCfg.isExecutionModeSynchronous(finalClusterName);
    if (executionModeSynch == null)
      executionModeSynch = iMode == 0;
    final boolean syncMode = executionModeSynch;

    // IN ANY CASE EXECUTE LOCALLY AND THEN DISTRIBUTE
    return (OStorageOperationResult<OPhysicalPosition>) executeRecordOperationInLock(syncMode, iRecordId,
        new OCallable<Object, OCallable<Void, ODistributedRequestId>>() {

          @Override
          public Object call(OCallable<Void, ODistributedRequestId> unlockCallback) {
            final OStorageOperationResult<OPhysicalPosition> localResult;

            localResult = wrapped.createRecord(iRecordId, iContent, iRecordVersion, iRecordType, iMode, iCallback);

            // UPDATE RID WITH NEW POSITION
            iRecordId.setClusterPosition(localResult.getResult().clusterPosition);

            final OPlaceholder localPlaceholder = new OPlaceholder(iRecordId, localResult.getResult().recordVersion);

            final OCreateRecordTask task = new OCreateRecordTask(iRecordId, iContent, iRecordVersion, iRecordType);
            task.setLastLSN(wrapped.getLSN());

            if (!servers.isEmpty()) {
              if (syncMode) {
                // SYNCHRONOUS CALL: REPLICATE IT
                try {
                  final ODistributedResponse dResponse = dManager.sendRequest(getName(), clusterNames, servers, task,
                      dManager.getNextMessageIdCounter(), EXECUTION_MODE.RESPONSE, localPlaceholder, unlockCallback);
                  final Object payload = dResponse.getPayload();
                  if (payload != null) {
                    if (payload instanceof Exception) {
                      executeUndoOnLocalServer(dResponse.getRequestId(), task);

                      if (payload instanceof ONeedRetryException)
                        throw (ONeedRetryException) payload;

                      throw OException.wrapException(new ODistributedException("Error on execution distributed create record"),
                          (Exception) payload);
                    }
                    // COPY THE CLUSTER POS -> RID
                    final OPlaceholder masterPlaceholder = (OPlaceholder) payload;
                    iRecordId.copyFrom(masterPlaceholder.getIdentity());

                    return new OStorageOperationResult<OPhysicalPosition>(new OPhysicalPosition(
                        masterPlaceholder.getIdentity().getClusterPosition(), masterPlaceholder.getVersion()));
                  }
                } catch (RuntimeException e) {
                  executeUndoOnLocalServer(null, task);
                  throw e;
                } catch (Exception e) {
                  executeUndoOnLocalServer(null, task);
                  throw ODatabaseException.wrapException(new ODistributedException("Cannot execute distributed create record"), e);
                }

              } else {
                // ASYNCHRONOUSLY REPLICATE IT TO ALL THE OTHER NODES
                asynchronousExecution(new OAsynchDistributedOperation(getName(), Collections.singleton(finalClusterName), servers,
                    task, dManager.getNextMessageIdCounter(), localPlaceholder, unlockCallback, null));
              }
            } else
              unlockCallback.call(null);

            return localResult;
          }
        });

  } catch (ODistributedRecordLockedException e) {
    // PASS THROUGH
    throw e;

  } catch (ONeedRetryException e) {
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord(iRecordId);

    final ORecordId lockEntireCluster = iRecordId.copy();
    lockEntireCluster.setClusterPosition(-1);
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord(lockEntireCluster);

    // PASS THROUGH
    throw e;

  } catch (HazelcastInstanceNotActiveException e) {
    throw new OOfflineNodeException("Hazelcast instance is not available");

  } catch (HazelcastException e) {
    throw new OOfflineNodeException("Hazelcast instance is not available");

  } catch (Exception e) {
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord(iRecordId);

    final ORecordId lockEntireCluster = iRecordId.copy();
    lockEntireCluster.setClusterPosition(-1);
    localDistributedDatabase.getDatabaseRepairer().enqueueRepairRecord(lockEntireCluster);

    handleDistributedException("Cannot route create record operation for %s to the distributed node", e, iRecordId);
    // UNREACHABLE
    return null;
  }
}
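createRecord() adds one extra step to the recipe: the local create assigns a provisional cluster position, and when the synchronous quorum answers with a placeholder carrying a different identity, the local RID is overwritten with the agreed one (the iRecordId.copyFrom(masterPlaceholder.getIdentity()) call above). A small sketch of that placeholder exchange; the Rid and Placeholder classes here are illustrative stand-ins, not ORecordId/OPlaceholder.

public class CreatePlaceholderSketch {
  // Minimal stand-ins for a record identity and a placeholder, used only for this illustration.
  static class Rid {
    int cluster;
    long position;
    Rid(int c, long p) { cluster = c; position = p; }
    @Override public String toString() { return "#" + cluster + ":" + position; }
  }

  static class Placeholder {
    final Rid rid;
    final int version;
    Placeholder(Rid r, int v) { rid = r; version = v; }
  }

  static Placeholder createLocally(int cluster)   { return new Placeholder(new Rid(cluster, 41), 1); }
  static Placeholder replicate(Placeholder local) { return new Placeholder(new Rid(local.rid.cluster, 42), 1); }

  static Rid createRecord(int cluster) {
    final Placeholder local = createLocally(cluster); // local create assigns a provisional position
    final Rid rid = local.rid;
    final Placeholder agreed = replicate(local);       // the quorum may answer with a different position
    rid.cluster = agreed.rid.cluster;                   // copy the agreed identity back into the RID,
    rid.position = agreed.rid.position;                 // like iRecordId.copyFrom(masterPlaceholder.getIdentity())
    return rid;
  }

  public static void main(String[] args) {
    System.out.println("created " + createRecord(12)); // prints the position agreed by the quorum
  }
}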