Use of org.apache.ignite.internal.processors.cache.GridCacheReturnCompletableWrapper in project ignite by apache.
The class IgniteTxHandler, method sendReply.
/**
* Sends tx finish response to remote node, if response is requested.
*
* @param nodeId Node id that originated finish request.
* @param req Request.
* @param committed {@code True} if transaction committed on this node.
* @param nearTxId Near tx version.
*/
private void sendReply(UUID nodeId, GridDhtTxFinishRequest req, boolean committed, GridCacheVersion nearTxId) {
if (req.replyRequired() || req.checkCommitted()) {
GridDhtTxFinishResponse res = new GridDhtTxFinishResponse(req.partition(), req.version(), req.futureId(), req.miniId());
if (req.checkCommitted()) {
res.checkCommitted(true);
if (committed) {
if (req.needReturnValue()) {
try {
GridCacheReturnCompletableWrapper wrapper = ctx.tm().getCommittedTxReturn(req.version());
if (wrapper != null)
res.returnValue(wrapper.fut().get());
else
assert !ctx.discovery().alive(nodeId) : nodeId;
} catch (IgniteCheckedException ignored) {
if (txFinishMsgLog.isDebugEnabled()) {
txFinishMsgLog.debug("Failed to gain entry processor return value. [txId=" + nearTxId + ", dhtTxId=" + req.version() + ", node=" + nodeId + ']');
}
}
}
} else {
ClusterTopologyCheckedException cause = new ClusterTopologyCheckedException("Primary node left grid.");
res.checkCommittedError(new IgniteTxRollbackCheckedException("Failed to commit transaction " + "(transaction has been rolled back on backup node): " + req.version(), cause));
}
}
try {
ctx.io().send(nodeId, res, req.policy());
if (txFinishMsgLog.isDebugEnabled()) {
txFinishMsgLog.debug("Sent dht tx finish response [txId=" + nearTxId + ", dhtTxId=" + req.version() + ", node=" + nodeId + ", checkCommitted=" + req.checkCommitted() + ']');
}
} catch (Throwable e) {
// Double-check.
if (ctx.discovery().node(nodeId) == null) {
if (txFinishMsgLog.isDebugEnabled()) {
txFinishMsgLog.debug("Node left while send dht tx finish response [txId=" + nearTxId + ", dhtTxId=" + req.version() + ", node=" + nodeId + ']');
}
} else {
U.error(log, "Failed to send finish response to node [txId=" + nearTxId + ", dhtTxId=" + req.version() + ", nodeId=" + nodeId + ", res=" + res + ']', e);
}
if (e instanceof Error)
throw (Error) e;
}
} else {
if (txFinishMsgLog.isDebugEnabled()) {
txFinishMsgLog.debug("Skip send dht tx finish response [txId=" + nearTxId + ", dhtTxId=" + req.version() + ", node=" + nodeId + ']');
}
}
}
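The reply path above only consumes a return value that the commit path has published: it looks the wrapper up by the DHT transaction version and blocks on wrapper.fut().get(), tolerating a missing wrapper only when the originating node has already left. Below is a minimal, self-contained sketch of that consumer-side registry, using java.util.concurrent.CompletableFuture in place of the internal wrapper; the class and method names (ReturnValueRegistry, register, awaitReturn) are hypothetical and not Ignite API.
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ReturnValueRegistry {
    /** Pending return values keyed by a stand-in transaction version (a UUID here). */
    private final ConcurrentMap<UUID, CompletableFuture<Object>> committedReturns = new ConcurrentHashMap<>();

    /** Producer side: register a future before commit so reply senders can find it. */
    public CompletableFuture<Object> register(UUID txVer) {
        return committedReturns.computeIfAbsent(txVer, v -> new CompletableFuture<>());
    }

    /**
     * Consumer side: block until the commit path completes the future, mirroring
     * wrapper.fut().get() in sendReply(). Returns null if nothing was registered,
     * which the caller tolerates only when the originating node has left.
     */
    public Object awaitReturn(UUID txVer) throws Exception {
        CompletableFuture<Object> fut = committedReturns.get(txVer);
        return fut == null ? null : fut.get();
    }
}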
Use of org.apache.ignite.internal.processors.cache.GridCacheReturnCompletableWrapper in project ignite by apache.
The class GridNearTxFinishFuture, method checkBackup.
/**
 * Checks whether the transaction was committed on the backup node when the primary node has left the grid.
 */
private void checkBackup() {
assert !hasFutures() : futures();
GridDistributedTxMapping mapping = mappings.singleMapping();
if (mapping != null) {
UUID nodeId = mapping.primary().id();
Collection<UUID> backups = tx.transactionNodes().get(nodeId);
if (!F.isEmpty(backups)) {
assert backups.size() == 1;
UUID backupId = F.first(backups);
ClusterNode backup = cctx.discovery().node(backupId);
// Nothing to do if backup has left the grid.
if (backup == null) {
readyNearMappingFromBackup(mapping);
ClusterTopologyCheckedException cause = new ClusterTopologyCheckedException("Backup node left grid: " + backupId);
cause.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));
onDone(new IgniteTxRollbackCheckedException("Failed to commit transaction " + "(backup has left grid): " + tx.xidVersion(), cause));
} else {
final CheckBackupMiniFuture mini = new CheckBackupMiniFuture(1, backup, mapping);
add(mini);
if (backup.isLocal()) {
boolean committed = !cctx.tm().addRolledbackTx(tx);
readyNearMappingFromBackup(mapping);
if (committed) {
try {
if (tx.needReturnValue() && tx.implicit()) {
GridCacheReturnCompletableWrapper wrapper = cctx.tm().getCommittedTxReturn(tx.xidVersion());
assert wrapper != null : tx.xidVersion();
GridCacheReturn retVal = wrapper.fut().get();
assert retVal != null;
tx.implicitSingleResult(retVal);
}
if (tx.syncMode() == FULL_SYNC) {
GridCacheVersion nearXidVer = tx.nearXidVersion();
assert nearXidVer != null : tx;
IgniteInternalFuture<?> fut = cctx.tm().remoteTxFinishFuture(nearXidVer);
fut.listen(new CI1<IgniteInternalFuture<?>>() {
@Override
public void apply(IgniteInternalFuture<?> fut) {
mini.onDone(tx);
}
});
return;
}
mini.onDone(tx);
} catch (IgniteCheckedException e) {
if (msgLog.isDebugEnabled()) {
msgLog.debug("Near finish fut, failed to finish [" + "txId=" + tx.nearXidVersion() + ", node=" + backup.id() + ", err=" + e + ']');
}
mini.onDone(e);
}
} else {
ClusterTopologyCheckedException cause = new ClusterTopologyCheckedException("Primary node left grid: " + nodeId);
cause.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));
mini.onDone(new IgniteTxRollbackCheckedException("Failed to commit transaction " + "(transaction has been rolled back on backup node): " + tx.xidVersion(), cause));
}
} else {
GridDhtTxFinishRequest finishReq = checkCommittedRequest(mini.futureId(), false);
try {
cctx.io().send(backup, finishReq, tx.ioPolicy());
if (msgLog.isDebugEnabled()) {
msgLog.debug("Near finish fut, sent check committed request [" + "txId=" + tx.nearXidVersion() + ", node=" + backup.id() + ']');
}
} catch (ClusterTopologyCheckedException ignored) {
mini.onNodeLeft(backupId, false);
} catch (IgniteCheckedException e) {
if (msgLog.isDebugEnabled()) {
msgLog.debug("Near finish fut, failed to send check committed request [" + "txId=" + tx.nearXidVersion() + ", node=" + backup.id() + ", err=" + e + ']');
}
mini.onDone(e);
}
}
}
} else
readyNearMappingFromBackup(mapping);
}
}
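In the FULL_SYNC branch above, the mini future is completed only after the remote transaction finish future fires. A compact sketch of that chaining, with CompletableFuture standing in for IgniteInternalFuture; the names remoteFinishFut, miniFut and the String transaction id are illustrative only.
import java.util.concurrent.CompletableFuture;

public class FullSyncWait {
    public static void main(String[] args) {
        CompletableFuture<Void> remoteFinishFut = new CompletableFuture<>();
        CompletableFuture<String> miniFut = new CompletableFuture<>();
        String tx = "tx-1";

        // Equivalent of fut.listen(f -> mini.onDone(tx)) in checkBackup().
        remoteFinishFut.whenComplete((res, err) -> miniFut.complete(tx));

        remoteFinishFut.complete(null); // Remote finish arrives.
        System.out.println("mini future done: " + miniFut.join());
    }
}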
Use of org.apache.ignite.internal.processors.cache.GridCacheReturnCompletableWrapper in project ignite by apache.
The class GridDistributedTxRemoteAdapter, method commitIfLocked.
/**
 * Commits the transaction if all entry locks are owned by this transaction; returns without committing if some lock has not been acquired yet.
 *
 * @throws IgniteCheckedException If commit failed.
 */
private void commitIfLocked() throws IgniteCheckedException {
if (state() == COMMITTING) {
for (IgniteTxEntry txEntry : writeEntries()) {
assert txEntry != null : "Missing transaction entry for tx: " + this;
while (true) {
GridCacheEntryEx entry = txEntry.cached();
assert entry != null : "Missing cached entry for transaction entry: " + txEntry;
try {
GridCacheVersion ver = txEntry.explicitVersion() != null ? txEntry.explicitVersion() : xidVer;
// If locks haven't been acquired yet, keep waiting.
if (!entry.lockedBy(ver)) {
if (log.isDebugEnabled())
log.debug("Transaction does not own lock for entry (will wait) [entry=" + entry + ", tx=" + this + ']');
return;
}
// Lock is owned; break out of the wait loop.
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry while committing (will retry): " + txEntry);
try {
txEntry.cached(txEntry.context().cache().entryEx(txEntry.key(), topologyVersion()));
} catch (GridDhtInvalidPartitionException e) {
break;
}
}
}
}
// Only one thread gets to commit.
if (COMMIT_ALLOWED_UPD.compareAndSet(this, 0, 1)) {
IgniteCheckedException err = null;
Map<IgniteTxKey, IgniteTxEntry> writeMap = txState.writeMap();
GridCacheReturnCompletableWrapper wrapper = null;
if (!F.isEmpty(writeMap) || mvccSnapshot != null) {
GridCacheReturn ret = null;
if (!near() && !local() && onePhaseCommit()) {
if (needReturnValue()) {
ret = new GridCacheReturn(null, cctx.localNodeId().equals(otherNodeId()), true, null, null, true);
// Originating node.
UUID origNodeId = otherNodeId();
cctx.tm().addCommittedTxReturn(this, wrapper = new GridCacheReturnCompletableWrapper(!cctx.localNodeId().equals(origNodeId) ? origNodeId : null));
} else
cctx.tm().addCommittedTx(this, this.nearXidVersion(), null);
}
// Register this transaction as completed prior to write-phase to
// ensure proper lock ordering for removed entries.
cctx.tm().addCommittedTx(this);
AffinityTopologyVersion topVer = topologyVersion();
WALPointer ptr = null;
cctx.database().checkpointReadLock();
// Reserved partitions (necessary to prevent race due to updates in RENTING state).
Set<GridDhtLocalPartition> reservedParts = new HashSet<>();
try {
assert !txState.mvccEnabled() || mvccSnapshot != null : "Mvcc is not initialized: " + this;
Collection<IgniteTxEntry> entries = near() || cctx.snapshot().needTxReadLogging() ? allEntries() : writeEntries();
// Data entries to write to the WAL, each paired with its associated TxEntry.
List<T2<DataEntry, IgniteTxEntry>> dataEntries = null;
batchStoreCommit(writeMap().values());
// Note that for near transactions we grab all entries.
for (IgniteTxEntry txEntry : entries) {
GridCacheContext cacheCtx = txEntry.context();
// Prevent stale updates.
GridDhtLocalPartition locPart = cacheCtx.group().topology().localPartition(txEntry.cached().partition());
if (!near()) {
if (locPart == null)
continue;
if (!reservedParts.contains(locPart) && locPart.reserve()) {
assert locPart.state() != EVICTED && locPart.reservations() > 0 : locPart;
reservedParts.add(locPart);
}
if (locPart.state() == RENTING || locPart.state() == EVICTED) {
LT.warn(log(), "Skipping update to partition that is concurrently evicting " + "[grp=" + cacheCtx.group().cacheOrGroupName() + ", part=" + locPart + "]");
continue;
}
}
boolean replicate = cacheCtx.isDrEnabled();
while (true) {
try {
GridCacheEntryEx cached = txEntry.cached();
if (cached == null)
txEntry.cached(cached = cacheCtx.cache().entryEx(txEntry.key(), topologyVersion()));
if (near() && cacheCtx.dr().receiveEnabled()) {
cached.markObsolete(xidVer);
break;
}
GridNearCacheEntry nearCached = null;
if (updateNearCache(cacheCtx, txEntry.key(), topVer))
nearCached = cacheCtx.dht().near().peekExx(txEntry.key());
if (!F.isEmpty(txEntry.entryProcessors()))
txEntry.cached().unswap(false);
IgniteBiTuple<GridCacheOperation, CacheObject> res = applyTransformClosures(txEntry, false, ret);
GridCacheOperation op = res.get1();
CacheObject val = res.get2();
GridCacheVersion explicitVer = txEntry.conflictVersion();
if (explicitVer == null)
explicitVer = writeVersion();
if (txEntry.ttl() == CU.TTL_ZERO)
op = DELETE;
boolean conflictNeedResolve = cacheCtx.conflictNeedResolve();
GridCacheVersionConflictContext conflictCtx = null;
if (conflictNeedResolve) {
IgniteBiTuple<GridCacheOperation, GridCacheVersionConflictContext> drRes = conflictResolve(op, txEntry, val, explicitVer, cached);
assert drRes != null;
conflictCtx = drRes.get2();
if (conflictCtx.isUseOld())
op = NOOP;
else if (conflictCtx.isUseNew()) {
txEntry.ttl(conflictCtx.ttl());
txEntry.conflictExpireTime(conflictCtx.expireTime());
} else if (conflictCtx.isMerge()) {
op = drRes.get1();
val = txEntry.context().toCacheObject(conflictCtx.mergeValue());
explicitVer = writeVersion();
txEntry.ttl(conflictCtx.ttl());
txEntry.conflictExpireTime(conflictCtx.expireTime());
}
} else
// Nullify explicit version so that innerSet/innerRemove will work as usual.
explicitVer = null;
GridCacheVersion dhtVer = cached.isNear() ? writeVersion() : null;
if (!near() && cacheCtx.group().persistenceEnabled() && cacheCtx.group().walEnabled() && op != NOOP && op != RELOAD && (op != READ || cctx.snapshot().needTxReadLogging())) {
if (dataEntries == null)
dataEntries = new ArrayList<>(entries.size());
dataEntries.add(new T2<>(new DataEntry(cacheCtx.cacheId(), txEntry.key(), val, op, nearXidVersion(), addConflictVersion(writeVersion(), txEntry.conflictVersion()), 0, txEntry.key().partition(), txEntry.updateCounter(), DataEntry.flags(CU.txOnPrimary(this))), txEntry));
}
if (op == CREATE || op == UPDATE) {
// Invalidate only for near nodes (backups cannot be invalidated).
if (isSystemInvalidate() || (isInvalidate() && cacheCtx.isNear()))
cached.innerRemove(this, eventNodeId(), nodeId, false, true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, near() ? null : explicitVer, resolveTaskName(), dhtVer, txEntry.updateCounter());
else {
assert val != null : txEntry;
GridCacheUpdateTxResult updRes = cached.innerSet(this, eventNodeId(), nodeId, val, false, false, txEntry.ttl(), true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, txEntry.conflictExpireTime(), near() ? null : explicitVer, resolveTaskName(), dhtVer, txEntry.updateCounter());
txEntry.updateCounter(updRes.updateCounter());
if (updRes.loggedPointer() != null)
ptr = updRes.loggedPointer();
// Keep near entry up to date.
if (nearCached != null) {
CacheObject val0 = cached.valueBytes();
nearCached.updateOrEvict(xidVer, val0, cached.expireTime(), cached.ttl(), nodeId, topVer);
}
}
} else if (op == DELETE) {
GridCacheUpdateTxResult updRes = cached.innerRemove(this, eventNodeId(), nodeId, false, true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, near() ? null : explicitVer, resolveTaskName(), dhtVer, txEntry.updateCounter());
txEntry.updateCounter(updRes.updateCounter());
if (updRes.loggedPointer() != null)
ptr = updRes.loggedPointer();
// Keep near entry up to date.
if (nearCached != null)
nearCached.updateOrEvict(xidVer, null, 0, 0, nodeId, topVer);
} else if (op == RELOAD) {
CacheObject reloaded = cached.innerReload();
if (nearCached != null) {
nearCached.innerReload();
nearCached.updateOrEvict(cached.version(), reloaded, cached.expireTime(), cached.ttl(), nodeId, topVer);
}
} else if (op == READ) {
assert near();
if (log.isDebugEnabled())
log.debug("Ignoring READ entry when committing: " + txEntry);
} else {
// No-op.
if (conflictCtx == null || !conflictCtx.isUseOld()) {
if (txEntry.ttl() != CU.TTL_NOT_CHANGED)
cached.updateTtl(null, txEntry.ttl());
if (nearCached != null) {
CacheObject val0 = cached.valueBytes();
nearCached.updateOrEvict(xidVer, val0, cached.expireTime(), cached.ttl(), nodeId, topVer);
}
}
}
// The transaction must own the lock on the committed entry unless the entry is a read,
// this is a one-phase commit, or the lock was acquired explicitly; in those cases the
// commit is simply allowed to proceed.
assert txEntry.op() == READ || onePhaseCommit() || !cached.hasLockCandidateUnsafe(xidVer) || cached.lockedByUnsafe(xidVer) : "Transaction does not own lock for commit [entry=" + cached + ", tx=" + this + ']';
// Break out of while loop.
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Attempting to commit a removed entry (will retry): " + txEntry);
// Renew cached entry.
txEntry.cached(cacheCtx.cache().entryEx(txEntry.key(), topologyVersion()));
}
}
}
// Apply cache size deltas.
applyTxSizes();
TxCounters txCntrs = txCounters(false);
// Apply update counters.
if (txCntrs != null)
cctx.tm().txHandler().applyPartitionsUpdatesCounters(txCntrs.updateCounters());
cctx.mvccCaching().onTxFinished(this, true);
if (!near() && !F.isEmpty(dataEntries) && cctx.wal() != null) {
// Set new update counters for data entries received from persisted tx entries.
List<DataEntry> entriesWithCounters = dataEntries.stream().map(tuple -> tuple.get1().partitionCounter(tuple.get2().updateCounter())).collect(Collectors.toList());
ptr = cctx.wal().log(new DataRecord(entriesWithCounters));
}
if (ptr != null)
cctx.wal().flush(ptr, false);
} catch (Throwable ex) {
state(UNKNOWN);
if (X.hasCause(ex, NodeStoppingException.class)) {
U.warn(log, "Failed to commit transaction, node is stopping [tx=" + CU.txString(this) + ", err=" + ex + ']');
return;
}
err = heuristicException(ex);
try {
uncommit();
} catch (Throwable e) {
err.addSuppressed(e);
}
throw err;
} finally {
for (GridDhtLocalPartition locPart : reservedParts) locPart.release();
cctx.database().checkpointReadUnlock();
if (wrapper != null)
wrapper.initialize(ret);
}
}
cctx.tm().commitTx(this);
state(COMMITTED);
}
}
}
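The key contract in commitIfLocked() is that the wrapper registered via addCommittedTxReturn() is completed in the finally block, so a reply thread blocked on fut().get() is released even if the write phase throws. A minimal sketch of that producer-side shape, with CompletableFuture replacing GridCacheReturnCompletableWrapper; commitWithReturn and commitWrites are hypothetical stand-ins for the real write phase.
import java.util.concurrent.CompletableFuture;

public class CommitReturnProducer {
    public static Object commitWithReturn(CompletableFuture<Object> wrapper) {
        Object ret = null;
        try {
            ret = commitWrites(); // Apply the write entries; may throw.
            return ret;
        } finally {
            // Mirrors wrapper.initialize(ret) in the finally block above: complete the
            // future even on the error path so a blocked reply thread is released.
            wrapper.complete(ret);
        }
    }

    private static Object commitWrites() {
        return "return-value"; // Placeholder for the real write phase.
    }
}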
Use of org.apache.ignite.internal.processors.cache.GridCacheReturnCompletableWrapper in project ignite by apache.
The class IgniteTxManager, method start0.
/**
* {@inheritDoc}
*/
@Override
protected void start0() throws IgniteCheckedException {
txHnd = new IgniteTxHandler(cctx);
deferredAckMsgSnd = new GridDeferredAckMessageSender<GridCacheVersion>(cctx.time(), cctx.kernalContext().closure()) {
@Override
public int getTimeout() {
return DEFERRED_ONE_PHASE_COMMIT_ACK_REQUEST_TIMEOUT;
}
@Override
public int getBufferSize() {
return DEFERRED_ONE_PHASE_COMMIT_ACK_REQUEST_BUFFER_SIZE;
}
@Override
public void finish(UUID nodeId, Collection<GridCacheVersion> vers) {
GridDhtTxOnePhaseCommitAckRequest ackReq = new GridDhtTxOnePhaseCommitAckRequest(vers);
cctx.kernalContext().gateway().readLock();
try {
cctx.io().send(nodeId, ackReq, GridIoPolicy.SYSTEM_POOL);
} catch (ClusterTopologyCheckedException ignored) {
if (log.isDebugEnabled())
log.debug("Failed to send one phase commit ack to backup node because it left grid: " + nodeId);
} catch (IgniteCheckedException e) {
log.error("Failed to send one phase commit ack to backup node [backup=" + nodeId + ']', e);
} finally {
cctx.kernalContext().gateway().readUnlock();
}
}
};
cctx.gridEvents().addDiscoveryEventListener(new TransactionRecoveryListener(), EVT_NODE_FAILED, EVT_NODE_LEFT);
cctx.gridEvents().addDiscoveryEventListener(new DiscoveryEventListener() {
@Override
public void onEvent(DiscoveryEvent evt, DiscoCache discoCache) {
if (evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT) {
UUID nodeId = evt.eventNode().id();
for (TxDeadlockFuture fut : deadlockDetectFuts.values()) fut.onNodeLeft(nodeId);
for (Map.Entry<GridCacheVersion, Object> entry : completedVersHashMap.entrySet()) {
Object obj = entry.getValue();
if (obj instanceof GridCacheReturnCompletableWrapper && nodeId.equals(((GridCacheReturnCompletableWrapper) obj).nodeId()))
removeTxReturn(entry.getKey());
}
}
suspendResumeForPessimisticSupported = IgniteFeatures.allNodesSupports(cctx.discovery().remoteNodes(), IgniteFeatures.SUSPEND_RESUME_PESSIMISTIC_TX);
}
}, EVT_NODE_FAILED, EVT_NODE_LEFT, EVT_NODE_JOINED);
this.txDeadlockDetection = new TxDeadlockDetection(cctx);
cctx.gridIO().addMessageListener(TOPIC_TX, new DeadlockDetectionListener());
cctx.txMetrics().onTxManagerStarted();
keyCollisionsInfo = new KeyCollisionsHolder();
distributedTransactionConfiguration = new DistributedTransactionConfiguration(cctx.kernalContext(), log, (String name, Long oldVal, Long newVal) -> {
if (!Objects.equals(oldVal, newVal)) {
scheduleDumpTask(IGNITE_LONG_OPERATIONS_DUMP_TIMEOUT, () -> cctx.kernalContext().closure().runLocalSafe(() -> cctx.kernalContext().cache().context().exchange().dumpLongRunningOperations(newVal)), newVal);
}
}, (String name, Integer oldVal, Integer newVal) -> {
if (!Objects.equals(oldVal, newVal)) {
scheduleDumpTask(IGNITE_DUMP_TX_COLLISIONS_INTERVAL, this::collectTxCollisionsInfo, newVal);
}
});
cctx.kernalContext().systemView().registerView(TXS_MON_LIST, TXS_MON_LIST_DESC, new TransactionViewWalker(), new ReadOnlyCollectionView2X<>(idMap.values(), nearIdMap.values()), TransactionView::new);
}
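The discovery listener registered above prevents leaks: when a node fails or leaves, any return-value wrapper that was registered on behalf of that node is removed from the completed-versions map. A simplified sketch of that cleanup, keyed by a stand-in UUID transaction version instead of GridCacheVersion; the PendingReturn type and onNodeLeft method are hypothetical.
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class NodeLeftCleanup {
    /** Stand-in for GridCacheReturnCompletableWrapper: remembers the originating node. */
    static final class PendingReturn {
        final UUID originatingNodeId;
        final CompletableFuture<Object> fut = new CompletableFuture<>();
        PendingReturn(UUID originatingNodeId) { this.originatingNodeId = originatingNodeId; }
    }

    private final ConcurrentMap<UUID, PendingReturn> completedVers = new ConcurrentHashMap<>();

    /** Invoked for the EVT_NODE_FAILED / EVT_NODE_LEFT analog in this sketch. */
    public void onNodeLeft(UUID leftNodeId) {
        for (Map.Entry<UUID, PendingReturn> e : completedVers.entrySet()) {
            if (leftNodeId.equals(e.getValue().originatingNodeId))
                completedVers.remove(e.getKey(), e.getValue());
        }
    }
}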
Use of org.apache.ignite.internal.processors.cache.GridCacheReturnCompletableWrapper in project ignite by apache.
The class IgniteTxManager, method getCommittedTxReturn.
/**
* @param xidVer Completed transaction version.
* @return Tx result.
*/
public GridCacheReturnCompletableWrapper getCommittedTxReturn(GridCacheVersion xidVer) {
Object retVal = completedVersHashMap.get(xidVer);
// Holds Boolean.TRUE in the regular commit case, or a GridCacheReturnCompletableWrapper in the one-phase commit case.
if (!Boolean.TRUE.equals(retVal)) {
// This method should be called only after the committed status has been checked.
assert !Boolean.FALSE.equals(retVal);
GridCacheReturnCompletableWrapper res = (GridCacheReturnCompletableWrapper) retVal;
removeTxReturn(xidVer);
return res;
} else
return null;
}
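The lookup above relies on the completed-versions map holding one of three states per transaction: Boolean.TRUE for an ordinary commit, Boolean.FALSE for a rollback, or a GridCacheReturnCompletableWrapper when a one-phase commit carried a return value. Only the wrapper case yields a non-null result, and the entry is consumed on read. A small sketch of those semantics follows; the key and value types are simplified stand-ins, not the internal ones.
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class CommittedReturnLookup {
    private final ConcurrentMap<UUID, Object> completedVers = new ConcurrentHashMap<>();

    @SuppressWarnings("unchecked")
    public CompletableFuture<Object> getCommittedTxReturn(UUID txVer) {
        Object val = completedVers.get(txVer);

        if (Boolean.TRUE.equals(val))
            return null; // Ordinary commit: no return value was stored.

        // Must be called only after the committed status has been confirmed.
        assert !Boolean.FALSE.equals(val) : val;

        completedVers.remove(txVer); // Consume the entry, mirroring removeTxReturn(xidVer).
        return (CompletableFuture<Object>) val;
    }
}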