Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class GridDhtAtomicCache, method updatePartialBatch.
/**
* @param hasNear {@code True} if originating node has near cache.
* @param firstEntryIdx Index of the first entry in the request keys collection.
* @param entries Entries to update.
* @param ver Version to set.
* @param nearNode Originating node.
* @param writeVals Write values.
* @param putMap Values to put.
* @param rmvKeys Keys to remove.
* @param entryProcessorMap Entry processors.
* @param dhtFut DHT update future if the cache has backups.
* @param req Request.
* @param res Response.
* @param replicate Whether replication is enabled.
* @param batchRes Batch update result.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
* @return Deleted entries.
*/
@SuppressWarnings("ForLoopReplaceableByForEach")
@Nullable
private GridDhtAtomicAbstractUpdateFuture updatePartialBatch(
    final boolean hasNear,
    final int firstEntryIdx,
    final List<GridDhtCacheEntry> entries,
    final GridCacheVersion ver,
    final ClusterNode nearNode,
    @Nullable final List<CacheObject> writeVals,
    @Nullable final Map<KeyCacheObject, CacheObject> putMap,
    @Nullable final Collection<KeyCacheObject> rmvKeys,
    @Nullable final Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap,
    @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut,
    final GridNearAtomicAbstractUpdateRequest req,
    final GridNearAtomicUpdateResponse res,
    final boolean replicate,
    final UpdateBatchResult batchRes,
    final String taskName,
    @Nullable final IgniteCacheExpiryPolicy expiry,
    final boolean sndPrevVal) {
assert putMap == null ^ rmvKeys == null;
assert req.conflictVersions() == null : "Cannot be called when there are conflict entries in the batch.";
AffinityTopologyVersion topVer = req.topologyVersion();
boolean checkReaders = hasNear || ctx.discovery().hasNearCache(ctx.cacheId(), topVer);
CacheStorePartialUpdateException storeErr = null;
try {
GridCacheOperation op;
if (putMap != null) {
try {
Map<? extends KeyCacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>> view = F.viewReadOnly(putMap, new C1<CacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>>() {
@Override
public IgniteBiTuple<? extends CacheObject, GridCacheVersion> apply(CacheObject val) {
return F.t(val, ver);
}
});
ctx.store().putAll(null, view);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = UPDATE;
} else {
try {
ctx.store().removeAll(null, rmvKeys);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = DELETE;
}
boolean intercept = ctx.config().getInterceptor() != null;
AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
// Avoid iterator creation.
for (int i = 0; i < entries.size(); i++) {
GridDhtCacheEntry entry = entries.get(i);
assert Thread.holdsLock(entry);
if (entry.obsolete()) {
assert req.operation() == DELETE : "Entry can become obsolete only after remove: " + entry;
continue;
}
if (storeErr != null && storeErr.failedKeys().contains(entry.key().value(ctx.cacheObjectContext(), false)))
continue;
try {
// We are holding java-level locks on entries at this point.
CacheObject writeVal = op == UPDATE ? writeVals.get(i) : null;
assert writeVal != null || op == DELETE : "null write value found.";
Collection<UUID> readers = null;
Collection<UUID> filteredReaders = null;
if (checkReaders) {
readers = entry.readers();
filteredReaders = F.view(entry.readers(), F.notEqualTo(nearNode.id()));
}
GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nearNode.id(), locNodeId, op, writeVal, null,
    /*write-through*/false,
    /*read-through*/false,
    /*retval*/sndPrevVal,
    req.keepBinary(),
    expiry,
    /*event*/true,
    /*metrics*/true,
    /*primary*/true,
    /*verCheck*/false,
    topVer,
    null,
    replicate ? DR_PRIMARY : DR_NONE,
    CU.TTL_NOT_CHANGED,
    CU.EXPIRE_TIME_CALCULATE,
    null,
    /*conflict resolve*/false,
    /*intercept*/false,
    req.subjectId(), taskName, null, null, dhtFut);
assert !updRes.success() || updRes.newTtl() == CU.TTL_NOT_CHANGED || expiry != null : "success=" + updRes.success() + ", newTtl=" + updRes.newTtl() + ", expiry=" + expiry;
if (intercept) {
if (op == UPDATE) {
ctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(ctx, entry.key(), updRes.newValue(), req.keepBinary()));
} else {
assert op == DELETE : op;
// Old value should be already loaded for 'CacheInterceptor.onBeforeRemove'.
ctx.config().getInterceptor().onAfterRemove(new CacheLazyEntry(ctx, entry.key(), updRes.oldValue(), req.keepBinary()));
}
}
batchRes.addDeleted(entry, updRes, entries);
if (dhtFut != null) {
EntryProcessor<Object, Object, Object> entryProcessor = entryProcessorMap == null ? null : entryProcessorMap.get(entry.key());
dhtFut.addWriteEntry(affAssignment, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE, null, sndPrevVal, updRes.oldValue(), updRes.updateCounter());
if (!F.isEmpty(filteredReaders))
dhtFut.addNearWriteEntries(filteredReaders, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
}
if (hasNear) {
if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
int idx = firstEntryIdx + i;
if (req.operation() == TRANSFORM) {
res.addNearValue(idx, writeVal, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
} else
res.addNearTtl(idx, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
if (writeVal != null || entry.hasValue()) {
IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);
assert f == null : f;
}
} else if (readers.contains(nearNode.id())) // Reader became primary or backup.
    entry.removeReader(nearNode.id(), req.messageId());
else
    res.addSkippedIndex(firstEntryIdx + i);
}
} catch (GridCacheEntryRemovedException e) {
assert false : "Entry cannot become obsolete while holding lock.";
e.printStackTrace();
}
}
} catch (IgniteCheckedException e) {
res.addFailedKeys(putMap != null ? putMap.keySet() : rmvKeys, e);
}
if (storeErr != null) {
ArrayList<KeyCacheObject> failed = new ArrayList<>(storeErr.failedKeys().size());
for (Object failedKey : storeErr.failedKeys()) failed.add(ctx.toCacheKeyObject(failedKey));
res.addFailedKeys(failed, storeErr.getCause());
}
return dhtFut;
}
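Note that updatePartialBatch treats GridCacheEntryRemovedException as impossible (assert false), because the caller already holds java-level locks on every entry in the batch. Code paths that touch an entry without holding its lock use a lookup-and-retry idiom instead, as the commitIfLocked and map examples below show. A minimal sketch of that idiom follows; it is illustrative only, the helper name updateTtlWithRetry is made up, and only the internal calls that appear elsewhere on this page (entryEx, updateTtl) are assumed.
/** Sketch only (hypothetical helper): retry an entry operation until it hits a live entry. */
private void updateTtlWithRetry(GridCacheContext cacheCtx, KeyCacheObject key, AffinityTopologyVersion topVer, long ttl) throws IgniteCheckedException {
    while (true) {
        GridCacheEntryEx entry = cacheCtx.cache().entryEx(key, topVer);
        try {
            entry.updateTtl(null, ttl); // Any call that may observe a concurrently removed entry.
            return; // Success: leave the retry loop.
        } catch (GridCacheEntryRemovedException ignored) {
            // The entry was removed between lookup and update: look it up again and retry.
        }
    }
}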
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class IgniteTxAdapter, method applyTransformClosures.
/**
* @param txEntry Entry to process.
* @param metrics {@code True} if metrics should be updated.
* @param ret Optional return value to initialize.
* @return Tuple containing transformation results.
* @throws IgniteCheckedException If failed to get previous value for transform.
* @throws GridCacheEntryRemovedException If entry was concurrently deleted.
*/
protected IgniteBiTuple<GridCacheOperation, CacheObject> applyTransformClosures(IgniteTxEntry txEntry, boolean metrics, @Nullable GridCacheReturn ret) throws GridCacheEntryRemovedException, IgniteCheckedException {
assert txEntry.op() != TRANSFORM || !F.isEmpty(txEntry.entryProcessors()) : txEntry;
GridCacheContext cacheCtx = txEntry.context();
assert cacheCtx != null;
if (isSystemInvalidate())
return F.t(cacheCtx.writeThrough() ? RELOAD : DELETE, null);
if (F.isEmpty(txEntry.entryProcessors())) {
if (ret != null)
ret.value(cacheCtx, txEntry.value(), txEntry.keepBinary());
return F.t(txEntry.op(), txEntry.value());
} else {
T2<GridCacheOperation, CacheObject> calcVal = txEntry.entryProcessorCalculatedValue();
if (calcVal != null)
return calcVal;
boolean recordEvt = cctx.gridEvents().isRecordable(EVT_CACHE_OBJECT_READ);
final boolean keepBinary = txEntry.keepBinary();
CacheObject cacheVal;
if (txEntry.hasValue())
cacheVal = txEntry.value();
else if (txEntry.hasOldValue())
cacheVal = txEntry.oldValue();
else {
cacheVal = txEntry.cached().innerGet(null, this,
    /*read through*/false,
    /*metrics*/metrics,
    /*event*/recordEvt,
    /*subjId*/subjId,
    /*closure name*/recordEvt ? F.first(txEntry.entryProcessors()).get1() : null,
    resolveTaskName(), null, keepBinary);
}
boolean modified = false;
Object val = null;
Object key = null;
GridCacheVersion ver;
try {
ver = txEntry.cached().version();
} catch (GridCacheEntryRemovedException e) {
assert optimistic() : txEntry;
if (log.isDebugEnabled())
log.debug("Failed to get entry version: [msg=" + e.getMessage() + ']');
ver = null;
}
for (T2<EntryProcessor<Object, Object, Object>, Object[]> t : txEntry.entryProcessors()) {
CacheInvokeEntry<Object, Object> invokeEntry = new CacheInvokeEntry<>(txEntry.key(), key, cacheVal, val, ver, keepBinary, txEntry.cached());
Object procRes = null;
Exception err = null;
try {
EntryProcessor<Object, Object, Object> processor = t.get1();
procRes = processor.process(invokeEntry, t.get2());
val = invokeEntry.getValue();
key = invokeEntry.key();
} catch (Exception e) {
err = e;
}
if (ret != null) {
if (err != null || procRes != null)
ret.addEntryProcessResult(txEntry.context(), txEntry.key(), null, procRes, err, keepBinary);
else
ret.invokeResult(true);
}
modified |= invokeEntry.modified();
}
if (modified)
cacheVal = cacheCtx.toCacheObject(cacheCtx.unwrapTemporary(val));
GridCacheOperation op = modified ? (cacheVal == null ? DELETE : UPDATE) : NOOP;
if (op == NOOP) {
ExpiryPolicy expiry = cacheCtx.expiryForTxEntry(txEntry);
if (expiry != null) {
long ttl = CU.toTtl(expiry.getExpiryForAccess());
txEntry.ttl(ttl);
if (ttl == CU.TTL_ZERO)
op = DELETE;
}
}
return F.t(op, cacheVal);
}
}
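The entry processors chained here originate from invoke()/invokeAll() calls on the public cache API; applyTransformClosures simply replays them against the current value inside the transaction. A hedged public-API example of such a call is below; the cache name, key, and counter logic are made up for illustration, and the cache is assumed to already exist.
// Illustrative only: the kind of EntryProcessor whose closures later show up in txEntry.entryProcessors().
// Uses javax.cache.processor.EntryProcessor and MutableEntry.
IgniteCache<String, Integer> cache = ignite.cache("counters"); // "counters" is a made-up cache name.
Integer next = cache.invoke("pageViews", new EntryProcessor<String, Integer, Integer>() {
    @Override public Integer process(MutableEntry<String, Integer> entry, Object... args) {
        int updated = (entry.getValue() == null ? 0 : entry.getValue()) + 1;
        entry.setValue(updated); // Marks the entry as modified, mirroring invokeEntry.modified() above.
        return updated;          // Returned to the caller, mirroring ret.addEntryProcessResult(...).
    }
});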
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class GridDistributedTxRemoteAdapter, method commitIfLocked.
/**
* @throws IgniteCheckedException If commit failed.
*/
@SuppressWarnings({ "CatchGenericClass" })
private void commitIfLocked() throws IgniteCheckedException {
if (state() == COMMITTING) {
for (IgniteTxEntry txEntry : writeEntries()) {
assert txEntry != null : "Missing transaction entry for tx: " + this;
while (true) {
GridCacheEntryEx entry = txEntry.cached();
assert entry != null : "Missing cached entry for transaction entry: " + txEntry;
try {
GridCacheVersion ver = txEntry.explicitVersion() != null ? txEntry.explicitVersion() : xidVer;
// If locks haven't been acquired yet, keep waiting.
if (!entry.lockedBy(ver)) {
if (log.isDebugEnabled())
log.debug("Transaction does not own lock for entry (will wait) [entry=" + entry + ", tx=" + this + ']');
return;
}
// Lock is owned: break out of the while loop.
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry while committing (will retry): " + txEntry);
txEntry.cached(txEntry.context().cache().entryEx(txEntry.key(), topologyVersion()));
}
}
}
// Only one thread gets to commit.
if (COMMIT_ALLOWED_UPD.compareAndSet(this, 0, 1)) {
IgniteCheckedException err = null;
Map<IgniteTxKey, IgniteTxEntry> writeMap = txState.writeMap();
GridCacheReturnCompletableWrapper wrapper = null;
if (!F.isEmpty(writeMap)) {
GridCacheReturn ret = null;
if (!near() && !local() && onePhaseCommit()) {
if (needReturnValue()) {
ret = new GridCacheReturn(null, cctx.localNodeId().equals(otherNodeId()), true, null, true);
// Originating node.
UUID origNodeId = otherNodeId();
cctx.tm().addCommittedTxReturn(this, wrapper = new GridCacheReturnCompletableWrapper(!cctx.localNodeId().equals(origNodeId) ? origNodeId : null));
} else
cctx.tm().addCommittedTx(this, this.nearXidVersion(), null);
}
// Register this transaction as completed prior to write-phase to
// ensure proper lock ordering for removed entries.
cctx.tm().addCommittedTx(this);
AffinityTopologyVersion topVer = topologyVersion();
WALPointer ptr = null;
cctx.database().checkpointReadLock();
try {
Collection<IgniteTxEntry> entries = near() || cctx.snapshot().needTxReadLogging() ? allEntries() : writeEntries();
List<DataEntry> dataEntries = null;
batchStoreCommit(writeMap().values());
try {
// Note that for near transactions we grab all entries.
for (IgniteTxEntry txEntry : entries) {
GridCacheContext cacheCtx = txEntry.context();
boolean replicate = cacheCtx.isDrEnabled();
try {
while (true) {
try {
GridCacheEntryEx cached = txEntry.cached();
if (cached == null)
txEntry.cached(cached = cacheCtx.cache().entryEx(txEntry.key(), topologyVersion()));
if (near() && cacheCtx.dr().receiveEnabled()) {
cached.markObsolete(xidVer);
break;
}
GridNearCacheEntry nearCached = null;
if (updateNearCache(cacheCtx, txEntry.key(), topVer))
nearCached = cacheCtx.dht().near().peekExx(txEntry.key());
if (!F.isEmpty(txEntry.entryProcessors()))
txEntry.cached().unswap(false);
IgniteBiTuple<GridCacheOperation, CacheObject> res = applyTransformClosures(txEntry, false, ret);
GridCacheOperation op = res.get1();
CacheObject val = res.get2();
GridCacheVersion explicitVer = txEntry.conflictVersion();
if (explicitVer == null)
explicitVer = writeVersion();
if (txEntry.ttl() == CU.TTL_ZERO)
op = DELETE;
boolean conflictNeedResolve = cacheCtx.conflictNeedResolve();
GridCacheVersionConflictContext conflictCtx = null;
if (conflictNeedResolve) {
IgniteBiTuple<GridCacheOperation, GridCacheVersionConflictContext> drRes = conflictResolve(op, txEntry, val, explicitVer, cached);
assert drRes != null;
conflictCtx = drRes.get2();
if (conflictCtx.isUseOld())
op = NOOP;
else if (conflictCtx.isUseNew()) {
txEntry.ttl(conflictCtx.ttl());
txEntry.conflictExpireTime(conflictCtx.expireTime());
} else if (conflictCtx.isMerge()) {
op = drRes.get1();
val = txEntry.context().toCacheObject(conflictCtx.mergeValue());
explicitVer = writeVersion();
txEntry.ttl(conflictCtx.ttl());
txEntry.conflictExpireTime(conflictCtx.expireTime());
}
} else
// Nullify explicit version so that innerSet/innerRemove will work as usual.
explicitVer = null;
GridCacheVersion dhtVer = cached.isNear() ? writeVersion() : null;
if (!near() && cacheCtx.group().persistenceEnabled() && cacheCtx.group().walEnabled() && op != NOOP && op != RELOAD && (op != READ || cctx.snapshot().needTxReadLogging())) {
if (dataEntries == null)
dataEntries = new ArrayList<>(entries.size());
dataEntries.add(new DataEntry(cacheCtx.cacheId(), txEntry.key(), val, op, nearXidVersion(), writeVersion(), 0, txEntry.key().partition(), txEntry.updateCounter()));
}
if (op == CREATE || op == UPDATE) {
// Invalidate only for near nodes (backups cannot be invalidated).
if (isSystemInvalidate() || (isInvalidate() && cacheCtx.isNear()))
cached.innerRemove(this, eventNodeId(), nodeId, false, true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, near() ? null : explicitVer, CU.subjectId(this, cctx), resolveTaskName(), dhtVer, txEntry.updateCounter());
else {
assert val != null : txEntry;
GridCacheUpdateTxResult updRes = cached.innerSet(this, eventNodeId(), nodeId, val, false, false, txEntry.ttl(), true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, txEntry.conflictExpireTime(), near() ? null : explicitVer, CU.subjectId(this, cctx), resolveTaskName(), dhtVer, txEntry.updateCounter());
if (updRes.loggedPointer() != null)
ptr = updRes.loggedPointer();
// Keep near entry up to date.
if (nearCached != null) {
CacheObject val0 = cached.valueBytes();
nearCached.updateOrEvict(xidVer, val0, cached.expireTime(), cached.ttl(), nodeId, topVer);
}
}
} else if (op == DELETE) {
GridCacheUpdateTxResult updRes = cached.innerRemove(this, eventNodeId(), nodeId, false, true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, near() ? null : explicitVer, CU.subjectId(this, cctx), resolveTaskName(), dhtVer, txEntry.updateCounter());
if (updRes.loggedPointer() != null)
ptr = updRes.loggedPointer();
// Keep near entry up to date.
if (nearCached != null)
nearCached.updateOrEvict(xidVer, null, 0, 0, nodeId, topVer);
} else if (op == RELOAD) {
CacheObject reloaded = cached.innerReload();
if (nearCached != null) {
nearCached.innerReload();
nearCached.updateOrEvict(cached.version(), reloaded, cached.expireTime(), cached.ttl(), nodeId, topVer);
}
} else if (op == READ) {
assert near();
if (log.isDebugEnabled())
log.debug("Ignoring READ entry when committing: " + txEntry);
} else { // No-op.
if (conflictCtx == null || !conflictCtx.isUseOld()) {
if (txEntry.ttl() != CU.TTL_NOT_CHANGED)
cached.updateTtl(null, txEntry.ttl());
if (nearCached != null) {
CacheObject val0 = cached.valueBytes();
nearCached.updateOrEvict(xidVer, val0, cached.expireTime(), cached.ttl(), nodeId, topVer);
}
}
}
// Assert after setting values to make sure the transaction still owns the lock,
// even if we replaced removed entries.
// If the lock candidate is not there, then the lock was explicit
// and we simply allow the commit to proceed.
assert txEntry.op() == READ || onePhaseCommit() || !cached.hasLockCandidateUnsafe(xidVer) || cached.lockedByUnsafe(xidVer) : "Transaction does not own lock for commit [entry=" + cached + ", tx=" + this + ']';
// Break out of while loop.
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Attempting to commit a removed entry (will retry): " + txEntry);
// Renew cached entry.
txEntry.cached(cacheCtx.cache().entryEx(txEntry.key(), topologyVersion()));
}
}
} catch (Throwable ex) {
boolean nodeStopping = X.hasCause(ex, NodeStoppingException.class);
// In case of error, we still make the best effort to commit,
// as there is no way to rollback at this point.
err = new IgniteTxHeuristicCheckedException("Commit produced a runtime exception " + "(all transaction entries will be invalidated): " + CU.txString(this), ex);
if (nodeStopping) {
U.warn(log, "Failed to commit transaction, node is stopping [tx=" + this + ", err=" + ex + ']');
} else
U.error(log, "Commit failed.", err);
uncommit(nodeStopping);
state(UNKNOWN);
if (ex instanceof Error)
throw (Error) ex;
}
}
if (!near() && !F.isEmpty(dataEntries) && cctx.wal() != null)
cctx.wal().log(new DataRecord(dataEntries));
if (ptr != null && !cctx.tm().logTxRecords())
cctx.wal().fsync(ptr);
} catch (StorageException e) {
throw new IgniteCheckedException("Failed to log transaction record " + "(transaction will be rolled back): " + this, e);
}
} finally {
cctx.database().checkpointReadUnlock();
if (wrapper != null)
wrapper.initialize(ret);
}
}
if (err != null) {
state(UNKNOWN);
throw err;
}
cctx.tm().commitTx(this);
state(COMMITTED);
}
}
}
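For context, commitIfLocked() runs on remote DHT/near nodes when a distributed transaction finishes; from the application's point of view the whole path is driven by an ordinary transactional update followed by commit(). A hedged public-API sketch follows; the cache name is made up and the cache is assumed to use TRANSACTIONAL atomicity mode.
// Illustrative only: a user transaction whose write entries are eventually applied on
// backup nodes by code like commitIfLocked() above. Uses org.apache.ignite.transactions.*.
IgniteCache<Integer, String> cache = ignite.cache("tx-cache"); // Made-up name; must be TRANSACTIONAL.
try (Transaction tx = ignite.transactions().txStart(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
    cache.put(1, "value");
    cache.remove(2);
    tx.commit(); // Prepare and finish messages fan out to primaries and backups.
}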
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class GridDhtTxPrepareFuture, method map.
/**
* @param entry Transaction entry.
*/
private void map(IgniteTxEntry entry) {
if (entry.cached().isLocal())
return;
GridDhtCacheEntry cached = (GridDhtCacheEntry) entry.cached();
GridCacheContext cacheCtx = entry.context();
GridDhtCacheAdapter<?, ?> dht = cacheCtx.isNear() ? cacheCtx.near().dht() : cacheCtx.dht();
ExpiryPolicy expiry = cacheCtx.expiryForTxEntry(entry);
if (expiry != null && (entry.op() == READ || entry.op() == NOOP)) {
entry.op(NOOP);
entry.ttl(CU.toTtl(expiry.getExpiryForAccess()));
}
while (true) {
try {
List<ClusterNode> dhtNodes = dht.topology().nodes(cached.partition(), tx.topologyVersion());
assert !dhtNodes.isEmpty() && dhtNodes.get(0).id().equals(cctx.localNodeId()) : "localNode = " + cctx.localNodeId() + ", dhtNodes = " + dhtNodes;
if (log.isDebugEnabled())
log.debug("Mapping entry to DHT nodes [nodes=" + U.toShortString(dhtNodes) + ", entry=" + entry + ']');
for (int i = 1; i < dhtNodes.size(); i++) {
ClusterNode node = dhtNodes.get(i);
addMapping(entry, node, dhtMap);
}
Collection<UUID> readers = cached.readers();
if (!F.isEmpty(readers)) {
for (UUID readerId : readers) {
if (readerId.equals(tx.nearNodeId()))
continue;
ClusterNode readerNode = cctx.discovery().node(readerId);
if (readerNode == null || canSkipNearReader(dht, readerNode, dhtNodes))
continue;
if (log.isDebugEnabled())
log.debug("Mapping entry to near node [node=" + readerNode + ", entry=" + entry + ']');
addMapping(entry, readerNode, nearMap);
}
} else if (log.isDebugEnabled())
log.debug("Entry has no near readers: " + entry);
break;
} catch (GridCacheEntryRemovedException ignore) {
cached = dht.entryExx(entry.key(), tx.topologyVersion());
entry.cached(cached);
}
}
}
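The expiry handling at the top of map() (READ/NOOP entries are given an access TTL) corresponds to a per-operation expiry policy on the public API. A hedged example is below; the cache name is made up, and the javax.cache.expiry types (AccessedExpiryPolicy, Duration) are assumed to be imported.
// Illustrative only: an access-based expiry policy that produces the READ/NOOP-with-TTL entries handled in map() above.
IgniteCache<Integer, String> cache = ignite.cache("sessions") // Made-up cache name.
    .withExpiryPolicy(new AccessedExpiryPolicy(new Duration(TimeUnit.MINUTES, 30)));
cache.get(1); // A transactional read through this proxy refreshes the entry's TTL on access.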
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class GridDhtTxPrepareFuture, method sendPrepareRequests.
/**
*/
private void sendPrepareRequests() {
if (tx.onePhaseCommit() && !tx.nearMap().isEmpty()) {
for (GridDistributedTxMapping nearMapping : tx.nearMap().values()) {
if (!tx.dhtMap().containsKey(nearMapping.primary().id())) {
tx.onePhaseCommit(false);
break;
}
}
}
int miniId = 0;
assert tx.transactionNodes() != null;
final long timeout = timeoutObj != null ? timeoutObj.timeout : 0;
// Create mini futures.
for (GridDistributedTxMapping dhtMapping : tx.dhtMap().values()) {
assert !dhtMapping.empty();
ClusterNode n = dhtMapping.primary();
assert !n.isLocal();
GridDistributedTxMapping nearMapping = tx.nearMap().get(n.id());
Collection<IgniteTxEntry> nearWrites = nearMapping == null ? null : nearMapping.writes();
Collection<IgniteTxEntry> dhtWrites = dhtMapping.writes();
if (F.isEmpty(dhtWrites) && F.isEmpty(nearWrites))
continue;
if (tx.remainingTime() == -1)
return;
MiniFuture fut = new MiniFuture(n.id(), ++miniId, dhtMapping, nearMapping);
// Append new future.
add(fut);
assert req.transactionNodes() != null;
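// Note: at this point the simple name 'req' still resolves to the near prepare request field ('this.req'); the local DHT request is declared just below.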
GridDhtTxPrepareRequest req = new GridDhtTxPrepareRequest(futId, fut.futureId(), tx.topologyVersion(), tx, timeout, dhtWrites, nearWrites, this.req.transactionNodes(), tx.nearXidVersion(), true, tx.onePhaseCommit(), tx.subjectId(), tx.taskNameHash(), tx.activeCachesDeploymentEnabled(), tx.storeWriteThrough(), retVal);
int idx = 0;
for (IgniteTxEntry entry : dhtWrites) {
try {
GridDhtCacheEntry cached = (GridDhtCacheEntry) entry.cached();
GridCacheContext<?, ?> cacheCtx = cached.context();
// Do not invalidate near entry on originating transaction node.
req.invalidateNearEntry(idx, !tx.nearNodeId().equals(n.id()) && cached.readerId(n.id()) != null);
if (cached.isNewLocked()) {
List<ClusterNode> owners = cacheCtx.topology().owners(cached.partition(), tx != null ? tx.topologyVersion() : cacheCtx.affinity().affinityTopologyVersion());
// Do not preload if local node is a partition owner.
if (!owners.contains(cctx.localNode()))
req.markKeyForPreload(idx);
}
break;
} catch (GridCacheEntryRemovedException ignore) {
assert false : "Got removed exception on entry with dht local candidate: " + entry;
}
idx++;
}
if (!F.isEmpty(nearWrites)) {
for (IgniteTxEntry entry : nearWrites) {
try {
if (entry.explicitVersion() == null) {
GridCacheMvccCandidate added = entry.cached().candidate(version());
assert added != null : "Missing candidate for cache entry:" + entry;
assert added.dhtLocal();
if (added.ownerVersion() != null)
req.owned(entry.txKey(), added.ownerVersion());
}
break;
} catch (GridCacheEntryRemovedException ignore) {
assert false : "Got removed exception on entry with dht local candidate: " + entry;
}
}
}
assert req.transactionNodes() != null;
try {
cctx.io().send(n, req, tx.ioPolicy());
if (msgLog.isDebugEnabled()) {
msgLog.debug("DHT prepare fut, sent request dht [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + n.id() + ']');
}
} catch (ClusterTopologyCheckedException ignored) {
fut.onNodeLeft();
} catch (IgniteCheckedException e) {
if (!cctx.kernalContext().isStopping()) {
if (msgLog.isDebugEnabled()) {
msgLog.debug("DHT prepare fut, failed to send request dht [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + n.id() + ']');
}
fut.onResult(e);
} else {
if (msgLog.isDebugEnabled()) {
msgLog.debug("DHT prepare fut, failed to send request dht, ignore [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + n.id() + ", err=" + e + ']');
}
}
}
}
for (GridDistributedTxMapping nearMapping : tx.nearMap().values()) {
if (!tx.dhtMap().containsKey(nearMapping.primary().id())) {
if (tx.remainingTime() == -1)
return;
MiniFuture fut = new MiniFuture(nearMapping.primary().id(), ++miniId, null, nearMapping);
// Append new future.
add(fut);
GridDhtTxPrepareRequest req = new GridDhtTxPrepareRequest(futId, fut.futureId(), tx.topologyVersion(), tx, timeout, null, nearMapping.writes(), tx.transactionNodes(), tx.nearXidVersion(), true, tx.onePhaseCommit(), tx.subjectId(), tx.taskNameHash(), tx.activeCachesDeploymentEnabled(), tx.storeWriteThrough(), retVal);
for (IgniteTxEntry entry : nearMapping.entries()) {
if (CU.writes().apply(entry)) {
try {
if (entry.explicitVersion() == null) {
GridCacheMvccCandidate added = entry.cached().candidate(version());
assert added != null : "Null candidate for non-group-lock entry " + "[added=" + added + ", entry=" + entry + ']';
assert added.dhtLocal() : "Got non-dht-local candidate for prepare future" + "[added=" + added + ", entry=" + entry + ']';
if (added != null && added.ownerVersion() != null)
req.owned(entry.txKey(), added.ownerVersion());
}
break;
} catch (GridCacheEntryRemovedException ignore) {
assert false : "Got removed exception on entry with dht local candidate: " + entry;
}
}
}
assert req.transactionNodes() != null;
try {
cctx.io().send(nearMapping.primary(), req, tx.ioPolicy());
if (msgLog.isDebugEnabled()) {
msgLog.debug("DHT prepare fut, sent request near [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + nearMapping.primary().id() + ']');
}
} catch (ClusterTopologyCheckedException ignored) {
fut.onNodeLeft();
} catch (IgniteCheckedException e) {
if (!cctx.kernalContext().isStopping()) {
if (msgLog.isDebugEnabled()) {
msgLog.debug("DHT prepare fut, failed to send request near [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + nearMapping.primary().id() + ']');
}
fut.onResult(e);
} else {
if (msgLog.isDebugEnabled()) {
msgLog.debug("DHT prepare fut, failed to send request near, ignore [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + nearMapping.primary().id() + ", err=" + e + ']');
}
}
}
}
}
}
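sendPrepareRequests() follows a future-per-node pattern: a MiniFuture is registered on the compound prepare future before the message goes out, and is completed by the response, a send failure, or the target node leaving the cluster. A compressed, hypothetical sketch of that shape is below; buildRequest() is a made-up helper, miniId is assumed in scope, and the remaining names mirror the code above rather than a runnable class.
// Compressed sketch of the per-node mini-future pattern; not the real method body.
for (GridDistributedTxMapping mapping : tx.dhtMap().values()) {
    MiniFuture fut = new MiniFuture(mapping.primary().id(), ++miniId, mapping, null);
    add(fut); // Register first, so a racing response always finds its mini future.
    try {
        cctx.io().send(mapping.primary(), buildRequest(fut, mapping), tx.ioPolicy()); // buildRequest() is hypothetical.
    } catch (ClusterTopologyCheckedException ignored) {
        fut.onNodeLeft(); // Target left the cluster: complete this mini future locally.
    } catch (IgniteCheckedException e) {
        fut.onResult(e); // Send failed: fail only this mini future, not the whole prepare.
    }
}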