Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class GridDhtTxPrepareFuture, method onEntriesLocked.
/**
 * Invoked once all entries are locked: computes the transaction return value, applies entry processors
 * and filters, calculates TTLs, and captures old values when rebalancing is not finished.
 */
private void onEntriesLocked() {
    ret = new GridCacheReturn(null, tx.localResult(), true, null, true);

    for (IgniteTxEntry writeEntry : req.writes()) {
        IgniteTxEntry txEntry = tx.entry(writeEntry.txKey());

        assert txEntry != null : writeEntry;

        GridCacheContext cacheCtx = txEntry.context();

        GridCacheEntryEx cached = txEntry.cached();

        ExpiryPolicy expiry = cacheCtx.expiryForTxEntry(txEntry);

        cctx.database().checkpointReadLock();

        try {
            if ((txEntry.op() == CREATE || txEntry.op() == UPDATE) && txEntry.conflictExpireTime() == CU.EXPIRE_TIME_CALCULATE) {
                if (expiry != null) {
                    cached.unswap(true);

                    Duration duration = cached.hasValue() ? expiry.getExpiryForUpdate() : expiry.getExpiryForCreation();

                    txEntry.ttl(CU.toTtl(duration));
                }
            }

            boolean hasFilters = !F.isEmptyOrNulls(txEntry.filters()) && !F.isAlwaysTrue(txEntry.filters());

            CacheObject val;
            CacheObject oldVal = null;

            boolean readOld = hasFilters || retVal || txEntry.op() == DELETE || txEntry.op() == TRANSFORM ||
                tx.nearOnOriginatingNode() || tx.hasInterceptor();

            if (readOld) {
                boolean readThrough = !txEntry.skipStore() &&
                    (txEntry.op() == TRANSFORM || ((retVal || hasFilters) && cacheCtx.config().isLoadPreviousValue()));

                boolean evt = retVal || txEntry.op() == TRANSFORM;

                EntryProcessor entryProc = null;

                if (evt && txEntry.op() == TRANSFORM)
                    entryProc = F.first(txEntry.entryProcessors()).get1();

                final boolean keepBinary = txEntry.keepBinary();

                val = oldVal = cached.innerGet(
                    null,
                    tx,
                    readThrough,
                    /*metrics*/retVal,
                    /*event*/evt,
                    tx.subjectId(),
                    entryProc,
                    tx.resolveTaskName(),
                    null,
                    keepBinary);

                if (retVal || txEntry.op() == TRANSFORM) {
                    if (!F.isEmpty(txEntry.entryProcessors())) {
                        invoke = true;

                        if (txEntry.hasValue())
                            val = txEntry.value();

                        KeyCacheObject key = txEntry.key();

                        Object procRes = null;
                        Exception err = null;

                        boolean modified = false;

                        txEntry.oldValueOnPrimary(val != null);

                        for (T2<EntryProcessor<Object, Object, Object>, Object[]> t : txEntry.entryProcessors()) {
                            CacheInvokeEntry<Object, Object> invokeEntry =
                                new CacheInvokeEntry<>(key, val, txEntry.cached().version(), keepBinary, txEntry.cached());

                            try {
                                EntryProcessor<Object, Object, Object> processor = t.get1();

                                procRes = processor.process(invokeEntry, t.get2());

                                val = cacheCtx.toCacheObject(invokeEntry.getValue(true));

                                // No validation for the remove case.
                                if (val != null)
                                    cacheCtx.validateKeyAndValue(key, val);
                            }
                            catch (Exception e) {
                                err = e;

                                break;
                            }

                            modified |= invokeEntry.modified();
                        }

                        if (modified)
                            val = cacheCtx.toCacheObject(cacheCtx.unwrapTemporary(val));

                        GridCacheOperation op = modified ? (val == null ? DELETE : UPDATE) : NOOP;

                        if (op == NOOP) {
                            if (expiry != null) {
                                long ttl = CU.toTtl(expiry.getExpiryForAccess());

                                txEntry.ttl(ttl);

                                if (ttl == CU.TTL_ZERO)
                                    op = DELETE;
                            }
                        }

                        txEntry.entryProcessorCalculatedValue(new T2<>(op, op == NOOP ? null : val));

                        if (retVal) {
                            if (err != null || procRes != null)
                                ret.addEntryProcessResult(txEntry.context(), key, null, procRes, err, keepBinary);
                            else
                                ret.invokeResult(true);
                        }
                    }
                    else if (retVal)
                        ret.value(cacheCtx, val, keepBinary);
                }

                if (hasFilters && !cacheCtx.isAll(cached, txEntry.filters())) {
                    if (expiry != null)
                        txEntry.ttl(CU.toTtl(expiry.getExpiryForAccess()));

                    txEntry.op(GridCacheOperation.NOOP);

                    if (filterFailedKeys == null)
                        filterFailedKeys = new ArrayList<>();

                    filterFailedKeys.add(cached.txKey());

                    ret.success(false);
                }
                else
                    ret.success(txEntry.op() != DELETE || cached.hasValue());
            }

            // Send the old value in case rebalancing is not finished.
            final boolean sndOldVal = !cacheCtx.isLocal() && !cacheCtx.topology().rebalanceFinished(tx.topologyVersion());

            if (sndOldVal) {
                if (oldVal == null && !readOld) {
                    oldVal = cached.innerGet(
                        null,
                        tx,
                        /*readThrough*/false,
                        /*metrics*/false,
                        /*event*/false,
                        /*subjectId*/tx.subjectId(),
                        /*transformClo*/null,
                        /*taskName*/null,
                        /*expiryPlc*/null,
                        /*keepBinary*/true);
                }

                if (oldVal != null)
                    oldVal.prepareMarshal(cacheCtx.cacheObjectContext());

                txEntry.oldValue(oldVal);
            }
        }
        catch (IgniteCheckedException e) {
            U.error(log, "Failed to get result value for cache entry: " + cached, e);
        }
        catch (GridCacheEntryRemovedException e) {
            assert false : "Got entry removed exception while holding transactional lock on entry [e=" + e +
                ", cached=" + cached + ']';
        }
        finally {
            cctx.database().checkpointReadUnlock();
        }
    }
}
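The heart of this prepare step is the entry-processor chain: processors run in order against an invoke entry, the first exception stops the chain, and the final value decides between NOOP, UPDATE and DELETE. Below is a minimal, self-contained sketch of that decision logic written against the public JCache EntryProcessor/MutableEntry interfaces; SimpleEntry, applyChain and the Op enum are illustrative stand-ins, not Ignite internals.

import java.util.Collections;
import java.util.List;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.MutableEntry;

/** Minimal illustration: apply a chain of entry processors and derive the resulting operation. */
public class EntryProcessorChainSketch {
    /** Operations the prepare step can record for an entry (simplified). */
    enum Op { NOOP, UPDATE, DELETE }

    /** Simple in-memory MutableEntry standing in for Ignite's CacheInvokeEntry. */
    static class SimpleEntry<K, V> implements MutableEntry<K, V> {
        private final K key;
        private V val;
        private boolean modified;

        SimpleEntry(K key, V val) { this.key = key; this.val = val; }

        @Override public boolean exists() { return val != null; }
        @Override public void remove() { val = null; modified = true; }
        @Override public void setValue(V v) { val = v; modified = true; }
        @Override public K getKey() { return key; }
        @Override public V getValue() { return val; }
        @Override public <T> T unwrap(Class<T> cls) { throw new IllegalArgumentException("Unwrapping is not supported."); }

        boolean modified() { return modified; }
    }

    /** Applies processors in order; an unchanged value maps to NOOP, a null final value to DELETE. */
    static <K, V> Op applyChain(K key, V oldVal, List<EntryProcessor<K, V, ?>> processors) {
        SimpleEntry<K, V> entry = new SimpleEntry<>(key, oldVal);
        boolean modified = false;

        for (EntryProcessor<K, V, ?> proc : processors) {
            try {
                proc.process(entry);
            }
            catch (Exception e) {
                break; // The first failure stops the chain, as in the prepare step above.
            }

            modified |= entry.modified();
        }

        return !modified ? Op.NOOP : (entry.getValue() == null ? Op.DELETE : Op.UPDATE);
    }

    public static void main(String[] args) {
        EntryProcessor<String, Integer, Object> inc =
            (e, a) -> { e.setValue(e.exists() ? e.getValue() + 1 : 1); return null; };

        System.out.println(applyChain("k", 41, Collections.singletonList(inc))); // UPDATE
        System.out.println(applyChain("k", 41, Collections.emptyList()));        // NOOP
    }
}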
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class GridDhtTxPrepareFuture, method readyLocks.
/**
* @param checkEntries Entries.
*/
private void readyLocks(Iterable<IgniteTxEntry> checkEntries) {
    for (IgniteTxEntry txEntry : checkEntries) {
        GridCacheContext cacheCtx = txEntry.context();

        if (cacheCtx.isLocal())
            continue;

        GridDistributedCacheEntry entry = (GridDistributedCacheEntry) txEntry.cached();

        if (entry == null) {
            entry = (GridDistributedCacheEntry) cacheCtx.cache().entryEx(txEntry.key(), tx.topologyVersion());

            txEntry.cached(entry);
        }

        if (tx.optimistic() && txEntry.explicitVersion() == null) {
            synchronized (this) {
                lockKeys.add(txEntry.txKey());
            }
        }

        while (true) {
            try {
                assert txEntry.explicitVersion() == null || entry.lockedBy(txEntry.explicitVersion());

                CacheLockCandidates owners = entry.readyLock(tx.xidVersion());

                if (log.isDebugEnabled())
                    log.debug("Current lock owners for entry [owner=" + owners + ", entry=" + entry + ']');

                break; // While.
            }
            // Possible if entry cached within transaction is obsolete.
            catch (GridCacheEntryRemovedException ignored) {
                if (log.isDebugEnabled())
                    log.debug("Got removed entry in future onAllReplies method (will retry): " + txEntry);

                entry = (GridDistributedCacheEntry) cacheCtx.cache().entryEx(txEntry.key(), tx.topologyVersion());

                txEntry.cached(entry);
            }
        }
    }
}
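Both GridDhtTxPrepareFuture methods shown here rely on the same retry idiom: catch GridCacheEntryRemovedException, re-resolve the entry and try again. The following is a generic sketch of that idiom, with an illustrative EntryRemovedException and resolver/action interfaces standing in for the internal Ignite types.

import java.util.function.Supplier;

/** Generic retry-until-valid-entry pattern (illustrative, not the Ignite internal class hierarchy). */
public class RetryOnRemovedSketch {
    /** Thrown when a cached entry turned obsolete between lookup and use. */
    static class EntryRemovedException extends Exception { }

    /** Work to perform against a freshly resolved entry. */
    interface EntryAction<E, R> {
        R apply(E entry) throws EntryRemovedException;
    }

    /**
     * Re-resolves the entry and retries the action whenever it observes a removed entry,
     * mirroring the while(true)/catch loop in readyLocks above.
     */
    static <E, R> R retryOnRemoved(Supplier<E> resolver, EntryAction<E, R> action) {
        while (true) {
            E entry = resolver.get();

            try {
                return action.apply(entry);
            }
            catch (EntryRemovedException ignored) {
                // Entry turned obsolete; loop around and resolve a fresh instance.
            }
        }
    }
}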
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class GridPartitionedGetFuture, method localGet.
/**
* @param topVer Topology version.
* @param key Key.
* @param part Partition.
* @param locVals Local values.
* @return {@code True} if there is no need to further search value.
*/
private boolean localGet(AffinityTopologyVersion topVer, KeyCacheObject key, int part, Map<K, V> locVals) {
    assert cctx.affinityNode() : this;

    GridDhtCacheAdapter<K, V> cache = cache();

    boolean readNoEntry = cctx.readNoEntry(expiryPlc, false);
    boolean evt = !skipVals;

    while (true) {
        try {
            boolean skipEntry = readNoEntry;

            EntryGetResult getRes = null;
            CacheObject v = null;
            GridCacheVersion ver = null;

            if (readNoEntry) {
                CacheDataRow row = cctx.offheap().read(cctx, key);

                if (row != null) {
                    long expireTime = row.expireTime();

                    if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
                        v = row.value();

                        if (needVer)
                            ver = row.version();

                        if (evt) {
                            cctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
                        }
                    }
                    else
                        skipEntry = false;
                }
            }

            if (!skipEntry) {
                GridCacheEntryEx entry = cache.entryEx(key);

                // If our DHT cache does have the value, then we peek it.
                if (entry != null) {
                    boolean isNew = entry.isNewLocked();

                    if (needVer) {
                        getRes = entry.innerGetVersioned(
                            null,
                            null,
                            /*update-metrics*/false,
                            /*event*/evt,
                            subjId,
                            null,
                            taskName,
                            expiryPlc,
                            !deserializeBinary,
                            null);

                        if (getRes != null) {
                            v = getRes.value();
                            ver = getRes.version();
                        }
                    }
                    else {
                        v = entry.innerGet(
                            null,
                            null,
                            /*read-through*/false,
                            /*update-metrics*/false,
                            /*event*/evt,
                            subjId,
                            null,
                            taskName,
                            expiryPlc,
                            !deserializeBinary);
                    }

                    cache.context().evicts().touch(entry, topVer);

                    // Entry was not in memory or in swap, so we remove it from cache.
                    if (v == null) {
                        if (isNew && entry.markObsoleteIfEmpty(ver))
                            cache.removeEntry(entry);
                    }
                }
            }

            if (v != null) {
                cctx.addResult(locVals, key, v, skipVals, keepCacheObjects, deserializeBinary, true, getRes, ver, 0, 0, needVer);

                return true;
            }

            boolean topStable = cctx.isReplicated() || topVer.equals(cctx.topology().lastTopologyChangeVersion());

            // Entry not found, do not continue the search if topology did not change and there is no store.
            if (!cctx.readThroughConfigured() && (topStable || partitionOwned(part))) {
                if (!skipVals && cctx.statisticsEnabled())
                    cache.metrics0().onRead(false);

                return true;
            }

            return false;
        }
        catch (GridCacheEntryRemovedException ignored) {
            // No-op, will retry.
        }
        catch (GridDhtInvalidPartitionException ignored) {
            return false;
        }
        catch (IgniteCheckedException e) {
            onDone(e);

            return true;
        }
    }
}
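localGet is the internal fast path: it first tries a direct off-heap row read and only falls back to a full cache entry lookup when versions, events or expiry handling are required. The closest public-API counterpart is a local peek, which likewise never leaves the node and never triggers read-through. A small usage sketch follows; the cache name "demo" is an arbitrary assumption.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CachePeekMode;
import org.apache.ignite.configuration.CacheConfiguration;

/** Public-API counterpart of a local read: peek values held on this node without a network hop. */
public class LocalPeekExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache =
                ignite.getOrCreateCache(new CacheConfiguration<Integer, String>("demo"));

            cache.put(1, "one");

            // Reads only locally held (primary or backup) copies; returns null if the key is not local.
            String locVal = cache.localPeek(1, CachePeekMode.PRIMARY, CachePeekMode.BACKUP);

            System.out.println("Local value: " + locVal);
        }
    }
}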
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class GridNearTransactionalCache, method startRemoteTx.
/**
* @param nodeId Primary node ID.
* @param req Request.
* @return Remote transaction.
* @throws IgniteCheckedException If failed.
* @throws GridDistributedLockCancelledException If lock has been cancelled.
*/
@SuppressWarnings({ "RedundantTypeArguments", "ForLoopReplaceableByForEach" })
@Nullable
public GridNearTxRemote startRemoteTx(UUID nodeId, GridDhtLockRequest req)
    throws IgniteCheckedException, GridDistributedLockCancelledException {
    List<KeyCacheObject> nearKeys = req.nearKeys();

    GridNearTxRemote tx = null;

    ClassLoader ldr = ctx.deploy().globalLoader();

    if (ldr != null) {
        Collection<IgniteTxKey> evicted = null;

        // Avoid iterator creation.
        for (int i = 0; i < nearKeys.size(); i++) {
            KeyCacheObject key = nearKeys.get(i);

            if (key == null)
                continue;

            IgniteTxKey txKey = ctx.txKey(key);

            if (log.isDebugEnabled())
                log.debug("Unmarshalled key: " + key);

            GridNearCacheEntry entry = null;

            while (true) {
                try {
                    entry = peekExx(key);

                    if (entry != null) {
                        // Handle implicit locks for pessimistic transactions.
                        if (req.inTx()) {
                            tx = ctx.tm().nearTx(req.version());

                            if (tx == null) {
                                tx = new GridNearTxRemote(
                                    ctx.shared(),
                                    req.topologyVersion(),
                                    nodeId,
                                    req.nearNodeId(),
                                    req.nearXidVersion(),
                                    req.version(),
                                    null,
                                    ctx.systemTx(),
                                    ctx.ioPolicy(),
                                    PESSIMISTIC,
                                    req.isolation(),
                                    req.isInvalidate(),
                                    req.timeout(),
                                    req.txSize(),
                                    req.subjectId(),
                                    req.taskNameHash());

                                tx = ctx.tm().onCreated(null, tx);

                                if (tx == null || !ctx.tm().onStarted(tx))
                                    throw new IgniteTxRollbackCheckedException("Failed to acquire lock " +
                                        "(transaction has been completed): " + req.version());
                            }

                            tx.addEntry(ctx, txKey, GridCacheOperation.NOOP, null, /*Value.*/
                                null, /*dr version*/
                                req.skipStore(), req.keepBinary());
                        }

                        // Add remote candidate before reordering.
                        // Owned candidates should be reordered inside entry lock.
                        entry.addRemote(req.nodeId(), nodeId, req.threadId(), req.version(), tx != null,
                            tx != null && tx.implicitSingle(), req.owned(entry.key()));

                        if (!req.inTx())
                            ctx.evicts().touch(entry, req.topologyVersion());
                    }
                    else {
                        if (evicted == null)
                            evicted = new LinkedList<>();

                        evicted.add(txKey);
                    }

                    // Double-check in case the sender node left the grid.
                    if (ctx.discovery().node(req.nodeId()) == null) {
                        if (log.isDebugEnabled())
                            log.debug("Node requesting lock left grid (lock request will be ignored): " + req);

                        if (tx != null)
                            tx.rollbackRemoteTx();

                        return null;
                    }

                    // Entry is legit.
                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " + entry;

                    if (log.isDebugEnabled())
                        log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

                    if (tx != null) {
                        tx.clearEntry(txKey);

                        if (log.isDebugEnabled())
                            log.debug("Cleared removed entry from remote transaction (will retry) [entry=" + entry +
                                ", tx=" + tx + ']');
                    }
                }
            }
        }

        if (tx != null && evicted != null) {
            assert !evicted.isEmpty();

            for (IgniteTxKey evict : evicted)
                tx.addEvicted(evict);
        }
    }
    else {
        String err = "Failed to acquire deployment class loader for message: " + req;

        U.warn(log, err);

        throw new IgniteCheckedException(err);
    }

    return tx;
}
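startRemoteTx is the receiving side of a pessimistic lock request: it lazily creates the remote near transaction, registers the lock candidate and rolls back if the requesting node already left the grid. From the public API, such lock requests are produced simply by reading and writing inside a pessimistic transaction. A hedged usage example follows; the cache name and values are arbitrary assumptions.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;

import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;

/** Pessimistic transaction from the public API; each read/write acquires an entry lock that the
 *  near and DHT caches track internally, as in startRemoteTx above. */
public class PessimisticTxExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<>("accounts");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(ccfg);

            cache.put(1, 100);

            try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
                // In a PESSIMISTIC transaction this get() acquires the entry lock on the primary node.
                Integer balance = cache.get(1);

                cache.put(1, balance - 10);

                tx.commit();
            }
        }
    }
}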
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class GridNearTransactionalCache, method removeLocks.
/**
* Removes locks regardless of whether they are owned or not for given
* version and keys.
*
* @param ver Lock version.
* @param keys Keys.
*/
@SuppressWarnings({ "unchecked" })
public void removeLocks(GridCacheVersion ver, Collection<KeyCacheObject> keys) {
    if (keys.isEmpty())
        return;

    try {
        int keyCnt = -1;

        Map<ClusterNode, GridNearUnlockRequest> map = null;

        for (KeyCacheObject key : keys) {
            // Send request to remove from remote nodes.
            GridNearUnlockRequest req = null;

            while (true) {
                GridDistributedCacheEntry entry = peekExx(key);

                try {
                    if (entry != null) {
                        GridCacheMvccCandidate cand = entry.candidate(ver);

                        if (cand != null) {
                            if (map == null) {
                                Collection<ClusterNode> affNodes = CU.affinityNodes(ctx, cand.topologyVersion());

                                if (F.isEmpty(affNodes))
                                    return;

                                keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                                map = U.newHashMap(affNodes.size());
                            }

                            ClusterNode primary = ctx.affinity().primaryByKey(key, cand.topologyVersion());

                            if (primary == null) {
                                if (log.isDebugEnabled())
                                    log.debug("Failed to unlock key (all partition nodes left the grid).");

                                break;
                            }

                            if (!primary.isLocal()) {
                                req = map.get(primary);

                                if (req == null) {
                                    map.put(primary, req = new GridNearUnlockRequest(ctx.cacheId(), keyCnt,
                                        ctx.deploymentEnabled()));

                                    req.version(ver);
                                }
                            }

                            // Remove candidate from local node first.
                            if (entry.removeLock(cand.version())) {
                                if (primary.isLocal()) {
                                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                                    assert req == null;

                                    continue;
                                }

                                req.addKey(entry.key(), ctx);
                            }
                        }
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock from removed entry (will retry) [rmvVer=" + ver +
                            ", entry=" + entry + ']');
                }
            }
        }

        if (map == null || map.isEmpty())
            return;

        IgnitePair<Collection<GridCacheVersion>> versPair = ctx.tm().versions(ver);

        Collection<GridCacheVersion> committed = versPair.get1();
        Collection<GridCacheVersion> rolledback = versPair.get2();

        for (Map.Entry<ClusterNode, GridNearUnlockRequest> mapping : map.entrySet()) {
            ClusterNode n = mapping.getKey();

            GridDistributedUnlockRequest req = mapping.getValue();

            if (!F.isEmpty(req.keys())) {
                req.completedVersions(committed, rolledback);

                // We don't wait for reply to this message.
                ctx.io().send(n, req, ctx.ioPolicy());
            }
        }
    }
    catch (IgniteCheckedException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
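removeLocks removes the local lock candidates first and then batches unlock requests per primary node. On the public API side, this is the machinery that ultimately runs when an explicitly acquired cache lock is released. A small usage sketch follows; the cache name is an arbitrary assumption.

import java.util.concurrent.locks.Lock;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;

/** Explicit entry lock from the public API; unlock() is what eventually drives lock removal
 *  on near and DHT nodes, as in removeLocks above. */
public class ExplicitLockExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("locks");
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            Lock lock = cache.lock(1);

            lock.lock();

            try {
                cache.put(1, "locked update");
            }
            finally {
                // Releasing the lock removes local candidates and sends unlock requests to remote primaries.
                lock.unlock();
            }
        }
    }
}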