Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
From the class GridDhtColocatedLockFuture, method map0.
/**
* @param keys Keys to map.
* @param remap Remap flag.
* @param topLocked Topology locked flag.
* @throws IgniteCheckedException If mapping failed.
*/
private synchronized void map0(Collection<KeyCacheObject> keys, boolean remap, boolean topLocked) throws IgniteCheckedException {
AffinityTopologyVersion topVer = this.topVer;
assert topVer != null;
assert topVer.topologyVersion() > 0;
if (CU.affinityNodes(cctx, topVer).isEmpty()) {
onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for cache " + "(all partition nodes left the grid): " + cctx.name()));
return;
}
boolean clientNode = cctx.kernalContext().clientNode();
assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));
// First assume this node is primary for all keys passed in.
if (!clientNode && mapAsPrimary(keys, topVer))
return;
mappings = new ArrayDeque<>();
// Assign keys to primary nodes.
GridNearLockMapping map = null;
for (KeyCacheObject key : keys) {
GridNearLockMapping updated = map(key, map, topVer);
// If new mapping was created, add to collection.
if (updated != map) {
mappings.add(updated);
if (tx != null && updated.node().isLocal())
tx.colocatedLocallyMapped(true);
}
map = updated;
}
if (isDone()) {
if (log.isDebugEnabled())
log.debug("Abandoning (re)map because future is done: " + this);
return;
}
if (log.isDebugEnabled())
log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');
boolean hasRmtNodes = false;
boolean first = true;
// Create mini futures.
for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
GridNearLockMapping mapping = iter.next();
ClusterNode node = mapping.node();
Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();
boolean loc = node.equals(cctx.localNode());
assert !mappedKeys.isEmpty();
GridNearLockRequest req = null;
Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());
for (KeyCacheObject key : mappedKeys) {
IgniteTxKey txKey = cctx.txKey(key);
GridDistributedCacheEntry entry = null;
if (tx != null) {
IgniteTxEntry txEntry = tx.entry(txKey);
if (txEntry != null) {
entry = (GridDistributedCacheEntry) txEntry.cached();
if (entry != null && loc == entry.detached()) {
entry = cctx.colocated().entryExx(key, topVer, true);
txEntry.cached(entry);
}
}
}
boolean explicit;
while (true) {
try {
if (entry == null)
entry = cctx.colocated().entryExx(key, topVer, true);
if (!cctx.isAll(entry, filter)) {
if (log.isDebugEnabled())
log.debug("Entry being locked did not pass filter (will not lock): " + entry);
onComplete(false, false, true);
return;
}
assert loc ^ entry.detached() : "Invalid entry [loc=" + loc + ", entry=" + entry + ']';
GridCacheMvccCandidate cand = addEntry(entry);
// Will either return value from dht cache or null if this is a miss.
IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.detached() ? null : ((GridDhtCacheEntry) entry).versionedValue(topVer);
GridCacheVersion dhtVer = null;
if (val != null) {
dhtVer = val.get1();
valMap.put(key, val);
}
if (cand != null && !cand.reentry()) {
if (req == null) {
boolean clientFirst = false;
if (first) {
clientFirst = clientNode && !topLocked && (tx == null || !tx.hasRemoteLocks());
first = false;
}
assert !implicitTx() && !implicitSingleTx() : tx;
req = new GridNearLockRequest(
    cctx.cacheId(),
    topVer,
    cctx.nodeId(),
    threadId,
    futId,
    lockVer,
    inTx(),
    read,
    retval,
    isolation(),
    isInvalidate(),
    timeout,
    mappedKeys.size(),
    inTx() ? tx.size() : mappedKeys.size(),
    inTx() && tx.syncMode() == FULL_SYNC,
    inTx() ? tx.subjectId() : null,
    inTx() ? tx.taskNameHash() : 0,
    read ? createTtl : -1L,
    read ? accessTtl : -1L,
    skipStore,
    keepBinary,
    clientFirst,
    false,
    cctx.deploymentEnabled());
mapping.request(req);
}
distributedKeys.add(key);
if (tx != null)
tx.addKeyMapping(txKey, mapping.node());
req.addKeyBytes(
    key,
    retval,
    dhtVer, // Include DHT version to match remote DHT entry.
    cctx);
}
explicit = inTx() && cand == null;
if (explicit)
tx.addKeyMapping(txKey, mapping.node());
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
entry = null;
}
}
// Mark mapping explicit lock flag.
if (explicit) {
boolean marked = tx != null && tx.markExplicit(node.id());
assert tx == null || marked;
}
}
if (!distributedKeys.isEmpty()) {
mapping.distributedKeys(distributedKeys);
hasRmtNodes |= !mapping.node().isLocal();
} else {
assert mapping.request() == null;
iter.remove();
}
}
if (hasRmtNodes) {
trackable = true;
if (!remap && !cctx.mvcc().addFuture(this))
throw new IllegalStateException("Duplicate future ID: " + this);
} else
trackable = false;
proceedMapping();
}
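All of the usages on this page share one idiom, shown in map0 above: look up the entry, attempt the operation, and if GridCacheEntryRemovedException signals that the entry turned obsolete between lookup and use, null out the reference and loop around with a fresh entry. Below is a minimal, self-contained sketch of that retry loop. Entry, Cache and EntryRemovedException are hypothetical stand-ins for Ignite's GridCacheEntryEx, cache context and GridCacheEntryRemovedException, not the actual internal API.

// A minimal sketch of the retry idiom (stand-in types, not Ignite's API).
public final class RetryOnRemovedSketch {
    /** Signals that an entry became obsolete between lookup and use. */
    static class EntryRemovedException extends Exception {}

    /** Stand-in for a cache entry such as GridCacheEntryEx. */
    interface Entry {
        boolean lock() throws EntryRemovedException;
    }

    /** Stand-in for the cache context; always returns a fresh entry. */
    interface Cache {
        Entry entry(String key);
    }

    /** Re-reads the entry until the operation completes without hitting a removed entry. */
    static boolean lockWithRetry(Cache cache, String key) {
        Entry entry = null;

        while (true) {
            try {
                if (entry == null)
                    entry = cache.entry(key);

                return entry.lock(); // Success: leave the loop.
            }
            catch (EntryRemovedException ignored) {
                entry = null; // Entry turned obsolete under us: refresh and retry.
            }
        }
    }
}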
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
From the class GridNearTransactionalCache, method clearLocks.
/**
* @param nodeId Node ID.
* @param req Request.
*/
@SuppressWarnings({ "RedundantTypeArguments" })
public void clearLocks(UUID nodeId, GridDhtUnlockRequest req) {
assert nodeId != null;
GridCacheVersion obsoleteVer = ctx.versions().next();
List<KeyCacheObject> keys = req.nearKeys();
if (keys != null) {
AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion();
for (KeyCacheObject key : keys) {
while (true) {
GridDistributedCacheEntry entry = peekExx(key);
try {
if (entry != null) {
entry.doneRemote(
    req.version(),
    req.version(),
    null,
    req.committedVersions(),
    req.rolledbackVersions(),
    /*system invalidate*/false);
// Remove the lock for the version we are about to remove.
if (entry.removeLock(req.version())) {
if (log.isDebugEnabled())
log.debug("Removed lock [lockId=" + req.version() + ", key=" + key + ']');
// Try to evict near entry if it is dht-mapped locally.
evictNearEntry(entry, obsoleteVer, topVer);
} else {
if (log.isDebugEnabled())
log.debug("Received unlock request for unknown candidate " + "(added to cancelled locks set): " + req);
}
ctx.evicts().touch(entry, topVer);
} else if (log.isDebugEnabled())
log.debug("Received unlock request for entry that could not be found: " + req);
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Received remove lock request for removed entry (will retry) [entry=" + entry + ", req=" + req + ']');
}
}
}
}
}
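clearLocks shows the unlock-path variant of the same idiom: the entry is peeked (never created), a null peek means there is nothing to unlock, and the loop retries only when the entry is removed between the peek and the lock removal. A compact sketch of that shape, again with hypothetical stand-in types rather than Ignite's API:

// Sketch of the unlock-path retry (stand-in types, not Ignite's API).
final class UnlockRetrySketch {
    static class EntryRemovedException extends Exception {}

    interface Entry {
        /** @return True if a lock for the given version was present and removed. */
        boolean removeLock(long lockVer) throws EntryRemovedException;
    }

    interface Cache {
        /** Returns the entry if present, or null (nothing to unlock). */
        Entry peek(String key);
    }

    static void clearLockWithRetry(Cache cache, String key, long lockVer) {
        while (true) {
            Entry entry = cache.peek(key);

            try {
                if (entry != null && entry.removeLock(lockVer))
                    System.out.println("Removed lock [ver=" + lockVer + ", key=" + key + ']');

                break; // Done, or nothing to do.
            }
            catch (EntryRemovedException ignored) {
                // Entry became obsolete between peek() and removeLock(): retry with a fresh peek.
            }
        }
    }
}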
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
From the class GridNearTransactionalCache, method unlockAll.
/**
* {@inheritDoc}
*/
@Override
public void unlockAll(Collection<? extends K> keys) {
if (keys.isEmpty())
return;
try {
GridCacheVersion ver = null;
int keyCnt = -1;
Map<ClusterNode, GridNearUnlockRequest> map = null;
Collection<KeyCacheObject> locKeys = new LinkedList<>();
for (K key : keys) {
while (true) {
KeyCacheObject cacheKey = ctx.toCacheKeyObject(key);
GridDistributedCacheEntry entry = peekExx(cacheKey);
if (entry == null)
    break; // While.
try {
GridCacheMvccCandidate cand = entry.candidate(ctx.nodeId(), Thread.currentThread().getId());
AffinityTopologyVersion topVer = AffinityTopologyVersion.NONE;
if (cand != null) {
assert cand.nearLocal() : "Got non-near-local candidate in near cache: " + cand;
ver = cand.version();
if (map == null) {
Collection<ClusterNode> affNodes = CU.affinityNodes(ctx, cand.topologyVersion());
if (F.isEmpty(affNodes))
return;
keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());
map = U.newHashMap(affNodes.size());
}
topVer = cand.topologyVersion();
// Send request to remove from remote nodes.
ClusterNode primary = ctx.affinity().primaryByKey(key, topVer);
if (primary == null) {
if (log.isDebugEnabled())
log.debug("Failed to unlock key (all partition nodes left the grid).");
break;
}
GridNearUnlockRequest req = map.get(primary);
if (req == null) {
map.put(primary, req = new GridNearUnlockRequest(ctx.cacheId(), keyCnt, ctx.deploymentEnabled()));
req.version(ver);
}
// Remove candidate from local node first.
GridCacheMvccCandidate rmv = entry.removeLock();
if (rmv != null) {
if (!rmv.reentry()) {
if (ver != null && !ver.equals(rmv.version()))
throw new IgniteCheckedException("Failed to unlock (if keys were locked separately, " + "then they need to be unlocked separately): " + keys);
if (!primary.isLocal()) {
assert req != null;
req.addKey(entry.key(), ctx);
} else
locKeys.add(cacheKey);
if (log.isDebugEnabled())
log.debug("Removed lock (will distribute): " + rmv);
} else if (log.isDebugEnabled())
log.debug("Current thread still owns lock (or there are no other nodes)" + " [lock=" + rmv + ", curThreadId=" + Thread.currentThread().getId() + ']');
}
}
assert !topVer.equals(AffinityTopologyVersion.NONE) || cand == null;
if (topVer.equals(AffinityTopologyVersion.NONE))
topVer = ctx.affinity().affinityTopologyVersion();
ctx.evicts().touch(entry, topVer);
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Attempted to unlock removed entry (will retry): " + entry);
}
}
}
if (ver == null)
return;
for (Map.Entry<ClusterNode, GridNearUnlockRequest> mapping : map.entrySet()) {
ClusterNode n = mapping.getKey();
GridDistributedUnlockRequest req = mapping.getValue();
if (n.isLocal())
dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
else if (!F.isEmpty(req.keys()))
// We don't wait for reply to this message.
ctx.io().send(n, req, ctx.ioPolicy());
}
} catch (IgniteCheckedException ex) {
U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
}
}
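Besides the retry loop, unlockAll builds one unlock request per primary node, pre-sizing each batch with keyCnt = ceil(keys.size() / affNodes.size()) so the per-node batches come out roughly even. A hedged sketch of just that grouping step; groupByPrimary and primaryOf are hypothetical names standing in for the affinity lookup, not Ignite methods:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

// Sketch of per-primary-node batching with an even-split capacity hint.
final class UnlockBatchSketch {
    static Map<String, List<String>> groupByPrimary(List<String> keys, int nodeCnt,
        Function<String, String> primaryOf) {
        // Same arithmetic as keyCnt in unlockAll: even split, rounded up.
        int keyCnt = (int)Math.ceil((double)keys.size() / nodeCnt);

        Map<String, List<String>> byNode = new HashMap<>(nodeCnt);

        for (String key : keys)
            byNode.computeIfAbsent(primaryOf.apply(key), n -> new ArrayList<>(keyCnt)).add(key);

        return byNode;
    }
}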
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
From the class GridLocalAtomicCache, method getAllInternal.
/**
* Entry point to all public API get methods.
*
* @param keys Keys to get.
* @param storeEnabled Store enabled flag.
* @param taskName Task name.
* @param deserializeBinary Deserialize binary flag.
* @param skipVals Skip value flag.
* @param needVer Need version.
* @return Key-value map.
* @throws IgniteCheckedException If failed.
*/
@SuppressWarnings("ConstantConditions")
private Map<K, V> getAllInternal(@Nullable Collection<? extends K> keys, boolean storeEnabled, String taskName, boolean deserializeBinary, boolean skipVals, boolean needVer) throws IgniteCheckedException {
ctx.checkSecurity(SecurityPermission.CACHE_READ);
if (F.isEmpty(keys))
return Collections.emptyMap();
CacheOperationContext opCtx = ctx.operationContextPerCall();
UUID subjId = ctx.subjectIdPerCall(null, opCtx);
Map<K, V> vals = U.newHashMap(keys.size());
if (keyCheck)
validateCacheKeys(keys);
final IgniteCacheExpiryPolicy expiry = expiryPolicy(opCtx != null ? opCtx.expiry() : null);
boolean success = true;
boolean readNoEntry = ctx.readNoEntry(expiry, false);
final boolean evt = !skipVals;
for (K key : keys) {
if (key == null)
throw new NullPointerException("Null key.");
KeyCacheObject cacheKey = ctx.toCacheKeyObject(key);
boolean skipEntry = readNoEntry;
if (readNoEntry) {
CacheDataRow row = ctx.offheap().read(ctx, cacheKey);
if (row != null) {
long expireTime = row.expireTime();
if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
ctx.addResult(vals, cacheKey, row.value(), skipVals, false, deserializeBinary, true, null, row.version(), 0, 0, needVer);
if (ctx.statisticsEnabled() && !skipVals)
metrics0().onRead(true);
if (evt) {
ctx.events().readEvent(cacheKey, null, row.value(), subjId, taskName, !deserializeBinary);
}
} else
skipEntry = false;
} else
success = false;
}
if (!skipEntry) {
GridCacheEntryEx entry = null;
while (true) {
try {
entry = entryEx(cacheKey);
if (entry != null) {
CacheObject v;
if (needVer) {
EntryGetResult res = entry.innerGetVersioned(
    null,
    null,
    /*update-metrics*/false,
    /*event*/evt,
    subjId,
    null,
    taskName,
    expiry,
    !deserializeBinary,
    null);
if (res != null) {
ctx.addResult(vals, cacheKey, res, skipVals, false, deserializeBinary, true, needVer);
} else
success = false;
} else {
v = entry.innerGet(
    null,
    null,
    /*read-through*/false,
    /*update-metrics*/true,
    /*event*/evt,
    subjId,
    null,
    taskName,
    expiry,
    !deserializeBinary);
if (v != null) {
ctx.addResult(vals, cacheKey, v, skipVals, false, deserializeBinary, true, null, 0, 0);
} else
success = false;
}
}
break; // While.
} catch (GridCacheEntryRemovedException ignored) {
// No-op, retry.
} finally {
if (entry != null)
ctx.evicts().touch(entry, ctx.affinity().affinityTopologyVersion());
}
if (!success && storeEnabled)
break;
}
}
if (!success) {
if (!storeEnabled && ctx.statisticsEnabled() && !skipVals)
metrics0().onRead(false);
}
}
if (success || !storeEnabled)
return vals;
return getAllAsync(
    keys,
    null,
    opCtx == null || !opCtx.skipStore(),
    false,
    subjId,
    taskName,
    deserializeBinary,
    opCtx != null && opCtx.recovery(),
    /*force primary*/false,
    expiry,
    skipVals,
    needVer).get();
}
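getAllInternal adds a twist to the retry loop: the eviction touch in the finally block must run on every attempt, whether the read succeeded or the entry has to be refetched after a GridCacheEntryRemovedException. A minimal sketch of that shape, with hypothetical stand-in types rather than Ignite's internals:

// Sketch of the read-retry shape with per-attempt touch (stand-in types).
final class ReadRetrySketch {
    static class EntryRemovedException extends Exception {}

    interface Entry {
        String get() throws EntryRemovedException;

        /** Eviction-policy bookkeeping, analogous to ctx.evicts().touch(..). */
        void touch();
    }

    interface Cache {
        Entry entry(String key);
    }

    static String getWithRetry(Cache cache, String key) {
        while (true) {
            Entry entry = null;

            try {
                entry = cache.entry(key);

                return entry.get();
            }
            catch (EntryRemovedException ignored) {
                // No-op: loop around and re-read a fresh entry.
            }
            finally {
                if (entry != null)
                    entry.touch(); // Runs on success and on retry alike.
            }
        }
    }
}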
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
From the class GridDhtAtomicCache, method updateAllAsyncInternal0.
/**
* Executes local update after preloader fetched values.
*
* @param nodeId Node ID.
* @param req Update request.
* @param completionCb Completion callback.
*/
private void updateAllAsyncInternal0(UUID nodeId, GridNearAtomicAbstractUpdateRequest req, UpdateReplyClosure completionCb) {
ClusterNode node = ctx.discovery().node(nodeId);
if (node == null) {
U.warn(msgLog, "Skip near update request, node originated update request left [" + "futId=" + req.futureId() + ", node=" + nodeId + ']');
return;
}
GridNearAtomicUpdateResponse res = new GridNearAtomicUpdateResponse(ctx.cacheId(), nodeId, req.futureId(), req.partition(), false, ctx.deploymentEnabled());
assert !req.returnValue() || (req.operation() == TRANSFORM || req.size() == 1);
GridDhtAtomicAbstractUpdateFuture dhtFut = null;
boolean remap = false;
String taskName = ctx.kernalContext().task().resolveTaskName(req.taskNameHash());
IgniteCacheExpiryPolicy expiry = null;
try {
// If batch store update is enabled, we need to lock all entries.
// First, need to acquire locks on cache entries, then check filter.
List<GridDhtCacheEntry> locked = null;
Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;
try {
GridDhtPartitionTopology top = topology();
top.readLock();
try {
if (top.stopping()) {
res.addFailedKeys(req.keys(), new IgniteCheckedException("Failed to perform cache operation " + "(cache is stopped): " + name()));
completionCb.apply(req, res);
return;
}
// Do not check topology version if topology was locked on near node by
// external transaction or explicit lock.
if (req.topologyLocked() || !needRemap(req.topologyVersion(), top.topologyVersion())) {
ctx.shared().database().ensureFreeSpace(ctx.memoryPolicy());
locked = lockEntries(req, req.topologyVersion());
boolean hasNear = ctx.discovery().cacheNearNode(node, name());
// Assign next version for update inside entries lock.
GridCacheVersion ver = ctx.versions().next(top.topologyVersion());
if (hasNear)
res.nearVersion(ver);
if (msgLog.isDebugEnabled()) {
msgLog.debug("Assigned update version [futId=" + req.futureId() + ", writeVer=" + ver + ']');
}
assert ver != null : "Got null version for update request: " + req;
boolean sndPrevVal = !top.rebalanceFinished(req.topologyVersion());
dhtFut = createDhtFuture(ver, req);
expiry = expiryPolicy(req.expiry());
GridCacheReturn retVal = null;
if (req.size() > 1 &&                     // Several keys ...
    writeThrough() && !req.skipStore() && // and store is enabled ...
    !ctx.store().isLocal() &&             // and this is not local store ...
                                          // (conflict resolver should be used for local store)
    !ctx.dr().receiveEnabled()) {         // and no DR.
// This method can only be used when there are no replicated entries in the batch.
UpdateBatchResult updRes = updateWithBatch(node, hasNear, req, res, locked, ver, dhtFut, ctx.isDrEnabled(), taskName, expiry, sndPrevVal);
deleted = updRes.deleted();
dhtFut = updRes.dhtFuture();
if (req.operation() == TRANSFORM)
retVal = updRes.invokeResults();
} else {
UpdateSingleResult updRes = updateSingle(node, hasNear, req, res, locked, ver, dhtFut, ctx.isDrEnabled(), taskName, expiry, sndPrevVal);
retVal = updRes.returnValue();
deleted = updRes.deleted();
dhtFut = updRes.dhtFuture();
}
if (retVal == null)
retVal = new GridCacheReturn(ctx, node.isLocal(), true, null, true);
res.returnValue(retVal);
if (dhtFut != null) {
// To avoid deadlock disable back-pressure for sender data node.
if (req.writeSynchronizationMode() == PRIMARY_SYNC &&
    !ctx.discovery().cacheAffinityNode(ctx.discovery().node(nodeId), ctx.name()) &&
    !dhtFut.isDone()) {
final IgniteRunnable tracker = GridNioBackPressureControl.threadTracker();
if (tracker != null && tracker instanceof GridNioMessageTracker) {
((GridNioMessageTracker) tracker).onMessageReceived();
dhtFut.listen(new IgniteInClosure<IgniteInternalFuture<Void>>() {
@Override
public void apply(IgniteInternalFuture<Void> fut) {
((GridNioMessageTracker) tracker).onMessageProcessed();
}
});
}
}
ctx.mvcc().addAtomicFuture(dhtFut.id(), dhtFut);
}
} else {
// Should remap all keys.
remap = true;
res.remapTopologyVersion(top.topologyVersion());
}
} finally {
top.readUnlock();
}
} catch (GridCacheEntryRemovedException e) {
assert false : "Entry should not become obsolete while holding lock.";
e.printStackTrace();
} finally {
if (locked != null)
unlockEntries(locked, req.topologyVersion());
// Enqueue if necessary after locks release.
if (deleted != null) {
assert !deleted.isEmpty();
assert ctx.deferredDelete() : this;
for (IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion> e : deleted)
    ctx.onDeferredDelete(e.get1(), e.get2());
}
// TODO fire events only after successful fsync
if (ctx.shared().wal() != null)
ctx.shared().wal().fsync(null);
}
} catch (GridDhtInvalidPartitionException ignore) {
if (log.isDebugEnabled())
log.debug("Caught invalid partition exception for cache entry (will remap update request): " + req);
remap = true;
res.remapTopologyVersion(ctx.topology().topologyVersion());
} catch (Throwable e) {
// At least RuntimeException can be thrown by the code above when GridCacheContext is cleaned and there is
// an attempt to use cleaned resources.
U.error(log, "Unexpected exception during cache update", e);
res.addFailedKeys(req.keys(), e);
completionCb.apply(req, res);
if (e instanceof Error)
throw (Error) e;
return;
}
if (remap) {
assert dhtFut == null;
completionCb.apply(req, res);
} else if (dhtFut != null)
dhtFut.map(node, res.returnValue(), res, completionCb);
if (req.writeSynchronizationMode() != FULL_ASYNC)
req.cleanup(!node.isLocal());
sendTtlUpdateRequest(expiry);
}
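updateAllAsyncInternal0 is the outlier among these usages: it does not retry. All entries are locked via lockEntries(..) for the duration of the update, so a GridCacheEntryRemovedException under the lock would indicate a bug, and the catch block turns it into an assertion failure rather than looping. A hedged sketch of that contract, with stand-in types rather than Ignite's API:

// Sketch of the "impossible while locked" handling (stand-in types).
final class LockedUpdateSketch {
    static class EntryRemovedException extends Exception {}

    interface Entry {
        void update(String val) throws EntryRemovedException;
    }

    /** Caller must hold the entry lock for the duration of the update. */
    static void updateLocked(Entry locked, String val) {
        try {
            locked.update(val);
        }
        catch (EntryRemovedException e) {
            // Unreachable while the lock is held; surface loudly if it ever fires.
            throw new AssertionError("Entry should not become obsolete while holding lock.", e);
        }
    }
}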