Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class GridDhtPartitionDemander, method preloadEntry:
/**
* Adds {@code entry} to partition {@code p}.
*
* @param from Node which sent entry.
* @param p Partition id.
* @param entry Preloaded entry.
* @param topVer Topology version.
* @return {@code False} if partition has become invalid during preloading.
* @throws IgniteInterruptedCheckedException If interrupted.
*/
private boolean preloadEntry(ClusterNode from, int p, GridCacheEntryInfo entry, AffinityTopologyVersion topVer) throws IgniteCheckedException {
ctx.database().checkpointReadLock();
try {
GridCacheEntryEx cached = null;
try {
GridCacheContext cctx = grp.sharedGroup() ? ctx.cacheContext(entry.cacheId()) : grp.singleCacheContext();
cached = cctx.dhtCache().entryEx(entry.key());
if (log.isDebugEnabled())
log.debug("Rebalancing key [key=" + entry.key() + ", part=" + p + ", node=" + from.id() + ']');
cctx.shared().database().checkpointReadLock();
try {
if (preloadPred == null || preloadPred.apply(entry)) {
if (cached.initialValue(entry.value(), entry.version(), entry.ttl(), entry.expireTime(), true, topVer, cctx.isDrEnabled() ? DR_PRELOAD : DR_NONE, false)) {
// Start tracking.
cctx.evicts().touch(cached, topVer);
if (cctx.events().isRecordable(EVT_CACHE_REBALANCE_OBJECT_LOADED) && !cached.isInternal())
cctx.events().addEvent(cached.partition(), cached.key(), cctx.localNodeId(), (IgniteUuid) null, null, EVT_CACHE_REBALANCE_OBJECT_LOADED, entry.value(), true, null, false, null, null, null, true);
} else {
// Start tracking.
cctx.evicts().touch(cached, topVer);
if (log.isDebugEnabled())
log.debug("Rebalancing entry is already in cache (will ignore) [key=" + cached.key() + ", part=" + p + ']');
}
} else if (log.isDebugEnabled())
log.debug("Rebalance predicate evaluated to false for entry (will ignore): " + entry);
} finally {
cctx.shared().database().checkpointReadUnlock();
}
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Entry has been concurrently removed while rebalancing (will ignore) [key=" + cached.key() + ", part=" + p + ']');
} catch (GridDhtInvalidPartitionException ignored) {
if (log.isDebugEnabled())
log.debug("Partition became invalid during rebalancing (will ignore): " + p);
return false;
}
} catch (IgniteInterruptedCheckedException e) {
throw e;
} catch (IgniteCheckedException e) {
throw new IgniteCheckedException("Failed to cache rebalanced entry (will stop rebalancing) [local=" + ctx.localNode() + ", node=" + from.id() + ", key=" + entry.key() + ", part=" + p + ']', e);
} finally {
ctx.database().checkpointReadUnlock();
}
return true;
}
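Stripped of the Ignite internals, the method above has a simple shape: hold the checkpoint read lock for the whole operation, treat a concurrently removed entry as a harmless no-op, and report an invalid partition back to the caller. Below is a minimal sketch of that shape only; PreloadSketch, PartitionStore, EntryRemovedException and InvalidPartitionException are hypothetical stand-ins, not real Ignite APIs.

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Hypothetical sketch of the preloadEntry() control flow; none of these types are Ignite classes.
final class PreloadSketch {
    static final class EntryRemovedException extends Exception { }

    static final class InvalidPartitionException extends RuntimeException { }

    interface PartitionStore {
        /** @throws EntryRemovedException If the entry was removed concurrently. */
        void initialValue(String key, Object val) throws EntryRemovedException, InvalidPartitionException;
    }

    private final Lock checkpointReadLock = new ReentrantLock();

    /** @return {@code false} if the partition became invalid while preloading. */
    boolean preloadEntry(PartitionStore store, String key, Object val) {
        checkpointReadLock.lock(); // Analogue of ctx.database().checkpointReadLock().

        try {
            store.initialValue(key, val);
        }
        catch (EntryRemovedException ignored) {
            // Entry was removed while rebalancing: skip it, the removal wins.
        }
        catch (InvalidPartitionException ignored) {
            // Partition was evicted or moved away: tell the caller to stop preloading it.
            return false;
        }
        finally {
            checkpointReadLock.unlock();
        }

        return true;
    }
}

The real method takes the checkpoint read lock twice (once through the outer context and once through the cache's shared database manager); the sketch keeps a single lock to show only the try/finally discipline around it.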
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class GridNearLockFuture, method proceedMapping0:
/**
* Gets next near lock mapping and either acquires dht locks locally or sends near lock request to
* remote primary node.
*
* @throws IgniteCheckedException If mapping can not be completed.
*/
@SuppressWarnings("unchecked")
private void proceedMapping0() throws IgniteCheckedException {
GridNearLockMapping map;
synchronized (this) {
map = mappings.poll();
}
// If there are no more mappings to process, complete the future.
if (map == null)
return;
final GridNearLockRequest req = map.request();
final Collection<KeyCacheObject> mappedKeys = map.distributedKeys();
final ClusterNode node = map.node();
if (filter != null && filter.length != 0)
req.filter(filter, cctx);
if (node.isLocal()) {
req.miniId(-1);
if (log.isDebugEnabled())
log.debug("Before locally locking near request: " + req);
IgniteInternalFuture<GridNearLockResponse> fut = dht().lockAllAsync(cctx, cctx.localNode(), req, filter);
// Add new future.
add(new GridEmbeddedFuture<>(new C2<GridNearLockResponse, Exception, Boolean>() {
@Override
public Boolean apply(GridNearLockResponse res, Exception e) {
if (CU.isLockTimeoutOrCancelled(e) || (res != null && CU.isLockTimeoutOrCancelled(res.error())))
return false;
if (e != null) {
onError(e);
return false;
}
if (res == null) {
onError(new IgniteCheckedException("Lock response is null for future: " + this));
return false;
}
if (res.error() != null) {
onError(res.error());
return false;
}
if (log.isDebugEnabled())
log.debug("Acquired lock for local DHT mapping [locId=" + cctx.nodeId() + ", mappedKeys=" + mappedKeys + ", fut=" + GridNearLockFuture.this + ']');
try {
int i = 0;
for (KeyCacheObject k : mappedKeys) {
while (true) {
GridNearCacheEntry entry = cctx.near().entryExx(k, req.topologyVersion());
try {
IgniteBiTuple<GridCacheVersion, CacheObject> oldValTup = valMap.get(entry.key());
boolean hasBytes = entry.hasValue();
CacheObject oldVal = entry.rawGet();
CacheObject newVal = res.value(i);
GridCacheVersion dhtVer = res.dhtVersion(i);
GridCacheVersion mappedVer = res.mappedVersion(i);
// On local node don't record twice if DHT cache already recorded.
boolean record = retval && oldValTup != null && oldValTup.get1().equals(dhtVer);
if (newVal == null) {
if (oldValTup != null) {
if (oldValTup.get1().equals(dhtVer))
newVal = oldValTup.get2();
oldVal = oldValTup.get2();
}
}
// Lock is held at this point, so we can set the
// returned value if any.
entry.resetFromPrimary(newVal, lockVer, dhtVer, node.id(), topVer);
entry.readyNearLock(lockVer, mappedVer, res.committedVersions(), res.rolledbackVersions(), res.pending());
if (inTx() && implicitTx() && tx.onePhaseCommit()) {
boolean pass = res.filterResult(i);
tx.entry(cctx.txKey(k)).filters(pass ? CU.empty0() : CU.alwaysFalse0Arr());
}
if (record) {
if (cctx.events().isRecordable(EVT_CACHE_OBJECT_READ))
cctx.events().addEvent(entry.partition(), entry.key(), tx, null, EVT_CACHE_OBJECT_READ, newVal, newVal != null, oldVal, hasBytes, CU.subjectId(tx, cctx.shared()), null, inTx() ? tx.resolveTaskName() : null, keepBinary);
if (cctx.statisticsEnabled())
cctx.cache().metrics0().onRead(oldVal != null);
}
if (log.isDebugEnabled())
log.debug("Processed response for entry [res=" + res + ", entry=" + entry + ']');
// Inner while loop.
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Failed to add candidates because entry was " + "removed (will renew).");
synchronized (GridNearLockFuture.this) {
// Replace old entry with new one.
entries.set(i, (GridDistributedCacheEntry) cctx.cache().entryEx(entry.key()));
}
}
}
// Increment outside of while loop.
i++;
}
// Proceed and add new future (if any) before completing embedded future.
proceedMapping();
} catch (IgniteCheckedException ex) {
onError(ex);
return false;
}
return true;
}
}, fut));
} else {
final MiniFuture fut = new MiniFuture(node, mappedKeys, ++miniId);
req.miniId(fut.futureId());
// Append new future.
add(fut);
IgniteInternalFuture<?> txSync = null;
if (inTx())
txSync = cctx.tm().awaitFinishAckAsync(node.id(), tx.threadId());
if (txSync == null || txSync.isDone()) {
try {
if (log.isDebugEnabled())
log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');
cctx.io().send(node, req, cctx.ioPolicy());
} catch (ClusterTopologyCheckedException ex) {
fut.onResult(ex);
}
} else {
txSync.listen(new CI1<IgniteInternalFuture<?>>() {
@Override
public void apply(IgniteInternalFuture<?> t) {
try {
if (log.isDebugEnabled())
log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');
cctx.io().send(node, req, cctx.ioPolicy());
} catch (ClusterTopologyCheckedException ex) {
fut.onResult(ex);
} catch (IgniteCheckedException e) {
onError(e);
}
}
});
}
}
}
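The per-key loop inside the response closure is the pattern this page documents: each key is handled in a while (true) loop, and GridCacheEntryRemovedException simply means "fetch a fresh entry and try again". A self-contained sketch of that retry shape follows; Cache, CacheEntry, EntryRemovedException and applyLockResponse() are illustrative stand-ins for the Ignite internals, not real APIs.

import java.util.List;

// Hypothetical sketch of the retry-on-removed loop used when applying a lock response.
final class ApplyResponseSketch {
    static final class EntryRemovedException extends Exception { }

    interface CacheEntry {
        /** @throws EntryRemovedException If the entry was concurrently removed. */
        void applyLockResponse(Object resVal) throws EntryRemovedException;
    }

    interface Cache {
        /** Returns a live (non-removed) entry for the key. */
        CacheEntry entryEx(String key);
    }

    void applyResponse(Cache cache, List<String> keys, List<Object> vals) {
        int i = 0;

        for (String key : keys) {
            while (true) {
                CacheEntry entry = cache.entryEx(key); // Fresh entry on every attempt.

                try {
                    entry.applyLockResponse(vals.get(i));

                    break; // Inner while loop.
                }
                catch (EntryRemovedException ignored) {
                    // Entry vanished between lookup and update: loop and renew it.
                }
            }

            i++; // Increment outside of while loop, as in the original.
        }
    }
}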
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class GridNearLockFuture, method map:
/**
* Maps keys to nodes. Note that we cannot simply group keys by node and send one lock request per
* node, as such an approach does not preserve the order of lock acquisition. Instead, keys are split
* into contiguous groups belonging to one primary node, and locks for these groups are acquired sequentially.
*
* @param keys Keys.
* @param remap Remap flag.
* @param topLocked {@code True} if thread already acquired lock preventing topology change.
*/
private void map(Iterable<KeyCacheObject> keys, boolean remap, boolean topLocked) {
try {
AffinityTopologyVersion topVer = this.topVer;
assert topVer != null;
assert topVer.topologyVersion() > 0 : topVer;
if (CU.affinityNodes(cctx, topVer).isEmpty()) {
onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for near-only cache (all " + "partition nodes left the grid)."));
return;
}
boolean clientNode = cctx.kernalContext().clientNode();
assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));
synchronized (this) {
mappings = new ArrayDeque<>();
// Assign keys to primary nodes.
GridNearLockMapping map = null;
for (KeyCacheObject key : keys) {
GridNearLockMapping updated = map(key, map, topVer);
// If new mapping was created, add to collection.
if (updated != map) {
mappings.add(updated);
if (tx != null && updated.node().isLocal())
tx.nearLocallyMapped(true);
}
map = updated;
}
if (isDone()) {
if (log.isDebugEnabled())
log.debug("Abandoning (re)map because future is done: " + this);
return;
}
if (log.isDebugEnabled())
log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');
boolean first = true;
// Create mini futures.
for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
GridNearLockMapping mapping = iter.next();
ClusterNode node = mapping.node();
Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();
assert !mappedKeys.isEmpty();
GridNearLockRequest req = null;
Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());
boolean explicit = false;
for (KeyCacheObject key : mappedKeys) {
IgniteTxKey txKey = cctx.txKey(key);
while (true) {
GridNearCacheEntry entry = null;
try {
entry = cctx.near().entryExx(key, topVer);
if (!cctx.isAll(entry, filter)) {
if (log.isDebugEnabled())
log.debug("Entry being locked did not pass filter (will not lock): " + entry);
onComplete(false, false, true);
return;
}
// Removed exception may be thrown here.
GridCacheMvccCandidate cand = addEntry(topVer, entry, node.id());
if (isDone()) {
if (log.isDebugEnabled())
log.debug("Abandoning (re)map because future is done after addEntry attempt " + "[fut=" + this + ", entry=" + entry + ']');
return;
}
if (cand != null) {
if (tx == null && !cand.reentry())
cctx.mvcc().addExplicitLock(threadId, cand, topVer);
IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.versionedValue();
if (val == null) {
GridDhtCacheEntry dhtEntry = dht().peekExx(key);
try {
if (dhtEntry != null)
val = dhtEntry.versionedValue(topVer);
} catch (GridCacheEntryRemovedException ignored) {
assert dhtEntry.obsolete() : dhtEntry;
if (log.isDebugEnabled())
log.debug("Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
}
}
GridCacheVersion dhtVer = null;
if (val != null) {
dhtVer = val.get1();
valMap.put(key, val);
}
if (!cand.reentry()) {
if (req == null) {
boolean clientFirst = false;
if (first) {
clientFirst = clientNode && !topLocked && (tx == null || !tx.hasRemoteLocks());
first = false;
}
assert !implicitTx() && !implicitSingleTx() : tx;
req = new GridNearLockRequest(cctx.cacheId(), topVer, cctx.nodeId(), threadId, futId, lockVer,
    inTx(), read, retval, isolation(), isInvalidate(), timeout, mappedKeys.size(),
    inTx() ? tx.size() : mappedKeys.size(), inTx() && tx.syncMode() == FULL_SYNC,
    inTx() ? tx.subjectId() : null, inTx() ? tx.taskNameHash() : 0,
    read ? createTtl : -1L, read ? accessTtl : -1L, skipStore, keepBinary, clientFirst, true,
    cctx.deploymentEnabled());
mapping.request(req);
}
distributedKeys.add(key);
if (tx != null)
tx.addKeyMapping(txKey, mapping.node());
// Include DHT version to match remote DHT entry.
req.addKeyBytes(key, retval && dhtVer == null, dhtVer, cctx);
}
if (cand.reentry())
explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
} else {
if (timedOut)
return;
// Ignore reentries within transactions.
explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
}
if (explicit)
tx.addKeyMapping(txKey, mapping.node());
break;
} catch (GridCacheEntryRemovedException ignored) {
assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;
if (log.isDebugEnabled())
log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
}
}
// Mark mapping explicit lock flag.
if (explicit) {
boolean marked = tx != null && tx.markExplicit(node.id());
assert tx == null || marked;
}
}
if (!distributedKeys.isEmpty())
mapping.distributedKeys(distributedKeys);
else {
assert mapping.request() == null;
iter.remove();
}
}
}
cctx.mvcc().recheckPendingLocks();
proceedMapping();
} catch (IgniteCheckedException ex) {
onError(ex);
}
}
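The ordering note in the javadoc is the interesting part of map(): grouping keys purely by node would let locks for a later key be requested before locks for an earlier one, so the method instead cuts the key sequence into maximal runs that share a primary node and processes those runs in order. Here is a small self-contained sketch of just that splitting step; Mapping and mapKeyToNode are hypothetical stand-ins for GridNearLockMapping and the affinity lookup.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

// Hypothetical sketch: split an ordered key sequence into contiguous runs that map to the
// same primary node, so that lock-acquisition order is preserved across nodes.
final class ContiguousMappingSketch {
    static final class Mapping {
        final String nodeId;
        final List<String> keys = new ArrayList<>();

        Mapping(String nodeId) {
            this.nodeId = nodeId;
        }
    }

    static List<Mapping> split(List<String> orderedKeys, Function<String, String> mapKeyToNode) {
        List<Mapping> mappings = new ArrayList<>();

        Mapping cur = null;

        for (String key : orderedKeys) {
            String nodeId = mapKeyToNode.apply(key);

            // Start a new group whenever the primary node changes; non-adjacent runs for the
            // same node are never merged, or ordering relative to other nodes would be lost.
            if (cur == null || !cur.nodeId.equals(nodeId)) {
                cur = new Mapping(nodeId);

                mappings.add(cur);
            }

            cur.keys.add(key);
        }

        return mappings;
    }
}

For example, keys [a, b, c, d] whose primaries are [N1, N1, N2, N1] produce three mappings ([a, b] on N1, [c] on N2, [d] on N1), not two, which is exactly why the method cannot simply group keys by node.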
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class IgniteTxHandler, method startRemoteTx:
/**
* @param nodeId Node ID.
* @param req Request.
* @param res Response.
* @return Remote transaction.
* @throws IgniteCheckedException If failed.
*/
@Nullable
GridDhtTxRemote startRemoteTx(UUID nodeId, GridDhtTxPrepareRequest req, GridDhtTxPrepareResponse res) throws IgniteCheckedException {
if (!F.isEmpty(req.writes())) {
GridDhtTxRemote tx = ctx.tm().tx(req.version());
if (tx == null) {
boolean single = req.last() && req.writes().size() == 1;
tx = new GridDhtTxRemote(ctx, req.nearNodeId(), req.futureId(), nodeId, req.topologyVersion(),
    req.version(), null, req.system(), req.policy(), req.concurrency(), req.isolation(),
    req.isInvalidate(), req.timeout(),
    req.writes() != null ? Math.max(req.writes().size(), req.txSize()) : req.txSize(),
    req.nearXidVersion(), req.transactionNodes(), req.subjectId(), req.taskNameHash(), single,
    req.storeWriteThrough());
tx.writeVersion(req.writeVersion());
tx = ctx.tm().onCreated(null, tx);
if (tx == null || !ctx.tm().onStarted(tx)) {
if (log.isDebugEnabled())
log.debug("Attempt to start a completed transaction (will ignore): " + tx);
return null;
}
if (ctx.discovery().node(nodeId) == null) {
tx.state(ROLLING_BACK);
tx.state(ROLLED_BACK);
ctx.tm().uncommitTx(tx);
return null;
}
} else {
tx.writeVersion(req.writeVersion());
tx.transactionNodes(req.transactionNodes());
}
if (!tx.isSystemInvalidate()) {
int idx = 0;
for (IgniteTxEntry entry : req.writes()) {
GridCacheContext cacheCtx = entry.context();
int part = cacheCtx.affinity().partition(entry.key());
GridDhtLocalPartition locPart = cacheCtx.topology().localPartition(part, req.topologyVersion(), false);
if (locPart != null && locPart.reserve()) {
try {
tx.addWrite(entry, ctx.deploy().globalLoader());
if (isNearEnabled(cacheCtx) && req.invalidateNearEntry(idx))
invalidateNearEntry(cacheCtx, entry.key(), req.version());
if (req.needPreloadKey(idx)) {
GridCacheEntryEx cached = entry.cached();
if (cached == null)
cached = cacheCtx.cache().entryEx(entry.key(), req.topologyVersion());
GridCacheEntryInfo info = cached.info();
if (info != null && !info.isNew() && !info.isDeleted())
res.addPreloadEntry(info);
}
if (cacheCtx.readThroughConfigured() && !entry.skipStore() && entry.op() == TRANSFORM && entry.oldValueOnPrimary() && !entry.hasValue()) {
while (true) {
try {
GridCacheEntryEx cached = entry.cached();
if (cached == null) {
cached = cacheCtx.cache().entryEx(entry.key(), req.topologyVersion());
entry.cached(cached);
}
CacheObject val = cached.innerGet(
    /*ver*/null,
    tx,
    /*readThrough*/false,
    /*updateMetrics*/false,
    /*evt*/false,
    tx.subjectId(),
    /*transformClo*/null,
    tx.resolveTaskName(),
    /*expiryPlc*/null,
    /*keepBinary*/true);
if (val == null)
val = cacheCtx.toCacheObject(cacheCtx.store().load(null, entry.key()));
if (val != null)
entry.readValue(val);
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Got entry removed exception, will retry: " + entry.txKey());
entry.cached(cacheCtx.cache().entryEx(entry.key(), req.topologyVersion()));
}
}
}
} catch (GridDhtInvalidPartitionException e) {
tx.addInvalidPartition(cacheCtx, e.partition());
tx.clearEntry(entry.txKey());
} finally {
locPart.release();
}
} else
tx.addInvalidPartition(cacheCtx, part);
idx++;
}
}
// Prepare prior to reordering, so the pending locks added
// in prepare phase will get properly ordered as well.
tx.prepareRemoteTx();
if (req.last()) {
assert !F.isEmpty(req.transactionNodes()) : "Received last prepare request with empty transaction nodes: " + req;
tx.state(PREPARED);
}
res.invalidPartitionsByCacheId(tx.invalidPartitions());
if (tx.empty() && req.last()) {
tx.rollbackRemoteTx();
return null;
}
return tx;
}
return null;
}
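Two defensive patterns dominate the write loop above: each partition is reserved before its entry is touched and released in a finally block, and anything signalling an unusable partition is recorded on the transaction rather than failing the whole prepare. A minimal sketch of that reserve/process/release shape follows; Partition, Topology, InvalidPartitionException and applyWrite are hypothetical stand-ins, not the real Ignite classes.

import java.util.HashSet;
import java.util.Set;

// Hypothetical sketch of the per-entry partition handling in startRemoteTx().
final class ReservePartitionSketch {
    static final class InvalidPartitionException extends RuntimeException {
        final int part;

        InvalidPartitionException(int part) {
            this.part = part;
        }
    }

    interface Partition {
        /** @return {@code true} if the partition was reserved and may be used. */
        boolean reserve();

        void release();
    }

    interface Topology {
        /** @return The local partition, or {@code null} if it is not owned here. */
        Partition localPartition(int part);
    }

    final Set<Integer> invalidParts = new HashSet<>();

    void processWrite(Topology top, int part, Runnable applyWrite) {
        Partition locPart = top.localPartition(part);

        // Not owned locally or could not be reserved: mark the partition invalid for this tx.
        if (locPart == null || !locPart.reserve()) {
            invalidParts.add(part);

            return;
        }

        try {
            applyWrite.run();
        }
        catch (InvalidPartitionException e) {
            // Partition became invalid while applying the write: record it and move on.
            invalidParts.add(e.part);
        }
        finally {
            locPart.release();
        }
    }
}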
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
The class GridDhtGetFuture, method getAsync:
/**
* @param keys Keys to get.
* @return Future for local get.
*/
@SuppressWarnings({ "unchecked", "IfMayBeConditional" })
private IgniteInternalFuture<Collection<GridCacheEntryInfo>> getAsync(final Map<KeyCacheObject, Boolean> keys) {
if (F.isEmpty(keys))
return new GridFinishedFuture<Collection<GridCacheEntryInfo>>(Collections.<GridCacheEntryInfo>emptyList());
String taskName0 = cctx.kernalContext().job().currentTaskName();
if (taskName0 == null)
taskName0 = cctx.kernalContext().task().resolveTaskName(taskNameHash);
final String taskName = taskName0;
GridCompoundFuture<Boolean, Boolean> txFut = null;
ReaderArguments readerArgs = null;
if (addReaders && !skipVals && !cctx.localNodeId().equals(reader)) {
for (Map.Entry<KeyCacheObject, Boolean> k : keys.entrySet()) {
if (!k.getValue())
continue;
while (true) {
GridDhtCacheEntry e = cache().entryExx(k.getKey(), topVer);
try {
if (e.obsolete())
continue;
boolean addReader = !e.deleted();
if (addReader) {
e.unswap(false);
// we have to add reader again later.
if (readerArgs == null)
readerArgs = new ReaderArguments(reader, msgId, topVer);
}
// Register reader. If there are active transactions for this entry,
// then will wait for their completion before proceeding.
// TODO: IGNITE-3498:
// TODO: What if any transaction we wait for actually removes this entry?
// TODO: In this case seems like we will be stuck with untracked near entry.
// TODO: To fix, check that reader is contained in the list of readers once
// TODO: again after the returned future completes - if not, try again.
IgniteInternalFuture<Boolean> f = addReader ? e.addReader(reader, msgId, topVer) : null;
if (f != null) {
if (txFut == null)
txFut = new GridCompoundFuture<>(CU.boolReducer());
txFut.add(f);
}
break;
} catch (IgniteCheckedException err) {
return new GridFinishedFuture<>(err);
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry when getting a DHT value: " + e);
} finally {
cctx.evicts().touch(e, topVer);
}
}
}
if (txFut != null)
txFut.markInitialized();
}
IgniteInternalFuture<Map<KeyCacheObject, EntryGetResult>> fut;
if (txFut == null || txFut.isDone()) {
fut = cache().getDhtAllAsync(keys.keySet(), readerArgs, readThrough, subjId, taskName, expiryPlc, skipVals, recovery);
} else {
final ReaderArguments args = readerArgs;
// If we are here, then there were active transactions for some entries
// when we were adding the reader. In that case we must wait for those
// transactions to complete.
fut = new GridEmbeddedFuture<>(txFut, new C2<Boolean, Exception, IgniteInternalFuture<Map<KeyCacheObject, EntryGetResult>>>() {
@Override
public IgniteInternalFuture<Map<KeyCacheObject, EntryGetResult>> apply(Boolean b, Exception e) {
if (e != null)
throw new GridClosureException(e);
return cache().getDhtAllAsync(keys.keySet(), args, readThrough, subjId, taskName, expiryPlc, skipVals, recovery);
}
});
}
if (fut.isDone()) {
if (fut.error() != null)
onDone(fut.error());
else
return new GridFinishedFuture<>(toEntryInfos(fut.result()));
}
return new GridEmbeddedFuture<>(new C2<Map<KeyCacheObject, EntryGetResult>, Exception, Collection<GridCacheEntryInfo>>() {
@Override
public Collection<GridCacheEntryInfo> apply(Map<KeyCacheObject, EntryGetResult> map, Exception e) {
if (e != null) {
onDone(e);
return Collections.emptyList();
} else
return toEntryInfos(map);
}
}, fut);
}
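The future plumbing at the end follows one rule: if registering readers produced no pending futures, issue the DHT get immediately; otherwise chain the get after all of them complete. A sketch of the same chaining with java.util.concurrent.CompletableFuture; readerFuts and dhtGet() are hypothetical stand-ins for the Ignite compound and embedded futures, not the real API.

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

// Hypothetical sketch of the "wait for reader registration, then get" chaining in getAsync().
final class ChainedGetSketch {
    CompletableFuture<Map<String, Object>> getAsync(Collection<String> keys,
        List<CompletableFuture<Boolean>> readerFuts) {
        // No active transactions to wait for: run the get right away.
        if (readerFuts.isEmpty())
            return dhtGet(keys);

        CompletableFuture<Void> all = CompletableFuture.allOf(readerFuts.toArray(new CompletableFuture[0]));

        // Some entries had active transactions when the reader was added; the get must run
        // only after those transactions complete.
        return all.thenCompose(ignored -> dhtGet(keys));
    }

    private CompletableFuture<Map<String, Object>> dhtGet(Collection<String> keys) {
        return CompletableFuture.completedFuture(Collections.<String, Object>emptyMap());
    }
}

The isEmpty() check plays the role of the txFut == null || txFut.isDone() test in the original: when nothing needs to be awaited, the extra hop through an embedded future is skipped.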