Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
Class GridDhtColocatedCache, method lockAllAsync0.
/**
 * @param cacheCtx Cache context.
 * @param tx Started colocated transaction (if any).
 * @param threadId Thread ID.
 * @param ver Lock version.
 * @param topVer Topology version.
 * @param keys Mapped keys.
 * @param txRead Tx read flag.
 * @param retval Return value flag.
 * @param timeout Lock timeout.
 * @param createTtl TTL for create operation.
 * @param accessTtl TTL for read operation.
 * @param filter Optional filter.
 * @param skipStore Skip store flag.
 * @param keepBinary Keep binary flag.
 * @return Lock future.
 */
private IgniteInternalFuture<Exception> lockAllAsync0(
    GridCacheContext<?, ?> cacheCtx,
    @Nullable final GridNearTxLocal tx,
    long threadId,
    final GridCacheVersion ver,
    AffinityTopologyVersion topVer,
    final Collection<KeyCacheObject> keys,
    final boolean txRead,
    boolean retval,
    final long timeout,
    final long createTtl,
    final long accessTtl,
    @Nullable final CacheEntryPredicate[] filter,
    boolean skipStore,
    boolean keepBinary
) {
    int cnt = keys.size();

    if (tx == null) {
        GridDhtLockFuture fut = new GridDhtLockFuture(ctx, ctx.localNodeId(), ver, topVer, cnt, txRead, retval,
            timeout, tx, threadId, createTtl, accessTtl, filter, skipStore, keepBinary);

        // Add before mapping.
        if (!ctx.mvcc().addFuture(fut))
            throw new IllegalStateException("Duplicate future ID: " + fut);

        boolean timedout = false;

        for (KeyCacheObject key : keys) {
            if (timedout)
                break;

            while (true) {
                GridDhtCacheEntry entry = entryExx(key, topVer);

                try {
                    fut.addEntry(key == null ? null : entry);

                    if (fut.isDone())
                        timedout = true;

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry when adding lock (will retry): " + entry);
                }
                catch (GridDistributedLockCancelledException e) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to add entry [err=" + e + ", entry=" + entry + ']');

                    fut.onError(e);

                    return new GridDhtFinishedFuture<>(e);
                }
            }
        }

        // This will send remote messages.
        fut.map();

        return new GridDhtEmbeddedFuture<>(new C2<Boolean, Exception, Exception>() {
            @Override public Exception apply(Boolean b, Exception e) {
                if (e != null)
                    e = U.unwrap(e);
                else if (!b)
                    e = new GridCacheLockTimeoutException(ver);

                return e;
            }
        }, fut);
    }
    else {
        // Handle implicit locks for pessimistic transactions.
        ctx.tm().txContext(tx);

        if (log.isDebugEnabled())
            log.debug("Performing colocated lock [tx=" + tx + ", keys=" + keys + ']');

        IgniteInternalFuture<GridCacheReturn> txFut = tx.lockAllAsync(cacheCtx, keys, retval, txRead, createTtl,
            accessTtl, skipStore, keepBinary);

        return new GridDhtEmbeddedFuture<>(new C2<GridCacheReturn, Exception, Exception>() {
            @Override public Exception apply(GridCacheReturn ret, Exception e) {
                if (e != null)
                    e = U.unwrap(e);

                assert !tx.empty();

                return e;
            }
        }, txFut);
    }
}
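The while (true) loop above is the recurring Ignite idiom for GridCacheEntryRemovedException: the exception signals that the entry object became obsolete between lookup and use, so the code obtains a fresh entry and retries. A minimal sketch of the idiom, where processEntry is a hypothetical stand-in for the per-entry work (fut.addEntry in the method above):

// Sketch only: 'processEntry' is a hypothetical placeholder for the
// per-entry operation (fut.addEntry in the method above).
while (true) {
    GridDhtCacheEntry entry = entryExx(key, topVer);

    try {
        processEntry(entry);

        break; // Done with this key.
    }
    catch (GridCacheEntryRemovedException ignore) {
        // Entry was concurrently removed from the cache map; loop
        // again to look up a fresh entry and retry.
    }
}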
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
Class GridDhtColocatedCache, method loadAsync.
/**
 * @param keys Keys to load.
 * @param readThrough Read through flag.
 * @param forcePrimary Force get from primary node flag.
 * @param topVer Topology version.
 * @param subjId Subject ID.
 * @param taskName Task name.
 * @param deserializeBinary Deserialize binary flag.
 * @param recovery Recovery mode flag.
 * @param expiryPlc Expiry policy.
 * @param skipVals Skip values flag.
 * @param canRemap Flag indicating whether future can be remapped on a newer topology version.
 * @param needVer If {@code true} returns values as tuples containing value and version.
 * @param keepCacheObj Keep cache objects flag.
 * @return Load future.
 */
public final IgniteInternalFuture<Map<K, V>> loadAsync(
    @Nullable Collection<KeyCacheObject> keys,
    boolean readThrough,
    boolean forcePrimary,
    AffinityTopologyVersion topVer,
    @Nullable UUID subjId,
    String taskName,
    boolean deserializeBinary,
    boolean recovery,
    @Nullable IgniteCacheExpiryPolicy expiryPlc,
    boolean skipVals,
    boolean canRemap,
    boolean needVer,
    boolean keepCacheObj
) {
    if (keys == null || keys.isEmpty())
        return new GridFinishedFuture<>(Collections.<K, V>emptyMap());

    if (expiryPlc == null)
        expiryPlc = expiryPolicy(null);

    // Optimisation: try to resolve value locally and escape 'get future' creation.
    if (!forcePrimary && ctx.affinityNode()) {
        try {
            Map<K, V> locVals = null;

            boolean success = true;
            boolean readNoEntry = ctx.readNoEntry(expiryPlc, false);
            boolean evt = !skipVals;

            for (KeyCacheObject key : keys) {
                if (readNoEntry) {
                    CacheDataRow row = ctx.offheap().read(key);

                    if (row != null) {
                        long expireTime = row.expireTime();

                        if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
                            if (locVals == null)
                                locVals = U.newHashMap(keys.size());

                            ctx.addResult(locVals, key, row.value(), skipVals, keepCacheObj, deserializeBinary,
                                true, null, row.version(), 0, 0, needVer);

                            if (evt) {
                                ctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
                            }
                        }
                        else
                            success = false;
                    }
                    else
                        success = false;
                }
                else {
                    GridCacheEntryEx entry = null;

                    while (true) {
                        try {
                            entry = entryEx(key);

                            // If our DHT cache has the value, then we peek it.
                            if (entry != null) {
                                boolean isNew = entry.isNewLocked();

                                EntryGetResult getRes = null;
                                CacheObject v = null;
                                GridCacheVersion ver = null;

                                if (needVer) {
                                    getRes = entry.innerGetVersioned(null, null, /*update-metrics*/false,
                                        /*event*/evt, subjId, null, taskName, expiryPlc, !deserializeBinary, null);

                                    if (getRes != null) {
                                        v = getRes.value();
                                        ver = getRes.version();
                                    }
                                }
                                else {
                                    v = entry.innerGet(null, null, /*read-through*/false, /*update-metrics*/false,
                                        /*event*/evt, subjId, null, taskName, expiryPlc, !deserializeBinary);
                                }

                                // Entry was not in memory or in swap, so we remove it from cache.
                                if (v == null) {
                                    GridCacheVersion obsoleteVer = context().versions().next();

                                    if (isNew && entry.markObsoleteIfEmpty(obsoleteVer))
                                        removeEntry(entry);

                                    success = false;
                                }
                                else {
                                    if (locVals == null)
                                        locVals = U.newHashMap(keys.size());

                                    ctx.addResult(locVals, key, v, skipVals, keepCacheObj, deserializeBinary,
                                        true, getRes, ver, 0, 0, needVer);
                                }
                            }
                            else
                                success = false;

                            break; // While.
                        }
                        catch (GridCacheEntryRemovedException ignored) {
                            // No-op, retry.
                        }
                        catch (GridDhtInvalidPartitionException ignored) {
                            success = false;

                            break; // While.
                        }
                        finally {
                            if (entry != null)
                                context().evicts().touch(entry, topVer);
                        }
                    }
                }

                if (!success)
                    break;
                else if (!skipVals && ctx.config().isStatisticsEnabled())
                    ctx.cache().metrics0().onRead(true);
            }

            if (success) {
                sendTtlUpdateRequest(expiryPlc);

                return new GridFinishedFuture<>(locVals);
            }
        }
        catch (IgniteCheckedException e) {
            return new GridFinishedFuture<>(e);
        }
    }

    if (expiryPlc != null)
        expiryPlc.reset();

    // Either reload or not all values are available locally.
    GridPartitionedGetFuture<K, V> fut = new GridPartitionedGetFuture<>(ctx, keys, topVer, readThrough, forcePrimary,
        subjId, taskName, deserializeBinary, recovery, expiryPlc, skipVals, canRemap, needVer, keepCacheObj);

    fut.init();

    return fut;
}
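In outline, loadAsync is a two-tier read: serve every key from local storage when possible and return an already-completed future, otherwise fall back to a distributed get future. A condensed sketch of that control flow, where tryResolveLocally is a hypothetical helper summarizing the local fast path above:

// Condensed sketch of the loadAsync control flow. 'tryResolveLocally' is a
// hypothetical helper standing in for the local fast path above; assume it
// returns null if any key is missing or expired locally.
Map<K, V> locVals = tryResolveLocally(keys);

if (locVals != null)
    return new GridFinishedFuture<>(locVals); // No network round trip needed.

// Some keys could not be served locally: the get future maps keys to nodes
// and may read through the store.
GridPartitionedGetFuture<K, V> fut = new GridPartitionedGetFuture<>(ctx, keys, topVer, readThrough, forcePrimary,
    subjId, taskName, deserializeBinary, recovery, expiryPlc, skipVals, canRemap, needVer, keepCacheObj);

fut.init();

return fut;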
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
Class GridDhtAtomicCache, method getAllAsync0.
/**
 * Entry point to all public API get methods.
 *
 * @param keys Keys.
 * @param forcePrimary Force primary flag.
 * @param subjId Subject ID.
 * @param taskName Task name.
 * @param deserializeBinary Deserialize binary flag.
 * @param recovery Recovery mode flag.
 * @param expiryPlc Expiry policy.
 * @param skipVals Skip values flag.
 * @param skipStore Skip store flag.
 * @param canRemap Flag indicating whether future can be remapped on a newer topology version.
 * @param needVer Need version flag.
 * @return Get future.
 */
private IgniteInternalFuture<Map<K, V>> getAllAsync0(
    @Nullable Collection<KeyCacheObject> keys,
    boolean forcePrimary,
    UUID subjId,
    String taskName,
    boolean deserializeBinary,
    boolean recovery,
    @Nullable ExpiryPolicy expiryPlc,
    boolean skipVals,
    boolean skipStore,
    boolean canRemap,
    boolean needVer
) {
    AffinityTopologyVersion topVer = canRemap ? ctx.affinity().affinityTopologyVersion() :
        ctx.shared().exchange().readyAffinityVersion();

    final IgniteCacheExpiryPolicy expiry = skipVals ? null : expiryPolicy(expiryPlc);

    final boolean evt = !skipVals;

    // Optimisation: try to resolve value locally and escape 'get future' creation.
    if (!forcePrimary && ctx.affinityNode()) {
        try {
            Map<K, V> locVals = U.newHashMap(keys.size());

            boolean success = true;
            boolean readNoEntry = ctx.readNoEntry(expiry, false);

            // Optimistically expect that all keys are available locally (avoid creation of get future).
            for (KeyCacheObject key : keys) {
                if (readNoEntry) {
                    CacheDataRow row = ctx.offheap().read(key);

                    if (row != null) {
                        long expireTime = row.expireTime();

                        if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
                            ctx.addResult(locVals, key, row.value(), skipVals, false, deserializeBinary, true,
                                null, row.version(), 0, 0, needVer);

                            if (evt) {
                                ctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
                            }
                        }
                        else
                            success = false;
                    }
                    else
                        success = false;
                }
                else {
                    GridCacheEntryEx entry = null;

                    while (true) {
                        try {
                            entry = entryEx(key);

                            // If our DHT cache has the value, then we peek it.
                            if (entry != null) {
                                boolean isNew = entry.isNewLocked();

                                EntryGetResult getRes = null;
                                CacheObject v = null;
                                GridCacheVersion ver = null;

                                if (needVer) {
                                    getRes = entry.innerGetVersioned(null, null, /*update-metrics*/false,
                                        /*event*/evt, subjId, null, taskName, expiry, true, null);

                                    if (getRes != null) {
                                        v = getRes.value();
                                        ver = getRes.version();
                                    }
                                }
                                else {
                                    v = entry.innerGet(null, null, /*read-through*/false, /*update-metrics*/false,
                                        /*event*/evt, subjId, null, taskName, expiry, !deserializeBinary);
                                }

                                // Entry was not in memory or in swap, so we remove it from cache.
                                if (v == null) {
                                    if (isNew && entry.markObsoleteIfEmpty(context().versions().next()))
                                        removeEntry(entry);

                                    success = false;
                                }
                                else {
                                    ctx.addResult(locVals, key, v, skipVals, false, deserializeBinary, true,
                                        getRes, ver, 0, 0, needVer);
                                }
                            }
                            else
                                success = false;

                            break; // While.
                        }
                        catch (GridCacheEntryRemovedException ignored) {
                            // No-op, retry.
                        }
                        catch (GridDhtInvalidPartitionException ignored) {
                            success = false;

                            break; // While.
                        }
                        finally {
                            if (entry != null)
                                ctx.evicts().touch(entry, topVer);
                        }
                    }
                }

                if (!success)
                    break;
                else if (!skipVals && ctx.config().isStatisticsEnabled())
                    metrics0().onRead(true);
            }

            if (success) {
                sendTtlUpdateRequest(expiry);

                return new GridFinishedFuture<>(locVals);
            }
        }
        catch (IgniteCheckedException e) {
            return new GridFinishedFuture<>(e);
        }
    }

    if (expiry != null)
        expiry.reset();

    // Either reload or not all values are available locally.
    GridPartitionedGetFuture<K, V> fut = new GridPartitionedGetFuture<>(ctx, keys, topVer, !skipStore, forcePrimary,
        subjId, taskName, deserializeBinary, recovery, expiry, skipVals, canRemap, needVer, false);

    fut.init();

    return fut;
}
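Since getAllAsync0 is the entry point for the public get methods on an atomic cache, the local fast path and the removed-entry retry loop above run under ordinary calls such as the following (the cache name and keys are illustrative):

// Illustrative caller: a plain getAll on an ATOMIC cache funnels into
// getAllAsync0 above. The cache name and keys are arbitrary examples.
Ignite ignite = Ignition.ignite();

IgniteCache<Integer, String> cache = ignite.getOrCreateCache("atomicCache");

Map<Integer, String> vals = cache.getAll(new HashSet<>(Arrays.asList(1, 2, 3)));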
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
Class GridPartitionedGetFuture, method localGet.
/**
 * @param key Key.
 * @param part Partition.
 * @param locVals Local values.
 * @return {@code True} if there is no need to further search value.
 */
private boolean localGet(KeyCacheObject key, int part, Map<K, V> locVals) {
    assert cctx.affinityNode() : this;

    GridDhtCacheAdapter<K, V> cache = cache();

    boolean readNoEntry = cctx.readNoEntry(expiryPlc, false);
    boolean evt = !skipVals;

    while (true) {
        try {
            boolean skipEntry = readNoEntry;

            EntryGetResult getRes = null;
            CacheObject v = null;
            GridCacheVersion ver = null;

            if (readNoEntry) {
                CacheDataRow row = cctx.offheap().read(key);

                if (row != null) {
                    long expireTime = row.expireTime();

                    if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
                        v = row.value();

                        if (needVer)
                            ver = row.version();

                        if (evt) {
                            cctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
                        }
                    }
                    else
                        skipEntry = false;
                }
            }

            if (!skipEntry) {
                GridCacheEntryEx entry = cache.entryEx(key);

                // If our DHT cache has the value, then we peek it.
                if (entry != null) {
                    boolean isNew = entry.isNewLocked();

                    if (needVer) {
                        getRes = entry.innerGetVersioned(null, null, /*update-metrics*/false,
                            /*event*/evt, subjId, null, taskName, expiryPlc, !deserializeBinary, null);

                        if (getRes != null) {
                            v = getRes.value();
                            ver = getRes.version();
                        }
                    }
                    else {
                        v = entry.innerGet(null, null, /*read-through*/false, /*update-metrics*/false,
                            /*event*/evt, subjId, null, taskName, expiryPlc, !deserializeBinary);
                    }

                    cache.context().evicts().touch(entry, topVer);

                    // Entry was not in memory or in swap, so we remove it from cache.
                    if (v == null) {
                        if (isNew && entry.markObsoleteIfEmpty(ver))
                            cache.removeEntry(entry);
                    }
                }
            }

            if (v != null) {
                cctx.addResult(locVals, key, v, skipVals, keepCacheObjects, deserializeBinary, true, getRes, ver,
                    0, 0, needVer);

                return true;
            }

            boolean topStable = cctx.isReplicated() || topVer.equals(cctx.topology().topologyVersion());

            // Entry not found, do not continue search if topology did not change and there is no store.
            if (!cctx.readThroughConfigured() && (topStable || partitionOwned(part))) {
                if (!skipVals && cctx.config().isStatisticsEnabled())
                    cache.metrics0().onRead(false);

                return true;
            }

            return false;
        }
        catch (GridCacheEntryRemovedException ignored) {
            // No-op, will retry.
        }
        catch (GridDhtInvalidPartitionException ignored) {
            return false;
        }
        catch (IgniteCheckedException e) {
            onDone(e);

            return true;
        }
    }
}
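localGet returns true when the search for a key can stop (a value was found, an error completed the future, or absence is conclusive) and false when the caller must ask a remote node. A hedged sketch of a caller honoring that contract, where addRemoteMapping is a hypothetical placeholder for queueing a remote request:

// Sketch of a caller honoring the localGet contract. 'addRemoteMapping' is
// a hypothetical placeholder for mapping the key to a remote node.
for (KeyCacheObject key : keys) {
    int part = cctx.affinity().partition(key);

    if (localGet(key, part, locVals))
        continue; // Resolved locally (or conclusively absent).

    addRemoteMapping(key, part);
}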
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException in project ignite by apache.
Class GridDhtColocatedLockFuture, method map0.
/**
 * @param keys Keys to map.
 * @param remap Remap flag.
 * @param topLocked Topology locked flag.
 * @throws IgniteCheckedException If mapping failed.
 */
private synchronized void map0(Collection<KeyCacheObject> keys, boolean remap, boolean topLocked) throws IgniteCheckedException {
    AffinityTopologyVersion topVer = this.topVer;

    assert topVer != null;
    assert topVer.topologyVersion() > 0;

    if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for cache " +
            "(all partition nodes left the grid): " + cctx.name()));

        return;
    }

    boolean clientNode = cctx.kernalContext().clientNode();

    assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));

    // First assume this node is primary for all keys passed in.
    if (!clientNode && mapAsPrimary(keys, topVer))
        return;

    mappings = new ArrayDeque<>();

    // Assign keys to primary nodes.
    GridNearLockMapping map = null;

    for (KeyCacheObject key : keys) {
        GridNearLockMapping updated = map(key, map, topVer);

        // If a new mapping was created, add it to the collection.
        if (updated != map) {
            mappings.add(updated);

            if (tx != null && updated.node().isLocal())
                tx.colocatedLocallyMapped(true);
        }

        map = updated;
    }

    if (isDone()) {
        if (log.isDebugEnabled())
            log.debug("Abandoning (re)map because future is done: " + this);

        return;
    }

    if (log.isDebugEnabled())
        log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

    boolean hasRmtNodes = false;

    boolean first = true;

    // Create mini futures.
    for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
        GridNearLockMapping mapping = iter.next();

        ClusterNode node = mapping.node();

        Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();

        boolean loc = node.equals(cctx.localNode());

        assert !mappedKeys.isEmpty();

        GridNearLockRequest req = null;

        Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());

        for (KeyCacheObject key : mappedKeys) {
            IgniteTxKey txKey = cctx.txKey(key);

            GridDistributedCacheEntry entry = null;

            if (tx != null) {
                IgniteTxEntry txEntry = tx.entry(txKey);

                if (txEntry != null) {
                    entry = (GridDistributedCacheEntry)txEntry.cached();

                    if (entry != null && loc == entry.detached()) {
                        entry = cctx.colocated().entryExx(key, topVer, true);

                        txEntry.cached(entry);
                    }
                }
            }

            boolean explicit;

            while (true) {
                try {
                    if (entry == null)
                        entry = cctx.colocated().entryExx(key, topVer, true);

                    if (!cctx.isAll(entry, filter)) {
                        if (log.isDebugEnabled())
                            log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                        onComplete(false, false);

                        return;
                    }

                    assert loc ^ entry.detached() : "Invalid entry [loc=" + loc + ", entry=" + entry + ']';

                    GridCacheMvccCandidate cand = addEntry(entry);

                    // Will either return value from dht cache or null if this is a miss.
                    IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.detached() ? null :
                        ((GridDhtCacheEntry)entry).versionedValue(topVer);

                    GridCacheVersion dhtVer = null;

                    if (val != null) {
                        dhtVer = val.get1();

                        valMap.put(key, val);
                    }

                    if (cand != null && !cand.reentry()) {
                        if (req == null) {
                            boolean clientFirst = false;

                            if (first) {
                                clientFirst = clientNode && !topLocked && (tx == null || !tx.hasRemoteLocks());

                                first = false;
                            }

                            assert !implicitTx() && !implicitSingleTx() : tx;

                            req = new GridNearLockRequest(
                                cctx.cacheId(),
                                topVer,
                                cctx.nodeId(),
                                threadId,
                                futId,
                                lockVer,
                                inTx(),
                                read,
                                retval,
                                isolation(),
                                isInvalidate(),
                                timeout,
                                mappedKeys.size(),
                                inTx() ? tx.size() : mappedKeys.size(),
                                inTx() && tx.syncMode() == FULL_SYNC,
                                inTx() ? tx.subjectId() : null,
                                inTx() ? tx.taskNameHash() : 0,
                                read ? createTtl : -1L,
                                read ? accessTtl : -1L,
                                skipStore,
                                keepBinary,
                                clientFirst,
                                cctx.deploymentEnabled());

                            mapping.request(req);
                        }

                        distributedKeys.add(key);

                        if (tx != null)
                            tx.addKeyMapping(txKey, mapping.node());

                        // Include DHT version to match remote DHT entry.
                        req.addKeyBytes(key, retval, dhtVer, cctx);
                    }

                    explicit = inTx() && cand == null;

                    if (explicit)
                        tx.addKeyMapping(txKey, mapping.node());

                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);

                    entry = null;
                }
            }

            // Mark mapping explicit lock flag.
            if (explicit) {
                boolean marked = tx != null && tx.markExplicit(node.id());

                assert tx == null || marked;
            }
        }

        if (!distributedKeys.isEmpty()) {
            mapping.distributedKeys(distributedKeys);

            hasRmtNodes |= !mapping.node().isLocal();
        }
        else {
            assert mapping.request() == null;

            iter.remove();
        }
    }

    if (hasRmtNodes) {
        trackable = true;

        if (!remap && !cctx.mvcc().addFuture(this))
            throw new IllegalStateException("Duplicate future ID: " + this);
    }
    else
        trackable = false;

    proceedMapping();
}
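map0 runs while a pessimistic transaction (or explicit lock) acquires keys on their primary nodes. One way to exercise this path from the public API, assuming a TRANSACTIONAL cache named "txCache" (names and keys are illustrative):

// Illustrative caller: a read inside a pessimistic transaction on a
// TRANSACTIONAL cache acquires colocated locks, which drives map0 above.
Ignite ignite = Ignition.ignite();

IgniteCache<Integer, String> cache = ignite.cache("txCache");

try (Transaction tx = ignite.transactions().txStart(
    TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
    String val = cache.get(1); // Lock on key 1 is held until commit/rollback.

    cache.put(1, val == null ? "initial" : val + "!");

    tx.commit();
}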