Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException in project ignite by apache.
The class GridDhtPreloader, method processForceKeysRequest0.
/**
 * @param node Node that originated the request.
 * @param msg Force keys message.
 */
private void processForceKeysRequest0(ClusterNode node, GridDhtForceKeysRequest msg) {
    if (!enterBusy())
        return;

    try {
        ClusterNode loc = cctx.localNode();

        GridDhtForceKeysResponse res = new GridDhtForceKeysResponse(cctx.cacheId(), msg.futureId(), msg.miniId(),
            cctx.deploymentEnabled());

        for (KeyCacheObject k : msg.keys()) {
            int p = cctx.affinity().partition(k);

            GridDhtLocalPartition locPart = top.localPartition(p, AffinityTopologyVersion.NONE, false);

            // If this node is no longer an owner.
            if (locPart == null && !top.owners(p).contains(loc)) {
                res.addMissed(k);

                continue;
            }

            GridCacheEntryEx entry = null;

            while (true) {
                try {
                    entry = cctx.dht().entryEx(k);

                    entry.unswap();

                    GridCacheEntryInfo info = entry.info();

                    if (info == null) {
                        assert entry.obsolete() : entry;

                        continue;
                    }

                    if (!info.isNew())
                        res.addInfo(info);

                    cctx.evicts().touch(entry, msg.topologyVersion());

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry: " + k);
                }
                catch (GridDhtInvalidPartitionException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Local node is no longer an owner: " + p);

                    res.addMissed(k);

                    break;
                }
            }
        }

        if (log.isDebugEnabled())
            log.debug("Sending force key response [node=" + node.id() + ", res=" + res + ']');

        cctx.io().send(node, res, cctx.ioPolicy());
    }
    catch (ClusterTopologyCheckedException ignore) {
        if (log.isDebugEnabled())
            log.debug("Received force key request from failed node (will ignore) [nodeId=" + node.id() + ", req=" + msg + ']');
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to reply to force key request [nodeId=" + node.id() + ", req=" + msg + ']', e);
    }
    finally {
        leaveBusy();
    }
}
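Note the shape of the per-key loop above: GridCacheEntryRemovedException is treated as transient (retry the same key), while GridDhtInvalidPartitionException is terminal for that key (report it as missed and move on). Below is a minimal, self-contained sketch of that control flow; EntryRemovedException, InvalidPartitionException, and Lookup are hypothetical stand-ins for illustration, not Ignite API.

import java.util.ArrayList;
import java.util.List;

public class RetryLoopSketch {
    // Hypothetical stand-ins for Ignite's internal exceptions.
    static class EntryRemovedException extends Exception {}
    static class InvalidPartitionException extends Exception {}

    interface Lookup {
        String read(String key) throws EntryRemovedException, InvalidPartitionException;
    }

    /** Collects values for the given keys; keys whose partition left this node go to 'missed'. */
    static List<String> collect(Iterable<String> keys, Lookup lookup, List<String> missed) {
        List<String> res = new ArrayList<>();

        for (String key : keys) {
            while (true) {
                try {
                    res.add(lookup.read(key));

                    break; // Done with this key.
                }
                catch (EntryRemovedException ignore) {
                    // Transient: a concurrent remove obsoleted the entry, retry the same key.
                }
                catch (InvalidPartitionException ignore) {
                    // Terminal for this key: the partition moved away, report it as missed.
                    missed.add(key);

                    break;
                }
            }
        }

        return res;
    }
}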
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException in project ignite by apache.
The class GridDhtAtomicCache, method getAllAsync0.
/**
 * Entry point to all public API get methods.
 *
 * @param keys Keys.
 * @param forcePrimary Force primary flag.
 * @param subjId Subject ID.
 * @param taskName Task name.
 * @param deserializeBinary Deserialize binary flag.
 * @param recovery Recovery mode flag.
 * @param expiryPlc Expiry policy.
 * @param skipVals Skip values flag.
 * @param skipStore Skip store flag.
 * @param needVer Need version.
 * @return Get future.
 */
private IgniteInternalFuture<Map<K, V>> getAllAsync0(@Nullable Collection<KeyCacheObject> keys,
    boolean forcePrimary,
    UUID subjId,
    String taskName,
    boolean deserializeBinary,
    boolean recovery,
    @Nullable ExpiryPolicy expiryPlc,
    boolean skipVals,
    boolean skipStore,
    boolean needVer) {
    AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion();

    final IgniteCacheExpiryPolicy expiry = skipVals ? null : expiryPolicy(expiryPlc);

    final boolean evt = !skipVals;

    // Optimisation: try to resolve value locally and escape 'get future' creation.
    if (!forcePrimary && ctx.affinityNode()) {
        try {
            Map<K, V> locVals = U.newHashMap(keys.size());

            boolean success = true;
            boolean readNoEntry = ctx.readNoEntry(expiry, false);

            // Optimistically expect that all keys are available locally (avoid creation of get future).
            for (KeyCacheObject key : keys) {
                if (readNoEntry) {
                    CacheDataRow row = ctx.offheap().read(ctx, key);

                    if (row != null) {
                        long expireTime = row.expireTime();

                        if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
                            ctx.addResult(locVals, key, row.value(), skipVals, false, deserializeBinary, true,
                                null, row.version(), 0, 0, needVer);

                            if (evt)
                                ctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
                        }
                        else
                            success = false;
                    }
                    else
                        success = false;
                }
                else {
                    GridCacheEntryEx entry = null;

                    while (true) {
                        try {
                            entry = entryEx(key);

                            // If our DHT cache does have the value, then we peek it.
                            if (entry != null) {
                                boolean isNew = entry.isNewLocked();

                                EntryGetResult getRes = null;
                                CacheObject v = null;
                                GridCacheVersion ver = null;

                                if (needVer) {
                                    getRes = entry.innerGetVersioned(null, null,
                                        /*update-metrics*/false,
                                        /*event*/evt,
                                        subjId, null, taskName, expiry, true, null);

                                    if (getRes != null) {
                                        v = getRes.value();
                                        ver = getRes.version();
                                    }
                                }
                                else {
                                    v = entry.innerGet(null, null,
                                        /*read-through*/false,
                                        /*update-metrics*/false,
                                        /*event*/evt,
                                        subjId, null, taskName, expiry, !deserializeBinary);
                                }

                                // Entry was not in memory or in swap, so we remove it from cache.
                                if (v == null) {
                                    if (isNew && entry.markObsoleteIfEmpty(context().versions().next()))
                                        removeEntry(entry);

                                    success = false;
                                }
                                else
                                    ctx.addResult(locVals, key, v, skipVals, false, deserializeBinary, true,
                                        getRes, ver, 0, 0, needVer);
                            }
                            else
                                success = false;

                            break; // While.
                        }
                        catch (GridCacheEntryRemovedException ignored) {
                            // No-op, retry.
                        }
                        catch (GridDhtInvalidPartitionException ignored) {
                            success = false;

                            break; // While.
                        }
                        finally {
                            if (entry != null)
                                ctx.evicts().touch(entry, topVer);
                        }
                    }
                }

                if (!success)
                    break;
                else if (!skipVals && ctx.statisticsEnabled())
                    metrics0().onRead(true);
            }

            if (success) {
                sendTtlUpdateRequest(expiry);

                return new GridFinishedFuture<>(locVals);
            }
        }
        catch (IgniteCheckedException e) {
            return new GridFinishedFuture<>(e);
        }
    }

    if (expiry != null)
        expiry.reset();

    // Either reload or not all values are available locally.
    GridPartitionedGetFuture<K, V> fut = new GridPartitionedGetFuture<>(ctx, keys, !skipStore, forcePrimary,
        subjId, taskName, deserializeBinary, recovery, expiry, skipVals, needVer, false);

    fut.init(topVer);

    return fut;
}
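The readNoEntry branch above is the fast path: read a raw data row off-heap, count it as a hit only if it has no expire time or has not yet expired, and bail out to the future-based path on the first miss. Here is that expiry check in isolation, as a sketch; Row, store, and tryLocalGet are hypothetical names, though Ignite's CacheDataRow behaves analogously.

import java.util.HashMap;
import java.util.Map;

public class LocalGetSketch {
    // Hypothetical row: a value plus an absolute expire time (0 = never expires).
    record Row(String value, long expireTime) {}

    /** Returns local values, or null on any miss (the caller would then create a get-future). */
    static Map<String, String> tryLocalGet(Map<String, Row> store, Iterable<String> keys) {
        Map<String, String> res = new HashMap<>();

        for (String key : keys) {
            Row row = store.get(key);

            // Miss: absent, or present but already expired.
            if (row == null || (row.expireTime() != 0 && row.expireTime() <= System.currentTimeMillis()))
                return null; // Fall back to the distributed get-future path.

            res.put(key, row.value());
        }

        return res;
    }
}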
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException in project ignite by apache.
The class GridDhtColocatedCache, method loadAsync.
/**
 * @param keys Keys to load.
 * @param readThrough Read through flag.
 * @param forcePrimary Force get from primary node flag.
 * @param topVer Topology version.
 * @param subjId Subject ID.
 * @param taskName Task name.
 * @param deserializeBinary Deserialize binary flag.
 * @param recovery Recovery mode flag.
 * @param expiryPlc Expiry policy.
 * @param skipVals Skip values flag.
 * @param needVer If {@code true} returns values as tuples containing value and version.
 * @param keepCacheObj Keep cache objects flag.
 * @return Load future.
 */
public final IgniteInternalFuture<Map<K, V>> loadAsync(@Nullable Collection<KeyCacheObject> keys,
    boolean readThrough,
    boolean forcePrimary,
    AffinityTopologyVersion topVer,
    @Nullable UUID subjId,
    String taskName,
    boolean deserializeBinary,
    boolean recovery,
    @Nullable IgniteCacheExpiryPolicy expiryPlc,
    boolean skipVals,
    boolean needVer,
    boolean keepCacheObj) {
    if (keys == null || keys.isEmpty())
        return new GridFinishedFuture<>(Collections.<K, V>emptyMap());

    if (expiryPlc == null)
        expiryPlc = expiryPolicy(null);

    // Optimisation: try to resolve value locally and escape 'get future' creation.
    if (!forcePrimary && ctx.affinityNode()) {
        try {
            Map<K, V> locVals = null;

            boolean success = true;
            boolean readNoEntry = ctx.readNoEntry(expiryPlc, false);
            boolean evt = !skipVals;

            for (KeyCacheObject key : keys) {
                if (readNoEntry) {
                    CacheDataRow row = ctx.offheap().read(ctx, key);

                    if (row != null) {
                        long expireTime = row.expireTime();

                        if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
                            if (locVals == null)
                                locVals = U.newHashMap(keys.size());

                            ctx.addResult(locVals, key, row.value(), skipVals, keepCacheObj, deserializeBinary, true,
                                null, row.version(), 0, 0, needVer);

                            if (evt)
                                ctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
                        }
                        else
                            success = false;
                    }
                    else
                        success = false;
                }
                else {
                    GridCacheEntryEx entry = null;

                    while (true) {
                        try {
                            entry = entryEx(key);

                            // If our DHT cache does have the value, then we peek it.
                            if (entry != null) {
                                boolean isNew = entry.isNewLocked();

                                EntryGetResult getRes = null;
                                CacheObject v = null;
                                GridCacheVersion ver = null;

                                if (needVer) {
                                    getRes = entry.innerGetVersioned(null, null,
                                        /*update-metrics*/false,
                                        /*event*/evt,
                                        subjId, null, taskName, expiryPlc, !deserializeBinary, null);

                                    if (getRes != null) {
                                        v = getRes.value();
                                        ver = getRes.version();
                                    }
                                }
                                else {
                                    v = entry.innerGet(null, null,
                                        /*read-through*/false,
                                        /*update-metrics*/false,
                                        /*event*/evt,
                                        subjId, null, taskName, expiryPlc, !deserializeBinary);
                                }

                                // Entry was not in memory or in swap, so we remove it from cache.
                                if (v == null) {
                                    GridCacheVersion obsoleteVer = context().versions().next();

                                    if (isNew && entry.markObsoleteIfEmpty(obsoleteVer))
                                        removeEntry(entry);

                                    success = false;
                                }
                                else {
                                    if (locVals == null)
                                        locVals = U.newHashMap(keys.size());

                                    ctx.addResult(locVals, key, v, skipVals, keepCacheObj, deserializeBinary, true,
                                        getRes, ver, 0, 0, needVer);
                                }
                            }
                            else
                                success = false;

                            break; // While.
                        }
                        catch (GridCacheEntryRemovedException ignored) {
                            // No-op, retry.
                        }
                        catch (GridDhtInvalidPartitionException ignored) {
                            success = false;

                            break; // While.
                        }
                        finally {
                            if (entry != null)
                                context().evicts().touch(entry, topVer);
                        }
                    }
                }

                if (!success)
                    break;
                else if (!skipVals && ctx.statisticsEnabled())
                    ctx.cache().metrics0().onRead(true);
            }

            if (success) {
                sendTtlUpdateRequest(expiryPlc);

                return new GridFinishedFuture<>(locVals);
            }
        }
        catch (IgniteCheckedException e) {
            return new GridFinishedFuture<>(e);
        }
    }

    if (expiryPlc != null)
        expiryPlc.reset();

    // Either reload or not all values are available locally.
    GridPartitionedGetFuture<K, V> fut = new GridPartitionedGetFuture<>(ctx, keys, readThrough, forcePrimary,
        subjId, taskName, deserializeBinary, recovery, expiryPlc, skipVals, needVer, keepCacheObj);

    fut.init(topVer);

    return fut;
}
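This method is nearly identical to getAllAsync0 above; one deliberate difference is that locVals is allocated lazily on the first local hit, so a pass where every key misses allocates nothing. The same micro-pattern in isolation, with illustrative names:

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

public class LazyResultMapSketch {
    /** Returns hits found in 'store'; allocates the result map only if at least one key hits. */
    static Map<String, String> hits(Map<String, String> store, Collection<String> keys) {
        Map<String, String> res = null; // Deferred allocation: an all-miss pass stays allocation-free.

        for (String key : keys) {
            String v = store.get(key);

            if (v == null)
                continue;

            if (res == null)
                res = new HashMap<>(keys.size()); // Sized once, on the first hit.

            res.put(key, v);
        }

        return res == null ? Map.of() : res;
    }
}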
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException in project ignite by apache.
The class IgniteCacheOffheapManagerImpl, method clear.
/**
* Clears offheap entries.
*
* @param readers {@code True} to clear readers.
*/
@SuppressWarnings("unchecked")
@Override public void clear(boolean readers) {
    GridCacheVersion obsoleteVer = null;

    GridIterator<CacheDataRow> it = rowsIterator(true, true, null);

    while (it.hasNext()) {
        KeyCacheObject key = it.next().key();

        try {
            if (obsoleteVer == null)
                obsoleteVer = cctx.versions().next();

            GridCacheEntryEx entry = cctx.cache().entryEx(key);

            entry.clear(obsoleteVer, readers);
        }
        catch (GridDhtInvalidPartitionException ignore) {
            // Ignore.
        }
        catch (IgniteCheckedException e) {
            U.error(log, "Failed to clear cache entry: " + key, e);
        }
    }
}
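The sweep above is deliberately fault-tolerant: a partition that moves or is evicted mid-iteration simply skips its keys, and any other per-entry failure is logged without aborting the whole clear. A sketch of that shape; InvalidPartitionException, ClearError, and Clearer are hypothetical stand-ins, not Ignite API.

public class TolerantSweepSketch {
    static class InvalidPartitionException extends RuntimeException {}
    static class ClearError extends Exception {}

    interface Clearer {
        void clear(String key) throws ClearError;
    }

    /** Clears every reachable key; skips keys whose partition left, logs other failures, never aborts. */
    static void sweep(Iterable<String> keys, Clearer clearer) {
        for (String key : keys) {
            try {
                clearer.clear(key);
            }
            catch (InvalidPartitionException ignore) {
                // Partition concurrently moved or evicted: nothing left to clear here.
            }
            catch (ClearError e) {
                System.err.println("Failed to clear cache entry: " + key + " (" + e + ')');
            }
        }
    }
}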
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException in project ignite by apache.
The class GridDhtAtomicCache, method updateAllAsyncInternal0.
/**
* Executes local update after preloader fetched values.
*
* @param nodeId Node ID.
* @param req Update request.
* @param completionCb Completion callback.
*/
private void updateAllAsyncInternal0(UUID nodeId, GridNearAtomicAbstractUpdateRequest req,
    UpdateReplyClosure completionCb) {
    ClusterNode node = ctx.discovery().node(nodeId);

    if (node == null) {
        U.warn(msgLog, "Skip near update request, node originated update request left [" +
            "futId=" + req.futureId() + ", node=" + nodeId + ']');

        return;
    }

    GridNearAtomicUpdateResponse res = new GridNearAtomicUpdateResponse(ctx.cacheId(), nodeId, req.futureId(),
        req.partition(), false, ctx.deploymentEnabled());

    assert !req.returnValue() || (req.operation() == TRANSFORM || req.size() == 1);

    GridDhtAtomicAbstractUpdateFuture dhtFut = null;

    boolean remap = false;

    String taskName = ctx.kernalContext().task().resolveTaskName(req.taskNameHash());

    IgniteCacheExpiryPolicy expiry = null;

    try {
        // If batch store update is enabled, we need to lock all entries.
        // First, need to acquire locks on cache entries, then check filter.
        List<GridDhtCacheEntry> locked = null;

        Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;

        try {
            GridDhtPartitionTopology top = topology();

            top.readLock();

            try {
                if (top.stopping()) {
                    res.addFailedKeys(req.keys(), new IgniteCheckedException("Failed to perform cache operation " +
                        "(cache is stopped): " + name()));

                    completionCb.apply(req, res);

                    return;
                }

                // Do not check topology version if topology was locked on near node by
                // external transaction or explicit lock.
                if (req.topologyLocked() || !needRemap(req.topologyVersion(), top.topologyVersion())) {
                    ctx.shared().database().ensureFreeSpace(ctx.memoryPolicy());

                    locked = lockEntries(req, req.topologyVersion());

                    boolean hasNear = ctx.discovery().cacheNearNode(node, name());

                    // Assign next version for update inside entries lock.
                    GridCacheVersion ver = ctx.versions().next(top.topologyVersion());

                    if (hasNear)
                        res.nearVersion(ver);

                    if (msgLog.isDebugEnabled())
                        msgLog.debug("Assigned update version [futId=" + req.futureId() + ", writeVer=" + ver + ']');

                    assert ver != null : "Got null version for update request: " + req;

                    boolean sndPrevVal = !top.rebalanceFinished(req.topologyVersion());

                    dhtFut = createDhtFuture(ver, req);

                    expiry = expiryPolicy(req.expiry());

                    GridCacheReturn retVal = null;

                    // Several keys, store is enabled, this is not a local store
                    // (conflict resolver should be used for local store), and no DR.
                    if (req.size() > 1 && writeThrough() && !req.skipStore() && !ctx.store().isLocal() &&
                        !ctx.dr().receiveEnabled()) {
                        // This method can only be used when there are no replicated entries in the batch.
                        UpdateBatchResult updRes = updateWithBatch(node, hasNear, req, res, locked, ver, dhtFut,
                            ctx.isDrEnabled(), taskName, expiry, sndPrevVal);

                        deleted = updRes.deleted();
                        dhtFut = updRes.dhtFuture();

                        if (req.operation() == TRANSFORM)
                            retVal = updRes.invokeResults();
                    }
                    else {
                        UpdateSingleResult updRes = updateSingle(node, hasNear, req, res, locked, ver, dhtFut,
                            ctx.isDrEnabled(), taskName, expiry, sndPrevVal);

                        retVal = updRes.returnValue();
                        deleted = updRes.deleted();
                        dhtFut = updRes.dhtFuture();
                    }

                    if (retVal == null)
                        retVal = new GridCacheReturn(ctx, node.isLocal(), true, null, true);

                    res.returnValue(retVal);

                    if (dhtFut != null) {
                        // To avoid deadlock, disable back-pressure for the sender data node.
                        if (req.writeSynchronizationMode() == PRIMARY_SYNC &&
                            !ctx.discovery().cacheAffinityNode(ctx.discovery().node(nodeId), ctx.name()) &&
                            !dhtFut.isDone()) {
                            final IgniteRunnable tracker = GridNioBackPressureControl.threadTracker();

                            if (tracker != null && tracker instanceof GridNioMessageTracker) {
                                ((GridNioMessageTracker)tracker).onMessageReceived();

                                dhtFut.listen(new IgniteInClosure<IgniteInternalFuture<Void>>() {
                                    @Override public void apply(IgniteInternalFuture<Void> fut) {
                                        ((GridNioMessageTracker)tracker).onMessageProcessed();
                                    }
                                });
                            }
                        }

                        ctx.mvcc().addAtomicFuture(dhtFut.id(), dhtFut);
                    }
                }
                else {
                    // Should remap all keys.
                    remap = true;

                    res.remapTopologyVersion(top.topologyVersion());
                }
            }
            finally {
                top.readUnlock();
            }
        }
        catch (GridCacheEntryRemovedException e) {
            assert false : "Entry should not become obsolete while holding lock.";

            e.printStackTrace();
        }
        finally {
            if (locked != null)
                unlockEntries(locked, req.topologyVersion());

            // Enqueue if necessary after locks release.
            if (deleted != null) {
                assert !deleted.isEmpty();
                assert ctx.deferredDelete() : this;

                for (IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion> e : deleted)
                    ctx.onDeferredDelete(e.get1(), e.get2());
            }

            // TODO fire events only after successful fsync
            if (ctx.shared().wal() != null)
                ctx.shared().wal().fsync(null);
        }
    }
    catch (GridDhtInvalidPartitionException ignore) {
        if (log.isDebugEnabled())
            log.debug("Caught invalid partition exception for cache entry (will remap update request): " + req);

        remap = true;

        res.remapTopologyVersion(ctx.topology().topologyVersion());
    }
    catch (Throwable e) {
        // At least RuntimeException can be thrown by the code above when GridCacheContext is cleaned and there is
        // an attempt to use cleaned resources.
        U.error(log, "Unexpected exception during cache update", e);

        res.addFailedKeys(req.keys(), e);

        completionCb.apply(req, res);

        if (e instanceof Error)
            throw (Error)e;

        return;
    }

    if (remap) {
        assert dhtFut == null;

        completionCb.apply(req, res);
    }
    else if (dhtFut != null)
        dhtFut.map(node, res.returnValue(), res, completionCb);

    if (req.writeSynchronizationMode() != FULL_ASYNC)
        req.cleanup(!node.isLocal());

    sendTtlUpdateRequest(expiry);
}
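At the top level this method implements a remap protocol: take the topology read lock, then either apply the update under the requested topology version or answer with the current version so the near node retries; a GridDhtInvalidPartitionException escaping the update collapses into the same remap answer. A compressed sketch of that decision, assuming a single long topology version instead of Ignite's AffinityTopologyVersion; all types here are hypothetical stand-ins, not Ignite API.

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class RemapSketch {
    static class InvalidPartitionException extends RuntimeException {}

    interface Update {
        void apply(long topVer) throws InvalidPartitionException;
    }

    private final ReadWriteLock topLock = new ReentrantReadWriteLock();

    private volatile long topVer = 1;

    /** Returns -1 if the update was applied, else the topology version the sender must remap to. */
    long applyOrRemap(long reqTopVer, Update update) {
        topLock.readLock().lock(); // Freeze topology while deciding and applying.

        try {
            if (reqTopVer != topVer)
                return topVer; // Stale request: the sender must remap to the current version.

            update.apply(topVer);

            return -1; // Applied.
        }
        catch (InvalidPartitionException ignore) {
            return topVer; // Partition moved mid-update: treated exactly like a stale topology.
        }
        finally {
            topLock.readLock().unlock();
        }
    }
}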