Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
Class GridDhtGetSingleFuture, method map.
/**
 * @param key Key.
 * @return {@code True} if mapped.
 */
private boolean map(KeyCacheObject key, boolean forceKeys) {
    try {
        int keyPart = cctx.affinity().partition(key);

        if (cctx.mvccEnabled()) {
            boolean noOwners = cctx.topology().owners(keyPart, topVer).isEmpty();

            // If the partition has no owners, the request would produce no results,
            // and therefore the forceKeys flag may be set to true here.
            if (noOwners)
                forceKeys = true;
        }

        GridDhtLocalPartition part = topVer.topologyVersion() > 0 ?
            cache().topology().localPartition(keyPart, topVer, true) :
            cache().topology().localPartition(keyPart);

        if (part == null)
            return false;

        assert this.part == -1;

        // By reserving, we make sure that partition won't be unloaded while processed.
        if (part.reserve()) {
            if (forceKeys || (part.state() == OWNING || part.state() == LOST)) {
                this.part = part.id();

                return true;
            }
            else {
                part.release();

                return false;
            }
        }
        else
            return false;
    }
    catch (GridDhtInvalidPartitionException ex) {
        return false;
    }
}
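The method above hinges on partition reservation: the partition is pinned before its state is inspected, and the reservation is released on every path that does not end up using it. Below is a minimal, self-contained sketch of that idiom; LocalPartition, its State enum and PartitionMapper are hypothetical stand-ins for the internal GridDhtLocalPartition API, not part of Ignite.

// Hypothetical stand-in for the internal GridDhtLocalPartition API.
interface LocalPartition {
    enum State { OWNING, MOVING, RENTING, EVICTED, LOST }

    boolean reserve();   // pin the partition so it cannot be unloaded while in use

    void release();      // undo a successful reserve()

    State state();

    int id();
}

final class PartitionMapper {
    /** Returns the reserved partition id, or -1 if the key cannot be mapped locally. */
    static int tryMap(LocalPartition part, boolean forceKeys) {
        if (part == null || !part.reserve())
            return -1;

        // Usable only if this node owns the partition (or the caller forces the mapping).
        if (forceKeys || part.state() == LocalPartition.State.OWNING || part.state() == LocalPartition.State.LOST)
            return part.id();

        // Not usable: undo the reservation before giving up.
        part.release();

        return -1;
    }
}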
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
Class GridDhtCacheAdapter, method updateTtl.
/**
 * @param cache Cache.
 * @param keys Entries keys.
 * @param vers Entries versions.
 * @param ttl TTL.
 */
private void updateTtl(GridCacheAdapter<K, V> cache, List<KeyCacheObject> keys, List<GridCacheVersion> vers, long ttl) {
    assert !F.isEmpty(keys);
    assert keys.size() == vers.size();

    int size = keys.size();

    for (int i = 0; i < size; i++) {
        try {
            GridCacheEntryEx entry = null;

            try {
                while (true) {
                    try {
                        entry = cache.entryEx(keys.get(i));

                        entry.unswap(false);

                        entry.updateTtl(vers.get(i), ttl);

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignore) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry: " + entry);
                    }
                    catch (GridDhtInvalidPartitionException e) {
                        if (log.isDebugEnabled())
                            log.debug("Got GridDhtInvalidPartitionException: " + e);

                        break;
                    }
                }
            }
            finally {
                if (entry != null)
                    entry.touch();
            }
        }
        catch (IgniteCheckedException e) {
            log.error("Failed to unswap entry.", e);
        }
    }
}
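The method above follows a recurring Ignite idiom: re-fetch the entry and retry while GridCacheEntryRemovedException is thrown, stop on GridDhtInvalidPartitionException, and always call touch() on the last obtained entry. A minimal sketch of that control flow follows; CacheEntry, EntryLookup, EntryRemovedException, InvalidPartitionException and TtlUpdater are hypothetical stand-ins, not the real internal types.

// Hypothetical stand-ins for the internal cache entry API.
class EntryRemovedException extends Exception { }

class InvalidPartitionException extends RuntimeException { }

interface CacheEntry {
    void updateTtl(long ttl) throws EntryRemovedException;

    void touch();   // return the entry to the cache once processing is done
}

interface EntryLookup {
    CacheEntry entryFor(Object key) throws InvalidPartitionException;
}

final class TtlUpdater {
    static void updateTtl(EntryLookup cache, Object key, long ttl) {
        CacheEntry entry = null;

        try {
            while (true) {
                try {
                    entry = cache.entryFor(key);

                    entry.updateTtl(ttl);

                    break;  // success
                }
                catch (EntryRemovedException ignore) {
                    // Entry was removed concurrently: obtain a fresh one and retry.
                }
                catch (InvalidPartitionException e) {
                    break;  // partition left this node: nothing to update
                }
            }
        }
        finally {
            // Touch whatever entry we last obtained, on every exit path.
            if (entry != null)
                entry.touch();
        }
    }
}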
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
Class GridDhtCacheAdapter, method processForceKeysRequest0.
/**
 * @param node Node originated request.
 * @param msg Force keys message.
 */
private void processForceKeysRequest0(ClusterNode node, GridDhtForceKeysRequest msg) {
    try {
        ClusterNode loc = ctx.localNode();

        GridDhtForceKeysResponse res = new GridDhtForceKeysResponse(ctx.cacheId(), msg.futureId(), msg.miniId(), ctx.deploymentEnabled());

        GridDhtPartitionTopology top = ctx.topology();

        for (KeyCacheObject k : msg.keys()) {
            int p = ctx.affinity().partition(k);

            GridDhtLocalPartition locPart = top.localPartition(p, AffinityTopologyVersion.NONE, false);

            // If this node is no longer an owner.
            if (locPart == null && !top.owners(p).contains(loc)) {
                res.addMissed(k);

                continue;
            }

            GridCacheEntryEx entry;

            while (true) {
                ctx.shared().database().checkpointReadLock();

                try {
                    entry = ctx.dht().entryEx(k);

                    entry.unswap();

                    if (ctx.mvccEnabled()) {
                        List<GridCacheEntryInfo> infos = entry.allVersionsInfo();

                        if (infos == null) {
                            assert entry.obsolete() : entry;

                            continue;
                        }

                        for (int i = 0; i < infos.size(); i++)
                            res.addInfo(infos.get(i));
                    }
                    else {
                        GridCacheEntryInfo info = entry.info();

                        if (info == null) {
                            assert entry.obsolete() : entry;

                            continue;
                        }

                        if (!info.isNew())
                            res.addInfo(info);
                    }

                    entry.touch();

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry: " + k);
                }
                catch (GridDhtInvalidPartitionException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Local node is no longer an owner: " + p);

                    res.addMissed(k);

                    break;
                }
                finally {
                    ctx.shared().database().checkpointReadUnlock();
                }
            }
        }

        if (log.isDebugEnabled())
            log.debug("Sending force key response [node=" + node.id() + ", res=" + res + ']');

        ctx.io().send(node, res, ctx.ioPolicy());
    }
    catch (ClusterTopologyCheckedException ignore) {
        if (log.isDebugEnabled())
            log.debug("Received force key request from failed node (will ignore) [nodeId=" + node.id() + ", req=" + msg + ']');
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to reply to force key request [nodeId=" + node.id() + ", req=" + msg + ']', e);
    }
}
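Note how the checkpoint read lock is acquired and released around each attempt of the per-key retry loop rather than around the loop as a whole, so a long retry sequence never blocks checkpointing. A minimal sketch of that locking pattern follows; CheckpointLock, Attempt and RetryUnderLock are hypothetical abstractions of ctx.shared().database() and the per-key processing, not the real API.

// Hypothetical abstraction of the checkpoint read lock.
interface CheckpointLock {
    void checkpointReadLock();

    void checkpointReadUnlock();
}

@FunctionalInterface
interface Attempt {
    /** @return {@code true} when the attempt completed and the loop may stop. */
    boolean run() throws Exception;
}

final class RetryUnderLock {
    static void retry(CheckpointLock db, Attempt attempt) throws Exception {
        while (true) {
            db.checkpointReadLock();

            try {
                if (attempt.run())
                    break;  // done: entry processed or key reported as missed
            }
            finally {
                // Released after every attempt, not once after the whole loop.
                db.checkpointReadUnlock();
            }
        }
    }
}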
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
Class GridDhtCacheAdapter, method loadEntry.
/**
 * @param key Key.
 * @param val Value.
 * @param ver Cache version.
 * @param p Optional predicate.
 * @param topVer Topology version.
 * @param replicate Replication flag.
 * @param plc Expiry policy.
 */
private void loadEntry(KeyCacheObject key, Object val, GridCacheVersion ver, @Nullable IgniteBiPredicate<K, V> p,
    AffinityTopologyVersion topVer, boolean replicate, @Nullable ExpiryPolicy plc) {
    if (p != null && !p.apply(key.<K>value(ctx.cacheObjectContext(), false), (V)val))
        return;

    try {
        GridDhtLocalPartition part = ctx.group().topology().localPartition(ctx.affinity().partition(key),
            AffinityTopologyVersion.NONE, true);

        // Reserve to make sure that partition does not get unloaded.
        if (part.reserve()) {
            GridCacheEntryEx entry = null;

            ctx.shared().database().checkpointReadLock();

            try {
                long ttl = CU.ttlForLoad(plc);

                if (ttl == CU.TTL_ZERO)
                    return;

                CacheObject cacheVal = ctx.toCacheObject(val);

                entry = entryEx(key);

                entry.initialValue(cacheVal, ver, ttl, CU.EXPIRE_TIME_CALCULATE, false, topVer,
                    replicate ? DR_LOAD : DR_NONE, true, false);
            }
            catch (IgniteCheckedException e) {
                throw new IgniteException("Failed to put cache value: " + entry, e);
            }
            catch (GridCacheEntryRemovedException ignore) {
                if (log.isDebugEnabled())
                    log.debug("Got removed entry during loadCache (will ignore): " + entry);
            }
            finally {
                if (entry != null)
                    entry.touch();

                part.release();

                ctx.shared().database().checkpointReadUnlock();
            }
        }
        else if (log.isDebugEnabled())
            log.debug("Will not load entry into cache (partition is invalid): " + part);
    }
    catch (GridDhtInvalidPartitionException e) {
        if (log.isDebugEnabled())
            log.debug(S.toString("Ignoring entry for partition that does not belong", "key", key, true, "val", val, true, "err", e, false));
    }
}
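The interesting part here is the cleanup ordering: the partition is reserved before the checkpoint read lock is taken, and entry.touch(), part.release() and the unlock all run in a single finally block regardless of how the attempt ended. The following sketch illustrates that ordering, reusing the hypothetical LocalPartition, CheckpointLock, EntryLookup, CacheEntry and exception types from the sketches above; EntryLoader, and updateTtl standing in for initialValue(...), are likewise hypothetical.

final class EntryLoader {
    static void load(LocalPartition part, CheckpointLock db, EntryLookup cache, Object key, long ttl) {
        // Reserve first so the partition cannot be unloaded while we write the value.
        if (!part.reserve())
            return;     // partition is being evicted: skip this entry

        CacheEntry entry = null;

        db.checkpointReadLock();

        try {
            entry = cache.entryFor(key);

            entry.updateTtl(ttl);   // stands in for entry.initialValue(...) in the real code
        }
        catch (EntryRemovedException | InvalidPartitionException ignore) {
            // Entry was removed or the partition left this node while loading: nothing to do.
        }
        finally {
            // Undo everything in one place: return the entry, release the reservation, unlock.
            if (entry != null)
                entry.touch();

            part.release();

            db.checkpointReadUnlock();
        }
    }
}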
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
Class GridCacheTtlManager, method expire.
/**
 * Processes specified amount of expired entries.
 *
 * @param amount Limit of processed entries by single call, {@code -1} for no limit.
 * @return {@code True} if unprocessed expired entries remain.
 */
public boolean expire(int amount) {
    // TTL manager is not initialized or eagerTtl is disabled for the cache.
    if (!eagerTtlEnabled)
        return false;

    assert cctx != null;

    long now = U.currentTimeMillis();

    try {
        if (pendingEntries != null) {
            GridNearCacheAdapter nearCache = cctx.near();

            GridCacheVersion obsoleteVer = null;

            int limit = (-1 != amount) ? amount : pendingEntries.sizex();

            for (int cnt = limit; cnt > 0; cnt--) {
                EntryWrapper e = pendingEntries.firstx();

                if (e == null || e.expireTime > now)
                    break; // All expired entries are processed.

                if (pendingEntries.remove(e)) {
                    if (obsoleteVer == null)
                        obsoleteVer = cctx.cache().nextVersion();

                    GridNearCacheEntry nearEntry = nearCache.peekExx(e.key);

                    if (nearEntry != null)
                        expireC.apply(nearEntry, obsoleteVer);
                }
            }
        }

        if (!cctx.affinityNode())
            return false;

        if (!hasPendingEntries || nextCleanTime > U.currentTimeMillis())
            return false;

        boolean more = cctx.offheap().expire(dhtCtx, expireC, amount);

        if (more)
            return true;

        // There is nothing to clean, so the next clean up can be postponed.
        nextCleanTime = U.currentTimeMillis() + unwindThrottlingTimeout;

        if (amount != -1 && pendingEntries != null) {
            EntryWrapper e = pendingEntries.firstx();

            return e != null && e.expireTime <= now;
        }
    }
    catch (GridDhtInvalidPartitionException e) {
        if (log.isDebugEnabled())
            log.debug("Partition became invalid during rebalancing (will ignore): " + e.partition());

        return false;
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to process entry expiration: " + e, e);
    }
    catch (IgniteException e) {
        if (e.hasCause(NodeStoppingException.class)) {
            if (log.isDebugEnabled())
                log.debug("Failed to expire because node is stopped: " + e);
        }
        else
            throw e;
    }

    return false;
}
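The expiration sweep above is bounded: at most amount entries are processed per call, entries are taken in expire-time order, and the return value tells the caller whether another pass is needed. The following self-contained sketch, using only JDK types, illustrates that control flow; ExpirationSweep and Pending are hypothetical, and the real pendingEntries structure is a concurrent ordered set rather than a PriorityQueue.

import java.util.Comparator;
import java.util.PriorityQueue;
import java.util.Queue;

final class ExpirationSweep {
    record Pending(long expireTime, Object key) { }

    // Ordered by expire time. The real structure is concurrent; this sketch is single-threaded for brevity.
    private final Queue<Pending> pending = new PriorityQueue<Pending>(Comparator.comparingLong(Pending::expireTime));

    void schedule(Object key, long expireTime) {
        pending.add(new Pending(expireTime, key));
    }

    /**
     * @param amount Limit of entries processed by a single call, -1 for no limit.
     * @return true if unprocessed expired entries remain.
     */
    boolean expire(int amount) {
        long now = System.currentTimeMillis();

        int limit = amount != -1 ? amount : pending.size();

        for (int cnt = limit; cnt > 0; cnt--) {
            Pending e = pending.peek();

            if (e == null || e.expireTime() > now)
                return false;   // all expired entries are processed

            pending.remove();   // here the real code would also expire the cache entry itself
        }

        // Limit reached: report whether more already-expired entries are waiting.
        Pending e = pending.peek();

        return e != null && e.expireTime() <= now;
    }
}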