Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class IgniteCacheExpiryPolicyWithStoreAbstractTest, method checkTtl.
/**
* @param key Key.
* @param ttl TTL.
* @param primaryOnly If {@code true} expect entries only on primary node.
* @throws Exception If failed.
*/
private void checkTtl(Object key, final long ttl, boolean primaryOnly) throws Exception {
boolean found = false;
for (int i = 0; i < gridCount(); i++) {
IgniteKernal grid = (IgniteKernal) grid(i);
GridCacheAdapter<Object, Object> cache = grid.context().cache().internalCache(DEFAULT_CACHE_NAME);
GridCacheEntryEx e = null;
try {
e = cache.entryEx(key);
e.unswap();
} catch (GridDhtInvalidPartitionException ignore) {
// No-op.
}
if ((e == null || e.rawGet() == null) && cache.context().isNear())
e = cache.context().near().dht().peekEx(key);
if (e == null || e.rawGet() == null)
e = null;
if (e == null) {
if (primaryOnly)
assertTrue("Not found " + key, !grid.affinity(DEFAULT_CACHE_NAME).isPrimary(grid.localNode(), key));
else
assertTrue("Not found " + key, !grid.affinity(DEFAULT_CACHE_NAME).isPrimaryOrBackup(grid.localNode(), key));
} else {
found = true;
if (ttl > 0)
assertTrue(e.expireTime() > 0);
else
assertEquals(0, e.expireTime());
}
}
assertTrue(found);
}
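The test above probes each node's internal cache and treats GridDhtInvalidPartitionException as meaning the key simply does not map to a partition owned by that node. A minimal, self-contained sketch of that probe pattern follows; InvalidPartitionException, EntryView, and NodeProbe are hypothetical stand-ins for illustration, not Ignite APIs.

import java.util.List;
import java.util.Optional;

public class TtlProbeSketch {
    /** Hypothetical stand-in for GridDhtInvalidPartitionException. */
    static class InvalidPartitionException extends RuntimeException { }

    /** Hypothetical minimal view of a cache entry. */
    interface EntryView {
        Object rawGet();
        long expireTime();
    }

    /** Hypothetical per-node probe: returns the entry or throws if the partition is not local. */
    interface NodeProbe {
        EntryView entry(Object key) throws InvalidPartitionException;
    }

    /**
     * Probes every node for the key, treating InvalidPartitionException as
     * "the partition is not assigned here", just as checkTtl does.
     */
    static Optional<EntryView> findEntry(List<NodeProbe> nodes, Object key) {
        for (NodeProbe node : nodes) {
            EntryView e = null;
            try {
                e = node.entry(key);
            }
            catch (InvalidPartitionException ignore) {
                // The key hashes to a partition this node does not own: not an error.
            }
            if (e != null && e.rawGet() != null)
                return Optional.of(e);
        }
        return Optional.empty();
    }
}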
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class GridDhtAtomicCache, method updateAllAsyncInternal0.
/**
* Executes local update after preloader fetched values.
*
* @param node Node.
* @param req Update request.
* @param completionCb Completion callback.
*/
private void updateAllAsyncInternal0(final ClusterNode node, final GridNearAtomicAbstractUpdateRequest req, final UpdateReplyClosure completionCb) {
GridNearAtomicUpdateResponse res = new GridNearAtomicUpdateResponse(ctx.cacheId(), node.id(), req.futureId(), req.partition(), false, ctx.deploymentEnabled());
assert !req.returnValue() || (req.operation() == TRANSFORM || req.size() == 1);
GridDhtAtomicAbstractUpdateFuture dhtFut = null;
IgniteCacheExpiryPolicy expiry = null;
boolean needTaskName = ctx.events().isRecordable(EVT_CACHE_OBJECT_READ) || ctx.events().isRecordable(EVT_CACHE_OBJECT_PUT) || ctx.events().isRecordable(EVT_CACHE_OBJECT_REMOVED);
String taskName = needTaskName ? ctx.kernalContext().task().resolveTaskName(req.taskNameHash()) : null;
ctx.shared().database().checkpointReadLock();
try {
ctx.shared().database().ensureFreeSpace(ctx.dataRegion());
// If batch store update is enabled, we need to lock all entries.
// First, need to acquire locks on cache entries, then check filter.
List<GridDhtCacheEntry> locked = lockEntries(req, req.topologyVersion());
Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;
DhtAtomicUpdateResult updDhtRes = new DhtAtomicUpdateResult();
try {
while (true) {
try {
GridDhtPartitionTopology top = topology();
top.readLock();
try {
if (top.stopping()) {
if (ctx.shared().cache().isCacheRestarting(name()))
res.addFailedKeys(req.keys(), new IgniteCacheRestartingException(name()));
else
res.addFailedKeys(req.keys(), new CacheStoppedException(name()));
completionCb.apply(req, res);
return;
}
boolean remap = false;
// Do not check topology version if topology was locked on near node by external transaction or explicit lock.
if (!req.topologyLocked()) {
AffinityTopologyVersion waitVer = top.topologyVersionFuture().initialVersion();
// No need to remap if next future version is compatible.
boolean compatible = waitVer.isBetween(req.lastAffinityChangedTopologyVersion(), req.topologyVersion());
// Can not wait for topology future since it will break
// GridNearAtomicCheckUpdateRequest processing.
remap = !compatible && !top.topologyVersionFuture().isDone() || needRemap(req.topologyVersion(), top.readyTopologyVersion());
}
if (!remap) {
update(node, locked, req, res, updDhtRes, taskName);
dhtFut = updDhtRes.dhtFuture();
deleted = updDhtRes.deleted();
expiry = updDhtRes.expiryPolicy();
} else
// Should remap all keys.
res.remapTopologyVersion(top.lastTopologyChangeVersion());
} finally {
top.readUnlock();
}
// Must be done outside topology read lock to avoid deadlocks.
if (res.returnValue() != null)
res.returnValue().marshalResult(ctx);
break;
} catch (UnregisteredClassException ex) {
IgniteCacheObjectProcessor cacheObjProc = ctx.cacheObjects();
assert cacheObjProc instanceof CacheObjectBinaryProcessorImpl;
((CacheObjectBinaryProcessorImpl) cacheObjProc).binaryContext().registerClass(ex.cls(), true, false);
} catch (UnregisteredBinaryTypeException ex) {
if (ex.future() != null) {
// Wait for the future that couldn't be processed because of
// IgniteThread#isForbiddenToRequestBinaryMetadata flag being true. Usually this means
// that awaiting for the future right there would lead to potential deadlock if
// continuous queries are used in parallel with entry processor.
ex.future().get();
// Retry and don't update current binary metadata, because it most likely already exists.
continue;
}
IgniteCacheObjectProcessor cacheObjProc = ctx.cacheObjects();
assert cacheObjProc instanceof CacheObjectBinaryProcessorImpl;
((CacheObjectBinaryProcessorImpl) cacheObjProc).binaryContext().updateMetadata(ex.typeId(), ex.binaryMetadata(), false);
}
}
} catch (GridCacheEntryRemovedException e) {
assert false : "Entry should not become obsolete while holding lock.";
e.printStackTrace();
} finally {
if (locked != null)
unlockEntries(locked, req.topologyVersion());
// Enqueue if necessary after locks release.
if (deleted != null) {
assert !deleted.isEmpty();
assert ctx.deferredDelete() : this;
for (IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion> e : deleted)
    ctx.onDeferredDelete(e.get1(), e.get2());
}
// TODO fire events only after successful fsync
if (ctx.shared().wal() != null)
ctx.shared().wal().flush(null, false);
}
} catch (GridDhtInvalidPartitionException ignore) {
if (log.isDebugEnabled())
log.debug("Caught invalid partition exception for cache entry (will remap update request): " + req);
res.remapTopologyVersion(ctx.topology().lastTopologyChangeVersion());
} catch (Throwable e) {
// At least RuntimeException can be thrown by the code above when GridCacheContext is cleaned and there is
// an attempt to use cleaned resources.
U.error(log, "Unexpected exception during cache update", e);
res.addFailedKeys(req.keys(), e);
completionCb.apply(req, res);
if (e instanceof Error)
throw (Error) e;
return;
} finally {
ctx.shared().database().checkpointReadUnlock();
}
if (res.remapTopologyVersion() != null) {
assert dhtFut == null;
completionCb.apply(req, res);
} else {
if (dhtFut != null)
dhtFut.map(node, res.returnValue(), res, completionCb);
}
if (req.writeSynchronizationMode() != FULL_ASYNC)
req.cleanup(!node.isLocal());
sendTtlUpdateRequest(expiry);
}
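Before applying the update, updateAllAsyncInternal0 decides, while holding the topology read lock, whether the request must be remapped to a newer topology. The sketch below restates that decision with plain long versions instead of AffinityTopologyVersion; the class, method, and parameter names, and the simplified needRemap rule (ready version moved past the requested one), are assumptions for illustration, not Ignite APIs.

public class RemapDecisionSketch {
    /**
     * @param reqTopVer Topology version the near node mapped the update to.
     * @param lastAffChangedVer Last version that actually changed the affinity.
     * @param pendingInitVer Initial version of the in-flight topology exchange.
     * @param pendingDone Whether that exchange has already completed.
     * @param readyTopVer Topology version the local node is ready on.
     * @return {@code true} if the update should be sent back for remapping.
     */
    static boolean shouldRemap(long reqTopVer, long lastAffChangedVer,
        long pendingInitVer, boolean pendingDone, long readyTopVer) {
        // The pending exchange is compatible if its initial version falls inside
        // the [lastAffChangedVer, reqTopVer] window: affinity did not move.
        boolean compatible = pendingInitVer >= lastAffChangedVer && pendingInitVer <= reqTopVer;
        // Waiting on the exchange future here would stall the striped pool,
        // so an incompatible in-flight change forces a remap instead.
        return (!compatible && !pendingDone) || readyTopVer > reqTopVer;
    }
}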
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class GridDhtAtomicCache, method processDhtAtomicUpdateRequest.
/**
* @param nodeId Sender node ID.
* @param req Dht atomic update request.
*/
private void processDhtAtomicUpdateRequest(UUID nodeId, GridDhtAtomicAbstractUpdateRequest req) {
assert Thread.currentThread().getName().startsWith("sys-stripe-") : Thread.currentThread().getName();
if (msgLog.isDebugEnabled()) {
msgLog.debug("Received DHT atomic update request [futId=" + req.futureId() + ", writeVer=" + req.writeVersion() + ", node=" + nodeId + ']');
}
assert req.partition() >= 0 : req;
GridCacheVersion ver = req.writeVersion();
ctx.versions().onReceived(nodeId, ver);
GridDhtAtomicNearResponse nearRes = null;
if (req.nearNodeId() != null) {
nearRes = new GridDhtAtomicNearResponse(ctx.cacheId(), req.partition(), req.nearFutureId(), nodeId, req.flags());
}
boolean replicate = ctx.isDrEnabled();
boolean intercept = req.forceTransformBackups() && ctx.config().getInterceptor() != null;
boolean needTaskName = ctx.events().isRecordable(EVT_CACHE_OBJECT_READ) || ctx.events().isRecordable(EVT_CACHE_OBJECT_PUT) || ctx.events().isRecordable(EVT_CACHE_OBJECT_REMOVED);
String taskName = needTaskName ? ctx.kernalContext().task().resolveTaskName(req.taskNameHash()) : null;
ctx.shared().database().checkpointReadLock();
try {
for (int i = 0; i < req.size(); i++) {
KeyCacheObject key = req.key(i);
try {
while (true) {
GridDhtCacheEntry entry = null;
try {
entry = entryExx(key);
CacheObject val = req.value(i);
CacheObject prevVal = req.previousValue(i);
EntryProcessor<Object, Object, Object> entryProcessor = req.entryProcessor(i);
Long updateIdx = req.updateCounter(i);
GridCacheOperation op = entryProcessor != null ? TRANSFORM : (val != null) ? UPDATE : DELETE;
long ttl = req.ttl(i);
long expireTime = req.conflictExpireTime(i);
GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nodeId, nodeId, op,
    op == TRANSFORM ? entryProcessor : val,
    op == TRANSFORM ? req.invokeArguments() : null,
    /*write-through*/(ctx.store().isLocal() && !ctx.shared().localStorePrimaryOnly()) && writeThrough() && !req.skipStore(),
    /*read-through*/false,
    /*retval*/false,
    req.keepBinary(),
    /*expiry policy*/null,
    /*event*/true,
    /*metrics*/true,
    /*primary*/false,
    /*check version*/!req.forceTransformBackups(),
    req.topologyVersion(), CU.empty0(), replicate ? DR_BACKUP : DR_NONE,
    ttl, expireTime, req.conflictVersion(i), false, intercept, taskName,
    prevVal, updateIdx, null, req.transformOperation());
if (updRes.removeVersion() != null)
ctx.onDeferredDelete(entry, updRes.removeVersion());
entry.onUnlock();
break; // While.
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Got removed entry while updating backup value (will retry): " + key);
entry = null;
} finally {
if (entry != null)
entry.touch();
}
}
} catch (NodeStoppingException e) {
U.warn(log, "Failed to update key on backup (local node is stopping): " + key);
return;
} catch (GridDhtInvalidPartitionException ignored) {
// Ignore.
} catch (IgniteCheckedException | RuntimeException e) {
if (e instanceof RuntimeException && !X.hasCause(e, IgniteOutOfMemoryException.class))
throw (RuntimeException) e;
IgniteCheckedException err = new IgniteCheckedException("Failed to update key on backup node: " + key, e);
if (nearRes != null)
nearRes.addFailedKey(key, err);
U.error(log, "Failed to update key on backup node: " + key, e);
}
}
} finally {
ctx.shared().database().checkpointReadUnlock();
}
GridDhtAtomicUpdateResponse dhtRes = null;
if (req.nearSize() > 0 || req.obsoleteNearKeysSize() > 0) {
List<KeyCacheObject> nearEvicted = null;
if (isNearEnabled(ctx))
nearEvicted = ((GridNearAtomicCache<K, V>) near()).processDhtAtomicUpdateRequest(nodeId, req, nearRes);
else if (req.nearSize() > 0) {
nearEvicted = new ArrayList<>(req.nearSize());
for (int i = 0; i < req.nearSize(); i++)
    nearEvicted.add(req.nearKey(i));
}
if (nearEvicted != null) {
dhtRes = new GridDhtAtomicUpdateResponse(ctx.cacheId(), req.partition(), req.futureId(), ctx.deploymentEnabled());
dhtRes.nearEvicted(nearEvicted);
}
}
try {
// TODO fire events only after successful fsync
if (ctx.shared().wal() != null)
ctx.shared().wal().flush(null, false);
} catch (StorageException e) {
if (dhtRes != null)
dhtRes.onError(new IgniteCheckedException(e));
if (nearRes != null)
nearRes.onClassError(e);
} catch (IgniteCheckedException e) {
if (dhtRes != null)
dhtRes.onError(e);
if (nearRes != null)
nearRes.onClassError(e);
}
if (nearRes != null)
sendDhtNearResponse(req, nearRes);
if (dhtRes == null && req.replyWithoutDelay()) {
dhtRes = new GridDhtAtomicUpdateResponse(ctx.cacheId(), req.partition(), req.futureId(), ctx.deploymentEnabled());
}
if (dhtRes != null)
sendDhtPrimaryResponse(nodeId, req, dhtRes);
else
sendDeferredUpdateResponse(req.partition(), nodeId, req.futureId());
}
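Each backup key above is updated inside a retry loop: GridCacheEntryRemovedException means the entry became obsolete mid-update and the lookup is retried with a fresh entry, while GridDhtInvalidPartitionException means the partition has left this node and the key is skipped. A generic, self-contained sketch of that loop, with all types as hypothetical stand-ins rather than Ignite APIs:

public class BackupUpdateRetrySketch {
    /** Hypothetical stand-in for GridCacheEntryRemovedException. */
    static class EntryRemovedException extends Exception { }

    /** Hypothetical stand-in for GridDhtInvalidPartitionException. */
    static class InvalidPartitionException extends RuntimeException { }

    interface Entry {
        void update(Object val) throws EntryRemovedException;
        void touch();
    }

    interface EntryLookup {
        Entry entry(Object key) throws InvalidPartitionException;
    }

    static void applyBackupUpdate(EntryLookup cache, Object key, Object val) {
        try {
            while (true) {
                Entry entry = null;
                try {
                    entry = cache.entry(key);
                    entry.update(val);
                    break; // Success: leave the retry loop.
                }
                catch (EntryRemovedException ignored) {
                    entry = null; // Entry became obsolete mid-update: retry with a fresh one.
                }
                finally {
                    if (entry != null)
                        entry.touch(); // Let the eviction policy account for the access.
                }
            }
        }
        catch (InvalidPartitionException ignored) {
            // Partition moved away during rebalancing: the key is no longer ours, skip it.
        }
    }
}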
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class GridDhtAtomicCache, method getAllAsync0.
/**
* Entry point to all public API get methods.
*
* @param keys Keys.
* @param forcePrimary Force primary flag.
* @param taskName Task name.
* @param deserializeBinary Deserialize binary flag.
* @param recovery Recovery flag.
* @param readRepairStrategy Read repair strategy.
* @param expiryPlc Expiry policy.
* @param skipVals Skip values flag.
* @param skipStore Skip store flag.
* @param needVer Need version.
* @return Get future.
*/
private IgniteInternalFuture<Map<K, V>> getAllAsync0(@Nullable Collection<KeyCacheObject> keys, boolean forcePrimary, String taskName, boolean deserializeBinary, boolean recovery, ReadRepairStrategy readRepairStrategy, @Nullable ExpiryPolicy expiryPlc, boolean skipVals, boolean skipStore, boolean needVer) {
AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion();
final IgniteCacheExpiryPolicy expiry = skipVals ? null : expiryPolicy(expiryPlc);
final boolean evt = !skipVals;
if (readRepairStrategy != null) {
return new GridNearReadRepairCheckOnlyFuture(ctx, ctx.cacheKeysView(keys), readRepairStrategy, !skipStore, taskName, deserializeBinary, recovery, expiry, skipVals, needVer, false, null).multi();
}
// Optimisation: try to resolve value locally and escape 'get future' creation.
if (!forcePrimary && ctx.config().isReadFromBackup() && ctx.affinityNode() && ctx.group().topology().lostPartitions().isEmpty()) {
ctx.shared().database().checkpointReadLock();
try {
Map<K, V> locVals = U.newHashMap(keys.size());
boolean success = true;
boolean readNoEntry = ctx.readNoEntry(expiry, false);
// Optimistically expect that all keys are available locally (avoid creation of get future).
for (KeyCacheObject key : keys) {
if (readNoEntry) {
CacheDataRow row = ctx.offheap().read(ctx, key);
if (row != null) {
long expireTime = row.expireTime();
if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
ctx.addResult(locVals, key, row.value(), skipVals, false, deserializeBinary, true, null, row.version(), 0, 0, needVer, U.deploymentClassLoader(ctx.kernalContext(), U.contextDeploymentClassLoaderId(ctx.kernalContext())));
if (evt) {
ctx.events().readEvent(key, null, null, row.value(), taskName, !deserializeBinary);
}
} else
success = false;
} else
success = false;
} else {
GridCacheEntryEx entry = null;
while (true) {
try {
entry = entryEx(key);
// If our DHT cache has the value, then we peek it.
if (entry != null) {
boolean isNew = entry.isNewLocked();
EntryGetResult getRes = null;
CacheObject v = null;
GridCacheVersion ver = null;
if (needVer) {
getRes = entry.innerGetVersioned(null, null, /*update-metrics*/false,
    /*event*/evt, null, taskName, expiry, true, null);
if (getRes != null) {
v = getRes.value();
ver = getRes.version();
}
} else {
v = entry.innerGet(null, null, /*read-through*/false, /*update-metrics*/false,
    /*event*/evt, null, taskName, expiry, !deserializeBinary);
}
// Entry was not in memory or in swap, so we remove it from cache.
if (v == null) {
if (isNew && entry.markObsoleteIfEmpty(nextVersion()))
removeEntry(entry);
success = false;
} else {
ctx.addResult(locVals, key, v, skipVals, false, deserializeBinary, true, getRes, ver, 0, 0, needVer, U.deploymentClassLoader(ctx.kernalContext(), U.contextDeploymentClassLoaderId(ctx.kernalContext())));
}
} else
success = false;
break; // While.
} catch (GridCacheEntryRemovedException ignored) {
// No-op, retry.
} catch (GridDhtInvalidPartitionException ignored) {
success = false;
break; // While.
} finally {
if (entry != null)
entry.touch();
}
}
}
if (!success)
break;
else if (!skipVals && ctx.statisticsEnabled())
metrics0().onRead(true);
}
if (success) {
sendTtlUpdateRequest(expiry);
return new GridFinishedFuture<>(locVals);
}
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
} finally {
ctx.shared().database().checkpointReadUnlock();
}
}
if (expiry != null)
expiry.reset();
// Either reload or not all values are available locally.
GridPartitionedGetFuture<K, V> fut = new GridPartitionedGetFuture<>(ctx, keys, !skipStore, forcePrimary, taskName, deserializeBinary, recovery, expiry, skipVals, needVer, false, null, null, null);
fut.init(topVer);
return fut;
}
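In the read-no-entry fast path above, a stored row is served only if its absolute expire time is zero (never expires) or still in the future; otherwise the local read fails over to the get future. A tiny illustrative helper capturing that check (not an Ignite API):

public class ExpiryCheckSketch {
    /** Returns true if a stored row with the given absolute expire time is still alive. */
    static boolean alive(long expireTime, long nowMillis) {
        return expireTime == 0 || expireTime > nowMillis;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        System.out.println(alive(0, now));            // true: no expiry set
        System.out.println(alive(now + 10_000, now)); // true: expires in 10 seconds
        System.out.println(alive(now - 1, now));      // false: already expired
    }
}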
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class GridDhtTransactionalCacheAdapter, method lockAllAsyncInternal.
/**
* Acquires locks in partitioned cache.
*
* @param keys Keys to lock.
* @param timeout Lock timeout.
* @param txx Transaction.
* @param isInvalidate Invalidate flag.
* @param isRead Read flag.
* @param retval Return value flag.
* @param isolation Transaction isolation.
* @param createTtl TTL for create operation.
* @param accessTtl TTL for read operation.
* @param filter Optional filter.
* @param skipStore Skip store flag.
* @param keepBinary Keep binary flag.
* @return Lock future.
*/
public GridDhtFuture<Boolean> lockAllAsyncInternal(@Nullable Collection<KeyCacheObject> keys, long timeout, IgniteTxLocalEx txx, boolean isInvalidate, boolean isRead, boolean retval, TransactionIsolation isolation, long createTtl, long accessTtl, CacheEntryPredicate[] filter, boolean skipStore, boolean keepBinary) {
if (keys == null || keys.isEmpty())
return new GridDhtFinishedFuture<>(true);
GridDhtTxLocalAdapter tx = (GridDhtTxLocalAdapter) txx;
assert tx != null;
GridDhtLockFuture fut = new GridDhtLockFuture(ctx, tx.nearNodeId(), tx.nearXidVersion(), tx.topologyVersion(), keys.size(), isRead, retval, timeout, tx, tx.threadId(), createTtl, accessTtl, filter, skipStore, keepBinary);
// Possible in case of cancellation or timeout or rollback.
if (fut.isDone())
    return fut;
for (KeyCacheObject key : keys) {
try {
while (true) {
GridDhtCacheEntry entry = entryExx(key, tx.topologyVersion());
try {
fut.addEntry(entry);
// Possible in case of cancellation or time out or rollback.
if (fut.isDone())
return fut;
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry when adding lock (will retry): " + entry);
} catch (GridDistributedLockCancelledException e) {
if (log.isDebugEnabled())
log.debug("Failed to add entry [err=" + e + ", entry=" + entry + ']');
return new GridDhtFinishedFuture<>(e);
}
}
} catch (GridDhtInvalidPartitionException e) {
fut.addInvalidPartition(ctx, e.partition());
if (log.isDebugEnabled())
log.debug("Added invalid partition to DHT lock future [part=" + e.partition() + ", fut=" + fut + ']');
}
}
if (!fut.isDone()) {
ctx.mvcc().addFuture(fut);
fut.map();
}
return fut;
}
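Note that an invalid partition does not fail the whole lock request: lockAllAsyncInternal records the partition on the future so the near node can retry the affected keys against the new topology. A self-contained sketch of that collect-and-continue mapping, with hypothetical stand-in types rather than Ignite APIs:

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class LockMappingSketch {
    /** Hypothetical stand-in for GridDhtInvalidPartitionException. */
    static class InvalidPartitionException extends RuntimeException {
        final int part;
        InvalidPartitionException(int part) { this.part = part; }
    }

    interface PartitionResolver {
        /** Returns the local partition for the key, or throws if it is not owned here. */
        int localPartition(Object key) throws InvalidPartitionException;
    }

    /** Result: keys mapped locally plus partitions the caller must retry elsewhere. */
    static class Mapping {
        final List<Object> lockedKeys = new ArrayList<>();
        final Set<Integer> invalidParts = new HashSet<>();
    }

    static Mapping mapKeys(PartitionResolver top, Collection<Object> keys) {
        Mapping m = new Mapping();
        for (Object key : keys) {
            try {
                top.localPartition(key);
                m.lockedKeys.add(key);
            }
            catch (InvalidPartitionException e) {
                // Partition moved during rebalancing: record it, don't fail the whole lock.
                m.invalidParts.add(e.part);
            }
        }
        return m;
    }
}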