Use of org.apache.ignite.internal.processors.cache.CacheStoppedException in project ignite by apache.
The class GridDhtColocatedLockFuture, method mapOnTopology.
/**
 * Acquires the topology future and checks its completeness under the read lock. If it is not
 * complete, asynchronously waits for its completion and then tries again.
 *
 * @param remap Remap flag.
 * @param c Optional closure to run after map.
 */
private void mapOnTopology(final boolean remap, @Nullable final Runnable c) {
    // We must acquire topology snapshot from the topology version future.
    cctx.topology().readLock();

    final GridDhtTopologyFuture fut;

    final boolean finished;

    try {
        if (cctx.topology().stopping()) {
            onDone(cctx.shared().cache().isCacheRestarting(cctx.name()) ?
                new IgniteCacheRestartingException(cctx.name()) :
                new CacheStoppedException(cctx.name()));

            return;
        }

        fut = cctx.topologyVersionFuture();

        finished = fut.isDone();

        if (finished) {
            Throwable err = fut.validateCache(cctx, recovery, read, null, keys);

            if (err != null) {
                onDone(err);

                return;
            }

            AffinityTopologyVersion topVer = fut.topologyVersion();

            if (remap) {
                if (tx != null)
                    tx.onRemap(topVer, true);

                synchronized (this) {
                    this.topVer = topVer;
                }
            }
            else {
                if (tx != null)
                    tx.topologyVersion(topVer);

                synchronized (this) {
                    if (this.topVer == null)
                        this.topVer = topVer;
                }
            }

            if (!remap)
                cctx.mvcc().addFuture(this);
        }
    }
    finally {
        cctx.topology().readUnlock();
    }

    if (finished) {
        map(keys, remap, false);

        if (c != null)
            c.run();

        markInitialized();
    }
    else {
        cctx.time().waitAsync(fut, tx == null ? 0 : tx.remainingTime(), (e, timedOut) -> {
            if (errorOrTimeoutOnTopologyVersion(e, timedOut))
                return;

            try {
                mapOnTopology(remap, c);
            }
            finally {
                cctx.shared().txContextReset();
            }
        });
    }
}
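The shape of this method is a reusable idiom: probe for shutdown under the topology read lock, fail the future fast with CacheStoppedException (or IgniteCacheRestartingException) while still holding the lock, and otherwise either map immediately or wait asynchronously and re-enter. A minimal, self-contained sketch of that idiom follows; StopAwareMapper and all of its members are hypothetical stand-ins, not Ignite API.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Minimal sketch of the check-under-read-lock, map-or-retry pattern. Hypothetical types, not Ignite API. */
final class StopAwareMapper {
    private final ReadWriteLock topLock = new ReentrantReadWriteLock();

    private volatile boolean stopping;

    private volatile CompletableFuture<Long> topVerFut = new CompletableFuture<>();

    /** Fails fast when stopping, maps if the topology version is ready, otherwise retries once it is. */
    CompletableFuture<Long> mapOnTopology() {
        CompletableFuture<Long> res = new CompletableFuture<>();

        CompletableFuture<Long> fut;

        topLock.readLock().lock();

        try {
            if (stopping) {
                // Mirrors onDone(new CacheStoppedException(...)) in the snippet above.
                res.completeExceptionally(new IllegalStateException("Cache stopped"));

                return res;
            }

            fut = topVerFut;
        }
        finally {
            topLock.readLock().unlock();
        }

        if (fut.isDone() && !fut.isCompletedExceptionally())
            res.complete(fut.join()); // Map using the acquired topology version.
        else
            // Wait asynchronously, then re-run the whole check (cf. cctx.time().waitAsync(...)).
            fut.whenComplete((ver, err) -> {
                if (err != null)
                    res.completeExceptionally(err);
                else
                    mapOnTopology().whenComplete((v, e) -> {
                        if (e != null)
                            res.completeExceptionally(e);
                        else
                            res.complete(v);
                    });
            });

        return res;
    }
}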
Use of org.apache.ignite.internal.processors.cache.CacheStoppedException in project ignite by apache.
The class GridNearAtomicSingleUpdateFuture, method mapOnTopology.
/** {@inheritDoc} */
@Override protected void mapOnTopology() {
    AffinityTopologyVersion topVer;

    if (cache.topology().stopping()) {
        completeFuture(null,
            cctx.shared().cache().isCacheRestarting(cache.name()) ?
                new IgniteCacheRestartingException(cache.name()) :
                new CacheStoppedException(cache.name()),
            null);

        return;
    }

    GridDhtTopologyFuture fut = cache.topology().topologyVersionFuture();

    if (fut.isDone()) {
        Throwable err = fut.validateCache(cctx, recovery, /*read*/false, key, null);

        if (err != null) {
            completeFuture(null, err, null);

            return;
        }

        topVer = fut.topologyVersion();
    }
    else {
        assert !topLocked : this;

        // Do not retry on the thread completing the topology future: re-run on a pool thread.
        fut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
            @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> t) {
                cctx.kernalContext().closure().runLocalSafe(new GridPlainRunnable() {
                    @Override public void run() {
                        mapOnTopology();
                    }
                });
            }
        });

        return;
    }

    map(topVer);
}
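Note that the else branch does not retry inside the topology listener itself: it re-dispatches mapOnTopology() through runLocalSafe, so the retry runs on a pool thread rather than on the thread that completes the exchange future. A small sketch of that re-dispatch idiom in plain java.util.concurrent terms; RetryOnCompletion and mapWhenReady are illustrative names, not Ignite API.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;

/** Sketch: re-dispatch a retry to a worker pool when a future completes (names are illustrative). */
final class RetryOnCompletion {
    private final Executor pool = Executors.newSingleThreadExecutor();

    void mapWhenReady(CompletableFuture<Long> topVerFut, Runnable retry) {
        if (topVerFut.isDone())
            retry.run(); // Topology already known: proceed inline.
        else
            // Like fut.listen(...) + runLocalSafe(...): never run the heavy retry
            // on the thread that completes the topology future.
            topVerFut.whenComplete((ver, err) -> pool.execute(retry));
    }
}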
Use of org.apache.ignite.internal.processors.cache.CacheStoppedException in project ignite by apache.
The class GridNearAtomicUpdateFuture, method mapOnTopology.
/** {@inheritDoc} */
@Override protected void mapOnTopology() {
    AffinityTopologyVersion topVer;

    if (cache.topology().stopping()) {
        completeFuture(null,
            cctx.shared().cache().isCacheRestarting(cache.name()) ?
                new IgniteCacheRestartingException(cache.name()) :
                new CacheStoppedException(cache.name()),
            null);

        return;
    }

    GridDhtTopologyFuture fut = cache.topology().topologyVersionFuture();

    if (fut.isDone()) {
        Throwable err = fut.validateCache(cctx, recovery, false, null, keys);

        if (err != null) {
            completeFuture(null, err, null);

            return;
        }

        topVer = fut.topologyVersion();
    }
    else {
        assert !topLocked : this;

        fut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
            @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> t) {
                cctx.kernalContext().closure().runLocalSafe(new GridPlainRunnable() {
                    @Override public void run() {
                        mapOnTopology();
                    }
                });
            }
        });

        return;
    }

    map(topVer, remapKeys);
}
Use of org.apache.ignite.internal.processors.cache.CacheStoppedException in project ignite by apache.
The class PartitionsEvictManager, method onCacheGroupStopped.
/**
 * Stops the eviction process for a group.
 *
 * The method awaits eviction of the last offered partition.
 *
 * @param grp Group context.
 */
public void onCacheGroupStopped(CacheGroupContext grp) {
    // Must keep the context in the map to avoid a race with a subsequent clearing request
    // arriving after this method is called.
    GroupEvictionContext grpEvictionCtx =
        evictionGroupsMap.computeIfAbsent(grp.groupId(), p -> new GroupEvictionContext(grp));

    grpEvictionCtx.stop(new CacheStoppedException(grp.cacheOrGroupName()));
}
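As the comment notes, the method deliberately inserts the context with computeIfAbsent rather than merely looking it up, so that a clearing request racing with the stop still finds a context that is already marked stopped. A compact sketch of that stop-wins-the-race bookkeeping follows; EvictionRegistry and GroupCtx are hypothetical, not the real Ignite classes.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/** Sketch of stop-wins-the-race bookkeeping via computeIfAbsent (GroupCtx is hypothetical). */
final class EvictionRegistry {
    static final class GroupCtx {
        volatile Exception stopErr;

        void stop(Exception err) {
            stopErr = err; // Later eviction offers see stopErr != null and bail out.
        }
    }

    private final Map<Integer, GroupCtx> groups = new ConcurrentHashMap<>();

    /** Insert-then-stop so a concurrent clearing request finds an already-stopped context. */
    void onGroupStopped(int grpId) {
        groups.computeIfAbsent(grpId, id -> new GroupCtx())
            .stop(new Exception("Group " + grpId + " stopped"));
    }
}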
Use of org.apache.ignite.internal.processors.cache.CacheStoppedException in project ignite by apache.
The class GridDhtAtomicCache, method updateAllAsyncInternal0.
/**
 * Executes a local update after the preloader has fetched the values.
 *
 * @param node Node.
 * @param req Update request.
 * @param completionCb Completion callback.
 */
private void updateAllAsyncInternal0(final ClusterNode node, final GridNearAtomicAbstractUpdateRequest req,
    final UpdateReplyClosure completionCb) {
    GridNearAtomicUpdateResponse res = new GridNearAtomicUpdateResponse(ctx.cacheId(), node.id(), req.futureId(),
        req.partition(), false, ctx.deploymentEnabled());

    assert !req.returnValue() || (req.operation() == TRANSFORM || req.size() == 1);

    GridDhtAtomicAbstractUpdateFuture dhtFut = null;

    IgniteCacheExpiryPolicy expiry = null;

    boolean needTaskName = ctx.events().isRecordable(EVT_CACHE_OBJECT_READ) ||
        ctx.events().isRecordable(EVT_CACHE_OBJECT_PUT) ||
        ctx.events().isRecordable(EVT_CACHE_OBJECT_REMOVED);

    String taskName = needTaskName ? ctx.kernalContext().task().resolveTaskName(req.taskNameHash()) : null;

    ctx.shared().database().checkpointReadLock();

    try {
        ctx.shared().database().ensureFreeSpace(ctx.dataRegion());

        // If batch store update is enabled, we need to lock all entries.
        // First, need to acquire locks on cache entries, then check filter.
        List<GridDhtCacheEntry> locked = lockEntries(req, req.topologyVersion());

        Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;

        DhtAtomicUpdateResult updDhtRes = new DhtAtomicUpdateResult();

        try {
            while (true) {
                try {
                    GridDhtPartitionTopology top = topology();

                    top.readLock();

                    try {
                        if (top.stopping()) {
                            if (ctx.shared().cache().isCacheRestarting(name()))
                                res.addFailedKeys(req.keys(), new IgniteCacheRestartingException(name()));
                            else
                                res.addFailedKeys(req.keys(), new CacheStoppedException(name()));

                            completionCb.apply(req, res);

                            return;
                        }

                        boolean remap = false;

                        // Do not check topology version if topology was locked on near node by
                        // external transaction or explicit lock.
                        if (!req.topologyLocked()) {
                            AffinityTopologyVersion waitVer = top.topologyVersionFuture().initialVersion();

                            // No need to remap if next future version is compatible.
                            boolean compatible = waitVer.isBetween(req.lastAffinityChangedTopologyVersion(),
                                req.topologyVersion());

                            // Can not wait for topology future since it will break
                            // GridNearAtomicCheckUpdateRequest processing.
                            remap = !compatible && !top.topologyVersionFuture().isDone() ||
                                needRemap(req.topologyVersion(), top.readyTopologyVersion());
                        }

                        if (!remap) {
                            update(node, locked, req, res, updDhtRes, taskName);

                            dhtFut = updDhtRes.dhtFuture();
                            deleted = updDhtRes.deleted();
                            expiry = updDhtRes.expiryPolicy();
                        }
                        else
                            // Should remap all keys.
                            res.remapTopologyVersion(top.lastTopologyChangeVersion());
                    }
                    finally {
                        top.readUnlock();
                    }

                    // Must be done outside topology read lock to avoid deadlocks.
                    if (res.returnValue() != null)
                        res.returnValue().marshalResult(ctx);

                    break;
                }
                catch (UnregisteredClassException ex) {
                    IgniteCacheObjectProcessor cacheObjProc = ctx.cacheObjects();

                    assert cacheObjProc instanceof CacheObjectBinaryProcessorImpl;

                    ((CacheObjectBinaryProcessorImpl)cacheObjProc).binaryContext()
                        .registerClass(ex.cls(), true, false);
                }
                catch (UnregisteredBinaryTypeException ex) {
                    if (ex.future() != null) {
                        // Wait for the future that couldn't be processed because of
                        // IgniteThread#isForbiddenToRequestBinaryMetadata flag being true. Usually this means
                        // that awaiting for the future right there would lead to potential deadlock if
                        // continuous queries are used in parallel with entry processor.
                        ex.future().get();

                        // Retry and don't update current binary metadata, because it most likely already exists.
                        continue;
                    }

                    IgniteCacheObjectProcessor cacheObjProc = ctx.cacheObjects();

                    assert cacheObjProc instanceof CacheObjectBinaryProcessorImpl;

                    ((CacheObjectBinaryProcessorImpl)cacheObjProc).binaryContext()
                        .updateMetadata(ex.typeId(), ex.binaryMetadata(), false);
                }
            }
        }
        catch (GridCacheEntryRemovedException e) {
            assert false : "Entry should not become obsolete while holding lock.";

            e.printStackTrace();
        }
        finally {
            if (locked != null)
                unlockEntries(locked, req.topologyVersion());

            // Enqueue if necessary after locks release.
            if (deleted != null) {
                assert !deleted.isEmpty();
                assert ctx.deferredDelete() : this;

                for (IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion> e : deleted)
                    ctx.onDeferredDelete(e.get1(), e.get2());
            }

            // TODO fire events only after successful fsync
            if (ctx.shared().wal() != null)
                ctx.shared().wal().flush(null, false);
        }
    }
    catch (GridDhtInvalidPartitionException ignore) {
        if (log.isDebugEnabled())
            log.debug("Caught invalid partition exception for cache entry (will remap update request): " + req);

        res.remapTopologyVersion(ctx.topology().lastTopologyChangeVersion());
    }
    catch (Throwable e) {
        // At least RuntimeException can be thrown by the code above when GridCacheContext is cleaned and there is
        // an attempt to use cleaned resources.
        U.error(log, "Unexpected exception during cache update", e);

        res.addFailedKeys(req.keys(), e);

        completionCb.apply(req, res);

        if (e instanceof Error)
            throw (Error)e;

        return;
    }
    finally {
        ctx.shared().database().checkpointReadUnlock();
    }

    if (res.remapTopologyVersion() != null) {
        assert dhtFut == null;

        completionCb.apply(req, res);
    }
    else {
        if (dhtFut != null)
            dhtFut.map(node, res.returnValue(), res, completionCb);
    }

    if (req.writeSynchronizationMode() != FULL_ASYNC)
        req.cleanup(!node.isLocal());

    sendTtlUpdateRequest(expiry);
}
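The while (true) loop above implements a register-and-retry idiom: when an update pass fails because a class or binary type is not yet registered, the metadata is registered (or its in-flight registration awaited) and the whole pass is retried. A stripped-down sketch of that idiom follows; UnknownTypeException and TypeRegistry are hypothetical stand-ins, not Ignite types.

/** Sketch of the register-and-retry idiom used above (UnknownTypeException and TypeRegistry are hypothetical). */
final class RetryingUpdater {
    static final class UnknownTypeException extends RuntimeException {
        final Class<?> cls;

        UnknownTypeException(Class<?> cls) {
            this.cls = cls;
        }
    }

    interface TypeRegistry {
        void register(Class<?> cls);
    }

    void updateWithRetry(Runnable update, TypeRegistry registry) {
        while (true) {
            try {
                update.run();

                return; // Mirrors the break after a successful pass.
            }
            catch (UnknownTypeException ex) {
                registry.register(ex.cls); // Register lazily, then retry the whole pass.
            }
        }
    }
}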