Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
Example source: class GridNearAtomicSingleUpdateFuture, method mapOnTopology().
/** {@inheritDoc} */
@Override protected void mapOnTopology() {
    AffinityTopologyVersion topVer;

    if (cache.topology().stopping()) {
        completeFuture(null,
            cctx.shared().cache().isCacheRestarting(cache.name())
                ? new IgniteCacheRestartingException(cache.name())
                : new CacheStoppedException(cache.name()),
            null);

        return;
    }

    GridDhtTopologyFuture fut = cache.topology().topologyVersionFuture();

    if (fut.isDone()) {
        Throwable err = fut.validateCache(cctx, recovery, /*read*/false, key, null);

        if (err != null) {
            completeFuture(null, err, null);

            return;
        }

        topVer = fut.topologyVersion();
    }
    else {
        assert !topLocked : this;

        fut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
            @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> t) {
                cctx.kernalContext().closure().runLocalSafe(new GridPlainRunnable() {
                    @Override public void run() {
                        mapOnTopology();
                    }
                });
            }
        });

        return;
    }

    map(topVer);
}
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
Example source: class GridNearAtomicUpdateFuture, method mapOnTopology().
/** {@inheritDoc} */
@Override protected void mapOnTopology() {
    AffinityTopologyVersion topVer;

    if (cache.topology().stopping()) {
        completeFuture(null,
            cctx.shared().cache().isCacheRestarting(cache.name())
                ? new IgniteCacheRestartingException(cache.name())
                : new CacheStoppedException(cache.name()),
            null);

        return;
    }

    GridDhtTopologyFuture fut = cache.topology().topologyVersionFuture();

    if (fut.isDone()) {
        Throwable err = fut.validateCache(cctx, recovery, /*read*/false, null, keys);

        if (err != null) {
            completeFuture(null, err, null);

            return;
        }

        topVer = fut.topologyVersion();
    }
    else {
        assert !topLocked : this;

        fut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
            @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> t) {
                cctx.kernalContext().closure().runLocalSafe(new GridPlainRunnable() {
                    @Override public void run() {
                        mapOnTopology();
                    }
                });
            }
        });

        return;
    }

    map(topVer, remapKeys);
}
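Both mapOnTopology() variants above follow the same pattern around GridDhtTopologyFuture: if the topology version future is already done, the cache is validated against it and the update is mapped onto that topology version; otherwise a listener re-enters mapOnTopology() from a pool thread once the exchange completes. Below is a minimal, self-contained sketch of that retry-on-topology-ready pattern using plain java.util.concurrent types; all names are illustrative stand-ins, not the Ignite API.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;

/** Illustrative stand-in for the "map once topology is ready" pattern (not Ignite API). */
class MapOnTopologySketch {
    /** Stand-in for GridDhtTopologyFuture: completes with a topology version. */
    private final CompletableFuture<Long> topVerFut;

    /** Stand-in for the closure pool used by runLocalSafe(). */
    private final Executor pool;

    MapOnTopologySketch(CompletableFuture<Long> topVerFut, Executor pool) {
        this.topVerFut = topVerFut;
        this.pool = pool;
    }

    /** Mirrors mapOnTopology(): map now if the exchange is done, otherwise retry when it completes. */
    void mapOnTopology() {
        if (topVerFut.isDone()) {
            // In Ignite this is the point where validateCache(...) runs before the version is used.
            long topVer = topVerFut.join();

            map(topVer);
        }
        else {
            // Equivalent of fut.listen(...) + runLocalSafe(...): re-enter from a pool thread.
            topVerFut.whenComplete((ver, err) -> pool.execute(this::mapOnTopology));
        }
    }

    private void map(long topVer) {
        System.out.println("Mapping update on topology version " + topVer);
    }
}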
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
Example source: class GridNearTxAbstractEnlistFuture, method init().
/** */
public void init() {
    if (timeout < 0) {
        // Time is out.
        onDone(timeoutException());

        return;
    }
    else if (timeout > 0)
        timeoutObj = new LockTimeoutObject();

    while (true) {
        IgniteInternalFuture<?> fut = tx.lockFuture();

        if (fut == GridDhtTxLocalAdapter.ROLLBACK_FUT) {
            onDone(tx.timedOut() ? tx.timeoutException() : tx.rollbackException());

            return;
        }
        else if (fut != null) {
            // Wait for previous future.
            assert fut instanceof GridNearTxAbstractEnlistFuture || fut instanceof GridDhtTxAbstractEnlistFuture : fut;

            // Terminate this future if parent future is terminated by rollback.
            if (!fut.isDone()) {
                fut.listen(new IgniteInClosure<IgniteInternalFuture>() {
                    @Override public void apply(IgniteInternalFuture fut) {
                        if (fut.error() != null)
                            onDone(fut.error());
                    }
                });
            }
            else if (fut.error() != null)
                onDone(fut.error());

            break;
        }
        else if (tx.updateLockFuture(null, this))
            break;
    }

    boolean added = cctx.mvcc().addFuture(this);

    assert added : this;

    if (isDone()) {
        cctx.mvcc().removeFuture(futId);

        return;
    }

    try {
        tx.addActiveCache(cctx, false);
    }
    catch (IgniteCheckedException e) {
        onDone(e);

        return;
    }

    if (timeoutObj != null)
        cctx.time().addTimeoutObject(timeoutObj);

    // Obtain the topology version to use.
    long threadId = Thread.currentThread().getId();

    AffinityTopologyVersion topVer = cctx.mvcc().lastExplicitLockTopologyVersion(threadId);

    // If there is another system transaction in progress, use its topology version to prevent deadlock.
    if (topVer == null && tx.system())
        topVer = cctx.tm().lockedTopologyVersion(threadId, tx);

    if (topVer != null)
        tx.topologyVersion(topVer);

    if (topVer == null)
        topVer = tx.topologyVersionSnapshot();

    if (topVer != null) {
        for (GridDhtTopologyFuture fut : cctx.shared().exchange().exchangeFutures()) {
            if (fut.exchangeDone() && fut.topologyVersion().equals(topVer)) {
                Throwable err = null;

                // Before cache validation, make sure that this topology future is already completed.
                try {
                    fut.get();
                }
                catch (IgniteCheckedException e) {
                    err = fut.error();
                }

                if (err == null)
                    err = fut.validateCache(cctx, false, false, null, null);

                if (err != null) {
                    onDone(err);

                    return;
                }

                break;
            }
        }

        if (this.topVer == null)
            this.topVer = topVer;

        map(true);

        return;
    }

    mapOnTopology();
}
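The GridDhtTopologyFuture usage in init() above is the loop over cctx.shared().exchange().exchangeFutures(): when the transaction already carries a topology version, the matching finished exchange future is awaited, its error is checked, and the cache is validated against it before mapping. A rough, self-contained sketch of that step follows; the types below are hypothetical stand-ins, not Ignite's internals.

import java.util.List;

/** Hypothetical stand-in for a finished exchange future (not the Ignite API). */
class ExchangeSketch {
    final long topVer;
    final boolean done;
    final Throwable err;

    ExchangeSketch(long topVer, boolean done, Throwable err) {
        this.topVer = topVer;
        this.done = done;
        this.err = err;
    }

    /** Stand-in for GridDhtTopologyFuture.validateCache(...). */
    Throwable validateCache() {
        // Real validation would also check cache state and lost partitions.
        return err;
    }
}

class EnlistInitSketch {
    /**
     * Mirrors the init() step above: locate the finished exchange matching topVer,
     * surface its error if any, otherwise validate the cache against it.
     * Returns an error preventing the operation, or null if it may proceed.
     */
    static Throwable validateAgainstTopology(List<ExchangeSketch> exchangeFutures, long topVer) {
        for (ExchangeSketch fut : exchangeFutures) {
            if (fut.done && fut.topVer == topVer) {
                Throwable err = fut.err;

                if (err == null)
                    err = fut.validateCache();

                return err;
            }
        }

        // No matching exchange found: in the real code the caller falls back to mapOnTopology().
        return null;
    }
}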
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
Example source: class CacheMvccAbstractBasicCoordinatorFailoverTest, method checkCoordinatorsLeft().
/**
 * @param num Number of coordinators to stop.
 * @param stopCrdFirst {@code True} to stop all coordinator nodes at once; {@code false} to stop the oldest node last.
 * @throws Exception If failed.
 */
@SuppressWarnings("unchecked")
private void checkCoordinatorsLeft(int num, boolean stopCrdFirst) throws Exception {
    disableScheduledVacuum = true;

    final int DATA_NODES = 3;
    final int NODES = num + DATA_NODES;

    nodeAttr = CRD_ATTR;

    // Do not use startMultithreaded here.
    startGrids(num);

    nodeAttr = null;

    startGridsMultiThreaded(num, DATA_NODES);

    List<Ignite> victims = new ArrayList<>(num);
    List<Ignite> survivors = new ArrayList<>(DATA_NODES);

    for (int i = 0; i < NODES; i++) {
        if (i < num)
            victims.add(grid(i));
        else
            survivors.add(grid(i));
    }

    if (log.isInfoEnabled()) {
        log.info("Nodes to be stopped [" +
            victims.stream().map(n -> n.cluster().localNode().id().toString()).collect(Collectors.joining(", ")) + ']');

        log.info("Nodes not to be stopped [" +
            survivors.stream().map(n -> n.cluster().localNode().id().toString()).collect(Collectors.joining(", ")) + ']');
    }

    Ignite nearNode = survivors.get(0);

    if (persistence)
        nearNode.cluster().active(true);

    CacheConfiguration ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, DATA_NODES - 1, DFLT_PARTITION_COUNT)
        .setNodeFilter(new CoordinatorNodeFilter());

    IgniteCache cache = nearNode.createCache(ccfg);

    try (Transaction tx = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        for (int key = 0; key < 10; key++)
            cache.put(key, 1);

        tx.commit();
    }

    List<Thread> stopThreads = victims.stream().map(v -> new Thread(() -> stopGrid(v.name()))).collect(Collectors.toList());

    ScanQuery<Object, Object> scan = new ScanQuery<>();

    QueryCursor<Cache.Entry<Object, Object>> cur = survivors.get(0).cache(DEFAULT_CACHE_NAME).query(scan);

    Iterator<Cache.Entry<Object, Object>> it = cur.iterator();

    assertTrue(it.hasNext());
    assertEquals(1, it.next().getValue());

    if (log.isInfoEnabled())
        log.info("Start stopping nodes.");

    // Stop nodes and join threads.
    if (stopCrdFirst) {
        for (Thread t : stopThreads)
            t.start();
    }
    else {
        // We should stop the oldest node last.
        GridCachePartitionExchangeManager exch = ((IgniteEx)survivors.get(1)).context().cache().context().exchange();

        GridDhtTopologyFuture lastFinished = exch.lastFinishedFuture();

        for (int i = 1; i < stopThreads.size(); i++)
            stopThreads.get(i).start();

        while (lastFinished == exch.lastTopologyFuture())
            doSleep(1);

        stopThreads.get(0).start();
    }

    for (Thread t : stopThreads)
        t.join();

    if (log.isInfoEnabled())
        log.info("All nodes stopped.");

    assertTrue(it.hasNext());
    assertEquals(1, it.next().getValue());

    for (Ignite node : survivors) {
        for (int key = 0; key < 10; key++)
            assertEquals(1, node.cache(DEFAULT_CACHE_NAME).get(key));
    }

    try (Transaction tx = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        for (int key = 0; key < 10; key++)
            cache.put(key, 2);

        tx.commit();
    }
    catch (Exception e) {
        stopAllGrids(true);

        fail(X.getFullStackTrace(e));
    }

    for (Ignite node : survivors) {
        for (int key = 0; key < 10; key++)
            assertEquals(2, node.cache(DEFAULT_CACHE_NAME).get(key));
    }

    try (Transaction tx = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        for (int key = 0; key < 10; key++)
            cache.put(key, 3);

        tx.commit();
    }
    catch (Exception e) {
        stopAllGrids(true);

        fail(X.getFullStackTrace(e));
    }

    for (Ignite node : survivors) {
        for (int key = 0; key < 10; key++)
            assertEquals(3, node.cache(DEFAULT_CACHE_NAME).get(key));
    }

    while (it.hasNext())
        assertEquals(1, (int)it.next().getValue());

    cur.close();
}
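In the branch where stopCrdFirst is false, GridDhtTopologyFuture is used only as a change marker: the test captures exchange.lastFinishedFuture(), starts stopping the younger coordinators, and spins until exchange.lastTopologyFuture() returns a different future (meaning a new exchange has started) before stopping the oldest node. A small sketch of that wait-for-a-new-topology-future idiom follows, with stand-in names rather than the real exchange manager API.

import java.util.concurrent.atomic.AtomicReference;

/** Hypothetical stand-in for the exchange manager's topology-future accessors (not Ignite API). */
class ExchangeManagerSketch {
    private final AtomicReference<Object> lastTopFut = new AtomicReference<>(new Object());

    /** Stand-in for lastFinishedFuture() / lastTopologyFuture(). */
    Object lastTopologyFuture() {
        return lastTopFut.get();
    }

    /** Called when a node joins or leaves and a new exchange future is created. */
    void onTopologyChanged() {
        lastTopFut.set(new Object());
    }

    /** Mirrors the test's spin loop: block until a new exchange future has replaced the old one. */
    static void awaitNewExchange(ExchangeManagerSketch exch) throws InterruptedException {
        Object lastFinished = exch.lastTopologyFuture();

        while (lastFinished == exch.lastTopologyFuture())
            Thread.sleep(1); // The test uses doSleep(1) for the same purpose.
    }
}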
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
Example source: class GridCacheAdapter, method getAllAsync0().
/**
* @param keys Keys.
* @param readerArgs Near cache reader will be added if not null.
* @param readThrough Read-through flag.
* @param checkTx Check local transaction flag.
* @param taskName Task name.
* @param deserializeBinary Deserialize binary flag.
* @param expiry Expiry policy.
* @param skipVals Skip values flag.
* @param keepCacheObjects Keep cache objects.
* @param needVer If {@code true} returns values as tuples containing value and version.
* @param txLbl Transaction label.
* @param mvccSnapshot MVCC snapshot.
* @return Future.
*/
protected final <K1, V1> IgniteInternalFuture<Map<K1, V1>> getAllAsync0(
    @Nullable final Collection<KeyCacheObject> keys,
    @Nullable final ReaderArguments readerArgs,
    final boolean readThrough,
    boolean checkTx,
    final String taskName,
    final boolean deserializeBinary,
    @Nullable final IgniteCacheExpiryPolicy expiry,
    final boolean skipVals,
    final boolean keepCacheObjects,
    final boolean recovery,
    final ReadRepairStrategy readRepairStrategy,
    final boolean needVer,
    @Nullable String txLbl,
    MvccSnapshot mvccSnapshot) {
    if (F.isEmpty(keys))
        return new GridFinishedFuture<>(Collections.<K1, V1>emptyMap());

    GridNearTxLocal tx = null;

    if (checkTx) {
        try {
            checkJta();
        }
        catch (IgniteCheckedException e) {
            return new GridFinishedFuture<>(e);
        }

        tx = checkCurrentTx();
    }

    if (ctx.mvccEnabled() || tx == null || tx.implicit()) {
        assert (mvccSnapshot == null) == !ctx.mvccEnabled();

        Map<KeyCacheObject, EntryGetResult> misses = null;

        Set<GridCacheEntryEx> newLocalEntries = null;

        final AffinityTopologyVersion topVer = tx == null ? ctx.affinity().affinityTopologyVersion() : tx.topologyVersion();

        ctx.shared().database().checkpointReadLock();

        try {
            int keysSize = keys.size();

            GridDhtTopologyFuture topFut = ctx.shared().exchange().lastFinishedFuture();

            Throwable ex = topFut != null ? topFut.validateCache(ctx, recovery, /*read*/true, null, keys) : null;

            if (ex != null)
                return new GridFinishedFuture<>(ex);

            final Map<K1, V1> map = keysSize == 1 ? (Map<K1, V1>)new IgniteBiTuple<>() : U.<K1, V1>newHashMap(keysSize);

            final boolean storeEnabled = !skipVals && readThrough && ctx.readThrough();

            boolean readNoEntry = ctx.readNoEntry(expiry, readerArgs != null);

            for (KeyCacheObject key : keys) {
                while (true) {
                    try {
                        EntryGetResult res = null;

                        boolean evt = !skipVals;
                        boolean updateMetrics = !skipVals;

                        GridCacheEntryEx entry = null;

                        boolean skipEntry = readNoEntry;

                        if (readNoEntry) {
                            CacheDataRow row = mvccSnapshot != null ?
                                ctx.offheap().mvccRead(ctx, key, mvccSnapshot) :
                                ctx.offheap().read(ctx, key);

                            if (row != null) {
                                long expireTime = row.expireTime();

                                if (expireTime != 0) {
                                    if (expireTime > U.currentTimeMillis()) {
                                        res = new EntryGetWithTtlResult(row.value(), row.version(), false, expireTime, 0);
                                    }
                                    else
                                        skipEntry = false;
                                }
                                else
                                    res = new EntryGetResult(row.value(), row.version(), false);
                            }

                            if (res != null) {
                                if (evt) {
                                    ctx.events().readEvent(key, null, txLbl, row.value(), taskName, !deserializeBinary);
                                }

                                if (updateMetrics && ctx.statisticsEnabled())
                                    ctx.cache().metrics0().onRead(true);
                            }
                            else if (storeEnabled)
                                skipEntry = false;
                        }

                        if (!skipEntry) {
                            boolean isNewLocalEntry = this.map.getEntry(ctx, key) == null;

                            entry = entryEx(key);

                            if (entry == null) {
                                if (!skipVals && ctx.statisticsEnabled())
                                    ctx.cache().metrics0().onRead(false);

                                break;
                            }

                            if (isNewLocalEntry) {
                                if (newLocalEntries == null)
                                    newLocalEntries = new HashSet<>();

                                newLocalEntries.add(entry);
                            }

                            if (storeEnabled) {
                                res = entry.innerGetAndReserveForLoad(updateMetrics, evt, taskName, expiry, !deserializeBinary, readerArgs);

                                assert res != null;

                                if (res.value() == null) {
                                    if (misses == null)
                                        misses = new HashMap<>();

                                    misses.put(key, res);

                                    res = null;
                                }
                            }
                            else {
                                res = entry.innerGetVersioned(null, null, updateMetrics, evt, null, taskName, expiry, !deserializeBinary, readerArgs);

                                if (res == null)
                                    entry.touch();
                            }
                        }

                        if (res != null) {
                            ctx.addResult(map, key, res, skipVals, keepCacheObjects, deserializeBinary, true, needVer);

                            if (entry != null && (tx == null || (!tx.implicit() && tx.isolation() == READ_COMMITTED)))
                                entry.touch();

                            if (keysSize == 1)
                                // Safe to return because no locks are required in READ_COMMITTED mode.
                                return new GridFinishedFuture<>(map);
                        }

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry in getAllAsync(..) method (will retry): " + key);
                    }
                }
            }

            if (storeEnabled && misses != null) {
                final Map<KeyCacheObject, EntryGetResult> loadKeys = misses;

                final IgniteTxLocalAdapter tx0 = tx;

                final Collection<KeyCacheObject> loaded = new HashSet<>();

                return new GridEmbeddedFuture(
                    ctx.closures().callLocalSafe(ctx.projectSafe(new GPC<Map<K1, V1>>() {
                        @Override public Map<K1, V1> call() throws Exception {
                            ctx.store().loadAll(null/*tx*/, loadKeys.keySet(), new CI2<KeyCacheObject, Object>() {
                                @Override public void apply(KeyCacheObject key, Object val) {
                                    EntryGetResult res = loadKeys.get(key);

                                    if (res == null || val == null)
                                        return;

                                    loaded.add(key);

                                    CacheObject cacheVal = ctx.toCacheObject(val);

                                    while (true) {
                                        GridCacheEntryEx entry = null;

                                        try {
                                            ctx.shared().database().ensureFreeSpace(ctx.dataRegion());
                                        }
                                        catch (IgniteCheckedException e) {
                                            // Wrap errors (will be unwrapped).
                                            throw new GridClosureException(e);
                                        }

                                        ctx.shared().database().checkpointReadLock();

                                        try {
                                            entry = entryEx(key);

                                            entry.unswap();

                                            GridCacheVersion newVer = nextVersion();

                                            EntryGetResult verVal = entry.versionedValue(cacheVal, res.version(), newVer, expiry, readerArgs);

                                            if (log.isDebugEnabled())
                                                log.debug("Set value loaded from store into entry [oldVer=" + res.version() +
                                                    ", newVer=" + verVal.version() + ", entry=" + entry + ']');

                                            // Don't put key-value pair into result map if value is null.
                                            if (verVal.value() != null) {
                                                ctx.addResult(map, key, verVal, skipVals, keepCacheObjects, deserializeBinary, true, needVer);
                                            }
                                            else {
                                                ctx.addResult(map, key, new EntryGetResult(cacheVal, res.version()), skipVals, keepCacheObjects, deserializeBinary, false, needVer);
                                            }

                                            if (tx0 == null || (!tx0.implicit() && tx0.isolation() == READ_COMMITTED))
                                                entry.touch();

                                            break;
                                        }
                                        catch (GridCacheEntryRemovedException ignore) {
                                            if (log.isDebugEnabled())
                                                log.debug("Got removed entry during getAllAsync (will retry): " + entry);
                                        }
                                        catch (IgniteCheckedException e) {
                                            // Wrap errors (will be unwrapped).
                                            throw new GridClosureException(e);
                                        }
                                        finally {
                                            ctx.shared().database().checkpointReadUnlock();
                                        }
                                    }
                                }
                            });

                            clearReservationsIfNeeded(topVer, loadKeys, loaded, tx0);

                            return map;
                        }
                    }), true),
                    new C2<Map<K, V>, Exception, IgniteInternalFuture<Map<K, V>>>() {
                        @Override public IgniteInternalFuture<Map<K, V>> apply(Map<K, V> map, Exception e) {
                            if (e != null) {
                                clearReservationsIfNeeded(topVer, loadKeys, loaded, tx0);

                                return new GridFinishedFuture<>(e);
                            }

                            if (tx0 == null || (!tx0.implicit() && tx0.isolation() == READ_COMMITTED)) {
                                Collection<KeyCacheObject> notFound = new HashSet<>(loadKeys.keySet());

                                notFound.removeAll(loaded);

                                // Touch entries that were not found in store.
                                for (KeyCacheObject key : notFound) {
                                    GridCacheEntryEx entry = peekEx(key);

                                    if (entry != null)
                                        entry.touch();
                                }
                            }

                            // There were no misses.
                            return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
                        }
                    },
                    new C2<Map<K1, V1>, Exception, Map<K1, V1>>() {
                        @Override public Map<K1, V1> apply(Map<K1, V1> loaded, Exception e) {
                            if (e == null)
                                map.putAll(loaded);

                            return map;
                        }
                    });
            }
            else
                // Misses can be non-zero only if store is enabled.
                assert misses == null;

            return new GridFinishedFuture<>(map);
        }
        catch (RuntimeException | AssertionError e) {
            if (misses != null) {
                for (KeyCacheObject key0 : misses.keySet()) {
                    GridCacheEntryEx entry = peekEx(key0);

                    if (entry != null)
                        entry.touch();
                }
            }

            if (newLocalEntries != null) {
                for (GridCacheEntryEx entry : newLocalEntries)
                    removeEntry(entry);
            }

            return new GridFinishedFuture<>(e);
        }
        catch (IgniteCheckedException e) {
            return new GridFinishedFuture<>(e);
        }
        finally {
            ctx.shared().database().checkpointReadUnlock();
        }
    }
    else {
        return asyncOp(tx, new AsyncOp<Map<K1, V1>>(keys) {
            @Override public IgniteInternalFuture<Map<K1, V1>> op(GridNearTxLocal tx, AffinityTopologyVersion readyTopVer) {
                return tx.getAllAsync(ctx, readyTopVer, keys, deserializeBinary, skipVals, false, !readThrough, recovery, readRepairStrategy, needVer);
            }
        }, ctx.operationContextPerCall(), /*retry*/false);
    }
}
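For reads taken outside an explicit transaction, getAllAsync0() consults GridDhtTopologyFuture exactly once: it takes ctx.shared().exchange().lastFinishedFuture() and calls validateCache(..., /*read*/true, ...) before touching any entries, returning an already-finished future with the error when validation fails. A compact sketch of that guard is shown below, assuming a hypothetical validator interface in place of the real topology future.

import java.util.Collection;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

/** Hypothetical read-time validator standing in for GridDhtTopologyFuture (not Ignite API). */
interface TopologyValidatorSketch {
    /** Returns an error if the cache cannot currently serve the given keys, or null otherwise. */
    Throwable validateRead(Collection<?> keys);
}

class GetAllGuardSketch {
    /**
     * Mirrors the guard at the top of getAllAsync0(): validate against the last finished
     * exchange before reading; on failure return an already-failed future instead of reading.
     */
    static <K, V> CompletableFuture<Map<K, V>> getAll(
        TopologyValidatorSketch lastFinishedExchange,
        Collection<K> keys,
        java.util.function.Function<Collection<K>, Map<K, V>> reader) {
        Throwable ex = lastFinishedExchange != null ? lastFinishedExchange.validateRead(keys) : null;

        if (ex != null) {
            CompletableFuture<Map<K, V>> failed = new CompletableFuture<>();
            failed.completeExceptionally(ex);

            return failed;
        }

        // Validation passed: perform the actual read (the real method walks entries and the store).
        return CompletableFuture.completedFuture(reader.apply(keys));
    }
}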