use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
the class GridCacheAdapter method getAllAsync0.
/**
* @param keys Keys.
* @param readerArgs Near cache reader will be added if not null.
* @param readThrough Read-through flag.
* @param checkTx Check local transaction flag.
* @param subjId Subject ID.
* @param taskName Task name.
* @param deserializeBinary Deserialize binary flag.
* @param expiry Expiry policy.
* @param skipVals Skip values flag.
* @param keepCacheObjects Keep cache objects.
* @param needVer If {@code true} returns values as tuples containing value and version.
* @return Future.
*/
protected final <K1, V1> IgniteInternalFuture<Map<K1, V1>> getAllAsync0(@Nullable final Collection<KeyCacheObject> keys, @Nullable final ReaderArguments readerArgs, final boolean readThrough, boolean checkTx, @Nullable final UUID subjId, final String taskName, final boolean deserializeBinary, @Nullable final IgniteCacheExpiryPolicy expiry, final boolean skipVals, final boolean keepCacheObjects, final boolean recovery, final boolean needVer) {
if (F.isEmpty(keys))
return new GridFinishedFuture<>(Collections.<K1, V1>emptyMap());
GridNearTxLocal tx = null;
if (checkTx) {
try {
checkJta();
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
}
tx = ctx.tm().threadLocalTx(ctx);
}
if (tx == null || tx.implicit()) {
Map<KeyCacheObject, EntryGetResult> misses = null;
Set<GridCacheEntryEx> newLocalEntries = null;
final AffinityTopologyVersion topVer = tx == null ? ctx.affinity().affinityTopologyVersion() : tx.topologyVersion();
try {
int keysSize = keys.size();
GridDhtTopologyFuture topFut = ctx.shared().exchange().lastFinishedFuture();
Throwable ex = topFut != null ? topFut.validateCache(ctx, recovery, /*read*/true, null, keys) : null;
if (ex != null)
return new GridFinishedFuture<>(ex);
final Map<K1, V1> map = keysSize == 1 ? (Map<K1, V1>) new IgniteBiTuple<>() : U.<K1, V1>newHashMap(keysSize);
final boolean storeEnabled = !skipVals && readThrough && ctx.readThrough();
boolean readNoEntry = ctx.readNoEntry(expiry, readerArgs != null);
for (KeyCacheObject key : keys) {
while (true) {
try {
EntryGetResult res = null;
boolean evt = !skipVals;
boolean updateMetrics = !skipVals;
GridCacheEntryEx entry = null;
boolean skipEntry = readNoEntry;
if (readNoEntry) {
CacheDataRow row = ctx.offheap().read(ctx, key);
if (row != null) {
long expireTime = row.expireTime();
if (expireTime != 0) {
if (expireTime > U.currentTimeMillis()) {
res = new EntryGetWithTtlResult(row.value(), row.version(), false, expireTime, 0);
} else
skipEntry = false;
} else
res = new EntryGetResult(row.value(), row.version(), false);
}
if (res != null) {
if (evt) {
ctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
}
if (updateMetrics && ctx.statisticsEnabled())
ctx.cache().metrics0().onRead(true);
} else if (storeEnabled)
skipEntry = false;
}
if (!skipEntry) {
boolean isNewLocalEntry = this.map.getEntry(ctx, key) == null;
entry = entryEx(key);
if (entry == null) {
if (!skipVals && ctx.statisticsEnabled())
ctx.cache().metrics0().onRead(false);
break;
}
if (isNewLocalEntry) {
if (newLocalEntries == null)
newLocalEntries = new HashSet<>();
newLocalEntries.add(entry);
}
if (storeEnabled) {
res = entry.innerGetAndReserveForLoad(updateMetrics, evt, subjId, taskName, expiry, !deserializeBinary, readerArgs);
assert res != null;
if (res.value() == null) {
if (misses == null)
misses = new HashMap<>();
misses.put(key, res);
res = null;
}
} else {
res = entry.innerGetVersioned(null, null, updateMetrics, evt, subjId, null, taskName, expiry, !deserializeBinary, readerArgs);
if (res == null)
ctx.evicts().touch(entry, topVer);
}
}
if (res != null) {
ctx.addResult(map, key, res, skipVals, keepCacheObjects, deserializeBinary, true, needVer);
if (entry != null && (tx == null || (!tx.implicit() && tx.isolation() == READ_COMMITTED)))
ctx.evicts().touch(entry, topVer);
if (keysSize == 1)
// Safe to return because no locks are required in READ_COMMITTED mode.
return new GridFinishedFuture<>(map);
}
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Got removed entry in getAllAsync(..) method (will retry): " + key);
}
}
}
if (storeEnabled && misses != null) {
final Map<KeyCacheObject, EntryGetResult> loadKeys = misses;
final IgniteTxLocalAdapter tx0 = tx;
final Collection<KeyCacheObject> loaded = new HashSet<>();
return new GridEmbeddedFuture(ctx.closures().callLocalSafe(ctx.projectSafe(new GPC<Map<K1, V1>>() {
@Override
public Map<K1, V1> call() throws Exception {
ctx.store().loadAll(null/*tx*/, loadKeys.keySet(), new CI2<KeyCacheObject, Object>() {
@Override
public void apply(KeyCacheObject key, Object val) {
EntryGetResult res = loadKeys.get(key);
if (res == null || val == null)
return;
loaded.add(key);
CacheObject cacheVal = ctx.toCacheObject(val);
while (true) {
GridCacheEntryEx entry = null;
try {
ctx.shared().database().ensureFreeSpace(ctx.dataRegion());
} catch (IgniteCheckedException e) {
// Wrap errors (will be unwrapped).
throw new GridClosureException(e);
}
ctx.shared().database().checkpointReadLock();
try {
entry = entryEx(key);
entry.unswap();
EntryGetResult verVal = entry.versionedValue(cacheVal, res.version(), null, expiry, readerArgs);
if (log.isDebugEnabled())
log.debug("Set value loaded from store into entry [" + "oldVer=" + res.version() + ", newVer=" + verVal.version() + ", " + "entry=" + entry + ']');
// Don't put key-value pair into result map if value is null.
if (verVal.value() != null) {
ctx.addResult(map, key, verVal, skipVals, keepCacheObjects, deserializeBinary, true, needVer);
}
if (tx0 == null || (!tx0.implicit() && tx0.isolation() == READ_COMMITTED))
ctx.evicts().touch(entry, topVer);
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry during getAllAsync (will retry): " + entry);
} catch (IgniteCheckedException e) {
// Wrap errors (will be unwrapped).
throw new GridClosureException(e);
} finally {
ctx.shared().database().checkpointReadUnlock();
}
}
}
});
clearReservationsIfNeeded(topVer, loadKeys, loaded, tx0);
return map;
}
}), true), new C2<Map<K, V>, Exception, IgniteInternalFuture<Map<K, V>>>() {
@Override
public IgniteInternalFuture<Map<K, V>> apply(Map<K, V> map, Exception e) {
if (e != null) {
clearReservationsIfNeeded(topVer, loadKeys, loaded, tx0);
return new GridFinishedFuture<>(e);
}
if (tx0 == null || (!tx0.implicit() && tx0.isolation() == READ_COMMITTED)) {
Collection<KeyCacheObject> notFound = new HashSet<>(loadKeys.keySet());
notFound.removeAll(loaded);
// Touch entries that were not found in store.
for (KeyCacheObject key : notFound) {
GridCacheEntryEx entry = peekEx(key);
if (entry != null)
ctx.evicts().touch(entry, topVer);
}
}
// There were no misses.
return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
}
}, new C2<Map<K1, V1>, Exception, Map<K1, V1>>() {
@Override
public Map<K1, V1> apply(Map<K1, V1> loaded, Exception e) {
if (e == null)
map.putAll(loaded);
return map;
}
});
} else
// Misses can be non-zero only if store is enabled.
assert misses == null;
return new GridFinishedFuture<>(map);
} catch (RuntimeException | AssertionError e) {
if (misses != null) {
for (KeyCacheObject key0 : misses.keySet()) ctx.evicts().touch(peekEx(key0), topVer);
}
if (newLocalEntries != null) {
for (GridCacheEntryEx entry : newLocalEntries) removeEntry(entry);
}
return new GridFinishedFuture<>(e);
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
}
} else {
return asyncOp(tx, new AsyncOp<Map<K1, V1>>(keys) {
@Override
public IgniteInternalFuture<Map<K1, V1>> op(GridNearTxLocal tx, AffinityTopologyVersion readyTopVer) {
return tx.getAllAsync(ctx, readyTopVer, keys, deserializeBinary, skipVals, false, !readThrough, recovery, needVer);
}
}, ctx.operationContextPerCall(), /*retry*/false);
}
}
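The key use of GridDhtTopologyFuture above is the read validation right before the lookup: the last finished exchange future is asked whether reads on this cache are currently allowed, and any returned error short-circuits the whole operation. Below is a minimal sketch of that pattern pulled into a hypothetical helper; the name validateRead is illustrative, but the calls mirror the ones in getAllAsync0.

/**
 * Hypothetical helper illustrating the read-validation step from getAllAsync0.
 * Returns an already-failed future if the last exchange rejects the read, or {@code null} if the read may proceed.
 */
@Nullable private <K1, V1> IgniteInternalFuture<Map<K1, V1>> validateRead(
    Collection<KeyCacheObject> keys,
    boolean recovery
) {
    // Last completed partition map exchange.
    GridDhtTopologyFuture topFut = ctx.shared().exchange().lastFinishedFuture();

    // Ask the exchange future whether reads on this cache are currently allowed.
    Throwable ex = topFut != null ? topFut.validateCache(ctx, recovery, /*read*/true, null, keys) : null;

    // Propagate a validation error as a finished future, exactly as getAllAsync0 does.
    return ex != null ? new GridFinishedFuture<>(ex) : null;
}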
use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
the class IgniteTxHandler method prepareNearTx.
/**
* @param nearNode Node that initiated transaction.
* @param req Near prepare request.
* @return Prepare future, or {@code null} if the operation needs to be retried.
*/
@Nullable
private IgniteInternalFuture<GridNearTxPrepareResponse> prepareNearTx(final ClusterNode nearNode, final GridNearTxPrepareRequest req) {
IgniteTxEntry firstEntry;
try {
IgniteTxEntry firstWrite = unmarshal(req.writes());
IgniteTxEntry firstRead = unmarshal(req.reads());
firstEntry = firstWrite != null ? firstWrite : firstRead;
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
}
assert firstEntry != null : req;
GridDhtTxLocal tx = null;
GridCacheVersion mappedVer = ctx.tm().mappedVersion(req.version());
if (mappedVer != null) {
tx = ctx.tm().tx(mappedVer);
if (tx == null)
U.warn(log, "Missing local transaction for mapped near version [nearVer=" + req.version() + ", mappedVer=" + mappedVer + ']');
else {
if (req.concurrency() == PESSIMISTIC)
tx.nearFutureId(req.futureId());
}
} else {
GridDhtPartitionTopology top = null;
if (req.firstClientRequest()) {
assert req.concurrency() == OPTIMISTIC : req;
assert CU.clientNode(nearNode) : nearNode;
top = firstEntry.context().topology();
top.readLock();
if (req.allowWaitTopologyFuture()) {
GridDhtTopologyFuture topFut = top.topologyVersionFuture();
if (!topFut.isDone()) {
top.readUnlock();
return null;
}
}
}
try {
if (top != null) {
boolean retry = false;
GridDhtTopologyFuture topFut = top.topologyVersionFuture();
if (!req.allowWaitTopologyFuture() && !topFut.isDone()) {
retry = true;
if (txPrepareMsgLog.isDebugEnabled()) {
txPrepareMsgLog.debug("Topology change is in progress, need remap transaction [" + "txId=" + req.version() + ", node=" + nearNode.id() + ", reqTopVer=" + req.topologyVersion() + ", locTopVer=" + top.readyTopologyVersion() + ", req=" + req + ']');
}
}
if (!retry && needRemap(req.topologyVersion(), top.readyTopologyVersion(), req)) {
retry = true;
if (txPrepareMsgLog.isDebugEnabled()) {
txPrepareMsgLog.debug("Topology version mismatch for near prepare, need remap transaction [" + "txId=" + req.version() + ", node=" + nearNode.id() + ", reqTopVer=" + req.topologyVersion() + ", locTopVer=" + top.readyTopologyVersion() + ", req=" + req + ']');
}
}
if (retry) {
GridNearTxPrepareResponse res = new GridNearTxPrepareResponse(req.partition(), req.version(), req.futureId(), req.miniId(), req.version(), req.version(), null, null, top.lastTopologyChangeVersion(), req.onePhaseCommit(), req.deployInfo() != null);
try {
ctx.io().send(nearNode, res, req.policy());
if (txPrepareMsgLog.isDebugEnabled()) {
txPrepareMsgLog.debug("Sent remap response for near prepare [txId=" + req.version() + ", node=" + nearNode.id() + ']');
}
} catch (ClusterTopologyCheckedException ignored) {
if (txPrepareMsgLog.isDebugEnabled()) {
txPrepareMsgLog.debug("Failed to send remap response for near prepare, node failed [" + "txId=" + req.version() + ", node=" + nearNode.id() + ']');
}
} catch (IgniteCheckedException e) {
U.error(txPrepareMsgLog, "Failed to send remap response for near prepare " + "[txId=" + req.version() + ", node=" + nearNode.id() + ", req=" + req + ']', e);
}
return new GridFinishedFuture<>(res);
}
assert topFut.isDone();
}
tx = new GridDhtTxLocal(ctx, req.topologyVersion(), nearNode.id(), req.version(), req.futureId(), req.miniId(), req.threadId(), req.implicitSingle(), req.implicitSingle(), req.system(), req.explicitLock(), req.policy(), req.concurrency(), req.isolation(), req.timeout(), req.isInvalidate(), true, req.onePhaseCommit(), req.txSize(), req.transactionNodes(), req.subjectId(), req.taskNameHash());
tx = ctx.tm().onCreated(null, tx);
if (tx != null)
tx.topologyVersion(req.topologyVersion());
else
U.warn(log, "Failed to create local transaction (was transaction rolled back?) [xid=" + req.version() + ", req=" + req + ']');
} finally {
if (tx != null)
req.txState(tx.txState());
if (top != null)
top.readUnlock();
}
}
if (tx != null) {
req.txState(tx.txState());
if (req.explicitLock())
tx.explicitLock(true);
tx.transactionNodes(req.transactionNodes());
if (req.near())
tx.nearOnOriginatingNode(true);
if (req.onePhaseCommit()) {
assert req.last() : req;
tx.onePhaseCommit(true);
}
if (req.needReturnValue())
tx.needReturnValue(true);
IgniteInternalFuture<GridNearTxPrepareResponse> fut = tx.prepareAsync(req);
if (tx.isRollbackOnly() && !tx.commitOnPrepare()) {
if (tx.state() != TransactionState.ROLLED_BACK && tx.state() != TransactionState.ROLLING_BACK)
tx.rollbackDhtLocalAsync();
}
final GridDhtTxLocal tx0 = tx;
fut.listen(new CI1<IgniteInternalFuture<?>>() {
@Override
public void apply(IgniteInternalFuture<?> txFut) {
try {
txFut.get();
} catch (IgniteCheckedException e) {
// Just in case.
tx0.setRollbackOnly();
if (!X.hasCause(e, IgniteTxOptimisticCheckedException.class) && !X.hasCause(e, IgniteFutureCancelledException.class) && !ctx.kernalContext().isStopping())
U.error(log, "Failed to prepare DHT transaction: " + tx0, e);
}
}
});
return fut;
} else
return new GridFinishedFuture<>((GridNearTxPrepareResponse) null);
}
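The topology-future handling in prepareNearTx boils down to: under the partition topology read lock, if the current exchange is still in progress, either bail out for a retry (when the request allows waiting for the topology future) or answer with a remap response. A condensed sketch of just that decision is shown below; the helper name shouldPostponePrepare is illustrative, but the GridDhtPartitionTopology and GridDhtTopologyFuture calls are the same ones used above.

/**
 * Hypothetical helper condensing the topology check from prepareNearTx.
 * Returns {@code true} if the prepare should be postponed because the exchange future is not finished yet.
 */
private boolean shouldPostponePrepare(GridDhtPartitionTopology top, GridNearTxPrepareRequest req) {
    top.readLock();

    try {
        GridDhtTopologyFuture topFut = top.topologyVersionFuture();

        // If the request allows waiting, an unfinished exchange means "retry later".
        return req.allowWaitTopologyFuture() && !topFut.isDone();
    }
    finally {
        top.readUnlock();
    }
}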
use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
the class GridCommonAbstractTest method awaitPartitionMapExchange.
/**
* @param waitEvicts If {@code true}, waits for evictions to finish.
* @param waitNode2PartUpdate If {@code true}, waits for the nodes' node2part info update to finish.
* @param nodes Optional nodes. If {@code null}, the method waits for all nodes; for a non-null collection, nodes will
* be filtered.
* @param printPartState If {@code true}, prints the partition state if evictions did not happen.
* @throws InterruptedException If interrupted.
*/
@SuppressWarnings("BusyWait")
protected void awaitPartitionMapExchange(boolean waitEvicts, boolean waitNode2PartUpdate, @Nullable Collection<ClusterNode> nodes, boolean printPartState) throws InterruptedException {
long timeout = getPartitionMapExchangeTimeout();
long startTime = -1;
Set<String> names = new HashSet<>();
Ignite crd = null;
for (Ignite g : G.allGrids()) {
ClusterNode node = g.cluster().localNode();
if (crd == null || node.order() < crd.cluster().localNode().order()) {
crd = g;
if (node.order() == 1)
break;
}
}
if (crd == null)
return;
AffinityTopologyVersion waitTopVer = ((IgniteKernal) crd).context().discovery().topologyVersionEx();
if (waitTopVer.topologyVersion() <= 0)
waitTopVer = new AffinityTopologyVersion(1, 0);
for (Ignite g : G.allGrids()) {
if (nodes != null && !nodes.contains(g.cluster().localNode()))
continue;
IgniteKernal g0 = (IgniteKernal) g;
names.add(g0.configuration().getIgniteInstanceName());
if (startTime != -1) {
if (startTime != g0.context().discovery().gridStartTime())
fail("Found nodes from different clusters, probable some test does not stop nodes " + "[allNodes=" + names + ']');
} else
startTime = g0.context().discovery().gridStartTime();
if (g.cluster().localNode().isDaemon())
continue;
IgniteInternalFuture<?> exchFut = g0.context().cache().context().exchange().affinityReadyFuture(waitTopVer);
if (exchFut != null && !exchFut.isDone()) {
try {
exchFut.get(timeout);
} catch (IgniteCheckedException e) {
log.error("Failed to wait for exchange [topVer=" + waitTopVer + ", node=" + g0.name() + ']', e);
}
}
for (IgniteCacheProxy<?, ?> c : g0.context().cache().jcaches()) {
CacheConfiguration cfg = c.context().config();
if (cfg == null)
continue;
if (cfg.getCacheMode() != LOCAL && cfg.getRebalanceMode() != NONE && g.cluster().nodes().size() > 1) {
AffinityFunction aff = cfg.getAffinity();
GridDhtCacheAdapter<?, ?> dht = dht(c);
GridDhtPartitionTopology top = dht.topology();
for (int p = 0; p < aff.partitions(); p++) {
long start = 0;
for (int i = 0; ; i++) {
boolean match = false;
GridCachePartitionExchangeManager<?, ?> exchMgr = dht.context().shared().exchange();
AffinityTopologyVersion readyVer = exchMgr.readyAffinityVersion();
// Otherwise, there may be an assertion when printing top.readyTopologyVersion().
try {
IgniteInternalFuture<?> fut = exchMgr.affinityReadyFuture(readyVer);
if (fut != null)
fut.get();
} catch (IgniteCheckedException e) {
throw new IgniteException(e);
}
if (readyVer.topologyVersion() > 0 && c.context().started()) {
// Must map on updated version of topology.
Collection<ClusterNode> affNodes = dht.context().affinity().assignment(readyVer).idealAssignment().get(p);
int affNodesCnt = affNodes.size();
GridDhtTopologyFuture topFut = top.topologyVersionFuture();
Collection<ClusterNode> owners = (topFut != null && topFut.isDone()) ? top.owners(p, AffinityTopologyVersion.NONE) : Collections.<ClusterNode>emptyList();
int ownerNodesCnt = owners.size();
GridDhtLocalPartition loc = top.localPartition(p, readyVer, false);
if (affNodesCnt != ownerNodesCnt || !affNodes.containsAll(owners) || (waitEvicts && loc != null && loc.state() != GridDhtPartitionState.OWNING)) {
LT.warn(log(), "Waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", p=" + p + ", affNodesCnt=" + affNodesCnt + ", ownersCnt=" + ownerNodesCnt + ", affNodes=" + F.nodeIds(affNodes) + ", owners=" + F.nodeIds(owners) + ", topFut=" + topFut + ", locNode=" + g.cluster().localNode() + ']');
} else
match = true;
} else {
LT.warn(log(), "Waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", started=" + dht.context().started() + ", p=" + p + ", readVer=" + readyVer + ", locNode=" + g.cluster().localNode() + ']');
}
if (!match) {
if (i == 0)
start = System.currentTimeMillis();
if (System.currentTimeMillis() - start > timeout) {
U.dumpThreads(log);
if (printPartState)
printPartitionState(c);
throw new IgniteException("Timeout of waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", p=" + p + ", readVer=" + readyVer + ", locNode=" + g.cluster().localNode() + ']');
}
// Busy wait.
Thread.sleep(20);
continue;
}
if (i > 0)
log().warning("Finished waiting for topology map update [igniteInstanceName=" + g.name() + ", p=" + p + ", duration=" + (System.currentTimeMillis() - start) + "ms]");
break;
}
}
if (waitNode2PartUpdate) {
long start = System.currentTimeMillis();
boolean failed = true;
while (failed) {
failed = false;
for (GridDhtPartitionMap pMap : top.partitionMap(true).values()) {
if (failed)
break;
for (Map.Entry entry : pMap.entrySet()) {
if (System.currentTimeMillis() - start > timeout) {
U.dumpThreads(log);
throw new IgniteException("Timeout of waiting for partition state update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", locNode=" + g.cluster().localNode() + ']');
}
if (entry.getValue() != GridDhtPartitionState.OWNING) {
LT.warn(log(), "Waiting for correct partition state part=" + entry.getKey() + ", should be OWNING [state=" + entry.getValue() + "], node=" + g.name() + ", cache=" + c.getName());
// Busy wait.
Thread.sleep(200);
failed = true;
break;
}
}
}
}
}
}
}
}
log.info("awaitPartitionMapExchange finished");
}
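In the test helper above, GridDhtTopologyFuture acts as a guard: partition owners are read from the topology only once top.topologyVersionFuture() reports completion, otherwise an empty owner list is assumed and the busy-wait continues. A minimal sketch of that guard follows; the helper name safeOwners is illustrative, the calls mirror the ones in the loop above.

/** Hypothetical helper: read partition owners only when the topology future is finished. */
private static Collection<ClusterNode> safeOwners(GridDhtPartitionTopology top, int p) {
    GridDhtTopologyFuture topFut = top.topologyVersionFuture();

    // While an exchange is in flight the owner set is not reliable, so report "no owners".
    return (topFut != null && topFut.isDone())
        ? top.owners(p, AffinityTopologyVersion.NONE)
        : Collections.<ClusterNode>emptyList();
}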
use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
the class IgniteDiagnosticMessage method dumpExchangeInfo.
/**
* @param sb String builder.
* @param ctx Context.
*/
static void dumpExchangeInfo(StringBuilder sb, GridKernalContext ctx) {
GridCachePartitionExchangeManager exchMgr = ctx.cache().context().exchange();
GridDhtTopologyFuture fut = exchMgr.lastTopologyFuture();
sb.append("Partitions exchange info [readyVer=").append(exchMgr.readyAffinityVersion()).append(']').append(U.nl()).append("Last initialized exchange future: ").append(fut);
}
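A usage sketch for the snippet above: since dumpExchangeInfo is a package-private static method, the caller below is assumed to live in the same package and to have a GridKernalContext and a logger at hand (both are illustrative assumptions).

// Hypothetical caller: collect the exchange diagnostics into a string and log it.
StringBuilder sb = new StringBuilder();

IgniteDiagnosticMessage.dumpExchangeInfo(sb, ctx);

log.info(sb.toString());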
use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture in project ignite by apache.
the class GridDhtColocatedLockFuture method mapOnTopology.
/**
* Acquires the topology future and checks its completeness under the read lock. If it is not complete,
* asynchronously waits for its completion and then tries again.
*
* @param remap Remap flag.
* @param c Optional closure to run after map.
*/
private void mapOnTopology(final boolean remap, @Nullable final Runnable c) {
// We must acquire topology snapshot from the topology version future.
cctx.topology().readLock();
final GridDhtTopologyFuture fut;
final boolean finished;
try {
if (cctx.topology().stopping()) {
onDone(cctx.shared().cache().isCacheRestarting(cctx.name()) ? new IgniteCacheRestartingException(cctx.name()) : new CacheStoppedException(cctx.name()));
return;
}
fut = cctx.topologyVersionFuture();
finished = fut.isDone();
if (finished) {
Throwable err = fut.validateCache(cctx, recovery, read, null, keys);
if (err != null) {
onDone(err);
return;
}
AffinityTopologyVersion topVer = fut.topologyVersion();
if (remap) {
if (tx != null)
tx.onRemap(topVer, true);
synchronized (this) {
this.topVer = topVer;
}
} else {
if (tx != null)
tx.topologyVersion(topVer);
synchronized (this) {
if (this.topVer == null)
this.topVer = topVer;
}
}
if (!remap)
cctx.mvcc().addFuture(this);
}
} finally {
cctx.topology().readUnlock();
}
if (finished) {
map(keys, remap, false);
if (c != null)
c.run();
markInitialized();
} else {
cctx.time().waitAsync(fut, tx == null ? 0 : tx.remainingTime(), (e, timedOut) -> {
if (errorOrTimeoutOnTopologyVersion(e, timedOut))
return;
try {
mapOnTopology(remap, c);
} finally {
cctx.shared().txContextReset();
}
});
}
}
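The retry loop in mapOnTopology is the canonical client-side pattern for GridDhtTopologyFuture: take the topology read lock, and if the future is already done, validate and map immediately; otherwise register an asynchronous continuation that re-enters the same method once the exchange completes. A stripped-down sketch of that shape follows; names other than the GridDhtTopologyFuture calls are illustrative, and cache validation plus error/timeout handling are omitted for brevity.

// Hypothetical condensed form of the decide-or-wait pattern from mapOnTopology.
private void mapWhenTopologyReady(boolean remap) {
    cctx.topology().readLock();

    final GridDhtTopologyFuture fut;
    final boolean finished;

    try {
        fut = cctx.topologyVersionFuture();

        // The completeness check must happen under the topology read lock.
        finished = fut.isDone();
    }
    finally {
        cctx.topology().readUnlock();
    }

    if (finished)
        // Topology is stable: map keys on the version fixed by the finished future.
        map(keys, remap, false);
    else
        // Exchange still in progress: retry once the topology future completes.
        cctx.time().waitAsync(fut, 0, (e, timedOut) -> mapWhenTopologyReady(remap));
}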