Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

In the class CachePartitionStateTest, the method checkNodePartitions:
/**
 * @param assign Assignments.
 * @param clusterNode Node.
 * @param cacheName Cache name.
 * @param expState Expected partitions state.
 */
private void checkNodePartitions(AffinityAssignment assign, ClusterNode clusterNode, String cacheName, GridDhtPartitionState expState) {
    Affinity<Object> aff = ignite(0).affinity(cacheName);

    Set<Integer> nodeParts = new HashSet<>();

    nodeParts.addAll(assign.primaryPartitions(clusterNode.id()));
    nodeParts.addAll(assign.backupPartitions(clusterNode.id()));

    log.info("Test state [node=" + clusterNode.id() + ", cache=" + cacheName + ", parts=" + nodeParts.size() + ", state=" + expState + ']');

    if (grid(0).context().discovery().cacheAffinityNode(clusterNode, cacheName))
        assertFalse(nodeParts.isEmpty());

    boolean check = false;

    for (Ignite node : G.allGrids()) {
        GridCacheAdapter cache = ((IgniteKernal) node).context().cache().internalCache(cacheName);

        if (cache != null) {
            check = true;

            GridDhtPartitionTopology top = cache.context().topology();

            GridDhtPartitionMap partsMap = top.partitions(clusterNode.id());

            for (int p = 0; p < aff.partitions(); p++) {
                if (nodeParts.contains(p)) {
                    assertNotNull(partsMap);

                    GridDhtPartitionState state = partsMap.get(p);

                    assertEquals("Unexpected state [checkNode=" + clusterNode.id() + ", node=" + node.name() + ", state=" + state + ']', expState, partsMap.get(p));
                }
                else {
                    if (partsMap != null) {
                        GridDhtPartitionState state = partsMap.get(p);

                        assertTrue("Unexpected state [checkNode=" + clusterNode.id() + ", node=" + node.name() + ", state=" + state + ']', state == null || state == EVICTED);
                    }
                }
            }
        }
        else {
            assertEquals(0, aff.primaryPartitions(((IgniteKernal) node).localNode()).length);
            assertEquals(0, aff.backupPartitions(((IgniteKernal) node).localNode()).length);
        }
    }

    assertTrue(check);
}
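The test above collects the partitions assigned to a node and then, on every grid, reads that node's GridDhtPartitionMap from the cache's GridDhtPartitionTopology to assert the expected state. The following is a minimal sketch of the same lookup in isolation; it reuses only the internal calls visible in the snippet (IgniteKernal, internalCache, top.partitions, partsMap.get), while the class name, the helper name, and the exact package locations of the internal classes are assumptions that may vary between Ignite versions.

// A minimal sketch (not part of the test): check that, as seen from one grid,
// a given node owns every partition in the supplied set. The helper name and
// the import package paths are assumptions.
import java.util.Set;
import java.util.UUID;

import org.apache.ignite.Ignite;
import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology;

final class PartitionStateSketch {
    /** @return {@code true} if {@code observer} sees {@code nodeId} as the owner of all {@code parts}. */
    static boolean ownsAll(Ignite observer, String cacheName, UUID nodeId, Set<Integer> parts) {
        GridCacheAdapter<?, ?> cache = ((IgniteKernal)observer).context().cache().internalCache(cacheName);

        // The observer does not host this cache at all.
        if (cache == null)
            return false;

        // Per-node partition state map published through the DHT partition topology.
        GridDhtPartitionTopology top = cache.context().topology();
        GridDhtPartitionMap partsMap = top.partitions(nodeId);

        if (partsMap == null)
            return false;

        for (Integer p : parts) {
            if (partsMap.get(p) != GridDhtPartitionState.OWNING)
                return false;
        }

        return true;
    }
}
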
In the class CacheContinuousQueryFailoverAbstractSelfTest, the method waitRebalanceFinished:
/**
 * @param ignite Ignite.
 * @param topVer Major topology version.
 * @param minorVer Minor topology version.
 * @throws Exception If failed.
 */
private void waitRebalanceFinished(Ignite ignite, long topVer, int minorVer) throws Exception {
    final AffinityTopologyVersion topVer0 = new AffinityTopologyVersion(topVer, minorVer);

    final GridDhtPartitionTopology top =
        ((IgniteKernal) ignite).context().cache().context().cacheContext(CU.cacheId(DEFAULT_CACHE_NAME)).topology();

    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            return top.rebalanceFinished(topVer0);
        }
    }, 5000);

    assertTrue(top.rebalanceFinished(topVer0));
}
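The helper above polls GridDhtPartitionTopology.rebalanceFinished(...) through GridTestUtils.waitForCondition with an anonymous GridAbsPredicate. Below is a compact sketch of the same wait; it assumes GridAbsPredicate is a single-method interface usable as a lambda (which the anonymous class above suggests), takes the cache name as a parameter instead of DEFAULT_CACHE_NAME, and uses an illustrative timeout constant.

// Compact variant of the wait above. The class name and WAIT_MILLIS value are
// illustrative; the internal API calls mirror the snippet above.
import org.apache.ignite.Ignite;
import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology;
import org.apache.ignite.internal.util.typedef.internal.CU;
import org.apache.ignite.testframework.GridTestUtils;

final class RebalanceWaitSketch {
    private static final long WAIT_MILLIS = 5_000L;

    /** @return {@code true} if rebalance finished for the given topology version within the timeout. */
    static boolean waitRebalance(Ignite ignite, String cacheName, long topVer, int minorVer) throws Exception {
        AffinityTopologyVersion ver = new AffinityTopologyVersion(topVer, minorVer);

        GridDhtPartitionTopology top = ((IgniteKernal)ignite).context().cache().context()
            .cacheContext(CU.cacheId(cacheName)).topology();

        // Poll the topology until it reports that rebalancing finished for the requested version.
        return GridTestUtils.waitForCondition(() -> top.rebalanceFinished(ver), WAIT_MILLIS);
    }
}
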
In the class GridDhtAtomicCache, the method updateAllAsyncInternal0:
/**
 * Executes local update after preloader fetched values.
 *
 * @param node Node.
 * @param req Update request.
 * @param completionCb Completion callback.
 */
private void updateAllAsyncInternal0(final ClusterNode node, final GridNearAtomicAbstractUpdateRequest req, final UpdateReplyClosure completionCb) {
    GridNearAtomicUpdateResponse res = new GridNearAtomicUpdateResponse(ctx.cacheId(), node.id(), req.futureId(), req.partition(), false, ctx.deploymentEnabled());

    assert !req.returnValue() || (req.operation() == TRANSFORM || req.size() == 1);

    GridDhtAtomicAbstractUpdateFuture dhtFut = null;

    IgniteCacheExpiryPolicy expiry = null;

    boolean needTaskName = ctx.events().isRecordable(EVT_CACHE_OBJECT_READ) || ctx.events().isRecordable(EVT_CACHE_OBJECT_PUT) || ctx.events().isRecordable(EVT_CACHE_OBJECT_REMOVED);

    String taskName = needTaskName ? ctx.kernalContext().task().resolveTaskName(req.taskNameHash()) : null;

    ctx.shared().database().checkpointReadLock();

    try {
        ctx.shared().database().ensureFreeSpace(ctx.dataRegion());

        // If batch store update is enabled, we need to lock all entries.
        // First, need to acquire locks on cache entries, then check filter.
        List<GridDhtCacheEntry> locked = lockEntries(req, req.topologyVersion());

        Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;

        DhtAtomicUpdateResult updDhtRes = new DhtAtomicUpdateResult();

        try {
            while (true) {
                try {
                    GridDhtPartitionTopology top = topology();

                    top.readLock();

                    try {
                        if (top.stopping()) {
                            if (ctx.shared().cache().isCacheRestarting(name()))
                                res.addFailedKeys(req.keys(), new IgniteCacheRestartingException(name()));
                            else
                                res.addFailedKeys(req.keys(), new CacheStoppedException(name()));

                            completionCb.apply(req, res);

                            return;
                        }

                        boolean remap = false;

                        // Do not check topology version if topology was locked on near node by
                        // external transaction or explicit lock.
                        if (!req.topologyLocked()) {
                            AffinityTopologyVersion waitVer = top.topologyVersionFuture().initialVersion();

                            // No need to remap if next future version is compatible.
                            boolean compatible = waitVer.isBetween(req.lastAffinityChangedTopologyVersion(), req.topologyVersion());

                            // Can not wait for topology future since it will break
                            // GridNearAtomicCheckUpdateRequest processing.
                            remap = !compatible && !top.topologyVersionFuture().isDone() || needRemap(req.topologyVersion(), top.readyTopologyVersion());
                        }

                        if (!remap) {
                            update(node, locked, req, res, updDhtRes, taskName);

                            dhtFut = updDhtRes.dhtFuture();
                            deleted = updDhtRes.deleted();
                            expiry = updDhtRes.expiryPolicy();
                        }
                        else
                            // Should remap all keys.
                            res.remapTopologyVersion(top.lastTopologyChangeVersion());
                    }
                    finally {
                        top.readUnlock();
                    }

                    // Must be done outside topology read lock to avoid deadlocks.
                    if (res.returnValue() != null)
                        res.returnValue().marshalResult(ctx);

                    break;
                }
                catch (UnregisteredClassException ex) {
                    IgniteCacheObjectProcessor cacheObjProc = ctx.cacheObjects();

                    assert cacheObjProc instanceof CacheObjectBinaryProcessorImpl;

                    ((CacheObjectBinaryProcessorImpl) cacheObjProc).binaryContext().registerClass(ex.cls(), true, false);
                }
                catch (UnregisteredBinaryTypeException ex) {
                    if (ex.future() != null) {
                        // Wait for the future that couldn't be processed because of
                        // IgniteThread#isForbiddenToRequestBinaryMetadata flag being true. Usually this means
                        // that awaiting for the future right there would lead to potential deadlock if
                        // continuous queries are used in parallel with entry processor.
                        ex.future().get();

                        // Retry and don't update current binary metadata, because it most likely already exists.
                        continue;
                    }

                    IgniteCacheObjectProcessor cacheObjProc = ctx.cacheObjects();

                    assert cacheObjProc instanceof CacheObjectBinaryProcessorImpl;

                    ((CacheObjectBinaryProcessorImpl) cacheObjProc).binaryContext().updateMetadata(ex.typeId(), ex.binaryMetadata(), false);
                }
            }
        }
        catch (GridCacheEntryRemovedException e) {
            assert false : "Entry should not become obsolete while holding lock.";

            e.printStackTrace();
        }
        finally {
            if (locked != null)
                unlockEntries(locked, req.topologyVersion());

            // Enqueue if necessary after locks release.
            if (deleted != null) {
                assert !deleted.isEmpty();
                assert ctx.deferredDelete() : this;

                for (IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion> e : deleted)
                    ctx.onDeferredDelete(e.get1(), e.get2());
            }

            // TODO fire events only after successful fsync
            if (ctx.shared().wal() != null)
                ctx.shared().wal().flush(null, false);
        }
    }
    catch (GridDhtInvalidPartitionException ignore) {
        if (log.isDebugEnabled())
            log.debug("Caught invalid partition exception for cache entry (will remap update request): " + req);

        res.remapTopologyVersion(ctx.topology().lastTopologyChangeVersion());
    }
    catch (Throwable e) {
        // At least RuntimeException can be thrown by the code above when GridCacheContext is cleaned and there is
        // an attempt to use cleaned resources.
        U.error(log, "Unexpected exception during cache update", e);

        res.addFailedKeys(req.keys(), e);

        completionCb.apply(req, res);

        if (e instanceof Error)
            throw (Error) e;

        return;
    }
    finally {
        ctx.shared().database().checkpointReadUnlock();
    }

    if (res.remapTopologyVersion() != null) {
        assert dhtFut == null;

        completionCb.apply(req, res);
    }
    else {
        if (dhtFut != null)
            dhtFut.map(node, res.returnValue(), res, completionCb);
    }

    if (req.writeSynchronizationMode() != FULL_ASYNC)
        req.cleanup(!node.isLocal());

    sendTtlUpdateRequest(expiry);
}
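The core of the method above is the topology read-lock pattern: acquire top.readLock(), fail fast if the topology is stopping, decide whether the request has to be remapped by comparing its version with the pending exchange future, and release the lock in a finally block so that response marshalling happens outside it. The sketch below isolates that pattern using only the internal calls visible above; it deliberately omits the needRemap(...) check (a private method of the cache), and the class name and the Decision enum are illustrative.

// Sketch of the topology read-lock pattern used above. Assumes the same
// internal GridDhtPartitionTopology API as the method above; the outcome
// enum is illustrative.
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology;

final class TopologyReadLockSketch {
    /** Outcome of the check performed under the topology read lock. */
    enum Decision { PROCEED, REMAP, STOPPED }

    /**
     * @param top DHT partition topology of the cache.
     * @param reqTopVer Topology version the request was mapped on.
     * @param lastAffChangedVer Last affinity-changed version carried by the request.
     */
    static Decision checkUnderReadLock(GridDhtPartitionTopology top,
        AffinityTopologyVersion reqTopVer,
        AffinityTopologyVersion lastAffChangedVer) {
        top.readLock();

        try {
            // The cache is stopping or restarting: the caller must fail the keys.
            if (top.stopping())
                return Decision.STOPPED;

            AffinityTopologyVersion waitVer = top.topologyVersionFuture().initialVersion();

            // The pending exchange is compatible with the request if its version lies
            // between the last affinity change and the request's mapped version.
            boolean compatible = waitVer.isBetween(lastAffChangedVer, reqTopVer);

            if (!compatible && !top.topologyVersionFuture().isDone())
                return Decision.REMAP;

            return Decision.PROCEED;
        }
        finally {
            // Never hold the topology read lock longer than the check itself:
            // response marshalling and future completion happen outside it.
            top.readUnlock();
        }
    }
}
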
In the class GridDhtTxPrepareFuture, the method map:
/**
 * @param entry Transaction entry.
 */
private void map(IgniteTxEntry entry) throws IgniteTxRollbackCheckedException {
    if (entry.cached().isLocal())
        return;

    GridDhtCacheEntry cached = (GridDhtCacheEntry) entry.cached();

    GridCacheContext cacheCtx = entry.context();

    GridDhtCacheAdapter<?, ?> dht = cacheCtx.isNear() ? cacheCtx.near().dht() : cacheCtx.dht();

    ExpiryPolicy expiry = cacheCtx.expiryForTxEntry(entry);

    if (expiry != null && (entry.op() == READ || entry.op() == NOOP)) {
        entry.op(NOOP);

        entry.ttl(CU.toTtl(expiry.getExpiryForAccess()));
    }

    while (true) {
        try {
            List<ClusterNode> dhtNodes = dht.topology().nodes(cached.partition(), tx.topologyVersion());

            GridDhtPartitionTopology top = cacheCtx.topology();

            GridDhtLocalPartition part = top.localPartition(cached.partition());

            if (part != null && !part.primary(top.readyTopologyVersion())) {
                log.warning("Failed to map a transaction on outdated topology, rolling back " + "[tx=" + CU.txString(tx) + ", readyTopVer=" + top.readyTopologyVersion() + ", lostParts=" + top.lostPartitions() + ", part=" + part.toString() + ']');

                throw new IgniteTxRollbackCheckedException("Failed to map a transaction on outdated " + "topology, please try again [timeout=" + tx.timeout() + ", tx=" + CU.txString(tx) + ']');
            }

            assert !dhtNodes.isEmpty() && dhtNodes.get(0).id().equals(cctx.localNodeId()) : "cacheId=" + cacheCtx.cacheId() + ", localNode = " + cctx.localNodeId() + ", dhtNodes = " + dhtNodes;

            if (log.isDebugEnabled())
                log.debug("Mapping entry to DHT nodes [nodes=" + U.toShortString(dhtNodes) + ", entry=" + entry + ']');

            for (int i = 1; i < dhtNodes.size(); i++) {
                ClusterNode node = dhtNodes.get(i);

                addMapping(entry, node, dhtMap);
            }

            Collection<UUID> readers = cached.readers();

            if (!F.isEmpty(readers)) {
                for (UUID readerId : readers) {
                    if (readerId.equals(tx.nearNodeId()))
                        continue;

                    ClusterNode readerNode = cctx.discovery().node(readerId);

                    if (readerNode == null || canSkipNearReader(dht, readerNode, dhtNodes))
                        continue;

                    if (log.isDebugEnabled())
                        log.debug("Mapping entry to near node [node=" + readerNode + ", entry=" + entry + ']');

                    addMapping(entry, readerNode, nearMap);
                }
            }
            else if (log.isDebugEnabled())
                log.debug("Entry has no near readers: " + entry);

            break;
        }
        catch (GridCacheEntryRemovedException ignore) {
            cached = dht.entryExx(entry.key(), tx.topologyVersion());

            entry.cached(cached);
        }
    }
}
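Before mapping an entry, the method above guards against an outdated topology: if the key's partition exists locally but is no longer primary on the ready topology version, the transaction is rolled back. The following is a small sketch of just that check, assuming the same internal topology API as above; the class and method names are illustrative, and the import paths may vary between Ignite versions.

// Sketch of the primary-ownership check performed above before mapping a
// transaction entry. Class and method names are illustrative.
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology;

final class PrimaryCheckSketch {
    /**
     * @param top DHT partition topology of the cache.
     * @param partId Partition of the key being mapped.
     * @return {@code true} if the partition either is not held locally or is primary here.
     */
    static boolean canMapLocally(GridDhtPartitionTopology top, int partId) {
        GridDhtLocalPartition part = top.localPartition(partId);

        // A missing local partition is not an error for this check: the entry may simply
        // not be hosted on this node. Only an existing, non-primary partition means the
        // topology moved and the transaction has to be rolled back.
        return part == null || part.primary(top.readyTopologyVersion());
    }
}
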
In the class GridDhtTransactionalCacheAdapter, the method initTxTopologyVersion:
/**
 * @param nodeId Near node id.
 * @param nearNode Near node.
 * @param nearLockVer Near lock version.
 * @param nearFutId Near future id.
 * @param nearMiniId Near mini-future id.
 * @param firstClientReq First client request flag.
 * @param topVer Topology version.
 * @param nearThreadId Near node thread id.
 * @param timeout Timeout.
 * @param txTaskNameHash Transaction task name hash.
 * @param snapshot Mvcc snapshot.
 * @return Transaction.
 */
public GridDhtTxLocal initTxTopologyVersion(UUID nodeId, ClusterNode nearNode, GridCacheVersion nearLockVer, IgniteUuid nearFutId, int nearMiniId, boolean firstClientReq, AffinityTopologyVersion topVer, long nearThreadId, long timeout, int txTaskNameHash, MvccSnapshot snapshot) throws IgniteException, IgniteCheckedException {
    assert ctx.affinityNode();

    if (txLockMsgLog.isDebugEnabled()) {
        txLockMsgLog.debug("Received near enlist request [txId=" + nearLockVer + ", node=" + nodeId + ']');
    }

    if (nearNode == null) {
        U.warn(txLockMsgLog, "Received near enlist request from unknown node (will ignore) [txId=" + nearLockVer + ", node=" + nodeId + ']');

        return null;
    }

    GridDhtTxLocal tx = null;

    GridCacheVersion dhtVer = ctx.tm().mappedVersion(nearLockVer);

    if (dhtVer != null)
        tx = ctx.tm().tx(dhtVer);

    GridDhtPartitionTopology top = null;

    if (tx == null) {
        if (firstClientReq) {
            assert nearNode.isClient();

            top = topology();

            top.readLock();

            GridDhtTopologyFuture topFut = top.topologyVersionFuture();

            boolean done = topFut.isDone();

            if (!done || !(topFut.topologyVersion().compareTo(topVer) >= 0 && ctx.shared().exchange().lastAffinityChangedTopologyVersion(topFut.initialVersion()).compareTo(topVer) <= 0)) {
                // TODO IGNITE-7164 Wait for topology change, remap client TX in case affinity was changed.
                top.readUnlock();

                throw new ClusterTopologyException("Topology was changed. Please retry on stable topology.");
            }
        }

        try {
            tx = new GridDhtTxLocal(ctx.shared(), topVer, nearNode.id(), nearLockVer, nearFutId, nearMiniId, nearThreadId, false, false, ctx.systemTx(), false, ctx.ioPolicy(), PESSIMISTIC, REPEATABLE_READ, timeout, false, false, false, -1, null, securitySubjectId(ctx), txTaskNameHash, null, null);

            // if (req.syncCommit())
            tx.syncMode(FULL_SYNC);

            tx = ctx.tm().onCreated(null, tx);

            if (tx == null || !tx.init()) {
                String msg = "Failed to acquire lock (transaction has been completed): " + nearLockVer;

                U.warn(log, msg);

                try {
                    if (tx != null)
                        tx.rollbackDhtLocal();
                }
                catch (IgniteCheckedException ex) {
                    U.error(log, "Failed to rollback the transaction: " + tx, ex);
                }

                throw new IgniteCheckedException(msg);
            }

            tx.mvccSnapshot(snapshot);
            tx.topologyVersion(topVer);
        }
        finally {
            if (top != null)
                top.readUnlock();
        }
    }

    ctx.tm().txContext(tx);

    return tx;
}
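For the first request from a client node, the method above validates the client's topology version under the topology read lock: the exchange future must be completed, its resulting version must not be older than the client's, and affinity must not have changed after the version the client used. The sketch below expresses that predicate on its own, assuming the caller already holds top.readLock(); the class name, the package locations, and the generic parameters of the internal classes are assumptions.

// Sketch of the topology-version validation performed above for the first
// client request. Assumes the caller holds the topology read lock; names and
// import paths are assumptions.
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology;

final class ClientTopologyCheckSketch {
    /** @return {@code true} if a client transaction mapped on {@code clientTopVer} may proceed. */
    static boolean topologyUsable(GridCacheSharedContext<?, ?> sctx,
        GridDhtPartitionTopology top,
        AffinityTopologyVersion clientTopVer) {
        GridDhtTopologyFuture topFut = top.topologyVersionFuture();

        // An unfinished exchange means the server topology is still moving.
        if (!topFut.isDone())
            return false;

        // The completed exchange must cover the client's version...
        boolean coversClient = topFut.topologyVersion().compareTo(clientTopVer) >= 0;

        // ...and affinity must not have changed after the version the client used.
        boolean affinityStable =
            sctx.exchange().lastAffinityChangedTopologyVersion(topFut.initialVersion()).compareTo(clientTopVer) <= 0;

        return coversClient && affinityStable;
    }
}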