Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology in project ignite by apache.
The class GridCachePartitionExchangeManager, method refreshPartitions().
/**
 * Partition refresh callback.
 * On the coordinator this triggers sending of {@link GridDhtPartitionsFullMessage FullMessages};
 * on non-coordinator nodes it triggers sending of {@link GridDhtPartitionsSingleMessage SingleMessages}.
 */
public void refreshPartitions() {
    // TODO https://issues.apache.org/jira/browse/IGNITE-6857
    if (cctx.snapshot().snapshotOperationInProgress()) {
        scheduleResendPartitions();

        return;
    }

    ClusterNode oldest = cctx.discovery().oldestAliveServerNode(AffinityTopologyVersion.NONE);

    if (oldest == null) {
        if (log.isDebugEnabled())
            log.debug("Skip partitions refresh, there are no server nodes [loc=" + cctx.localNodeId() + ']');

        return;
    }

    if (log.isDebugEnabled())
        log.debug("Refreshing partitions [oldest=" + oldest.id() + ", loc=" + cctx.localNodeId() + ']');

    // If this is the oldest node.
    if (oldest.id().equals(cctx.localNodeId())) {
        // Check rebalance state & send CacheAffinityChangeMessage if need.
        for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
            if (!grp.isLocal()) {
                GridDhtPartitionTopology top = grp.topology();

                if (top != null)
                    cctx.affinity().checkRebalanceState(top, grp.groupId());
            }
        }

        GridDhtPartitionsExchangeFuture lastFut = lastInitializedFut;

        // No need to send to nodes which did not finish their first exchange.
        AffinityTopologyVersion rmtTopVer = lastFut != null
            ? (lastFut.isDone() ? lastFut.topologyVersion() : lastFut.initialVersion())
            : AffinityTopologyVersion.NONE;

        Collection<ClusterNode> rmts = cctx.discovery().remoteAliveNodesWithCaches(rmtTopVer);

        if (log.isDebugEnabled())
            log.debug("Refreshing partitions from oldest node: " + cctx.localNodeId());

        sendAllPartitions(rmts, rmtTopVer);
    }
    else {
        if (log.isDebugEnabled())
            log.debug("Refreshing local partitions from non-oldest node: " + cctx.localNodeId());

        sendLocalPartitions(oldest, null);
    }
}
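The coordinator decision in the method above reduces to comparing the oldest alive server node with the local node id. A minimal sketch of that check, using only the discovery calls already shown and assuming the same cctx field (the helper name isExchangeCoordinator is hypothetical):

private boolean isExchangeCoordinator() {
    // Oldest alive server node for the current topology; null if there are no server nodes.
    ClusterNode oldest = cctx.discovery().oldestAliveServerNode(AffinityTopologyVersion.NONE);

    // The local node coordinates the refresh only if it is that oldest node.
    return oldest != null && oldest.id().equals(cctx.localNodeId());
}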
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology in project ignite by apache.
The class GridDhtAtomicCache, method update().
/**
 * @param node Node.
 * @param locked Locked entries.
 * @param req Request.
 * @param res Response.
 * @return Operation result.
 * @throws GridCacheEntryRemovedException If an obsolete entry was encountered.
 */
private DhtAtomicUpdateResult update(ClusterNode node, List<GridDhtCacheEntry> locked,
    GridNearAtomicAbstractUpdateRequest req, GridNearAtomicUpdateResponse res) throws GridCacheEntryRemovedException {
    GridDhtPartitionTopology top = topology();

    String taskName = ctx.kernalContext().task().resolveTaskName(req.taskNameHash());

    boolean hasNear = req.nearCache();

    // Assign next version for update inside entries lock.
    GridCacheVersion ver = ctx.versions().next(top.readyTopologyVersion());

    if (hasNear)
        res.nearVersion(ver);

    if (msgLog.isDebugEnabled()) {
        msgLog.debug("Assigned update version [futId=" + req.futureId() + ", writeVer=" + ver + ']');
    }

    assert ver != null : "Got null version for update request: " + req;

    boolean sndPrevVal = !top.rebalanceFinished(req.topologyVersion());

    GridDhtAtomicAbstractUpdateFuture dhtFut = createDhtFuture(ver, req);

    IgniteCacheExpiryPolicy expiry = expiryPolicy(req.expiry());

    GridCacheReturn retVal = null;

    DhtAtomicUpdateResult updRes;

    if (req.size() > 1 &&                       // Several keys ...
        writeThrough() && !req.skipStore() &&   // and store is enabled ...
        !ctx.store().isLocal() &&               // and this is not local store ...
                                                // (conflict resolver should be used for local store)
        !ctx.dr().receiveEnabled()) {           // and no DR.
        // This method can only be used when there are no replicated entries in the batch.
        updRes = updateWithBatch(node, hasNear, req, res, locked, ver, dhtFut, ctx.isDrEnabled(), taskName, expiry, sndPrevVal);

        dhtFut = updRes.dhtFuture();

        if (req.operation() == TRANSFORM)
            retVal = updRes.returnValue();
    }
    else {
        updRes = updateSingle(node, hasNear, req, res, locked, ver, dhtFut, ctx.isDrEnabled(), taskName, expiry, sndPrevVal);

        retVal = updRes.returnValue();

        dhtFut = updRes.dhtFuture();
    }

    if (retVal == null)
        retVal = new GridCacheReturn(ctx, node.isLocal(), true, null, true);

    res.returnValue(retVal);

    if (dhtFut != null) {
        if (req.writeSynchronizationMode() == PRIMARY_SYNC
            // To avoid deadlock disable back-pressure for sender data node.
            && !ctx.discovery().cacheGroupAffinityNode(node, ctx.groupId())
            && !dhtFut.isDone()) {
            final IgniteRunnable tracker = GridNioBackPressureControl.threadTracker();

            if (tracker != null && tracker instanceof GridNioMessageTracker) {
                ((GridNioMessageTracker) tracker).onMessageReceived();

                dhtFut.listen(new IgniteInClosure<IgniteInternalFuture<Void>>() {
                    @Override public void apply(IgniteInternalFuture<Void> fut) {
                        ((GridNioMessageTracker) tracker).onMessageProcessed();
                    }
                });
            }
        }

        ctx.mvcc().addAtomicFuture(dhtFut.id(), dhtFut);
    }

    updRes.expiryPolicy(expiry);

    return updRes;
}
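The GridDhtPartitionTopology calls above carry the update's topology-related decisions: readyTopologyVersion() anchors the newly assigned cache version to the topology the primary currently sees, and rebalanceFinished(...) decides whether previous values must travel with the DHT update while backups may still be moving. A condensed sketch of that second decision, assuming the same topology() accessor as above (the helper name sendPreviousValue is hypothetical):

private boolean sendPreviousValue(GridNearAtomicAbstractUpdateRequest req) {
    // While rebalancing for the request's topology version has not finished,
    // previous values are included so that moving backups can resolve conflicts.
    return !topology().rebalanceFinished(req.topologyVersion());
}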
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology in project ignite by apache.
The class GridCommonAbstractTest, method printPartitionState().
/**
 * Prints the partition state for the given cache.
 *
 * @param cacheName Cache name.
 * @param firstParts Number of partitions to print (only the first {@code firstParts} partitions are printed;
 *      {@code 0} means all partitions).
 */
protected void printPartitionState(String cacheName, int firstParts) {
    StringBuilder sb = new StringBuilder();

    sb.append("----preload sync futures----\n");

    for (Ignite ig : G.allGrids()) {
        IgniteKernal k = ((IgniteKernal) ig);

        IgniteInternalFuture<?> syncFut = k.internalCache(cacheName).preloader().syncFuture();

        sb.append("nodeId=").append(k.context().localNodeId())
            .append(" isDone=").append(syncFut.isDone()).append("\n");
    }

    sb.append("----rebalance futures----\n");

    for (Ignite ig : G.allGrids()) {
        IgniteKernal k = ((IgniteKernal) ig);

        IgniteInternalFuture<?> f = k.internalCache(cacheName).preloader().rebalanceFuture();

        try {
            sb.append("nodeId=").append(k.context().localNodeId())
                .append(" isDone=").append(f.isDone())
                .append(" res=").append(f.isDone() ? f.get() : "N/A")
                .append(" topVer=").append(U.hasField(f, "topVer")
                    ? String.valueOf(U.field(f, "topVer")) : "[unknown] may be it is finished future")
                .append("\n");

            Map<UUID, T2<Long, Collection<Integer>>> remaining = U.field(f, "remaining");

            sb.append("remaining:");

            if (remaining.isEmpty())
                sb.append("empty\n");
            else
                for (Map.Entry<UUID, T2<Long, Collection<Integer>>> e : remaining.entrySet())
                    sb.append("\nuuid=").append(e.getKey())
                        .append(" startTime=").append(e.getValue().getKey())
                        .append(" parts=").append(Arrays.toString(e.getValue().getValue().toArray()))
                        .append("\n");
        }
        catch (Throwable e) {
            log.error(e.getMessage());
        }
    }

    sb.append("----partition state----\n");

    for (Ignite g : G.allGrids()) {
        IgniteKernal g0 = (IgniteKernal) g;

        sb.append("localNodeId=").append(g0.localNode().id()).append(" grid=").append(g0.name()).append("\n");

        IgniteCacheProxy<?, ?> cache = g0.context().cache().jcache(cacheName);

        GridDhtCacheAdapter<?, ?> dht = dht(cache);

        GridDhtPartitionTopology top = dht.topology();

        int parts = firstParts == 0 ? cache.context().config().getAffinity().partitions() : firstParts;

        for (int p = 0; p < parts; p++) {
            AffinityTopologyVersion readyVer = dht.context().shared().exchange().readyAffinityVersion();

            Collection<UUID> affNodes =
                F.nodeIds(dht.context().affinity().assignment(readyVer).idealAssignment().get(p));

            GridDhtLocalPartition part = top.localPartition(p, AffinityTopologyVersion.NONE, false);

            sb.append("local part=");

            if (part != null)
                sb.append(p).append(" state=").append(part.state());
            else
                sb.append(p).append(" is null");

            sb.append(" isAffNode=").append(affNodes.contains(g0.localNode().id())).append("\n");

            for (UUID nodeId : F.nodeIds(g0.context().discovery().allNodes())) {
                if (!nodeId.equals(g0.localNode().id()))
                    sb.append(" nodeId=").append(nodeId)
                        .append(" part=").append(p)
                        .append(" state=").append(top.partitionState(nodeId, p))
                        .append(" isAffNode=").append(affNodes.contains(nodeId)).append("\n");
            }
        }

        sb.append("\n");
    }

    log.info("dump partitions state for <" + cacheName + ">:\n" + sb.toString());
}
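A typical way to use this helper is from a test that extends GridCommonAbstractTest, for example while diagnosing a rebalance-related failure. The sketch below assumes a cache named "testCache" is configured in getConfiguration() and relies on the usual startGrids / awaitPartitionMapExchange / stopAllGrids test helpers; the test name and partition count are illustrative.

/** Illustrative sketch: dump partition state while diagnosing a rebalance issue. */
public void testDumpPartitionState() throws Exception {
    startGrids(2);

    try {
        // Wait until partition maps have been exchanged on all nodes.
        awaitPartitionMapExchange();

        // Print the state of the first 16 partitions of the (assumed) "testCache" cache.
        printPartitionState("testCache", 16);
    }
    finally {
        stopAllGrids();
    }
}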