use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
the class GridDhtPartitionsExchangeFuture method assignPartitionsStates.
/**
* @param cacheGroupsToResetOwners Set of cache groups whose partition states need to be reset,
* or {@code null} if partition states should be reset for all cache groups.
*/
private void assignPartitionsStates(Set<Integer> cacheGroupsToResetOwners) {
Map<String, List<SupplyPartitionInfo>> supplyInfoMap = log.isInfoEnabled() ? new ConcurrentHashMap<>() : null;
try {
U.doInParallel(cctx.kernalContext().pools().getSystemExecutorService(), nonLocalCacheGroupDescriptors(), grpDesc -> {
CacheGroupContext grpCtx = cctx.cache().cacheGroup(grpDesc.groupId());
GridDhtPartitionTopology top = grpCtx != null ? grpCtx.topology() : cctx.exchange().clientTopology(grpDesc.groupId(), events().discoveryCache());
if (CU.isPersistentCache(grpDesc.config(), cctx.gridConfig().getDataStorageConfiguration())) {
List<SupplyPartitionInfo> list;
if (cacheGroupsToResetOwners == null || cacheGroupsToResetOwners.contains(grpDesc.groupId()))
list = assignPartitionStates(top, true);
else
list = assignPartitionStates(top, false);
if (supplyInfoMap != null && !F.isEmpty(list))
supplyInfoMap.put(grpDesc.cacheOrGroupName(), list);
} else if (cacheGroupsToResetOwners == null)
assignPartitionSizes(top);
return null;
});
} catch (IgniteCheckedException e) {
throw new IgniteException("Failed to assign partition states", e);
}
if (!F.isEmpty(supplyInfoMap))
printPartitionRebalancingFully(supplyInfoMap);
timeBag.finishGlobalStage("Assign partitions states");
}
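The central move in this method is resolving a GridDhtPartitionTopology for every cache group descriptor, whether or not the group is started locally: a node that hosts the group uses the group's own topology, while any other node falls back to the client topology kept by the exchange manager. Below is a minimal sketch of that lookup, assuming the same GridCacheSharedContext and exchange discovery cache used above; the helper class and method names are made up for illustration and are not part of Ignite.
import org.apache.ignite.internal.managers.discovery.DiscoCache;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;
import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor;
import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology;

/** Hypothetical helper mirroring the per-group topology lookup performed above. */
final class TopologyResolver {
    /**
     * @param cctx Shared cache context.
     * @param grpDesc Cache group descriptor from the exchange.
     * @param discoCache Discovery cache of the exchange events.
     * @return Local topology if the group is started on this node, otherwise the client topology
     *         maintained by the exchange manager.
     */
    static GridDhtPartitionTopology resolve(GridCacheSharedContext<?, ?> cctx, CacheGroupDescriptor grpDesc, DiscoCache discoCache) {
        CacheGroupContext grpCtx = cctx.cache().cacheGroup(grpDesc.groupId());

        return grpCtx != null ? grpCtx.topology() : cctx.exchange().clientTopology(grpDesc.groupId(), discoCache);
    }
}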
use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
the class GridDhtPartitionsExchangeFuture method processSingleMessageOnCrdFinish.
/**
* @param msg Single message to process.
* @param messageAccumulator Map used to accumulate affinity messages that need to be sent afterwards.
*/
private void processSingleMessageOnCrdFinish(GridDhtPartitionsSingleMessage msg, Map<Integer, CacheGroupAffinityMessage> messageAccumulator) {
for (Map.Entry<Integer, GridDhtPartitionMap> e : msg.partitions().entrySet()) {
Integer grpId = e.getKey();
CacheGroupContext grp = cctx.cache().cacheGroup(grpId);
GridDhtPartitionTopology top = grp != null ? grp.topology() : cctx.exchange().clientTopology(grpId, events().discoveryCache());
CachePartitionPartialCountersMap cntrs = msg.partitionUpdateCounters(grpId, top.partitions());
if (cntrs != null)
top.collectUpdateCounters(cntrs);
}
Collection<Integer> affReq = msg.cacheGroupsAffinityRequest();
if (affReq != null)
CacheGroupAffinityMessage.createAffinityMessages(cctx, exchCtx.events().topologyVersion(), affReq, messageAccumulator);
}
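For context, on the coordinator this method runs once per received single message, with all calls sharing one accumulator map that is later attached to the full message. The following is a hedged sketch of that driving loop, assuming the same class and imports as above; the helper method and the collection of received messages are illustrative, not part of Ignite's API.
/** Illustrative only: accumulate per-group affinity messages across all received single messages. */
private Map<Integer, CacheGroupAffinityMessage> collectJoinedNodeAffinity(Collection<GridDhtPartitionsSingleMessage> singleMsgs) {
    Map<Integer, CacheGroupAffinityMessage> joinedNodeAff = new HashMap<>();

    for (GridDhtPartitionsSingleMessage singleMsg : singleMsgs)
        processSingleMessageOnCrdFinish(singleMsg, joinedNodeAff);

    // After the loop the map holds an affinity message for every group that at least
    // one sender listed in cacheGroupsAffinityRequest().
    return joinedNodeAff;
}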
use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
the class GridDhtPreloader method generateAssignments.
/**
* {@inheritDoc}
*/
@Override
public GridDhtPreloaderAssignments generateAssignments(GridDhtPartitionExchangeId exchId, GridDhtPartitionsExchangeFuture exchFut) {
assert exchFut == null || exchFut.isDone();
// No assignments for disabled preloader.
GridDhtPartitionTopology top = grp.topology();
if (!grp.rebalanceEnabled())
return new GridDhtPreloaderAssignments(exchId, top.readyTopologyVersion(), false);
int partitions = grp.affinity().partitions();
AffinityTopologyVersion topVer = top.readyTopologyVersion();
assert exchFut == null || exchFut.context().events().topologyVersion().equals(top.readyTopologyVersion()) || exchFut.context().events().topologyVersion().equals(ctx.exchange().lastAffinityChangedTopologyVersion(top.readyTopologyVersion())) : "Topology version mismatch [exchId=" + exchId + ", grp=" + grp.name() + ", topVer=" + top.readyTopologyVersion() + ']';
GridDhtPreloaderAssignments assignments = new GridDhtPreloaderAssignments(exchId, topVer, exchFut != null && exchFut.affinityReassign());
AffinityAssignment aff = grp.affinity().cachedAffinity(topVer);
CachePartitionFullCountersMap countersMap = grp.topology().fullUpdateCounters();
for (int p = 0; p < partitions; p++) {
if (ctx.exchange().hasPendingServerExchange()) {
if (log.isDebugEnabled())
log.debug("Skipping assignments creation, exchange worker has pending assignments: " + exchId);
assignments.cancelled(true);
return assignments;
}
// If partition belongs to local node.
if (aff.get(p).contains(ctx.localNode())) {
GridDhtLocalPartition part = top.localPartition(p);
assert part != null;
assert part.id() == p;
// Do not rebalance OWNING or LOST partitions.
if (part.state() == OWNING || part.state() == LOST)
continue;
// State should be switched to MOVING during PME.
if (part.state() != MOVING) {
throw new AssertionError("Partition has invalid state for rebalance " + aff.topologyVersion() + " " + part);
}
ClusterNode histSupplier = null;
if (grp.persistenceEnabled() && exchFut != null) {
List<UUID> nodeIds = exchFut.partitionHistorySupplier(grp.groupId(), p, part.initialUpdateCounter());
if (!F.isEmpty(nodeIds))
histSupplier = ctx.discovery().node(nodeIds.get(p % nodeIds.size()));
}
if (histSupplier != null && !exchFut.isClearingPartition(grp, p)) {
assert grp.persistenceEnabled();
assert remoteOwners(p, topVer).contains(histSupplier) : remoteOwners(p, topVer);
GridDhtPartitionDemandMessage msg = assignments.get(histSupplier);
if (msg == null) {
assignments.put(histSupplier, msg = new GridDhtPartitionDemandMessage(top.updateSequence(), assignments.topologyVersion(), grp.groupId()));
}
// TODO FIXME https://issues.apache.org/jira/browse/IGNITE-11790
msg.partitions().addHistorical(p, part.initialUpdateCounter(), countersMap.updateCounter(p), partitions);
} else {
int partId = p;
List<ClusterNode> picked = remoteOwners(p, topVer, (node, owners) -> {
if (owners.size() == 1)
return true;
return exchFut == null || exchFut.isNodeApplicableForFullRebalance(node.id(), grp.groupId(), partId);
});
if (!picked.isEmpty()) {
ClusterNode n = picked.get(p % picked.size());
GridDhtPartitionDemandMessage msg = assignments.get(n);
if (msg == null) {
assignments.put(n, msg = new GridDhtPartitionDemandMessage(top.updateSequence(), assignments.topologyVersion(), grp.groupId()));
}
msg.partitions().addFull(p);
}
}
}
}
if (!assignments.isEmpty()) {
if (exchFut != null && exchFut.rebalanced()) {
GridDhtPartitionDemandMessage first = assignments.values().iterator().next();
GridDhtLocalPartition locPart = grp.topology().localPartition(first.partitions().all().iterator().next());
SB buf = new SB(1024);
buf.a("Unexpected rebalance on rebalanced cluster: assignments=");
buf.a(assignments);
buf.a(", locPart=");
if (locPart != null)
locPart.dumpDebugInfo(buf);
else
buf.a("NA");
throw new AssertionError(buf.toString());
}
ctx.database().lastCheckpointInapplicableForWalRebalance(grp.groupId());
}
return assignments;
}
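GridDhtPreloaderAssignments behaves as a map from supplier node to a single GridDhtPartitionDemandMessage, and every partition the local node has to rebalance ends up in one of those messages either as a historical entry (WAL delta from a history supplier) or as a full entry (complete preload from an owner). A condensed sketch of the get-or-create step that appears twice above follows; the helper name demandFor is illustrative only.
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionDemandMessage;
import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPreloaderAssignments;

/** Hypothetical helper: get or create the demand message addressed to a given supplier node. */
final class AssignmentsHelper {
    static GridDhtPartitionDemandMessage demandFor(GridDhtPreloaderAssignments assignments, ClusterNode supplier, long updateSeq, int grpId) {
        GridDhtPartitionDemandMessage msg = assignments.get(supplier);

        if (msg == null)
            assignments.put(supplier, msg = new GridDhtPartitionDemandMessage(updateSeq, assignments.topologyVersion(), grpId));

        return msg;
    }
}
The caller then either adds the partition as historical via msg.partitions().addHistorical(...) or as a full preload via msg.partitions().addFull(p), exactly as in the method above.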
use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
the class CacheDeferredDeleteQueueTest method testQueue.
/**
* @param atomicityMode Cache atomicity mode.
* @param nearCache {@code True} if a near cache should be created.
*
* @throws Exception If failed.
*/
private void testQueue(CacheAtomicityMode atomicityMode, boolean nearCache) throws Exception {
CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME);
ccfg.setCacheMode(PARTITIONED);
ccfg.setAtomicityMode(atomicityMode);
ccfg.setWriteSynchronizationMode(FULL_SYNC);
ccfg.setBackups(1);
if (nearCache)
ccfg.setNearConfiguration(new NearCacheConfiguration<Integer, Integer>());
IgniteCache<Integer, Integer> cache = ignite(0).createCache(ccfg);
try {
final int KEYS = cache.getConfiguration(CacheConfiguration.class).getAffinity().partitions() * 3;
for (int i = 0; i < KEYS; i++) cache.put(i, i);
for (int i = 0; i < KEYS; i++) cache.remove(i);
boolean wait = GridTestUtils.waitForCondition(new GridAbsPredicate() {
@Override
public boolean apply() {
for (int i = 0; i < NODES; i++) {
final GridDhtPartitionTopology top = ((IgniteKernal) ignite(i)).context().cache().cache(DEFAULT_CACHE_NAME).context().topology();
for (GridDhtLocalPartition p : top.currentLocalPartitions()) {
Collection<Object> rmvQueue = GridTestUtils.getFieldValue(p, "rmvQueue");
if (!rmvQueue.isEmpty() || p.dataStore().fullSize() != 0)
return false;
}
}
return true;
}
}, 5000);
assertTrue("Failed to wait for rmvQueue cleanup.", wait);
} finally {
ignite(0).destroyCache(ccfg.getName());
}
}
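GridAbsPredicate is a single-method interface, so in newer test code the same wait condition can be factored into a helper and passed as a lambda. Below is a hedged sketch of such a helper, assuming it lives in the same test class where ignite(i), NODES and DEFAULT_CACHE_NAME are available.
/** Illustrative helper: true if every local partition of the cache on node idx has drained its deferred-delete queue. */
private boolean deferredQueuesEmpty(int idx) {
    GridDhtPartitionTopology top =
        ((IgniteKernal)ignite(idx)).context().cache().cache(DEFAULT_CACHE_NAME).context().topology();

    for (GridDhtLocalPartition p : top.currentLocalPartitions()) {
        Collection<Object> rmvQueue = GridTestUtils.getFieldValue(p, "rmvQueue");

        if (!rmvQueue.isEmpty() || p.dataStore().fullSize() != 0)
            return false;
    }

    return true;
}
With such a helper, the condition passed to GridTestUtils.waitForCondition can be a short lambda that loops over the nodes and calls deferredQueuesEmpty(i).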
use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
the class IgnitePdsPartitionFilesDestroyTest method checkPartitionFiles.
/**
* If {@code exists} is {@code true}, checks that the partition file still exists for every partition in state EVICTED.
*
* If {@code exists} is {@code false}, checks that the partition file is gone for every partition that is absent or in
* state EVICTED.
*
* @param ignite Node.
* @param exists If {@code true}, the method checks that partition files exist; otherwise it checks that partition
* files don't exist.
* @throws IgniteCheckedException If failed.
*/
private void checkPartitionFiles(IgniteEx ignite, boolean exists) throws IgniteCheckedException {
int evicted = 0;
GridDhtPartitionTopology top = ignite.cachex(DEFAULT_CACHE_NAME).context().topology();
for (int p = 0; p < PARTS_CNT; p++) {
GridDhtLocalPartition part = top.localPartition(p);
File partFile = partitionFile(ignite, DEFAULT_CACHE_NAME, p);
if (exists) {
if (part != null && part.state() == GridDhtPartitionState.EVICTED) {
Assert.assertTrue("Partition file has been deleted ahead of time: " + partFile, partFile.exists());
evicted++;
}
} else {
if (part == null || part.state() == GridDhtPartitionState.EVICTED)
Assert.assertTrue("Partition file has not been deleted: " + partFile, !partFile.exists());
}
}
if (exists)
Assert.assertTrue("There should be at least 1 eviction", evicted > 0);
}
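A related pattern when asserting on partition lifecycle in tests is to bucket the local partitions by state through the same topology API. The following is a minimal sketch built only from calls already shown above; the helper class name is made up.
import java.util.EnumMap;
import java.util.Map;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology;

/** Hypothetical helper: histogram of local partition states for a cache. */
final class PartitionStateCounter {
    static Map<GridDhtPartitionState, Integer> countStates(IgniteEx ignite, String cacheName) {
        GridDhtPartitionTopology top = ignite.cachex(cacheName).context().topology();

        Map<GridDhtPartitionState, Integer> hist = new EnumMap<>(GridDhtPartitionState.class);

        for (GridDhtLocalPartition part : top.currentLocalPartitions())
            hist.merge(part.state(), 1, Integer::sum);

        return hist;
    }
}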