use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
the class GridCacheRebalancingSyncSelfTest method checkPartitionMapExchangeFinished.
/**
 * Checks that partition map exchange has finished: every local partition on every node must be in the OWNING state, and remote partition maps must be consistent with the local ones.
 */
public static void checkPartitionMapExchangeFinished() {
    for (Ignite g : G.allGrids()) {
        IgniteKernal g0 = (IgniteKernal) g;
        for (IgniteCacheProxy<?, ?> c : g0.context().cache().jcaches()) {
            CacheConfiguration cfg = c.context().config();
            if (cfg.getCacheMode() != LOCAL && cfg.getRebalanceMode() != NONE) {
                GridDhtCacheAdapter<?, ?> dht = dht(c);
                GridDhtPartitionTopology top = dht.topology();
                List<GridDhtLocalPartition> locs = top.localPartitions();
                for (GridDhtLocalPartition loc : locs) {
                    GridDhtPartitionState actl = loc.state();
                    boolean res = GridDhtPartitionState.OWNING.equals(actl);
                    if (!res)
                        printPartitionState(c);
                    assertTrue("Wrong local partition state part=" + loc.id() + ", should be OWNING [state=" + actl + "], node=" + g0.name() + " cache=" + c.getName(), res);
                    Collection<ClusterNode> affNodes = g0.affinity(cfg.getName()).mapPartitionToPrimaryAndBackups(loc.id());
                    assertTrue(affNodes.contains(g0.localNode()));
                }
                for (Ignite remote : G.allGrids()) {
                    IgniteKernal remote0 = (IgniteKernal) remote;
                    IgniteCacheProxy<?, ?> remoteC = remote0.context().cache().jcache(cfg.getName());
                    GridDhtCacheAdapter<?, ?> remoteDht = dht(remoteC);
                    GridDhtPartitionTopology remoteTop = remoteDht.topology();
                    GridDhtPartitionMap pMap = remoteTop.partitionMap(true).get(((IgniteKernal) g).localNodeId());
                    assertEquals(pMap.size(), locs.size());
                    for (Map.Entry entry : pMap.entrySet()) {
                        assertTrue("Wrong remote partition state part=" + entry.getKey() + ", should be OWNING [state=" + entry.getValue() + "], node=" + remote.name() + " cache=" + c.getName(), entry.getValue() == GridDhtPartitionState.OWNING);
                    }
                    for (GridDhtLocalPartition loc : locs)
                        assertTrue(pMap.containsKey(loc.id()));
                }
            }
        }
    }
    log.info("checkPartitionMapExchangeFinished finished");
}
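The helper above relies on utilities from Ignite's common test framework (G.allGrids(), dht(), printPartitionState() and the static log field). As a usage illustration only, here is a minimal sketch of how such a check is typically driven from a test in the same class; the test name and grid count are hypothetical, and the class is assumed to extend GridCommonAbstractTest:

    @Test
    public void testPartitionsOwnedAfterRebalance() throws Exception {
        // Start several server nodes; rebalancing runs as the topology grows.
        startGrids(3);
        // Wait for the partition map exchange to complete on every node.
        awaitPartitionMapExchange();
        // Every local partition should now be OWNING and remote maps consistent.
        checkPartitionMapExchangeFinished();
    }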
use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
the class IgniteClientCacheStartFailoverTest method testRebalanceState.
/**
* @throws Exception If failed.
*/
@Test
public void testRebalanceState() throws Exception {
    final int SRVS = 3;
    startGrids(SRVS);
    List<String> cacheNames = startCaches(ignite(0), 100);
    Ignite c = startClientGrid(SRVS);
    assertTrue(c.configuration().isClientMode());
    awaitPartitionMapExchange();
    TestRecordingCommunicationSpi.spi(ignite(0)).blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        @Override
        public boolean apply(ClusterNode clusterNode, Message msg) {
            return msg instanceof GridDhtPartitionsFullMessage && ((GridDhtPartitionsFullMessage) msg).exchangeId() == null;
        }
    });
    startGrid(SRVS + 1);
    for (String cacheName : cacheNames)
        c.cache(cacheName);
    // Will switch to ideal topology but some partitions are not evicted yet.
    for (int i = 0; i < SRVS + 1; i++) {
        AffinityTopologyVersion topVer = new AffinityTopologyVersion(SRVS + 2, 1);
        IgniteKernal node = (IgniteKernal) ignite(i);
        for (String cacheName : cacheNames) {
            GridDhtPartitionTopology top = node.cachex(cacheName).context().topology();
            waitForReadyTopology(top, topVer);
            assertEquals(topVer, top.readyTopologyVersion());
        }
    }
    TestRecordingCommunicationSpi.spi(ignite(0)).stopBlock();
    // Trigger eviction.
    awaitPartitionMapExchange();
    for (int i = 0; i < SRVS + 1; i++) {
        final AffinityTopologyVersion topVer = new AffinityTopologyVersion(SRVS + 2, 1);
        final IgniteKernal node = (IgniteKernal) ignite(i);
        for (String cacheName : cacheNames) {
            final GridDhtPartitionTopology top = node.cachex(cacheName).context().topology();
            GridTestUtils.waitForCondition(new GridAbsPredicate() {
                @Override
                public boolean apply() {
                    return top.rebalanceFinished(topVer);
                }
            }, 5000);
            assertTrue(top.rebalanceFinished(topVer));
        }
    }
}
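The TestRecordingCommunicationSpi.spi(ignite(0)) call above only works when the node was started with that SPI installed. That wiring is not part of the snippet; a hedged sketch of the configuration override such tests typically use follows (the actual test class may configure more than this):

    /** Sketch of the configuration hook assumed by the test above; illustrative only. */
    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
        // The recording SPI lets the test block GridDhtPartitionsFullMessage until stopBlock() is called.
        cfg.setCommunicationSpi(new TestRecordingCommunicationSpi());
        return cfg;
    }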
use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
the class CacheRentingStateRepairTest method testRentingStateRepairAfterRestart.
/**
* Tests partition is properly evicted when node is restarted in the middle of the eviction.
*/
@Test
public void testRentingStateRepairAfterRestart() throws Exception {
    try {
        IgniteEx g0 = startGrid(0);
        g0.cluster().baselineAutoAdjustEnabled(false);
        startGrid(1);
        g0.cluster().active(true);
        awaitPartitionMapExchange();
        List<Integer> parts = evictingPartitionsAfterJoin(g0, g0.cache(DEFAULT_CACHE_NAME), 20);
        int delayEvictPart = parts.get(0);
        int k = 0;
        while (g0.affinity(DEFAULT_CACHE_NAME).partition(k) != delayEvictPart)
            k++;
        g0.cache(DEFAULT_CACHE_NAME).put(k, k);
        GridDhtPartitionTopology top = dht(g0.cache(DEFAULT_CACHE_NAME)).topology();
        GridDhtLocalPartition part = top.localPartition(delayEvictPart);
        assertNotNull(part);
        // Prevent eviction.
        part.reserve();
        startGrid(2);
        g0.cluster().setBaselineTopology(3);
        // Wait until all is evicted except first partition.
        assertTrue("Failed to wait for partition eviction: reservedPart=" + part.id() + ", otherParts=" + top.localPartitions().stream().map(p -> "[id=" + p.id() + ", state=" + p.state() + ']').collect(Collectors.toList()), waitForCondition(() -> {
            for (int i = 0; i < parts.size(); i++) {
                if (delayEvictPart == i)
                    // Skip reserved partition.
                    continue;
                Integer p = parts.get(i);
                @Nullable GridDhtLocalPartition locPart = top.localPartition(p);
                assertNotNull(locPart);
                if (locPart.state() != GridDhtPartitionState.EVICTED)
                    return false;
            }
            return true;
        }, 5000));
        /**
         * Force renting state before node stop.
         * This also could be achieved by stopping node just after RENTING state is set.
         */
        part.setState(GridDhtPartitionState.RENTING);
        assertEquals(GridDhtPartitionState.RENTING, part.state());
        stopGrid(0);
        g0 = startGrid(0);
        awaitPartitionMapExchange();
        part = dht(g0.cache(DEFAULT_CACHE_NAME)).topology().localPartition(delayEvictPart);
        assertNotNull(part);
        final GridDhtLocalPartition finalPart = part;
        CountDownLatch evictLatch = new CountDownLatch(1);
        part.rent().listen(new IgniteInClosure<IgniteInternalFuture<?>>() {
            @Override
            public void apply(IgniteInternalFuture<?> fut) {
                assertEquals(GridDhtPartitionState.EVICTED, finalPart.state());
                evictLatch.countDown();
            }
        });
        assertTrue("Failed to wait for partition eviction after restart", evictLatch.await(5_000, TimeUnit.MILLISECONDS));
        awaitPartitionMapExchange(true, true, null);
    }
    finally {
        stopAllGrids();
    }
}
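Note that the snippet pins the partition with part.reserve() and does not release the reservation before the restart; the reservation is what keeps the partition out of the EVICTED state while the baseline changes. In ordinary use a reservation is paired with release(). A hypothetical sketch of that pattern (partId and the doSomething call are placeholders, not part of the test above):

    GridDhtLocalPartition p = top.localPartition(partId);
    // reserve() returns false if the partition is already being cleared or evicted.
    if (p != null && p.reserve()) {
        try {
            // The partition cannot transition to EVICTED while the reservation is held.
            doSomething(p);
        }
        finally {
            p.release();
        }
    }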
use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
the class CachePartitionLossWithRestartsTest method waitForDetection.
/**
 * @param node Node.
 * @param topVer Topology version.
 * @return Partition topology of the default cache group, after the given topology version is ready on the node.
 */
private GridDhtPartitionTopology waitForDetection(IgniteEx node, AffinityTopologyVersion topVer) throws Exception {
    GridCacheSharedContext<Object, Object> cctx = node.context().cache().context();
    CacheGroupDescriptor desc = cctx.affinity().cacheGroups().get(CU.cacheId(DEFAULT_CACHE_NAME));
    CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId());
    GridDhtPartitionTopology top = grp != null ? grp.topology() : cctx.exchange().clientTopology(desc.groupId(), null);
    cctx.exchange().affinityReadyFuture(topVer).get();
    cctx.exchange().lastTopologyFuture().get();
    return top;
}
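A hypothetical caller sketch showing how the returned topology might be used; the node index, topology version and the expectation of at least one lost partition are illustrative assumptions, not taken from the test:

    GridDhtPartitionTopology top = waitForDetection(grid(0), new AffinityTopologyVersion(4, 0));
    // Once the exchange futures above have completed, partition loss is reflected in the topology.
    assertFalse(top.lostPartitions().isEmpty());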
use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
the class CacheBaselineTopologyTest method testTopologyChangesWithFixedBaseline.
/**
* @throws Exception If failed.
*/
@Test
public void testTopologyChangesWithFixedBaseline() throws Exception {
    startGrids(NODE_COUNT);
    IgniteEx ignite = grid(0);
    ignite.cluster().baselineAutoAdjustEnabled(false);
    ignite.cluster().active(true);
    awaitPartitionMapExchange();
    Map<ClusterNode, Ignite> nodes = new HashMap<>();
    for (int i = 0; i < NODE_COUNT; i++) {
        Ignite ig = grid(i);
        nodes.put(ig.cluster().localNode(), ig);
    }
    IgniteCache<Integer, Integer> cache = ignite.createCache(new CacheConfiguration<Integer, Integer>().setName(CACHE_NAME).setCacheMode(PARTITIONED).setBackups(1).setPartitionLossPolicy(READ_ONLY_SAFE));
    int key = -1;
    for (int k = 0; k < 100_000; k++) {
        if (!ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(k).contains(ignite.localNode())) {
            key = k;
            break;
        }
    }
    assert key >= 0;
    int part = ignite.affinity(CACHE_NAME).partition(key);
    Collection<ClusterNode> initialMapping = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);
    assert initialMapping.size() == 2 : initialMapping;
    ignite.cluster().setBaselineTopology(baselineNodes(nodes.keySet()));
    awaitPartitionMapExchange();
    cache.put(key, 1);
    Collection<ClusterNode> mapping = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);
    assert initialMapping.size() == mapping.size() : mapping;
    assert initialMapping.containsAll(mapping) : mapping;
    IgniteEx newIgnite = startGrid(4);
    awaitPartitionMapExchange();
    mapping = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);
    assert initialMapping.size() == mapping.size() : mapping;
    assert initialMapping.containsAll(mapping) : mapping;
    mapping = newIgnite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);
    assert initialMapping.size() == mapping.size() : mapping;
    assert initialMapping.containsAll(mapping) : mapping;
    Set<String> stoppedNodeNames = new HashSet<>();
    ClusterNode node = mapping.iterator().next();
    stoppedNodeNames.add(nodes.get(node).name());
    nodes.get(node).close();
    nodes.remove(node);
    awaitPartitionMapExchange(true, true, null);
    mapping = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);
    assert mapping.size() == 1 : mapping;
    assert initialMapping.containsAll(mapping);
    node = mapping.iterator().next();
    stoppedNodeNames.add(nodes.get(node).name());
    nodes.get(node).close();
    nodes.remove(node);
    awaitPartitionMapExchange();
    mapping = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);
    assert mapping.isEmpty() : mapping;
    GridDhtPartitionTopology topology = ignite.cachex(CACHE_NAME).context().topology();
    assert topology.lostPartitions().contains(part);
    for (String nodeName : stoppedNodeNames) {
        startGrid(nodeName);
    }
    assert ignite.cluster().nodes().size() == NODE_COUNT + 1;
    mapping = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);
    assert initialMapping.size() == mapping.size() : mapping;
    for (ClusterNode n1 : initialMapping) {
        boolean found = false;
        for (ClusterNode n2 : mapping) {
            if (n2.consistentId().equals(n1.consistentId())) {
                found = true;
                break;
            }
        }
        assert found;
    }
    ignite.resetLostPartitions(Collections.singleton(CACHE_NAME));
    cache.put(key, 2);
}
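The baselineNodes(...) helper passed to setBaselineTopology(...) is not shown in the snippet. A plausible minimal shape, assuming it simply adapts the cluster nodes (ClusterNode extends BaselineNode):

    /** Plausible shape of the helper referenced above; not part of the snippet. */
    private Collection<BaselineNode> baselineNodes(Collection<ClusterNode> nodes) {
        // ClusterNode extends BaselineNode, so the nodes can be passed through as-is.
        return new ArrayList<BaselineNode>(nodes);
    }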