Usage example of org.apache.ignite.cache.PartitionLossPolicy.READ_ONLY_SAFE in the Apache Ignite project.
Taken from the class CacheBaselineTopologyTest, method testBaselineTopologyChanges.
/**
 * Verifies that affinity assignments track manual baseline topology changes:
 * stopping a baseline node shrinks a key's owner set, a node started outside
 * the baseline owns no primary partitions, and resetting the baseline (either
 * to the current server topology or via {@code setBaselineTopology(null)})
 * restores/extends ownership.
 *
 * @param fromClient If {@code true}, cluster operations are driven from a client node;
 *      otherwise from server node 0.
 * @throws Exception If failed.
 */
private void testBaselineTopologyChanges(boolean fromClient) throws Exception {
    startGrids(NODE_COUNT);

    IgniteEx ignite;

    if (fromClient)
        ignite = startClientGrid(NODE_COUNT + 10);
    else
        ignite = grid(0);

    // Baseline is managed explicitly in this test, so disable auto-adjust.
    ignite.cluster().baselineAutoAdjustEnabled(false);

    ignite.cluster().active(true);

    awaitPartitionMapExchange();

    // Map each server's cluster node to its Ignite instance so owners can be stopped by node.
    Map<ClusterNode, Ignite> nodes = new HashMap<>();

    for (int i = 0; i < NODE_COUNT; i++) {
        Ignite ig = grid(i);

        nodes.put(ig.cluster().localNode(), ig);
    }

    ignite.createCache(
        new CacheConfiguration<Integer, Integer>()
            .setName(CACHE_NAME)
            .setCacheMode(PARTITIONED)
            .setBackups(1)
            .setPartitionLossPolicy(READ_ONLY_SAFE));

    manualCacheRebalancing(ignite, CACHE_NAME);

    // Find a key that is NOT owned by the coordinating node, so that stopping
    // one of the key's owners below does not kill the node driving the test.
    int key = -1;

    for (int k = 0; k < 100_000; k++) {
        if (!ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(k).contains(ignite.localNode())) {
            key = k;

            break;
        }
    }

    assert key >= 0;

    Collection<ClusterNode> initialMapping = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);

    // One backup configured => primary + backup.
    assert initialMapping.size() == 2 : initialMapping;

    ignite.cluster().setBaselineTopology(baselineNodes(nodes.keySet()));

    // Stop one of the key's owners. The baseline stays unchanged, so the
    // mapping must shrink to the single surviving owner rather than rebalance.
    ClusterNode node = initialMapping.iterator().next();

    nodes.get(node).close();
    nodes.remove(node);

    awaitPartitionMapExchange();

    Collection<ClusterNode> mapping = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);

    assert mapping.size() == 1 : mapping;
    assert initialMapping.containsAll(mapping);

    // Reset baseline to the current server topology: the lost backup must be restored.
    Set<ClusterNode> blt2 = new HashSet<>(ignite.cluster().nodes());

    ignite.cluster().setBaselineTopology(baselineNodes(blt2.stream().filter(n -> !n.isClient()).collect(Collectors.toSet())));

    awaitPartitionMapExchange();

    Collection<ClusterNode> initialMapping2 = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);

    assert initialMapping2.size() == 2 : initialMapping2;

    // A node joining OUTSIDE the baseline must not change the mapping and
    // must not own any primary partitions.
    Ignite newIgnite = startGrid(NODE_COUNT);

    awaitPartitionMapExchange();

    mapping = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);

    assert mapping.size() == initialMapping2.size() : mapping;
    assert mapping.containsAll(initialMapping2);
    assert ignite.affinity(CACHE_NAME).primaryPartitions(newIgnite.cluster().localNode()).length == 0;

    // Include the new node into the baseline: it must now receive primary partitions.
    Set<ClusterNode> blt3 = new HashSet<>(ignite.cluster().nodes());

    ignite.cluster().setBaselineTopology(baselineNodes(blt3.stream().filter(n -> !n.isClient()).collect(Collectors.toSet())));

    awaitPartitionMapExchange();

    Collection<ClusterNode> initialMapping3 = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);

    assert initialMapping3.size() == 2 : initialMapping3;
    assert ignite.affinity(CACHE_NAME).primaryPartitions(newIgnite.cluster().localNode()).length > 0;

    // Another out-of-baseline node: again no primaries until the baseline is reset.
    newIgnite = startGrid(NODE_COUNT + 1);

    awaitPartitionMapExchange();

    mapping = ignite.affinity(CACHE_NAME).mapKeyToPrimaryAndBackups(key);

    assert mapping.size() == initialMapping3.size() : mapping;
    assert mapping.containsAll(initialMapping3);
    assert ignite.affinity(CACHE_NAME).primaryPartitions(newIgnite.cluster().localNode()).length == 0;

    // Passing null resets the baseline; afterwards the newest node owns
    // primaries (confirmed by the assertion below).
    ignite.cluster().setBaselineTopology(null);

    awaitPartitionMapExchange();

    assert ignite.affinity(CACHE_NAME).primaryPartitions(newIgnite.cluster().localNode()).length > 0;
}
Aggregations