Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
The class CacheContinuousQueryFailoverAbstractSelfTest, method testRebalanceVersion.
/**
 * @throws Exception If failed.
 */
@Test
public void testRebalanceVersion() throws Exception {
    IgniteEx ignite0 = startGrid(0);

    int minorVer = ignite0.configuration().isLateAffinityAssignment() ? 1 : 0;

    boolean replicated = ignite0.context().cache().context()
        .cacheContext(CU.cacheId(DEFAULT_CACHE_NAME)).isReplicated();

    GridDhtPartitionTopology top0 = ignite0.context().cache().context()
        .cacheContext(CU.cacheId(DEFAULT_CACHE_NAME)).topology();

    assertTrue(top0.rebalanceFinished(new AffinityTopologyVersion(1)));
    assertFalse(top0.rebalanceFinished(new AffinityTopologyVersion(2)));

    Ignite ignite1 = startGrid(1);

    GridDhtPartitionTopology top1 = ((IgniteKernal)ignite1).context().cache().context()
        .cacheContext(CU.cacheId(DEFAULT_CACHE_NAME)).topology();

    waitRebalanceFinished(ignite0, 2, minorVer);
    waitRebalanceFinished(ignite1, 2, minorVer);

    assertFalse(top0.rebalanceFinished(new AffinityTopologyVersion(3)));
    assertFalse(top1.rebalanceFinished(new AffinityTopologyVersion(3)));

    Ignite ignite2 = startGrid(2);

    GridDhtPartitionTopology top2 = ((IgniteKernal)ignite2).context().cache().context()
        .cacheContext(CU.cacheId(DEFAULT_CACHE_NAME)).topology();

    waitRebalanceFinished(ignite0, 3, minorVer);
    waitRebalanceFinished(ignite1, 3, minorVer);
    waitRebalanceFinished(ignite2, 3, minorVer);

    assertFalse(top0.rebalanceFinished(new AffinityTopologyVersion(4)));
    assertFalse(top1.rebalanceFinished(new AffinityTopologyVersion(4)));
    assertFalse(top2.rebalanceFinished(new AffinityTopologyVersion(4)));

    Ignite ignite3 = startClientGrid(3);

    GridDhtPartitionTopology top3 = ((IgniteKernal)ignite3).context().cache().context()
        .cacheContext(CU.cacheId(DEFAULT_CACHE_NAME)).topology();

    assertTrue(top0.rebalanceFinished(new AffinityTopologyVersion(4)));
    assertTrue(top1.rebalanceFinished(new AffinityTopologyVersion(4)));
    assertTrue(top2.rebalanceFinished(new AffinityTopologyVersion(4)));
    assertTrue(top3.rebalanceFinished(new AffinityTopologyVersion(4)));

    stopGrid(1);

    waitRebalanceFinished(ignite0, 5, replicated ? 0 : minorVer);
    waitRebalanceFinished(ignite2, 5, replicated ? 0 : minorVer);
    waitRebalanceFinished(ignite3, 5, replicated ? 0 : minorVer);
}
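The waitRebalanceFinished(...) helper invoked above belongs to the test class and is not shown in this snippet. A minimal sketch, assuming it simply polls GridDhtPartitionTopology.rebalanceFinished(...) for the expected major/minor topology version using GridTestUtils.waitForCondition from the Ignite test framework and then asserts the result:

/** Waits until rebalance finishes on the given node for the expected topology version (assumed helper). */
private void waitRebalanceFinished(Ignite ignite, long topVer, int minorVer) throws Exception {
    // Topology version the rebalance is expected to finish on (assumed naming).
    final AffinityTopologyVersion expVer = new AffinityTopologyVersion(topVer, minorVer);

    final GridDhtPartitionTopology top = ((IgniteKernal)ignite).context().cache().context()
        .cacheContext(CU.cacheId(DEFAULT_CACHE_NAME)).topology();

    // Poll until the topology reports rebalance completion for the expected version.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            return top.rebalanceFinished(expVer);
        }
    }, 5000);

    assertTrue(top.rebalanceFinished(expVer));
}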
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
The class GridCacheDhtPreloadDelayedSelfTest, method testAutomaticPreloadWithEmptyCache.
/**
 * @throws Exception If failed.
 */
@Test
public void testAutomaticPreloadWithEmptyCache() throws Exception {
    preloadMode = SYNC;
    delay = 0;

    Collection<Ignite> ignites = new ArrayList<>();

    try {
        for (int i = 0; i < 5; i++) {
            ignites.add(startGrid(i));

            if (persistenceEnabled()) {
                if (i == 0)
                    grid(0).cluster().active(true);
                else
                    resetBaselineTopology();
            }

            awaitPartitionMapExchange(true, true, null, false);

            for (Ignite g : ignites) {
                info(">>> Checking affinity for grid: " + g.name());

                GridDhtPartitionTopology top = topology(g);

                GridDhtPartitionFullMap fullMap = top.partitionMap(true);

                for (Map.Entry<UUID, GridDhtPartitionMap> fe : fullMap.entrySet()) {
                    UUID nodeId = fe.getKey();

                    GridDhtPartitionMap m = fe.getValue();

                    for (Map.Entry<Integer, GridDhtPartitionState> e : m.entrySet()) {
                        int p = e.getKey();
                        GridDhtPartitionState state = e.getValue();

                        Collection<ClusterNode> nodes = affinityNodes(g, p);

                        Collection<UUID> nodeIds = U.nodeIds(nodes);

                        assert nodeIds.contains(nodeId) : "Invalid affinity mapping [nodeId=" + nodeId +
                            ", part=" + p + ", state=" + state +
                            ", igniteInstanceName=" + G.ignite(nodeId).name() +
                            ", affNames=" + U.nodes2names(nodes) + ", affIds=" + nodeIds + ']';
                    }
                }
            }
        }
    }
    finally {
        stopAllGrids();
    }
}
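The topology(Ignite) and affinityNodes(Ignite, int) helpers used above are not included in the snippet. A plausible sketch, assuming topology(...) resolves the DHT partition topology through the cache context (as the first example does) and affinityNodes(...) maps a partition to its primary and backup nodes via the public Affinity API:

/** Resolves the DHT partition topology of the default cache on the given node (assumed helper). */
private GridDhtPartitionTopology topology(Ignite g) {
    return ((IgniteKernal)g).context().cache().context()
        .cacheContext(CU.cacheId(DEFAULT_CACHE_NAME)).topology();
}

/** Returns primary and backup nodes for the given partition of the default cache (assumed helper). */
private Collection<ClusterNode> affinityNodes(Ignite g, int p) {
    return g.affinity(DEFAULT_CACHE_NAME).mapPartitionToPrimaryAndBackups(p);
}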
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
The class GridCacheDhtPreloadStartStopSelfTest, method checkNodes.
/**
 * @param keyCnt Key count.
 * @param nodeCnt Node count.
 * @throws Exception If failed.
 */
private void checkNodes(int keyCnt, int nodeCnt) throws Exception {
    try {
        Ignite g1 = startGrid(0);

        IgniteCache<Integer, String> c1 = g1.cache(DEFAULT_CACHE_NAME);

        putKeys(c1, keyCnt);
        checkKeys(c1, keyCnt);

        Collection<Ignite> ignites = new LinkedList<>();

        startGrids(nodeCnt, 1, ignites);

        // Check all nodes.
        for (Ignite g : ignites) {
            IgniteCache<Integer, String> c = g.cache(DEFAULT_CACHE_NAME);

            checkKeys(c, keyCnt);
        }

        info(">>> Finished checking nodes [keyCnt=" + keyCnt + ", nodeCnt=" + nodeCnt + ']');

        stopGrids(ignites);

        GridDhtCacheAdapter<Integer, String> dht = dht(c1);

        info(">>> Waiting for preload futures...");

        GridCachePartitionExchangeManager<Object, Object> exchMgr =
            ((IgniteKernal)g1).context().cache().context().exchange();

        // Wait for exchanges to complete.
        for (IgniteInternalFuture<?> fut : exchMgr.exchangeFutures())
            fut.get();

        Affinity<Integer> aff = affinity(c1);

        for (int i = 0; i < keyCnt; i++) {
            if (aff.mapPartitionToPrimaryAndBackups(aff.partition(i)).contains(g1.cluster().localNode())) {
                GridDhtPartitionTopology top = dht.topology();

                for (GridDhtLocalPartition p : top.localPartitions())
                    assertEquals("Invalid partition state for partition: " + p, OWNING, p.state());
            }
        }
    }
    finally {
        stopAllGrids();
    }
}
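The putKeys, checkKeys and stopGrids helpers referenced above are omitted from the snippet. A minimal sketch, assuming keys 0..keyCnt-1 are stored with their string representations as values and extra grids are stopped by name:

/** Puts keys 0..cnt-1 into the cache, using each key's string form as the value (assumed helper). */
private void putKeys(IgniteCache<Integer, String> c, int cnt) {
    for (int i = 0; i < cnt; i++)
        c.put(i, Integer.toString(i));
}

/** Checks that every key 0..cnt-1 is readable and maps to its string form (assumed helper). */
private void checkKeys(IgniteCache<Integer, String> c, int cnt) {
    for (int i = 0; i < cnt; i++)
        assertEquals(Integer.toString(i), c.get(i));
}

/** Stops the previously started extra grids (assumed helper). */
private void stopGrids(Iterable<Ignite> ignites) {
    for (Ignite g : ignites)
        stopGrid(g.name());
}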
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
The class GridCacheDhtPreloadSelfTest, method checkActivePartitionTransfer.
/**
 * @param keyCnt Key count.
 * @param nodeCnt Node count.
 * @param sameCoord Same coordinator flag.
 * @param shuffle Shuffle flag.
 * @throws Exception If failed.
 */
private void checkActivePartitionTransfer(int keyCnt, int nodeCnt, boolean sameCoord,
    boolean shuffle) throws Exception {
    try {
        Ignite ignite1 = startGrid(0);

        IgniteCache<Integer, String> cache1 = ignite1.cache(DEFAULT_CACHE_NAME);

        putKeys(cache1, keyCnt);
        checkKeys(cache1, keyCnt, F.asList(ignite1));

        List<Ignite> ignites = new ArrayList<>(nodeCnt + 1);

        startGrids(nodeCnt, 1, ignites);

        // Check all nodes.
        for (Ignite g : ignites) {
            IgniteCache<Integer, String> c = g.cache(DEFAULT_CACHE_NAME);

            checkKeys(c, keyCnt, ignites);
        }

        if (shuffle)
            Collections.shuffle(ignites);

        if (sameCoord)
            // Add last.
            ignites.add(ignite1);
        else
            // Add first.
            ignites.add(0, ignite1);

        if (!sameCoord && shuffle)
            Collections.shuffle(ignites);

        checkActiveState(ignites);

        info(">>> Finished checking nodes [keyCnt=" + keyCnt + ", nodeCnt=" + nodeCnt +
            ", grids=" + U.grids2names(ignites) + ']');

        Ignite last = F.last(ignites);

        for (Iterator<Ignite> it = ignites.iterator(); it.hasNext(); ) {
            Ignite g = it.next();

            if (!it.hasNext()) {
                assert last == g;

                break;
            }

            checkActiveState(ignites);

            it.remove();

            info("Before grid stop [name=" + g.name() + ", fullTop=" + top2string(ignites));

            stopGrid(g.name());

            info("After grid stop [name=" + g.name() + ", fullTop=" + top2string(ignites));

            // Check all remaining nodes.
            checkActiveState(ignites);

            // Need to wait, otherwise test logic is broken if EVT_NODE_FAILED exchanges are merged.
            awaitPartitionMapExchange();
        }

        info("Finished waiting for preload futures.");

        assert last != null;

        IgniteCache<Integer, String> lastCache = last.cache(DEFAULT_CACHE_NAME);

        GridDhtCacheAdapter<Integer, String> dht = dht(lastCache);

        Affinity<Integer> aff = affinity(lastCache);

        info("Finished waiting for all exchange futures...");

        for (int i = 0; i < keyCnt; i++) {
            if (aff.mapPartitionToPrimaryAndBackups(aff.partition(i)).contains(last.cluster().localNode())) {
                GridDhtPartitionTopology top = dht.topology();

                for (GridDhtLocalPartition p : top.localPartitions()) {
                    Collection<ClusterNode> moving = top.moving(p.id());

                    assert moving.isEmpty() : "Nodes with partition in moving state [part=" + p +
                        ", moving=" + moving + ']';

                    assert OWNING == p.state() : "Invalid partition state for partition [part=" + p +
                        ", map=" + top.partitionMap(false) + ']';
                }
            }
        }

        checkActiveState(ignites);
    }
    catch (Error | Exception e) {
        error("Test failed.", e);

        throw e;
    }
    finally {
        stopAllGrids();
    }
}
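checkActiveState(...) and top2string(...) are diagnostic helpers of the test class that are not shown here. One plausible shape for top2string, under the assumption that it only renders each remaining node's full partition map for logging:

/** Renders the full partition map of every remaining node for log output (assumed helper). */
private String top2string(Iterable<Ignite> grids) {
    Map<String, String> map = new HashMap<>();

    for (Ignite g : grids) {
        IgniteCache<Integer, String> c = g.cache(DEFAULT_CACHE_NAME);

        GridDhtCacheAdapter<Integer, String> dht = dht(c);

        // Full map without forced remap; same call the assertions above use.
        map.put(g.name(), dht.topology().partitionMap(false).toString());
    }

    return "Grid partition maps [" + map + ']';
}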
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
The class GridCacheDhtPreloadSelfTest, method checkNodes.
/**
 * @param keyCnt Key count.
 * @param nodeCnt Node count.
 * @param sameCoord Same coordinator flag.
 * @param shuffle Shuffle flag.
 * @throws Exception If failed.
 */
private void checkNodes(int keyCnt, int nodeCnt, boolean sameCoord, boolean shuffle) throws Exception {
    try {
        Ignite ignite1 = startGrid(0);

        IgniteCache<Integer, String> cache1 = ignite1.cache(DEFAULT_CACHE_NAME);

        putKeys(cache1, keyCnt);
        checkKeys(cache1, keyCnt, F.asList(ignite1));

        List<Ignite> ignites = new ArrayList<>(nodeCnt + 1);

        startGrids(nodeCnt, 1, ignites);

        // Check all nodes.
        for (Ignite g : ignites) {
            IgniteCache<Integer, String> c = g.cache(DEFAULT_CACHE_NAME);

            checkKeys(c, keyCnt, ignites);
        }

        if (shuffle)
            Collections.shuffle(ignites);

        if (sameCoord)
            // Add last.
            ignites.add(ignite1);
        else
            // Add first.
            ignites.add(0, ignite1);

        if (!sameCoord && shuffle)
            Collections.shuffle(ignites);

        info(">>> Finished checking nodes [keyCnt=" + keyCnt + ", nodeCnt=" + nodeCnt +
            ", grids=" + U.grids2names(ignites) + ']');

        Ignite last = null;

        for (Iterator<Ignite> it = ignites.iterator(); it.hasNext(); ) {
            Ignite g = it.next();

            if (!it.hasNext()) {
                last = g;

                break;
            }

            final UUID nodeId = g.cluster().localNode().id();

            it.remove();

            info("Before grid stop [name=" + g.name() + ", fullTop=" + top2string(ignites));

            stopGrid(g.name());

            info(">>> Waiting for preload futures [leftNode=" + g.name() +
                ", remaining=" + U.grids2names(ignites) + ']');

            awaitPartitionMapExchange();

            info("After grid stop [name=" + g.name() + ", fullTop=" + top2string(ignites));

            // Check all remaining nodes.
            for (Ignite gg : ignites) {
                IgniteCache<Integer, String> c = gg.cache(DEFAULT_CACHE_NAME);

                checkKeys(c, keyCnt, ignites);
            }
        }

        assert last != null;

        IgniteCache<Integer, String> lastCache = last.cache(DEFAULT_CACHE_NAME);

        GridDhtCacheAdapter<Integer, String> dht = dht(lastCache);

        Affinity<Integer> aff = affinity(lastCache);

        for (int i = 0; i < keyCnt; i++) {
            if (aff.mapPartitionToPrimaryAndBackups(aff.partition(i)).contains(last.cluster().localNode())) {
                GridDhtPartitionTopology top = dht.topology();

                for (GridDhtLocalPartition p : top.localPartitions()) {
                    Collection<ClusterNode> moving = top.moving(p.id());

                    assert moving.isEmpty() : "Nodes with partition in moving state [part=" + p +
                        ", moving=" + moving + ']';

                    assert OWNING == p.state() : "Invalid partition state for partition [part=" + p +
                        ", map=" + top.partitionMap(false) + ']';
                }
            }
        }
    }
    catch (Error | Exception e) {
        error("Test failed.", e);

        throw e;
    }
    finally {
        stopAllGrids();
    }
}
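Both checkNodes and checkActivePartitionTransfer start their additional nodes through a startGrids(cnt, startIdx, ignites) helper that the snippets do not show. A minimal sketch, assuming it starts cnt grids at consecutive indices and collects them for later checks:

/** Starts cnt grids with indices startIdx..startIdx+cnt-1 and adds them to the collection (assumed helper). */
private void startGrids(int cnt, int startIdx, Collection<Ignite> ignites) throws Exception {
    for (int i = 0; i < cnt; i++)
        ignites.add(startGrid(startIdx + i));
}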