Example usage of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in the Apache Ignite project, taken from the checkPartitionFiles method of the IgnitePdsPartitionFilesDestroyTest class.
/**
 * If {@code exists} is {@code true}, checks that all partition files exist if partition has state EVICTED,
 * and verifies that at least one partition was actually evicted.
 *
 * If {@code exists} is {@code false}, checks that all partition files don't exist if partition is absent or has
 * state EVICTED.
 *
 * @param ignite Node.
 * @param exists If {@code true} method will check that partition file exists, in other case will check that file
 * doesn't exist.
 * @throws IgniteCheckedException If failed.
 */
private void checkPartitionFiles(IgniteEx ignite, boolean exists) throws IgniteCheckedException {
    int evicted = 0;

    GridDhtPartitionTopology top = ignite.cachex(DEFAULT_CACHE_NAME).context().topology();

    for (int p = 0; p < PARTS_CNT; p++) {
        GridDhtLocalPartition part = top.localPartition(p);

        File partFile = partitionFile(ignite, DEFAULT_CACHE_NAME, p);

        if (exists) {
            if (part != null && part.state() == GridDhtPartitionState.EVICTED) {
                Assert.assertTrue("Partition file has deleted ahead of time: " + partFile, partFile.exists());

                // Count only partitions that actually reached EVICTED state. Incrementing
                // unconditionally (as before) made the "at least 1 eviction" assertion trivial.
                evicted++;
            }
        }
        else {
            if (part == null || part.state() == GridDhtPartitionState.EVICTED)
                Assert.assertTrue("Partition file has not deleted: " + partFile, !partFile.exists());
        }
    }

    if (exists)
        Assert.assertTrue("There should be at least 1 eviction", evicted > 0);
}
Example usage of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in the Apache Ignite project, taken from the testDestroyWhenPartitionsAreEmpty method of the IgnitePdsPartitionFilesDestroyTest class.
/**
 * Test destroy when partition files are empty and there are no pages for checkpoint.
 *
 * @throws Exception If failed.
 */
@Test
public void testDestroyWhenPartitionsAreEmpty() throws Exception {
    IgniteEx crd = (IgniteEx) startGrids(2);

    crd.cluster().active(true);

    forceCheckpoint();

    // Evict the first local partition that has not been evicted yet.
    for (GridDhtLocalPartition locPart : crd.cachex(DEFAULT_CACHE_NAME).context().topology().localPartitions()) {
        if (locPart.state() != GridDhtPartitionState.EVICTED) {
            locPart.rent().get();

            break;
        }
    }

    // This checkpoint has no pages to write, but has one partition file to destroy.
    forceCheckpoint(crd);

    // Files of absent or evicted partitions must be gone.
    checkPartitionFiles(crd, false);
}
Example usage of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in the Apache Ignite project, taken from the checkNodes method of the GridCacheDhtPreloadStartStopSelfTest class.
/**
 * Starts a coordinator plus {@code nodeCnt} extra nodes, verifies all keys on every node,
 * stops the extra nodes and asserts that every local partition still owned by the
 * coordinator ends up in OWNING state after all exchanges complete.
 *
 * @param keyCnt Key count.
 * @param nodeCnt Node count.
 * @throws Exception If failed.
 */
private void checkNodes(int keyCnt, int nodeCnt) throws Exception {
    try {
        Ignite g1 = startGrid(0);

        IgniteCache<Integer, String> c1 = g1.cache(DEFAULT_CACHE_NAME);

        putKeys(c1, keyCnt);
        checkKeys(c1, keyCnt);

        Collection<Ignite> ignites = new LinkedList<>();

        startGrids(nodeCnt, 1, ignites);

        // Every started node must see every key.
        for (Ignite node : ignites) {
            IgniteCache<Integer, String> nodeCache = node.cache(DEFAULT_CACHE_NAME);

            checkKeys(nodeCache, keyCnt);
        }

        info(">>> Finished checking nodes [keyCnt=" + keyCnt + ", nodeCnt=" + nodeCnt + ']');

        stopGrids(ignites);

        GridDhtCacheAdapter<Integer, String> dht = dht(c1);

        info(">>> Waiting for preload futures...");

        GridCachePartitionExchangeManager<Object, Object> exchMgr =
            ((IgniteKernal) g1).context().cache().context().exchange();

        // Wait for exchanges to complete.
        for (IgniteInternalFuture<?> fut : exchMgr.exchangeFutures())
            fut.get();

        Affinity<Integer> aff = affinity(c1);

        for (int key = 0; key < keyCnt; key++) {
            boolean locallyOwned =
                aff.mapPartitionToPrimaryAndBackups(aff.partition(key)).contains(g1.cluster().localNode());

            if (locallyOwned) {
                GridDhtPartitionTopology top = dht.topology();

                for (GridDhtLocalPartition p : top.localPartitions())
                    assertEquals("Invalid partition state for partition: " + p, OWNING, p.state());
            }
        }
    }
    finally {
        stopAllGrids();
    }
}
Example usage of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in the Apache Ignite project, taken from the checkActivePartitionTransfer method of the GridCacheDhtPreloadSelfTest class.
/**
 * Starts a coordinator plus {@code nodeCnt} extra nodes, then stops them one by one,
 * verifying active state after every stop and, at the end, that no partition of the
 * surviving node is left in MOVING state and all its local partitions are OWNING.
 *
 * @param keyCnt Key count.
 * @param nodeCnt Node count.
 * @param sameCoord Same coordinator flag.
 * @param shuffle Shuffle flag.
 * @throws Exception If failed.
 */
private void checkActivePartitionTransfer(int keyCnt, int nodeCnt, boolean sameCoord, boolean shuffle) throws Exception {
try {
Ignite ignite1 = startGrid(0);
IgniteCache<Integer, String> cache1 = ignite1.cache(DEFAULT_CACHE_NAME);
// Populate the cache and verify the data while only the first node is up.
putKeys(cache1, keyCnt);
checkKeys(cache1, keyCnt, F.asList(ignite1));
List<Ignite> ignites = new ArrayList<>(nodeCnt + 1);
startGrids(nodeCnt, 1, ignites);
// Check all nodes.
for (Ignite g : ignites) {
IgniteCache<Integer, String> c = g.cache(DEFAULT_CACHE_NAME);
checkKeys(c, keyCnt, ignites);
}
if (shuffle)
Collections.shuffle(ignites);
// Position the first node in the stop order: last when it must stay alive until
// the end (sameCoord), first otherwise so it is stopped early.
if (sameCoord)
// Add last.
ignites.add(ignite1);
else
// Add first.
ignites.add(0, ignite1);
if (!sameCoord && shuffle)
Collections.shuffle(ignites);
checkActiveState(ignites);
info(">>> Finished checking nodes [keyCnt=" + keyCnt + ", nodeCnt=" + nodeCnt + ", grids=" + U.grids2names(ignites) + ']');
Ignite last = F.last(ignites);
// Stop every grid except the last one, checking active state before and after each stop.
for (Iterator<Ignite> it = ignites.iterator(); it.hasNext(); ) {
Ignite g = it.next();
if (!it.hasNext()) {
assert last == g;
break;
}
checkActiveState(ignites);
it.remove();
info("Before grid stop [name=" + g.name() + ", fullTop=" + top2string(ignites));
stopGrid(g.name());
info("After grid stop [name=" + g.name() + ", fullTop=" + top2string(ignites));
// Check all left nodes.
checkActiveState(ignites);
// Need wait, otherwise test logic is broken if EVT_NODE_FAILED exchanges are merged.
awaitPartitionMapExchange();
}
info("Finished waiting for preload futures.");
assert last != null;
IgniteCache<Integer, String> lastCache = last.cache(DEFAULT_CACHE_NAME);
GridDhtCacheAdapter<Integer, String> dht = dht(lastCache);
Affinity<Integer> aff = affinity(lastCache);
info("Finished waiting for all exchange futures...");
// For every key owned by the surviving node, no partition may still be rebalancing
// (MOVING) and every local partition must be OWNING.
for (int i = 0; i < keyCnt; i++) {
if (aff.mapPartitionToPrimaryAndBackups(aff.partition(i)).contains(last.cluster().localNode())) {
GridDhtPartitionTopology top = dht.topology();
for (GridDhtLocalPartition p : top.localPartitions()) {
Collection<ClusterNode> moving = top.moving(p.id());
assert moving.isEmpty() : "Nodes with partition in moving state [part=" + p + ", moving=" + moving + ']';
assert OWNING == p.state() : "Invalid partition state for partition [part=" + p + ", map=" + top.partitionMap(false) + ']';
}
}
}
checkActiveState(ignites);
} catch (Error | Exception e) {
error("Test failed.", e);
throw e;
} finally {
stopAllGrids();
}
}
Example usage of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in the Apache Ignite project, taken from the checkNodes method of the GridCacheDhtPreloadSelfTest class.
/**
 * Starts a coordinator plus {@code nodeCnt} extra nodes, verifies all keys on every node,
 * then stops all but one node (re-checking keys after each stop) and asserts that the
 * surviving node has no partitions in MOVING state and all local partitions are OWNING.
 *
 * @param keyCnt Key count.
 * @param nodeCnt Node count.
 * @param sameCoord Same coordinator flag.
 * @param shuffle Shuffle flag.
 * @throws Exception If failed.
 */
private void checkNodes(int keyCnt, int nodeCnt, boolean sameCoord, boolean shuffle) throws Exception {
    try {
        Ignite ignite1 = startGrid(0);

        IgniteCache<Integer, String> cache1 = ignite1.cache(DEFAULT_CACHE_NAME);

        putKeys(cache1, keyCnt);
        checkKeys(cache1, keyCnt, F.asList(ignite1));

        List<Ignite> ignites = new ArrayList<>(nodeCnt + 1);

        startGrids(nodeCnt, 1, ignites);

        // Check all nodes.
        for (Ignite g : ignites) {
            IgniteCache<Integer, String> c = g.cache(DEFAULT_CACHE_NAME);

            checkKeys(c, keyCnt, ignites);
        }

        if (shuffle)
            Collections.shuffle(ignites);

        // Position the first node in the stop order: last when it must stay alive until
        // the end (sameCoord), first otherwise so it is stopped early.
        if (sameCoord)
            // Add last.
            ignites.add(ignite1);
        else
            // Add first.
            ignites.add(0, ignite1);

        if (!sameCoord && shuffle)
            Collections.shuffle(ignites);

        info(">>> Finished checking nodes [keyCnt=" + keyCnt + ", nodeCnt=" + nodeCnt + ", grids=" + U.grids2names(ignites) + ']');

        Ignite last = null;

        // Stop every grid except the last one in the list, re-checking keys after each stop.
        // NOTE(review): removed unused local `final UUID nodeId = g.cluster().localNode().id();`
        // — it was never referenced.
        for (Iterator<Ignite> it = ignites.iterator(); it.hasNext(); ) {
            Ignite g = it.next();

            if (!it.hasNext()) {
                last = g;

                break;
            }

            it.remove();

            info("Before grid stop [name=" + g.name() + ", fullTop=" + top2string(ignites));

            stopGrid(g.name());

            info(">>> Waiting for preload futures [leftNode=" + g.name() + ", remaining=" + U.grids2names(ignites) + ']');

            awaitPartitionMapExchange();

            info("After grid stop [name=" + g.name() + ", fullTop=" + top2string(ignites));

            // Check all left nodes.
            for (Ignite gg : ignites) {
                IgniteCache<Integer, String> c = gg.cache(DEFAULT_CACHE_NAME);

                checkKeys(c, keyCnt, ignites);
            }
        }

        assert last != null;

        IgniteCache<Integer, String> lastCache = last.cache(DEFAULT_CACHE_NAME);

        GridDhtCacheAdapter<Integer, String> dht = dht(lastCache);

        Affinity<Integer> aff = affinity(lastCache);

        // For every key owned by the surviving node, no partition may still be rebalancing
        // (MOVING) and every local partition must be OWNING.
        for (int i = 0; i < keyCnt; i++) {
            if (aff.mapPartitionToPrimaryAndBackups(aff.partition(i)).contains(last.cluster().localNode())) {
                GridDhtPartitionTopology top = dht.topology();

                for (GridDhtLocalPartition p : top.localPartitions()) {
                    Collection<ClusterNode> moving = top.moving(p.id());

                    assert moving.isEmpty() : "Nodes with partition in moving state [part=" + p + ", moving=" + moving + ']';
                    assert OWNING == p.state() : "Invalid partition state for partition [part=" + p + ", map=" + top.partitionMap(false) + ']';
                }
            }
        }
    }
    catch (Error | Exception e) {
        error("Test failed.", e);

        throw e;
    }
    finally {
        stopAllGrids();
    }
}
Aggregations