Use of org.apache.ignite.testframework.junits.WithSystemProperty in project ignite by apache.
The class GridCacheRebalancingWithAsyncClearingTest, method testPartitionClearingNotBlockExchange.
/**
 * Test that partition clearing doesn't block partition map exchange.
 *
 * @throws Exception If failed.
 */
@Test
@WithSystemProperty(key = IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE, value = "1")
public void testPartitionClearingNotBlockExchange() throws Exception {
    IgniteEx ig = (IgniteEx)startGrids(3);

    ig.cluster().active(true);

    // A high number of keys triggers long partition eviction.
    final int keysCnt = SF.applyLB(300_000, 10_000);

    try (IgniteDataStreamer<Integer, Integer> ds = ig.dataStreamer(CACHE_NAME)) {
        log.info("Writing initial data...");

        ds.allowOverwrite(true);

        for (int k = 1; k <= keysCnt; k++) {
            ds.addData(k, k);

            if (k % 10_000 == 0)
                log.info("Written " + k + " entities.");
        }

        log.info("Writing initial data finished.");
    }

    stopGrid(2);

    awaitPartitionMapExchange();

    try (IgniteDataStreamer<Integer, Integer> ds = ig.dataStreamer(CACHE_NAME)) {
        log.info("Writing external data...");

        ds.allowOverwrite(true);

        for (int k = 1; k <= keysCnt; k++) {
            ds.addData(k, 2 * k);

            if (k % 10_000 == 0)
                log.info("Written " + k + " entities.");
        }

        log.info("Writing external data finished.");
    }

    IgniteCache<Integer, Integer> cache = ig.cache(CACHE_NAME);

    forceCheckpoint();

    GridCachePartitionExchangeManager exchangeManager = ig.cachex(CACHE_NAME).context().shared().exchange();

    long topVer = exchangeManager.lastTopologyFuture().topologyVersion().topologyVersion();

    startGrid(2);

    // Check that the exchange future is completed and the topology version is incremented.
    GridDhtPartitionsExchangeFuture fut1 = exchangeManager.lastTopologyFuture();

    fut1.get();

    Assert.assertEquals(topVer + 1, fut1.topologyVersion().topologyVersion());

    // Check that the additional exchange didn't interfere with asynchronous partition eviction.
    boolean asyncClearingIsRunning = false;

    for (int p = 0; p < PARTITIONS_CNT; p++) {
        GridDhtLocalPartition part = grid(2).cachex(CACHE_NAME).context().topology().localPartition(p);

        if (part != null && part.state() == GridDhtPartitionState.MOVING && part.isClearing()) {
            asyncClearingIsRunning = true;

            break;
        }
    }

    Assert.assertTrue("Async clearing is not running at the moment", asyncClearingIsRunning);

    // Check that stopping and starting a node didn't break the rebalance process.
    stopGrid(1);
    startGrid(1);

    // Wait for rebalance on all nodes.
    for (Ignite ignite : G.allGrids())
        ignite.cache(CACHE_NAME).rebalance().get();

    // Check there is no data loss.
    for (int k = 1; k <= keysCnt; k++) {
        Integer val = cache.get(k);

        Assert.assertNotNull("Value for " + k + " is null", val);
        Assert.assertEquals("Check failed for " + k + " " + val, 2 * k, (int)val);
    }
}
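The test above relies on the framework behavior of @WithSystemProperty rather than setting system properties by hand. Below is a minimal sketch of how the annotation is typically used, assuming the usual org.apache.ignite.testframework imports; the class and property names are hypothetical and not taken from the Ignite sources. The framework sets each property before the annotated test runs and restores the previous value afterwards, and class-level annotations apply to every test in the class.

// Illustrative only: ExampleSystemPropertyTest and the property names are made up.
@WithSystemProperty(key = "IGNITE_EXAMPLE_FLAG", value = "true") // Applies to every test in the class.
public class ExampleSystemPropertyTest extends GridCommonAbstractTest {
    /** The properties are visible inside the test body and restored when it finishes. */
    @Test
    @WithSystemProperty(key = "IGNITE_EXAMPLE_LIMIT", value = "1") // Applies to this test only.
    public void testWithOverriddenProperties() throws Exception {
        assertEquals("true", System.getProperty("IGNITE_EXAMPLE_FLAG"));
        assertEquals("1", System.getProperty("IGNITE_EXAMPLE_LIMIT"));
    }
}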
Use of org.apache.ignite.testframework.junits.WithSystemProperty in project ignite by apache.
The class TxPartitionCounterStateConsistencyTest, method testClearVersion.
/**
 * Tests that clear-version comparison works correctly during partition clearing caused by full preloading.
 */
@Test
@WithSystemProperty(key = IGNITE_PREFER_WAL_REBALANCE, value = "false")
public void testClearVersion() throws Exception {
    backups = 1;

    IgniteEx g0 = startGrid(0);
    IgniteEx g1 = startGrid(1);

    g0.cluster().state(ClusterState.ACTIVE);

    IgniteCache<Integer, Integer> cache = g0.cache(DEFAULT_CACHE_NAME);

    int[] parts = g0.affinity(DEFAULT_CACHE_NAME).primaryPartitions(g0.localNode());

    int prim = parts[0];

    List<Integer> keys = partitionKeys(cache, prim, 2_000, 0);

    long topVer = g0.cachex(DEFAULT_CACHE_NAME).context().topology().readyTopologyVersion().topologyVersion();

    // Move the version forward.
    int c = 100_000;

    // Simulate load by incrementing the local version.
    GridCacheVersion next = null;

    while (c-- > 0)
        next = g0.context().cache().context().versions().next(topVer);

    assertTrue(next.order() - U.currentTimeMillis() > 60_000);

    for (Integer key : keys)
        cache.put(key, key);

    assertPartitionsSame(idleVerify(g0, DEFAULT_CACHE_NAME));

    g1.close();

    for (Integer key : keys)
        cache.remove(key);

    g1 = startGrid(1);
    g1.close();

    g1 = startGrid(1);

    awaitPartitionMapExchange();

    assertPartitionsSame(idleVerify(g0, DEFAULT_CACHE_NAME));
}
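A flag supplied through the annotation, such as IGNITE_PREFER_WAL_REBALANCE above, is read inside Ignite through IgniteSystemProperties. The sketch below shows that read pattern for illustration only; it assumes the constant is the one defined in IgniteSystemProperties (the usual source of the test's static import) and is not the code path exercised by this test.

// Read a boolean system property with a default value. With the annotation above,
// the property is set to "false", so this call returns false despite the default of true.
boolean preferWal = IgniteSystemProperties.getBoolean(
    IgniteSystemProperties.IGNITE_PREFER_WAL_REBALANCE, true);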
Use of org.apache.ignite.testframework.junits.WithSystemProperty in project ignite by apache.
The class PartitionEvictionOrderTest, method testSyncCachesEvictedAtFirst.
/**
 * Tests that {@link CacheRebalanceMode#SYNC} caches are evicted first.
 */
@Test
@WithSystemProperty(key = IgniteSystemProperties.IGNITE_EVICTION_PERMITS, value = "1")
@WithSystemProperty(key = IGNITE_PDS_WAL_REBALANCE_THRESHOLD, value = "500000")
public void testSyncCachesEvictedAtFirst() throws Exception {
    IgniteEx node0 = startGrid(0);

    node0.cluster().state(ACTIVE);

    IgniteEx node1 = startGrid(1);

    node0.cluster().setBaselineTopology(node1.cluster().topologyVersion());

    GridCacheAdapter<Object, Object> utilCache0 = grid(0).context().cache().internalCache(CU.UTILITY_CACHE_NAME);

    IgniteCache<Object, Object> cache = node0.getOrCreateCache(DEFAULT_CACHE_NAME);

    for (int i = 0; i < 1000; i++) {
        utilCache0.put(i, i);
        cache.put(i, i);
    }

    awaitPartitionMapExchange();

    stopGrid(0);

    GridCacheAdapter<Object, Object> utilCache1 = grid(1).context().cache().internalCache(CU.UTILITY_CACHE_NAME);

    IgniteInternalCache<Object, Object> cache2 = grid(1).context().cache().cache(DEFAULT_CACHE_NAME);

    // Update entries while node 0 is down so that its partitions must be cleared and preloaded on restart.
    for (int i = 0; i < 2000; i++) {
        try {
            cache2.put(i, i + 1);
            utilCache1.put(i, i + 1);
        }
        catch (IgniteCheckedException e) {
            e.printStackTrace();
        }
    }

    List<T2<Integer, Integer>> evictionOrder = Collections.synchronizedList(new ArrayList<>());

    // Intercept partition clearing on the restarting node to record the (group, partition) clearing order.
    TestDependencyResolver rslvr = new TestDependencyResolver(new DependencyResolver() {
        @Override public <T> T resolve(T instance) {
            if (instance instanceof GridDhtPartitionTopologyImpl) {
                GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl)instance;

                top.partitionFactory((ctx, grp, id, recovery) -> new GridDhtLocalPartition(ctx, grp, id, recovery) {
                    @Override public long clearAll(EvictionContext evictionCtx) throws NodeStoppingException {
                        evictionOrder.add(new T2<>(grp.groupId(), id));

                        return super.clearAll(evictionCtx);
                    }
                });
            }

            return instance;
        }
    });

    startGrid(0, rslvr);

    awaitPartitionMapExchange(true, true, null);

    // All partitions of both caches must have been cleared, with the SYNC utility cache cleared first.
    assertEquals(utilCache0.affinity().partitions() + grid(0).cachex(DEFAULT_CACHE_NAME).affinity().partitions(),
        evictionOrder.size());

    for (int i = 0; i < utilCache0.affinity().partitions(); i++)
        assertEquals(CU.UTILITY_CACHE_GROUP_ID, evictionOrder.get(i).get1().intValue());
}
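For context, the utility cache relied on above is rebalanced in CacheRebalanceMode.SYNC mode, which is what the test expects to be cleared ahead of ordinary caches. A user cache can be given the same mode as in the brief sketch below; the configuration is illustrative, the cache name is made up, and `ignite` stands for any started Ignite instance.

// Hypothetical cache with SYNC rebalance mode; per the behavior verified above, its
// partitions would be cleared before those of caches using the default ASYNC mode.
CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<>("syncExampleCache");

ccfg.setRebalanceMode(CacheRebalanceMode.SYNC);
ccfg.setBackups(1);

ignite.getOrCreateCache(ccfg);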
Use of org.apache.ignite.testframework.junits.WithSystemProperty in project ignite by apache.
The class WaitForBackupsOnShutdownSystemPropertyTest, method testWaitForBackupsOnShutdownPropertyExists.
/**
 * Checks that the deprecation message is printed if IGNITE_WAIT_FOR_BACKUPS_ON_SHUTDOWN is used.
 *
 * @throws Exception If failed.
 */
@Test
@WithSystemProperty(key = IGNITE_WAIT_FOR_BACKUPS_ON_SHUTDOWN, value = "false")
public void testWaitForBackupsOnShutdownPropertyExists() throws Exception {
    LogListener lnsr = LogListener.matches("IGNITE_WAIT_FOR_BACKUPS_ON_SHUTDOWN system property " +
        "is deprecated and will be removed in a future version. Use ShutdownPolicy instead.").build();

    listeningLog.registerListener(lnsr);

    startGrid();

    assertTrue("The message was not found", lnsr.check());
}
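The listeningLog field used above is declared elsewhere in the test class and does not appear on this page. Roughly, it is a ListeningTestLogger handed to the node configuration so that LogListener matchers can observe messages emitted during startup. The sketch below shows that assumed wiring; the exact ListeningTestLogger constructor varies between Ignite versions.

/** Test logger that LogListener instances are registered with; assumed to be a field of the test class. */
private final ListeningTestLogger listeningLog = new ListeningTestLogger(log);

/** {@inheritDoc} */
@Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
    return super.getConfiguration(igniteInstanceName).setGridLogger(listeningLog);
}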
Use of org.apache.ignite.testframework.junits.WithSystemProperty in project ignite by apache.
The class IgnitePdsPageReplacementDuringPartitionClearTest, method testPageEvictionOnNodeStart.
/**
 * @throws Exception if failed.
 */
@Test
@WithSystemProperty(key = GridCacheDatabaseSharedManager.IGNITE_PDS_CHECKPOINT_TEST_SKIP_SYNC, value = "true")
public void testPageEvictionOnNodeStart() throws Exception {
    cleanPersistenceDir();

    startGrids(2);

    AtomicBoolean stop = new AtomicBoolean(false);

    try {
        Ignite ig = ignite(0);

        ig.cluster().active(true);
        ig.cluster().baselineAutoAdjustEnabled(false);

        int last = loadDataUntilPageReplacement(ignite(0), ignite(1));

        IgniteInternalFuture<?> fut = loadAsync(ig, stop, last);

        EvictionListener evictLsnr = new EvictionListener();

        ignite(0).events().localListen(evictLsnr, EVT_CACHE_REBALANCE_PART_UNLOADED);
        ignite(1).events().localListen(evictLsnr, EVT_CACHE_REBALANCE_PART_UNLOADED);

        IgniteEx igNew = startGrid(2);

        info(">>>>>>>>>>>");
        info(">>>>>>>>>>>");
        info(">>>>>>>>>>>");

        igNew.cluster().setBaselineTopology(3);

        awaitPartitionMapExchange();

        Map<ClusterNode, GridLongList> affinityAfter = allPartitions(igNew);

        evictLsnr.waitPartitionsEvicted(igNew.cluster().localNode(), affinityAfter);

        stop.set(true);

        fut.get();
    }
    finally {
        stop.set(true);

        stopAllGrids();

        cleanPersistenceDir();
    }
}
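The EvictionListener above receives EVT_CACHE_REBALANCE_PART_UNLOADED events, which, like all Ignite events, are disabled unless included in the node configuration (the test presumably enables them in configuration code not shown on this page). A minimal standalone sketch of enabling and listening for this event, independent of the test, is:

// Illustrative standalone usage: enable partition-unload events and listen for them locally.
IgniteConfiguration cfg = new IgniteConfiguration()
    .setIncludeEventTypes(EventType.EVT_CACHE_REBALANCE_PART_UNLOADED);

try (Ignite ignite = Ignition.start(cfg)) {
    ignite.events().localListen(evt -> {
        System.out.println("Partition unloaded: " + evt);

        return true; // Keep the listener registered.
    }, EventType.EVT_CACHE_REBALANCE_PART_UNLOADED);
}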