Use of org.apache.ignite.testframework.ListeningTestLogger in project ignite by apache.
The class CacheDataRegionConfigurationTest, method testWarningIfDynamicCacheOverheadExceedsThreshold.
/**
* Verifies that a warning message is printed to the logs if the user tries to start a dynamic cache in a data region
* whose overhead (e.g. metapages for partitions) occupies a larger share of the region than the defined threshold.
*
* @throws Exception If failed.
*/
@Test
public void testWarningIfDynamicCacheOverheadExceedsThreshold() throws Exception {
String filteredSrvName = "srv2";
int numOfPartitions = 512;
int partitionsMetaMemoryChunk = U.sizeInMegabytes(512 * DFLT_PAGE_SIZE);
DataRegionConfiguration smallRegionCfg = new DataRegionConfiguration();
smallRegionCfg.setName("smallRegion");
smallRegionCfg.setInitialSize(DFLT_MEM_PLC_SIZE);
smallRegionCfg.setMaxSize(DFLT_MEM_PLC_SIZE);
smallRegionCfg.setPersistenceEnabled(true);
// explicit default data region configuration to test possible NPE case
DataRegionConfiguration defaultRegionCfg = new DataRegionConfiguration();
defaultRegionCfg.setName("defaultRegion");
defaultRegionCfg.setInitialSize(DFLT_MEM_PLC_SIZE);
defaultRegionCfg.setMaxSize(DFLT_MEM_PLC_SIZE);
defaultRegionCfg.setPersistenceEnabled(true);
memCfg = new DataStorageConfiguration();
memCfg.setDefaultDataRegionConfiguration(defaultRegionCfg);
memCfg.setDataRegionConfigurations(smallRegionCfg);
// Checkpoint frequency of one hour guarantees that checkpoints are triggered by the 'dirty pages amount' trigger, not by the timer.
memCfg.setCheckpointFrequency(60 * 60 * 1000);
ListeningTestLogger srv0Logger = new ListeningTestLogger(false, null);
LogListener cacheGrpLsnr0 = matches("Cache group 'default' brings high overhead").build();
LogListener dataRegLsnr0 = matches("metainformation in data region 'defaultRegion'").build();
LogListener partsInfoLsnr0 = matches(numOfPartitions + " partitions, " + DFLT_PAGE_SIZE + " bytes per partition, " + partitionsMetaMemoryChunk + " MBs total").build();
srv0Logger.registerAllListeners(cacheGrpLsnr0, dataRegLsnr0, partsInfoLsnr0);
logger = srv0Logger;
IgniteEx ignite0 = startGrid("srv0");
ListeningTestLogger srv1Logger = new ListeningTestLogger(false, null);
LogListener cacheGrpLsnr1 = matches("Cache group 'default' brings high overhead").build();
LogListener dataRegLsnr1 = matches("metainformation in data region 'defaultRegion'").build();
LogListener partsInfoLsnr1 = matches(numOfPartitions + " partitions, " + DFLT_PAGE_SIZE + " bytes per partition, " + partitionsMetaMemoryChunk + " MBs total").build();
srv1Logger.registerAllListeners(cacheGrpLsnr1, dataRegLsnr1, partsInfoLsnr1);
logger = srv1Logger;
startGrid("srv1");
ListeningTestLogger srv2Logger = new ListeningTestLogger(false, null);
LogListener cacheGrpLsnr2 = matches("Cache group 'default' brings high overhead").build();
srv2Logger.registerListener(cacheGrpLsnr2);
logger = srv2Logger;
startGrid("srv2");
ignite0.cluster().active(true);
IgniteEx cl = startGrid("client01");
CacheConfiguration<Object, Object> manyPartitionsCache = new CacheConfiguration<>(DEFAULT_CACHE_NAME);
manyPartitionsCache.setAffinity(new RendezvousAffinityFunction(false, numOfPartitions));
manyPartitionsCache.setNodeFilter(new NodeNameNodeFilter(filteredSrvName));
manyPartitionsCache.setBackups(1);
cl.createCache(manyPartitionsCache);
// srv0 and srv1 print a warning to the log because the metapage overhead threshold for the cache in the default cache group is exceeded.
assertTrue(cacheGrpLsnr0.check());
assertTrue(dataRegLsnr0.check());
assertTrue(partsInfoLsnr0.check());
assertTrue(cacheGrpLsnr1.check());
assertTrue(dataRegLsnr1.check());
assertTrue(partsInfoLsnr1.check());
// srv2 doesn't print the warning because the node filter excludes it from the affinity nodes.
assertFalse(cacheGrpLsnr2.check());
}
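Each test above points a fresh ListeningTestLogger at a 'logger' field right before calling startGrid(), so every node gets its own listening logger. The snippets do not show how that field reaches the node; below is a minimal sketch of the assumed getConfiguration() override (the field names logger, memCfg, ccfg and failureHnd mirror the assignments in these tests, but this override itself is an assumption, not part of the quoted code):

@Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
    IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);

    // Route the node's log output through the listening logger so registered LogListeners see every message.
    if (logger != null)
        cfg.setGridLogger(logger);

    // Data storage, cache and failure-handler settings prepared by the individual tests.
    if (memCfg != null)
        cfg.setDataStorageConfiguration(memCfg);

    if (ccfg != null)
        cfg.setCacheConfiguration(ccfg);

    if (failureHnd != null)
        cfg.setFailureHandler(failureHnd);

    return cfg;
}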
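The NodeNameNodeFilter attached to the cache above is also not part of this snippet; presumably it is a plain name-based IgnitePredicate<ClusterNode>. A hypothetical sketch follows (the ATTR_IGNITE_INSTANCE_NAME attribute lookup, statically imported from IgniteNodeAttributes, is an assumption):

/** Filters out the node whose Ignite instance name contains the given string. Hypothetical sketch. */
private static class NodeNameNodeFilter implements IgnitePredicate<ClusterNode> {
    /** Name fragment of the node to exclude. */
    private final String name;

    /** @param name Name fragment of the node to exclude. */
    NodeNameNodeFilter(String name) {
        this.name = name;
    }

    /** {@inheritDoc} */
    @Override public boolean apply(ClusterNode node) {
        // Keep only nodes whose instance name does not contain the filtered fragment.
        return !node.attribute(ATTR_IGNITE_INSTANCE_NAME).toString().contains(name);
    }
}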
Use of org.apache.ignite.testframework.ListeningTestLogger in project ignite by apache.
The class CacheDataRegionConfigurationTest, method testNoWarningIfCacheConfigurationDoesntBreakThreshold.
/**
* Negative test: verifies that no warning is printed to the logs if the user starts static and dynamic caches
* in a data region with enough capacity to host them;
* in other words, no metapage-ratio thresholds are exceeded.
*
* @throws Exception If failed.
*/
@Test
public void testNoWarningIfCacheConfigurationDoesntBreakThreshold() throws Exception {
DataRegionConfiguration defaultRegionCfg = new DataRegionConfiguration();
defaultRegionCfg.setInitialSize(DFLT_MEM_PLC_SIZE);
defaultRegionCfg.setMaxSize(DFLT_MEM_PLC_SIZE);
defaultRegionCfg.setPersistenceEnabled(true);
memCfg = new DataStorageConfiguration();
memCfg.setDefaultDataRegionConfiguration(defaultRegionCfg);
// Checkpoint frequency of one hour guarantees that checkpoints are triggered by the 'dirty pages amount' trigger, not by the timer.
memCfg.setCheckpointFrequency(60 * 60 * 1000);
CacheConfiguration<Object, Object> fewPartitionsCache = new CacheConfiguration<>(DEFAULT_CACHE_NAME);
// 16 partitions keep the metapage overhead well below the threshold, even counting backup partitions.
fewPartitionsCache.setAffinity(new RendezvousAffinityFunction(false, 16));
fewPartitionsCache.setBackups(1);
ccfg = fewPartitionsCache;
ListeningTestLogger srv0Logger = new ListeningTestLogger(false, null);
LogListener cacheGrpLsnr0 = matches("Cache group 'default' brings high overhead").build();
LogListener dynamicGrpLsnr = matches("Cache group 'dynamicCache' brings high overhead").build();
srv0Logger.registerListener(cacheGrpLsnr0);
srv0Logger.registerListener(dynamicGrpLsnr);
logger = srv0Logger;
IgniteEx ignite0 = startGrid("srv0");
ignite0.cluster().active(true);
assertFalse(cacheGrpLsnr0.check());
ignite0.createCache(new CacheConfiguration<>("dynamicCache").setAffinity(new RendezvousAffinityFunction(false, 16)));
assertFalse(dynamicGrpLsnr.check());
}
Use of org.apache.ignite.testframework.ListeningTestLogger in project ignite by apache.
The class CacheDataRegionConfigurationTest, method testWarningIfStaticCacheOverheadExceedsThreshold.
/**
* Verifies that a warning message is printed to the logs if the user tries to start a static cache in a data region
* whose overhead (e.g. metapages for partitions) occupies a larger share of the region than the defined threshold (15%).
*
* @throws Exception If failed.
*/
@Test
public void testWarningIfStaticCacheOverheadExceedsThreshold() throws Exception {
DataRegionConfiguration smallRegionCfg = new DataRegionConfiguration();
int numOfPartitions = 512;
int partitionsMetaMemoryChunk = U.sizeInMegabytes(512 * DFLT_PAGE_SIZE);
smallRegionCfg.setInitialSize(DFLT_MEM_PLC_SIZE);
smallRegionCfg.setMaxSize(DFLT_MEM_PLC_SIZE);
smallRegionCfg.setPersistenceEnabled(true);
smallRegionCfg.setName("smallRegion");
memCfg = new DataStorageConfiguration();
memCfg.setDefaultDataRegionConfiguration(smallRegionCfg);
// Checkpoint frequency of one hour guarantees that checkpoints are triggered by the 'dirty pages amount' trigger, not by the timer.
memCfg.setCheckpointFrequency(60 * 60 * 1000);
CacheConfiguration<Object, Object> manyPartitionsCache = new CacheConfiguration<>(DEFAULT_CACHE_NAME);
// 512 partitions are enough to exceed the threshold once primary and backup partitions are both counted.
manyPartitionsCache.setAffinity(new RendezvousAffinityFunction(false, numOfPartitions));
manyPartitionsCache.setBackups(1);
ccfg = manyPartitionsCache;
ListeningTestLogger srv0Logger = new ListeningTestLogger(false, null);
LogListener cacheGrpLsnr0 = matches("Cache group 'default' brings high overhead").build();
LogListener dataRegLsnr0 = matches("metainformation in data region 'smallRegion'").build();
LogListener partsInfoLsnr0 = matches(numOfPartitions + " partitions, " + DFLT_PAGE_SIZE + " bytes per partition, " + partitionsMetaMemoryChunk + " MBs total").build();
srv0Logger.registerAllListeners(cacheGrpLsnr0, dataRegLsnr0, partsInfoLsnr0);
logger = srv0Logger;
IgniteEx ignite0 = startGrid("srv0");
ListeningTestLogger srv1Logger = new ListeningTestLogger(false, null);
LogListener cacheGrpLsnr1 = matches("Cache group 'default' brings high overhead").build();
LogListener dataRegLsnr1 = matches("metainformation in data region 'smallRegion'").build();
LogListener partsInfoLsnr1 = matches(numOfPartitions + " partitions, " + DFLT_PAGE_SIZE + " bytes per partition, " + partitionsMetaMemoryChunk + " MBs total").build();
srv1Logger.registerAllListeners(cacheGrpLsnr1, dataRegLsnr1, partsInfoLsnr1);
logger = srv1Logger;
startGrid("srv1");
ignite0.cluster().active(true);
// srv0 and srv1 print a warning to the log because the metapage overhead threshold for the cache in the default cache group is exceeded.
assertTrue(cacheGrpLsnr0.check());
assertTrue(dataRegLsnr0.check());
assertTrue(partsInfoLsnr0.check());
assertTrue(cacheGrpLsnr1.check());
assertTrue(dataRegLsnr1.check());
assertTrue(partsInfoLsnr1.check());
}
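A back-of-the-envelope check of why 512 partitions trip the threshold while the 16 partitions of the previous test do not. The concrete sizes below (4 KB page, 10 MB region) are assumptions standing in for DFLT_PAGE_SIZE and DFLT_MEM_PLC_SIZE; only the proportions matter:

public class PartitionOverheadMath {
    public static void main(String[] args) {
        long pageSize = 4 * 1024;               // assumed page size (stand-in for DFLT_PAGE_SIZE)
        long regionSize = 10L * 1024 * 1024;    // assumed region max size (stand-in for DFLT_MEM_PLC_SIZE)

        long overhead512 = 512 * pageSize;      // one metapage per partition -> 2 MB
        long overhead16 = 16 * pageSize;        // -> 64 KB

        // ~20% of the region: above the 15% threshold, so the warning is printed.
        System.out.printf("512 partitions: %.1f%%%n", 100.0 * overhead512 / regionSize);

        // ~0.6% of the region: far below the threshold, no warning.
        System.out.printf("16 partitions: %.1f%%%n", 100.0 * overhead16 / regionSize);
    }
}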
Use of org.apache.ignite.testframework.ListeningTestLogger in project ignite by apache.
The class CacheDataRegionConfigurationTest, method testWarningOnBaselineTopologyChange.
/**
* Verifies that a warning is printed to the logs if, after removing nodes from the baseline topology,
* some caches reach or cross the dangerous limit of metainformation overhead per data region.
*
* @throws Exception If failed.
*/
@Test
public void testWarningOnBaselineTopologyChange() throws Exception {
DataRegionConfiguration defaultRegionCfg = new DataRegionConfiguration();
defaultRegionCfg.setInitialSize(DFLT_MEM_PLC_SIZE);
defaultRegionCfg.setMaxSize(DFLT_MEM_PLC_SIZE);
defaultRegionCfg.setPersistenceEnabled(true);
memCfg = new DataStorageConfiguration();
memCfg.setDefaultDataRegionConfiguration(defaultRegionCfg);
// Checkpoint frequency of one hour guarantees that checkpoints are triggered by the 'dirty pages amount' trigger, not by the timer.
memCfg.setCheckpointFrequency(60 * 60 * 1000);
ListeningTestLogger srv0Logger = new ListeningTestLogger(false, null);
LogListener cacheGrpLsnr0 = matches("Cache group 'default' brings high overhead").build();
srv0Logger.registerListener(cacheGrpLsnr0);
logger = srv0Logger;
IgniteEx ignite0 = startGrid("srv0");
ListeningTestLogger srv1Logger = new ListeningTestLogger(false, null);
LogListener cacheGrpLsnr1 = matches("Cache group 'default' brings high overhead").build();
srv1Logger.registerListener(cacheGrpLsnr1);
logger = srv1Logger;
startGrid("srv1");
ignite0.cluster().active(true);
ignite0.createCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME).setDataRegionName(defaultRegionCfg.getName()).setCacheMode(CacheMode.PARTITIONED).setAffinity(new RendezvousAffinityFunction(false, 512)));
assertFalse(cacheGrpLsnr0.check());
assertFalse(cacheGrpLsnr1.check());
stopGrid("srv1");
ignite0.cluster().baselineAutoAdjustEnabled(false);
long topVer = ignite0.cluster().topologyVersion();
ignite0.cluster().setBaselineTopology(topVer);
awaitPartitionMapExchange();
assertTrue(cacheGrpLsnr0.check());
}
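A rough sketch of why the warning only shows up after srv1 leaves the baseline: with two baseline nodes each one hosts about half of the 512 partitions of this zero-backup cache, and only once srv0 owns all of them does its metapage overhead cross the threshold. Reusing the same assumed sizes as in the sketch above (drop these lines into any main method):

long pageSize = 4 * 1024;                      // assumed page size
long regionSize = 10L * 1024 * 1024;           // assumed region max size

long perNodeBefore = (512 / 2) * pageSize;     // ~256 partitions per node -> 1 MB, ~10% of the region
long perNodeAfter = 512 * pageSize;            // srv0 alone owns all 512 partitions -> 2 MB, ~20% of the region

System.out.println(100.0 * perNodeBefore / regionSize);   // below the 15% threshold -> no warning yet
System.out.println(100.0 * perNodeAfter / regionSize);    // above the threshold -> warning on srv0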
Use of org.apache.ignite.testframework.ListeningTestLogger in project ignite by apache.
The class CacheDataRegionConfigurationTest, method testNoFailNodeIfUnknownDataRegion.
/**
* Checks that nodes do not fail when they receive a request
* to create a cache with an unknown data region.
*
* @throws Exception If failed.
*/
@Test
public void testNoFailNodeIfUnknownDataRegion() throws Exception {
failureHnd = new StopNodeOrHaltFailureHandler();
ccfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME);
memCfg = new DataStorageConfiguration().setDefaultDataRegionConfiguration(new DataRegionConfiguration().setPersistenceEnabled(true));
LogListener logLsnr = matches("Possible failure suppressed accordingly to a configured handler").build();
logger = new ListeningTestLogger(false, log, logLsnr);
IgniteEx srvNode = startGrid(0);
String dataRegionName = "region";
IgniteConfiguration clientCfg = getConfiguration(getTestIgniteInstanceName(1)).setDataStorageConfiguration(new DataStorageConfiguration().setDataRegionConfigurations(new DataRegionConfiguration().setName(dataRegionName)).setDefaultDataRegionConfiguration(new DataRegionConfiguration().setPersistenceEnabled(true)));
IgniteEx clientNode = startClientGrid(optimize(clientCfg));
srvNode.cluster().active(true);
assertThrows(log, () -> {
clientNode.getOrCreateCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME + 1).setDataRegionName(dataRegionName));
return null;
}, CacheException.class, null);
assertThrows(log, () -> {
clientNode.getOrCreateCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME + 1).setDataRegionName(UUID.randomUUID().toString()));
return null;
}, CacheException.class, null);
IgniteCache<Object, Object> cacheSrv = srvNode.cache(DEFAULT_CACHE_NAME);
IgniteCache<Object, Object> cacheClient = clientNode.cache(DEFAULT_CACHE_NAME);
cacheSrv.put(1, 1);
assertEquals(1, cacheSrv.get(1));
cacheClient.put(2, 2);
assertEquals(2, cacheClient.get(2));
assertFalse(logLsnr.check());
}