Use of org.apache.geode.cache.DiskWriteAttributesFactory in project geode by apache.
In class DiskWriteAttributesJUnitTest, method testGetDefaultNonRollingAsync.
/**
 * Test method for 'org.apache.geode.cache.DiskWriteAttributes.getDefaultNonRollingAsync()'
 */
@Test
public void testGetDefaultNonRollingAsync() {
  DiskWriteAttributesFactory dwaf = new DiskWriteAttributesFactory();
  dwaf.setRollOplogs(false);
  DiskWriteAttributes dwa = dwaf.create();
  assertTrue(!dwa.isSynchronous());
  assertTrue(!dwa.isRollOplogs());
}
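In practice, the attributes built by the factory are attached to a region definition. A minimal sketch, assuming the deprecated AttributesFactory.setDiskWriteAttributes setter is available as in Geode 1.x; the class name and data policy are illustrative and not part of the test above:

// Sketch: build asynchronous, non-rolling disk write attributes and attach them
// to region attributes via the deprecated AttributesFactory.setDiskWriteAttributes.
import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.DiskWriteAttributes;
import org.apache.geode.cache.DiskWriteAttributesFactory;
import org.apache.geode.cache.RegionAttributes;

public class AsyncNonRollingAttributesSketch {
  public static RegionAttributes<String, String> buildAttributes() {
    DiskWriteAttributesFactory dwaf = new DiskWriteAttributesFactory();
    dwaf.setSynchronous(false); // asynchronous writes, matching the assertion in the test
    dwaf.setRollOplogs(false);  // no oplog rolling
    DiskWriteAttributes dwa = dwaf.create();

    AttributesFactory<String, String> af = new AttributesFactory<>();
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE); // illustrative data policy
    af.setDiskWriteAttributes(dwa); // deprecated setter, shown only for illustration
    return af.create();
  }
}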
Use of org.apache.geode.cache.DiskWriteAttributesFactory in project geode by apache.
In class DiskWriteAttributesJUnitTest, method testGetDefaultRollingSync.
/**
 * Test method for 'org.apache.geode.cache.DiskWriteAttributes.getDefaultRollingSync()'
 */
@Test
public void testGetDefaultRollingSync() {
  DiskWriteAttributesFactory dwaf = new DiskWriteAttributesFactory();
  dwaf.setSynchronous(true);
  DiskWriteAttributes dwa = dwaf.create();
  assertTrue(dwa.isSynchronous());
  assertTrue(dwa.isRollOplogs());
}
Use of org.apache.geode.cache.DiskWriteAttributesFactory in project geode by apache.
In class DiskWriteAttributesJUnitTest, method testGetDefaultRollingAsync.
/**
 * Test method for 'org.apache.geode.cache.DiskWriteAttributes.getDefaultRollingAsync()'
 */
@Test
public void testGetDefaultRollingAsync() {
  DiskWriteAttributesFactory dwaf = new DiskWriteAttributesFactory();
  DiskWriteAttributes dwa = dwaf.create();
  assertTrue(!dwa.isSynchronous());
  assertTrue(dwa.isRollOplogs());
}
Use of org.apache.geode.cache.DiskWriteAttributesFactory in project geode by apache.
In class DiskWriteAttributesJUnitTest, method testGetDefaultSync.
/**
 * Test method for 'org.apache.geode.cache.DiskWriteAttributes.getDefaultSync()'
 */
@Test
public void testGetDefaultSync() {
  DiskWriteAttributesFactory dwaf = new DiskWriteAttributesFactory();
  dwaf.setSynchronous(true);
  DiskWriteAttributes dwa = dwaf.create();
  assertTrue(dwa.isSynchronous());
  assertTrue(dwa.isRollOplogs());
}
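Taken together, the four tests cover the synchronous/asynchronous and rolling/non-rolling combinations. A small helper, sketched using only the factory methods exercised above (the class and method names here are illustrative), can produce any of the four variants:

// Sketch: builds one of the four sync/roll combinations covered by the tests above.
import org.apache.geode.cache.DiskWriteAttributes;
import org.apache.geode.cache.DiskWriteAttributesFactory;

public final class DiskWriteAttributesVariants {

  public static DiskWriteAttributes build(boolean synchronous, boolean rollOplogs) {
    DiskWriteAttributesFactory dwaf = new DiskWriteAttributesFactory();
    dwaf.setSynchronous(synchronous);
    dwaf.setRollOplogs(rollOplogs);
    return dwaf.create();
  }

  public static void main(String[] args) {
    DiskWriteAttributes rollingSync = build(true, true);
    DiskWriteAttributes nonRollingAsync = build(false, false);
    System.out.println(rollingSync.isSynchronous());    // true
    System.out.println(nonRollingAsync.isRollOplogs()); // false
  }
}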
Use of org.apache.geode.cache.DiskWriteAttributesFactory in project geode by apache.
In class CacheXml66DUnitTest, method testPartitionedRegionAttributesForEviction.
/**
 * Tests that a partitioned region can be created with eviction and expiration attributes set
 * programmatically.
 */
@Test
public void testPartitionedRegionAttributesForEviction() throws Exception {
  final int redundantCopies = 1;
  CacheCreation cache = new CacheCreation();
  if (getGemFireVersion().equals(CacheXml.VERSION_6_0)) {
    ResourceManagerCreation rm = new ResourceManagerCreation();
    rm.setCriticalHeapPercentage(95);
    cache.setResourceManagerCreation(rm);
  }
  RegionAttributesCreation attrs = new RegionAttributesCreation(cache);
  attrs.setStatisticsEnabled(true);
  RegionAttributes rootAttrs = null;
  ExpirationAttributes expiration = new ExpirationAttributes(60, ExpirationAction.DESTROY);
  CacheXMLPartitionResolver partitionResolver = new CacheXMLPartitionResolver();
  Properties params = new Properties();
  params.setProperty("initial-index-value", "1000");
  params.setProperty("secondary-index-value", "5000");
  partitionResolver.init(params);
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setRedundantCopies(redundantCopies);
  paf.setTotalMaxMemory(500);
  paf.setLocalMaxMemory(100);
  paf.setPartitionResolver(partitionResolver);
  AttributesFactory fac = new AttributesFactory(attrs);
  // TODO: Move test back to using LRUHeap when config issues have settled
  // if (getGemFireVersion().equals(CacheXml.GEMFIRE_6_0)) {
  //   fac.setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes(null,
  //       EvictionAction.OVERFLOW_TO_DISK));
  // } else {
  fac.setEvictionAttributes(
      EvictionAttributes.createLRUMemoryAttributes(100, null, EvictionAction.OVERFLOW_TO_DISK));
  // }
  fac.setEntryTimeToLive(expiration);
  fac.setEntryIdleTimeout(expiration);
  DiskWriteAttributesFactory dwaf = new DiskWriteAttributesFactory();
  dwaf.setSynchronous(true);
  fac.setPartitionAttributes(paf.create());
  rootAttrs = fac.create();
  cache.createRegion("parRoot", rootAttrs);
  Region r = cache.getRegion("parRoot");
  assertNotNull(r);
  assertEquals(r.getAttributes().getPartitionAttributes().getRedundantCopies(), redundantCopies);
  assertEquals(r.getAttributes().getPartitionAttributes().getLocalMaxMemory(), 100);
  assertEquals(r.getAttributes().getPartitionAttributes().getTotalMaxMemory(), 500);
  assertEquals(r.getAttributes().getPartitionAttributes().getPartitionResolver(),
      partitionResolver);
  assertEquals(r.getAttributes().getEntryIdleTimeout().getTimeout(), expiration.getTimeout());
  assertEquals(r.getAttributes().getEntryTimeToLive().getTimeout(), expiration.getTimeout());
  testXml(cache);
  Cache c = getCache();
  assertNotNull(c);
  Region region = c.getRegion("parRoot");
  assertNotNull(region);
  RegionAttributes regionAttrs = region.getAttributes();
  PartitionAttributes pa = regionAttrs.getPartitionAttributes();
  EvictionAttributes ea = regionAttrs.getEvictionAttributes();
  assertEquals(pa.getRedundantCopies(), 1);
  assertEquals(pa.getLocalMaxMemory(), 100);
  assertEquals(pa.getTotalMaxMemory(), 500);
  assertNotNull(pa.getPartitionResolver().getClass());
  assertEquals(pa.getPartitionResolver(), partitionResolver);
  assertEquals(regionAttrs.getEntryIdleTimeout().getTimeout(), expiration.getTimeout());
  assertEquals(regionAttrs.getEntryTimeToLive().getTimeout(), expiration.getTimeout());
  // TODO: Move test back to using LRUHeap when config issues have settled
  // if (getGemFireVersion().equals(CacheXml.GEMFIRE_6_0)) {
  //   assertIndexDetailsEquals(ea.getAlgorithm(), EvictionAlgorithm.LRU_HEAP);
  // } else {
  assertEquals(ea.getAlgorithm(), EvictionAlgorithm.LRU_MEMORY);
  // }
  assertEquals(ea.getAction(), EvictionAction.OVERFLOW_TO_DISK);
}
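Note that the DiskWriteAttributesFactory configured in this test is never attached to the region attributes, and DiskWriteAttributes is deprecated in current Geode in favor of named disk stores. A rough equivalent using DiskStoreFactory and RegionFactory, sketched with illustrative names and sizes (assumes a running Cache; not taken from the test above):

// Sketch: region overflowing to disk through a named DiskStore, the modern
// replacement for DiskWriteAttributes-based configuration.
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class DiskStoreOverflowSketch {
  public static Region<String, String> createOverflowRegion() {
    Cache cache = new CacheFactory().create();
    DiskStore store = cache.createDiskStoreFactory()
        .setMaxOplogSize(64)       // MB; illustrative value
        .setAutoCompact(true)      // roughly the role rollOplogs played
        .create("parRootStore");   // hypothetical disk store name
    return cache.<String, String>createRegionFactory(RegionShortcut.PARTITION_OVERFLOW)
        .setDiskStoreName(store.getName())
        .setDiskSynchronous(true)  // synchronous writes, as dwaf.setSynchronous(true) intended
        .create("parRoot");
  }
}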